author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/gpu/drm/msm
parent     Initial commit. (diff)
download   linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
           linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip

Adding upstream version 6.1.76. (tag: upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/msm')
-rw-r--r-- drivers/gpu/drm/msm/Kconfig | 165
-rw-r--r-- drivers/gpu/drm/msm/Makefile | 149
-rw-r--r-- drivers/gpu/drm/msm/NOTES | 87
-rw-r--r-- drivers/gpu/drm/msm/adreno/a2xx.xml.h | 3212
-rw-r--r-- drivers/gpu/drm/msm/adreno/a2xx_gpu.c | 550
-rw-r--r-- drivers/gpu/drm/msm/adreno/a2xx_gpu.h | 21
-rw-r--r-- drivers/gpu/drm/msm/adreno/a3xx.xml.h | 3247
-rw-r--r-- drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 604
-rw-r--r-- drivers/gpu/drm/msm/adreno/a3xx_gpu.h | 26
-rw-r--r-- drivers/gpu/drm/msm/adreno/a4xx.xml.h | 4349
-rw-r--r-- drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 730
-rw-r--r-- drivers/gpu/drm/msm/adreno/a4xx_gpu.h | 23
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx.xml.h | 5492
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_debugfs.c | 159
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 1786
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 174
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_power.c | 390
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_preempt.c | 302
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx.xml.h | 7780
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 1658
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 190
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h | 483
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 2078
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 91
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 1342
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h | 446
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_hfi.c | 734
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_hfi.h | 184
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 685
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_device.c | 746
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 1095
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.h | 443
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 2365
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h | 80
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c | 534
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h | 121
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 1626
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h | 303
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 2556
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h | 227
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h | 403
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 803
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 716
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c | 752
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c | 1043
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h | 88
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 1959
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 895
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 713
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h | 277
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c | 206
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h | 80
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c | 125
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h | 98
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c | 574
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h | 77
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c | 382
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h | 116
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c | 206
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h | 113
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h | 459
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c | 85
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h | 67
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c | 320
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h | 186
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c | 815
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h | 395
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c | 331
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h | 159
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c | 492
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h | 360
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c | 264
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h | 120
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c | 277
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h | 115
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h | 45
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 1337
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 207
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 1539
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h | 120
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 674
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 112
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h | 978
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c | 358
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h | 75
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c | 89
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h | 31
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h | 1155
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h | 1181
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c | 666
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c | 175
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c | 213
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c | 111
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 600
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h | 219
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c | 445
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c | 121
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c | 161
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c | 419
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h | 1979
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c | 1333
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h | 126
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c | 203
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 1360
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c | 764
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h | 78
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c | 370
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c | 126
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 1009
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h | 327
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c | 168
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h | 36
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c | 175
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h | 46
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | 1048
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c | 408
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h | 87
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp_common.xml.h | 111
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp_format.c | 183
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp_kms.c | 138
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp_kms.h | 142
-rw-r--r-- drivers/gpu/drm/msm/disp/msm_disp_snapshot.c | 138
-rw-r--r-- drivers/gpu/drm/msm/disp/msm_disp_snapshot.h | 144
-rw-r--r-- drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c | 196
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_audio.c | 667
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_audio.h | 74
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_aux.c | 541
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_aux.h | 23
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_catalog.c | 1096
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_catalog.h | 138
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_ctrl.c | 2049
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_ctrl.h | 40
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_debug.c | 300
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_debug.h | 74
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_display.c | 1784
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_display.h | 42
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_drm.c | 178
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_drm.h | 36
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_hpd.c | 67
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_hpd.h | 78
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_link.c | 1223
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_link.h | 156
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_panel.c | 464
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_panel.h | 99
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_parser.c | 293
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_parser.h | 153
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_power.c | 257
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_power.h | 107
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_reg.h | 308
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi.c | 280
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi.h | 164
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi.xml.h | 788
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_cfg.c | 325
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_cfg.h | 68
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_host.c | 2630
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_manager.c | 681
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h | 227
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h | 309
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h | 237
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h | 384
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h | 286
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h | 483
-rw-r--r-- drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | 131
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 855
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 134
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c | 1061
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c | 1086
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c | 148
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c | 822
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c | 660
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c | 1104
-rw-r--r-- drivers/gpu/drm/msm/dsi/sfpb.xml.h | 70
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi.c | 614
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi.h | 267
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi.xml.h | 1377
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_audio.c | 254
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 357
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c | 1428
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_hpd.c | 263
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_i2c.c | 267
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_phy.c | 217
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c | 51
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c | 765
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c | 141
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c | 44
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c | 453
-rw-r--r-- drivers/gpu/drm/msm/hdmi/qfprom.xml.h | 61
-rw-r--r-- drivers/gpu/drm/msm/msm_atomic.c | 288
-rw-r--r-- drivers/gpu/drm/msm/msm_atomic_trace.h | 110
-rw-r--r-- drivers/gpu/drm/msm/msm_atomic_tracepoints.c | 3
-rw-r--r-- drivers/gpu/drm/msm/msm_debugfs.c | 339
-rw-r--r-- drivers/gpu/drm/msm/msm_debugfs.h | 14
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.c | 1349
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.h | 558
-rw-r--r-- drivers/gpu/drm/msm/msm_fb.c | 285
-rw-r--r-- drivers/gpu/drm/msm/msm_fbdev.c | 204
-rw-r--r-- drivers/gpu/drm/msm/msm_fence.c | 116
-rw-r--r-- drivers/gpu/drm/msm/msm_fence.h | 78
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c | 1294
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.h | 336
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_prime.c | 74
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_shrinker.c | 239
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_submit.c | 981
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_vma.c | 193
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu.c | 1033
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu.h | 700
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu_devfreq.c | 377
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu_trace.h | 184
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu_tracepoints.c | 6
-rw-r--r-- drivers/gpu/drm/msm/msm_gpummu.c | 121
-rw-r--r-- drivers/gpu/drm/msm/msm_io_utils.c | 148
-rw-r--r-- drivers/gpu/drm/msm/msm_iommu.c | 399
-rw-r--r-- drivers/gpu/drm/msm/msm_kms.h | 206
-rw-r--r-- drivers/gpu/drm/msm/msm_mdss.c | 474
-rw-r--r-- drivers/gpu/drm/msm/msm_mmu.h | 62
-rw-r--r-- drivers/gpu/drm/msm/msm_perf.c | 236
-rw-r--r-- drivers/gpu/drm/msm/msm_rd.c | 428
-rw-r--r-- drivers/gpu/drm/msm/msm_ringbuffer.c | 132
-rw-r--r-- drivers/gpu/drm/msm/msm_ringbuffer.h | 120
-rw-r--r-- drivers/gpu/drm/msm/msm_submitqueue.c | 307
220 files changed, 124163 insertions, 0 deletions
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
new file mode 100644
index 000000000..3c9dfdb0b
--- /dev/null
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -0,0 +1,165 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_MSM
+ tristate "MSM DRM"
+ depends on DRM
+ depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST
+ depends on COMMON_CLK
+ depends on IOMMU_SUPPORT
+ depends on QCOM_OCMEM || QCOM_OCMEM=n
+ depends on QCOM_LLCC || QCOM_LLCC=n
+ depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n
+ select IOMMU_IO_PGTABLE
+ select QCOM_MDT_LOADER if ARCH_QCOM
+ select REGULATOR
+ select DRM_DP_AUX_BUS
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_KMS_HELPER
+ select DRM_PANEL
+ select DRM_BRIDGE
+ select DRM_PANEL_BRIDGE
+ select DRM_SCHED
+ select SHMEM
+ select TMPFS
+ select QCOM_SCM
+ select WANT_DEV_COREDUMP
+ select SND_SOC_HDMI_CODEC if SND_SOC
+ select SYNC_FILE
+ select PM_OPP
+ select NVMEM
+ help
+ DRM/KMS driver for MSM/snapdragon.
+
+config DRM_MSM_GPU_STATE
+ bool
+ depends on DRM_MSM && (DEBUG_FS || DEV_COREDUMP)
+ default y
+
+config DRM_MSM_GPU_SUDO
+ bool "Enable SUDO flag on submits"
+ depends on DRM_MSM && EXPERT
+ default n
+ help
+ Enable userspace that has CAP_SYS_RAWIO to submit GPU commands
+ that are run from RB instead of IB1. This essentially gives
+ userspace kernel level access, but is useful for firmware
+ debugging.
+
+ Only use this if you are a driver developer. This should *not*
+ be enabled for production kernels. If unsure, say N.
+
+config DRM_MSM_MDSS
+ bool
+ depends on DRM_MSM
+ default n
+
+config DRM_MSM_MDP4
+ bool "Enable MDP4 support in MSM DRM driver"
+ depends on DRM_MSM
+ default y
+ help
+ Compile in support for the Mobile Display Processor v4 (MDP4) in
+ the MSM DRM driver. It is the older display controller found in
+ devices using APQ8064/MSM8960/MSM8x60 platforms.
+
+config DRM_MSM_MDP5
+ bool "Enable MDP5 support in MSM DRM driver"
+ depends on DRM_MSM
+ select DRM_MSM_MDSS
+ default y
+ help
+ Compile in support for the Mobile Display Processor v5 (MDP5) in
+ the MSM DRM driver. It is the display controller found in devices
+ using e.g. APQ8016/MSM8916/APQ8096/MSM8996/MSM8974/SDM6x0 platforms.
+
+config DRM_MSM_DPU
+ bool "Enable DPU support in MSM DRM driver"
+ depends on DRM_MSM
+ select DRM_MSM_MDSS
+ default y
+ help
+ Compile in support for the Display Processing Unit in
+ the MSM DRM driver. It is the display controller found in devices
+ using e.g. SDM845 and newer platforms.
+
+config DRM_MSM_DP
+ bool "Enable DisplayPort support in MSM DRM driver"
+ depends on DRM_MSM
+ select RATIONAL
+ default y
+ help
+ Compile in support for the DisplayPort driver in the MSM DRM
+ driver. External DisplayPort display support is enabled through
+ this config option. The DP output can be the primary or a
+ secondary display on the device.
+
+config DRM_MSM_DSI
+ bool "Enable DSI support in MSM DRM driver"
+ depends on DRM_MSM
+ select DRM_PANEL
+ select DRM_MIPI_DSI
+ default y
+ help
+ Choose this option if you have a need for MIPI DSI connector
+ support.
+
+config DRM_MSM_DSI_28NM_PHY
+ bool "Enable DSI 28nm PHY driver in MSM DRM"
+ depends on DRM_MSM_DSI
+ default y
+ help
+ Choose this option if the 28nm DSI PHY is used on the platform.
+
+config DRM_MSM_DSI_20NM_PHY
+ bool "Enable DSI 20nm PHY driver in MSM DRM"
+ depends on DRM_MSM_DSI
+ default y
+ help
+ Choose this option if the 20nm DSI PHY is used on the platform.
+
+config DRM_MSM_DSI_28NM_8960_PHY
+ bool "Enable DSI 28nm 8960 PHY driver in MSM DRM"
+ depends on DRM_MSM_DSI
+ default y
+ help
+ Choose this option if the 28nm DSI PHY 8960 variant is used on the
+ platform.
+
+config DRM_MSM_DSI_14NM_PHY
+ bool "Enable DSI 14nm PHY driver in MSM DRM (used by MSM8996/APQ8096)"
+ depends on DRM_MSM_DSI
+ default y
+ help
+ Choose this option if DSI PHY on 8996 is used on the platform.
+
+config DRM_MSM_DSI_10NM_PHY
+ bool "Enable DSI 10nm PHY driver in MSM DRM (used by SDM845)"
+ depends on DRM_MSM_DSI
+ default y
+ help
+ Choose this option if DSI PHY on SDM845 is used on the platform.
+
+config DRM_MSM_DSI_7NM_PHY
+ bool "Enable DSI 7nm PHY driver in MSM DRM"
+ depends on DRM_MSM_DSI
+ default y
+ help
+ Choose this option if DSI PHY on SM8150/SM8250/SC7280 is used on
+ the platform.
+
+config DRM_MSM_HDMI
+ bool "Enable HDMI support in MSM DRM driver"
+ depends on DRM_MSM
+ default y
+ help
+ Compile in support for the HDMI output in the MSM DRM driver. It
+ can be a primary or a secondary display on the device. Note that
+ this is used only for the direct HDMI output. If the device outputs
+ HDMI data through some kind of DSI-to-HDMI bridge, this option can
+ be disabled.
+
+config DRM_MSM_HDMI_HDCP
+ bool "Enable HDMI HDCP support in MSM DRM driver"
+ depends on DRM_MSM && DRM_MSM_HDMI
+ default y
+ help
+ Choose this option to enable the HDCP state machine.
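
For reference, a minimal .config fragment that builds this driver as a module
on a Qualcomm board could look roughly like the sketch below. This is an
assumption-laden example, not a verified minimal configuration: the exact
dependency closure varies by kernel version and platform, and the boolean
sub-options shown default to y once DRM_MSM is set.

  CONFIG_ARCH_QCOM=y
  CONFIG_COMMON_CLK=y
  CONFIG_IOMMU_SUPPORT=y
  CONFIG_DRM=m
  CONFIG_DRM_MSM=m
  CONFIG_DRM_MSM_DPU=y
  CONFIG_DRM_MSM_DSI=y
  CONFIG_DRM_MSM_HDMI=y
  CONFIG_DRM_MSM_DP=y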
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
new file mode 100644
index 000000000..7274c4122
--- /dev/null
+++ b/drivers/gpu/drm/msm/Makefile
@@ -0,0 +1,149 @@
+# SPDX-License-Identifier: GPL-2.0
+ccflags-y := -I $(srctree)/$(src)
+ccflags-y += -I $(srctree)/$(src)/disp/dpu1
+ccflags-$(CONFIG_DRM_MSM_DSI) += -I $(srctree)/$(src)/dsi
+ccflags-$(CONFIG_DRM_MSM_DP) += -I $(srctree)/$(src)/dp
+
+msm-y := \
+ adreno/adreno_device.o \
+ adreno/adreno_gpu.o \
+ adreno/a2xx_gpu.o \
+ adreno/a3xx_gpu.o \
+ adreno/a4xx_gpu.o \
+ adreno/a5xx_gpu.o \
+ adreno/a5xx_power.o \
+ adreno/a5xx_preempt.o \
+ adreno/a6xx_gpu.o \
+ adreno/a6xx_gmu.o \
+ adreno/a6xx_hfi.o \
+
+msm-$(CONFIG_DRM_MSM_HDMI) += \
+ hdmi/hdmi.o \
+ hdmi/hdmi_audio.o \
+ hdmi/hdmi_bridge.o \
+ hdmi/hdmi_hpd.o \
+ hdmi/hdmi_i2c.o \
+ hdmi/hdmi_phy.o \
+ hdmi/hdmi_phy_8960.o \
+ hdmi/hdmi_phy_8996.o \
+ hdmi/hdmi_phy_8x60.o \
+ hdmi/hdmi_phy_8x74.o \
+ hdmi/hdmi_pll_8960.o \
+
+msm-$(CONFIG_DRM_MSM_MDP4) += \
+ disp/mdp4/mdp4_crtc.o \
+ disp/mdp4/mdp4_dsi_encoder.o \
+ disp/mdp4/mdp4_dtv_encoder.o \
+ disp/mdp4/mdp4_lcdc_encoder.o \
+ disp/mdp4/mdp4_lvds_connector.o \
+ disp/mdp4/mdp4_lvds_pll.o \
+ disp/mdp4/mdp4_irq.o \
+ disp/mdp4/mdp4_kms.o \
+ disp/mdp4/mdp4_plane.o \
+
+msm-$(CONFIG_DRM_MSM_MDP5) += \
+ disp/mdp5/mdp5_cfg.o \
+ disp/mdp5/mdp5_cmd_encoder.o \
+ disp/mdp5/mdp5_ctl.o \
+ disp/mdp5/mdp5_crtc.o \
+ disp/mdp5/mdp5_encoder.o \
+ disp/mdp5/mdp5_irq.o \
+ disp/mdp5/mdp5_kms.o \
+ disp/mdp5/mdp5_pipe.o \
+ disp/mdp5/mdp5_mixer.o \
+ disp/mdp5/mdp5_plane.o \
+ disp/mdp5/mdp5_smp.o \
+
+msm-$(CONFIG_DRM_MSM_DPU) += \
+ disp/dpu1/dpu_core_perf.o \
+ disp/dpu1/dpu_crtc.o \
+ disp/dpu1/dpu_encoder.o \
+ disp/dpu1/dpu_encoder_phys_cmd.o \
+ disp/dpu1/dpu_encoder_phys_vid.o \
+ disp/dpu1/dpu_encoder_phys_wb.o \
+ disp/dpu1/dpu_formats.o \
+ disp/dpu1/dpu_hw_catalog.o \
+ disp/dpu1/dpu_hw_ctl.o \
+ disp/dpu1/dpu_hw_dsc.o \
+ disp/dpu1/dpu_hw_interrupts.o \
+ disp/dpu1/dpu_hw_intf.o \
+ disp/dpu1/dpu_hw_lm.o \
+ disp/dpu1/dpu_hw_pingpong.o \
+ disp/dpu1/dpu_hw_sspp.o \
+ disp/dpu1/dpu_hw_dspp.o \
+ disp/dpu1/dpu_hw_merge3d.o \
+ disp/dpu1/dpu_hw_top.o \
+ disp/dpu1/dpu_hw_util.o \
+ disp/dpu1/dpu_hw_vbif.o \
+ disp/dpu1/dpu_hw_wb.o \
+ disp/dpu1/dpu_kms.o \
+ disp/dpu1/dpu_plane.o \
+ disp/dpu1/dpu_rm.o \
+ disp/dpu1/dpu_vbif.o \
+ disp/dpu1/dpu_writeback.o
+
+msm-$(CONFIG_DRM_MSM_MDSS) += \
+ msm_mdss.o \
+
+msm-y += \
+ disp/mdp_format.o \
+ disp/mdp_kms.o \
+ disp/msm_disp_snapshot.o \
+ disp/msm_disp_snapshot_util.o \
+ msm_atomic.o \
+ msm_atomic_tracepoints.o \
+ msm_debugfs.o \
+ msm_drv.o \
+ msm_fb.o \
+ msm_fence.o \
+ msm_gem.o \
+ msm_gem_prime.o \
+ msm_gem_shrinker.o \
+ msm_gem_submit.o \
+ msm_gem_vma.o \
+ msm_gpu.o \
+ msm_gpu_devfreq.o \
+ msm_io_utils.o \
+ msm_iommu.o \
+ msm_perf.o \
+ msm_rd.o \
+ msm_ringbuffer.o \
+ msm_submitqueue.o \
+ msm_gpu_tracepoints.o \
+ msm_gpummu.o
+
+msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
+ dp/dp_debug.o
+
+msm-$(CONFIG_DRM_MSM_GPU_STATE) += adreno/a6xx_gpu_state.o
+
+msm-$(CONFIG_DRM_MSM_DP) += dp/dp_aux.o \
+ dp/dp_catalog.o \
+ dp/dp_ctrl.o \
+ dp/dp_display.o \
+ dp/dp_drm.o \
+ dp/dp_hpd.o \
+ dp/dp_link.o \
+ dp/dp_panel.o \
+ dp/dp_parser.o \
+ dp/dp_power.o \
+ dp/dp_audio.o
+
+msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
+
+msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
+
+msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
+ dsi/dsi_cfg.o \
+ dsi/dsi_host.o \
+ dsi/dsi_manager.o \
+ dsi/phy/dsi_phy.o
+
+msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
+msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
+msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
+msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
+msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o
+msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/phy/dsi_phy_7nm.o
+
+obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/NOTES b/drivers/gpu/drm/msm/NOTES
new file mode 100644
index 000000000..9c4255b98
--- /dev/null
+++ b/drivers/gpu/drm/msm/NOTES
@@ -0,0 +1,87 @@
+NOTES about msm drm/kms driver:
+
+In the current snapdragon SoCs, we have (at least) 3 different
+display controller blocks at play:
+ + MDP3 - ?? seems to be what is on geeksphone peak device
+ + MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410)
+ + MDP5 - snapdragon 800
+
+(I don't have a completely clear picture on which display controller
+maps to which part #)
+
+Plus a handful of blocks around them for HDMI/DSI/etc output.
+
+And on gpu side of things:
+ + zero, one, or two 2d cores (z180)
+ + and either a2xx or a3xx 3d core.
+
+But, HDMI/DSI/etc blocks seem like they can be shared across multiple
+display controller blocks. And I for sure don't want to have to deal
+with N different kms devices from xf86-video-freedreno. Plus, it
+seems like we can do some clever tricks like use GPU to trigger
+pageflip after rendering completes (ie. have the kms/crtc code build
+up gpu cmdstream to update scanout and write FLUSH register after).
+
+So, the approach is one drm driver, with some modularity. Different
+'struct msm_kms' implementations, depending on display controller.
+And one or more 'struct msm_gpu' for the various different gpu sub-
+modules.
+
+(Second part is not implemented yet. So far this is just basic KMS
+driver, and not exposing any custom ioctls to userspace for now.)
+
+The kms module provides the plane, crtc, and encoder objects, and
+loads whatever connectors are appropriate.
+
+For MDP4, the mapping is:
+
+ plane -> PIPE{RGBn,VGn} \
+ crtc -> OVLP{n} + DMA{P,S,E} (??) |-> MDP "device"
+ encoder -> DTV/LCDC/DSI (within MDP4) /
+ connector -> HDMI/DSI/etc --> other device(s)
+
+Since the irqs that drm core mostly cares about are vblank/framedone,
+we'll let msm_mdp4_kms provide the irq install/uninstall/etc functions
+and treat the MDP4 block's irq as "the" irq. Even though the connectors
+may have their own irqs which they install themselves. For this reason
+the display controller is the "master" device.
+
+For MDP5, the mapping is:
+
+ plane -> PIPE{RGBn,VIGn} \
+ crtc -> LM (layer mixer) |-> MDP "device"
+ encoder -> INTF /
+ connector -> HDMI/DSI/eDP/etc --> other device(s)
+
+Unlike MDP4, it appears we can get by with a single encoder, rather
+than needing a different implementation for DTV, DSI, etc. (Ie. the
+register interface is the same, just different bases.)
+
+Also unlike MDP4, with MDP5 all the IRQs for other blocks (HDMI, DSI,
+etc) are routed through MDP.
+
+And finally, MDP5 has this "Shared Memory Pool" (called "SMP"), from
+which blocks need to be allocated to the active pipes based on fetch
+stride.
+
+Each connector probably ends up being a separate device, just for the
+logistics of finding/mapping io region, irq, etc. Ideally we would
+have a better way than just stashing the platform device in a global
+(ie. like DT super-node.. but I don't have any snapdragon hw yet that
+is using DT).
+
+Note that so far I've not been able to get any docs on the hw, and it
+seems that access to such docs would prevent me from working on the
+freedreno gallium driver. So there may be some mistakes in register
+names (I had to invent a few, since no sufficient hint was given in
+the downstream android fbdev driver), bitfield sizes, etc. My current
+state of understanding the registers is given in the envytools rnndb
+files at:
+
+ https://github.com/freedreno/envytools/tree/master/rnndb
+ (the mdp4/hdmi/dsi directories)
+
+These files are used both by a parser tool (in the same tree) that
+parses logged register reads/writes (from the downstream android fbdev
+driver, and from this driver with register logging enabled), and to
+generate the register-level headers.
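
The "one drm driver, with some modularity" approach described in NOTES comes
down to a vtable of per-display-controller callbacks. The sketch below is a
heavily reduced, partly hypothetical rendition of that idea: the real
interface lives in msm_kms.h (see the diffstat above) and carries many more
hooks, and the mdp4_* names here are illustrative stand-ins rather than the
driver's actual functions.

  struct drm_crtc;
  struct msm_kms;

  /* per-display-controller operations (simplified sketch) */
  struct msm_kms_funcs {
          /* hw (re)initialization, after the modeset objects exist */
          int (*hw_init)(struct msm_kms *kms);
          /* vblank on/off, routed through the MDP block's irq */
          int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
          void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
          void (*destroy)(struct msm_kms *kms);
  };

  struct msm_kms {
          const struct msm_kms_funcs *funcs;
  };

  /* hypothetical MDP4 stubs, standing in for a real implementation */
  static int mdp4_hw_init(struct msm_kms *kms) { (void)kms; return 0; }
  static int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
  { (void)kms; (void)crtc; return 0; }
  static void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
  { (void)kms; (void)crtc; }
  static void mdp4_destroy(struct msm_kms *kms) { (void)kms; }

  static const struct msm_kms_funcs mdp4_kms_funcs = {
          .hw_init        = mdp4_hw_init,
          .enable_vblank  = mdp4_enable_vblank,
          .disable_vblank = mdp4_disable_vblank,
          .destroy        = mdp4_destroy,
  };

The drm core only ever talks to struct msm_kms; whether an MDP4, MDP5, or DPU
implementation sits behind the funcs pointer is decided once at probe time.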
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
new file mode 100644
index 000000000..afa602334
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -0,0 +1,3212 @@
+#ifndef A2XX_XML
+#define A2XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a2xx_rb_dither_type {
+ DITHER_PIXEL = 0,
+ DITHER_SUBPIXEL = 1,
+};
+
+enum a2xx_colorformatx {
+ COLORX_4_4_4_4 = 0,
+ COLORX_1_5_5_5 = 1,
+ COLORX_5_6_5 = 2,
+ COLORX_8 = 3,
+ COLORX_8_8 = 4,
+ COLORX_8_8_8_8 = 5,
+ COLORX_S8_8_8_8 = 6,
+ COLORX_16_FLOAT = 7,
+ COLORX_16_16_FLOAT = 8,
+ COLORX_16_16_16_16_FLOAT = 9,
+ COLORX_32_FLOAT = 10,
+ COLORX_32_32_FLOAT = 11,
+ COLORX_32_32_32_32_FLOAT = 12,
+ COLORX_2_3_3 = 13,
+ COLORX_8_8_8 = 14,
+};
+
+enum a2xx_sq_surfaceformat {
+ FMT_1_REVERSE = 0,
+ FMT_1 = 1,
+ FMT_8 = 2,
+ FMT_1_5_5_5 = 3,
+ FMT_5_6_5 = 4,
+ FMT_6_5_5 = 5,
+ FMT_8_8_8_8 = 6,
+ FMT_2_10_10_10 = 7,
+ FMT_8_A = 8,
+ FMT_8_B = 9,
+ FMT_8_8 = 10,
+ FMT_Cr_Y1_Cb_Y0 = 11,
+ FMT_Y1_Cr_Y0_Cb = 12,
+ FMT_5_5_5_1 = 13,
+ FMT_8_8_8_8_A = 14,
+ FMT_4_4_4_4 = 15,
+ FMT_8_8_8 = 16,
+ FMT_DXT1 = 18,
+ FMT_DXT2_3 = 19,
+ FMT_DXT4_5 = 20,
+ FMT_10_10_10_2 = 21,
+ FMT_24_8 = 22,
+ FMT_16 = 24,
+ FMT_16_16 = 25,
+ FMT_16_16_16_16 = 26,
+ FMT_16_EXPAND = 27,
+ FMT_16_16_EXPAND = 28,
+ FMT_16_16_16_16_EXPAND = 29,
+ FMT_16_FLOAT = 30,
+ FMT_16_16_FLOAT = 31,
+ FMT_16_16_16_16_FLOAT = 32,
+ FMT_32 = 33,
+ FMT_32_32 = 34,
+ FMT_32_32_32_32 = 35,
+ FMT_32_FLOAT = 36,
+ FMT_32_32_FLOAT = 37,
+ FMT_32_32_32_32_FLOAT = 38,
+ FMT_ATI_TC_RGB = 39,
+ FMT_ATI_TC_RGBA = 40,
+ FMT_ATI_TC_555_565_RGB = 41,
+ FMT_ATI_TC_555_565_RGBA = 42,
+ FMT_ATI_TC_RGBA_INTERP = 43,
+ FMT_ATI_TC_555_565_RGBA_INTERP = 44,
+ FMT_ETC1_RGBA_INTERP = 46,
+ FMT_ETC1_RGB = 47,
+ FMT_ETC1_RGBA = 48,
+ FMT_DXN = 49,
+ FMT_2_3_3 = 51,
+ FMT_2_10_10_10_AS_16_16_16_16 = 54,
+ FMT_10_10_10_2_AS_16_16_16_16 = 55,
+ FMT_32_32_32_FLOAT = 57,
+ FMT_DXT3A = 58,
+ FMT_DXT5A = 59,
+ FMT_CTX1 = 60,
+};
+
+enum a2xx_sq_ps_vtx_mode {
+ POSITION_1_VECTOR = 0,
+ POSITION_2_VECTORS_UNUSED = 1,
+ POSITION_2_VECTORS_SPRITE = 2,
+ POSITION_2_VECTORS_EDGE = 3,
+ POSITION_2_VECTORS_KILL = 4,
+ POSITION_2_VECTORS_SPRITE_KILL = 5,
+ POSITION_2_VECTORS_EDGE_KILL = 6,
+ MULTIPASS = 7,
+};
+
+enum a2xx_sq_sample_cntl {
+ CENTROIDS_ONLY = 0,
+ CENTERS_ONLY = 1,
+ CENTROIDS_AND_CENTERS = 2,
+};
+
+enum a2xx_dx_clip_space {
+ DXCLIP_OPENGL = 0,
+ DXCLIP_DIRECTX = 1,
+};
+
+enum a2xx_pa_su_sc_polymode {
+ POLY_DISABLED = 0,
+ POLY_DUALMODE = 1,
+};
+
+enum a2xx_rb_edram_mode {
+ EDRAM_NOP = 0,
+ COLOR_DEPTH = 4,
+ DEPTH_ONLY = 5,
+ EDRAM_COPY = 6,
+};
+
+enum a2xx_pa_sc_pattern_bit_order {
+ LITTLE = 0,
+ BIG = 1,
+};
+
+enum a2xx_pa_sc_auto_reset_cntl {
+ NEVER = 0,
+ EACH_PRIMITIVE = 1,
+ EACH_PACKET = 2,
+};
+
+enum a2xx_pa_pixcenter {
+ PIXCENTER_D3D = 0,
+ PIXCENTER_OGL = 1,
+};
+
+enum a2xx_pa_roundmode {
+ TRUNCATE = 0,
+ ROUND = 1,
+ ROUNDTOEVEN = 2,
+ ROUNDTOODD = 3,
+};
+
+enum a2xx_pa_quantmode {
+ ONE_SIXTEENTH = 0,
+ ONE_EIGTH = 1,
+ ONE_QUARTER = 2,
+ ONE_HALF = 3,
+ ONE = 4,
+};
+
+enum a2xx_rb_copy_sample_select {
+ SAMPLE_0 = 0,
+ SAMPLE_1 = 1,
+ SAMPLE_2 = 2,
+ SAMPLE_3 = 3,
+ SAMPLE_01 = 4,
+ SAMPLE_23 = 5,
+ SAMPLE_0123 = 6,
+};
+
+enum a2xx_rb_blend_opcode {
+ BLEND2_DST_PLUS_SRC = 0,
+ BLEND2_SRC_MINUS_DST = 1,
+ BLEND2_MIN_DST_SRC = 2,
+ BLEND2_MAX_DST_SRC = 3,
+ BLEND2_DST_MINUS_SRC = 4,
+ BLEND2_DST_PLUS_SRC_BIAS = 5,
+};
+
+enum a2xx_su_perfcnt_select {
+ PERF_PAPC_PASX_REQ = 0,
+ PERF_PAPC_PASX_FIRST_VECTOR = 2,
+ PERF_PAPC_PASX_SECOND_VECTOR = 3,
+ PERF_PAPC_PASX_FIRST_DEAD = 4,
+ PERF_PAPC_PASX_SECOND_DEAD = 5,
+ PERF_PAPC_PASX_VTX_KILL_DISCARD = 6,
+ PERF_PAPC_PASX_VTX_NAN_DISCARD = 7,
+ PERF_PAPC_PA_INPUT_PRIM = 8,
+ PERF_PAPC_PA_INPUT_NULL_PRIM = 9,
+ PERF_PAPC_PA_INPUT_EVENT_FLAG = 10,
+ PERF_PAPC_PA_INPUT_FIRST_PRIM_SLOT = 11,
+ PERF_PAPC_PA_INPUT_END_OF_PACKET = 12,
+ PERF_PAPC_CLPR_CULL_PRIM = 13,
+ PERF_PAPC_CLPR_VV_CULL_PRIM = 15,
+ PERF_PAPC_CLPR_VTX_KILL_CULL_PRIM = 17,
+ PERF_PAPC_CLPR_VTX_NAN_CULL_PRIM = 18,
+ PERF_PAPC_CLPR_CULL_TO_NULL_PRIM = 19,
+ PERF_PAPC_CLPR_VV_CLIP_PRIM = 21,
+ PERF_PAPC_CLPR_POINT_CLIP_CANDIDATE = 23,
+ PERF_PAPC_CLPR_CLIP_PLANE_CNT_1 = 24,
+ PERF_PAPC_CLPR_CLIP_PLANE_CNT_2 = 25,
+ PERF_PAPC_CLPR_CLIP_PLANE_CNT_3 = 26,
+ PERF_PAPC_CLPR_CLIP_PLANE_CNT_4 = 27,
+ PERF_PAPC_CLPR_CLIP_PLANE_CNT_5 = 28,
+ PERF_PAPC_CLPR_CLIP_PLANE_CNT_6 = 29,
+ PERF_PAPC_CLPR_CLIP_PLANE_NEAR = 30,
+ PERF_PAPC_CLPR_CLIP_PLANE_FAR = 31,
+ PERF_PAPC_CLPR_CLIP_PLANE_LEFT = 32,
+ PERF_PAPC_CLPR_CLIP_PLANE_RIGHT = 33,
+ PERF_PAPC_CLPR_CLIP_PLANE_TOP = 34,
+ PERF_PAPC_CLPR_CLIP_PLANE_BOTTOM = 35,
+ PERF_PAPC_CLSM_NULL_PRIM = 36,
+ PERF_PAPC_CLSM_TOTALLY_VISIBLE_PRIM = 37,
+ PERF_PAPC_CLSM_CLIP_PRIM = 38,
+ PERF_PAPC_CLSM_CULL_TO_NULL_PRIM = 39,
+ PERF_PAPC_CLSM_OUT_PRIM_CNT_1 = 40,
+ PERF_PAPC_CLSM_OUT_PRIM_CNT_2 = 41,
+ PERF_PAPC_CLSM_OUT_PRIM_CNT_3 = 42,
+ PERF_PAPC_CLSM_OUT_PRIM_CNT_4 = 43,
+ PERF_PAPC_CLSM_OUT_PRIM_CNT_5 = 44,
+ PERF_PAPC_CLSM_OUT_PRIM_CNT_6_7 = 45,
+ PERF_PAPC_CLSM_NON_TRIVIAL_CULL = 46,
+ PERF_PAPC_SU_INPUT_PRIM = 47,
+ PERF_PAPC_SU_INPUT_CLIP_PRIM = 48,
+ PERF_PAPC_SU_INPUT_NULL_PRIM = 49,
+ PERF_PAPC_SU_ZERO_AREA_CULL_PRIM = 50,
+ PERF_PAPC_SU_BACK_FACE_CULL_PRIM = 51,
+ PERF_PAPC_SU_FRONT_FACE_CULL_PRIM = 52,
+ PERF_PAPC_SU_POLYMODE_FACE_CULL = 53,
+ PERF_PAPC_SU_POLYMODE_BACK_CULL = 54,
+ PERF_PAPC_SU_POLYMODE_FRONT_CULL = 55,
+ PERF_PAPC_SU_POLYMODE_INVALID_FILL = 56,
+ PERF_PAPC_SU_OUTPUT_PRIM = 57,
+ PERF_PAPC_SU_OUTPUT_CLIP_PRIM = 58,
+ PERF_PAPC_SU_OUTPUT_NULL_PRIM = 59,
+ PERF_PAPC_SU_OUTPUT_EVENT_FLAG = 60,
+ PERF_PAPC_SU_OUTPUT_FIRST_PRIM_SLOT = 61,
+ PERF_PAPC_SU_OUTPUT_END_OF_PACKET = 62,
+ PERF_PAPC_SU_OUTPUT_POLYMODE_FACE = 63,
+ PERF_PAPC_SU_OUTPUT_POLYMODE_BACK = 64,
+ PERF_PAPC_SU_OUTPUT_POLYMODE_FRONT = 65,
+ PERF_PAPC_SU_OUT_CLIP_POLYMODE_FACE = 66,
+ PERF_PAPC_SU_OUT_CLIP_POLYMODE_BACK = 67,
+ PERF_PAPC_SU_OUT_CLIP_POLYMODE_FRONT = 68,
+ PERF_PAPC_PASX_REQ_IDLE = 69,
+ PERF_PAPC_PASX_REQ_BUSY = 70,
+ PERF_PAPC_PASX_REQ_STALLED = 71,
+ PERF_PAPC_PASX_REC_IDLE = 72,
+ PERF_PAPC_PASX_REC_BUSY = 73,
+ PERF_PAPC_PASX_REC_STARVED_SX = 74,
+ PERF_PAPC_PASX_REC_STALLED = 75,
+ PERF_PAPC_PASX_REC_STALLED_POS_MEM = 76,
+ PERF_PAPC_PASX_REC_STALLED_CCGSM_IN = 77,
+ PERF_PAPC_CCGSM_IDLE = 78,
+ PERF_PAPC_CCGSM_BUSY = 79,
+ PERF_PAPC_CCGSM_STALLED = 80,
+ PERF_PAPC_CLPRIM_IDLE = 81,
+ PERF_PAPC_CLPRIM_BUSY = 82,
+ PERF_PAPC_CLPRIM_STALLED = 83,
+ PERF_PAPC_CLPRIM_STARVED_CCGSM = 84,
+ PERF_PAPC_CLIPSM_IDLE = 85,
+ PERF_PAPC_CLIPSM_BUSY = 86,
+ PERF_PAPC_CLIPSM_WAIT_CLIP_VERT_ENGH = 87,
+ PERF_PAPC_CLIPSM_WAIT_HIGH_PRI_SEQ = 88,
+ PERF_PAPC_CLIPSM_WAIT_CLIPGA = 89,
+ PERF_PAPC_CLIPSM_WAIT_AVAIL_VTE_CLIP = 90,
+ PERF_PAPC_CLIPSM_WAIT_CLIP_OUTSM = 91,
+ PERF_PAPC_CLIPGA_IDLE = 92,
+ PERF_PAPC_CLIPGA_BUSY = 93,
+ PERF_PAPC_CLIPGA_STARVED_VTE_CLIP = 94,
+ PERF_PAPC_CLIPGA_STALLED = 95,
+ PERF_PAPC_CLIP_IDLE = 96,
+ PERF_PAPC_CLIP_BUSY = 97,
+ PERF_PAPC_SU_IDLE = 98,
+ PERF_PAPC_SU_BUSY = 99,
+ PERF_PAPC_SU_STARVED_CLIP = 100,
+ PERF_PAPC_SU_STALLED_SC = 101,
+ PERF_PAPC_SU_FACENESS_CULL = 102,
+};
+
+enum a2xx_sc_perfcnt_select {
+ SC_SR_WINDOW_VALID = 0,
+ SC_CW_WINDOW_VALID = 1,
+ SC_QM_WINDOW_VALID = 2,
+ SC_FW_WINDOW_VALID = 3,
+ SC_EZ_WINDOW_VALID = 4,
+ SC_IT_WINDOW_VALID = 5,
+ SC_STARVED_BY_PA = 6,
+ SC_STALLED_BY_RB_TILE = 7,
+ SC_STALLED_BY_RB_SAMP = 8,
+ SC_STARVED_BY_RB_EZ = 9,
+ SC_STALLED_BY_SAMPLE_FF = 10,
+ SC_STALLED_BY_SQ = 11,
+ SC_STALLED_BY_SP = 12,
+ SC_TOTAL_NO_PRIMS = 13,
+ SC_NON_EMPTY_PRIMS = 14,
+ SC_NO_TILES_PASSING_QM = 15,
+ SC_NO_PIXELS_PRE_EZ = 16,
+ SC_NO_PIXELS_POST_EZ = 17,
+};
+
+enum a2xx_vgt_perfcount_select {
+ VGT_SQ_EVENT_WINDOW_ACTIVE = 0,
+ VGT_SQ_SEND = 1,
+ VGT_SQ_STALLED = 2,
+ VGT_SQ_STARVED_BUSY = 3,
+ VGT_SQ_STARVED_IDLE = 4,
+ VGT_SQ_STATIC = 5,
+ VGT_PA_EVENT_WINDOW_ACTIVE = 6,
+ VGT_PA_CLIP_V_SEND = 7,
+ VGT_PA_CLIP_V_STALLED = 8,
+ VGT_PA_CLIP_V_STARVED_BUSY = 9,
+ VGT_PA_CLIP_V_STARVED_IDLE = 10,
+ VGT_PA_CLIP_V_STATIC = 11,
+ VGT_PA_CLIP_P_SEND = 12,
+ VGT_PA_CLIP_P_STALLED = 13,
+ VGT_PA_CLIP_P_STARVED_BUSY = 14,
+ VGT_PA_CLIP_P_STARVED_IDLE = 15,
+ VGT_PA_CLIP_P_STATIC = 16,
+ VGT_PA_CLIP_S_SEND = 17,
+ VGT_PA_CLIP_S_STALLED = 18,
+ VGT_PA_CLIP_S_STARVED_BUSY = 19,
+ VGT_PA_CLIP_S_STARVED_IDLE = 20,
+ VGT_PA_CLIP_S_STATIC = 21,
+ RBIU_FIFOS_EVENT_WINDOW_ACTIVE = 22,
+ RBIU_IMMED_DATA_FIFO_STARVED = 23,
+ RBIU_IMMED_DATA_FIFO_STALLED = 24,
+ RBIU_DMA_REQUEST_FIFO_STARVED = 25,
+ RBIU_DMA_REQUEST_FIFO_STALLED = 26,
+ RBIU_DRAW_INITIATOR_FIFO_STARVED = 27,
+ RBIU_DRAW_INITIATOR_FIFO_STALLED = 28,
+ BIN_PRIM_NEAR_CULL = 29,
+ BIN_PRIM_ZERO_CULL = 30,
+ BIN_PRIM_FAR_CULL = 31,
+ BIN_PRIM_BIN_CULL = 32,
+ BIN_PRIM_FACE_CULL = 33,
+ SPARE34 = 34,
+ SPARE35 = 35,
+ SPARE36 = 36,
+ SPARE37 = 37,
+ SPARE38 = 38,
+ SPARE39 = 39,
+ TE_SU_IN_VALID = 40,
+ TE_SU_IN_READ = 41,
+ TE_SU_IN_PRIM = 42,
+ TE_SU_IN_EOP = 43,
+ TE_SU_IN_NULL_PRIM = 44,
+ TE_WK_IN_VALID = 45,
+ TE_WK_IN_READ = 46,
+ TE_OUT_PRIM_VALID = 47,
+ TE_OUT_PRIM_READ = 48,
+};
+
+enum a2xx_tcr_perfcount_select {
+ DGMMPD_IPMUX0_STALL = 0,
+ DGMMPD_IPMUX_ALL_STALL = 4,
+ OPMUX0_L2_WRITES = 5,
+};
+
+enum a2xx_tp_perfcount_select {
+ POINT_QUADS = 0,
+ BILIN_QUADS = 1,
+ ANISO_QUADS = 2,
+ MIP_QUADS = 3,
+ VOL_QUADS = 4,
+ MIP_VOL_QUADS = 5,
+ MIP_ANISO_QUADS = 6,
+ VOL_ANISO_QUADS = 7,
+ ANISO_2_1_QUADS = 8,
+ ANISO_4_1_QUADS = 9,
+ ANISO_6_1_QUADS = 10,
+ ANISO_8_1_QUADS = 11,
+ ANISO_10_1_QUADS = 12,
+ ANISO_12_1_QUADS = 13,
+ ANISO_14_1_QUADS = 14,
+ ANISO_16_1_QUADS = 15,
+ MIP_VOL_ANISO_QUADS = 16,
+ ALIGN_2_QUADS = 17,
+ ALIGN_4_QUADS = 18,
+ PIX_0_QUAD = 19,
+ PIX_1_QUAD = 20,
+ PIX_2_QUAD = 21,
+ PIX_3_QUAD = 22,
+ PIX_4_QUAD = 23,
+ TP_MIPMAP_LOD0 = 24,
+ TP_MIPMAP_LOD1 = 25,
+ TP_MIPMAP_LOD2 = 26,
+ TP_MIPMAP_LOD3 = 27,
+ TP_MIPMAP_LOD4 = 28,
+ TP_MIPMAP_LOD5 = 29,
+ TP_MIPMAP_LOD6 = 30,
+ TP_MIPMAP_LOD7 = 31,
+ TP_MIPMAP_LOD8 = 32,
+ TP_MIPMAP_LOD9 = 33,
+ TP_MIPMAP_LOD10 = 34,
+ TP_MIPMAP_LOD11 = 35,
+ TP_MIPMAP_LOD12 = 36,
+ TP_MIPMAP_LOD13 = 37,
+ TP_MIPMAP_LOD14 = 38,
+};
+
+enum a2xx_tcm_perfcount_select {
+ QUAD0_RD_LAT_FIFO_EMPTY = 0,
+ QUAD0_RD_LAT_FIFO_4TH_FULL = 3,
+ QUAD0_RD_LAT_FIFO_HALF_FULL = 4,
+ QUAD0_RD_LAT_FIFO_FULL = 5,
+ QUAD0_RD_LAT_FIFO_LT_4TH_FULL = 6,
+ READ_STARVED_QUAD0 = 28,
+ READ_STARVED = 32,
+ READ_STALLED_QUAD0 = 33,
+ READ_STALLED = 37,
+ VALID_READ_QUAD0 = 38,
+ TC_TP_STARVED_QUAD0 = 42,
+ TC_TP_STARVED = 46,
+};
+
+enum a2xx_tcf_perfcount_select {
+ VALID_CYCLES = 0,
+ SINGLE_PHASES = 1,
+ ANISO_PHASES = 2,
+ MIP_PHASES = 3,
+ VOL_PHASES = 4,
+ MIP_VOL_PHASES = 5,
+ MIP_ANISO_PHASES = 6,
+ VOL_ANISO_PHASES = 7,
+ ANISO_2_1_PHASES = 8,
+ ANISO_4_1_PHASES = 9,
+ ANISO_6_1_PHASES = 10,
+ ANISO_8_1_PHASES = 11,
+ ANISO_10_1_PHASES = 12,
+ ANISO_12_1_PHASES = 13,
+ ANISO_14_1_PHASES = 14,
+ ANISO_16_1_PHASES = 15,
+ MIP_VOL_ANISO_PHASES = 16,
+ ALIGN_2_PHASES = 17,
+ ALIGN_4_PHASES = 18,
+ TPC_BUSY = 19,
+ TPC_STALLED = 20,
+ TPC_STARVED = 21,
+ TPC_WORKING = 22,
+ TPC_WALKER_BUSY = 23,
+ TPC_WALKER_STALLED = 24,
+ TPC_WALKER_WORKING = 25,
+ TPC_ALIGNER_BUSY = 26,
+ TPC_ALIGNER_STALLED = 27,
+ TPC_ALIGNER_STALLED_BY_BLEND = 28,
+ TPC_ALIGNER_STALLED_BY_CACHE = 29,
+ TPC_ALIGNER_WORKING = 30,
+ TPC_BLEND_BUSY = 31,
+ TPC_BLEND_SYNC = 32,
+ TPC_BLEND_STARVED = 33,
+ TPC_BLEND_WORKING = 34,
+ OPCODE_0x00 = 35,
+ OPCODE_0x01 = 36,
+ OPCODE_0x04 = 37,
+ OPCODE_0x10 = 38,
+ OPCODE_0x11 = 39,
+ OPCODE_0x12 = 40,
+ OPCODE_0x13 = 41,
+ OPCODE_0x18 = 42,
+ OPCODE_0x19 = 43,
+ OPCODE_0x1A = 44,
+ OPCODE_OTHER = 45,
+ IN_FIFO_0_EMPTY = 56,
+ IN_FIFO_0_LT_HALF_FULL = 57,
+ IN_FIFO_0_HALF_FULL = 58,
+ IN_FIFO_0_FULL = 59,
+ IN_FIFO_TPC_EMPTY = 72,
+ IN_FIFO_TPC_LT_HALF_FULL = 73,
+ IN_FIFO_TPC_HALF_FULL = 74,
+ IN_FIFO_TPC_FULL = 75,
+ TPC_TC_XFC = 76,
+ TPC_TC_STATE = 77,
+ TC_STALL = 78,
+ QUAD0_TAPS = 79,
+ QUADS = 83,
+ TCA_SYNC_STALL = 84,
+ TAG_STALL = 85,
+ TCB_SYNC_STALL = 88,
+ TCA_VALID = 89,
+ PROBES_VALID = 90,
+ MISS_STALL = 91,
+ FETCH_FIFO_STALL = 92,
+ TCO_STALL = 93,
+ ANY_STALL = 94,
+ TAG_MISSES = 95,
+ TAG_HITS = 96,
+ SUB_TAG_MISSES = 97,
+ SET0_INVALIDATES = 98,
+ SET1_INVALIDATES = 99,
+ SET2_INVALIDATES = 100,
+ SET3_INVALIDATES = 101,
+ SET0_TAG_MISSES = 102,
+ SET1_TAG_MISSES = 103,
+ SET2_TAG_MISSES = 104,
+ SET3_TAG_MISSES = 105,
+ SET0_TAG_HITS = 106,
+ SET1_TAG_HITS = 107,
+ SET2_TAG_HITS = 108,
+ SET3_TAG_HITS = 109,
+ SET0_SUB_TAG_MISSES = 110,
+ SET1_SUB_TAG_MISSES = 111,
+ SET2_SUB_TAG_MISSES = 112,
+ SET3_SUB_TAG_MISSES = 113,
+ SET0_EVICT1 = 114,
+ SET0_EVICT2 = 115,
+ SET0_EVICT3 = 116,
+ SET0_EVICT4 = 117,
+ SET0_EVICT5 = 118,
+ SET0_EVICT6 = 119,
+ SET0_EVICT7 = 120,
+ SET0_EVICT8 = 121,
+ SET1_EVICT1 = 130,
+ SET1_EVICT2 = 131,
+ SET1_EVICT3 = 132,
+ SET1_EVICT4 = 133,
+ SET1_EVICT5 = 134,
+ SET1_EVICT6 = 135,
+ SET1_EVICT7 = 136,
+ SET1_EVICT8 = 137,
+ SET2_EVICT1 = 146,
+ SET2_EVICT2 = 147,
+ SET2_EVICT3 = 148,
+ SET2_EVICT4 = 149,
+ SET2_EVICT5 = 150,
+ SET2_EVICT6 = 151,
+ SET2_EVICT7 = 152,
+ SET2_EVICT8 = 153,
+ SET3_EVICT1 = 162,
+ SET3_EVICT2 = 163,
+ SET3_EVICT3 = 164,
+ SET3_EVICT4 = 165,
+ SET3_EVICT5 = 166,
+ SET3_EVICT6 = 167,
+ SET3_EVICT7 = 168,
+ SET3_EVICT8 = 169,
+ FF_EMPTY = 178,
+ FF_LT_HALF_FULL = 179,
+ FF_HALF_FULL = 180,
+ FF_FULL = 181,
+ FF_XFC = 182,
+ FF_STALLED = 183,
+ FG_MASKS = 184,
+ FG_LEFT_MASKS = 185,
+ FG_LEFT_MASK_STALLED = 186,
+ FG_LEFT_NOT_DONE_STALL = 187,
+ FG_LEFT_FG_STALL = 188,
+ FG_LEFT_SECTORS = 189,
+ FG0_REQUESTS = 195,
+ FG0_STALLED = 196,
+ MEM_REQ512 = 199,
+ MEM_REQ_SENT = 200,
+ MEM_LOCAL_READ_REQ = 202,
+ TC0_MH_STALLED = 203,
+};
+
+enum a2xx_sq_perfcnt_select {
+ SQ_PIXEL_VECTORS_SUB = 0,
+ SQ_VERTEX_VECTORS_SUB = 1,
+ SQ_ALU0_ACTIVE_VTX_SIMD0 = 2,
+ SQ_ALU1_ACTIVE_VTX_SIMD0 = 3,
+ SQ_ALU0_ACTIVE_PIX_SIMD0 = 4,
+ SQ_ALU1_ACTIVE_PIX_SIMD0 = 5,
+ SQ_ALU0_ACTIVE_VTX_SIMD1 = 6,
+ SQ_ALU1_ACTIVE_VTX_SIMD1 = 7,
+ SQ_ALU0_ACTIVE_PIX_SIMD1 = 8,
+ SQ_ALU1_ACTIVE_PIX_SIMD1 = 9,
+ SQ_EXPORT_CYCLES = 10,
+ SQ_ALU_CST_WRITTEN = 11,
+ SQ_TEX_CST_WRITTEN = 12,
+ SQ_ALU_CST_STALL = 13,
+ SQ_ALU_TEX_STALL = 14,
+ SQ_INST_WRITTEN = 15,
+ SQ_BOOLEAN_WRITTEN = 16,
+ SQ_LOOPS_WRITTEN = 17,
+ SQ_PIXEL_SWAP_IN = 18,
+ SQ_PIXEL_SWAP_OUT = 19,
+ SQ_VERTEX_SWAP_IN = 20,
+ SQ_VERTEX_SWAP_OUT = 21,
+ SQ_ALU_VTX_INST_ISSUED = 22,
+ SQ_TEX_VTX_INST_ISSUED = 23,
+ SQ_VC_VTX_INST_ISSUED = 24,
+ SQ_CF_VTX_INST_ISSUED = 25,
+ SQ_ALU_PIX_INST_ISSUED = 26,
+ SQ_TEX_PIX_INST_ISSUED = 27,
+ SQ_VC_PIX_INST_ISSUED = 28,
+ SQ_CF_PIX_INST_ISSUED = 29,
+ SQ_ALU0_FIFO_EMPTY_SIMD0 = 30,
+ SQ_ALU1_FIFO_EMPTY_SIMD0 = 31,
+ SQ_ALU0_FIFO_EMPTY_SIMD1 = 32,
+ SQ_ALU1_FIFO_EMPTY_SIMD1 = 33,
+ SQ_ALU_NOPS = 34,
+ SQ_PRED_SKIP = 35,
+ SQ_SYNC_ALU_STALL_SIMD0_VTX = 36,
+ SQ_SYNC_ALU_STALL_SIMD1_VTX = 37,
+ SQ_SYNC_TEX_STALL_VTX = 38,
+ SQ_SYNC_VC_STALL_VTX = 39,
+ SQ_CONSTANTS_USED_SIMD0 = 40,
+ SQ_CONSTANTS_SENT_SP_SIMD0 = 41,
+ SQ_GPR_STALL_VTX = 42,
+ SQ_GPR_STALL_PIX = 43,
+ SQ_VTX_RS_STALL = 44,
+ SQ_PIX_RS_STALL = 45,
+ SQ_SX_PC_FULL = 46,
+ SQ_SX_EXP_BUFF_FULL = 47,
+ SQ_SX_POS_BUFF_FULL = 48,
+ SQ_INTERP_QUADS = 49,
+ SQ_INTERP_ACTIVE = 50,
+ SQ_IN_PIXEL_STALL = 51,
+ SQ_IN_VTX_STALL = 52,
+ SQ_VTX_CNT = 53,
+ SQ_VTX_VECTOR2 = 54,
+ SQ_VTX_VECTOR3 = 55,
+ SQ_VTX_VECTOR4 = 56,
+ SQ_PIXEL_VECTOR1 = 57,
+ SQ_PIXEL_VECTOR23 = 58,
+ SQ_PIXEL_VECTOR4 = 59,
+ SQ_CONSTANTS_USED_SIMD1 = 60,
+ SQ_CONSTANTS_SENT_SP_SIMD1 = 61,
+ SQ_SX_MEM_EXP_FULL = 62,
+ SQ_ALU0_ACTIVE_VTX_SIMD2 = 63,
+ SQ_ALU1_ACTIVE_VTX_SIMD2 = 64,
+ SQ_ALU0_ACTIVE_PIX_SIMD2 = 65,
+ SQ_ALU1_ACTIVE_PIX_SIMD2 = 66,
+ SQ_ALU0_ACTIVE_VTX_SIMD3 = 67,
+ SQ_PERFCOUNT_VTX_QUAL_TP_DONE = 68,
+ SQ_ALU0_ACTIVE_PIX_SIMD3 = 69,
+ SQ_PERFCOUNT_PIX_QUAL_TP_DONE = 70,
+ SQ_ALU0_FIFO_EMPTY_SIMD2 = 71,
+ SQ_ALU1_FIFO_EMPTY_SIMD2 = 72,
+ SQ_ALU0_FIFO_EMPTY_SIMD3 = 73,
+ SQ_ALU1_FIFO_EMPTY_SIMD3 = 74,
+ SQ_SYNC_ALU_STALL_SIMD2_VTX = 75,
+ SQ_PERFCOUNT_VTX_POP_THREAD = 76,
+ SQ_SYNC_ALU_STALL_SIMD0_PIX = 77,
+ SQ_SYNC_ALU_STALL_SIMD1_PIX = 78,
+ SQ_SYNC_ALU_STALL_SIMD2_PIX = 79,
+ SQ_PERFCOUNT_PIX_POP_THREAD = 80,
+ SQ_SYNC_TEX_STALL_PIX = 81,
+ SQ_SYNC_VC_STALL_PIX = 82,
+ SQ_CONSTANTS_USED_SIMD2 = 83,
+ SQ_CONSTANTS_SENT_SP_SIMD2 = 84,
+ SQ_PERFCOUNT_VTX_DEALLOC_ACK = 85,
+ SQ_PERFCOUNT_PIX_DEALLOC_ACK = 86,
+ SQ_ALU0_FIFO_FULL_SIMD0 = 87,
+ SQ_ALU1_FIFO_FULL_SIMD0 = 88,
+ SQ_ALU0_FIFO_FULL_SIMD1 = 89,
+ SQ_ALU1_FIFO_FULL_SIMD1 = 90,
+ SQ_ALU0_FIFO_FULL_SIMD2 = 91,
+ SQ_ALU1_FIFO_FULL_SIMD2 = 92,
+ SQ_ALU0_FIFO_FULL_SIMD3 = 93,
+ SQ_ALU1_FIFO_FULL_SIMD3 = 94,
+ VC_PERF_STATIC = 95,
+ VC_PERF_STALLED = 96,
+ VC_PERF_STARVED = 97,
+ VC_PERF_SEND = 98,
+ VC_PERF_ACTUAL_STARVED = 99,
+ PIXEL_THREAD_0_ACTIVE = 100,
+ VERTEX_THREAD_0_ACTIVE = 101,
+ PIXEL_THREAD_0_NUMBER = 102,
+ VERTEX_THREAD_0_NUMBER = 103,
+ VERTEX_EVENT_NUMBER = 104,
+ PIXEL_EVENT_NUMBER = 105,
+ PTRBUFF_EF_PUSH = 106,
+ PTRBUFF_EF_POP_EVENT = 107,
+ PTRBUFF_EF_POP_NEW_VTX = 108,
+ PTRBUFF_EF_POP_DEALLOC = 109,
+ PTRBUFF_EF_POP_PVECTOR = 110,
+ PTRBUFF_EF_POP_PVECTOR_X = 111,
+ PTRBUFF_EF_POP_PVECTOR_VNZ = 112,
+ PTRBUFF_PB_DEALLOC = 113,
+ PTRBUFF_PI_STATE_PPB_POP = 114,
+ PTRBUFF_PI_RTR = 115,
+ PTRBUFF_PI_READ_EN = 116,
+ PTRBUFF_PI_BUFF_SWAP = 117,
+ PTRBUFF_SQ_FREE_BUFF = 118,
+ PTRBUFF_SQ_DEC = 119,
+ PTRBUFF_SC_VALID_CNTL_EVENT = 120,
+ PTRBUFF_SC_VALID_IJ_XFER = 121,
+ PTRBUFF_SC_NEW_VECTOR_1_Q = 122,
+ PTRBUFF_QUAL_NEW_VECTOR = 123,
+ PTRBUFF_QUAL_EVENT = 124,
+ PTRBUFF_END_BUFFER = 125,
+ PTRBUFF_FILL_QUAD = 126,
+ VERTS_WRITTEN_SPI = 127,
+ TP_FETCH_INSTR_EXEC = 128,
+ TP_FETCH_INSTR_REQ = 129,
+ TP_DATA_RETURN = 130,
+ SPI_WRITE_CYCLES_SP = 131,
+ SPI_WRITES_SP = 132,
+ SP_ALU_INSTR_EXEC = 133,
+ SP_CONST_ADDR_TO_SQ = 134,
+ SP_PRED_KILLS_TO_SQ = 135,
+ SP_EXPORT_CYCLES_TO_SX = 136,
+ SP_EXPORTS_TO_SX = 137,
+ SQ_CYCLES_ELAPSED = 138,
+ SQ_TCFS_OPT_ALLOC_EXEC = 139,
+ SQ_TCFS_NO_OPT_ALLOC = 140,
+ SQ_ALU0_NO_OPT_ALLOC = 141,
+ SQ_ALU1_NO_OPT_ALLOC = 142,
+ SQ_TCFS_ARB_XFC_CNT = 143,
+ SQ_ALU0_ARB_XFC_CNT = 144,
+ SQ_ALU1_ARB_XFC_CNT = 145,
+ SQ_TCFS_CFS_UPDATE_CNT = 146,
+ SQ_ALU0_CFS_UPDATE_CNT = 147,
+ SQ_ALU1_CFS_UPDATE_CNT = 148,
+ SQ_VTX_PUSH_THREAD_CNT = 149,
+ SQ_VTX_POP_THREAD_CNT = 150,
+ SQ_PIX_PUSH_THREAD_CNT = 151,
+ SQ_PIX_POP_THREAD_CNT = 152,
+ SQ_PIX_TOTAL = 153,
+ SQ_PIX_KILLED = 154,
+};
+
+enum a2xx_sx_perfcnt_select {
+ SX_EXPORT_VECTORS = 0,
+ SX_DUMMY_QUADS = 1,
+ SX_ALPHA_FAIL = 2,
+ SX_RB_QUAD_BUSY = 3,
+ SX_RB_COLOR_BUSY = 4,
+ SX_RB_QUAD_STALL = 5,
+ SX_RB_COLOR_STALL = 6,
+};
+
+enum a2xx_rbbm_perfcount1_sel {
+ RBBM1_COUNT = 0,
+ RBBM1_NRT_BUSY = 1,
+ RBBM1_RB_BUSY = 2,
+ RBBM1_SQ_CNTX0_BUSY = 3,
+ RBBM1_SQ_CNTX17_BUSY = 4,
+ RBBM1_VGT_BUSY = 5,
+ RBBM1_VGT_NODMA_BUSY = 6,
+ RBBM1_PA_BUSY = 7,
+ RBBM1_SC_CNTX_BUSY = 8,
+ RBBM1_TPC_BUSY = 9,
+ RBBM1_TC_BUSY = 10,
+ RBBM1_SX_BUSY = 11,
+ RBBM1_CP_COHER_BUSY = 12,
+ RBBM1_CP_NRT_BUSY = 13,
+ RBBM1_GFX_IDLE_STALL = 14,
+ RBBM1_INTERRUPT = 15,
+};
+
+enum a2xx_cp_perfcount_sel {
+ ALWAYS_COUNT = 0,
+ TRANS_FIFO_FULL = 1,
+ TRANS_FIFO_AF = 2,
+ RCIU_PFPTRANS_WAIT = 3,
+ RCIU_NRTTRANS_WAIT = 6,
+ CSF_NRT_READ_WAIT = 8,
+ CSF_I1_FIFO_FULL = 9,
+ CSF_I2_FIFO_FULL = 10,
+ CSF_ST_FIFO_FULL = 11,
+ CSF_RING_ROQ_FULL = 13,
+ CSF_I1_ROQ_FULL = 14,
+ CSF_I2_ROQ_FULL = 15,
+ CSF_ST_ROQ_FULL = 16,
+ MIU_TAG_MEM_FULL = 18,
+ MIU_WRITECLEAN = 19,
+ MIU_NRT_WRITE_STALLED = 22,
+ MIU_NRT_READ_STALLED = 23,
+ ME_WRITE_CONFIRM_FIFO_FULL = 24,
+ ME_VS_DEALLOC_FIFO_FULL = 25,
+ ME_PS_DEALLOC_FIFO_FULL = 26,
+ ME_REGS_VS_EVENT_FIFO_FULL = 27,
+ ME_REGS_PS_EVENT_FIFO_FULL = 28,
+ ME_REGS_CF_EVENT_FIFO_FULL = 29,
+ ME_MICRO_RB_STARVED = 30,
+ ME_MICRO_I1_STARVED = 31,
+ ME_MICRO_I2_STARVED = 32,
+ ME_MICRO_ST_STARVED = 33,
+ RCIU_RBBM_DWORD_SENT = 40,
+ ME_BUSY_CLOCKS = 41,
+ ME_WAIT_CONTEXT_AVAIL = 42,
+ PFP_TYPE0_PACKET = 43,
+ PFP_TYPE3_PACKET = 44,
+ CSF_RB_WPTR_NEQ_RPTR = 45,
+ CSF_I1_SIZE_NEQ_ZERO = 46,
+ CSF_I2_SIZE_NEQ_ZERO = 47,
+ CSF_RBI1I2_FETCHING = 48,
+};
+
+enum a2xx_rb_perfcnt_select {
+ RBPERF_CNTX_BUSY = 0,
+ RBPERF_CNTX_BUSY_MAX = 1,
+ RBPERF_SX_QUAD_STARVED = 2,
+ RBPERF_SX_QUAD_STARVED_MAX = 3,
+ RBPERF_GA_GC_CH0_SYS_REQ = 4,
+ RBPERF_GA_GC_CH0_SYS_REQ_MAX = 5,
+ RBPERF_GA_GC_CH1_SYS_REQ = 6,
+ RBPERF_GA_GC_CH1_SYS_REQ_MAX = 7,
+ RBPERF_MH_STARVED = 8,
+ RBPERF_MH_STARVED_MAX = 9,
+ RBPERF_AZ_BC_COLOR_BUSY = 10,
+ RBPERF_AZ_BC_COLOR_BUSY_MAX = 11,
+ RBPERF_AZ_BC_Z_BUSY = 12,
+ RBPERF_AZ_BC_Z_BUSY_MAX = 13,
+ RBPERF_RB_SC_TILE_RTR_N = 14,
+ RBPERF_RB_SC_TILE_RTR_N_MAX = 15,
+ RBPERF_RB_SC_SAMP_RTR_N = 16,
+ RBPERF_RB_SC_SAMP_RTR_N_MAX = 17,
+ RBPERF_RB_SX_QUAD_RTR_N = 18,
+ RBPERF_RB_SX_QUAD_RTR_N_MAX = 19,
+ RBPERF_RB_SX_COLOR_RTR_N = 20,
+ RBPERF_RB_SX_COLOR_RTR_N_MAX = 21,
+ RBPERF_RB_SC_SAMP_LZ_BUSY = 22,
+ RBPERF_RB_SC_SAMP_LZ_BUSY_MAX = 23,
+ RBPERF_ZXP_STALL = 24,
+ RBPERF_ZXP_STALL_MAX = 25,
+ RBPERF_EVENT_PENDING = 26,
+ RBPERF_EVENT_PENDING_MAX = 27,
+ RBPERF_RB_MH_VALID = 28,
+ RBPERF_RB_MH_VALID_MAX = 29,
+ RBPERF_SX_RB_QUAD_SEND = 30,
+ RBPERF_SX_RB_COLOR_SEND = 31,
+ RBPERF_SC_RB_TILE_SEND = 32,
+ RBPERF_SC_RB_SAMPLE_SEND = 33,
+ RBPERF_SX_RB_MEM_EXPORT = 34,
+ RBPERF_SX_RB_QUAD_EVENT = 35,
+ RBPERF_SC_RB_TILE_EVENT_FILTERED = 36,
+ RBPERF_SC_RB_TILE_EVENT_ALL = 37,
+ RBPERF_RB_SC_EZ_SEND = 38,
+ RBPERF_RB_SX_INDEX_SEND = 39,
+ RBPERF_GMEM_INTFO_RD = 40,
+ RBPERF_GMEM_INTF1_RD = 41,
+ RBPERF_GMEM_INTFO_WR = 42,
+ RBPERF_GMEM_INTF1_WR = 43,
+ RBPERF_RB_CP_CONTEXT_DONE = 44,
+ RBPERF_RB_CP_CACHE_FLUSH = 45,
+ RBPERF_ZPASS_DONE = 46,
+ RBPERF_ZCMD_VALID = 47,
+ RBPERF_CCMD_VALID = 48,
+ RBPERF_ACCUM_GRANT = 49,
+ RBPERF_ACCUM_C0_GRANT = 50,
+ RBPERF_ACCUM_C1_GRANT = 51,
+ RBPERF_ACCUM_FULL_BE_WR = 52,
+ RBPERF_ACCUM_REQUEST_NO_GRANT = 53,
+ RBPERF_ACCUM_TIMEOUT_PULSE = 54,
+ RBPERF_ACCUM_LIN_TIMEOUT_PULSE = 55,
+ RBPERF_ACCUM_CAM_HIT_FLUSHING = 56,
+};
+
+enum a2xx_mh_perfcnt_select {
+ CP_R0_REQUESTS = 0,
+ CP_R1_REQUESTS = 1,
+ CP_R2_REQUESTS = 2,
+ CP_R3_REQUESTS = 3,
+ CP_R4_REQUESTS = 4,
+ CP_TOTAL_READ_REQUESTS = 5,
+ CP_TOTAL_WRITE_REQUESTS = 6,
+ CP_TOTAL_REQUESTS = 7,
+ CP_DATA_BYTES_WRITTEN = 8,
+ CP_WRITE_CLEAN_RESPONSES = 9,
+ CP_R0_READ_BURSTS_RECEIVED = 10,
+ CP_R1_READ_BURSTS_RECEIVED = 11,
+ CP_R2_READ_BURSTS_RECEIVED = 12,
+ CP_R3_READ_BURSTS_RECEIVED = 13,
+ CP_R4_READ_BURSTS_RECEIVED = 14,
+ CP_TOTAL_READ_BURSTS_RECEIVED = 15,
+ CP_R0_DATA_BEATS_READ = 16,
+ CP_R1_DATA_BEATS_READ = 17,
+ CP_R2_DATA_BEATS_READ = 18,
+ CP_R3_DATA_BEATS_READ = 19,
+ CP_R4_DATA_BEATS_READ = 20,
+ CP_TOTAL_DATA_BEATS_READ = 21,
+ VGT_R0_REQUESTS = 22,
+ VGT_R1_REQUESTS = 23,
+ VGT_TOTAL_REQUESTS = 24,
+ VGT_R0_READ_BURSTS_RECEIVED = 25,
+ VGT_R1_READ_BURSTS_RECEIVED = 26,
+ VGT_TOTAL_READ_BURSTS_RECEIVED = 27,
+ VGT_R0_DATA_BEATS_READ = 28,
+ VGT_R1_DATA_BEATS_READ = 29,
+ VGT_TOTAL_DATA_BEATS_READ = 30,
+ TC_TOTAL_REQUESTS = 31,
+ TC_ROQ_REQUESTS = 32,
+ TC_INFO_SENT = 33,
+ TC_READ_BURSTS_RECEIVED = 34,
+ TC_DATA_BEATS_READ = 35,
+ TCD_BURSTS_READ = 36,
+ RB_REQUESTS = 37,
+ RB_DATA_BYTES_WRITTEN = 38,
+ RB_WRITE_CLEAN_RESPONSES = 39,
+ AXI_READ_REQUESTS_ID_0 = 40,
+ AXI_READ_REQUESTS_ID_1 = 41,
+ AXI_READ_REQUESTS_ID_2 = 42,
+ AXI_READ_REQUESTS_ID_3 = 43,
+ AXI_READ_REQUESTS_ID_4 = 44,
+ AXI_READ_REQUESTS_ID_5 = 45,
+ AXI_READ_REQUESTS_ID_6 = 46,
+ AXI_READ_REQUESTS_ID_7 = 47,
+ AXI_TOTAL_READ_REQUESTS = 48,
+ AXI_WRITE_REQUESTS_ID_0 = 49,
+ AXI_WRITE_REQUESTS_ID_1 = 50,
+ AXI_WRITE_REQUESTS_ID_2 = 51,
+ AXI_WRITE_REQUESTS_ID_3 = 52,
+ AXI_WRITE_REQUESTS_ID_4 = 53,
+ AXI_WRITE_REQUESTS_ID_5 = 54,
+ AXI_WRITE_REQUESTS_ID_6 = 55,
+ AXI_WRITE_REQUESTS_ID_7 = 56,
+ AXI_TOTAL_WRITE_REQUESTS = 57,
+ AXI_TOTAL_REQUESTS_ID_0 = 58,
+ AXI_TOTAL_REQUESTS_ID_1 = 59,
+ AXI_TOTAL_REQUESTS_ID_2 = 60,
+ AXI_TOTAL_REQUESTS_ID_3 = 61,
+ AXI_TOTAL_REQUESTS_ID_4 = 62,
+ AXI_TOTAL_REQUESTS_ID_5 = 63,
+ AXI_TOTAL_REQUESTS_ID_6 = 64,
+ AXI_TOTAL_REQUESTS_ID_7 = 65,
+ AXI_TOTAL_REQUESTS = 66,
+ AXI_READ_CHANNEL_BURSTS_ID_0 = 67,
+ AXI_READ_CHANNEL_BURSTS_ID_1 = 68,
+ AXI_READ_CHANNEL_BURSTS_ID_2 = 69,
+ AXI_READ_CHANNEL_BURSTS_ID_3 = 70,
+ AXI_READ_CHANNEL_BURSTS_ID_4 = 71,
+ AXI_READ_CHANNEL_BURSTS_ID_5 = 72,
+ AXI_READ_CHANNEL_BURSTS_ID_6 = 73,
+ AXI_READ_CHANNEL_BURSTS_ID_7 = 74,
+ AXI_READ_CHANNEL_TOTAL_BURSTS = 75,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_0 = 76,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_1 = 77,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_2 = 78,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_3 = 79,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_4 = 80,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_5 = 81,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_6 = 82,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_7 = 83,
+ AXI_READ_CHANNEL_TOTAL_DATA_BEATS_READ = 84,
+ AXI_WRITE_CHANNEL_BURSTS_ID_0 = 85,
+ AXI_WRITE_CHANNEL_BURSTS_ID_1 = 86,
+ AXI_WRITE_CHANNEL_BURSTS_ID_2 = 87,
+ AXI_WRITE_CHANNEL_BURSTS_ID_3 = 88,
+ AXI_WRITE_CHANNEL_BURSTS_ID_4 = 89,
+ AXI_WRITE_CHANNEL_BURSTS_ID_5 = 90,
+ AXI_WRITE_CHANNEL_BURSTS_ID_6 = 91,
+ AXI_WRITE_CHANNEL_BURSTS_ID_7 = 92,
+ AXI_WRITE_CHANNEL_TOTAL_BURSTS = 93,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_0 = 94,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_1 = 95,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_2 = 96,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_3 = 97,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_4 = 98,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_5 = 99,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_6 = 100,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_7 = 101,
+ AXI_WRITE_CHANNEL_TOTAL_DATA_BYTES_WRITTEN = 102,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_0 = 103,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_1 = 104,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_2 = 105,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_3 = 106,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_4 = 107,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_5 = 108,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_6 = 109,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_7 = 110,
+ AXI_WRITE_RESPONSE_CHANNEL_TOTAL_RESPONSES = 111,
+ TOTAL_MMU_MISSES = 112,
+ MMU_READ_MISSES = 113,
+ MMU_WRITE_MISSES = 114,
+ TOTAL_MMU_HITS = 115,
+ MMU_READ_HITS = 116,
+ MMU_WRITE_HITS = 117,
+ SPLIT_MODE_TC_HITS = 118,
+ SPLIT_MODE_TC_MISSES = 119,
+ SPLIT_MODE_NON_TC_HITS = 120,
+ SPLIT_MODE_NON_TC_MISSES = 121,
+ STALL_AWAITING_TLB_MISS_FETCH = 122,
+ MMU_TLB_MISS_READ_BURSTS_RECEIVED = 123,
+ MMU_TLB_MISS_DATA_BEATS_READ = 124,
+ CP_CYCLES_HELD_OFF = 125,
+ VGT_CYCLES_HELD_OFF = 126,
+ TC_CYCLES_HELD_OFF = 127,
+ TC_ROQ_CYCLES_HELD_OFF = 128,
+ TC_CYCLES_HELD_OFF_TCD_FULL = 129,
+ RB_CYCLES_HELD_OFF = 130,
+ TOTAL_CYCLES_ANY_CLNT_HELD_OFF = 131,
+ TLB_MISS_CYCLES_HELD_OFF = 132,
+ AXI_READ_REQUEST_HELD_OFF = 133,
+ AXI_WRITE_REQUEST_HELD_OFF = 134,
+ AXI_REQUEST_HELD_OFF = 135,
+ AXI_REQUEST_HELD_OFF_INFLIGHT_LIMIT = 136,
+ AXI_WRITE_DATA_HELD_OFF = 137,
+ CP_SAME_PAGE_BANK_REQUESTS = 138,
+ VGT_SAME_PAGE_BANK_REQUESTS = 139,
+ TC_SAME_PAGE_BANK_REQUESTS = 140,
+ TC_ARB_HOLD_SAME_PAGE_BANK_REQUESTS = 141,
+ RB_SAME_PAGE_BANK_REQUESTS = 142,
+ TOTAL_SAME_PAGE_BANK_REQUESTS = 143,
+ CP_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 144,
+ VGT_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 145,
+ TC_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 146,
+ RB_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 147,
+ TOTAL_SAME_PAGE_BANK_KILLED_FAIRNESS_LIMIT = 148,
+ TOTAL_MH_READ_REQUESTS = 149,
+ TOTAL_MH_WRITE_REQUESTS = 150,
+ TOTAL_MH_REQUESTS = 151,
+ MH_BUSY = 152,
+ CP_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 153,
+ VGT_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 154,
+ TC_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 155,
+ RB_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 156,
+ TC_ROQ_N_VALID_ENTRIES = 157,
+ ARQ_N_ENTRIES = 158,
+ WDB_N_ENTRIES = 159,
+ MH_READ_LATENCY_OUTST_REQ_SUM = 160,
+ MC_READ_LATENCY_OUTST_REQ_SUM = 161,
+ MC_TOTAL_READ_REQUESTS = 162,
+ ELAPSED_CYCLES_MH_GATED_CLK = 163,
+ ELAPSED_CLK_CYCLES = 164,
+ CP_W_16B_REQUESTS = 165,
+ CP_W_32B_REQUESTS = 166,
+ TC_16B_REQUESTS = 167,
+ TC_32B_REQUESTS = 168,
+ PA_REQUESTS = 169,
+ PA_DATA_BYTES_WRITTEN = 170,
+ PA_WRITE_CLEAN_RESPONSES = 171,
+ PA_CYCLES_HELD_OFF = 172,
+ AXI_READ_REQUEST_DATA_BEATS_ID_0 = 173,
+ AXI_READ_REQUEST_DATA_BEATS_ID_1 = 174,
+ AXI_READ_REQUEST_DATA_BEATS_ID_2 = 175,
+ AXI_READ_REQUEST_DATA_BEATS_ID_3 = 176,
+ AXI_READ_REQUEST_DATA_BEATS_ID_4 = 177,
+ AXI_READ_REQUEST_DATA_BEATS_ID_5 = 178,
+ AXI_READ_REQUEST_DATA_BEATS_ID_6 = 179,
+ AXI_READ_REQUEST_DATA_BEATS_ID_7 = 180,
+ AXI_TOTAL_READ_REQUEST_DATA_BEATS = 181,
+};
+
+enum adreno_mmu_clnt_beh {
+ BEH_NEVR = 0,
+ BEH_TRAN_RNG = 1,
+ BEH_TRAN_FLT = 2,
+};
+
+enum sq_tex_clamp {
+ SQ_TEX_WRAP = 0,
+ SQ_TEX_MIRROR = 1,
+ SQ_TEX_CLAMP_LAST_TEXEL = 2,
+ SQ_TEX_MIRROR_ONCE_LAST_TEXEL = 3,
+ SQ_TEX_CLAMP_HALF_BORDER = 4,
+ SQ_TEX_MIRROR_ONCE_HALF_BORDER = 5,
+ SQ_TEX_CLAMP_BORDER = 6,
+ SQ_TEX_MIRROR_ONCE_BORDER = 7,
+};
+
+enum sq_tex_swiz {
+ SQ_TEX_X = 0,
+ SQ_TEX_Y = 1,
+ SQ_TEX_Z = 2,
+ SQ_TEX_W = 3,
+ SQ_TEX_ZERO = 4,
+ SQ_TEX_ONE = 5,
+};
+
+enum sq_tex_filter {
+ SQ_TEX_FILTER_POINT = 0,
+ SQ_TEX_FILTER_BILINEAR = 1,
+ SQ_TEX_FILTER_BASEMAP = 2,
+ SQ_TEX_FILTER_USE_FETCH_CONST = 3,
+};
+
+enum sq_tex_aniso_filter {
+ SQ_TEX_ANISO_FILTER_DISABLED = 0,
+ SQ_TEX_ANISO_FILTER_MAX_1_1 = 1,
+ SQ_TEX_ANISO_FILTER_MAX_2_1 = 2,
+ SQ_TEX_ANISO_FILTER_MAX_4_1 = 3,
+ SQ_TEX_ANISO_FILTER_MAX_8_1 = 4,
+ SQ_TEX_ANISO_FILTER_MAX_16_1 = 5,
+ SQ_TEX_ANISO_FILTER_USE_FETCH_CONST = 7,
+};
+
+enum sq_tex_dimension {
+ SQ_TEX_DIMENSION_1D = 0,
+ SQ_TEX_DIMENSION_2D = 1,
+ SQ_TEX_DIMENSION_3D = 2,
+ SQ_TEX_DIMENSION_CUBE = 3,
+};
+
+enum sq_tex_border_color {
+ SQ_TEX_BORDER_COLOR_BLACK = 0,
+ SQ_TEX_BORDER_COLOR_WHITE = 1,
+ SQ_TEX_BORDER_COLOR_ACBYCR_BLACK = 2,
+ SQ_TEX_BORDER_COLOR_ACBCRY_BLACK = 3,
+};
+
+enum sq_tex_sign {
+ SQ_TEX_SIGN_UNSIGNED = 0,
+ SQ_TEX_SIGN_SIGNED = 1,
+ SQ_TEX_SIGN_UNSIGNED_BIASED = 2,
+ SQ_TEX_SIGN_GAMMA = 3,
+};
+
+enum sq_tex_endian {
+ SQ_TEX_ENDIAN_NONE = 0,
+ SQ_TEX_ENDIAN_8IN16 = 1,
+ SQ_TEX_ENDIAN_8IN32 = 2,
+ SQ_TEX_ENDIAN_16IN32 = 3,
+};
+
+enum sq_tex_clamp_policy {
+ SQ_TEX_CLAMP_POLICY_D3D = 0,
+ SQ_TEX_CLAMP_POLICY_OGL = 1,
+};
+
+enum sq_tex_num_format {
+ SQ_TEX_NUM_FORMAT_FRAC = 0,
+ SQ_TEX_NUM_FORMAT_INT = 1,
+};
+
+enum sq_tex_type {
+ SQ_TEX_TYPE_0 = 0,
+ SQ_TEX_TYPE_1 = 1,
+ SQ_TEX_TYPE_2 = 2,
+ SQ_TEX_TYPE_3 = 3,
+};
+
+#define REG_A2XX_RBBM_PATCH_RELEASE 0x00000001
+
+#define REG_A2XX_RBBM_CNTL 0x0000003b
+
+#define REG_A2XX_RBBM_SOFT_RESET 0x0000003c
+
+#define REG_A2XX_CP_PFP_UCODE_ADDR 0x000000c0
+
+#define REG_A2XX_CP_PFP_UCODE_DATA 0x000000c1
+
+#define REG_A2XX_MH_MMU_CONFIG 0x00000040
+#define A2XX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001
+#define A2XX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002
+#define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030
+#define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4
+static inline uint32_t A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0
+#define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300
+#define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00
+#define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000
+#define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000
+#define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000
+#define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000
+#define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18
+static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000
+#define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20
+static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000
+#define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22
+static inline uint32_t A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000
+#define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24
+static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK;
+}
+
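+/*
+ * Usage sketch (illustrative, abridged): the per-client BEHAVIOR helpers
+ * compose into a single MMU-config word, e.g. from an a2xx hw_init path,
+ * assuming the msm gpu_write() helper:
+ *
+ *   gpu_write(gpu, REG_A2XX_MH_MMU_CONFIG,
+ *             A2XX_MH_MMU_CONFIG_MMU_ENABLE |
+ *             A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ *             A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ *             A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(BEH_TRAN_RNG));
+ */
+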
+#define REG_A2XX_MH_MMU_VA_RANGE 0x00000041
+#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK 0x00000fff
+#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT 0
+static inline uint32_t A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(uint32_t val)
+{
+ return ((val) << A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT) & A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK;
+}
+#define A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK 0xfffff000
+#define A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT 12
+static inline uint32_t A2XX_MH_MMU_VA_RANGE_VA_BASE(uint32_t val)
+{
+ return ((val) << A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT) & A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK;
+}
+
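+/*
+ * Packing sketch (illustrative): a 16MB VA window based at 0x01000000
+ * would be encoded as NUM_64KB_REGIONS(SZ_16M / SZ_64K) (= 0x100) ORed
+ * with VA_BASE(0x01000000 >> 12); VA_BASE() shifts its argument back up
+ * by 12, so the caller passes the base address in 4KB units.
+ */
+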
+#define REG_A2XX_MH_MMU_PT_BASE 0x00000042
+
+#define REG_A2XX_MH_MMU_PAGE_FAULT 0x00000043
+
+#define REG_A2XX_MH_MMU_TRAN_ERROR 0x00000044
+
+#define REG_A2XX_MH_MMU_INVALIDATE 0x00000045
+#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL 0x00000001
+#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC 0x00000002
+
+#define REG_A2XX_MH_MMU_MPU_BASE 0x00000046
+
+#define REG_A2XX_MH_MMU_MPU_END 0x00000047
+
+#define REG_A2XX_NQWAIT_UNTIL 0x00000394
+
+#define REG_A2XX_RBBM_PERFCOUNTER0_SELECT 0x00000395
+
+#define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000396
+
+#define REG_A2XX_RBBM_PERFCOUNTER0_LO 0x00000397
+
+#define REG_A2XX_RBBM_PERFCOUNTER0_HI 0x00000398
+
+#define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000399
+
+#define REG_A2XX_RBBM_PERFCOUNTER1_HI 0x0000039a
+
+#define REG_A2XX_RBBM_DEBUG 0x0000039b
+
+#define REG_A2XX_RBBM_PM_OVERRIDE1 0x0000039c
+#define A2XX_RBBM_PM_OVERRIDE1_RBBM_AHBCLK_PM_OVERRIDE 0x00000001
+#define A2XX_RBBM_PM_OVERRIDE1_SC_REG_SCLK_PM_OVERRIDE 0x00000002
+#define A2XX_RBBM_PM_OVERRIDE1_SC_SCLK_PM_OVERRIDE 0x00000004
+#define A2XX_RBBM_PM_OVERRIDE1_SP_TOP_SCLK_PM_OVERRIDE 0x00000008
+#define A2XX_RBBM_PM_OVERRIDE1_SP_V0_SCLK_PM_OVERRIDE 0x00000010
+#define A2XX_RBBM_PM_OVERRIDE1_SQ_REG_SCLK_PM_OVERRIDE 0x00000020
+#define A2XX_RBBM_PM_OVERRIDE1_SQ_REG_FIFOS_SCLK_PM_OVERRIDE 0x00000040
+#define A2XX_RBBM_PM_OVERRIDE1_SQ_CONST_MEM_SCLK_PM_OVERRIDE 0x00000080
+#define A2XX_RBBM_PM_OVERRIDE1_SQ_SQ_SCLK_PM_OVERRIDE 0x00000100
+#define A2XX_RBBM_PM_OVERRIDE1_SX_SCLK_PM_OVERRIDE 0x00000200
+#define A2XX_RBBM_PM_OVERRIDE1_SX_REG_SCLK_PM_OVERRIDE 0x00000400
+#define A2XX_RBBM_PM_OVERRIDE1_TCM_TCO_SCLK_PM_OVERRIDE 0x00000800
+#define A2XX_RBBM_PM_OVERRIDE1_TCM_TCM_SCLK_PM_OVERRIDE 0x00001000
+#define A2XX_RBBM_PM_OVERRIDE1_TCM_TCD_SCLK_PM_OVERRIDE 0x00002000
+#define A2XX_RBBM_PM_OVERRIDE1_TCM_REG_SCLK_PM_OVERRIDE 0x00004000
+#define A2XX_RBBM_PM_OVERRIDE1_TPC_TPC_SCLK_PM_OVERRIDE 0x00008000
+#define A2XX_RBBM_PM_OVERRIDE1_TPC_REG_SCLK_PM_OVERRIDE 0x00010000
+#define A2XX_RBBM_PM_OVERRIDE1_TCF_TCA_SCLK_PM_OVERRIDE 0x00020000
+#define A2XX_RBBM_PM_OVERRIDE1_TCF_TCB_SCLK_PM_OVERRIDE 0x00040000
+#define A2XX_RBBM_PM_OVERRIDE1_TCF_TCB_READ_SCLK_PM_OVERRIDE 0x00080000
+#define A2XX_RBBM_PM_OVERRIDE1_TP_TP_SCLK_PM_OVERRIDE 0x00100000
+#define A2XX_RBBM_PM_OVERRIDE1_TP_REG_SCLK_PM_OVERRIDE 0x00200000
+#define A2XX_RBBM_PM_OVERRIDE1_CP_G_SCLK_PM_OVERRIDE 0x00400000
+#define A2XX_RBBM_PM_OVERRIDE1_CP_REG_SCLK_PM_OVERRIDE 0x00800000
+#define A2XX_RBBM_PM_OVERRIDE1_CP_G_REG_SCLK_PM_OVERRIDE 0x01000000
+#define A2XX_RBBM_PM_OVERRIDE1_SPI_SCLK_PM_OVERRIDE 0x02000000
+#define A2XX_RBBM_PM_OVERRIDE1_RB_REG_SCLK_PM_OVERRIDE 0x04000000
+#define A2XX_RBBM_PM_OVERRIDE1_RB_SCLK_PM_OVERRIDE 0x08000000
+#define A2XX_RBBM_PM_OVERRIDE1_MH_MH_SCLK_PM_OVERRIDE 0x10000000
+#define A2XX_RBBM_PM_OVERRIDE1_MH_REG_SCLK_PM_OVERRIDE 0x20000000
+#define A2XX_RBBM_PM_OVERRIDE1_MH_MMU_SCLK_PM_OVERRIDE 0x40000000
+#define A2XX_RBBM_PM_OVERRIDE1_MH_TCROQ_SCLK_PM_OVERRIDE 0x80000000
+
+#define REG_A2XX_RBBM_PM_OVERRIDE2 0x0000039d
+
+#define REG_A2XX_RBBM_DEBUG_OUT 0x000003a0
+
+#define REG_A2XX_RBBM_DEBUG_CNTL 0x000003a1
+
+#define REG_A2XX_RBBM_READ_ERROR 0x000003b3
+
+#define REG_A2XX_RBBM_INT_CNTL 0x000003b4
+#define A2XX_RBBM_INT_CNTL_RDERR_INT_MASK 0x00000001
+#define A2XX_RBBM_INT_CNTL_DISPLAY_UPDATE_INT_MASK 0x00000002
+#define A2XX_RBBM_INT_CNTL_GUI_IDLE_INT_MASK 0x00080000
+
+#define REG_A2XX_RBBM_INT_STATUS 0x000003b5
+
+#define REG_A2XX_RBBM_INT_ACK 0x000003b6
+
+#define REG_A2XX_MASTER_INT_SIGNAL 0x000003b7
+#define A2XX_MASTER_INT_SIGNAL_MH_INT_STAT 0x00000020
+#define A2XX_MASTER_INT_SIGNAL_SQ_INT_STAT 0x04000000
+#define A2XX_MASTER_INT_SIGNAL_CP_INT_STAT 0x40000000
+#define A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT 0x80000000
+
+#define REG_A2XX_RBBM_PERIPHID1 0x000003f9
+
+#define REG_A2XX_RBBM_PERIPHID2 0x000003fa
+
+#define REG_A2XX_CP_PERFMON_CNTL 0x00000444
+
+#define REG_A2XX_CP_PERFCOUNTER_SELECT 0x00000445
+
+#define REG_A2XX_CP_PERFCOUNTER_LO 0x00000446
+
+#define REG_A2XX_CP_PERFCOUNTER_HI 0x00000447
+
+#define REG_A2XX_RBBM_STATUS 0x000005d0
+#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK 0x0000001f
+#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT 0
+static inline uint32_t A2XX_RBBM_STATUS_CMDFIFO_AVAIL(uint32_t val)
+{
+ return ((val) << A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT) & A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK;
+}
+#define A2XX_RBBM_STATUS_TC_BUSY 0x00000020
+#define A2XX_RBBM_STATUS_HIRQ_PENDING 0x00000100
+#define A2XX_RBBM_STATUS_CPRQ_PENDING 0x00000200
+#define A2XX_RBBM_STATUS_CFRQ_PENDING 0x00000400
+#define A2XX_RBBM_STATUS_PFRQ_PENDING 0x00000800
+#define A2XX_RBBM_STATUS_VGT_BUSY_NO_DMA 0x00001000
+#define A2XX_RBBM_STATUS_RBBM_WU_BUSY 0x00004000
+#define A2XX_RBBM_STATUS_CP_NRT_BUSY 0x00010000
+#define A2XX_RBBM_STATUS_MH_BUSY 0x00040000
+#define A2XX_RBBM_STATUS_MH_COHERENCY_BUSY 0x00080000
+#define A2XX_RBBM_STATUS_SX_BUSY 0x00200000
+#define A2XX_RBBM_STATUS_TPC_BUSY 0x00400000
+#define A2XX_RBBM_STATUS_SC_CNTX_BUSY 0x01000000
+#define A2XX_RBBM_STATUS_PA_BUSY 0x02000000
+#define A2XX_RBBM_STATUS_VGT_BUSY 0x04000000
+#define A2XX_RBBM_STATUS_SQ_CNTX17_BUSY 0x08000000
+#define A2XX_RBBM_STATUS_SQ_CNTX0_BUSY 0x10000000
+#define A2XX_RBBM_STATUS_RB_CNTX_BUSY 0x40000000
+#define A2XX_RBBM_STATUS_GUI_ACTIVE 0x80000000
+
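+/*
+ * Idle-poll sketch (illustrative, assuming the msm gpu_read() helper):
+ * GUI_ACTIVE is commonly used as the aggregate busy indication, so a
+ * driver can treat the GPU as quiescent once it clears:
+ *
+ *   if (!(gpu_read(gpu, REG_A2XX_RBBM_STATUS) & A2XX_RBBM_STATUS_GUI_ACTIVE))
+ *           return true;
+ */
+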
+#define REG_A2XX_MH_ARBITER_CONFIG 0x00000a40
+#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK 0x0000003f
+#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT 0
+static inline uint32_t A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(uint32_t val)
+{
+ return ((val) << A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT) & A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK;
+}
+#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_GRANULARITY 0x00000040
+#define A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE 0x00000080
+#define A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE 0x00000100
+#define A2XX_MH_ARBITER_CONFIG_L2_ARB_CONTROL 0x00000200
+#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK 0x00001c00
+#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT 10
+static inline uint32_t A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT) & A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK;
+}
+#define A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE 0x00002000
+#define A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE 0x00004000
+#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE 0x00008000
+#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK 0x003f0000
+#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT 16
+static inline uint32_t A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(uint32_t val)
+{
+ return ((val) << A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT) & A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK;
+}
+#define A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE 0x00400000
+#define A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE 0x00800000
+#define A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE 0x01000000
+#define A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE 0x02000000
+#define A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE 0x04000000
+
+#define REG_A2XX_MH_INTERRUPT_MASK 0x00000a42
+#define A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR 0x00000001
+#define A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR 0x00000002
+#define A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT 0x00000004
+
+#define REG_A2XX_MH_INTERRUPT_STATUS 0x00000a43
+
+#define REG_A2XX_MH_INTERRUPT_CLEAR 0x00000a44
+
+#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1 0x00000a54
+
+#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG2 0x00000a55
+
+#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01
+#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
+#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A2XX_A220_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT) & A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
+#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT 5
+static inline uint32_t A2XX_A220_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT) & A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
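+/*
+ * Note: unlike most field helpers in this file, the two above pre-shift
+ * val right by 5, so callers pass the bin dimension in pixels and the
+ * register field holds it in 32-pixel units (e.g. WIDTH(64) encodes 2).
+ */
+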
+static inline uint32_t REG_A2XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+
+static inline uint32_t REG_A2XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+
+static inline uint32_t REG_A2XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; }
+
+static inline uint32_t REG_A2XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
+
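+/*
+ * The VSC pipe registers form an array of 3-register records; the i0
+ * parameter selects the record, e.g. REG_A2XX_VSC_PIPE_DATA_ADDRESS(2)
+ * == 0x00000c07 + 0x3*2 == 0x00000c0d.
+ */
+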
+#define REG_A2XX_PC_DEBUG_CNTL 0x00000c38
+
+#define REG_A2XX_PC_DEBUG_DATA 0x00000c39
+
+#define REG_A2XX_PA_SC_VIZ_QUERY_STATUS 0x00000c44
+
+#define REG_A2XX_GRAS_DEBUG_CNTL 0x00000c80
+
+#define REG_A2XX_PA_SU_DEBUG_CNTL 0x00000c80
+
+#define REG_A2XX_GRAS_DEBUG_DATA 0x00000c81
+
+#define REG_A2XX_PA_SU_DEBUG_DATA 0x00000c81
+
+#define REG_A2XX_PA_SU_FACE_DATA 0x00000c86
+#define A2XX_PA_SU_FACE_DATA_BASE_ADDR__MASK 0xffffffe0
+#define A2XX_PA_SU_FACE_DATA_BASE_ADDR__SHIFT 5
+static inline uint32_t A2XX_PA_SU_FACE_DATA_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A2XX_PA_SU_FACE_DATA_BASE_ADDR__SHIFT) & A2XX_PA_SU_FACE_DATA_BASE_ADDR__MASK;
+}
+
+#define REG_A2XX_SQ_GPR_MANAGEMENT 0x00000d00
+#define A2XX_SQ_GPR_MANAGEMENT_REG_DYNAMIC 0x00000001
+#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__MASK 0x00000ff0
+#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__SHIFT 4
+static inline uint32_t A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX(uint32_t val)
+{
+ return ((val) << A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__SHIFT) & A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__MASK;
+}
+#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__MASK 0x000ff000
+#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__SHIFT 12
+static inline uint32_t A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX(uint32_t val)
+{
+ return ((val) << A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__SHIFT) & A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__MASK;
+}
+
+#define REG_A2XX_SQ_FLOW_CONTROL 0x00000d01
+
+#define REG_A2XX_SQ_INST_STORE_MANAGMENT 0x00000d02
+#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__MASK 0x00000fff
+#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__SHIFT 0
+static inline uint32_t A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX(uint32_t val)
+{
+ return ((val) << A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__SHIFT) & A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__MASK;
+}
+#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__MASK 0x0fff0000
+#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__SHIFT 16
+static inline uint32_t A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX(uint32_t val)
+{
+ return ((val) << A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__SHIFT) & A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__MASK;
+}
+
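+/*
+ * ("MANAGMENT" above preserves the upstream identifier spelling;
+ * correcting it would break every caller.)  The two fields hold the
+ * 12-bit instruction-store base slots for pixel and vertex shaders.
+ */
+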
+#define REG_A2XX_SQ_DEBUG_MISC 0x00000d05
+
+#define REG_A2XX_SQ_INT_CNTL 0x00000d34
+
+#define REG_A2XX_SQ_INT_STATUS 0x00000d35
+
+#define REG_A2XX_SQ_INT_ACK 0x00000d36
+
+#define REG_A2XX_SQ_DEBUG_INPUT_FSM 0x00000dae
+
+#define REG_A2XX_SQ_DEBUG_CONST_MGR_FSM 0x00000daf
+
+#define REG_A2XX_SQ_DEBUG_TP_FSM 0x00000db0
+
+#define REG_A2XX_SQ_DEBUG_FSM_ALU_0 0x00000db1
+
+#define REG_A2XX_SQ_DEBUG_FSM_ALU_1 0x00000db2
+
+#define REG_A2XX_SQ_DEBUG_EXP_ALLOC 0x00000db3
+
+#define REG_A2XX_SQ_DEBUG_PTR_BUFF 0x00000db4
+
+#define REG_A2XX_SQ_DEBUG_GPR_VTX 0x00000db5
+
+#define REG_A2XX_SQ_DEBUG_GPR_PIX 0x00000db6
+
+#define REG_A2XX_SQ_DEBUG_TB_STATUS_SEL 0x00000db7
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_0 0x00000db8
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_1 0x00000db9
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_STATUS_REG 0x00000dba
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_STATE_MEM 0x00000dbb
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_0 0x00000dbc
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_0 0x00000dbd
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_1 0x00000dbe
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_2 0x00000dbf
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_3 0x00000dc0
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATE_MEM 0x00000dc1
+
+#define REG_A2XX_TC_CNTL_STATUS 0x00000e00
+#define A2XX_TC_CNTL_STATUS_L2_INVALIDATE 0x00000001
+
+#define REG_A2XX_TP0_CHICKEN 0x00000e1e
+
+#define REG_A2XX_RB_BC_CONTROL 0x00000f01
+#define A2XX_RB_BC_CONTROL_ACCUM_LINEAR_MODE_ENABLE 0x00000001
+#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK 0x00000006
+#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT 1
+static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_DISABLE_EDRAM_CAM 0x00000008
+#define A2XX_RB_BC_CONTROL_DISABLE_EZ_FAST_CONTEXT_SWITCH 0x00000010
+#define A2XX_RB_BC_CONTROL_DISABLE_EZ_NULL_ZCMD_DROP 0x00000020
+#define A2XX_RB_BC_CONTROL_DISABLE_LZ_NULL_ZCMD_DROP 0x00000040
+#define A2XX_RB_BC_CONTROL_ENABLE_AZ_THROTTLE 0x00000080
+#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK 0x00001f00
+#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT 8
+static inline uint32_t A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT) & A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_ENABLE_CRC_UPDATE 0x00004000
+#define A2XX_RB_BC_CONTROL_CRC_MODE 0x00008000
+#define A2XX_RB_BC_CONTROL_DISABLE_SAMPLE_COUNTERS 0x00010000
+#define A2XX_RB_BC_CONTROL_DISABLE_ACCUM 0x00020000
+#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK 0x003c0000
+#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT 18
+static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK;
+}
+#define A2XX_RB_BC_CONTROL_LINEAR_PERFORMANCE_ENABLE 0x00400000
+#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK 0x07800000
+#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT 23
+static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK 0x18000000
+#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT 27
+static inline uint32_t A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_MEM_EXPORT_LINEAR_MODE_ENABLE 0x20000000
+#define A2XX_RB_BC_CONTROL_CRC_SYSTEM 0x40000000
+#define A2XX_RB_BC_CONTROL_RESERVED6 0x80000000
+
+#define REG_A2XX_RB_EDRAM_INFO 0x00000f02
+
+#define REG_A2XX_RB_DEBUG_CNTL 0x00000f26
+
+#define REG_A2XX_RB_DEBUG_DATA 0x00000f27
+
+#define REG_A2XX_RB_SURFACE_INFO 0x00002000
+#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK 0x00003fff
+#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT 0
+static inline uint32_t A2XX_RB_SURFACE_INFO_SURFACE_PITCH(uint32_t val)
+{
+ return ((val) << A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT) & A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK;
+}
+#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK 0x0000c000
+#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT 14
+static inline uint32_t A2XX_RB_SURFACE_INFO_MSAA_SAMPLES(uint32_t val)
+{
+ return ((val) << A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT) & A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK;
+}
+
+#define REG_A2XX_RB_COLOR_INFO 0x00002001
+#define A2XX_RB_COLOR_INFO_FORMAT__MASK 0x0000000f
+#define A2XX_RB_COLOR_INFO_FORMAT__SHIFT 0
+static inline uint32_t A2XX_RB_COLOR_INFO_FORMAT(enum a2xx_colorformatx val)
+{
+ return ((val) << A2XX_RB_COLOR_INFO_FORMAT__SHIFT) & A2XX_RB_COLOR_INFO_FORMAT__MASK;
+}
+#define A2XX_RB_COLOR_INFO_ROUND_MODE__MASK 0x00000030
+#define A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT 4
+static inline uint32_t A2XX_RB_COLOR_INFO_ROUND_MODE(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT) & A2XX_RB_COLOR_INFO_ROUND_MODE__MASK;
+}
+#define A2XX_RB_COLOR_INFO_LINEAR 0x00000040
+#define A2XX_RB_COLOR_INFO_ENDIAN__MASK 0x00000180
+#define A2XX_RB_COLOR_INFO_ENDIAN__SHIFT 7
+static inline uint32_t A2XX_RB_COLOR_INFO_ENDIAN(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLOR_INFO_ENDIAN__SHIFT) & A2XX_RB_COLOR_INFO_ENDIAN__MASK;
+}
+#define A2XX_RB_COLOR_INFO_SWAP__MASK 0x00000600
+#define A2XX_RB_COLOR_INFO_SWAP__SHIFT 9
+static inline uint32_t A2XX_RB_COLOR_INFO_SWAP(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLOR_INFO_SWAP__SHIFT) & A2XX_RB_COLOR_INFO_SWAP__MASK;
+}
+#define A2XX_RB_COLOR_INFO_BASE__MASK 0xfffff000
+#define A2XX_RB_COLOR_INFO_BASE__SHIFT 12
+static inline uint32_t A2XX_RB_COLOR_INFO_BASE(uint32_t val)
+{
+ return ((val >> 12) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
+}
+
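+/*
+ * Note: BASE() pre-shifts val right by 12 while the field shift is also
+ * 12, which simply clears the low 12 bits — callers pass a byte
+ * address/offset, which must therefore be 4KB aligned.
+ */
+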
+#define REG_A2XX_RB_DEPTH_INFO 0x00002002
+#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
+#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
+{
+ return ((val) << A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
+}
+#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff000
+#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
+static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
+{
+ return ((val >> 12) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+}
+
+#define REG_A2XX_A225_RB_COLOR_INFO3 0x00002005
+
+#define REG_A2XX_COHER_DEST_BASE_0 0x00002006
+
+#define REG_A2XX_PA_SC_SCREEN_SCISSOR_TL 0x0000200e
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK;
+}
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A2XX_PA_SC_SCREEN_SCISSOR_BR 0x0000200f
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK;
+}
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A2XX_PA_SC_WINDOW_OFFSET 0x00002080
+#define A2XX_PA_SC_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_X(int32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_X__MASK;
+}
+#define A2XX_PA_SC_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_Y(int32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_Y__MASK;
+}
+#define A2XX_PA_SC_WINDOW_OFFSET_DISABLE 0x80000000
+
+#define REG_A2XX_PA_SC_WINDOW_SCISSOR_TL 0x00002081
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A2XX_PA_SC_WINDOW_SCISSOR_BR 0x00002082
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A2XX_UNKNOWN_2010 0x00002010
+
+#define REG_A2XX_VGT_MAX_VTX_INDX 0x00002100
+
+#define REG_A2XX_VGT_MIN_VTX_INDX 0x00002101
+
+#define REG_A2XX_VGT_INDX_OFFSET 0x00002102
+
+#define REG_A2XX_A225_PC_MULTI_PRIM_IB_RESET_INDX 0x00002103
+
+#define REG_A2XX_RB_COLOR_MASK 0x00002104
+#define A2XX_RB_COLOR_MASK_WRITE_RED 0x00000001
+#define A2XX_RB_COLOR_MASK_WRITE_GREEN 0x00000002
+#define A2XX_RB_COLOR_MASK_WRITE_BLUE 0x00000004
+#define A2XX_RB_COLOR_MASK_WRITE_ALPHA 0x00000008
+
+#define REG_A2XX_RB_BLEND_RED 0x00002105
+
+#define REG_A2XX_RB_BLEND_GREEN 0x00002106
+
+#define REG_A2XX_RB_BLEND_BLUE 0x00002107
+
+#define REG_A2XX_RB_BLEND_ALPHA 0x00002108
+
+#define REG_A2XX_RB_FOG_COLOR 0x00002109
+#define A2XX_RB_FOG_COLOR_FOG_RED__MASK 0x000000ff
+#define A2XX_RB_FOG_COLOR_FOG_RED__SHIFT 0
+static inline uint32_t A2XX_RB_FOG_COLOR_FOG_RED(uint32_t val)
+{
+ return ((val) << A2XX_RB_FOG_COLOR_FOG_RED__SHIFT) & A2XX_RB_FOG_COLOR_FOG_RED__MASK;
+}
+#define A2XX_RB_FOG_COLOR_FOG_GREEN__MASK 0x0000ff00
+#define A2XX_RB_FOG_COLOR_FOG_GREEN__SHIFT 8
+static inline uint32_t A2XX_RB_FOG_COLOR_FOG_GREEN(uint32_t val)
+{
+ return ((val) << A2XX_RB_FOG_COLOR_FOG_GREEN__SHIFT) & A2XX_RB_FOG_COLOR_FOG_GREEN__MASK;
+}
+#define A2XX_RB_FOG_COLOR_FOG_BLUE__MASK 0x00ff0000
+#define A2XX_RB_FOG_COLOR_FOG_BLUE__SHIFT 16
+static inline uint32_t A2XX_RB_FOG_COLOR_FOG_BLUE(uint32_t val)
+{
+ return ((val) << A2XX_RB_FOG_COLOR_FOG_BLUE__SHIFT) & A2XX_RB_FOG_COLOR_FOG_BLUE__MASK;
+}
+
+#define REG_A2XX_RB_STENCILREFMASK_BF 0x0000210c
+#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
+#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
+static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
+}
+#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
+#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
+static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
+}
+#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
+#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A2XX_RB_STENCILREFMASK 0x0000210d
+#define A2XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A2XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
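+/*
+ * Packing sketch (illustrative): a "ref=0x42, compare all bits, write
+ * all bits" front-face stencil setup would be
+ *
+ *   A2XX_RB_STENCILREFMASK_STENCILREF(0x42) |
+ *   A2XX_RB_STENCILREFMASK_STENCILMASK(0xff) |
+ *   A2XX_RB_STENCILREFMASK_STENCILWRITEMASK(0xff)
+ *
+ * with the _BF register above holding the back-face equivalents.
+ */
+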
+#define REG_A2XX_RB_ALPHA_REF 0x0000210e
+
+#define REG_A2XX_PA_CL_VPORT_XSCALE 0x0000210f
+#define A2XX_PA_CL_VPORT_XSCALE__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_XSCALE__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_XSCALE(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_XSCALE__SHIFT) & A2XX_PA_CL_VPORT_XSCALE__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_XOFFSET 0x00002110
+#define A2XX_PA_CL_VPORT_XOFFSET__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_XOFFSET__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_XOFFSET(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_XOFFSET__SHIFT) & A2XX_PA_CL_VPORT_XOFFSET__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_YSCALE 0x00002111
+#define A2XX_PA_CL_VPORT_YSCALE__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_YSCALE__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_YSCALE(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_YSCALE__SHIFT) & A2XX_PA_CL_VPORT_YSCALE__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_YOFFSET 0x00002112
+#define A2XX_PA_CL_VPORT_YOFFSET__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_YOFFSET__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_YOFFSET(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_YOFFSET__SHIFT) & A2XX_PA_CL_VPORT_YOFFSET__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_ZSCALE 0x00002113
+#define A2XX_PA_CL_VPORT_ZSCALE__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_ZSCALE__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_ZSCALE(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_ZSCALE__SHIFT) & A2XX_PA_CL_VPORT_ZSCALE__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_ZOFFSET 0x00002114
+#define A2XX_PA_CL_VPORT_ZOFFSET__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_ZOFFSET__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_ZOFFSET(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_ZOFFSET__SHIFT) & A2XX_PA_CL_VPORT_ZOFFSET__MASK;
+}
+
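+/*
+ * The viewport helpers pass raw IEEE-754 bits through via fui() (the
+ * float-to-uint bit cast from msm_drv.h), since the hardware consumes
+ * float32 here.  Sketch of a GL-style viewport transform, assuming the
+ * msm OUT_RING() ringbuffer helper:
+ *
+ *   OUT_RING(ring, A2XX_PA_CL_VPORT_XSCALE(width / 2.0f));
+ *   OUT_RING(ring, A2XX_PA_CL_VPORT_XOFFSET(x + width / 2.0f));
+ */
+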
+#define REG_A2XX_SQ_PROGRAM_CNTL 0x00002180
+#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK 0x000000ff
+#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT 0
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_REGS(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK 0x0000ff00
+#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT 8
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_REGS(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_VS_RESOURCE 0x00010000
+#define A2XX_SQ_PROGRAM_CNTL_PS_RESOURCE 0x00020000
+#define A2XX_SQ_PROGRAM_CNTL_PARAM_GEN 0x00040000
+#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_PIX 0x00080000
+#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK 0x00f00000
+#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT 20
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK 0x07000000
+#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT 24
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE(enum a2xx_sq_ps_vtx_mode val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK 0x78000000
+#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT 27
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_VTX 0x80000000
+
+#define REG_A2XX_SQ_CONTEXT_MISC 0x00002181
+#define A2XX_SQ_CONTEXT_MISC_INST_PRED_OPTIMIZE 0x00000001
+#define A2XX_SQ_CONTEXT_MISC_SC_OUTPUT_SCREEN_XY 0x00000002
+#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK 0x0000000c
+#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT 2
+static inline uint32_t A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL(enum a2xx_sq_sample_cntl val)
+{
+ return ((val) << A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT) & A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK;
+}
+#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK 0x0000ff00
+#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT 8
+static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val)
+{
+ return ((val) << A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT) & A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK;
+}
+#define A2XX_SQ_CONTEXT_MISC_PERFCOUNTER_REF 0x00010000
+#define A2XX_SQ_CONTEXT_MISC_YEILD_OPTIMIZE 0x00020000
+#define A2XX_SQ_CONTEXT_MISC_TX_CACHE_SEL 0x00040000
+
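+/* (YEILD_OPTIMIZE above likewise preserves the upstream identifier spelling.) */
+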
+#define REG_A2XX_SQ_INTERPOLATOR_CNTL 0x00002182
+#define A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__MASK 0x0000ffff
+#define A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__SHIFT 0
+static inline uint32_t A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__SHIFT) & A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__MASK;
+}
+#define A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__MASK 0xffff0000
+#define A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__SHIFT 16
+static inline uint32_t A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN(uint32_t val)
+{
+ return ((val) << A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__SHIFT) & A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__MASK;
+}
+
+#define REG_A2XX_SQ_WRAPPING_0 0x00002183
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__MASK 0x0000000f
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__SHIFT 0
+static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_0(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__MASK;
+}
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__MASK 0x000000f0
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__SHIFT 4
+static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_1(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__MASK;
+}
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__MASK 0x00000f00
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__SHIFT 8
+static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_2(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__MASK;
+}
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__MASK 0x0000f000
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__SHIFT 12
+static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_3(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__MASK;
+}
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__MASK 0x000f0000
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__SHIFT 16
+static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_4(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__MASK;
+}
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__MASK 0x00f00000
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__SHIFT 20
+static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_5(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__MASK;
+}
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__MASK 0x0f000000
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__SHIFT 24
+static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_6(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__MASK;
+}
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__MASK 0xf0000000
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__SHIFT 28
+static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_7(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__MASK;
+}
+
+#define REG_A2XX_SQ_WRAPPING_1 0x00002184
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__MASK 0x0000000f
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__SHIFT 0
+static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_8(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__MASK;
+}
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__MASK 0x000000f0
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__SHIFT 4
+static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_9(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__MASK;
+}
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__MASK 0x00000f00
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__SHIFT 8
+static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_10(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__MASK;
+}
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__MASK 0x0000f000
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__SHIFT 12
+static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_11(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__MASK;
+}
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__MASK 0x000f0000
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__SHIFT 16
+static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_12(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__MASK;
+}
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__MASK 0x00f00000
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__SHIFT 20
+static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_13(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__MASK;
+}
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__MASK 0x0f000000
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__SHIFT 24
+static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_14(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__MASK;
+}
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__MASK 0xf0000000
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__SHIFT 28
+static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_15(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__MASK;
+}
+
+#define REG_A2XX_SQ_PS_PROGRAM 0x000021f6
+#define A2XX_SQ_PS_PROGRAM_BASE__MASK 0x00000fff
+#define A2XX_SQ_PS_PROGRAM_BASE__SHIFT 0
+static inline uint32_t A2XX_SQ_PS_PROGRAM_BASE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PS_PROGRAM_BASE__SHIFT) & A2XX_SQ_PS_PROGRAM_BASE__MASK;
+}
+#define A2XX_SQ_PS_PROGRAM_SIZE__MASK 0x00fff000
+#define A2XX_SQ_PS_PROGRAM_SIZE__SHIFT 12
+static inline uint32_t A2XX_SQ_PS_PROGRAM_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PS_PROGRAM_SIZE__SHIFT) & A2XX_SQ_PS_PROGRAM_SIZE__MASK;
+}
+
+#define REG_A2XX_SQ_VS_PROGRAM 0x000021f7
+#define A2XX_SQ_VS_PROGRAM_BASE__MASK 0x00000fff
+#define A2XX_SQ_VS_PROGRAM_BASE__SHIFT 0
+static inline uint32_t A2XX_SQ_VS_PROGRAM_BASE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_VS_PROGRAM_BASE__SHIFT) & A2XX_SQ_VS_PROGRAM_BASE__MASK;
+}
+#define A2XX_SQ_VS_PROGRAM_SIZE__MASK 0x00fff000
+#define A2XX_SQ_VS_PROGRAM_SIZE__SHIFT 12
+static inline uint32_t A2XX_SQ_VS_PROGRAM_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_VS_PROGRAM_SIZE__SHIFT) & A2XX_SQ_VS_PROGRAM_SIZE__MASK;
+}
+
+#define REG_A2XX_VGT_EVENT_INITIATOR 0x000021f9
+
+#define REG_A2XX_VGT_DRAW_INITIATOR 0x000021fc
+#define A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f
+#define A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK;
+}
+#define A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0
+#define A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK;
+}
+#define A2XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK 0x00000600
+#define A2XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT 9
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT) & A2XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK;
+}
+#define A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000800
+#define A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT 11
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK;
+}
+#define A2XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000
+#define A2XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000
+#define A2XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK 0xff000000
+#define A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT 24
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT) & A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK;
+}
+
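+/*
+ * Compose sketch (illustrative; the pc_di_* enum values live in
+ * adreno_common.xml.h): an auto-indexed triangle-list draw could be
+ * encoded as
+ *
+ *   A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE(DI_PT_TRILIST) |
+ *   A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
+ *   A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(1);
+ */
+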
+#define REG_A2XX_VGT_IMMED_DATA 0x000021fd
+
+#define REG_A2XX_RB_DEPTHCONTROL 0x00002200
+#define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE 0x00000001
+#define A2XX_RB_DEPTHCONTROL_Z_ENABLE 0x00000002
+#define A2XX_RB_DEPTHCONTROL_Z_WRITE_ENABLE 0x00000004
+#define A2XX_RB_DEPTHCONTROL_EARLY_Z_ENABLE 0x00000008
+#define A2XX_RB_DEPTHCONTROL_ZFUNC__MASK 0x00000070
+#define A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT 4
+static inline uint32_t A2XX_RB_DEPTHCONTROL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_ZFUNC__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_BACKFACE_ENABLE 0x00000080
+#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK 0x00000700
+#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT 8
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK 0x00003800
+#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT 11
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK 0x0001c000
+#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT 14
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK 0x000e0000
+#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT 17
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK 0x00700000
+#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT 20
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK 0x03800000
+#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT 23
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK 0x1c000000
+#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT 26
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK 0xe0000000
+#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT 29
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK;
+}
+
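+/*
+ * Sketch (illustrative; FUNC_LESS is from enum adreno_compare_func in
+ * adreno_common.xml.h): a standard depth-test-and-write state is
+ *
+ *   A2XX_RB_DEPTHCONTROL_Z_ENABLE |
+ *   A2XX_RB_DEPTHCONTROL_Z_WRITE_ENABLE |
+ *   A2XX_RB_DEPTHCONTROL_ZFUNC(FUNC_LESS);
+ */
+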
+#define REG_A2XX_RB_BLEND_CONTROL 0x00002201
+#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK 0x0000001f
+#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT 0
+static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK 0x000000e0
+#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT 5
+static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum a2xx_rb_blend_opcode val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK 0x00001f00
+#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT 8
+static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK 0x001f0000
+#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT 16
+static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK 0x00e00000
+#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT 21
+static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum a2xx_rb_blend_opcode val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK 0x1f000000
+#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT 24
+static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE_ENABLE 0x20000000
+#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE 0x40000000
+
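+/*
+ * Sketch (illustrative; the FACTOR_* names come from enum
+ * adreno_rb_blend_factor in adreno_common.xml.h, and the additive
+ * combine opcode is assumed to be 0): classic src-alpha /
+ * inverse-src-alpha blending would be
+ *
+ *   A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND(FACTOR_SRC_ALPHA) |
+ *   A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(0) |
+ *   A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND(FACTOR_ONE_MINUS_SRC_ALPHA);
+ */
+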
+#define REG_A2XX_RB_COLORCONTROL 0x00002202
+#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK 0x00000007
+#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT 0
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK;
+}
+#define A2XX_RB_COLORCONTROL_ALPHA_TEST_ENABLE 0x00000008
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_ENABLE 0x00000010
+#define A2XX_RB_COLORCONTROL_BLEND_DISABLE 0x00000020
+#define A2XX_RB_COLORCONTROL_VOB_ENABLE 0x00000040
+#define A2XX_RB_COLORCONTROL_VS_EXPORTS_FOG 0x00000080
+#define A2XX_RB_COLORCONTROL_ROP_CODE__MASK 0x00000f00
+#define A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT 8
+static inline uint32_t A2XX_RB_COLORCONTROL_ROP_CODE(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT) & A2XX_RB_COLORCONTROL_ROP_CODE__MASK;
+}
+#define A2XX_RB_COLORCONTROL_DITHER_MODE__MASK 0x00003000
+#define A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT 12
+static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_MODE__MASK;
+}
+#define A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK 0x0000c000
+#define A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT 14
+static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_TYPE(enum a2xx_rb_dither_type val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK;
+}
+#define A2XX_RB_COLORCONTROL_PIXEL_FOG 0x00010000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK 0x03000000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT 24
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK;
+}
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK 0x0c000000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT 26
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK;
+}
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK 0x30000000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT 28
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK;
+}
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK 0xc0000000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT 30
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK;
+}
+
+#define REG_A2XX_VGT_CURRENT_BIN_ID_MAX 0x00002203
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK 0x00000007
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT 0
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK;
+}
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK 0x00000038
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT 3
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_ROW(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK;
+}
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK 0x000001c0
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT 6
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK;
+}
+
+#define REG_A2XX_PA_CL_CLIP_CNTL 0x00002204
+#define A2XX_PA_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
+#define A2XX_PA_CL_CLIP_CNTL_BOUNDARY_EDGE_FLAG_ENA 0x00040000
+#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK 0x00080000
+#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT 19
+static inline uint32_t A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF(enum a2xx_dx_clip_space val)
+{
+ return ((val) << A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT) & A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK;
+}
+#define A2XX_PA_CL_CLIP_CNTL_DIS_CLIP_ERR_DETECT 0x00100000
+#define A2XX_PA_CL_CLIP_CNTL_VTX_KILL_OR 0x00200000
+#define A2XX_PA_CL_CLIP_CNTL_XY_NAN_RETAIN 0x00400000
+#define A2XX_PA_CL_CLIP_CNTL_Z_NAN_RETAIN 0x00800000
+#define A2XX_PA_CL_CLIP_CNTL_W_NAN_RETAIN 0x01000000
+
+#define REG_A2XX_PA_SU_SC_MODE_CNTL 0x00002205
+#define A2XX_PA_SU_SC_MODE_CNTL_CULL_FRONT 0x00000001
+#define A2XX_PA_SU_SC_MODE_CNTL_CULL_BACK 0x00000002
+#define A2XX_PA_SU_SC_MODE_CNTL_FACE 0x00000004
+#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK 0x00000018
+#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT 3
+static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_POLYMODE(enum a2xx_pa_su_sc_polymode val)
+{
+ return ((val) << A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK;
+}
+#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK 0x000000e0
+#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT 5
+static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK;
+}
+#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK 0x00000700
+#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT 8
+static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK;
+}
+#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_FRONT_ENABLE 0x00000800
+#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_BACK_ENABLE 0x00001000
+#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_PARA_ENABLE 0x00002000
+#define A2XX_PA_SU_SC_MODE_CNTL_MSAA_ENABLE 0x00008000
+#define A2XX_PA_SU_SC_MODE_CNTL_VTX_WINDOW_OFFSET_ENABLE 0x00010000
+#define A2XX_PA_SU_SC_MODE_CNTL_LINE_STIPPLE_ENABLE 0x00040000
+#define A2XX_PA_SU_SC_MODE_CNTL_PROVOKING_VTX_LAST 0x00080000
+#define A2XX_PA_SU_SC_MODE_CNTL_PERSP_CORR_DIS 0x00100000
+#define A2XX_PA_SU_SC_MODE_CNTL_MULTI_PRIM_IB_ENA 0x00200000
+#define A2XX_PA_SU_SC_MODE_CNTL_QUAD_ORDER_ENABLE 0x00800000
+#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_ALL_TRI 0x02000000
+#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_FIRST_TRI_NEW_STATE 0x04000000
+#define A2XX_PA_SU_SC_MODE_CNTL_CLAMPED_FACENESS 0x10000000
+#define A2XX_PA_SU_SC_MODE_CNTL_ZERO_AREA_FACENESS 0x20000000
+#define A2XX_PA_SU_SC_MODE_CNTL_FACE_KILL_ENABLE 0x40000000
+#define A2XX_PA_SU_SC_MODE_CNTL_FACE_WRITE_ENABLE 0x80000000
+
+#define REG_A2XX_PA_CL_VTE_CNTL 0x00002206
+#define A2XX_PA_CL_VTE_CNTL_VPORT_X_SCALE_ENA 0x00000001
+#define A2XX_PA_CL_VTE_CNTL_VPORT_X_OFFSET_ENA 0x00000002
+#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_SCALE_ENA 0x00000004
+#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_OFFSET_ENA 0x00000008
+#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_SCALE_ENA 0x00000010
+#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_OFFSET_ENA 0x00000020
+#define A2XX_PA_CL_VTE_CNTL_VTX_XY_FMT 0x00000100
+#define A2XX_PA_CL_VTE_CNTL_VTX_Z_FMT 0x00000200
+#define A2XX_PA_CL_VTE_CNTL_VTX_W0_FMT 0x00000400
+#define A2XX_PA_CL_VTE_CNTL_PERFCOUNTER_REF 0x00000800
+
+#define REG_A2XX_VGT_CURRENT_BIN_ID_MIN 0x00002207
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK 0x00000007
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT 0
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK;
+}
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK 0x00000038
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT 3
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_ROW(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK;
+}
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK 0x000001c0
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT 6
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK;
+}
+
+#define REG_A2XX_RB_MODECONTROL 0x00002208
+#define A2XX_RB_MODECONTROL_EDRAM_MODE__MASK 0x00000007
+#define A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT 0
+static inline uint32_t A2XX_RB_MODECONTROL_EDRAM_MODE(enum a2xx_rb_edram_mode val)
+{
+ return ((val) << A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT) & A2XX_RB_MODECONTROL_EDRAM_MODE__MASK;
+}
+
+#define REG_A2XX_A220_RB_LRZ_VSC_CONTROL 0x00002209
+
+#define REG_A2XX_RB_SAMPLE_POS 0x0000220a
+
+#define REG_A2XX_CLEAR_COLOR 0x0000220b
+#define A2XX_CLEAR_COLOR_RED__MASK 0x000000ff
+#define A2XX_CLEAR_COLOR_RED__SHIFT 0
+static inline uint32_t A2XX_CLEAR_COLOR_RED(uint32_t val)
+{
+ return ((val) << A2XX_CLEAR_COLOR_RED__SHIFT) & A2XX_CLEAR_COLOR_RED__MASK;
+}
+#define A2XX_CLEAR_COLOR_GREEN__MASK 0x0000ff00
+#define A2XX_CLEAR_COLOR_GREEN__SHIFT 8
+static inline uint32_t A2XX_CLEAR_COLOR_GREEN(uint32_t val)
+{
+ return ((val) << A2XX_CLEAR_COLOR_GREEN__SHIFT) & A2XX_CLEAR_COLOR_GREEN__MASK;
+}
+#define A2XX_CLEAR_COLOR_BLUE__MASK 0x00ff0000
+#define A2XX_CLEAR_COLOR_BLUE__SHIFT 16
+static inline uint32_t A2XX_CLEAR_COLOR_BLUE(uint32_t val)
+{
+ return ((val) << A2XX_CLEAR_COLOR_BLUE__SHIFT) & A2XX_CLEAR_COLOR_BLUE__MASK;
+}
+#define A2XX_CLEAR_COLOR_ALPHA__MASK 0xff000000
+#define A2XX_CLEAR_COLOR_ALPHA__SHIFT 24
+static inline uint32_t A2XX_CLEAR_COLOR_ALPHA(uint32_t val)
+{
+ return ((val) << A2XX_CLEAR_COLOR_ALPHA__SHIFT) & A2XX_CLEAR_COLOR_ALPHA__MASK;
+}
+
+#define REG_A2XX_A220_GRAS_CONTROL 0x00002210
+
+#define REG_A2XX_PA_SU_POINT_SIZE 0x00002280
+#define A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK 0x0000ffff
+#define A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT 0
+static inline uint32_t A2XX_PA_SU_POINT_SIZE_HEIGHT(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK;
+}
+#define A2XX_PA_SU_POINT_SIZE_WIDTH__MASK 0xffff0000
+#define A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT 16
+static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK;
+}
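+
+/* The 16-bit HEIGHT/WIDTH fields hold pixel sizes in unsigned 12.4 fixed
+ * point -- that is what the (uint32_t)(val * 16.0) scaling above implements.
+ * As a purely illustrative example, a 2.5px point packs as:
+ *
+ *   A2XX_PA_SU_POINT_SIZE_WIDTH(2.5f)  -> (uint32_t)(2.5 * 16) = 0x28,
+ *                                         placed in bits 31:16: 0x00280000
+ *   A2XX_PA_SU_POINT_SIZE_HEIGHT(2.5f) -> 0x28 in bits 15:0
+ *
+ * so the full register value is the OR of the two, 0x00280028.
+ */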
+
+#define REG_A2XX_PA_SU_POINT_MINMAX 0x00002281
+#define A2XX_PA_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A2XX_PA_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A2XX_PA_SU_LINE_CNTL 0x00002282
+#define A2XX_PA_SU_LINE_CNTL_WIDTH__MASK 0x0000ffff
+#define A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT 0
+static inline uint32_t A2XX_PA_SU_LINE_CNTL_WIDTH(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK;
+}
+
+#define REG_A2XX_PA_SC_LINE_STIPPLE 0x00002283
+#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK 0x0000ffff
+#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT 0
+static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK;
+}
+#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK 0x00ff0000
+#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT 16
+static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK;
+}
+#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK 0x10000000
+#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT 28
+static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER(enum a2xx_pa_sc_pattern_bit_order val)
+{
+ return ((val) << A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK;
+}
+#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK 0x60000000
+#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT 29
+static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL(enum a2xx_pa_sc_auto_reset_cntl val)
+{
+ return ((val) << A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK;
+}
+
+#define REG_A2XX_PA_SC_VIZ_QUERY 0x00002293
+#define A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ENA 0x00000001
+#define A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__MASK 0x0000007e
+#define A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__SHIFT 1
+static inline uint32_t A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__SHIFT) & A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__MASK;
+}
+#define A2XX_PA_SC_VIZ_QUERY_KILL_PIX_POST_EARLY_Z 0x00000100
+
+#define REG_A2XX_VGT_ENHANCE 0x00002294
+
+#define REG_A2XX_PA_SC_LINE_CNTL 0x00002300
+#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK 0x0000ffff
+#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT 0
+static inline uint32_t A2XX_PA_SC_LINE_CNTL_BRES_CNTL(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT) & A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK;
+}
+#define A2XX_PA_SC_LINE_CNTL_USE_BRES_CNTL 0x00000100
+#define A2XX_PA_SC_LINE_CNTL_EXPAND_LINE_WIDTH 0x00000200
+#define A2XX_PA_SC_LINE_CNTL_LAST_PIXEL 0x00000400
+
+#define REG_A2XX_PA_SC_AA_CONFIG 0x00002301
+#define A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__MASK 0x00000007
+#define A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__SHIFT 0
+static inline uint32_t A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__SHIFT) & A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__MASK;
+}
+#define A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__MASK 0x0001e000
+#define A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__SHIFT 13
+static inline uint32_t A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__SHIFT) & A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__MASK;
+}
+
+#define REG_A2XX_PA_SU_VTX_CNTL 0x00002302
+#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK 0x00000001
+#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT 0
+static inline uint32_t A2XX_PA_SU_VTX_CNTL_PIX_CENTER(enum a2xx_pa_pixcenter val)
+{
+ return ((val) << A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT) & A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK;
+}
+#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK 0x00000006
+#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT 1
+static inline uint32_t A2XX_PA_SU_VTX_CNTL_ROUND_MODE(enum a2xx_pa_roundmode val)
+{
+ return ((val) << A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK;
+}
+#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK 0x00000380
+#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT 7
+static inline uint32_t A2XX_PA_SU_VTX_CNTL_QUANT_MODE(enum a2xx_pa_quantmode val)
+{
+ return ((val) << A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK;
+}
+
+#define REG_A2XX_PA_CL_GB_VERT_CLIP_ADJ 0x00002303
+#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK 0xffffffff
+#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT 0
+static inline uint32_t A2XX_PA_CL_GB_VERT_CLIP_ADJ(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK;
+}
+
+#define REG_A2XX_PA_CL_GB_VERT_DISC_ADJ 0x00002304
+#define A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK 0xffffffff
+#define A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT 0
+static inline uint32_t A2XX_PA_CL_GB_VERT_DISC_ADJ(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK;
+}
+
+#define REG_A2XX_PA_CL_GB_HORZ_CLIP_ADJ 0x00002305
+#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK 0xffffffff
+#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT 0
+static inline uint32_t A2XX_PA_CL_GB_HORZ_CLIP_ADJ(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK;
+}
+
+#define REG_A2XX_PA_CL_GB_HORZ_DISC_ADJ 0x00002306
+#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK 0xffffffff
+#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT 0
+static inline uint32_t A2XX_PA_CL_GB_HORZ_DISC_ADJ(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK;
+}
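+
+/* Unlike the fixed-point helpers above, the four guard-band registers take a
+ * full 32-bit float: fui() (a small helper defined elsewhere in the driver,
+ * in adreno_gpu.h) returns the raw IEEE-754 bit pattern of its argument
+ * rather than a numeric conversion, e.g. fui(1.0f) == 0x3f800000.
+ */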
+
+#define REG_A2XX_SQ_VS_CONST 0x00002307
+#define A2XX_SQ_VS_CONST_BASE__MASK 0x000001ff
+#define A2XX_SQ_VS_CONST_BASE__SHIFT 0
+static inline uint32_t A2XX_SQ_VS_CONST_BASE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_VS_CONST_BASE__SHIFT) & A2XX_SQ_VS_CONST_BASE__MASK;
+}
+#define A2XX_SQ_VS_CONST_SIZE__MASK 0x001ff000
+#define A2XX_SQ_VS_CONST_SIZE__SHIFT 12
+static inline uint32_t A2XX_SQ_VS_CONST_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_VS_CONST_SIZE__SHIFT) & A2XX_SQ_VS_CONST_SIZE__MASK;
+}
+
+#define REG_A2XX_SQ_PS_CONST 0x00002308
+#define A2XX_SQ_PS_CONST_BASE__MASK 0x000001ff
+#define A2XX_SQ_PS_CONST_BASE__SHIFT 0
+static inline uint32_t A2XX_SQ_PS_CONST_BASE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PS_CONST_BASE__SHIFT) & A2XX_SQ_PS_CONST_BASE__MASK;
+}
+#define A2XX_SQ_PS_CONST_SIZE__MASK 0x001ff000
+#define A2XX_SQ_PS_CONST_SIZE__SHIFT 12
+static inline uint32_t A2XX_SQ_PS_CONST_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PS_CONST_SIZE__SHIFT) & A2XX_SQ_PS_CONST_SIZE__MASK;
+}
+
+#define REG_A2XX_SQ_DEBUG_MISC_0 0x00002309
+
+#define REG_A2XX_SQ_DEBUG_MISC_1 0x0000230a
+
+#define REG_A2XX_PA_SC_AA_MASK 0x00002312
+
+#define REG_A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL 0x00002316
+#define A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__MASK 0x00000007
+#define A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__SHIFT 0
+static inline uint32_t A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH(uint32_t val)
+{
+ return ((val) << A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__SHIFT) & A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__MASK;
+}
+
+#define REG_A2XX_VGT_OUT_DEALLOC_CNTL 0x00002317
+#define A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__MASK 0x00000003
+#define A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__SHIFT 0
+static inline uint32_t A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST(uint32_t val)
+{
+ return ((val) << A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__SHIFT) & A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__MASK;
+}
+
+#define REG_A2XX_RB_COPY_CONTROL 0x00002318
+#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK 0x00000007
+#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT 0
+static inline uint32_t A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT(enum a2xx_rb_copy_sample_select val)
+{
+ return ((val) << A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT) & A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK;
+}
+#define A2XX_RB_COPY_CONTROL_DEPTH_CLEAR_ENABLE 0x00000008
+#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK 0x000000f0
+#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT 4
+static inline uint32_t A2XX_RB_COPY_CONTROL_CLEAR_MASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT) & A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK;
+}
+
+#define REG_A2XX_RB_COPY_DEST_BASE 0x00002319
+
+#define REG_A2XX_RB_COPY_DEST_PITCH 0x0000231a
+#define A2XX_RB_COPY_DEST_PITCH__MASK 0xffffffff
+#define A2XX_RB_COPY_DEST_PITCH__SHIFT 0
+static inline uint32_t A2XX_RB_COPY_DEST_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A2XX_RB_COPY_DEST_PITCH__SHIFT) & A2XX_RB_COPY_DEST_PITCH__MASK;
+}
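+
+/* The val >> 5 above suggests callers pass the destination pitch in bytes
+ * while the hardware field is kept in 32-byte units, so the pitch must be a
+ * multiple of 32 (e.g. a 1024-byte pitch is programmed as 1024 >> 5 = 32).
+ */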
+
+#define REG_A2XX_RB_COPY_DEST_INFO 0x0000231b
+#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK 0x00000007
+#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT 0
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN(enum adreno_rb_surface_endian val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT) & A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_LINEAR 0x00000008
+#define A2XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000f0
+#define A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 4
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_FORMAT(enum a2xx_colorformatx val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A2XX_RB_COPY_DEST_INFO_FORMAT__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
+#define A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_SWAP(uint32_t val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A2XX_RB_COPY_DEST_INFO_SWAP__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
+#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK 0x00003000
+#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT 12
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_TYPE(enum a2xx_rb_dither_type val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_WRITE_RED 0x00004000
+#define A2XX_RB_COPY_DEST_INFO_WRITE_GREEN 0x00008000
+#define A2XX_RB_COPY_DEST_INFO_WRITE_BLUE 0x00010000
+#define A2XX_RB_COPY_DEST_INFO_WRITE_ALPHA 0x00020000
+
+#define REG_A2XX_RB_COPY_DEST_OFFSET 0x0000231c
+#define A2XX_RB_COPY_DEST_OFFSET_X__MASK 0x00001fff
+#define A2XX_RB_COPY_DEST_OFFSET_X__SHIFT 0
+static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_X(uint32_t val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_OFFSET_X__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_X__MASK;
+}
+#define A2XX_RB_COPY_DEST_OFFSET_Y__MASK 0x03ffe000
+#define A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT 13
+static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_Y__MASK;
+}
+
+#define REG_A2XX_RB_DEPTH_CLEAR 0x0000231d
+
+#define REG_A2XX_RB_SAMPLE_COUNT_CTL 0x00002324
+
+#define REG_A2XX_RB_COLOR_DEST_MASK 0x00002326
+
+#define REG_A2XX_A225_GRAS_UCP0X 0x00002340
+
+#define REG_A2XX_A225_GRAS_UCP5W 0x00002357
+
+#define REG_A2XX_A225_GRAS_UCP_ENABLED 0x00002360
+
+#define REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE 0x00002380
+
+#define REG_A2XX_PA_SU_POLY_OFFSET_FRONT_OFFSET 0x00002381
+
+#define REG_A2XX_PA_SU_POLY_OFFSET_BACK_SCALE 0x00002382
+
+#define REG_A2XX_PA_SU_POLY_OFFSET_BACK_OFFSET 0x00002383
+
+#define REG_A2XX_SQ_CONSTANT_0 0x00004000
+
+#define REG_A2XX_SQ_FETCH_0 0x00004800
+
+#define REG_A2XX_SQ_CF_BOOLEANS 0x00004900
+
+#define REG_A2XX_SQ_CF_LOOP 0x00004908
+
+#define REG_A2XX_COHER_SIZE_PM4 0x00000a29
+
+#define REG_A2XX_COHER_BASE_PM4 0x00000a2a
+
+#define REG_A2XX_COHER_STATUS_PM4 0x00000a2b
+
+#define REG_A2XX_PA_SU_PERFCOUNTER0_SELECT 0x00000c88
+
+#define REG_A2XX_PA_SU_PERFCOUNTER1_SELECT 0x00000c89
+
+#define REG_A2XX_PA_SU_PERFCOUNTER2_SELECT 0x00000c8a
+
+#define REG_A2XX_PA_SU_PERFCOUNTER3_SELECT 0x00000c8b
+
+#define REG_A2XX_PA_SU_PERFCOUNTER0_LOW 0x00000c8c
+
+#define REG_A2XX_PA_SU_PERFCOUNTER0_HI 0x00000c8d
+
+#define REG_A2XX_PA_SU_PERFCOUNTER1_LOW 0x00000c8e
+
+#define REG_A2XX_PA_SU_PERFCOUNTER1_HI 0x00000c8f
+
+#define REG_A2XX_PA_SU_PERFCOUNTER2_LOW 0x00000c90
+
+#define REG_A2XX_PA_SU_PERFCOUNTER2_HI 0x00000c91
+
+#define REG_A2XX_PA_SU_PERFCOUNTER3_LOW 0x00000c92
+
+#define REG_A2XX_PA_SU_PERFCOUNTER3_HI 0x00000c93
+
+#define REG_A2XX_PA_SC_PERFCOUNTER0_SELECT 0x00000c98
+
+#define REG_A2XX_PA_SC_PERFCOUNTER0_LOW 0x00000c99
+
+#define REG_A2XX_PA_SC_PERFCOUNTER0_HI 0x00000c9a
+
+#define REG_A2XX_VGT_PERFCOUNTER0_SELECT 0x00000c48
+
+#define REG_A2XX_VGT_PERFCOUNTER1_SELECT 0x00000c49
+
+#define REG_A2XX_VGT_PERFCOUNTER2_SELECT 0x00000c4a
+
+#define REG_A2XX_VGT_PERFCOUNTER3_SELECT 0x00000c4b
+
+#define REG_A2XX_VGT_PERFCOUNTER0_LOW 0x00000c4c
+
+#define REG_A2XX_VGT_PERFCOUNTER1_LOW 0x00000c4e
+
+#define REG_A2XX_VGT_PERFCOUNTER2_LOW 0x00000c50
+
+#define REG_A2XX_VGT_PERFCOUNTER3_LOW 0x00000c52
+
+#define REG_A2XX_VGT_PERFCOUNTER0_HI 0x00000c4d
+
+#define REG_A2XX_VGT_PERFCOUNTER1_HI 0x00000c4f
+
+#define REG_A2XX_VGT_PERFCOUNTER2_HI 0x00000c51
+
+#define REG_A2XX_VGT_PERFCOUNTER3_HI 0x00000c53
+
+#define REG_A2XX_TCR_PERFCOUNTER0_SELECT 0x00000e05
+
+#define REG_A2XX_TCR_PERFCOUNTER1_SELECT 0x00000e08
+
+#define REG_A2XX_TCR_PERFCOUNTER0_HI 0x00000e06
+
+#define REG_A2XX_TCR_PERFCOUNTER1_HI 0x00000e09
+
+#define REG_A2XX_TCR_PERFCOUNTER0_LOW 0x00000e07
+
+#define REG_A2XX_TCR_PERFCOUNTER1_LOW 0x00000e0a
+
+#define REG_A2XX_TP0_PERFCOUNTER0_SELECT 0x00000e1f
+
+#define REG_A2XX_TP0_PERFCOUNTER0_HI 0x00000e20
+
+#define REG_A2XX_TP0_PERFCOUNTER0_LOW 0x00000e21
+
+#define REG_A2XX_TP0_PERFCOUNTER1_SELECT 0x00000e22
+
+#define REG_A2XX_TP0_PERFCOUNTER1_HI 0x00000e23
+
+#define REG_A2XX_TP0_PERFCOUNTER1_LOW 0x00000e24
+
+#define REG_A2XX_TCM_PERFCOUNTER0_SELECT 0x00000e54
+
+#define REG_A2XX_TCM_PERFCOUNTER1_SELECT 0x00000e57
+
+#define REG_A2XX_TCM_PERFCOUNTER0_HI 0x00000e55
+
+#define REG_A2XX_TCM_PERFCOUNTER1_HI 0x00000e58
+
+#define REG_A2XX_TCM_PERFCOUNTER0_LOW 0x00000e56
+
+#define REG_A2XX_TCM_PERFCOUNTER1_LOW 0x00000e59
+
+#define REG_A2XX_TCF_PERFCOUNTER0_SELECT 0x00000e5a
+
+#define REG_A2XX_TCF_PERFCOUNTER1_SELECT 0x00000e5d
+
+#define REG_A2XX_TCF_PERFCOUNTER2_SELECT 0x00000e60
+
+#define REG_A2XX_TCF_PERFCOUNTER3_SELECT 0x00000e63
+
+#define REG_A2XX_TCF_PERFCOUNTER4_SELECT 0x00000e66
+
+#define REG_A2XX_TCF_PERFCOUNTER5_SELECT 0x00000e69
+
+#define REG_A2XX_TCF_PERFCOUNTER6_SELECT 0x00000e6c
+
+#define REG_A2XX_TCF_PERFCOUNTER7_SELECT 0x00000e6f
+
+#define REG_A2XX_TCF_PERFCOUNTER8_SELECT 0x00000e72
+
+#define REG_A2XX_TCF_PERFCOUNTER9_SELECT 0x00000e75
+
+#define REG_A2XX_TCF_PERFCOUNTER10_SELECT 0x00000e78
+
+#define REG_A2XX_TCF_PERFCOUNTER11_SELECT 0x00000e7b
+
+#define REG_A2XX_TCF_PERFCOUNTER0_HI 0x00000e5b
+
+#define REG_A2XX_TCF_PERFCOUNTER1_HI 0x00000e5e
+
+#define REG_A2XX_TCF_PERFCOUNTER2_HI 0x00000e61
+
+#define REG_A2XX_TCF_PERFCOUNTER3_HI 0x00000e64
+
+#define REG_A2XX_TCF_PERFCOUNTER4_HI 0x00000e67
+
+#define REG_A2XX_TCF_PERFCOUNTER5_HI 0x00000e6a
+
+#define REG_A2XX_TCF_PERFCOUNTER6_HI 0x00000e6d
+
+#define REG_A2XX_TCF_PERFCOUNTER7_HI 0x00000e70
+
+#define REG_A2XX_TCF_PERFCOUNTER8_HI 0x00000e73
+
+#define REG_A2XX_TCF_PERFCOUNTER9_HI 0x00000e76
+
+#define REG_A2XX_TCF_PERFCOUNTER10_HI 0x00000e79
+
+#define REG_A2XX_TCF_PERFCOUNTER11_HI 0x00000e7c
+
+#define REG_A2XX_TCF_PERFCOUNTER0_LOW 0x00000e5c
+
+#define REG_A2XX_TCF_PERFCOUNTER1_LOW 0x00000e5f
+
+#define REG_A2XX_TCF_PERFCOUNTER2_LOW 0x00000e62
+
+#define REG_A2XX_TCF_PERFCOUNTER3_LOW 0x00000e65
+
+#define REG_A2XX_TCF_PERFCOUNTER4_LOW 0x00000e68
+
+#define REG_A2XX_TCF_PERFCOUNTER5_LOW 0x00000e6b
+
+#define REG_A2XX_TCF_PERFCOUNTER6_LOW 0x00000e6e
+
+#define REG_A2XX_TCF_PERFCOUNTER7_LOW 0x00000e71
+
+#define REG_A2XX_TCF_PERFCOUNTER8_LOW 0x00000e74
+
+#define REG_A2XX_TCF_PERFCOUNTER9_LOW 0x00000e77
+
+#define REG_A2XX_TCF_PERFCOUNTER10_LOW 0x00000e7a
+
+#define REG_A2XX_TCF_PERFCOUNTER11_LOW 0x00000e7d
+
+#define REG_A2XX_SQ_PERFCOUNTER0_SELECT 0x00000dc8
+
+#define REG_A2XX_SQ_PERFCOUNTER1_SELECT 0x00000dc9
+
+#define REG_A2XX_SQ_PERFCOUNTER2_SELECT 0x00000dca
+
+#define REG_A2XX_SQ_PERFCOUNTER3_SELECT 0x00000dcb
+
+#define REG_A2XX_SQ_PERFCOUNTER0_LOW 0x00000dcc
+
+#define REG_A2XX_SQ_PERFCOUNTER0_HI 0x00000dcd
+
+#define REG_A2XX_SQ_PERFCOUNTER1_LOW 0x00000dce
+
+#define REG_A2XX_SQ_PERFCOUNTER1_HI 0x00000dcf
+
+#define REG_A2XX_SQ_PERFCOUNTER2_LOW 0x00000dd0
+
+#define REG_A2XX_SQ_PERFCOUNTER2_HI 0x00000dd1
+
+#define REG_A2XX_SQ_PERFCOUNTER3_LOW 0x00000dd2
+
+#define REG_A2XX_SQ_PERFCOUNTER3_HI 0x00000dd3
+
+#define REG_A2XX_SX_PERFCOUNTER0_SELECT 0x00000dd4
+
+#define REG_A2XX_SX_PERFCOUNTER0_LOW 0x00000dd8
+
+#define REG_A2XX_SX_PERFCOUNTER0_HI 0x00000dd9
+
+#define REG_A2XX_MH_PERFCOUNTER0_SELECT 0x00000a46
+
+#define REG_A2XX_MH_PERFCOUNTER1_SELECT 0x00000a4a
+
+#define REG_A2XX_MH_PERFCOUNTER0_CONFIG 0x00000a47
+
+#define REG_A2XX_MH_PERFCOUNTER1_CONFIG 0x00000a4b
+
+#define REG_A2XX_MH_PERFCOUNTER0_LOW 0x00000a48
+
+#define REG_A2XX_MH_PERFCOUNTER1_LOW 0x00000a4c
+
+#define REG_A2XX_MH_PERFCOUNTER0_HI 0x00000a49
+
+#define REG_A2XX_MH_PERFCOUNTER1_HI 0x00000a4d
+
+#define REG_A2XX_RB_PERFCOUNTER0_SELECT 0x00000f04
+
+#define REG_A2XX_RB_PERFCOUNTER1_SELECT 0x00000f05
+
+#define REG_A2XX_RB_PERFCOUNTER2_SELECT 0x00000f06
+
+#define REG_A2XX_RB_PERFCOUNTER3_SELECT 0x00000f07
+
+#define REG_A2XX_RB_PERFCOUNTER0_LOW 0x00000f08
+
+#define REG_A2XX_RB_PERFCOUNTER0_HI 0x00000f09
+
+#define REG_A2XX_RB_PERFCOUNTER1_LOW 0x00000f0a
+
+#define REG_A2XX_RB_PERFCOUNTER1_HI 0x00000f0b
+
+#define REG_A2XX_RB_PERFCOUNTER2_LOW 0x00000f0c
+
+#define REG_A2XX_RB_PERFCOUNTER2_HI 0x00000f0d
+
+#define REG_A2XX_RB_PERFCOUNTER3_LOW 0x00000f0e
+
+#define REG_A2XX_RB_PERFCOUNTER3_HI 0x00000f0f
+
+#define REG_A2XX_SQ_TEX_0 0x00000000
+#define A2XX_SQ_TEX_0_TYPE__MASK 0x00000003
+#define A2XX_SQ_TEX_0_TYPE__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_0_TYPE(enum sq_tex_type val)
+{
+ return ((val) << A2XX_SQ_TEX_0_TYPE__SHIFT) & A2XX_SQ_TEX_0_TYPE__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_X__MASK 0x0000000c
+#define A2XX_SQ_TEX_0_SIGN_X__SHIFT 2
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_X(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_X__SHIFT) & A2XX_SQ_TEX_0_SIGN_X__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_Y__MASK 0x00000030
+#define A2XX_SQ_TEX_0_SIGN_Y__SHIFT 4
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_Y(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_Y__SHIFT) & A2XX_SQ_TEX_0_SIGN_Y__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_Z__MASK 0x000000c0
+#define A2XX_SQ_TEX_0_SIGN_Z__SHIFT 6
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_Z(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_Z__SHIFT) & A2XX_SQ_TEX_0_SIGN_Z__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_W__MASK 0x00000300
+#define A2XX_SQ_TEX_0_SIGN_W__SHIFT 8
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_W(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_W__SHIFT) & A2XX_SQ_TEX_0_SIGN_W__MASK;
+}
+#define A2XX_SQ_TEX_0_CLAMP_X__MASK 0x00001c00
+#define A2XX_SQ_TEX_0_CLAMP_X__SHIFT 10
+static inline uint32_t A2XX_SQ_TEX_0_CLAMP_X(enum sq_tex_clamp val)
+{
+ return ((val) << A2XX_SQ_TEX_0_CLAMP_X__SHIFT) & A2XX_SQ_TEX_0_CLAMP_X__MASK;
+}
+#define A2XX_SQ_TEX_0_CLAMP_Y__MASK 0x0000e000
+#define A2XX_SQ_TEX_0_CLAMP_Y__SHIFT 13
+static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Y(enum sq_tex_clamp val)
+{
+ return ((val) << A2XX_SQ_TEX_0_CLAMP_Y__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Y__MASK;
+}
+#define A2XX_SQ_TEX_0_CLAMP_Z__MASK 0x00070000
+#define A2XX_SQ_TEX_0_CLAMP_Z__SHIFT 16
+static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Z(enum sq_tex_clamp val)
+{
+ return ((val) << A2XX_SQ_TEX_0_CLAMP_Z__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Z__MASK;
+}
+#define A2XX_SQ_TEX_0_PITCH__MASK 0x7fc00000
+#define A2XX_SQ_TEX_0_PITCH__SHIFT 22
+static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
+}
+#define A2XX_SQ_TEX_0_TILED 0x80000000
+
+#define REG_A2XX_SQ_TEX_1 0x00000001
+#define A2XX_SQ_TEX_1_FORMAT__MASK 0x0000003f
+#define A2XX_SQ_TEX_1_FORMAT__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_1_FORMAT(enum a2xx_sq_surfaceformat val)
+{
+ return ((val) << A2XX_SQ_TEX_1_FORMAT__SHIFT) & A2XX_SQ_TEX_1_FORMAT__MASK;
+}
+#define A2XX_SQ_TEX_1_ENDIANNESS__MASK 0x000000c0
+#define A2XX_SQ_TEX_1_ENDIANNESS__SHIFT 6
+static inline uint32_t A2XX_SQ_TEX_1_ENDIANNESS(enum sq_tex_endian val)
+{
+ return ((val) << A2XX_SQ_TEX_1_ENDIANNESS__SHIFT) & A2XX_SQ_TEX_1_ENDIANNESS__MASK;
+}
+#define A2XX_SQ_TEX_1_REQUEST_SIZE__MASK 0x00000300
+#define A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT 8
+static inline uint32_t A2XX_SQ_TEX_1_REQUEST_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT) & A2XX_SQ_TEX_1_REQUEST_SIZE__MASK;
+}
+#define A2XX_SQ_TEX_1_STACKED 0x00000400
+#define A2XX_SQ_TEX_1_CLAMP_POLICY__MASK 0x00000800
+#define A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT 11
+static inline uint32_t A2XX_SQ_TEX_1_CLAMP_POLICY(enum sq_tex_clamp_policy val)
+{
+ return ((val) << A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT) & A2XX_SQ_TEX_1_CLAMP_POLICY__MASK;
+}
+#define A2XX_SQ_TEX_1_BASE_ADDRESS__MASK 0xfffff000
+#define A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT 12
+static inline uint32_t A2XX_SQ_TEX_1_BASE_ADDRESS(uint32_t val)
+{
+ return ((val >> 12) << A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT) & A2XX_SQ_TEX_1_BASE_ADDRESS__MASK;
+}
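+
+/* As with the pitch fields, the val >> 12 here implies the base address is
+ * passed as a byte address and must be 4096-byte aligned: bits 31:12 of the
+ * address map straight onto bits 31:12 of the dword.
+ */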
+
+#define REG_A2XX_SQ_TEX_2 0x00000002
+#define A2XX_SQ_TEX_2_WIDTH__MASK 0x00001fff
+#define A2XX_SQ_TEX_2_WIDTH__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_2_WIDTH(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_2_WIDTH__SHIFT) & A2XX_SQ_TEX_2_WIDTH__MASK;
+}
+#define A2XX_SQ_TEX_2_HEIGHT__MASK 0x03ffe000
+#define A2XX_SQ_TEX_2_HEIGHT__SHIFT 13
+static inline uint32_t A2XX_SQ_TEX_2_HEIGHT(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_2_HEIGHT__SHIFT) & A2XX_SQ_TEX_2_HEIGHT__MASK;
+}
+#define A2XX_SQ_TEX_2_DEPTH__MASK 0xfc000000
+#define A2XX_SQ_TEX_2_DEPTH__SHIFT 26
+static inline uint32_t A2XX_SQ_TEX_2_DEPTH(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_2_DEPTH__SHIFT) & A2XX_SQ_TEX_2_DEPTH__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_3 0x00000003
+#define A2XX_SQ_TEX_3_NUM_FORMAT__MASK 0x00000001
+#define A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_3_NUM_FORMAT(enum sq_tex_num_format val)
+{
+ return ((val) << A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT) & A2XX_SQ_TEX_3_NUM_FORMAT__MASK;
+}
+#define A2XX_SQ_TEX_3_SWIZ_X__MASK 0x0000000e
+#define A2XX_SQ_TEX_3_SWIZ_X__SHIFT 1
+static inline uint32_t A2XX_SQ_TEX_3_SWIZ_X(enum sq_tex_swiz val)
+{
+ return ((val) << A2XX_SQ_TEX_3_SWIZ_X__SHIFT) & A2XX_SQ_TEX_3_SWIZ_X__MASK;
+}
+#define A2XX_SQ_TEX_3_SWIZ_Y__MASK 0x00000070
+#define A2XX_SQ_TEX_3_SWIZ_Y__SHIFT 4
+static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Y(enum sq_tex_swiz val)
+{
+ return ((val) << A2XX_SQ_TEX_3_SWIZ_Y__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Y__MASK;
+}
+#define A2XX_SQ_TEX_3_SWIZ_Z__MASK 0x00000380
+#define A2XX_SQ_TEX_3_SWIZ_Z__SHIFT 7
+static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Z(enum sq_tex_swiz val)
+{
+ return ((val) << A2XX_SQ_TEX_3_SWIZ_Z__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Z__MASK;
+}
+#define A2XX_SQ_TEX_3_SWIZ_W__MASK 0x00001c00
+#define A2XX_SQ_TEX_3_SWIZ_W__SHIFT 10
+static inline uint32_t A2XX_SQ_TEX_3_SWIZ_W(enum sq_tex_swiz val)
+{
+ return ((val) << A2XX_SQ_TEX_3_SWIZ_W__SHIFT) & A2XX_SQ_TEX_3_SWIZ_W__MASK;
+}
+#define A2XX_SQ_TEX_3_EXP_ADJUST__MASK 0x0007e000
+#define A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT 13
+static inline uint32_t A2XX_SQ_TEX_3_EXP_ADJUST(int32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT) & A2XX_SQ_TEX_3_EXP_ADJUST__MASK;
+}
+#define A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK 0x00180000
+#define A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT 19
+static inline uint32_t A2XX_SQ_TEX_3_XY_MAG_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK 0x00600000
+#define A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT 21
+static inline uint32_t A2XX_SQ_TEX_3_XY_MIN_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_MIP_FILTER__MASK 0x01800000
+#define A2XX_SQ_TEX_3_MIP_FILTER__SHIFT 23
+static inline uint32_t A2XX_SQ_TEX_3_MIP_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_3_MIP_FILTER__SHIFT) & A2XX_SQ_TEX_3_MIP_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_ANISO_FILTER__MASK 0x0e000000
+#define A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT 25
+static inline uint32_t A2XX_SQ_TEX_3_ANISO_FILTER(enum sq_tex_aniso_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT) & A2XX_SQ_TEX_3_ANISO_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_BORDER_SIZE__MASK 0x80000000
+#define A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT 31
+static inline uint32_t A2XX_SQ_TEX_3_BORDER_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT) & A2XX_SQ_TEX_3_BORDER_SIZE__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_4 0x00000004
+#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK 0x00000001
+#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_4_VOL_MAG_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK 0x00000002
+#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT 1
+static inline uint32_t A2XX_SQ_TEX_4_VOL_MIN_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK 0x0000003c
+#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT 2
+static inline uint32_t A2XX_SQ_TEX_4_MIP_MIN_LEVEL(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK;
+}
+#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK 0x000003c0
+#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT 6
+static inline uint32_t A2XX_SQ_TEX_4_MIP_MAX_LEVEL(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK;
+}
+#define A2XX_SQ_TEX_4_MAX_ANISO_WALK 0x00000400
+#define A2XX_SQ_TEX_4_MIN_ANISO_WALK 0x00000800
+#define A2XX_SQ_TEX_4_LOD_BIAS__MASK 0x003ff000
+#define A2XX_SQ_TEX_4_LOD_BIAS__SHIFT 12
+static inline uint32_t A2XX_SQ_TEX_4_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 32.0))) << A2XX_SQ_TEX_4_LOD_BIAS__SHIFT) & A2XX_SQ_TEX_4_LOD_BIAS__MASK;
+}
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK 0x07c00000
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT 22
+static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK;
+}
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK 0xf8000000
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT 27
+static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_5 0x00000005
+#define A2XX_SQ_TEX_5_BORDER_COLOR__MASK 0x00000003
+#define A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_5_BORDER_COLOR(enum sq_tex_border_color val)
+{
+ return ((val) << A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT) & A2XX_SQ_TEX_5_BORDER_COLOR__MASK;
+}
+#define A2XX_SQ_TEX_5_FORCE_BCW_MAX 0x00000004
+#define A2XX_SQ_TEX_5_TRI_CLAMP__MASK 0x00000018
+#define A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT 3
+static inline uint32_t A2XX_SQ_TEX_5_TRI_CLAMP(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT) & A2XX_SQ_TEX_5_TRI_CLAMP__MASK;
+}
+#define A2XX_SQ_TEX_5_ANISO_BIAS__MASK 0x000001e0
+#define A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT 5
+static inline uint32_t A2XX_SQ_TEX_5_ANISO_BIAS(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT) & A2XX_SQ_TEX_5_ANISO_BIAS__MASK;
+}
+#define A2XX_SQ_TEX_5_DIMENSION__MASK 0x00000600
+#define A2XX_SQ_TEX_5_DIMENSION__SHIFT 9
+static inline uint32_t A2XX_SQ_TEX_5_DIMENSION(enum sq_tex_dimension val)
+{
+ return ((val) << A2XX_SQ_TEX_5_DIMENSION__SHIFT) & A2XX_SQ_TEX_5_DIMENSION__MASK;
+}
+#define A2XX_SQ_TEX_5_PACKED_MIPS 0x00000800
+#define A2XX_SQ_TEX_5_MIP_ADDRESS__MASK 0xfffff000
+#define A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT 12
+static inline uint32_t A2XX_SQ_TEX_5_MIP_ADDRESS(uint32_t val)
+{
+ return ((val >> 12) << A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT) & A2XX_SQ_TEX_5_MIP_ADDRESS__MASK;
+}
+
+
+#endif /* A2XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
new file mode 100644
index 000000000..2428d6ac5
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include "a2xx_gpu.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+extern bool hang_debug;
+
+static void a2xx_dump(struct msm_gpu *gpu);
+static bool a2xx_idle(struct msm_gpu *gpu);
+
+static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+ struct msm_ringbuffer *ring = submit->ring;
+ unsigned int i;
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ /* ignore IB-targets */
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ /* ignore if there has not been a ctx switch: */
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ break;
+ fallthrough;
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ OUT_PKT2(ring);
+ break;
+ }
+ }
+
+ OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
+ OUT_RING(ring, submit->seqno);
+
+ /* wait for idle before cache flush/interrupt */
+ OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+ OUT_RING(ring, 0x00000000);
+
+ OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+ OUT_RING(ring, CACHE_FLUSH_TS);
+ OUT_RING(ring, rbmemptr(ring, fence));
+ OUT_RING(ring, submit->seqno);
+ OUT_PKT3(ring, CP_INTERRUPT, 1);
+ OUT_RING(ring, 0x80000000);
+
+ adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
+}
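+
+/* The tail of a2xx_submit() above implements fencing: CP_WAIT_FOR_IDLE
+ * drains the pipe, the CACHE_FLUSH_TS event writes submit->seqno to the
+ * ring's fence memptr once caches are flushed, and CP_INTERRUPT raises an
+ * IRQ (the handler in a2xx_irq() below expects RB_INT) so the CPU side can
+ * retire the submit.
+ */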
+
+static bool a2xx_me_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT3(ring, CP_ME_INIT, 18);
+
+ /* All fields present (bits 9:0) */
+ OUT_RING(ring, 0x000003ff);
+ /* Disable/Enable Real-Time Stream processing (present but ignored) */
+ OUT_RING(ring, 0x00000000);
+ /* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
+ OUT_RING(ring, 0x00000000);
+
+ OUT_RING(ring, REG_A2XX_RB_SURFACE_INFO - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SC_WINDOW_OFFSET - 0x2000);
+ OUT_RING(ring, REG_A2XX_VGT_MAX_VTX_INDX - 0x2000);
+ OUT_RING(ring, REG_A2XX_SQ_PROGRAM_CNTL - 0x2000);
+ OUT_RING(ring, REG_A2XX_RB_DEPTHCONTROL - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SU_POINT_SIZE - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SC_LINE_CNTL - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE - 0x2000);
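+ /* the eight dwords above are context register offsets relative to
+ * 0x2000 (all eight REG_A2XX_* values sit at or above 0x2000 in
+ * a2xx.xml.h), presumably the base the microcode uses to address
+ * these fields
+ */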
+
+ /* Vertex and Pixel Shader Start Addresses in instructions
+ * (3 DWORDS per instruction) */
+ OUT_RING(ring, 0x80000180);
+ /* Maximum Contexts */
+ OUT_RING(ring, 0x00000001);
+ /* Write Confirm Interval; the CP will wait
+ * wait_interval * 16 clocks between polls */
+ OUT_RING(ring, 0x00000000);
+ /* NQ and External Memory Swap */
+ OUT_RING(ring, 0x00000000);
+ /* protected mode error checking (0x1f2 is REG_AXXX_CP_INT_CNTL) */
+ OUT_RING(ring, 0x200001f2);
+ /* Disable header dumping and Header dump address */
+ OUT_RING(ring, 0x00000000);
+ /* Header dump size */
+ OUT_RING(ring, 0x00000000);
+
+ /* enable protected mode */
+ OUT_PKT3(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
+ return a2xx_idle(gpu);
+}
+
+static int a2xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ dma_addr_t pt_base, tran_error;
+ uint32_t *ptr, len;
+ int i, ret;
+
+ msm_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
+
+ DBG("%s", gpu->name);
+
+ /* halt ME to avoid ucode upload issues on a20x */
+ gpu_write(gpu, REG_AXXX_CP_ME_CNTL, AXXX_CP_ME_CNTL_HALT);
+
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0xfffffffe);
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0xffffffff);
+
+ /* note: kgsl uses 0x00000001 after first reset on a22x */
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0xffffffff);
+ msleep(30);
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0x00000000);
+
+ if (adreno_is_a225(adreno_gpu))
+ gpu_write(gpu, REG_A2XX_SQ_FLOW_CONTROL, 0x18000000);
+
+ /* note: kgsl uses 0x0000ffff for a20x */
+ gpu_write(gpu, REG_A2XX_RBBM_CNTL, 0x00004442);
+
+ /* MPU: physical range */
+ gpu_write(gpu, REG_A2XX_MH_MMU_MPU_BASE, 0x00000000);
+ gpu_write(gpu, REG_A2XX_MH_MMU_MPU_END, 0xfffff000);
+
+ gpu_write(gpu, REG_A2XX_MH_MMU_CONFIG, A2XX_MH_MMU_CONFIG_MMU_ENABLE |
+ A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(BEH_TRAN_RNG));
+
+ /* same as parameters in adreno_gpu */
+ gpu_write(gpu, REG_A2XX_MH_MMU_VA_RANGE, SZ_16M |
+ A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(0xfff));
+
+ gpu_write(gpu, REG_A2XX_MH_MMU_PT_BASE, pt_base);
+ gpu_write(gpu, REG_A2XX_MH_MMU_TRAN_ERROR, tran_error);
+
+ gpu_write(gpu, REG_A2XX_MH_MMU_INVALIDATE,
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+
+ gpu_write(gpu, REG_A2XX_MH_ARBITER_CONFIG,
+ A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(16) |
+ A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(1) |
+ A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(8) |
+ A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE);
+ if (!adreno_is_a20x(adreno_gpu))
+ gpu_write(gpu, REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1, 0x00032f07);
+
+ gpu_write(gpu, REG_A2XX_SQ_VS_PROGRAM, 0x00000000);
+ gpu_write(gpu, REG_A2XX_SQ_PS_PROGRAM, 0x00000000);
+
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0); /* 0x200 for msm8960? */
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0); /* 0x80/0x1a0 for a22x? */
+
+ /* note: gsl doesn't set this */
+ gpu_write(gpu, REG_A2XX_RBBM_DEBUG, 0x00080000);
+
+ gpu_write(gpu, REG_A2XX_RBBM_INT_CNTL,
+ A2XX_RBBM_INT_CNTL_RDERR_INT_MASK);
+ gpu_write(gpu, REG_AXXX_CP_INT_CNTL,
+ AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK |
+ AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK |
+ AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK |
+ AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK |
+ AXXX_CP_INT_CNTL_IB_ERROR_MASK |
+ AXXX_CP_INT_CNTL_IB1_INT_MASK |
+ AXXX_CP_INT_CNTL_RB_INT_MASK);
+ gpu_write(gpu, REG_A2XX_SQ_INT_CNTL, 0);
+ gpu_write(gpu, REG_A2XX_MH_INTERRUPT_MASK,
+ A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR |
+ A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR |
+ A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT);
+
+ for (i = 3; i <= 5; i++)
+ if ((SZ_16K << i) == adreno_gpu->gmem)
+ break;
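+ /* the loop maps the GMEM size onto the EDRAM_INFO size field:
+ * SZ_16K << 3 = 128K (i = 3), << 4 = 256K, << 5 = 512K; if no size
+ * matches, i falls out of the loop as 6
+ */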
+ gpu_write(gpu, REG_A2XX_RB_EDRAM_INFO, i);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
+ /* NOTE: PM4/micro-engine firmware registers look to be the same
+ * for a2xx and a3xx.. we could possibly push that part down to
+ * adreno_gpu base class. Or push both PM4 and PFP but
+ * parameterize the pfp ucode addr/data registers..
+ */
+
+ /* Load PM4: */
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
+ DBG("loading PM4 ucode version: %x", ptr[1]);
+
+ gpu_write(gpu, REG_AXXX_CP_DEBUG,
+ AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
+ gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
+
+ /* Load PFP: */
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
+ DBG("loading PFP ucode version: %x", ptr[5]);
+
+ gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_ADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_DATA, ptr[i]);
+
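+ /* Note that both load loops above start at i = 1: the first dword of
+ * each firmware image is skipped, apparently a header rather than
+ * microcode (the version DBG prints read ptr[1] / ptr[5], not ptr[0]).
+ */
+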
+ gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x000C0804);
+
+ /* clear ME_HALT to start micro engine */
+ gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
+
+ return a2xx_me_init(gpu) ? 0 : -EINVAL;
+}
+
+static void a2xx_recover(struct msm_gpu *gpu)
+{
+ int i;
+
+ adreno_dump_info(gpu);
+
+ for (i = 0; i < 8; i++) {
+ printk("CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
+ }
+
+ /* dump registers before resetting gpu, if enabled: */
+ if (hang_debug)
+ a2xx_dump(gpu);
+
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 1);
+ gpu_read(gpu, REG_A2XX_RBBM_SOFT_RESET);
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0);
+ adreno_recover(gpu);
+}
+
+static void a2xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a2xx_gpu *a2xx_gpu = to_a2xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ adreno_gpu_cleanup(adreno_gpu);
+
+ kfree(a2xx_gpu);
+}
+
+static bool a2xx_idle(struct msm_gpu *gpu)
+{
+ /* wait for ringbuffer to drain: */
+ if (!adreno_idle(gpu, gpu->rb[0]))
+ return false;
+
+ /* then wait for GPU to finish: */
+ if (spin_until(!(gpu_read(gpu, REG_A2XX_RBBM_STATUS) &
+ A2XX_RBBM_STATUS_GUI_ACTIVE))) {
+ DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ return false;
+ }
+
+ return true;
+}
+
+static irqreturn_t a2xx_irq(struct msm_gpu *gpu)
+{
+ uint32_t mstatus, status;
+
+ mstatus = gpu_read(gpu, REG_A2XX_MASTER_INT_SIGNAL);
+
+ if (mstatus & A2XX_MASTER_INT_SIGNAL_MH_INT_STAT) {
+ status = gpu_read(gpu, REG_A2XX_MH_INTERRUPT_STATUS);
+
+ dev_warn(gpu->dev->dev, "MH_INT: %08X\n", status);
+ dev_warn(gpu->dev->dev, "MMU_PAGE_FAULT: %08X\n",
+ gpu_read(gpu, REG_A2XX_MH_MMU_PAGE_FAULT));
+
+ gpu_write(gpu, REG_A2XX_MH_INTERRUPT_CLEAR, status);
+ }
+
+ if (mstatus & A2XX_MASTER_INT_SIGNAL_CP_INT_STAT) {
+ status = gpu_read(gpu, REG_AXXX_CP_INT_STATUS);
+
+ /* only RB_INT is expected */
+ if (status & ~AXXX_CP_INT_CNTL_RB_INT_MASK)
+ dev_warn(gpu->dev->dev, "CP_INT: %08X\n", status);
+
+ gpu_write(gpu, REG_AXXX_CP_INT_ACK, status);
+ }
+
+ if (mstatus & A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT) {
+ status = gpu_read(gpu, REG_A2XX_RBBM_INT_STATUS);
+
+ dev_warn(gpu->dev->dev, "RBBM_INT: %08X\n", status);
+
+ gpu_write(gpu, REG_A2XX_RBBM_INT_ACK, status);
+ }
+
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
+static const unsigned int a200_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
+ 0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
+ 0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
+ 0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
+ 0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
+ 0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
+ 0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
+ 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A43, 0x0A45, 0x0A45,
+ 0x0A4E, 0x0A4F, 0x0C2C, 0x0C2C, 0x0C30, 0x0C30, 0x0C38, 0x0C3C,
+ 0x0C40, 0x0C40, 0x0C44, 0x0C44, 0x0C80, 0x0C86, 0x0C88, 0x0C94,
+ 0x0C99, 0x0C9A, 0x0CA4, 0x0CA5, 0x0D00, 0x0D03, 0x0D06, 0x0D06,
+ 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
+ 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
+ 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
+ 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x0F0C, 0x0F0C, 0x0F0E, 0x0F12,
+ 0x0F26, 0x0F2A, 0x0F2C, 0x0F2C, 0x2000, 0x2002, 0x2006, 0x200F,
+ 0x2080, 0x2082, 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184,
+ 0x21F5, 0x21F7, 0x2200, 0x2208, 0x2280, 0x2283, 0x2293, 0x2294,
+ 0x2300, 0x2308, 0x2312, 0x2312, 0x2316, 0x231D, 0x2324, 0x2326,
+ 0x2380, 0x2383, 0x2400, 0x2402, 0x2406, 0x240F, 0x2480, 0x2482,
+ 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7,
+ 0x2600, 0x2608, 0x2680, 0x2683, 0x2693, 0x2694, 0x2700, 0x2708,
+ 0x2712, 0x2712, 0x2716, 0x271D, 0x2724, 0x2726, 0x2780, 0x2783,
+ 0x4000, 0x4003, 0x4800, 0x4805, 0x4900, 0x4900, 0x4908, 0x4908,
+ ~0 /* sentinel */
+};
+
+static const unsigned int a220_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
+ 0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
+ 0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
+ 0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
+ 0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
+ 0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
+ 0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
+ 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A40, 0x0A42, 0x0A43,
+ 0x0A45, 0x0A45, 0x0A4E, 0x0A4F, 0x0C30, 0x0C30, 0x0C38, 0x0C39,
+ 0x0C3C, 0x0C3C, 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03,
+ 0x0D05, 0x0D06, 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1,
+ 0x0DC8, 0x0DD4, 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04,
+ 0x0E17, 0x0E1E, 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0,
+ 0x0ED4, 0x0ED7, 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x2002,
+ 0x2006, 0x200F, 0x2080, 0x2082, 0x2100, 0x2102, 0x2104, 0x2109,
+ 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7, 0x2200, 0x2202,
+ 0x2204, 0x2204, 0x2208, 0x2208, 0x2280, 0x2282, 0x2294, 0x2294,
+ 0x2300, 0x2308, 0x2309, 0x230A, 0x2312, 0x2312, 0x2316, 0x2316,
+ 0x2318, 0x231D, 0x2324, 0x2326, 0x2380, 0x2383, 0x2400, 0x2402,
+ 0x2406, 0x240F, 0x2480, 0x2482, 0x2500, 0x2502, 0x2504, 0x2509,
+ 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7, 0x2600, 0x2602,
+ 0x2604, 0x2606, 0x2608, 0x2608, 0x2680, 0x2682, 0x2694, 0x2694,
+ 0x2700, 0x2708, 0x2712, 0x2712, 0x2716, 0x2716, 0x2718, 0x271D,
+ 0x2724, 0x2726, 0x2780, 0x2783, 0x4000, 0x4003, 0x4800, 0x4805,
+ 0x4900, 0x4900, 0x4908, 0x4908,
+ ~0 /* sentinel */
+};
+
+static const unsigned int a225_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x013C, 0x013C, 0x0140, 0x014F, 0x01C0, 0x01C1,
+ 0x01C3, 0x01C8, 0x01D5, 0x01D9, 0x01DC, 0x01DD, 0x01EA, 0x01EA,
+ 0x01EE, 0x01F3, 0x01F6, 0x01F7, 0x01FC, 0x01FF, 0x0391, 0x0392,
+ 0x039B, 0x039E, 0x03B2, 0x03B5, 0x03B7, 0x03B7, 0x03F8, 0x03FB,
+ 0x0440, 0x0440, 0x0443, 0x0444, 0x044B, 0x044B, 0x044D, 0x044F,
+ 0x0452, 0x0452, 0x0454, 0x045B, 0x047F, 0x047F, 0x0578, 0x0587,
+ 0x05C9, 0x05C9, 0x05D0, 0x05D0, 0x0601, 0x0604, 0x0606, 0x0609,
+ 0x060B, 0x060E, 0x0613, 0x0614, 0x0A29, 0x0A2B, 0x0A2F, 0x0A31,
+ 0x0A40, 0x0A40, 0x0A42, 0x0A43, 0x0A45, 0x0A45, 0x0A4E, 0x0A4F,
+ 0x0C01, 0x0C1D, 0x0C30, 0x0C30, 0x0C38, 0x0C39, 0x0C3C, 0x0C3C,
+ 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03, 0x0D05, 0x0D06,
+ 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
+ 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
+ 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
+ 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x200F, 0x2080, 0x2082,
+ 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7,
+ 0x2200, 0x2202, 0x2204, 0x2206, 0x2208, 0x2210, 0x2220, 0x2222,
+ 0x2280, 0x2282, 0x2294, 0x2294, 0x2297, 0x2297, 0x2300, 0x230A,
+ 0x2312, 0x2312, 0x2315, 0x2316, 0x2318, 0x231D, 0x2324, 0x2326,
+ 0x2340, 0x2357, 0x2360, 0x2360, 0x2380, 0x2383, 0x2400, 0x240F,
+ 0x2480, 0x2482, 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584,
+ 0x25F5, 0x25F7, 0x2600, 0x2602, 0x2604, 0x2606, 0x2608, 0x2610,
+ 0x2620, 0x2622, 0x2680, 0x2682, 0x2694, 0x2694, 0x2697, 0x2697,
+ 0x2700, 0x270A, 0x2712, 0x2712, 0x2715, 0x2716, 0x2718, 0x271D,
+ 0x2724, 0x2726, 0x2740, 0x2757, 0x2760, 0x2760, 0x2780, 0x2783,
+ 0x4000, 0x4003, 0x4800, 0x4806, 0x4808, 0x4808, 0x4900, 0x4900,
+ 0x4908, 0x4908,
+ ~0 /* sentinel */
+};
+
+/* would be nice to not have to duplicate the _show() stuff with printk(): */
+static void a2xx_dump(struct msm_gpu *gpu)
+{
+ printk("status: %08x\n",
+ gpu_read(gpu, REG_A2XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu_state_get(gpu, state);
+
+ state->rbbm_status = gpu_read(gpu, REG_A2XX_RBBM_STATUS);
+
+ return state;
+}
+
+static struct msm_gem_address_space *
+a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+{
+ struct msm_mmu *mmu = msm_gpummu_new(&pdev->dev, gpu);
+ struct msm_gem_address_space *aspace;
+
+ aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
+ 0xfff * SZ_64K);
+
+ if (IS_ERR(aspace) && !IS_ERR(mmu))
+ mmu->funcs->destroy(mmu);
+
+ return aspace;
+}
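+
+/* The SZ_16M start address and 0xfff * SZ_64K size passed to
+ * msm_gem_address_space_create() mirror the REG_A2XX_MH_MMU_VA_RANGE
+ * programming in a2xx_hw_init() (VA base SZ_16M, 0xfff 64kB regions), so
+ * the GEM address space matches the range the MMU actually translates.
+ */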
+
+static u32 a2xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR);
+ return ring->memptrs->rptr;
+}
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a2xx_hw_init,
+ .pm_suspend = msm_gpu_pm_suspend,
+ .pm_resume = msm_gpu_pm_resume,
+ .recover = a2xx_recover,
+ .submit = a2xx_submit,
+ .active_ring = adreno_active_ring,
+ .irq = a2xx_irq,
+ .destroy = a2xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
+#endif
+ .gpu_state_get = a2xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
+ .create_address_space = a2xx_create_address_space,
+ .get_rptr = a2xx_get_rptr,
+ },
+};
+
+static const struct msm_gpu_perfcntr perfcntrs[] = {
+/* TODO */
+};
+
+struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
+{
+ struct a2xx_gpu *a2xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ int ret;
+
+ if (!pdev) {
+ dev_err(dev->dev, "no a2xx device\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ a2xx_gpu = kzalloc(sizeof(*a2xx_gpu), GFP_KERNEL);
+ if (!a2xx_gpu) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ adreno_gpu = &a2xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ gpu->perfcntrs = perfcntrs;
+ gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ if (ret)
+ goto fail;
+
+ if (adreno_is_a20x(adreno_gpu))
+ adreno_gpu->registers = a200_registers;
+ else if (adreno_is_a225(adreno_gpu))
+ adreno_gpu->registers = a225_registers;
+ else
+ adreno_gpu->registers = a220_registers;
+
+ if (!gpu->aspace) {
+ dev_err(dev->dev, "No memory protection without MMU\n");
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
+ }
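+ /* allow_vram_carveout is a module option (declared elsewhere in the
+ * driver) that permits falling back to an unprotected VRAM carveout
+ * when no MMU/aspace is available; without it we fail the probe here.
+ */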
+
+ return gpu;
+
+fail:
+ if (a2xx_gpu)
+ a2xx_destroy(&a2xx_gpu->base.base);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.h b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
new file mode 100644
index 000000000..02fba2cb8
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#ifndef __A2XX_GPU_H__
+#define __A2XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a2xx.xml.h"
+
+struct a2xx_gpu {
+ struct adreno_gpu base;
+ bool pm_enabled;
+};
+#define to_a2xx_gpu(x) container_of(x, struct a2xx_gpu, base)
+
+#endif /* __A2XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
new file mode 100644
index 000000000..520ae3f37
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -0,0 +1,3247 @@
+#ifndef A3XX_XML
+#define A3XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a3xx_tile_mode {
+ LINEAR = 0,
+ TILE_4X4 = 1,
+ TILE_32X32 = 2,
+ TILE_4X2 = 3,
+};
+
+enum a3xx_state_block_id {
+ HLSQ_BLOCK_ID_TP_TEX = 2,
+ HLSQ_BLOCK_ID_TP_MIPMAP = 3,
+ HLSQ_BLOCK_ID_SP_VS = 4,
+ HLSQ_BLOCK_ID_SP_FS = 6,
+};
+
+enum a3xx_cache_opcode {
+ INVALIDATE = 1,
+};
+
+enum a3xx_vtx_fmt {
+ VFMT_32_FLOAT = 0,
+ VFMT_32_32_FLOAT = 1,
+ VFMT_32_32_32_FLOAT = 2,
+ VFMT_32_32_32_32_FLOAT = 3,
+ VFMT_16_FLOAT = 4,
+ VFMT_16_16_FLOAT = 5,
+ VFMT_16_16_16_FLOAT = 6,
+ VFMT_16_16_16_16_FLOAT = 7,
+ VFMT_32_FIXED = 8,
+ VFMT_32_32_FIXED = 9,
+ VFMT_32_32_32_FIXED = 10,
+ VFMT_32_32_32_32_FIXED = 11,
+ VFMT_16_SINT = 16,
+ VFMT_16_16_SINT = 17,
+ VFMT_16_16_16_SINT = 18,
+ VFMT_16_16_16_16_SINT = 19,
+ VFMT_16_UINT = 20,
+ VFMT_16_16_UINT = 21,
+ VFMT_16_16_16_UINT = 22,
+ VFMT_16_16_16_16_UINT = 23,
+ VFMT_16_SNORM = 24,
+ VFMT_16_16_SNORM = 25,
+ VFMT_16_16_16_SNORM = 26,
+ VFMT_16_16_16_16_SNORM = 27,
+ VFMT_16_UNORM = 28,
+ VFMT_16_16_UNORM = 29,
+ VFMT_16_16_16_UNORM = 30,
+ VFMT_16_16_16_16_UNORM = 31,
+ VFMT_32_UINT = 32,
+ VFMT_32_32_UINT = 33,
+ VFMT_32_32_32_UINT = 34,
+ VFMT_32_32_32_32_UINT = 35,
+ VFMT_32_SINT = 36,
+ VFMT_32_32_SINT = 37,
+ VFMT_32_32_32_SINT = 38,
+ VFMT_32_32_32_32_SINT = 39,
+ VFMT_8_UINT = 40,
+ VFMT_8_8_UINT = 41,
+ VFMT_8_8_8_UINT = 42,
+ VFMT_8_8_8_8_UINT = 43,
+ VFMT_8_UNORM = 44,
+ VFMT_8_8_UNORM = 45,
+ VFMT_8_8_8_UNORM = 46,
+ VFMT_8_8_8_8_UNORM = 47,
+ VFMT_8_SINT = 48,
+ VFMT_8_8_SINT = 49,
+ VFMT_8_8_8_SINT = 50,
+ VFMT_8_8_8_8_SINT = 51,
+ VFMT_8_SNORM = 52,
+ VFMT_8_8_SNORM = 53,
+ VFMT_8_8_8_SNORM = 54,
+ VFMT_8_8_8_8_SNORM = 55,
+ VFMT_10_10_10_2_UINT = 56,
+ VFMT_10_10_10_2_UNORM = 57,
+ VFMT_10_10_10_2_SINT = 58,
+ VFMT_10_10_10_2_SNORM = 59,
+ VFMT_2_10_10_10_UINT = 60,
+ VFMT_2_10_10_10_UNORM = 61,
+ VFMT_2_10_10_10_SINT = 62,
+ VFMT_2_10_10_10_SNORM = 63,
+ VFMT_NONE = 255,
+};
+
+enum a3xx_tex_fmt {
+ TFMT_5_6_5_UNORM = 4,
+ TFMT_5_5_5_1_UNORM = 5,
+ TFMT_4_4_4_4_UNORM = 7,
+ TFMT_Z16_UNORM = 9,
+ TFMT_X8Z24_UNORM = 10,
+ TFMT_Z32_FLOAT = 11,
+ TFMT_UV_64X32 = 16,
+ TFMT_VU_64X32 = 17,
+ TFMT_Y_64X32 = 18,
+ TFMT_NV12_64X32 = 19,
+ TFMT_UV_LINEAR = 20,
+ TFMT_VU_LINEAR = 21,
+ TFMT_Y_LINEAR = 22,
+ TFMT_NV12_LINEAR = 23,
+ TFMT_I420_Y = 24,
+ TFMT_I420_U = 26,
+ TFMT_I420_V = 27,
+ TFMT_ATC_RGB = 32,
+ TFMT_ATC_RGBA_EXPLICIT = 33,
+ TFMT_ETC1 = 34,
+ TFMT_ATC_RGBA_INTERPOLATED = 35,
+ TFMT_DXT1 = 36,
+ TFMT_DXT3 = 37,
+ TFMT_DXT5 = 38,
+ TFMT_2_10_10_10_UNORM = 40,
+ TFMT_10_10_10_2_UNORM = 41,
+ TFMT_9_9_9_E5_FLOAT = 42,
+ TFMT_11_11_10_FLOAT = 43,
+ TFMT_A8_UNORM = 44,
+ TFMT_L8_UNORM = 45,
+ TFMT_L8_A8_UNORM = 47,
+ TFMT_8_UNORM = 48,
+ TFMT_8_8_UNORM = 49,
+ TFMT_8_8_8_UNORM = 50,
+ TFMT_8_8_8_8_UNORM = 51,
+ TFMT_8_SNORM = 52,
+ TFMT_8_8_SNORM = 53,
+ TFMT_8_8_8_SNORM = 54,
+ TFMT_8_8_8_8_SNORM = 55,
+ TFMT_8_UINT = 56,
+ TFMT_8_8_UINT = 57,
+ TFMT_8_8_8_UINT = 58,
+ TFMT_8_8_8_8_UINT = 59,
+ TFMT_8_SINT = 60,
+ TFMT_8_8_SINT = 61,
+ TFMT_8_8_8_SINT = 62,
+ TFMT_8_8_8_8_SINT = 63,
+ TFMT_16_FLOAT = 64,
+ TFMT_16_16_FLOAT = 65,
+ TFMT_16_16_16_16_FLOAT = 67,
+ TFMT_16_UINT = 68,
+ TFMT_16_16_UINT = 69,
+ TFMT_16_16_16_16_UINT = 71,
+ TFMT_16_SINT = 72,
+ TFMT_16_16_SINT = 73,
+ TFMT_16_16_16_16_SINT = 75,
+ TFMT_16_UNORM = 76,
+ TFMT_16_16_UNORM = 77,
+ TFMT_16_16_16_16_UNORM = 79,
+ TFMT_16_SNORM = 80,
+ TFMT_16_16_SNORM = 81,
+ TFMT_16_16_16_16_SNORM = 83,
+ TFMT_32_FLOAT = 84,
+ TFMT_32_32_FLOAT = 85,
+ TFMT_32_32_32_32_FLOAT = 87,
+ TFMT_32_UINT = 88,
+ TFMT_32_32_UINT = 89,
+ TFMT_32_32_32_32_UINT = 91,
+ TFMT_32_SINT = 92,
+ TFMT_32_32_SINT = 93,
+ TFMT_32_32_32_32_SINT = 95,
+ TFMT_2_10_10_10_UINT = 96,
+ TFMT_10_10_10_2_UINT = 97,
+ TFMT_ETC2_RG11_SNORM = 112,
+ TFMT_ETC2_RG11_UNORM = 113,
+ TFMT_ETC2_R11_SNORM = 114,
+ TFMT_ETC2_R11_UNORM = 115,
+ TFMT_ETC2_RGBA8 = 116,
+ TFMT_ETC2_RGB8A1 = 117,
+ TFMT_ETC2_RGB8 = 118,
+ TFMT_NONE = 255,
+};
+
+enum a3xx_color_fmt {
+ RB_R5G6B5_UNORM = 0,
+ RB_R5G5B5A1_UNORM = 1,
+ RB_R4G4B4A4_UNORM = 3,
+ RB_R8G8B8_UNORM = 4,
+ RB_R8G8B8A8_UNORM = 8,
+ RB_R8G8B8A8_SNORM = 9,
+ RB_R8G8B8A8_UINT = 10,
+ RB_R8G8B8A8_SINT = 11,
+ RB_R8G8_UNORM = 12,
+ RB_R8G8_SNORM = 13,
+ RB_R8G8_UINT = 14,
+ RB_R8G8_SINT = 15,
+ RB_R10G10B10A2_UNORM = 16,
+ RB_A2R10G10B10_UNORM = 17,
+ RB_R10G10B10A2_UINT = 18,
+ RB_A2R10G10B10_UINT = 19,
+ RB_A8_UNORM = 20,
+ RB_R8_UNORM = 21,
+ RB_R16_FLOAT = 24,
+ RB_R16G16_FLOAT = 25,
+ RB_R16G16B16A16_FLOAT = 27,
+ RB_R11G11B10_FLOAT = 28,
+ RB_R16_SNORM = 32,
+ RB_R16G16_SNORM = 33,
+ RB_R16G16B16A16_SNORM = 35,
+ RB_R16_UNORM = 36,
+ RB_R16G16_UNORM = 37,
+ RB_R16G16B16A16_UNORM = 39,
+ RB_R16_SINT = 40,
+ RB_R16G16_SINT = 41,
+ RB_R16G16B16A16_SINT = 43,
+ RB_R16_UINT = 44,
+ RB_R16G16_UINT = 45,
+ RB_R16G16B16A16_UINT = 47,
+ RB_R32_FLOAT = 48,
+ RB_R32G32_FLOAT = 49,
+ RB_R32G32B32A32_FLOAT = 51,
+ RB_R32_SINT = 52,
+ RB_R32G32_SINT = 53,
+ RB_R32G32B32A32_SINT = 55,
+ RB_R32_UINT = 56,
+ RB_R32G32_UINT = 57,
+ RB_R32G32B32A32_UINT = 59,
+ RB_NONE = 255,
+};
+
+enum a3xx_cp_perfcounter_select {
+ CP_ALWAYS_COUNT = 0,
+ CP_AHB_PFPTRANS_WAIT = 3,
+ CP_AHB_NRTTRANS_WAIT = 6,
+ CP_CSF_NRT_READ_WAIT = 8,
+ CP_CSF_I1_FIFO_FULL = 9,
+ CP_CSF_I2_FIFO_FULL = 10,
+ CP_CSF_ST_FIFO_FULL = 11,
+ CP_RESERVED_12 = 12,
+ CP_CSF_RING_ROQ_FULL = 13,
+ CP_CSF_I1_ROQ_FULL = 14,
+ CP_CSF_I2_ROQ_FULL = 15,
+ CP_CSF_ST_ROQ_FULL = 16,
+ CP_RESERVED_17 = 17,
+ CP_MIU_TAG_MEM_FULL = 18,
+ CP_MIU_NRT_WRITE_STALLED = 22,
+ CP_MIU_NRT_READ_STALLED = 23,
+ CP_ME_REGS_RB_DONE_FIFO_FULL = 26,
+ CP_ME_REGS_VS_EVENT_FIFO_FULL = 27,
+ CP_ME_REGS_PS_EVENT_FIFO_FULL = 28,
+ CP_ME_REGS_CF_EVENT_FIFO_FULL = 29,
+ CP_ME_MICRO_RB_STARVED = 30,
+ CP_AHB_RBBM_DWORD_SENT = 40,
+ CP_ME_BUSY_CLOCKS = 41,
+ CP_ME_WAIT_CONTEXT_AVAIL = 42,
+ CP_PFP_TYPE0_PACKET = 43,
+ CP_PFP_TYPE3_PACKET = 44,
+ CP_CSF_RB_WPTR_NEQ_RPTR = 45,
+ CP_CSF_I1_SIZE_NEQ_ZERO = 46,
+ CP_CSF_I2_SIZE_NEQ_ZERO = 47,
+ CP_CSF_RBI1I2_FETCHING = 48,
+};
+
+enum a3xx_gras_tse_perfcounter_select {
+ GRAS_TSEPERF_INPUT_PRIM = 0,
+ GRAS_TSEPERF_INPUT_NULL_PRIM = 1,
+ GRAS_TSEPERF_TRIVAL_REJ_PRIM = 2,
+ GRAS_TSEPERF_CLIPPED_PRIM = 3,
+ GRAS_TSEPERF_NEW_PRIM = 4,
+ GRAS_TSEPERF_ZERO_AREA_PRIM = 5,
+ GRAS_TSEPERF_FACENESS_CULLED_PRIM = 6,
+ GRAS_TSEPERF_ZERO_PIXEL_PRIM = 7,
+ GRAS_TSEPERF_OUTPUT_NULL_PRIM = 8,
+ GRAS_TSEPERF_OUTPUT_VISIBLE_PRIM = 9,
+ GRAS_TSEPERF_PRE_CLIP_PRIM = 10,
+ GRAS_TSEPERF_POST_CLIP_PRIM = 11,
+ GRAS_TSEPERF_WORKING_CYCLES = 12,
+ GRAS_TSEPERF_PC_STARVE = 13,
+ GRAS_TSERASPERF_STALL = 14,
+};
+
+enum a3xx_gras_ras_perfcounter_select {
+ GRAS_RASPERF_16X16_TILES = 0,
+ GRAS_RASPERF_8X8_TILES = 1,
+ GRAS_RASPERF_4X4_TILES = 2,
+ GRAS_RASPERF_WORKING_CYCLES = 3,
+ GRAS_RASPERF_STALL_CYCLES_BY_RB = 4,
+ GRAS_RASPERF_STALL_CYCLES_BY_VSC = 5,
+ GRAS_RASPERF_STARVE_CYCLES_BY_TSE = 6,
+};
+
+enum a3xx_hlsq_perfcounter_select {
+ HLSQ_PERF_SP_VS_CONSTANT = 0,
+ HLSQ_PERF_SP_VS_INSTRUCTIONS = 1,
+ HLSQ_PERF_SP_FS_CONSTANT = 2,
+ HLSQ_PERF_SP_FS_INSTRUCTIONS = 3,
+ HLSQ_PERF_TP_STATE = 4,
+ HLSQ_PERF_QUADS = 5,
+ HLSQ_PERF_PIXELS = 6,
+ HLSQ_PERF_VERTICES = 7,
+ HLSQ_PERF_FS8_THREADS = 8,
+ HLSQ_PERF_FS16_THREADS = 9,
+ HLSQ_PERF_FS32_THREADS = 10,
+ HLSQ_PERF_VS8_THREADS = 11,
+ HLSQ_PERF_VS16_THREADS = 12,
+ HLSQ_PERF_SP_VS_DATA_BYTES = 13,
+ HLSQ_PERF_SP_FS_DATA_BYTES = 14,
+ HLSQ_PERF_ACTIVE_CYCLES = 15,
+ HLSQ_PERF_STALL_CYCLES_SP_STATE = 16,
+ HLSQ_PERF_STALL_CYCLES_SP_VS = 17,
+ HLSQ_PERF_STALL_CYCLES_SP_FS = 18,
+ HLSQ_PERF_STALL_CYCLES_UCHE = 19,
+ HLSQ_PERF_RBBM_LOAD_CYCLES = 20,
+ HLSQ_PERF_DI_TO_VS_START_SP0 = 21,
+ HLSQ_PERF_DI_TO_FS_START_SP0 = 22,
+ HLSQ_PERF_VS_START_TO_DONE_SP0 = 23,
+ HLSQ_PERF_FS_START_TO_DONE_SP0 = 24,
+ HLSQ_PERF_SP_STATE_COPY_CYCLES_VS = 25,
+ HLSQ_PERF_SP_STATE_COPY_CYCLES_FS = 26,
+ HLSQ_PERF_UCHE_LATENCY_CYCLES = 27,
+ HLSQ_PERF_UCHE_LATENCY_COUNT = 28,
+};
+
+enum a3xx_pc_perfcounter_select {
+ PC_PCPERF_VISIBILITY_STREAMS = 0,
+ PC_PCPERF_TOTAL_INSTANCES = 1,
+ PC_PCPERF_PRIMITIVES_PC_VPC = 2,
+ PC_PCPERF_PRIMITIVES_KILLED_BY_VS = 3,
+ PC_PCPERF_PRIMITIVES_VISIBLE_BY_VS = 4,
+ PC_PCPERF_DRAWCALLS_KILLED_BY_VS = 5,
+ PC_PCPERF_DRAWCALLS_VISIBLE_BY_VS = 6,
+ PC_PCPERF_VERTICES_TO_VFD = 7,
+ PC_PCPERF_REUSED_VERTICES = 8,
+ PC_PCPERF_CYCLES_STALLED_BY_VFD = 9,
+ PC_PCPERF_CYCLES_STALLED_BY_TSE = 10,
+ PC_PCPERF_CYCLES_STALLED_BY_VBIF = 11,
+ PC_PCPERF_CYCLES_IS_WORKING = 12,
+};
+
+enum a3xx_rb_perfcounter_select {
+ RB_RBPERF_ACTIVE_CYCLES_ANY = 0,
+ RB_RBPERF_ACTIVE_CYCLES_ALL = 1,
+ RB_RBPERF_STARVE_CYCLES_BY_SP = 2,
+ RB_RBPERF_STARVE_CYCLES_BY_RAS = 3,
+ RB_RBPERF_STARVE_CYCLES_BY_MARB = 4,
+ RB_RBPERF_STALL_CYCLES_BY_MARB = 5,
+ RB_RBPERF_STALL_CYCLES_BY_HLSQ = 6,
+ RB_RBPERF_RB_MARB_DATA = 7,
+ RB_RBPERF_SP_RB_QUAD = 8,
+ RB_RBPERF_RAS_EARLY_Z_QUADS = 9,
+ RB_RBPERF_GMEM_CH0_READ = 10,
+ RB_RBPERF_GMEM_CH1_READ = 11,
+ RB_RBPERF_GMEM_CH0_WRITE = 12,
+ RB_RBPERF_GMEM_CH1_WRITE = 13,
+ RB_RBPERF_CP_CONTEXT_DONE = 14,
+ RB_RBPERF_CP_CACHE_FLUSH = 15,
+ RB_RBPERF_CP_ZPASS_DONE = 16,
+};
+
+enum a3xx_rbbm_perfcounter_select {
+ RBBM_ALAWYS_ON = 0,
+ RBBM_VBIF_BUSY = 1,
+ RBBM_TSE_BUSY = 2,
+ RBBM_RAS_BUSY = 3,
+ RBBM_PC_DCALL_BUSY = 4,
+ RBBM_PC_VSD_BUSY = 5,
+ RBBM_VFD_BUSY = 6,
+ RBBM_VPC_BUSY = 7,
+ RBBM_UCHE_BUSY = 8,
+ RBBM_VSC_BUSY = 9,
+ RBBM_HLSQ_BUSY = 10,
+ RBBM_ANY_RB_BUSY = 11,
+ RBBM_ANY_TEX_BUSY = 12,
+ RBBM_ANY_USP_BUSY = 13,
+ RBBM_ANY_MARB_BUSY = 14,
+ RBBM_ANY_ARB_BUSY = 15,
+ RBBM_AHB_STATUS_BUSY = 16,
+ RBBM_AHB_STATUS_STALLED = 17,
+ RBBM_AHB_STATUS_TXFR = 18,
+ RBBM_AHB_STATUS_TXFR_SPLIT = 19,
+ RBBM_AHB_STATUS_TXFR_ERROR = 20,
+ RBBM_AHB_STATUS_LONG_STALL = 21,
+ RBBM_RBBM_STATUS_MASKED = 22,
+};
+
+enum a3xx_sp_perfcounter_select {
+ SP_LM_LOAD_INSTRUCTIONS = 0,
+ SP_LM_STORE_INSTRUCTIONS = 1,
+ SP_LM_ATOMICS = 2,
+ SP_UCHE_LOAD_INSTRUCTIONS = 3,
+ SP_UCHE_STORE_INSTRUCTIONS = 4,
+ SP_UCHE_ATOMICS = 5,
+ SP_VS_TEX_INSTRUCTIONS = 6,
+ SP_VS_CFLOW_INSTRUCTIONS = 7,
+ SP_VS_EFU_INSTRUCTIONS = 8,
+ SP_VS_FULL_ALU_INSTRUCTIONS = 9,
+ SP_VS_HALF_ALU_INSTRUCTIONS = 10,
+ SP_FS_TEX_INSTRUCTIONS = 11,
+ SP_FS_CFLOW_INSTRUCTIONS = 12,
+ SP_FS_EFU_INSTRUCTIONS = 13,
+ SP_FS_FULL_ALU_INSTRUCTIONS = 14,
+ SP_FS_HALF_ALU_INSTRUCTIONS = 15,
+ SP_FS_BARY_INSTRUCTIONS = 16,
+ SP_VS_INSTRUCTIONS = 17,
+ SP_FS_INSTRUCTIONS = 18,
+ SP_ADDR_LOCK_COUNT = 19,
+ SP_UCHE_READ_TRANS = 20,
+ SP_UCHE_WRITE_TRANS = 21,
+ SP_EXPORT_VPC_TRANS = 22,
+ SP_EXPORT_RB_TRANS = 23,
+ SP_PIXELS_KILLED = 24,
+ SP_ICL1_REQUESTS = 25,
+ SP_ICL1_MISSES = 26,
+ SP_ICL0_REQUESTS = 27,
+ SP_ICL0_MISSES = 28,
+ SP_ALU_ACTIVE_CYCLES = 29,
+ SP_EFU_ACTIVE_CYCLES = 30,
+ SP_STALL_CYCLES_BY_VPC = 31,
+ SP_STALL_CYCLES_BY_TP = 32,
+ SP_STALL_CYCLES_BY_UCHE = 33,
+ SP_STALL_CYCLES_BY_RB = 34,
+ SP_ACTIVE_CYCLES_ANY = 35,
+ SP_ACTIVE_CYCLES_ALL = 36,
+};
+
+enum a3xx_tp_perfcounter_select {
+ TPL1_TPPERF_L1_REQUESTS = 0,
+ TPL1_TPPERF_TP0_L1_REQUESTS = 1,
+ TPL1_TPPERF_TP0_L1_MISSES = 2,
+ TPL1_TPPERF_TP1_L1_REQUESTS = 3,
+ TPL1_TPPERF_TP1_L1_MISSES = 4,
+ TPL1_TPPERF_TP2_L1_REQUESTS = 5,
+ TPL1_TPPERF_TP2_L1_MISSES = 6,
+ TPL1_TPPERF_TP3_L1_REQUESTS = 7,
+ TPL1_TPPERF_TP3_L1_MISSES = 8,
+ TPL1_TPPERF_OUTPUT_TEXELS_POINT = 9,
+ TPL1_TPPERF_OUTPUT_TEXELS_BILINEAR = 10,
+ TPL1_TPPERF_OUTPUT_TEXELS_MIP = 11,
+ TPL1_TPPERF_OUTPUT_TEXELS_ANISO = 12,
+ TPL1_TPPERF_BILINEAR_OPS = 13,
+ TPL1_TPPERF_QUADSQUADS_OFFSET = 14,
+ TPL1_TPPERF_QUADQUADS_SHADOW = 15,
+ TPL1_TPPERF_QUADS_ARRAY = 16,
+ TPL1_TPPERF_QUADS_PROJECTION = 17,
+ TPL1_TPPERF_QUADS_GRADIENT = 18,
+ TPL1_TPPERF_QUADS_1D2D = 19,
+ TPL1_TPPERF_QUADS_3DCUBE = 20,
+ TPL1_TPPERF_ZERO_LOD = 21,
+ TPL1_TPPERF_OUTPUT_TEXELS = 22,
+ TPL1_TPPERF_ACTIVE_CYCLES_ANY = 23,
+ TPL1_TPPERF_ACTIVE_CYCLES_ALL = 24,
+ TPL1_TPPERF_STALL_CYCLES_BY_ARB = 25,
+ TPL1_TPPERF_LATENCY = 26,
+ TPL1_TPPERF_LATENCY_TRANS = 27,
+};
+
+enum a3xx_vfd_perfcounter_select {
+ VFD_PERF_UCHE_BYTE_FETCHED = 0,
+ VFD_PERF_UCHE_TRANS = 1,
+ VFD_PERF_VPC_BYPASS_COMPONENTS = 2,
+ VFD_PERF_FETCH_INSTRUCTIONS = 3,
+ VFD_PERF_DECODE_INSTRUCTIONS = 4,
+ VFD_PERF_ACTIVE_CYCLES = 5,
+ VFD_PERF_STALL_CYCLES_UCHE = 6,
+ VFD_PERF_STALL_CYCLES_HLSQ = 7,
+ VFD_PERF_STALL_CYCLES_VPC_BYPASS = 8,
+ VFD_PERF_STALL_CYCLES_VPC_ALLOC = 9,
+};
+
+enum a3xx_vpc_perfcounter_select {
+ VPC_PERF_SP_LM_PRIMITIVES = 0,
+ VPC_PERF_COMPONENTS_FROM_SP = 1,
+ VPC_PERF_SP_LM_COMPONENTS = 2,
+ VPC_PERF_ACTIVE_CYCLES = 3,
+ VPC_PERF_STALL_CYCLES_LM = 4,
+ VPC_PERF_STALL_CYCLES_RAS = 5,
+};
+
+enum a3xx_uche_perfcounter_select {
+ UCHE_UCHEPERF_VBIF_READ_BEATS_TP = 0,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_VFD = 1,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_HLSQ = 2,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_MARB = 3,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_SP = 4,
+ UCHE_UCHEPERF_READ_REQUESTS_TP = 8,
+ UCHE_UCHEPERF_READ_REQUESTS_VFD = 9,
+ UCHE_UCHEPERF_READ_REQUESTS_HLSQ = 10,
+ UCHE_UCHEPERF_READ_REQUESTS_MARB = 11,
+ UCHE_UCHEPERF_READ_REQUESTS_SP = 12,
+ UCHE_UCHEPERF_WRITE_REQUESTS_MARB = 13,
+ UCHE_UCHEPERF_WRITE_REQUESTS_SP = 14,
+ UCHE_UCHEPERF_TAG_CHECK_FAILS = 15,
+ UCHE_UCHEPERF_EVICTS = 16,
+ UCHE_UCHEPERF_FLUSHES = 17,
+ UCHE_UCHEPERF_VBIF_LATENCY_CYCLES = 18,
+ UCHE_UCHEPERF_VBIF_LATENCY_SAMPLES = 19,
+ UCHE_UCHEPERF_ACTIVE_CYCLES = 20,
+};
+
+enum a3xx_intp_mode {
+ SMOOTH = 0,
+ FLAT = 1,
+ ZERO = 2,
+ ONE = 3,
+};
+
+enum a3xx_repl_mode {
+ S = 1,
+ T = 2,
+ ONE_T = 3,
+};
+
+enum a3xx_tex_filter {
+ A3XX_TEX_NEAREST = 0,
+ A3XX_TEX_LINEAR = 1,
+ A3XX_TEX_ANISO = 2,
+};
+
+enum a3xx_tex_clamp {
+ A3XX_TEX_REPEAT = 0,
+ A3XX_TEX_CLAMP_TO_EDGE = 1,
+ A3XX_TEX_MIRROR_REPEAT = 2,
+ A3XX_TEX_CLAMP_TO_BORDER = 3,
+ A3XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a3xx_tex_aniso {
+ A3XX_TEX_ANISO_1 = 0,
+ A3XX_TEX_ANISO_2 = 1,
+ A3XX_TEX_ANISO_4 = 2,
+ A3XX_TEX_ANISO_8 = 3,
+ A3XX_TEX_ANISO_16 = 4,
+};
+
+enum a3xx_tex_swiz {
+ A3XX_TEX_X = 0,
+ A3XX_TEX_Y = 1,
+ A3XX_TEX_Z = 2,
+ A3XX_TEX_W = 3,
+ A3XX_TEX_ZERO = 4,
+ A3XX_TEX_ONE = 5,
+};
+
+enum a3xx_tex_type {
+ A3XX_TEX_1D = 0,
+ A3XX_TEX_2D = 1,
+ A3XX_TEX_CUBE = 2,
+ A3XX_TEX_3D = 3,
+};
+
+enum a3xx_tex_msaa {
+ A3XX_TPL1_MSAA1X = 0,
+ A3XX_TPL1_MSAA2X = 1,
+ A3XX_TPL1_MSAA4X = 2,
+ A3XX_TPL1_MSAA8X = 3,
+};
+
+#define A3XX_INT0_RBBM_GPU_IDLE 0x00000001
+#define A3XX_INT0_RBBM_AHB_ERROR 0x00000002
+#define A3XX_INT0_RBBM_REG_TIMEOUT 0x00000004
+#define A3XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A3XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A3XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00000020
+#define A3XX_INT0_VFD_ERROR 0x00000040
+#define A3XX_INT0_CP_SW_INT 0x00000080
+#define A3XX_INT0_CP_T0_PACKET_IN_IB 0x00000100
+#define A3XX_INT0_CP_OPCODE_ERROR 0x00000200
+#define A3XX_INT0_CP_RESERVED_BIT_ERROR 0x00000400
+#define A3XX_INT0_CP_HW_FAULT 0x00000800
+#define A3XX_INT0_CP_DMA 0x00001000
+#define A3XX_INT0_CP_IB2_INT 0x00002000
+#define A3XX_INT0_CP_IB1_INT 0x00004000
+#define A3XX_INT0_CP_RB_INT 0x00008000
+#define A3XX_INT0_CP_REG_PROTECT_FAULT 0x00010000
+#define A3XX_INT0_CP_RB_DONE_TS 0x00020000
+#define A3XX_INT0_CP_VS_DONE_TS 0x00040000
+#define A3XX_INT0_CP_PS_DONE_TS 0x00080000
+#define A3XX_INT0_CACHE_FLUSH_TS 0x00100000
+#define A3XX_INT0_CP_AHB_ERROR_HALT 0x00200000
+#define A3XX_INT0_MISC_HANG_DETECT 0x01000000
+#define A3XX_INT0_UCHE_OOB_ACCESS 0x02000000
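+
+/* The A3XX_INT0_* bits above form the payload of the RBBM_INT_0_MASK and
+ * RBBM_INT_0_STATUS registers defined further down.  A driver enables the
+ * interrupt sources it cares about by OR-ing bits into the mask register;
+ * a hedged sketch (gpu_write() stands in for whatever register accessor
+ * the including driver provides):
+ *
+ *   uint32_t irq_mask = A3XX_INT0_RBBM_AHB_ERROR |
+ *                       A3XX_INT0_CP_HW_FAULT |
+ *                       A3XX_INT0_CACHE_FLUSH_TS;
+ *   gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, irq_mask);
+ */
+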
+#define REG_A3XX_RBBM_HW_VERSION 0x00000000
+
+#define REG_A3XX_RBBM_HW_RELEASE 0x00000001
+
+#define REG_A3XX_RBBM_HW_CONFIGURATION 0x00000002
+
+#define REG_A3XX_RBBM_CLOCK_CTL 0x00000010
+
+#define REG_A3XX_RBBM_SP_HYST_CNT 0x00000012
+
+#define REG_A3XX_RBBM_SW_RESET_CMD 0x00000018
+
+#define REG_A3XX_RBBM_AHB_CTL0 0x00000020
+
+#define REG_A3XX_RBBM_AHB_CTL1 0x00000021
+
+#define REG_A3XX_RBBM_AHB_CMD 0x00000022
+
+#define REG_A3XX_RBBM_AHB_ERROR_STATUS 0x00000027
+
+#define REG_A3XX_RBBM_GPR0_CTL 0x0000002e
+
+#define REG_A3XX_RBBM_STATUS 0x00000030
+#define A3XX_RBBM_STATUS_HI_BUSY 0x00000001
+#define A3XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
+#define A3XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
+#define A3XX_RBBM_STATUS_CP_NRT_BUSY 0x00004000
+#define A3XX_RBBM_STATUS_VBIF_BUSY 0x00008000
+#define A3XX_RBBM_STATUS_TSE_BUSY 0x00010000
+#define A3XX_RBBM_STATUS_RAS_BUSY 0x00020000
+#define A3XX_RBBM_STATUS_RB_BUSY 0x00040000
+#define A3XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
+#define A3XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
+#define A3XX_RBBM_STATUS_VFD_BUSY 0x00200000
+#define A3XX_RBBM_STATUS_VPC_BUSY 0x00400000
+#define A3XX_RBBM_STATUS_UCHE_BUSY 0x00800000
+#define A3XX_RBBM_STATUS_SP_BUSY 0x01000000
+#define A3XX_RBBM_STATUS_TPL1_BUSY 0x02000000
+#define A3XX_RBBM_STATUS_MARB_BUSY 0x04000000
+#define A3XX_RBBM_STATUS_VSC_BUSY 0x08000000
+#define A3XX_RBBM_STATUS_ARB_BUSY 0x10000000
+#define A3XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
+#define A3XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000
+#define A3XX_RBBM_STATUS_GPU_BUSY 0x80000000
+
+#define REG_A3XX_RBBM_NQWAIT_UNTIL 0x00000040
+
+#define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x00000033
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x00000050
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL0 0x00000051
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL1 0x00000054
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL2 0x00000057
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a
+
+#define REG_A3XX_RBBM_INT_SET_CMD 0x00000060
+
+#define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061
+
+#define REG_A3XX_RBBM_INT_0_MASK 0x00000063
+
+#define REG_A3XX_RBBM_INT_0_STATUS 0x00000064
+
+#define REG_A3XX_RBBM_PERFCTR_CTL 0x00000080
+#define A3XX_RBBM_PERFCTR_CTL_ENABLE 0x00000001
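+
+/* Performance-counter plumbing, as suggested by the registers that follow:
+ * counting is enabled globally via RBBM_PERFCTR_CTL, an event is chosen
+ * through the *_PERFCOUNTER*_SELECT registers (taking values from the
+ * a3xx_*_perfcounter_select enums above), and each counter reads back as a
+ * 64-bit value split across a _LO/_HI register pair.  An illustrative
+ * sketch, with gpu_read()/gpu_write() standing in for the driver's
+ * register accessors:
+ *
+ *   gpu_write(gpu, REG_A3XX_RBBM_PERFCOUNTER0_SELECT, RBBM_ALAWYS_ON);
+ *   gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, A3XX_RBBM_PERFCTR_CTL_ENABLE);
+ *   uint64_t count = gpu_read(gpu, REG_A3XX_RBBM_PERFCTR_RBBM_0_LO) |
+ *       ((uint64_t)gpu_read(gpu, REG_A3XX_RBBM_PERFCTR_RBBM_0_HI) << 32);
+ */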
+
+#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD0 0x00000081
+
+#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD1 0x00000082
+
+#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000084
+
+#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000085
+
+#define REG_A3XX_RBBM_PERFCOUNTER0_SELECT 0x00000086
+
+#define REG_A3XX_RBBM_PERFCOUNTER1_SELECT 0x00000087
+
+#define REG_A3XX_RBBM_GPU_BUSY_MASKED 0x00000088
+
+#define REG_A3XX_RBBM_PERFCTR_CP_0_LO 0x00000090
+
+#define REG_A3XX_RBBM_PERFCTR_CP_0_HI 0x00000091
+
+#define REG_A3XX_RBBM_PERFCTR_RBBM_0_LO 0x00000092
+
+#define REG_A3XX_RBBM_PERFCTR_RBBM_0_HI 0x00000093
+
+#define REG_A3XX_RBBM_PERFCTR_RBBM_1_LO 0x00000094
+
+#define REG_A3XX_RBBM_PERFCTR_RBBM_1_HI 0x00000095
+
+#define REG_A3XX_RBBM_PERFCTR_PC_0_LO 0x00000096
+
+#define REG_A3XX_RBBM_PERFCTR_PC_0_HI 0x00000097
+
+#define REG_A3XX_RBBM_PERFCTR_PC_1_LO 0x00000098
+
+#define REG_A3XX_RBBM_PERFCTR_PC_1_HI 0x00000099
+
+#define REG_A3XX_RBBM_PERFCTR_PC_2_LO 0x0000009a
+
+#define REG_A3XX_RBBM_PERFCTR_PC_2_HI 0x0000009b
+
+#define REG_A3XX_RBBM_PERFCTR_PC_3_LO 0x0000009c
+
+#define REG_A3XX_RBBM_PERFCTR_PC_3_HI 0x0000009d
+
+#define REG_A3XX_RBBM_PERFCTR_VFD_0_LO 0x0000009e
+
+#define REG_A3XX_RBBM_PERFCTR_VFD_0_HI 0x0000009f
+
+#define REG_A3XX_RBBM_PERFCTR_VFD_1_LO 0x000000a0
+
+#define REG_A3XX_RBBM_PERFCTR_VFD_1_HI 0x000000a1
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000a2
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000a3
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000a4
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000a5
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000a6
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000a7
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000a8
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000a9
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000aa
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000ab
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000ac
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000ad
+
+#define REG_A3XX_RBBM_PERFCTR_VPC_0_LO 0x000000ae
+
+#define REG_A3XX_RBBM_PERFCTR_VPC_0_HI 0x000000af
+
+#define REG_A3XX_RBBM_PERFCTR_VPC_1_LO 0x000000b0
+
+#define REG_A3XX_RBBM_PERFCTR_VPC_1_HI 0x000000b1
+
+#define REG_A3XX_RBBM_PERFCTR_TSE_0_LO 0x000000b2
+
+#define REG_A3XX_RBBM_PERFCTR_TSE_0_HI 0x000000b3
+
+#define REG_A3XX_RBBM_PERFCTR_TSE_1_LO 0x000000b4
+
+#define REG_A3XX_RBBM_PERFCTR_TSE_1_HI 0x000000b5
+
+#define REG_A3XX_RBBM_PERFCTR_RAS_0_LO 0x000000b6
+
+#define REG_A3XX_RBBM_PERFCTR_RAS_0_HI 0x000000b7
+
+#define REG_A3XX_RBBM_PERFCTR_RAS_1_LO 0x000000b8
+
+#define REG_A3XX_RBBM_PERFCTR_RAS_1_HI 0x000000b9
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_0_LO 0x000000ba
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_0_HI 0x000000bb
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_1_LO 0x000000bc
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_1_HI 0x000000bd
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_2_LO 0x000000be
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_2_HI 0x000000bf
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_3_LO 0x000000c0
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_3_HI 0x000000c1
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_4_LO 0x000000c2
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_4_HI 0x000000c3
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_5_LO 0x000000c4
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_5_HI 0x000000c5
+
+#define REG_A3XX_RBBM_PERFCTR_TP_0_LO 0x000000c6
+
+#define REG_A3XX_RBBM_PERFCTR_TP_0_HI 0x000000c7
+
+#define REG_A3XX_RBBM_PERFCTR_TP_1_LO 0x000000c8
+
+#define REG_A3XX_RBBM_PERFCTR_TP_1_HI 0x000000c9
+
+#define REG_A3XX_RBBM_PERFCTR_TP_2_LO 0x000000ca
+
+#define REG_A3XX_RBBM_PERFCTR_TP_2_HI 0x000000cb
+
+#define REG_A3XX_RBBM_PERFCTR_TP_3_LO 0x000000cc
+
+#define REG_A3XX_RBBM_PERFCTR_TP_3_HI 0x000000cd
+
+#define REG_A3XX_RBBM_PERFCTR_TP_4_LO 0x000000ce
+
+#define REG_A3XX_RBBM_PERFCTR_TP_4_HI 0x000000cf
+
+#define REG_A3XX_RBBM_PERFCTR_TP_5_LO 0x000000d0
+
+#define REG_A3XX_RBBM_PERFCTR_TP_5_HI 0x000000d1
+
+#define REG_A3XX_RBBM_PERFCTR_SP_0_LO 0x000000d2
+
+#define REG_A3XX_RBBM_PERFCTR_SP_0_HI 0x000000d3
+
+#define REG_A3XX_RBBM_PERFCTR_SP_1_LO 0x000000d4
+
+#define REG_A3XX_RBBM_PERFCTR_SP_1_HI 0x000000d5
+
+#define REG_A3XX_RBBM_PERFCTR_SP_2_LO 0x000000d6
+
+#define REG_A3XX_RBBM_PERFCTR_SP_2_HI 0x000000d7
+
+#define REG_A3XX_RBBM_PERFCTR_SP_3_LO 0x000000d8
+
+#define REG_A3XX_RBBM_PERFCTR_SP_3_HI 0x000000d9
+
+#define REG_A3XX_RBBM_PERFCTR_SP_4_LO 0x000000da
+
+#define REG_A3XX_RBBM_PERFCTR_SP_4_HI 0x000000db
+
+#define REG_A3XX_RBBM_PERFCTR_SP_5_LO 0x000000dc
+
+#define REG_A3XX_RBBM_PERFCTR_SP_5_HI 0x000000dd
+
+#define REG_A3XX_RBBM_PERFCTR_SP_6_LO 0x000000de
+
+#define REG_A3XX_RBBM_PERFCTR_SP_6_HI 0x000000df
+
+#define REG_A3XX_RBBM_PERFCTR_SP_7_LO 0x000000e0
+
+#define REG_A3XX_RBBM_PERFCTR_SP_7_HI 0x000000e1
+
+#define REG_A3XX_RBBM_PERFCTR_RB_0_LO 0x000000e2
+
+#define REG_A3XX_RBBM_PERFCTR_RB_0_HI 0x000000e3
+
+#define REG_A3XX_RBBM_PERFCTR_RB_1_LO 0x000000e4
+
+#define REG_A3XX_RBBM_PERFCTR_RB_1_HI 0x000000e5
+
+#define REG_A3XX_RBBM_PERFCTR_PWR_0_LO 0x000000ea
+
+#define REG_A3XX_RBBM_PERFCTR_PWR_0_HI 0x000000eb
+
+#define REG_A3XX_RBBM_PERFCTR_PWR_1_LO 0x000000ec
+
+#define REG_A3XX_RBBM_PERFCTR_PWR_1_HI 0x000000ed
+
+#define REG_A3XX_RBBM_RBBM_CTL 0x00000100
+
+#define REG_A3XX_RBBM_DEBUG_BUS_CTL 0x00000111
+
+#define REG_A3XX_RBBM_DEBUG_BUS_DATA_STATUS 0x00000112
+
+#define REG_A3XX_CP_PFP_UCODE_ADDR 0x000001c9
+
+#define REG_A3XX_CP_PFP_UCODE_DATA 0x000001ca
+
+#define REG_A3XX_CP_ROQ_ADDR 0x000001cc
+
+#define REG_A3XX_CP_ROQ_DATA 0x000001cd
+
+#define REG_A3XX_CP_MERCIU_ADDR 0x000001d1
+
+#define REG_A3XX_CP_MERCIU_DATA 0x000001d2
+
+#define REG_A3XX_CP_MERCIU_DATA2 0x000001d3
+
+#define REG_A3XX_CP_MEQ_ADDR 0x000001da
+
+#define REG_A3XX_CP_MEQ_DATA 0x000001db
+
+#define REG_A3XX_CP_WFI_PEND_CTR 0x000001f5
+
+#define REG_A3XX_RBBM_PM_OVERRIDE2 0x0000039d
+
+#define REG_A3XX_CP_PERFCOUNTER_SELECT 0x00000445
+
+#define REG_A3XX_CP_HW_FAULT 0x0000045c
+
+#define REG_A3XX_CP_PROTECT_CTRL 0x0000045e
+
+#define REG_A3XX_CP_PROTECT_STATUS 0x0000045f
+
+static inline uint32_t REG_A3XX_CP_PROTECT(uint32_t i0) { return 0x00000460 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460 + 0x1*i0; }
+
+#define REG_A3XX_CP_AHB_FAULT 0x0000054d
+
+#define REG_A3XX_SQ_GPR_MANAGEMENT 0x00000d00
+
+#define REG_A3XX_SQ_INST_STORE_MANAGMENT 0x00000d02
+
+#define REG_A3XX_TP0_CHICKEN 0x00000e1e
+
+#define REG_A3XX_SP_GLOBAL_MEM_SIZE 0x00000e22
+
+#define REG_A3XX_SP_GLOBAL_MEM_ADDR 0x00000e23
+
+#define REG_A3XX_GRAS_CL_CLIP_CNTL 0x00002040
+#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 0x00001000
+#define A3XX_GRAS_CL_CLIP_CNTL_IJ_NON_PERSP_CENTER 0x00002000
+#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTROID 0x00004000
+#define A3XX_GRAS_CL_CLIP_CNTL_IJ_NON_PERSP_CENTROID 0x00008000
+#define A3XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
+#define A3XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
+#define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000
+#define A3XX_GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 0x00100000
+#define A3XX_GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 0x00200000
+#define A3XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000
+#define A3XX_GRAS_CL_CLIP_CNTL_ZCOORD 0x00800000
+#define A3XX_GRAS_CL_CLIP_CNTL_WCOORD 0x01000000
+#define A3XX_GRAS_CL_CLIP_CNTL_ZCLIP_DISABLE 0x02000000
+#define A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__MASK 0x1c000000
+#define A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__SHIFT 26
+static inline uint32_t A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__SHIFT) & A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__MASK;
+}
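+
+/* Field-packing convention used throughout this header: every multi-bit
+ * field FOO gets FOO__MASK and FOO__SHIFT defines plus an inline FOO(val)
+ * helper that shifts the value into position and masks off any overflow,
+ * so register words are built by OR-ing helpers and single-bit defines
+ * together.  For example (illustrative only):
+ *
+ *   uint32_t v = A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES(2) |
+ *                A3XX_GRAS_CL_CLIP_CNTL_ZCLIP_DISABLE;
+ *
+ * yields (2 << 26) | 0x02000000; the helpers never touch hardware, they
+ * only compute the register word.
+ */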
+
+#define REG_A3XX_GRAS_CL_GB_CLIP_ADJ 0x00002044
+#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK;
+}
+#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_XOFFSET 0x00002048
+#define A3XX_GRAS_CL_VPORT_XOFFSET__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_XOFFSET(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_XOFFSET__MASK;
+}
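+
+/* Float-valued registers such as this viewport scale/offset group store a
+ * raw IEEE-754 single-precision bit pattern; fui() ("float to uint") is
+ * assumed to be a bit-reinterpreting helper supplied by the including
+ * code, so e.g. A3XX_GRAS_CL_VPORT_XOFFSET(1.0f) evaluates to 0x3f800000.
+ */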
+
+#define REG_A3XX_GRAS_CL_VPORT_XSCALE 0x00002049
+#define A3XX_GRAS_CL_VPORT_XSCALE__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_XSCALE__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_XSCALE(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_XSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_XSCALE__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_YOFFSET 0x0000204a
+#define A3XX_GRAS_CL_VPORT_YOFFSET__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_YOFFSET(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_YOFFSET__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_YSCALE 0x0000204b
+#define A3XX_GRAS_CL_VPORT_YSCALE__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_YSCALE__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_YSCALE(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_YSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_YSCALE__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_ZOFFSET 0x0000204c
+#define A3XX_GRAS_CL_VPORT_ZOFFSET__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_ZOFFSET(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_ZOFFSET__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_ZSCALE 0x0000204d
+#define A3XX_GRAS_CL_VPORT_ZSCALE__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_ZSCALE__MASK;
+}
+
+#define REG_A3XX_GRAS_SU_POINT_MINMAX 0x00002068
+#define A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
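+
+/* Point min/max (and point size below) are unsigned fixed-point values
+ * with 4 fractional bits: the helpers multiply by 16.0 and truncate, so
+ * A3XX_GRAS_SU_POINT_MINMAX_MIN(1.5f) encodes as 24 (0x18) in the low
+ * half-word.
+ */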
+
+#define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069
+#define A3XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A3XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((int32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_SIZE__SHIFT) & A3XX_GRAS_SU_POINT_SIZE__MASK;
+}
+
+#define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c
+#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK 0x00ffffff
+#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT 0
+static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
+{
+ return ((((int32_t)(val * 1048576.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
+}
+
+#define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000206d
+#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((((int32_t)(val * 64.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070
+#define A3XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
+#define A3XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
+#define A3XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004
+#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8
+#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
+{
+ return ((((int32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
+}
+#define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
+
+#define REG_A3XX_GRAS_SC_CONTROL 0x00002072
+#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x000000f0
+#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 4
+static inline uint32_t A3XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
+{
+ return ((val) << A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK;
+}
+#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000f00
+#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 8
+static inline uint32_t A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK;
+}
+#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000
+#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12
+static inline uint32_t A3XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK;
+}
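+
+/* Typical composition of this register (illustrative; RB_RENDERING_PASS
+ * and MSAA_ONE are assumed from enums defined elsewhere in these
+ * headers):
+ *
+ *   uint32_t v = A3XX_GRAS_SC_CONTROL_RENDER_MODE(RB_RENDERING_PASS) |
+ *                A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES(MSAA_ONE) |
+ *                A3XX_GRAS_SC_CONTROL_RASTER_MODE(0);
+ */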
+
+#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_TL 0x00002074
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
+}
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_BR 0x00002075
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
+}
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_TL 0x00002079
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000207a
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A3XX_RB_MODE_CONTROL 0x000020c0
+#define A3XX_RB_MODE_CONTROL_GMEM_BYPASS 0x00000080
+#define A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK 0x00000700
+#define A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT 8
+static inline uint32_t A3XX_RB_MODE_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
+{
+ return ((val) << A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT) & A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK;
+}
+#define A3XX_RB_MODE_CONTROL_MRT__MASK 0x00003000
+#define A3XX_RB_MODE_CONTROL_MRT__SHIFT 12
+static inline uint32_t A3XX_RB_MODE_CONTROL_MRT(uint32_t val)
+{
+ return ((val) << A3XX_RB_MODE_CONTROL_MRT__SHIFT) & A3XX_RB_MODE_CONTROL_MRT__MASK;
+}
+#define A3XX_RB_MODE_CONTROL_MARB_CACHE_SPLIT_MODE 0x00008000
+#define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000
+
+#define REG_A3XX_RB_RENDER_CONTROL 0x000020c1
+#define A3XX_RB_RENDER_CONTROL_DUAL_COLOR_IN_ENABLE 0x00000001
+#define A3XX_RB_RENDER_CONTROL_YUV_IN_ENABLE 0x00000002
+#define A3XX_RB_RENDER_CONTROL_COV_VALUE_INPUT_ENABLE 0x00000004
+#define A3XX_RB_RENDER_CONTROL_FACENESS 0x00000008
+#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK 0x00000ff0
+#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4
+static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT) & A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK;
+}
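+/* BIN_WIDTH is programmed in units of 32 pixels: the helper pre-divides
+ * the pixel width by 32 (val >> 5) before shifting it into place, so a
+ * 256-pixel-wide bin encodes as field value 8.
+ */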
+#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000
+#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000
+#define A3XX_RB_RENDER_CONTROL_COORD_MASK__MASK 0x0003c000
+#define A3XX_RB_RENDER_CONTROL_COORD_MASK__SHIFT 14
+static inline uint32_t A3XX_RB_RENDER_CONTROL_COORD_MASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_RENDER_CONTROL_COORD_MASK__SHIFT) & A3XX_RB_RENDER_CONTROL_COORD_MASK__MASK;
+}
+#define A3XX_RB_RENDER_CONTROL_I_CLAMP_ENABLE 0x00080000
+#define A3XX_RB_RENDER_CONTROL_COV_VALUE_OUTPUT_ENABLE 0x00100000
+#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST 0x00400000
+#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000
+#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24
+static inline uint32_t A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+#define A3XX_RB_RENDER_CONTROL_ALPHA_TO_COVERAGE 0x40000000
+#define A3XX_RB_RENDER_CONTROL_ALPHA_TO_ONE 0x80000000
+
+#define REG_A3XX_RB_MSAA_CONTROL 0x000020c2
+#define A3XX_RB_MSAA_CONTROL_DISABLE 0x00000400
+#define A3XX_RB_MSAA_CONTROL_SAMPLES__MASK 0x0000f000
+#define A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT 12
+static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLES__MASK;
+}
+#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK 0xffff0000
+#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK;
+}
+
+#define REG_A3XX_RB_ALPHA_REF 0x000020c3
+#define A3XX_RB_ALPHA_REF_UINT__MASK 0x0000ff00
+#define A3XX_RB_ALPHA_REF_UINT__SHIFT 8
+static inline uint32_t A3XX_RB_ALPHA_REF_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_ALPHA_REF_UINT__SHIFT) & A3XX_RB_ALPHA_REF_UINT__MASK;
+}
+#define A3XX_RB_ALPHA_REF_FLOAT__MASK 0xffff0000
+#define A3XX_RB_ALPHA_REF_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_ALPHA_REF_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A3XX_RB_ALPHA_REF_FLOAT__SHIFT) & A3XX_RB_ALPHA_REF_FLOAT__MASK;
+}
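+
+/* The *_FLOAT halves of the alpha-ref and blend-color registers hold
+ * 16-bit half-precision values; _mesa_float_to_half() is assumed to be
+ * provided by the including code (it originates in Mesa's half-float
+ * utilities).  For example, a reference value of 1.0f encodes as 0x3c00
+ * in the upper half-word.
+ */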
+
+static inline uint32_t REG_A3XX_RB_MRT(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
+#define A3XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
+#define A3XX_RB_MRT_CONTROL_BLEND 0x00000010
+#define A3XX_RB_MRT_CONTROL_BLEND2 0x00000020
+#define A3XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00
+#define A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8
+static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+ return ((val) << A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A3XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
+#define A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK 0x00003000
+#define A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT 12
+static inline uint32_t A3XX_RB_MRT_CONTROL_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT) & A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK;
+}
+#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000
+#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24
+static inline uint32_t A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A3XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020c5 + 0x4*i0; }
+#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000003f
+#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a3xx_color_fmt val)
+{
+ return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x000000c0
+#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 6
+static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a3xx_tile_mode val)
+{
+ return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+#define A3XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00004000
+#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xfffe0000
+#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 17
+static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
+}
+
+static inline uint32_t REG_A3XX_RB_MRT_BUF_BASE(uint32_t i0) { return 0x000020c6 + 0x4*i0; }
+#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK 0xfffffff0
+#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT 4
+static inline uint32_t A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT) & A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK;
+}
+
+static inline uint32_t REG_A3XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020c7 + 0x4*i0; }
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_CLAMP_ENABLE 0x20000000
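+
+/* A hedged example of classic src-alpha blending for one MRT (the
+ * FACTOR_* and BLEND_* names are assumed from enums defined elsewhere in
+ * these headers; gpu_write() stands in for the driver's accessor):
+ *
+ *   uint32_t v =
+ *       A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(FACTOR_SRC_ALPHA) |
+ *       A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(BLEND_DST_PLUS_SRC) |
+ *       A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(FACTOR_ONE_MINUS_SRC_ALPHA) |
+ *       A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(FACTOR_ONE) |
+ *       A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(BLEND_DST_PLUS_SRC) |
+ *       A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(FACTOR_ZERO);
+ *   gpu_write(gpu, REG_A3XX_RB_MRT_BLEND_CONTROL(0), v);
+ */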
+
+#define REG_A3XX_RB_BLEND_RED 0x000020e4
+#define A3XX_RB_BLEND_RED_UINT__MASK 0x000000ff
+#define A3XX_RB_BLEND_RED_UINT__SHIFT 0
+static inline uint32_t A3XX_RB_BLEND_RED_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_BLEND_RED_UINT__SHIFT) & A3XX_RB_BLEND_RED_UINT__MASK;
+}
+#define A3XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
+#define A3XX_RB_BLEND_RED_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_BLEND_RED_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_RED_FLOAT__SHIFT) & A3XX_RB_BLEND_RED_FLOAT__MASK;
+}
+
+#define REG_A3XX_RB_BLEND_GREEN 0x000020e5
+#define A3XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
+#define A3XX_RB_BLEND_GREEN_UINT__SHIFT 0
+static inline uint32_t A3XX_RB_BLEND_GREEN_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_BLEND_GREEN_UINT__SHIFT) & A3XX_RB_BLEND_GREEN_UINT__MASK;
+}
+#define A3XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
+#define A3XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_BLEND_GREEN_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A3XX_RB_BLEND_GREEN_FLOAT__MASK;
+}
+
+#define REG_A3XX_RB_BLEND_BLUE 0x000020e6
+#define A3XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
+#define A3XX_RB_BLEND_BLUE_UINT__SHIFT 0
+static inline uint32_t A3XX_RB_BLEND_BLUE_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_BLEND_BLUE_UINT__SHIFT) & A3XX_RB_BLEND_BLUE_UINT__MASK;
+}
+#define A3XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
+#define A3XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_BLEND_BLUE_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A3XX_RB_BLEND_BLUE_FLOAT__MASK;
+}
+
+#define REG_A3XX_RB_BLEND_ALPHA 0x000020e7
+#define A3XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
+#define A3XX_RB_BLEND_ALPHA_UINT__SHIFT 0
+static inline uint32_t A3XX_RB_BLEND_ALPHA_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_BLEND_ALPHA_UINT__SHIFT) & A3XX_RB_BLEND_ALPHA_UINT__MASK;
+}
+#define A3XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
+#define A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_BLEND_ALPHA_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK;
+}
+
+#define REG_A3XX_RB_CLEAR_COLOR_DW0 0x000020e8
+
+#define REG_A3XX_RB_CLEAR_COLOR_DW1 0x000020e9
+
+#define REG_A3XX_RB_CLEAR_COLOR_DW2 0x000020ea
+
+#define REG_A3XX_RB_CLEAR_COLOR_DW3 0x000020eb
+
+#define REG_A3XX_RB_COPY_CONTROL 0x000020ec
+#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003
+#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT 0
+static inline uint32_t A3XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val)
+{
+ return ((val) << A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK;
+}
+#define A3XX_RB_COPY_CONTROL_DEPTHCLEAR 0x00000008
+#define A3XX_RB_COPY_CONTROL_MODE__MASK 0x00000070
+#define A3XX_RB_COPY_CONTROL_MODE__SHIFT 4
+static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
+{
+ return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
+}
+#define A3XX_RB_COPY_CONTROL_MSAA_SRGB_DOWNSAMPLE 0x00000080
+#define A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00
+#define A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8
+static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
+{
+ return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
+}
+#define A3XX_RB_COPY_CONTROL_DEPTH32_RESOLVE 0x00001000
+#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
+#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
+static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
+{
+ return ((val >> 14) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
+}
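+
+/* GMEM_BASE drops the low 14 bits of the supplied offset (val >> 14,
+ * shifted straight back into the same bit positions by the mask/shift),
+ * i.e. the GMEM base must be 16 KiB aligned for the value to round-trip
+ * losslessly.
+ */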
+
+#define REG_A3XX_RB_COPY_DEST_BASE 0x000020ed
+#define A3XX_RB_COPY_DEST_BASE_BASE__MASK 0xfffffff0
+#define A3XX_RB_COPY_DEST_BASE_BASE__SHIFT 4
+static inline uint32_t A3XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A3XX_RB_COPY_DEST_BASE_BASE__MASK;
+}
+
+#define REG_A3XX_RB_COPY_DEST_PITCH 0x000020ee
+#define A3XX_RB_COPY_DEST_PITCH_PITCH__MASK 0xffffffff
+#define A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0
+static inline uint32_t A3XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A3XX_RB_COPY_DEST_PITCH_PITCH__MASK;
+}
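+
+/* The copy-destination pitch is encoded in 32-byte units (val >> 5): a
+ * 2048-byte scanline pitch is written as 64.
+ */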
+
+#define REG_A3XX_RB_COPY_DEST_INFO 0x000020ef
+#define A3XX_RB_COPY_DEST_INFO_TILE__MASK 0x00000003
+#define A3XX_RB_COPY_DEST_INFO_TILE__SHIFT 0
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_TILE(enum a3xx_tile_mode val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A3XX_RB_COPY_DEST_INFO_TILE__MASK;
+}
+#define A3XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000fc
+#define A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 2
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_FORMAT(enum a3xx_color_fmt val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A3XX_RB_COPY_DEST_INFO_FORMAT__MASK;
+}
+#define A3XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
+#define A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A3XX_RB_COPY_DEST_INFO_SWAP__MASK;
+}
+#define A3XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
+#define A3XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A3XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
+}
+#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000
+#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK;
+}
+#define A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK 0x001c0000
+#define A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT 18
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK;
+}
+
+#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100
+#define A3XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z 0x00000001
+#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x00000002
+#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
+#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00000008
+#define A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070
+#define A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4
+static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
+}
+#define A3XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080
+#define A3XX_RB_DEPTH_CONTROL_Z_READ_ENABLE 0x80000000
+
+#define REG_A3XX_RB_DEPTH_CLEAR 0x00002101
+
+#define REG_A3XX_RB_DEPTH_INFO 0x00002102
+#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000003
+#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
+{
+ return ((val) << A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
+}
+#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff800
+#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 11
+static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
+{
+ return ((val >> 12) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+}
+
+#define REG_A3XX_RB_DEPTH_PITCH 0x00002103
+#define A3XX_RB_DEPTH_PITCH__MASK 0xffffffff
+#define A3XX_RB_DEPTH_PITCH__SHIFT 0
+static inline uint32_t A3XX_RB_DEPTH_PITCH(uint32_t val)
+{
+ return ((val >> 3) << A3XX_RB_DEPTH_PITCH__SHIFT) & A3XX_RB_DEPTH_PITCH__MASK;
+}
+
+#define REG_A3XX_RB_STENCIL_CONTROL 0x00002104
+#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A3XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A3XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
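+
+/* Front and back stencil state live in one register; a minimal sketch of
+ * "always pass, keep everything" for both faces (FUNC_ALWAYS and
+ * STENCIL_KEEP are assumed from the adreno_common enums):
+ *
+ *   uint32_t v = A3XX_RB_STENCIL_CONTROL_FUNC(FUNC_ALWAYS) |
+ *                A3XX_RB_STENCIL_CONTROL_FAIL(STENCIL_KEEP) |
+ *                A3XX_RB_STENCIL_CONTROL_ZPASS(STENCIL_KEEP) |
+ *                A3XX_RB_STENCIL_CONTROL_ZFAIL(STENCIL_KEEP) |
+ *                A3XX_RB_STENCIL_CONTROL_FUNC_BF(FUNC_ALWAYS) |
+ *                A3XX_RB_STENCIL_CONTROL_FAIL_BF(STENCIL_KEEP) |
+ *                A3XX_RB_STENCIL_CONTROL_ZPASS_BF(STENCIL_KEEP) |
+ *                A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(STENCIL_KEEP);
+ */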
+
+#define REG_A3XX_RB_STENCIL_CLEAR 0x00002105
+
+#define REG_A3XX_RB_STENCIL_INFO 0x00002106
+#define A3XX_RB_STENCIL_INFO_STENCIL_BASE__MASK 0xfffff800
+#define A3XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 11
+static inline uint32_t A3XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
+{
+ return ((val >> 12) << A3XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A3XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
+}
+
+#define REG_A3XX_RB_STENCIL_PITCH 0x00002107
+#define A3XX_RB_STENCIL_PITCH__MASK 0xffffffff
+#define A3XX_RB_STENCIL_PITCH__SHIFT 0
+static inline uint32_t A3XX_RB_STENCIL_PITCH(uint32_t val)
+{
+ return ((val >> 3) << A3XX_RB_STENCIL_PITCH__SHIFT) & A3XX_RB_STENCIL_PITCH__MASK;
+}
+
+#define REG_A3XX_RB_STENCILREFMASK 0x00002108
+#define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A3XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A3XX_RB_STENCILREFMASK_BF 0x00002109
+#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
+#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
+static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
+}
+#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
+#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
+static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
+}
+#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
+#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A3XX_RB_LRZ_VSC_CONTROL 0x0000210c
+#define A3XX_RB_LRZ_VSC_CONTROL_BINNING_ENABLE 0x00000002
+
+#define REG_A3XX_RB_WINDOW_OFFSET 0x0000210e
+#define A3XX_RB_WINDOW_OFFSET_X__MASK 0x0000ffff
+#define A3XX_RB_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A3XX_RB_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A3XX_RB_WINDOW_OFFSET_X__SHIFT) & A3XX_RB_WINDOW_OFFSET_X__MASK;
+}
+#define A3XX_RB_WINDOW_OFFSET_Y__MASK 0xffff0000
+#define A3XX_RB_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A3XX_RB_WINDOW_OFFSET_Y__SHIFT) & A3XX_RB_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A3XX_RB_SAMPLE_COUNT_CONTROL 0x00002110
+#define A3XX_RB_SAMPLE_COUNT_CONTROL_RESET 0x00000001
+#define A3XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
+
+#define REG_A3XX_RB_SAMPLE_COUNT_ADDR 0x00002111
+
+#define REG_A3XX_RB_Z_CLAMP_MIN 0x00002114
+
+#define REG_A3XX_RB_Z_CLAMP_MAX 0x00002115
+
+#define REG_A3XX_VGT_BIN_BASE 0x000021e1
+
+#define REG_A3XX_VGT_BIN_SIZE 0x000021e2
+
+#define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4
+#define A3XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000
+#define A3XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16
+static inline uint32_t A3XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val)
+{
+ return ((val) << A3XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A3XX_PC_VSTREAM_CONTROL_SIZE__MASK;
+}
+#define A3XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000
+#define A3XX_PC_VSTREAM_CONTROL_N__SHIFT 22
+static inline uint32_t A3XX_PC_VSTREAM_CONTROL_N(uint32_t val)
+{
+ return ((val) << A3XX_PC_VSTREAM_CONTROL_N__SHIFT) & A3XX_PC_VSTREAM_CONTROL_N__MASK;
+}
+
+#define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea
+
+#define REG_A3XX_PC_PRIM_VTX_CNTL 0x000021ec
+#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK 0x0000001f
+#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK 0x000000e0
+#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT 5
+static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK;
+}
+#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK 0x00000700
+#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT 8
+static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK;
+}
+#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_ENABLE 0x00001000
+#define A3XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART 0x00100000
+#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
+#define A3XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
+
+#define REG_A3XX_PC_RESTART_INDEX 0x000021ed
+
+#define REG_A3XX_HLSQ_CONTROL_0_REG 0x00002200
+#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000030
+#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
+}
+#define A3XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
+#define A3XX_HLSQ_CONTROL_0_REG_COMPUTEMODE 0x00000100
+#define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
+#define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
+#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK 0x00fff000
+#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT 12
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK;
+}
+#define A3XX_HLSQ_CONTROL_0_REG_FSONLYTEX 0x02000000
+#define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
+#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000
+#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK;
+}
+#define A3XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
+#define A3XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
+#define A3XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
+#define A3XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000
+
+#define REG_A3XX_HLSQ_CONTROL_1_REG 0x00002201
+#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x000000c0
+#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
+}
+#define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK 0x00ff0000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK 0xff000000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT 24
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK;
+}
+
+#define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202
+#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK 0x000003fc
+#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT 2
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK 0x03fc0000
+#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT 18
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
+#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK;
+}
+
+#define REG_A3XX_HLSQ_CONTROL_3_REG 0x00002203
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__MASK 0x000000ff
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__MASK 0x0000ff00
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__SHIFT 8
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__MASK 0x00ff0000
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__MASK 0xff000000
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__SHIFT 24
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__MASK;
+}
+
+#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x000003ff
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x001ff000
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
+static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK;
+}
+#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A3XX_HLSQ_FS_CONTROL_REG 0x00002205
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x000003ff
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x001ff000
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
+static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK;
+}
+#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG 0x00002206
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x000001ff
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK;
+}
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0x01ff0000
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK;
+}
+
+#define REG_A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x00002207
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x000001ff
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK;
+}
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0x01ff0000
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK;
+}
+
+#define REG_A3XX_HLSQ_CL_NDRANGE_0_REG 0x0000220a
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__MASK 0x00000003
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__MASK;
+}
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__MASK 0x00000ffc
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__SHIFT 2
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__MASK;
+}
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__MASK 0x003ff000
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__SHIFT 12
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__MASK;
+}
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__MASK 0xffc00000
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__SHIFT 22
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__MASK;
+}
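+/* Editorial note: HLSQ_CL_NDRANGE_0 appears to pack the compute work
+ * dimension and the three 10-bit local work-group sizes into one word;
+ * whether the hardware expects the raw size or size-1 is not documented
+ * here.  Illustrative packing for a 2D kernel with an 8x8 local size:
+ *
+ *   uint32_t ndr0 = A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM(2) |
+ *                   A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0(8) |
+ *                   A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1(8);
+ */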
+
+static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK(uint32_t i0) { return 0x0000220b + 0x2*i0; }
+
+static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_SIZE(uint32_t i0) { return 0x0000220b + 0x2*i0; }
+
+static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_OFFSET(uint32_t i0) { return 0x0000220c + 0x2*i0; }
+
+#define REG_A3XX_HLSQ_CL_CONTROL_0_REG 0x00002211
+
+#define REG_A3XX_HLSQ_CL_CONTROL_1_REG 0x00002212
+
+#define REG_A3XX_HLSQ_CL_KERNEL_CONST_REG 0x00002214
+
+static inline uint32_t REG_A3XX_HLSQ_CL_KERNEL_GROUP(uint32_t i0) { return 0x00002215 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_HLSQ_CL_KERNEL_GROUP_RATIO(uint32_t i0) { return 0x00002215 + 0x1*i0; }
+
+#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG 0x00002216
+
+#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x00002217
+
+#define REG_A3XX_HLSQ_CL_WG_OFFSET_REG 0x0000221a
+
+#define REG_A3XX_VFD_CONTROL_0 0x00002240
+#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x0003ffff
+#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0
+static inline uint32_t A3XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT) & A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK;
+}
+#define A3XX_VFD_CONTROL_0_PACKETSIZE__MASK 0x003c0000
+#define A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT 18
+static inline uint32_t A3XX_VFD_CONTROL_0_PACKETSIZE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT) & A3XX_VFD_CONTROL_0_PACKETSIZE__MASK;
+}
+#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x07c00000
+#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 22
+static inline uint32_t A3XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
+}
+#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK 0xf8000000
+#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT 27
+static inline uint32_t A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK;
+}
+
+#define REG_A3XX_VFD_CONTROL_1 0x00002241
+#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000000f
+#define A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
+static inline uint32_t A3XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
+}
+#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK 0x000000f0
+#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT 4
+static inline uint32_t A3XX_VFD_CONTROL_1_MAXTHRESHOLD(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK;
+}
+#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK 0x00000f00
+#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT 8
+static inline uint32_t A3XX_VFD_CONTROL_1_MINTHRESHOLD(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK;
+}
+#define A3XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
+#define A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
+static inline uint32_t A3XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A3XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+#define A3XX_VFD_CONTROL_1_REGID4INST__MASK 0xff000000
+#define A3XX_VFD_CONTROL_1_REGID4INST__SHIFT 24
+static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A3XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+
+#define REG_A3XX_VFD_INDEX_MIN 0x00002242
+
+#define REG_A3XX_VFD_INDEX_MAX 0x00002243
+
+#define REG_A3XX_VFD_INSTANCEID_OFFSET 0x00002244
+
+#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
+
+static inline uint32_t REG_A3XX_VFD_FETCH(uint32_t i0) { return 0x00002246 + 0x2*i0; }
+
+static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; }
+#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f
+#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT 0
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK;
+}
+#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0000ff80
+#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK;
+}
+#define A3XX_VFD_FETCH_INSTR_0_INSTANCED 0x00010000
+#define A3XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00020000
+#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK 0x00fc0000
+#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT 18
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_INDEXCODE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK;
+}
+#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK 0xff000000
+#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT 24
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK;
+}
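+/* Editorial sketch (assumed usage): each vertex-buffer slot i is described
+ * by a FETCH_INSTR_0/FETCH_INSTR_1 pair, the second word presumably holding
+ * the buffer address.  A plausible composition of the first word:
+ *
+ *   uint32_t instr0 = A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(fetchsize) |
+ *                     A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(stride) |
+ *                     A3XX_VFD_FETCH_INSTR_0_INDEXCODE(i) |
+ *                     A3XX_VFD_FETCH_INSTR_0_STEPRATE(1);
+ */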
+
+static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x00002247 + 0x2*i0; }
+
+static inline uint32_t REG_A3XX_VFD_DECODE(uint32_t i0) { return 0x00002266 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x00002266 + 0x1*i0; }
+#define A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f
+#define A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A3XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_CONSTFILL 0x00000010
+#define A3XX_VFD_DECODE_INSTR_FORMAT__MASK 0x00000fc0
+#define A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT 6
+static inline uint32_t A3XX_VFD_DECODE_INSTR_FORMAT(enum a3xx_vtx_fmt val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A3XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_REGID__MASK 0x000ff000
+#define A3XX_VFD_DECODE_INSTR_REGID__SHIFT 12
+static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_INT 0x00100000
+#define A3XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000
+#define A3XX_VFD_DECODE_INSTR_SWAP__SHIFT 22
+static inline uint32_t A3XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A3XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
+#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
+static inline uint32_t A3XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_LASTCOMPVALID 0x20000000
+#define A3XX_VFD_DECODE_INSTR_SWITCHNEXT 0x40000000
+
+#define REG_A3XX_VFD_VS_THREADING_THRESHOLD 0x0000227e
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK 0x0000000f
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT 0
+static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD(uint32_t val)
+{
+ return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK;
+}
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK 0x0000ff00
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT 8
+static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT(uint32_t val)
+{
+ return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK;
+}
+
+#define REG_A3XX_VPC_ATTR 0x00002280
+#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x000001ff
+#define A3XX_VPC_ATTR_TOTALATTR__SHIFT 0
+static inline uint32_t A3XX_VPC_ATTR_TOTALATTR(uint32_t val)
+{
+ return ((val) << A3XX_VPC_ATTR_TOTALATTR__SHIFT) & A3XX_VPC_ATTR_TOTALATTR__MASK;
+}
+#define A3XX_VPC_ATTR_PSIZE 0x00000200
+#define A3XX_VPC_ATTR_THRDASSIGN__MASK 0x0ffff000
+#define A3XX_VPC_ATTR_THRDASSIGN__SHIFT 12
+static inline uint32_t A3XX_VPC_ATTR_THRDASSIGN(uint32_t val)
+{
+ return ((val) << A3XX_VPC_ATTR_THRDASSIGN__SHIFT) & A3XX_VPC_ATTR_THRDASSIGN__MASK;
+}
+#define A3XX_VPC_ATTR_LMSIZE__MASK 0xf0000000
+#define A3XX_VPC_ATTR_LMSIZE__SHIFT 28
+static inline uint32_t A3XX_VPC_ATTR_LMSIZE(uint32_t val)
+{
+ return ((val) << A3XX_VPC_ATTR_LMSIZE__SHIFT) & A3XX_VPC_ATTR_LMSIZE__MASK;
+}
+
+#define REG_A3XX_VPC_PACK 0x00002281
+#define A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK 0x0000ff00
+#define A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT 8
+static inline uint32_t A3XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t val)
+{
+ return ((val) << A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT) & A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK;
+}
+#define A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK 0x00ff0000
+#define A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT 16
+static inline uint32_t A3XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
+{
+ return ((val) << A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
+}
+
+static inline uint32_t REG_A3XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002282 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002282 + 0x1*i0; }
+#define A3XX_VPC_VARYING_INTERP_MODE_C0__MASK 0x00000003
+#define A3XX_VPC_VARYING_INTERP_MODE_C0__SHIFT 0
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C0(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C0__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C0__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C1__MASK 0x0000000c
+#define A3XX_VPC_VARYING_INTERP_MODE_C1__SHIFT 2
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C1(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C1__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C1__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C2__MASK 0x00000030
+#define A3XX_VPC_VARYING_INTERP_MODE_C2__SHIFT 4
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C2(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C2__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C2__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C3__MASK 0x000000c0
+#define A3XX_VPC_VARYING_INTERP_MODE_C3__SHIFT 6
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C3(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C3__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C3__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C4__MASK 0x00000300
+#define A3XX_VPC_VARYING_INTERP_MODE_C4__SHIFT 8
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C4(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C4__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C4__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C5__MASK 0x00000c00
+#define A3XX_VPC_VARYING_INTERP_MODE_C5__SHIFT 10
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C5(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C5__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C5__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C6__MASK 0x00003000
+#define A3XX_VPC_VARYING_INTERP_MODE_C6__SHIFT 12
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C6(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C6__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C6__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C7__MASK 0x0000c000
+#define A3XX_VPC_VARYING_INTERP_MODE_C7__SHIFT 14
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C7(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C7__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C7__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C8__MASK 0x00030000
+#define A3XX_VPC_VARYING_INTERP_MODE_C8__SHIFT 16
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C8(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C8__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C8__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C9__MASK 0x000c0000
+#define A3XX_VPC_VARYING_INTERP_MODE_C9__SHIFT 18
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C9(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C9__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C9__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CA__MASK 0x00300000
+#define A3XX_VPC_VARYING_INTERP_MODE_CA__SHIFT 20
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CA(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CA__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CA__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CB__MASK 0x00c00000
+#define A3XX_VPC_VARYING_INTERP_MODE_CB__SHIFT 22
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CB(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CB__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CB__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CC__MASK 0x03000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CC__SHIFT 24
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CC(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CC__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CC__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CD__MASK 0x0c000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CD__SHIFT 26
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CD(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CD__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CD__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CE__MASK 0x30000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CE__SHIFT 28
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CE(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CE__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CE__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CF__MASK 0xc0000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT 30
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CF(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CF__MASK;
+}
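+/* Editorial note: each VPC_VARYING_INTERP_MODE word carries sixteen 2-bit
+ * a3xx_intp_mode fields (C0..CF), one per scalar varying component; the
+ * array spans 0x2282..0x2285, so four words cover 64 components.
+ */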
+
+static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00002286 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00002286 + 0x1*i0; }
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C0__MASK 0x00000003
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C0__SHIFT 0
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C0(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C0__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C0__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C1__MASK 0x0000000c
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C1__SHIFT 2
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C1(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C1__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C1__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C2__MASK 0x00000030
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C2__SHIFT 4
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C2(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C2__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C2__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C3__MASK 0x000000c0
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C3__SHIFT 6
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C3(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C3__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C3__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C4__MASK 0x00000300
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C4__SHIFT 8
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C4(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C4__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C4__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C5__MASK 0x00000c00
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C5__SHIFT 10
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C5(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C5__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C5__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C6__MASK 0x00003000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C6__SHIFT 12
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C6(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C6__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C6__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C7__MASK 0x0000c000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C7__SHIFT 14
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C7(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C7__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C7__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C8__MASK 0x00030000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C8__SHIFT 16
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C8(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C8__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C8__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C9__MASK 0x000c0000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C9__SHIFT 18
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C9(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C9__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C9__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CA__MASK 0x00300000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CA__SHIFT 20
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CA(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CA__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CA__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CB__MASK 0x00c00000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CB__SHIFT 22
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CB(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CB__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CB__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CC__MASK 0x03000000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CC__SHIFT 24
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CC(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CC__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CC__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CD__MASK 0x0c000000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CD__SHIFT 26
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CD(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CD__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CD__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CE__MASK 0x30000000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CE__SHIFT 28
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CE(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CE__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CE__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CF__MASK 0xc0000000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CF__SHIFT 30
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CF(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CF__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CF__MASK;
+}
+
+#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_0 0x0000228a
+
+#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_1 0x0000228b
+
+#define REG_A3XX_SP_SP_CTRL_REG 0x000022c0
+#define A3XX_SP_SP_CTRL_REG_RESOLVE 0x00010000
+#define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x00040000
+#define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT 18
+static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t val)
+{
+ return ((val) << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK;
+}
+#define A3XX_SP_SP_CTRL_REG_BINNING 0x00080000
+#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK 0x00300000
+#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT 20
+static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val)
+{
+ return ((val) << A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK;
+}
+#define A3XX_SP_SP_CTRL_REG_L0MODE__MASK 0x00c00000
+#define A3XX_SP_SP_CTRL_REG_L0MODE__SHIFT 22
+static inline uint32_t A3XX_SP_SP_CTRL_REG_L0MODE(uint32_t val)
+{
+ return ((val) << A3XX_SP_SP_CTRL_REG_L0MODE__SHIFT) & A3XX_SP_SP_CTRL_REG_L0MODE__MASK;
+}
+
+#define REG_A3XX_SP_VS_CTRL_REG0 0x000022c4
+#define A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002
+#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A3XX_SP_VS_CTRL_REG0_ALUSCHMODE 0x00000008
+#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000
+#define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG0_LENGTH__MASK;
+}
+
+#define REG_A3XX_SP_VS_CTRL_REG1 0x000022c5
+#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff
+#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00
+#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x7f000000
+#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24
+static inline uint32_t A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK;
+}
+
+#define REG_A3XX_SP_VS_PARAM_REG 0x000022c6
+#define A3XX_SP_VS_PARAM_REG_POSREGID__MASK 0x000000ff
+#define A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT 0
+static inline uint32_t A3XX_SP_VS_PARAM_REG_POSREGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_POSREGID__MASK;
+}
+#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK 0x0000ff00
+#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT 8
+static inline uint32_t A3XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
+}
+#define A3XX_SP_VS_PARAM_REG_POS2DMODE 0x00010000
+#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0x01f00000
+#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20
+static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A3XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A3XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A3XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A3XX_SP_VS_OUT_REG_A_HALF 0x00000100
+#define A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00
+#define A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9
+static inline uint32_t A3XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A3XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A3XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A3XX_SP_VS_OUT_REG_B_HALF 0x01000000
+#define A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
+#define A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
+static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
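+/* Editorial note: each SP_VS_OUT_REG word describes two vertex-shader
+ * outputs, an A half and a B half, each with an 8-bit register id, a
+ * half-precision flag, and a 4-bit component write mask.
+ */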
+
+static inline uint32_t REG_A3XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x0000007f
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x00007f00
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x007f0000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0x7f000000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A3XX_SP_VS_OBJ_OFFSET_REG 0x000022d4
+#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK 0x0000ffff
+#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT 0
+static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK;
+}
+#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5
+
+#define REG_A3XX_SP_VS_PVT_MEM_PARAM_REG 0x000022d6
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK 0x000000ff
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK 0x00ffff00
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT 8
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK 0x0000001f
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT 0
+static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK 0xffffffe0
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5
+static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
+{
+ return ((val >> 5) << A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
+}
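+/* Editorial note: the packer above drops the low 5 bits of val
+ * ((val >> 5) << 5), so the shader start address is assumed to be
+ * 32-byte aligned.
+ */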
+
+#define REG_A3XX_SP_VS_PVT_MEM_SIZE_REG 0x000022d8
+
+#define REG_A3XX_SP_VS_LENGTH_REG 0x000022df
+#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff
+#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_VS_LENGTH_REG_SHADERLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK;
+}
+
+#define REG_A3XX_SP_FS_CTRL_REG0 0x000022e0
+#define A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002
+#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A3XX_SP_FS_CTRL_REG0_ALUSCHMODE 0x00000008
+#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_FSBYPASSENABLE 0x00020000
+#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP 0x00040000
+#define A3XX_SP_FS_CTRL_REG0_OUTORDERED 0x00080000
+#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A3XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
+#define A3XX_SP_FS_CTRL_REG0_COMPUTEMODE 0x00800000
+#define A3XX_SP_FS_CTRL_REG0_LENGTH__MASK 0xff000000
+#define A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT 24
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_LENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG0_LENGTH__MASK;
+}
+
+#define REG_A3XX_SP_FS_CTRL_REG1 0x000022e1
+#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff
+#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00
+#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x00f00000
+#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 20
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x7f000000
+#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT 24
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT) & A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK;
+}
+
+#define REG_A3XX_SP_FS_OBJ_OFFSET_REG 0x000022e2
+#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK 0x0000ffff
+#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT 0
+static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK;
+}
+#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3
+
+#define REG_A3XX_SP_FS_PVT_MEM_PARAM_REG 0x000022e4
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK 0x000000ff
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK 0x00ffff00
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT 8
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK 0x0000001f
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT 0
+static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK 0xffffffe0
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5
+static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
+{
+ return ((val >> 5) << A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
+}
+
+#define REG_A3XX_SP_FS_PVT_MEM_SIZE_REG 0x000022e6
+
+#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_0 0x000022e8
+
+#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x000022e9
+
+#define REG_A3XX_SP_FS_OUTPUT_REG 0x000022ec
+#define A3XX_SP_FS_OUTPUT_REG_MRT__MASK 0x00000003
+#define A3XX_SP_FS_OUTPUT_REG_MRT__SHIFT 0
+static inline uint32_t A3XX_SP_FS_OUTPUT_REG_MRT(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OUTPUT_REG_MRT__SHIFT) & A3XX_SP_FS_OUTPUT_REG_MRT__MASK;
+}
+#define A3XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080
+#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00
+#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8
+static inline uint32_t A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK;
+}
+
+static inline uint32_t REG_A3XX_SP_FS_MRT(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
+#define A3XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff
+#define A3XX_SP_FS_MRT_REG_REGID__SHIFT 0
+static inline uint32_t A3XX_SP_FS_MRT_REG_REGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_MRT_REG_REGID__SHIFT) & A3XX_SP_FS_MRT_REG_REGID__MASK;
+}
+#define A3XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100
+#define A3XX_SP_FS_MRT_REG_SINT 0x00000400
+#define A3XX_SP_FS_MRT_REG_UINT 0x00000800
+
+static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT_REG(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
+#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK 0x0000003f
+#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT 0
+static inline uint32_t A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT(enum a3xx_color_fmt val)
+{
+ return ((val) << A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT) & A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK;
+}
+
+#define REG_A3XX_SP_FS_LENGTH_REG 0x000022ff
+#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff
+#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_FS_LENGTH_REG_SHADERLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK;
+}
+
+#define REG_A3XX_PA_SC_AA_CONFIG 0x00002301
+
+#define REG_A3XX_TPL1_TP_VS_TEX_OFFSET 0x00002340
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
+static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK;
+}
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8
+static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK;
+}
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT 16
+static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK;
+}
+
+#define REG_A3XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR 0x00002341
+
+#define REG_A3XX_TPL1_TP_FS_TEX_OFFSET 0x00002342
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
+static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK;
+}
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8
+static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK;
+}
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT 16
+static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK;
+}
+
+#define REG_A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x00002343
+
+#define REG_A3XX_VBIF_CLKON 0x00003001
+
+#define REG_A3XX_VBIF_FIXED_SORT_EN 0x0000300c
+
+#define REG_A3XX_VBIF_FIXED_SORT_SEL0 0x0000300d
+
+#define REG_A3XX_VBIF_FIXED_SORT_SEL1 0x0000300e
+
+#define REG_A3XX_VBIF_ABIT_SORT 0x0000301c
+
+#define REG_A3XX_VBIF_ABIT_SORT_CONF 0x0000301d
+
+#define REG_A3XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A3XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
+
+#define REG_A3XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
+
+#define REG_A3XX_VBIF_IN_WR_LIM_CONF0 0x00003030
+
+#define REG_A3XX_VBIF_IN_WR_LIM_CONF1 0x00003031
+
+#define REG_A3XX_VBIF_OUT_RD_LIM_CONF0 0x00003034
+
+#define REG_A3XX_VBIF_OUT_WR_LIM_CONF0 0x00003035
+
+#define REG_A3XX_VBIF_DDR_OUT_MAX_BURST 0x00003036
+
+#define REG_A3XX_VBIF_ARB_CTL 0x0000303c
+
+#define REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
+
+#define REG_A3XX_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x00003058
+
+#define REG_A3XX_VBIF_OUT_AXI_AOOO_EN 0x0000305e
+
+#define REG_A3XX_VBIF_OUT_AXI_AOOO 0x0000305f
+
+#define REG_A3XX_VBIF_PERF_CNT_EN 0x00003070
+#define A3XX_VBIF_PERF_CNT_EN_CNT0 0x00000001
+#define A3XX_VBIF_PERF_CNT_EN_CNT1 0x00000002
+#define A3XX_VBIF_PERF_CNT_EN_PWRCNT0 0x00000004
+#define A3XX_VBIF_PERF_CNT_EN_PWRCNT1 0x00000008
+#define A3XX_VBIF_PERF_CNT_EN_PWRCNT2 0x00000010
+
+#define REG_A3XX_VBIF_PERF_CNT_CLR 0x00003071
+#define A3XX_VBIF_PERF_CNT_CLR_CNT0 0x00000001
+#define A3XX_VBIF_PERF_CNT_CLR_CNT1 0x00000002
+#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT0 0x00000004
+#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT1 0x00000008
+#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT2 0x00000010
+
+#define REG_A3XX_VBIF_PERF_CNT_SEL 0x00003072
+
+#define REG_A3XX_VBIF_PERF_CNT0_LO 0x00003073
+
+#define REG_A3XX_VBIF_PERF_CNT0_HI 0x00003074
+
+#define REG_A3XX_VBIF_PERF_CNT1_LO 0x00003075
+
+#define REG_A3XX_VBIF_PERF_CNT1_HI 0x00003076
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT0_LO 0x00003077
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT0_HI 0x00003078
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT1_LO 0x00003079
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT1_HI 0x0000307a
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT2_LO 0x0000307b
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT2_HI 0x0000307c
+
+#define REG_A3XX_VSC_BIN_SIZE 0x00000c01
+#define A3XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
+#define A3XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A3XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A3XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A3XX_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A3XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
+#define A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5
+static inline uint32_t A3XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A3XX_VSC_BIN_SIZE_HEIGHT__MASK;
+}
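+/* Editorial note: both packers above take a size in pixels and encode it
+ * in units of 32 (val >> 5), so bin dimensions are assumed to be multiples
+ * of 32; e.g. a 64x64 pixel bin encodes as WIDTH = HEIGHT = 2.
+ */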
+
+#define REG_A3XX_VSC_SIZE_ADDRESS 0x00000c02
+
+static inline uint32_t REG_A3XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+
+static inline uint32_t REG_A3XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+#define A3XX_VSC_PIPE_CONFIG_X__MASK 0x000003ff
+#define A3XX_VSC_PIPE_CONFIG_X__SHIFT 0
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_X(uint32_t val)
+{
+ return ((val) << A3XX_VSC_PIPE_CONFIG_X__SHIFT) & A3XX_VSC_PIPE_CONFIG_X__MASK;
+}
+#define A3XX_VSC_PIPE_CONFIG_Y__MASK 0x000ffc00
+#define A3XX_VSC_PIPE_CONFIG_Y__SHIFT 10
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_Y(uint32_t val)
+{
+ return ((val) << A3XX_VSC_PIPE_CONFIG_Y__SHIFT) & A3XX_VSC_PIPE_CONFIG_Y__MASK;
+}
+#define A3XX_VSC_PIPE_CONFIG_W__MASK 0x00f00000
+#define A3XX_VSC_PIPE_CONFIG_W__SHIFT 20
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_W(uint32_t val)
+{
+ return ((val) << A3XX_VSC_PIPE_CONFIG_W__SHIFT) & A3XX_VSC_PIPE_CONFIG_W__MASK;
+}
+#define A3XX_VSC_PIPE_CONFIG_H__MASK 0x0f000000
+#define A3XX_VSC_PIPE_CONFIG_H__SHIFT 24
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_H(uint32_t val)
+{
+ return ((val) << A3XX_VSC_PIPE_CONFIG_H__SHIFT) & A3XX_VSC_PIPE_CONFIG_H__MASK;
+}
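+/* Editorial sketch (assumed usage; gpu_write() as used elsewhere in this
+ * driver): programming visibility-stream pipe i with position (x, y) and
+ * size (w, h) in bins:
+ *
+ *   gpu_write(gpu, REG_A3XX_VSC_PIPE_CONFIG(i),
+ *             A3XX_VSC_PIPE_CONFIG_X(x) | A3XX_VSC_PIPE_CONFIG_Y(y) |
+ *             A3XX_VSC_PIPE_CONFIG_W(w) | A3XX_VSC_PIPE_CONFIG_H(h));
+ */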
+
+static inline uint32_t REG_A3XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; }
+
+static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
+
+#define REG_A3XX_VSC_BIN_CONTROL 0x00000c3c
+#define A3XX_VSC_BIN_CONTROL_BINNING_ENABLE 0x00000001
+
+#define REG_A3XX_UNKNOWN_0C3D 0x00000c3d
+
+#define REG_A3XX_PC_PERFCOUNTER0_SELECT 0x00000c48
+
+#define REG_A3XX_PC_PERFCOUNTER1_SELECT 0x00000c49
+
+#define REG_A3XX_PC_PERFCOUNTER2_SELECT 0x00000c4a
+
+#define REG_A3XX_PC_PERFCOUNTER3_SELECT 0x00000c4b
+
+#define REG_A3XX_GRAS_TSE_DEBUG_ECO 0x00000c81
+
+#define REG_A3XX_GRAS_PERFCOUNTER0_SELECT 0x00000c88
+
+#define REG_A3XX_GRAS_PERFCOUNTER1_SELECT 0x00000c89
+
+#define REG_A3XX_GRAS_PERFCOUNTER2_SELECT 0x00000c8a
+
+#define REG_A3XX_GRAS_PERFCOUNTER3_SELECT 0x00000c8b
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_X(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Y(uint32_t i0) { return 0x00000ca1 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Z(uint32_t i0) { return 0x00000ca2 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_W(uint32_t i0) { return 0x00000ca3 + 0x4*i0; }
+
+#define REG_A3XX_RB_GMEM_BASE_ADDR 0x00000cc0
+
+#define REG_A3XX_RB_DEBUG_ECO_CONTROLS_ADDR 0x00000cc1
+
+#define REG_A3XX_RB_PERFCOUNTER0_SELECT 0x00000cc6
+
+#define REG_A3XX_RB_PERFCOUNTER1_SELECT 0x00000cc7
+
+#define REG_A3XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK 0x00003fff
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT 0
+static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val)
+{
+ return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK;
+}
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK 0x0fffc000
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT 14
+static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val)
+{
+ return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK;
+}
+
+#define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT 0x00000e00
+
+#define REG_A3XX_HLSQ_PERFCOUNTER1_SELECT 0x00000e01
+
+#define REG_A3XX_HLSQ_PERFCOUNTER2_SELECT 0x00000e02
+
+#define REG_A3XX_HLSQ_PERFCOUNTER3_SELECT 0x00000e03
+
+#define REG_A3XX_HLSQ_PERFCOUNTER4_SELECT 0x00000e04
+
+#define REG_A3XX_HLSQ_PERFCOUNTER5_SELECT 0x00000e05
+
+#define REG_A3XX_UNKNOWN_0E43 0x00000e43
+
+#define REG_A3XX_VFD_PERFCOUNTER0_SELECT 0x00000e44
+
+#define REG_A3XX_VFD_PERFCOUNTER1_SELECT 0x00000e45
+
+#define REG_A3XX_VPC_VPC_DEBUG_RAM_SEL 0x00000e61
+
+#define REG_A3XX_VPC_VPC_DEBUG_RAM_READ 0x00000e62
+
+#define REG_A3XX_VPC_PERFCOUNTER0_SELECT 0x00000e64
+
+#define REG_A3XX_VPC_PERFCOUNTER1_SELECT 0x00000e65
+
+#define REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG 0x00000e82
+
+#define REG_A3XX_UCHE_PERFCOUNTER0_SELECT 0x00000e84
+
+#define REG_A3XX_UCHE_PERFCOUNTER1_SELECT 0x00000e85
+
+#define REG_A3XX_UCHE_PERFCOUNTER2_SELECT 0x00000e86
+
+#define REG_A3XX_UCHE_PERFCOUNTER3_SELECT 0x00000e87
+
+#define REG_A3XX_UCHE_PERFCOUNTER4_SELECT 0x00000e88
+
+#define REG_A3XX_UCHE_PERFCOUNTER5_SELECT 0x00000e89
+
+#define REG_A3XX_UCHE_CACHE_INVALIDATE0_REG 0x00000ea0
+#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK 0x0fffffff
+#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT 0
+static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR(uint32_t val)
+{
+ return ((val) << A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK;
+}
+
+#define REG_A3XX_UCHE_CACHE_INVALIDATE1_REG 0x00000ea1
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK 0x0fffffff
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT 0
+static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR(uint32_t val)
+{
+ return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK;
+}
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK 0x30000000
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT 28
+static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_opcode val)
+{
+ return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK;
+}
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE 0x80000000
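+
+/* Editor's sketch, not part of the generated header: a whole-cache UCHE
+ * invalidate is a write to this register pair; the opcode comes from enum
+ * a3xx_cache_opcode earlier in this file (INVALIDATE assumed here):
+ *
+ *   gpu_write(gpu, REG_A3XX_UCHE_CACHE_INVALIDATE0_REG,
+ *             A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR(0));
+ *   gpu_write(gpu, REG_A3XX_UCHE_CACHE_INVALIDATE1_REG,
+ *             A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(INVALIDATE) |
+ *             A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE);
+ */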
+
+#define REG_A3XX_UNKNOWN_0EA6 0x00000ea6
+
+#define REG_A3XX_SP_PERFCOUNTER0_SELECT 0x00000ec4
+
+#define REG_A3XX_SP_PERFCOUNTER1_SELECT 0x00000ec5
+
+#define REG_A3XX_SP_PERFCOUNTER2_SELECT 0x00000ec6
+
+#define REG_A3XX_SP_PERFCOUNTER3_SELECT 0x00000ec7
+
+#define REG_A3XX_SP_PERFCOUNTER4_SELECT 0x00000ec8
+
+#define REG_A3XX_SP_PERFCOUNTER5_SELECT 0x00000ec9
+
+#define REG_A3XX_SP_PERFCOUNTER6_SELECT 0x00000eca
+
+#define REG_A3XX_SP_PERFCOUNTER7_SELECT 0x00000ecb
+
+#define REG_A3XX_UNKNOWN_0EE0 0x00000ee0
+
+#define REG_A3XX_UNKNOWN_0F03 0x00000f03
+
+#define REG_A3XX_TP_PERFCOUNTER0_SELECT 0x00000f04
+
+#define REG_A3XX_TP_PERFCOUNTER1_SELECT 0x00000f05
+
+#define REG_A3XX_TP_PERFCOUNTER2_SELECT 0x00000f06
+
+#define REG_A3XX_TP_PERFCOUNTER3_SELECT 0x00000f07
+
+#define REG_A3XX_TP_PERFCOUNTER4_SELECT 0x00000f08
+
+#define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09
+
+#define REG_A3XX_VGT_CL_INITIATOR 0x000021f0
+
+#define REG_A3XX_VGT_EVENT_INITIATOR 0x000021f9
+
+#define REG_A3XX_VGT_DRAW_INITIATOR 0x000021fc
+#define A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f
+#define A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK;
+}
+#define A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0
+#define A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK;
+}
+#define A3XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK 0x00000600
+#define A3XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT 9
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT) & A3XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK;
+}
+#define A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000800
+#define A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT 11
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK;
+}
+#define A3XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000
+#define A3XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000
+#define A3XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK 0xff000000
+#define A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT 24
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT) & A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK;
+}
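+
+/* Editor's sketch, not part of the generated header: a draw-initiator dword
+ * is the OR of the fields above, with enum values coming from
+ * adreno_pm4.xml.h (DI_PT_TRILIST etc. assumed here for illustration):
+ *
+ *   uint32_t di = A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE(DI_PT_TRILIST) |
+ *                 A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
+ *                 A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(1);
+ */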
+
+#define REG_A3XX_VGT_IMMED_DATA 0x000021fd
+
+#define REG_A3XX_TEX_SAMP_0 0x00000000
+#define A3XX_TEX_SAMP_0_CLAMPENABLE 0x00000001
+#define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002
+#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c
+#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2
+static inline uint32_t A3XX_TEX_SAMP_0_XY_MAG(enum a3xx_tex_filter val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_XY_MAG__SHIFT) & A3XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A3XX_TEX_SAMP_0_XY_MIN__MASK 0x00000030
+#define A3XX_TEX_SAMP_0_XY_MIN__SHIFT 4
+static inline uint32_t A3XX_TEX_SAMP_0_XY_MIN(enum a3xx_tex_filter val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_XY_MIN__SHIFT) & A3XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A3XX_TEX_SAMP_0_WRAP_S__MASK 0x000001c0
+#define A3XX_TEX_SAMP_0_WRAP_S__SHIFT 6
+static inline uint32_t A3XX_TEX_SAMP_0_WRAP_S(enum a3xx_tex_clamp val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_WRAP_S__SHIFT) & A3XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A3XX_TEX_SAMP_0_WRAP_T__MASK 0x00000e00
+#define A3XX_TEX_SAMP_0_WRAP_T__SHIFT 9
+static inline uint32_t A3XX_TEX_SAMP_0_WRAP_T(enum a3xx_tex_clamp val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_WRAP_T__SHIFT) & A3XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A3XX_TEX_SAMP_0_WRAP_R__MASK 0x00007000
+#define A3XX_TEX_SAMP_0_WRAP_R__SHIFT 12
+static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_WRAP_R__SHIFT) & A3XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A3XX_TEX_SAMP_0_ANISO__MASK 0x00038000
+#define A3XX_TEX_SAMP_0_ANISO__SHIFT 15
+static inline uint32_t A3XX_TEX_SAMP_0_ANISO(enum a3xx_tex_aniso val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_ANISO__SHIFT) & A3XX_TEX_SAMP_0_ANISO__MASK;
+}
+#define A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK 0x00700000
+#define A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT 20
+static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT) & A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK;
+}
+#define A3XX_TEX_SAMP_0_CUBEMAPSEAMLESSFILTOFF 0x01000000
+#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
+
+#define REG_A3XX_TEX_SAMP_1 0x00000001
+#define A3XX_TEX_SAMP_1_LOD_BIAS__MASK 0x000007ff
+#define A3XX_TEX_SAMP_1_LOD_BIAS__SHIFT 0
+static inline uint32_t A3XX_TEX_SAMP_1_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_LOD_BIAS__SHIFT) & A3XX_TEX_SAMP_1_LOD_BIAS__MASK;
+}
+#define A3XX_TEX_SAMP_1_MAX_LOD__MASK 0x003ff000
+#define A3XX_TEX_SAMP_1_MAX_LOD__SHIFT 12
+static inline uint32_t A3XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A3XX_TEX_SAMP_1_MIN_LOD__MASK 0xffc00000
+#define A3XX_TEX_SAMP_1_MIN_LOD__SHIFT 22
+static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
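+
+/* Editor's note, not part of the generated header: the (val * 64.0) casts
+ * above encode LOD values as fixed point with 6 fractional bits -- signed
+ * for LOD_BIAS (note the int32_t cast), unsigned for MIN/MAX_LOD -- so
+ * e.g. A3XX_TEX_SAMP_1_MAX_LOD(2.5f) packs the raw field value 160.
+ */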
+
+#define REG_A3XX_TEX_CONST_0 0x00000000
+#define A3XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003
+#define A3XX_TEX_CONST_0_TILE_MODE__SHIFT 0
+static inline uint32_t A3XX_TEX_CONST_0_TILE_MODE(enum a3xx_tile_mode val)
+{
+ return ((val) << A3XX_TEX_CONST_0_TILE_MODE__SHIFT) & A3XX_TEX_CONST_0_TILE_MODE__MASK;
+}
+#define A3XX_TEX_CONST_0_SRGB 0x00000004
+#define A3XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A3XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A3XX_TEX_CONST_0_SWIZ_X(enum a3xx_tex_swiz val)
+{
+ return ((val) << A3XX_TEX_CONST_0_SWIZ_X__SHIFT) & A3XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A3XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A3XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Y(enum a3xx_tex_swiz val)
+{
+ return ((val) << A3XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A3XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A3XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Z(enum a3xx_tex_swiz val)
+{
+ return ((val) << A3XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A3XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A3XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A3XX_TEX_CONST_0_SWIZ_W(enum a3xx_tex_swiz val)
+{
+ return ((val) << A3XX_TEX_CONST_0_SWIZ_W__SHIFT) & A3XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A3XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
+#define A3XX_TEX_CONST_0_MIPLVLS__SHIFT 16
+static inline uint32_t A3XX_TEX_CONST_0_MIPLVLS(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_0_MIPLVLS__SHIFT) & A3XX_TEX_CONST_0_MIPLVLS__MASK;
+}
+#define A3XX_TEX_CONST_0_MSAATEX__MASK 0x00300000
+#define A3XX_TEX_CONST_0_MSAATEX__SHIFT 20
+static inline uint32_t A3XX_TEX_CONST_0_MSAATEX(enum a3xx_tex_msaa val)
+{
+ return ((val) << A3XX_TEX_CONST_0_MSAATEX__SHIFT) & A3XX_TEX_CONST_0_MSAATEX__MASK;
+}
+#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000
+#define A3XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
+{
+ return ((val) << A3XX_TEX_CONST_0_FMT__SHIFT) & A3XX_TEX_CONST_0_FMT__MASK;
+}
+#define A3XX_TEX_CONST_0_NOCONVERT 0x20000000
+#define A3XX_TEX_CONST_0_TYPE__MASK 0xc0000000
+#define A3XX_TEX_CONST_0_TYPE__SHIFT 30
+static inline uint32_t A3XX_TEX_CONST_0_TYPE(enum a3xx_tex_type val)
+{
+ return ((val) << A3XX_TEX_CONST_0_TYPE__SHIFT) & A3XX_TEX_CONST_0_TYPE__MASK;
+}
+
+#define REG_A3XX_TEX_CONST_1 0x00000001
+#define A3XX_TEX_CONST_1_HEIGHT__MASK 0x00003fff
+#define A3XX_TEX_CONST_1_HEIGHT__SHIFT 0
+static inline uint32_t A3XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_1_HEIGHT__SHIFT) & A3XX_TEX_CONST_1_HEIGHT__MASK;
+}
+#define A3XX_TEX_CONST_1_WIDTH__MASK 0x0fffc000
+#define A3XX_TEX_CONST_1_WIDTH__SHIFT 14
+static inline uint32_t A3XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_1_WIDTH__SHIFT) & A3XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A3XX_TEX_CONST_1_PITCHALIGN__MASK 0xf0000000
+#define A3XX_TEX_CONST_1_PITCHALIGN__SHIFT 28
+static inline uint32_t A3XX_TEX_CONST_1_PITCHALIGN(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_1_PITCHALIGN__SHIFT) & A3XX_TEX_CONST_1_PITCHALIGN__MASK;
+}
+
+#define REG_A3XX_TEX_CONST_2 0x00000002
+#define A3XX_TEX_CONST_2_INDX__MASK 0x000001ff
+#define A3XX_TEX_CONST_2_INDX__SHIFT 0
+static inline uint32_t A3XX_TEX_CONST_2_INDX(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_2_INDX__SHIFT) & A3XX_TEX_CONST_2_INDX__MASK;
+}
+#define A3XX_TEX_CONST_2_PITCH__MASK 0x3ffff000
+#define A3XX_TEX_CONST_2_PITCH__SHIFT 12
+static inline uint32_t A3XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_2_PITCH__SHIFT) & A3XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A3XX_TEX_CONST_2_SWAP__MASK 0xc0000000
+#define A3XX_TEX_CONST_2_SWAP__SHIFT 30
+static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A3XX_TEX_CONST_2_SWAP__SHIFT) & A3XX_TEX_CONST_2_SWAP__MASK;
+}
+
+#define REG_A3XX_TEX_CONST_3 0x00000003
+#define A3XX_TEX_CONST_3_LAYERSZ1__MASK 0x0001ffff
+#define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT 0
+static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val)
+{
+ return ((val >> 12) << A3XX_TEX_CONST_3_LAYERSZ1__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ1__MASK;
+}
+#define A3XX_TEX_CONST_3_DEPTH__MASK 0x0ffe0000
+#define A3XX_TEX_CONST_3_DEPTH__SHIFT 17
+static inline uint32_t A3XX_TEX_CONST_3_DEPTH(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_3_DEPTH__SHIFT) & A3XX_TEX_CONST_3_DEPTH__MASK;
+}
+#define A3XX_TEX_CONST_3_LAYERSZ2__MASK 0xf0000000
+#define A3XX_TEX_CONST_3_LAYERSZ2__SHIFT 28
+static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ2(uint32_t val)
+{
+ return ((val >> 12) << A3XX_TEX_CONST_3_LAYERSZ2__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ2__MASK;
+}
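+
+/* Editor's note, not part of the generated header: the (val >> 12) in the
+ * LAYERSZ helpers above means the per-layer size is programmed in 4096-byte
+ * units, i.e. callers pass a byte size that should be 4 KiB aligned.
+ */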
+
+
+#endif /* A3XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
new file mode 100644
index 000000000..2c8b98996
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -0,0 +1,604 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ */
+
+#include "a3xx_gpu.h"
+
+#define A3XX_INT0_MASK \
+ (A3XX_INT0_RBBM_AHB_ERROR | \
+ A3XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
+ A3XX_INT0_CP_T0_PACKET_IN_IB | \
+ A3XX_INT0_CP_OPCODE_ERROR | \
+ A3XX_INT0_CP_RESERVED_BIT_ERROR | \
+ A3XX_INT0_CP_HW_FAULT | \
+ A3XX_INT0_CP_IB1_INT | \
+ A3XX_INT0_CP_IB2_INT | \
+ A3XX_INT0_CP_RB_INT | \
+ A3XX_INT0_CP_REG_PROTECT_FAULT | \
+ A3XX_INT0_CP_AHB_ERROR_HALT | \
+ A3XX_INT0_CACHE_FLUSH_TS | \
+ A3XX_INT0_UCHE_OOB_ACCESS)
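+
+/*
+ * This mask is written to REG_A3XX_RBBM_INT_0_MASK in a3xx_hw_init() below;
+ * the same bit positions are then reported via REG_A3XX_RBBM_INT_0_STATUS
+ * and acked through REG_A3XX_RBBM_INT_CLEAR_CMD in a3xx_irq().
+ */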
+
+extern bool hang_debug;
+
+static void a3xx_dump(struct msm_gpu *gpu);
+static bool a3xx_idle(struct msm_gpu *gpu);
+
+static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+ struct msm_ringbuffer *ring = submit->ring;
+ unsigned int i;
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ /* ignore IB-targets */
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ /* ignore if there has not been a ctx switch: */
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ break;
+ fallthrough;
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ OUT_PKT2(ring);
+ break;
+ }
+ }
+
+ OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
+ OUT_RING(ring, submit->seqno);
+
+ /* Flush HLSQ lazy updates to make sure there is nothing
+ * pending for indirect loads after the timestamp has
+ * passed:
+ */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, HLSQ_FLUSH);
+
+ /* wait for idle before cache flush/interrupt */
+ OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+ OUT_RING(ring, 0x00000000);
+
+ /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+ OUT_RING(ring, CACHE_FLUSH_TS | CP_EVENT_WRITE_0_IRQ);
+ OUT_RING(ring, rbmemptr(ring, fence));
+ OUT_RING(ring, submit->seqno);
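+ /* The three dwords above are the event id with its IRQ bit set, the
+ * address of the per-ring fence memptr, and the timestamp value
+ * (this submit's seqno) that lands there.
+ */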
+
+#if 0
+ /* Dummy set-constant to trigger context rollover */
+ OUT_PKT3(ring, CP_SET_CONSTANT, 2);
+ OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
+ OUT_RING(ring, 0x00000000);
+#endif
+
+ adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
+}
+
+static bool a3xx_me_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT3(ring, CP_ME_INIT, 17);
+ OUT_RING(ring, 0x000003f7);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000080);
+ OUT_RING(ring, 0x00000100);
+ OUT_RING(ring, 0x00000180);
+ OUT_RING(ring, 0x00006600);
+ OUT_RING(ring, 0x00000150);
+ OUT_RING(ring, 0x0000014e);
+ OUT_RING(ring, 0x00000154);
+ OUT_RING(ring, 0x00000001);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
+ return a3xx_idle(gpu);
+}
+
+static int a3xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
+ uint32_t *ptr, len;
+ int i, ret;
+
+ DBG("%s", gpu->name);
+
+ if (adreno_is_a305(adreno_gpu)) {
+ /* Set up 16 deep read/write request queues: */
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
+ /* Enable WR-REQ: */
+ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
+ /* Set up round robin arbitration between both AXI ports: */
+ gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
+ /* Set up AOOO: */
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
+ } else if (adreno_is_a306(adreno_gpu)) {
+ gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x0000000a);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x0000000a);
+ } else if (adreno_is_a320(adreno_gpu)) {
+ /* Set up 16 deep read/write request queues: */
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
+ /* Enable WR-REQ: */
+ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
+ /* Set up round robin arbitration between both AXI ports: */
+ gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
+ /* Set up AOOO: */
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
+ /* Enable 1K sort: */
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff);
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
+
+ } else if (adreno_is_a330v2(adreno_gpu)) {
+ /*
+ * Most of the VBIF registers on 8974v2 have the correct
+ * values at power on, so we won't modify those if we don't
+ * need to.
+ */
+ /* Enable 1k sort: */
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
+ /* Enable WR-REQ: */
+ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
+ gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+ /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
+ gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
+
+ } else if (adreno_is_a330(adreno_gpu)) {
+ /* Set up 16 deep read/write request queues: */
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x18181818);
+ /* Enable WR-REQ: */
+ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
+ /* Set up round robin arbitration between both AXI ports: */
+ gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
+ /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
+ gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
+ /* Set up AOOO: */
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003f);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003f003f);
+ /* Enable 1K sort: */
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
+ /* Disable VBIF clock gating. This allows AXI to run at a
+ * higher frequency than the GPU:
+ */
+ gpu_write(gpu, REG_A3XX_VBIF_CLKON, 0x00000001);
+
+ } else {
+ BUG();
+ }
+
+ /* Make all blocks contribute to the GPU BUSY perf counter: */
+ gpu_write(gpu, REG_A3XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);
+
+ /* Tune the hysteresis counters for SP and CP idle detection: */
+ gpu_write(gpu, REG_A3XX_RBBM_SP_HYST_CNT, 0x10);
+ gpu_write(gpu, REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
+
+ /* Enable the RBBM error reporting bits. This lets us get
+ * useful information on failure:
+ */
+ gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL0, 0x00000001);
+
+ /* Enable AHB error reporting: */
+ gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL1, 0xa6ffffff);
+
+ /* Turn on the power counters: */
+ gpu_write(gpu, REG_A3XX_RBBM_RBBM_CTL, 0x00030000);
+
+ /* Turn on hang detection - this spews a lot of useful information
+ * into the RBBM registers on a hang:
+ */
+ gpu_write(gpu, REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL, 0x00010fff);
+
+ /* Enable 64-byte cacheline size. HW default is 32 bytes (0x000000E0): */
+ gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
+
+ /* Enable Clock gating: */
+ if (adreno_is_a306(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
+ else if (adreno_is_a320(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
+ else if (adreno_is_a330v2(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
+ else if (adreno_is_a330(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbffcffff);
+
+ if (adreno_is_a330v2(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x05515455);
+ else if (adreno_is_a330(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000);
+
+ /* Set the OCMEM base address for A330, etc. The register takes the
+ * address in 16 KiB units, hence the >> 14:
+ */
+ if (a3xx_gpu->ocmem.hdl) {
+ gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
+ (unsigned int)(a3xx_gpu->ocmem.base >> 14));
+ }
+
+ /* Turn on performance counters: */
+ gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01);
+
+ /* Enable the perfcntrs that we use: */
+ for (i = 0; i < gpu->num_perfcntrs; i++) {
+ const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i];
+ gpu_write(gpu, perfcntr->select_reg, perfcntr->select_val);
+ }
+
+ gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, A3XX_INT0_MASK);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ /*
+ * Use the default ringbuffer size and block size but disable the RPTR
+ * shadow
+ */
+ gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ /* Set the ringbuffer address */
+ gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
+ /* setup access protection: */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);
+
+ /* RBBM registers */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(0), 0x63000040);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(1), 0x62000080);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(2), 0x600000cc);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(3), 0x60000108);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(4), 0x64000140);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(5), 0x66000400);
+
+ /* CP registers */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(6), 0x65000700);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(7), 0x610007d8);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(8), 0x620007e0);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(9), 0x61001178);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(10), 0x64001180);
+
+ /* RB registers */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(11), 0x60003300);
+
+ /* VBIF registers */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(12), 0x6b00c000);
+
+ /* NOTE: the PM4/micro-engine firmware registers look to be the same
+ * for a2xx and a3xx, so we could possibly push that part down to the
+ * adreno_gpu base class. Or push both PM4 and PFP, but parameterize
+ * the PFP ucode addr/data registers.
+ */
+
+ /* Load PM4: */
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
+ DBG("loading PM4 ucode version: %x", ptr[1]);
+
+ gpu_write(gpu, REG_AXXX_CP_DEBUG,
+ AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE |
+ AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
+ gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
+
+ /* Load PFP: */
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
+ DBG("loading PFP ucode version: %x", ptr[5]);
+
+ gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);
+
+ /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
+ if (adreno_is_a305(adreno_gpu) || adreno_is_a306(adreno_gpu) ||
+ adreno_is_a320(adreno_gpu)) {
+ gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
+ AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
+ AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
+ AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14));
+ } else if (adreno_is_a330(adreno_gpu)) {
+ /* NOTE: this (value taken from the downstream Android driver)
+ * includes some bits outside the known bitfields. But
+ * A330 has this "MERCIU queue" thing too, which might
+ * explain a new bitfield or reshuffling:
+ */
+ gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x003e2008);
+ }
+
+ /* clear ME_HALT to start micro engine */
+ gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
+
+ return a3xx_me_init(gpu) ? 0 : -EINVAL;
+}
+
+static void a3xx_recover(struct msm_gpu *gpu)
+{
+ int i;
+
+ adreno_dump_info(gpu);
+
+ for (i = 0; i < 8; i++) {
+ printk("CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
+ }
+
+ /* dump registers before resetting gpu, if enabled: */
+ if (hang_debug)
+ a3xx_dump(gpu);
+
+ gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 1);
+ gpu_read(gpu, REG_A3XX_RBBM_SW_RESET_CMD);
+ gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 0);
+ adreno_recover(gpu);
+}
+
+static void a3xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ adreno_gpu_cleanup(adreno_gpu);
+
+ adreno_gpu_ocmem_cleanup(&a3xx_gpu->ocmem);
+
+ kfree(a3xx_gpu);
+}
+
+static bool a3xx_idle(struct msm_gpu *gpu)
+{
+ /* wait for ringbuffer to drain: */
+ if (!adreno_idle(gpu, gpu->rb[0]))
+ return false;
+
+ /* then wait for GPU to finish: */
+ if (spin_until(!(gpu_read(gpu, REG_A3XX_RBBM_STATUS) &
+ A3XX_RBBM_STATUS_GPU_BUSY))) {
+ DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ return false;
+ }
+
+ return true;
+}
+
+static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
+{
+ uint32_t status;
+
+ status = gpu_read(gpu, REG_A3XX_RBBM_INT_0_STATUS);
+ DBG("%s: %08x", gpu->name, status);
+
+ /* TODO: decode/report the individual error bits (see A3XX_INT0_MASK
+ * above); for now we just ack everything and retire completed submits:
+ */
+
+ gpu_write(gpu, REG_A3XX_RBBM_INT_CLEAR_CMD, status);
+
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
+static const unsigned int a3xx_registers[] = {
+ 0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027,
+ 0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c,
+ 0x0060, 0x006c, 0x0080, 0x0082, 0x0084, 0x0088, 0x0090, 0x00e5,
+ 0x00ea, 0x00ed, 0x0100, 0x0100, 0x0110, 0x0123, 0x01c0, 0x01c1,
+ 0x01c3, 0x01c5, 0x01c7, 0x01c7, 0x01d5, 0x01d9, 0x01dc, 0x01dd,
+ 0x01ea, 0x01ea, 0x01ee, 0x01f1, 0x01f5, 0x01f5, 0x01fc, 0x01ff,
+ 0x0440, 0x0440, 0x0443, 0x0443, 0x0445, 0x0445, 0x044d, 0x044f,
+ 0x0452, 0x0452, 0x0454, 0x046f, 0x047c, 0x047c, 0x047f, 0x047f,
+ 0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e,
+ 0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f,
+ 0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7,
+ 0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5, 0x0e00, 0x0e05,
+ 0x0e0c, 0x0e0c, 0x0e22, 0x0e23, 0x0e41, 0x0e45, 0x0e64, 0x0e65,
+ 0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7,
+ 0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09,
+ 0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069,
+ 0x206c, 0x206d, 0x2070, 0x2070, 0x2072, 0x2072, 0x2074, 0x2075,
+ 0x2079, 0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109,
+ 0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115,
+ 0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0,
+ 0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e,
+ 0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
+ 0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
+ 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2440, 0x2440, 0x2444, 0x2444,
+ 0x2448, 0x244d, 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470,
+ 0x2472, 0x2472, 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3,
+ 0x24e4, 0x24ef, 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e,
+ 0x2510, 0x2511, 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea,
+ 0x25ec, 0x25ed, 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617,
+ 0x261a, 0x261a, 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0,
+ 0x26c4, 0x26ce, 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9,
+ 0x26ec, 0x26ec, 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743,
+ 0x300c, 0x300e, 0x301c, 0x301d, 0x302a, 0x302a, 0x302c, 0x302d,
+ 0x3030, 0x3031, 0x3034, 0x3036, 0x303c, 0x303c, 0x305e, 0x305f,
+ ~0 /* sentinel */
+};
+
+/* it would be nice not to have to duplicate the _show() stuff with printk(): */
+static void a3xx_dump(struct msm_gpu *gpu)
+{
+ printk("status: %08x\n",
+ gpu_read(gpu, REG_A3XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+static struct msm_gpu_state *a3xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu_state_get(gpu, state);
+
+ state->rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS);
+
+ return state;
+}
+
+static u32 a3xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR);
+ return ring->memptrs->rptr;
+}
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a3xx_hw_init,
+ .pm_suspend = msm_gpu_pm_suspend,
+ .pm_resume = msm_gpu_pm_resume,
+ .recover = a3xx_recover,
+ .submit = a3xx_submit,
+ .active_ring = adreno_active_ring,
+ .irq = a3xx_irq,
+ .destroy = a3xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
+#endif
+ .gpu_state_get = a3xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
+ .create_address_space = adreno_iommu_create_address_space,
+ .get_rptr = a3xx_get_rptr,
+ },
+};
+
+static const struct msm_gpu_perfcntr perfcntrs[] = {
+ { REG_A3XX_SP_PERFCOUNTER6_SELECT, REG_A3XX_RBBM_PERFCTR_SP_6_LO,
+ SP_ALU_ACTIVE_CYCLES, "ALUACTIVE" },
+ { REG_A3XX_SP_PERFCOUNTER7_SELECT, REG_A3XX_RBBM_PERFCTR_SP_7_LO,
+ SP_FS_FULL_ALU_INSTRUCTIONS, "ALUFULL" },
+};
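+
+/*
+ * Each entry pairs a select register (programmed with select_val by the
+ * perf-counter loop in a3xx_hw_init()) with the LO half of the counter
+ * register that the core then samples.
+ */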
+
+struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
+{
+ struct a3xx_gpu *a3xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct icc_path *ocmem_icc_path;
+ struct icc_path *icc_path;
+ int ret;
+
+ if (!pdev) {
+ DRM_DEV_ERROR(dev->dev, "no a3xx device\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ a3xx_gpu = kzalloc(sizeof(*a3xx_gpu), GFP_KERNEL);
+ if (!a3xx_gpu) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ adreno_gpu = &a3xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ gpu->perfcntrs = perfcntrs;
+ gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
+
+ adreno_gpu->registers = a3xx_registers;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ if (ret)
+ goto fail;
+
+ /* if needed, allocate OCMEM backing for GMEM: */
+ if (adreno_is_a330(adreno_gpu)) {
+ ret = adreno_gpu_ocmem_init(&adreno_gpu->base.pdev->dev,
+ adreno_gpu, &a3xx_gpu->ocmem);
+ if (ret)
+ goto fail;
+ }
+
+ if (!gpu->aspace) {
+ /* TODO we think it is possible to configure the GPU to
+ * restrict access to VRAM carveout. But the required
+ * registers are unknown. For now just bail out and
+ * limp along with just modesetting. If it turns out
+ * to not be possible to restrict access, then we must
+ * implement a cmdstream validator.
+ */
+ DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
+ }
+
+ icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
+ if (IS_ERR(icc_path)) {
+ ret = PTR_ERR(icc_path);
+ goto fail;
+ }
+
+ ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem");
+ if (IS_ERR(ocmem_icc_path)) {
+ ret = PTR_ERR(ocmem_icc_path);
+ /* allow -ENODATA, ocmem icc is optional */
+ if (ret != -ENODATA)
+ goto fail;
+ ocmem_icc_path = NULL;
+ }
+
+ /*
+ * Set the ICC path to maximum speed for now by multiplying the fastest
+ * frequency by the bus width (8). We'll want to scale this later on to
+ * improve battery life.
+ */
+ icc_set_bw(icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
+ icc_set_bw(ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
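+ /* For example, with a hypothetical fast_rate of 450 MHz this requests
+ * 450e6 * 8 = 3.6e9 bytes/sec of peak bandwidth on each path.
+ */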
+
+ return gpu;
+
+fail:
+ if (a3xx_gpu)
+ a3xx_destroy(&a3xx_gpu->base.base);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
new file mode 100644
index 000000000..c555fb13e
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __A3XX_GPU_H__
+#define __A3XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a3xx.xml.h"
+
+struct a3xx_gpu {
+ struct adreno_gpu base;
+
+ /* if OCMEM is used for GMEM: */
+ struct adreno_ocmem ocmem;
+};
+#define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base)
+
+#endif /* __A3XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
new file mode 100644
index 000000000..7e5c21015
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -0,0 +1,4349 @@
+#ifndef A4XX_XML
+#define A4XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56)
+
+Copyright (C) 2013-2022 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a4xx_color_fmt {
+ RB4_A8_UNORM = 1,
+ RB4_R8_UNORM = 2,
+ RB4_R8_SNORM = 3,
+ RB4_R8_UINT = 4,
+ RB4_R8_SINT = 5,
+ RB4_R4G4B4A4_UNORM = 8,
+ RB4_R5G5B5A1_UNORM = 10,
+ RB4_R5G6B5_UNORM = 14,
+ RB4_R8G8_UNORM = 15,
+ RB4_R8G8_SNORM = 16,
+ RB4_R8G8_UINT = 17,
+ RB4_R8G8_SINT = 18,
+ RB4_R16_UNORM = 19,
+ RB4_R16_SNORM = 20,
+ RB4_R16_FLOAT = 21,
+ RB4_R16_UINT = 22,
+ RB4_R16_SINT = 23,
+ RB4_R8G8B8_UNORM = 25,
+ RB4_R8G8B8A8_UNORM = 26,
+ RB4_R8G8B8A8_SNORM = 28,
+ RB4_R8G8B8A8_UINT = 29,
+ RB4_R8G8B8A8_SINT = 30,
+ RB4_R10G10B10A2_UNORM = 31,
+ RB4_R10G10B10A2_UINT = 34,
+ RB4_R11G11B10_FLOAT = 39,
+ RB4_R16G16_UNORM = 40,
+ RB4_R16G16_SNORM = 41,
+ RB4_R16G16_FLOAT = 42,
+ RB4_R16G16_UINT = 43,
+ RB4_R16G16_SINT = 44,
+ RB4_R32_FLOAT = 45,
+ RB4_R32_UINT = 46,
+ RB4_R32_SINT = 47,
+ RB4_R16G16B16A16_UNORM = 52,
+ RB4_R16G16B16A16_SNORM = 53,
+ RB4_R16G16B16A16_FLOAT = 54,
+ RB4_R16G16B16A16_UINT = 55,
+ RB4_R16G16B16A16_SINT = 56,
+ RB4_R32G32_FLOAT = 57,
+ RB4_R32G32_UINT = 58,
+ RB4_R32G32_SINT = 59,
+ RB4_R32G32B32A32_FLOAT = 60,
+ RB4_R32G32B32A32_UINT = 61,
+ RB4_R32G32B32A32_SINT = 62,
+ RB4_NONE = 255,
+};
+
+enum a4xx_tile_mode {
+ TILE4_LINEAR = 0,
+ TILE4_2 = 2,
+ TILE4_3 = 3,
+};
+
+enum a4xx_vtx_fmt {
+ VFMT4_32_FLOAT = 1,
+ VFMT4_32_32_FLOAT = 2,
+ VFMT4_32_32_32_FLOAT = 3,
+ VFMT4_32_32_32_32_FLOAT = 4,
+ VFMT4_16_FLOAT = 5,
+ VFMT4_16_16_FLOAT = 6,
+ VFMT4_16_16_16_FLOAT = 7,
+ VFMT4_16_16_16_16_FLOAT = 8,
+ VFMT4_32_FIXED = 9,
+ VFMT4_32_32_FIXED = 10,
+ VFMT4_32_32_32_FIXED = 11,
+ VFMT4_32_32_32_32_FIXED = 12,
+ VFMT4_11_11_10_FLOAT = 13,
+ VFMT4_16_SINT = 16,
+ VFMT4_16_16_SINT = 17,
+ VFMT4_16_16_16_SINT = 18,
+ VFMT4_16_16_16_16_SINT = 19,
+ VFMT4_16_UINT = 20,
+ VFMT4_16_16_UINT = 21,
+ VFMT4_16_16_16_UINT = 22,
+ VFMT4_16_16_16_16_UINT = 23,
+ VFMT4_16_SNORM = 24,
+ VFMT4_16_16_SNORM = 25,
+ VFMT4_16_16_16_SNORM = 26,
+ VFMT4_16_16_16_16_SNORM = 27,
+ VFMT4_16_UNORM = 28,
+ VFMT4_16_16_UNORM = 29,
+ VFMT4_16_16_16_UNORM = 30,
+ VFMT4_16_16_16_16_UNORM = 31,
+ VFMT4_32_UINT = 32,
+ VFMT4_32_32_UINT = 33,
+ VFMT4_32_32_32_UINT = 34,
+ VFMT4_32_32_32_32_UINT = 35,
+ VFMT4_32_SINT = 36,
+ VFMT4_32_32_SINT = 37,
+ VFMT4_32_32_32_SINT = 38,
+ VFMT4_32_32_32_32_SINT = 39,
+ VFMT4_8_UINT = 40,
+ VFMT4_8_8_UINT = 41,
+ VFMT4_8_8_8_UINT = 42,
+ VFMT4_8_8_8_8_UINT = 43,
+ VFMT4_8_UNORM = 44,
+ VFMT4_8_8_UNORM = 45,
+ VFMT4_8_8_8_UNORM = 46,
+ VFMT4_8_8_8_8_UNORM = 47,
+ VFMT4_8_SINT = 48,
+ VFMT4_8_8_SINT = 49,
+ VFMT4_8_8_8_SINT = 50,
+ VFMT4_8_8_8_8_SINT = 51,
+ VFMT4_8_SNORM = 52,
+ VFMT4_8_8_SNORM = 53,
+ VFMT4_8_8_8_SNORM = 54,
+ VFMT4_8_8_8_8_SNORM = 55,
+ VFMT4_10_10_10_2_UINT = 56,
+ VFMT4_10_10_10_2_UNORM = 57,
+ VFMT4_10_10_10_2_SINT = 58,
+ VFMT4_10_10_10_2_SNORM = 59,
+ VFMT4_2_10_10_10_UINT = 60,
+ VFMT4_2_10_10_10_UNORM = 61,
+ VFMT4_2_10_10_10_SINT = 62,
+ VFMT4_2_10_10_10_SNORM = 63,
+ VFMT4_NONE = 255,
+};
+
+enum a4xx_tex_fmt {
+ TFMT4_A8_UNORM = 3,
+ TFMT4_8_UNORM = 4,
+ TFMT4_8_SNORM = 5,
+ TFMT4_8_UINT = 6,
+ TFMT4_8_SINT = 7,
+ TFMT4_4_4_4_4_UNORM = 8,
+ TFMT4_5_5_5_1_UNORM = 9,
+ TFMT4_5_6_5_UNORM = 11,
+ TFMT4_L8_A8_UNORM = 13,
+ TFMT4_8_8_UNORM = 14,
+ TFMT4_8_8_SNORM = 15,
+ TFMT4_8_8_UINT = 16,
+ TFMT4_8_8_SINT = 17,
+ TFMT4_16_UNORM = 18,
+ TFMT4_16_SNORM = 19,
+ TFMT4_16_FLOAT = 20,
+ TFMT4_16_UINT = 21,
+ TFMT4_16_SINT = 22,
+ TFMT4_8_8_8_8_UNORM = 28,
+ TFMT4_8_8_8_8_SNORM = 29,
+ TFMT4_8_8_8_8_UINT = 30,
+ TFMT4_8_8_8_8_SINT = 31,
+ TFMT4_9_9_9_E5_FLOAT = 32,
+ TFMT4_10_10_10_2_UNORM = 33,
+ TFMT4_10_10_10_2_UINT = 34,
+ TFMT4_11_11_10_FLOAT = 37,
+ TFMT4_16_16_UNORM = 38,
+ TFMT4_16_16_SNORM = 39,
+ TFMT4_16_16_FLOAT = 40,
+ TFMT4_16_16_UINT = 41,
+ TFMT4_16_16_SINT = 42,
+ TFMT4_32_FLOAT = 43,
+ TFMT4_32_UINT = 44,
+ TFMT4_32_SINT = 45,
+ TFMT4_16_16_16_16_UNORM = 51,
+ TFMT4_16_16_16_16_SNORM = 52,
+ TFMT4_16_16_16_16_FLOAT = 53,
+ TFMT4_16_16_16_16_UINT = 54,
+ TFMT4_16_16_16_16_SINT = 55,
+ TFMT4_32_32_FLOAT = 56,
+ TFMT4_32_32_UINT = 57,
+ TFMT4_32_32_SINT = 58,
+ TFMT4_32_32_32_FLOAT = 59,
+ TFMT4_32_32_32_UINT = 60,
+ TFMT4_32_32_32_SINT = 61,
+ TFMT4_32_32_32_32_FLOAT = 63,
+ TFMT4_32_32_32_32_UINT = 64,
+ TFMT4_32_32_32_32_SINT = 65,
+ TFMT4_X8Z24_UNORM = 71,
+ TFMT4_DXT1 = 86,
+ TFMT4_DXT3 = 87,
+ TFMT4_DXT5 = 88,
+ TFMT4_RGTC1_UNORM = 90,
+ TFMT4_RGTC1_SNORM = 91,
+ TFMT4_RGTC2_UNORM = 94,
+ TFMT4_RGTC2_SNORM = 95,
+ TFMT4_BPTC_UFLOAT = 97,
+ TFMT4_BPTC_FLOAT = 98,
+ TFMT4_BPTC = 99,
+ TFMT4_ATC_RGB = 100,
+ TFMT4_ATC_RGBA_EXPLICIT = 101,
+ TFMT4_ATC_RGBA_INTERPOLATED = 102,
+ TFMT4_ETC2_RG11_UNORM = 103,
+ TFMT4_ETC2_RG11_SNORM = 104,
+ TFMT4_ETC2_R11_UNORM = 105,
+ TFMT4_ETC2_R11_SNORM = 106,
+ TFMT4_ETC1 = 107,
+ TFMT4_ETC2_RGB8 = 108,
+ TFMT4_ETC2_RGBA8 = 109,
+ TFMT4_ETC2_RGB8A1 = 110,
+ TFMT4_ASTC_4x4 = 111,
+ TFMT4_ASTC_5x4 = 112,
+ TFMT4_ASTC_5x5 = 113,
+ TFMT4_ASTC_6x5 = 114,
+ TFMT4_ASTC_6x6 = 115,
+ TFMT4_ASTC_8x5 = 116,
+ TFMT4_ASTC_8x6 = 117,
+ TFMT4_ASTC_8x8 = 118,
+ TFMT4_ASTC_10x5 = 119,
+ TFMT4_ASTC_10x6 = 120,
+ TFMT4_ASTC_10x8 = 121,
+ TFMT4_ASTC_10x10 = 122,
+ TFMT4_ASTC_12x10 = 123,
+ TFMT4_ASTC_12x12 = 124,
+ TFMT4_NONE = 255,
+};
+
+enum a4xx_depth_format {
+ DEPTH4_NONE = 0,
+ DEPTH4_16 = 1,
+ DEPTH4_24_8 = 2,
+ DEPTH4_32 = 3,
+};
+
+enum a4xx_ccu_perfcounter_select {
+ CCU_BUSY_CYCLES = 0,
+ CCU_RB_DEPTH_RETURN_STALL = 2,
+ CCU_RB_COLOR_RETURN_STALL = 3,
+ CCU_DEPTH_BLOCKS = 6,
+ CCU_COLOR_BLOCKS = 7,
+ CCU_DEPTH_BLOCK_HIT = 8,
+ CCU_COLOR_BLOCK_HIT = 9,
+ CCU_DEPTH_FLAG1_COUNT = 10,
+ CCU_DEPTH_FLAG2_COUNT = 11,
+ CCU_DEPTH_FLAG3_COUNT = 12,
+ CCU_DEPTH_FLAG4_COUNT = 13,
+ CCU_COLOR_FLAG1_COUNT = 14,
+ CCU_COLOR_FLAG2_COUNT = 15,
+ CCU_COLOR_FLAG3_COUNT = 16,
+ CCU_COLOR_FLAG4_COUNT = 17,
+ CCU_PARTIAL_BLOCK_READ = 18,
+};
+
+enum a4xx_cp_perfcounter_select {
+ CP_ALWAYS_COUNT = 0,
+ CP_BUSY = 1,
+ CP_PFP_IDLE = 2,
+ CP_PFP_BUSY_WORKING = 3,
+ CP_PFP_STALL_CYCLES_ANY = 4,
+ CP_PFP_STARVE_CYCLES_ANY = 5,
+ CP_PFP_STARVED_PER_LOAD_ADDR = 6,
+ CP_PFP_STALLED_PER_STORE_ADDR = 7,
+ CP_PFP_PC_PROFILE = 8,
+ CP_PFP_MATCH_PM4_PKT_PROFILE = 9,
+ CP_PFP_COND_INDIRECT_DISCARDED = 10,
+ CP_LONG_RESUMPTIONS = 11,
+ CP_RESUME_CYCLES = 12,
+ CP_RESUME_TO_BOUNDARY_CYCLES = 13,
+ CP_LONG_PREEMPTIONS = 14,
+ CP_PREEMPT_CYCLES = 15,
+ CP_PREEMPT_TO_BOUNDARY_CYCLES = 16,
+ CP_ME_FIFO_EMPTY_PFP_IDLE = 17,
+ CP_ME_FIFO_EMPTY_PFP_BUSY = 18,
+ CP_ME_FIFO_NOT_EMPTY_NOT_FULL = 19,
+ CP_ME_FIFO_FULL_ME_BUSY = 20,
+ CP_ME_FIFO_FULL_ME_NON_WORKING = 21,
+ CP_ME_WAITING_FOR_PACKETS = 22,
+ CP_ME_BUSY_WORKING = 23,
+ CP_ME_STARVE_CYCLES_ANY = 24,
+ CP_ME_STARVE_CYCLES_PER_PROFILE = 25,
+ CP_ME_STALL_CYCLES_PER_PROFILE = 26,
+ CP_ME_PC_PROFILE = 27,
+ CP_RCIU_FIFO_EMPTY = 28,
+ CP_RCIU_FIFO_NOT_EMPTY_NOT_FULL = 29,
+ CP_RCIU_FIFO_FULL = 30,
+ CP_RCIU_FIFO_FULL_NO_CONTEXT = 31,
+ CP_RCIU_FIFO_FULL_AHB_MASTER = 32,
+ CP_RCIU_FIFO_FULL_OTHER = 33,
+ CP_AHB_IDLE = 34,
+ CP_AHB_STALL_ON_GRANT_NO_SPLIT = 35,
+ CP_AHB_STALL_ON_GRANT_SPLIT = 36,
+ CP_AHB_STALL_ON_GRANT_SPLIT_PROFILE = 37,
+ CP_AHB_BUSY_WORKING = 38,
+ CP_AHB_BUSY_STALL_ON_HRDY = 39,
+ CP_AHB_BUSY_STALL_ON_HRDY_PROFILE = 40,
+};
+
+enum a4xx_gras_ras_perfcounter_select {
+ RAS_SUPER_TILES = 0,
+ RAS_8X8_TILES = 1,
+ RAS_4X4_TILES = 2,
+ RAS_BUSY_CYCLES = 3,
+ RAS_STALL_CYCLES_BY_RB = 4,
+ RAS_STALL_CYCLES_BY_VSC = 5,
+ RAS_STARVE_CYCLES_BY_TSE = 6,
+ RAS_SUPERTILE_CYCLES = 7,
+ RAS_TILE_CYCLES = 8,
+ RAS_FULLY_COVERED_SUPER_TILES = 9,
+ RAS_FULLY_COVERED_8X8_TILES = 10,
+ RAS_4X4_PRIM = 11,
+ RAS_8X4_4X8_PRIM = 12,
+ RAS_8X8_PRIM = 13,
+};
+
+enum a4xx_gras_tse_perfcounter_select {
+ TSE_INPUT_PRIM = 0,
+ TSE_INPUT_NULL_PRIM = 1,
+ TSE_TRIVAL_REJ_PRIM = 2,
+ TSE_CLIPPED_PRIM = 3,
+ TSE_NEW_PRIM = 4,
+ TSE_ZERO_AREA_PRIM = 5,
+ TSE_FACENESS_CULLED_PRIM = 6,
+ TSE_ZERO_PIXEL_PRIM = 7,
+ TSE_OUTPUT_NULL_PRIM = 8,
+ TSE_OUTPUT_VISIBLE_PRIM = 9,
+ TSE_PRE_CLIP_PRIM = 10,
+ TSE_POST_CLIP_PRIM = 11,
+ TSE_BUSY_CYCLES = 12,
+ TSE_PC_STARVE = 13,
+ TSE_RAS_STALL = 14,
+ TSE_STALL_BARYPLANE_FIFO_FULL = 15,
+ TSE_STALL_ZPLANE_FIFO_FULL = 16,
+};
+
+enum a4xx_hlsq_perfcounter_select {
+ HLSQ_SP_VS_STAGE_CONSTANT = 0,
+ HLSQ_SP_VS_STAGE_INSTRUCTIONS = 1,
+ HLSQ_SP_FS_STAGE_CONSTANT = 2,
+ HLSQ_SP_FS_STAGE_INSTRUCTIONS = 3,
+ HLSQ_TP_STATE = 4,
+ HLSQ_QUADS = 5,
+ HLSQ_PIXELS = 6,
+ HLSQ_VERTICES = 7,
+ HLSQ_SP_VS_STAGE_DATA_BYTES = 13,
+ HLSQ_SP_FS_STAGE_DATA_BYTES = 14,
+ HLSQ_BUSY_CYCLES = 15,
+ HLSQ_STALL_CYCLES_SP_STATE = 16,
+ HLSQ_STALL_CYCLES_SP_VS_STAGE = 17,
+ HLSQ_STALL_CYCLES_SP_FS_STAGE = 18,
+ HLSQ_STALL_CYCLES_UCHE = 19,
+ HLSQ_RBBM_LOAD_CYCLES = 20,
+ HLSQ_DI_TO_VS_START_SP = 21,
+ HLSQ_DI_TO_FS_START_SP = 22,
+ HLSQ_VS_STAGE_START_TO_DONE_SP = 23,
+ HLSQ_FS_STAGE_START_TO_DONE_SP = 24,
+ HLSQ_SP_STATE_COPY_CYCLES_VS_STAGE = 25,
+ HLSQ_SP_STATE_COPY_CYCLES_FS_STAGE = 26,
+ HLSQ_UCHE_LATENCY_CYCLES = 27,
+ HLSQ_UCHE_LATENCY_COUNT = 28,
+ HLSQ_STARVE_CYCLES_VFD = 29,
+};
+
+enum a4xx_pc_perfcounter_select {
+ PC_VIS_STREAMS_LOADED = 0,
+ PC_VPC_PRIMITIVES = 2,
+ PC_DEAD_PRIM = 3,
+ PC_LIVE_PRIM = 4,
+ PC_DEAD_DRAWCALLS = 5,
+ PC_LIVE_DRAWCALLS = 6,
+ PC_VERTEX_MISSES = 7,
+ PC_STALL_CYCLES_VFD = 9,
+ PC_STALL_CYCLES_TSE = 10,
+ PC_STALL_CYCLES_UCHE = 11,
+ PC_WORKING_CYCLES = 12,
+ PC_IA_VERTICES = 13,
+ PC_GS_PRIMITIVES = 14,
+ PC_HS_INVOCATIONS = 15,
+ PC_DS_INVOCATIONS = 16,
+ PC_DS_PRIMITIVES = 17,
+ PC_STARVE_CYCLES_FOR_INDEX = 20,
+ PC_STARVE_CYCLES_FOR_TESS_FACTOR = 21,
+ PC_STARVE_CYCLES_FOR_VIZ_STREAM = 22,
+ PC_STALL_CYCLES_TESS = 23,
+ PC_STARVE_CYCLES_FOR_POSITION = 24,
+ PC_MODE0_DRAWCALL = 25,
+ PC_MODE1_DRAWCALL = 26,
+ PC_MODE2_DRAWCALL = 27,
+ PC_MODE3_DRAWCALL = 28,
+ PC_MODE4_DRAWCALL = 29,
+ PC_PREDICATED_DEAD_DRAWCALL = 30,
+ PC_STALL_CYCLES_BY_TSE_ONLY = 31,
+ PC_STALL_CYCLES_BY_VPC_ONLY = 32,
+ PC_VPC_POS_DATA_TRANSACTION = 33,
+ PC_BUSY_CYCLES = 34,
+ PC_STARVE_CYCLES_DI = 35,
+ PC_STALL_CYCLES_VPC = 36,
+ TESS_WORKING_CYCLES = 37,
+ TESS_NUM_CYCLES_SETUP_WORKING = 38,
+ TESS_NUM_CYCLES_PTGEN_WORKING = 39,
+ TESS_NUM_CYCLES_CONNGEN_WORKING = 40,
+ TESS_BUSY_CYCLES = 41,
+ TESS_STARVE_CYCLES_PC = 42,
+ TESS_STALL_CYCLES_PC = 43,
+};
+
+enum a4xx_pwr_perfcounter_select {
+ PWR_CORE_CLOCK_CYCLES = 0,
+ PWR_BUSY_CLOCK_CYCLES = 1,
+};
+
+enum a4xx_rb_perfcounter_select {
+ RB_BUSY_CYCLES = 0,
+ RB_BUSY_CYCLES_BINNING = 1,
+ RB_BUSY_CYCLES_RENDERING = 2,
+ RB_BUSY_CYCLES_RESOLVE = 3,
+ RB_STARVE_CYCLES_BY_SP = 4,
+ RB_STARVE_CYCLES_BY_RAS = 5,
+ RB_STARVE_CYCLES_BY_MARB = 6,
+ RB_STALL_CYCLES_BY_MARB = 7,
+ RB_STALL_CYCLES_BY_HLSQ = 8,
+ RB_RB_RB_MARB_DATA = 9,
+ RB_SP_RB_QUAD = 10,
+ RB_RAS_RB_Z_QUADS = 11,
+ RB_GMEM_CH0_READ = 12,
+ RB_GMEM_CH1_READ = 13,
+ RB_GMEM_CH0_WRITE = 14,
+ RB_GMEM_CH1_WRITE = 15,
+ RB_CP_CONTEXT_DONE = 16,
+ RB_CP_CACHE_FLUSH = 17,
+ RB_CP_ZPASS_DONE = 18,
+ RB_STALL_FIFO0_FULL = 19,
+ RB_STALL_FIFO1_FULL = 20,
+ RB_STALL_FIFO2_FULL = 21,
+ RB_STALL_FIFO3_FULL = 22,
+ RB_RB_HLSQ_TRANSACTIONS = 23,
+ RB_Z_READ = 24,
+ RB_Z_WRITE = 25,
+ RB_C_READ = 26,
+ RB_C_WRITE = 27,
+ RB_C_READ_LATENCY = 28,
+ RB_Z_READ_LATENCY = 29,
+ RB_STALL_BY_UCHE = 30,
+ RB_MARB_UCHE_TRANSACTIONS = 31,
+ RB_CACHE_STALL_MISS = 32,
+ RB_CACHE_STALL_FIFO_FULL = 33,
+ RB_8BIT_BLENDER_UNITS_ACTIVE = 34,
+ RB_16BIT_BLENDER_UNITS_ACTIVE = 35,
+ RB_SAMPLER_UNITS_ACTIVE = 36,
+ RB_TOTAL_PASS = 38,
+ RB_Z_PASS = 39,
+ RB_Z_FAIL = 40,
+ RB_S_FAIL = 41,
+ RB_POWER0 = 42,
+ RB_POWER1 = 43,
+ RB_POWER2 = 44,
+ RB_POWER3 = 45,
+ RB_POWER4 = 46,
+ RB_POWER5 = 47,
+ RB_POWER6 = 48,
+ RB_POWER7 = 49,
+};
+
+enum a4xx_rbbm_perfcounter_select {
+ RBBM_ALWAYS_ON = 0,
+ RBBM_VBIF_BUSY = 1,
+ RBBM_TSE_BUSY = 2,
+ RBBM_RAS_BUSY = 3,
+ RBBM_PC_DCALL_BUSY = 4,
+ RBBM_PC_VSD_BUSY = 5,
+ RBBM_VFD_BUSY = 6,
+ RBBM_VPC_BUSY = 7,
+ RBBM_UCHE_BUSY = 8,
+ RBBM_VSC_BUSY = 9,
+ RBBM_HLSQ_BUSY = 10,
+ RBBM_ANY_RB_BUSY = 11,
+ RBBM_ANY_TPL1_BUSY = 12,
+ RBBM_ANY_SP_BUSY = 13,
+ RBBM_ANY_MARB_BUSY = 14,
+ RBBM_ANY_ARB_BUSY = 15,
+ RBBM_AHB_STATUS_BUSY = 16,
+ RBBM_AHB_STATUS_STALLED = 17,
+ RBBM_AHB_STATUS_TXFR = 18,
+ RBBM_AHB_STATUS_TXFR_SPLIT = 19,
+ RBBM_AHB_STATUS_TXFR_ERROR = 20,
+ RBBM_AHB_STATUS_LONG_STALL = 21,
+ RBBM_STATUS_MASKED = 22,
+ RBBM_CP_BUSY_GFX_CORE_IDLE = 23,
+ RBBM_TESS_BUSY = 24,
+ RBBM_COM_BUSY = 25,
+ RBBM_DCOM_BUSY = 32,
+ RBBM_ANY_CCU_BUSY = 33,
+ RBBM_DPM_BUSY = 34,
+};
+
+enum a4xx_sp_perfcounter_select {
+ SP_LM_LOAD_INSTRUCTIONS = 0,
+ SP_LM_STORE_INSTRUCTIONS = 1,
+ SP_LM_ATOMICS = 2,
+ SP_GM_LOAD_INSTRUCTIONS = 3,
+ SP_GM_STORE_INSTRUCTIONS = 4,
+ SP_GM_ATOMICS = 5,
+ SP_VS_STAGE_TEX_INSTRUCTIONS = 6,
+ SP_VS_STAGE_CFLOW_INSTRUCTIONS = 7,
+ SP_VS_STAGE_EFU_INSTRUCTIONS = 8,
+ SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 9,
+ SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 10,
+ SP_FS_STAGE_TEX_INSTRUCTIONS = 11,
+ SP_FS_STAGE_CFLOW_INSTRUCTIONS = 12,
+ SP_FS_STAGE_EFU_INSTRUCTIONS = 13,
+ SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 14,
+ SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 15,
+ SP_VS_INSTRUCTIONS = 17,
+ SP_FS_INSTRUCTIONS = 18,
+ SP_ADDR_LOCK_COUNT = 19,
+ SP_UCHE_READ_TRANS = 20,
+ SP_UCHE_WRITE_TRANS = 21,
+ SP_EXPORT_VPC_TRANS = 22,
+ SP_EXPORT_RB_TRANS = 23,
+ SP_PIXELS_KILLED = 24,
+ SP_ICL1_REQUESTS = 25,
+ SP_ICL1_MISSES = 26,
+ SP_ICL0_REQUESTS = 27,
+ SP_ICL0_MISSES = 28,
+ SP_ALU_WORKING_CYCLES = 29,
+ SP_EFU_WORKING_CYCLES = 30,
+ SP_STALL_CYCLES_BY_VPC = 31,
+ SP_STALL_CYCLES_BY_TP = 32,
+ SP_STALL_CYCLES_BY_UCHE = 33,
+ SP_STALL_CYCLES_BY_RB = 34,
+ SP_BUSY_CYCLES = 35,
+ SP_HS_INSTRUCTIONS = 36,
+ SP_DS_INSTRUCTIONS = 37,
+ SP_GS_INSTRUCTIONS = 38,
+ SP_CS_INSTRUCTIONS = 39,
+ SP_SCHEDULER_NON_WORKING = 40,
+ SP_WAVE_CONTEXTS = 41,
+ SP_WAVE_CONTEXT_CYCLES = 42,
+ SP_POWER0 = 43,
+ SP_POWER1 = 44,
+ SP_POWER2 = 45,
+ SP_POWER3 = 46,
+ SP_POWER4 = 47,
+ SP_POWER5 = 48,
+ SP_POWER6 = 49,
+ SP_POWER7 = 50,
+ SP_POWER8 = 51,
+ SP_POWER9 = 52,
+ SP_POWER10 = 53,
+ SP_POWER11 = 54,
+ SP_POWER12 = 55,
+ SP_POWER13 = 56,
+ SP_POWER14 = 57,
+ SP_POWER15 = 58,
+};
+
+enum a4xx_tp_perfcounter_select {
+ TP_L1_REQUESTS = 0,
+ TP_L1_MISSES = 1,
+ TP_QUADS_OFFSET = 8,
+ TP_QUAD_SHADOW = 9,
+ TP_QUADS_ARRAY = 10,
+ TP_QUADS_GRADIENT = 11,
+ TP_QUADS_1D2D = 12,
+ TP_QUADS_3DCUBE = 13,
+ TP_BUSY_CYCLES = 16,
+ TP_STALL_CYCLES_BY_ARB = 17,
+ TP_STATE_CACHE_REQUESTS = 20,
+ TP_STATE_CACHE_MISSES = 21,
+ TP_POWER0 = 22,
+ TP_POWER1 = 23,
+ TP_POWER2 = 24,
+ TP_POWER3 = 25,
+ TP_POWER4 = 26,
+ TP_POWER5 = 27,
+ TP_POWER6 = 28,
+ TP_POWER7 = 29,
+};
+
+enum a4xx_uche_perfcounter_select {
+ UCHE_VBIF_READ_BEATS_TP = 0,
+ UCHE_VBIF_READ_BEATS_VFD = 1,
+ UCHE_VBIF_READ_BEATS_HLSQ = 2,
+ UCHE_VBIF_READ_BEATS_MARB = 3,
+ UCHE_VBIF_READ_BEATS_SP = 4,
+ UCHE_READ_REQUESTS_TP = 5,
+ UCHE_READ_REQUESTS_VFD = 6,
+ UCHE_READ_REQUESTS_HLSQ = 7,
+ UCHE_READ_REQUESTS_MARB = 8,
+ UCHE_READ_REQUESTS_SP = 9,
+ UCHE_WRITE_REQUESTS_MARB = 10,
+ UCHE_WRITE_REQUESTS_SP = 11,
+ UCHE_TAG_CHECK_FAILS = 12,
+ UCHE_EVICTS = 13,
+ UCHE_FLUSHES = 14,
+ UCHE_VBIF_LATENCY_CYCLES = 15,
+ UCHE_VBIF_LATENCY_SAMPLES = 16,
+ UCHE_BUSY_CYCLES = 17,
+ UCHE_VBIF_READ_BEATS_PC = 18,
+ UCHE_READ_REQUESTS_PC = 19,
+ UCHE_WRITE_REQUESTS_VPC = 20,
+ UCHE_STALL_BY_VBIF = 21,
+ UCHE_WRITE_REQUESTS_VSC = 22,
+ UCHE_POWER0 = 23,
+ UCHE_POWER1 = 24,
+ UCHE_POWER2 = 25,
+ UCHE_POWER3 = 26,
+ UCHE_POWER4 = 27,
+ UCHE_POWER5 = 28,
+ UCHE_POWER6 = 29,
+ UCHE_POWER7 = 30,
+};
+
+enum a4xx_vbif_perfcounter_select {
+ AXI_READ_REQUESTS_ID_0 = 0,
+ AXI_READ_REQUESTS_ID_1 = 1,
+ AXI_READ_REQUESTS_ID_2 = 2,
+ AXI_READ_REQUESTS_ID_3 = 3,
+ AXI_READ_REQUESTS_ID_4 = 4,
+ AXI_READ_REQUESTS_ID_5 = 5,
+ AXI_READ_REQUESTS_ID_6 = 6,
+ AXI_READ_REQUESTS_ID_7 = 7,
+ AXI_READ_REQUESTS_ID_8 = 8,
+ AXI_READ_REQUESTS_ID_9 = 9,
+ AXI_READ_REQUESTS_ID_10 = 10,
+ AXI_READ_REQUESTS_ID_11 = 11,
+ AXI_READ_REQUESTS_ID_12 = 12,
+ AXI_READ_REQUESTS_ID_13 = 13,
+ AXI_READ_REQUESTS_ID_14 = 14,
+ AXI_READ_REQUESTS_ID_15 = 15,
+ AXI0_READ_REQUESTS_TOTAL = 16,
+ AXI1_READ_REQUESTS_TOTAL = 17,
+ AXI2_READ_REQUESTS_TOTAL = 18,
+ AXI3_READ_REQUESTS_TOTAL = 19,
+ AXI_READ_REQUESTS_TOTAL = 20,
+ AXI_WRITE_REQUESTS_ID_0 = 21,
+ AXI_WRITE_REQUESTS_ID_1 = 22,
+ AXI_WRITE_REQUESTS_ID_2 = 23,
+ AXI_WRITE_REQUESTS_ID_3 = 24,
+ AXI_WRITE_REQUESTS_ID_4 = 25,
+ AXI_WRITE_REQUESTS_ID_5 = 26,
+ AXI_WRITE_REQUESTS_ID_6 = 27,
+ AXI_WRITE_REQUESTS_ID_7 = 28,
+ AXI_WRITE_REQUESTS_ID_8 = 29,
+ AXI_WRITE_REQUESTS_ID_9 = 30,
+ AXI_WRITE_REQUESTS_ID_10 = 31,
+ AXI_WRITE_REQUESTS_ID_11 = 32,
+ AXI_WRITE_REQUESTS_ID_12 = 33,
+ AXI_WRITE_REQUESTS_ID_13 = 34,
+ AXI_WRITE_REQUESTS_ID_14 = 35,
+ AXI_WRITE_REQUESTS_ID_15 = 36,
+ AXI0_WRITE_REQUESTS_TOTAL = 37,
+ AXI1_WRITE_REQUESTS_TOTAL = 38,
+ AXI2_WRITE_REQUESTS_TOTAL = 39,
+ AXI3_WRITE_REQUESTS_TOTAL = 40,
+ AXI_WRITE_REQUESTS_TOTAL = 41,
+ AXI_TOTAL_REQUESTS = 42,
+ AXI_READ_DATA_BEATS_ID_0 = 43,
+ AXI_READ_DATA_BEATS_ID_1 = 44,
+ AXI_READ_DATA_BEATS_ID_2 = 45,
+ AXI_READ_DATA_BEATS_ID_3 = 46,
+ AXI_READ_DATA_BEATS_ID_4 = 47,
+ AXI_READ_DATA_BEATS_ID_5 = 48,
+ AXI_READ_DATA_BEATS_ID_6 = 49,
+ AXI_READ_DATA_BEATS_ID_7 = 50,
+ AXI_READ_DATA_BEATS_ID_8 = 51,
+ AXI_READ_DATA_BEATS_ID_9 = 52,
+ AXI_READ_DATA_BEATS_ID_10 = 53,
+ AXI_READ_DATA_BEATS_ID_11 = 54,
+ AXI_READ_DATA_BEATS_ID_12 = 55,
+ AXI_READ_DATA_BEATS_ID_13 = 56,
+ AXI_READ_DATA_BEATS_ID_14 = 57,
+ AXI_READ_DATA_BEATS_ID_15 = 58,
+ AXI0_READ_DATA_BEATS_TOTAL = 59,
+ AXI1_READ_DATA_BEATS_TOTAL = 60,
+ AXI2_READ_DATA_BEATS_TOTAL = 61,
+ AXI3_READ_DATA_BEATS_TOTAL = 62,
+ AXI_READ_DATA_BEATS_TOTAL = 63,
+ AXI_WRITE_DATA_BEATS_ID_0 = 64,
+ AXI_WRITE_DATA_BEATS_ID_1 = 65,
+ AXI_WRITE_DATA_BEATS_ID_2 = 66,
+ AXI_WRITE_DATA_BEATS_ID_3 = 67,
+ AXI_WRITE_DATA_BEATS_ID_4 = 68,
+ AXI_WRITE_DATA_BEATS_ID_5 = 69,
+ AXI_WRITE_DATA_BEATS_ID_6 = 70,
+ AXI_WRITE_DATA_BEATS_ID_7 = 71,
+ AXI_WRITE_DATA_BEATS_ID_8 = 72,
+ AXI_WRITE_DATA_BEATS_ID_9 = 73,
+ AXI_WRITE_DATA_BEATS_ID_10 = 74,
+ AXI_WRITE_DATA_BEATS_ID_11 = 75,
+ AXI_WRITE_DATA_BEATS_ID_12 = 76,
+ AXI_WRITE_DATA_BEATS_ID_13 = 77,
+ AXI_WRITE_DATA_BEATS_ID_14 = 78,
+ AXI_WRITE_DATA_BEATS_ID_15 = 79,
+ AXI0_WRITE_DATA_BEATS_TOTAL = 80,
+ AXI1_WRITE_DATA_BEATS_TOTAL = 81,
+ AXI2_WRITE_DATA_BEATS_TOTAL = 82,
+ AXI3_WRITE_DATA_BEATS_TOTAL = 83,
+ AXI_WRITE_DATA_BEATS_TOTAL = 84,
+ AXI_DATA_BEATS_TOTAL = 85,
+ CYCLES_HELD_OFF_ID_0 = 86,
+ CYCLES_HELD_OFF_ID_1 = 87,
+ CYCLES_HELD_OFF_ID_2 = 88,
+ CYCLES_HELD_OFF_ID_3 = 89,
+ CYCLES_HELD_OFF_ID_4 = 90,
+ CYCLES_HELD_OFF_ID_5 = 91,
+ CYCLES_HELD_OFF_ID_6 = 92,
+ CYCLES_HELD_OFF_ID_7 = 93,
+ CYCLES_HELD_OFF_ID_8 = 94,
+ CYCLES_HELD_OFF_ID_9 = 95,
+ CYCLES_HELD_OFF_ID_10 = 96,
+ CYCLES_HELD_OFF_ID_11 = 97,
+ CYCLES_HELD_OFF_ID_12 = 98,
+ CYCLES_HELD_OFF_ID_13 = 99,
+ CYCLES_HELD_OFF_ID_14 = 100,
+ CYCLES_HELD_OFF_ID_15 = 101,
+ AXI_READ_REQUEST_HELD_OFF = 102,
+ AXI_WRITE_REQUEST_HELD_OFF = 103,
+ AXI_REQUEST_HELD_OFF = 104,
+ AXI_WRITE_DATA_HELD_OFF = 105,
+ OCMEM_AXI_READ_REQUEST_HELD_OFF = 106,
+ OCMEM_AXI_WRITE_REQUEST_HELD_OFF = 107,
+ OCMEM_AXI_REQUEST_HELD_OFF = 108,
+ OCMEM_AXI_WRITE_DATA_HELD_OFF = 109,
+ ELAPSED_CYCLES_DDR = 110,
+ ELAPSED_CYCLES_OCMEM = 111,
+};
+
+enum a4xx_vfd_perfcounter_select {
+ VFD_UCHE_BYTE_FETCHED = 0,
+ VFD_UCHE_TRANS = 1,
+ VFD_FETCH_INSTRUCTIONS = 3,
+ VFD_BUSY_CYCLES = 5,
+ VFD_STALL_CYCLES_UCHE = 6,
+ VFD_STALL_CYCLES_HLSQ = 7,
+ VFD_STALL_CYCLES_VPC_BYPASS = 8,
+ VFD_STALL_CYCLES_VPC_ALLOC = 9,
+ VFD_MODE_0_FIBERS = 13,
+ VFD_MODE_1_FIBERS = 14,
+ VFD_MODE_2_FIBERS = 15,
+ VFD_MODE_3_FIBERS = 16,
+ VFD_MODE_4_FIBERS = 17,
+ VFD_BFIFO_STALL = 18,
+ VFD_NUM_VERTICES_TOTAL = 19,
+ VFD_PACKER_FULL = 20,
+ VFD_UCHE_REQUEST_FIFO_FULL = 21,
+ VFD_STARVE_CYCLES_PC = 22,
+ VFD_STARVE_CYCLES_UCHE = 23,
+};
+
+enum a4xx_vpc_perfcounter_select {
+ VPC_SP_LM_COMPONENTS = 2,
+ VPC_SP0_LM_BYTES = 3,
+ VPC_SP1_LM_BYTES = 4,
+ VPC_SP2_LM_BYTES = 5,
+ VPC_SP3_LM_BYTES = 6,
+ VPC_WORKING_CYCLES = 7,
+ VPC_STALL_CYCLES_LM = 8,
+ VPC_STARVE_CYCLES_RAS = 9,
+ VPC_STREAMOUT_CYCLES = 10,
+ VPC_UCHE_TRANSACTIONS = 12,
+ VPC_STALL_CYCLES_UCHE = 13,
+ VPC_BUSY_CYCLES = 14,
+ VPC_STARVE_CYCLES_SP = 15,
+};
+
+enum a4xx_vsc_perfcounter_select {
+ VSC_BUSY_CYCLES = 0,
+ VSC_WORKING_CYCLES = 1,
+ VSC_STALL_CYCLES_UCHE = 2,
+ VSC_STARVE_CYCLES_RAS = 3,
+ VSC_EOT_NUM = 4,
+};
+
+enum a4xx_tex_filter {
+ A4XX_TEX_NEAREST = 0,
+ A4XX_TEX_LINEAR = 1,
+ A4XX_TEX_ANISO = 2,
+};
+
+enum a4xx_tex_clamp {
+ A4XX_TEX_REPEAT = 0,
+ A4XX_TEX_CLAMP_TO_EDGE = 1,
+ A4XX_TEX_MIRROR_REPEAT = 2,
+ A4XX_TEX_CLAMP_TO_BORDER = 3,
+ A4XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a4xx_tex_aniso {
+ A4XX_TEX_ANISO_1 = 0,
+ A4XX_TEX_ANISO_2 = 1,
+ A4XX_TEX_ANISO_4 = 2,
+ A4XX_TEX_ANISO_8 = 3,
+ A4XX_TEX_ANISO_16 = 4,
+};
+
+enum a4xx_tex_swiz {
+ A4XX_TEX_X = 0,
+ A4XX_TEX_Y = 1,
+ A4XX_TEX_Z = 2,
+ A4XX_TEX_W = 3,
+ A4XX_TEX_ZERO = 4,
+ A4XX_TEX_ONE = 5,
+};
+
+enum a4xx_tex_type {
+ A4XX_TEX_1D = 0,
+ A4XX_TEX_2D = 1,
+ A4XX_TEX_CUBE = 2,
+ A4XX_TEX_3D = 3,
+ A4XX_TEX_BUFFER = 4,
+};
+
+#define A4XX_CGC_HLSQ_EARLY_CYC__MASK 0x00700000
+#define A4XX_CGC_HLSQ_EARLY_CYC__SHIFT 20
+static inline uint32_t A4XX_CGC_HLSQ_EARLY_CYC(uint32_t val)
+{
+ return ((val) << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT) & A4XX_CGC_HLSQ_EARLY_CYC__MASK;
+}
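+
+/*
+ * A4XX_CGC_HLSQ_EARLY_CYC() is the canonical shape of every bitfield helper
+ * in this file: shift the value into position, then mask off anything that
+ * does not fit.  A minimal usage sketch (the field value is illustrative):
+ *
+ *	uint32_t reg = A4XX_CGC_HLSQ_EARLY_CYC(4);
+ *
+ * Note that out-of-range values are silently truncated by the mask rather
+ * than rejected.
+ */
+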
+#define A4XX_INT0_RBBM_GPU_IDLE 0x00000001
+#define A4XX_INT0_RBBM_AHB_ERROR 0x00000002
+#define A4XX_INT0_RBBM_REG_TIMEOUT 0x00000004
+#define A4XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A4XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A4XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00000020
+#define A4XX_INT0_VFD_ERROR 0x00000040
+#define A4XX_INT0_CP_SW_INT 0x00000080
+#define A4XX_INT0_CP_T0_PACKET_IN_IB 0x00000100
+#define A4XX_INT0_CP_OPCODE_ERROR 0x00000200
+#define A4XX_INT0_CP_RESERVED_BIT_ERROR 0x00000400
+#define A4XX_INT0_CP_HW_FAULT 0x00000800
+#define A4XX_INT0_CP_DMA 0x00001000
+#define A4XX_INT0_CP_IB2_INT 0x00002000
+#define A4XX_INT0_CP_IB1_INT 0x00004000
+#define A4XX_INT0_CP_RB_INT 0x00008000
+#define A4XX_INT0_CP_REG_PROTECT_FAULT 0x00010000
+#define A4XX_INT0_CP_RB_DONE_TS 0x00020000
+#define A4XX_INT0_CP_VS_DONE_TS 0x00040000
+#define A4XX_INT0_CP_PS_DONE_TS 0x00080000
+#define A4XX_INT0_CACHE_FLUSH_TS 0x00100000
+#define A4XX_INT0_CP_AHB_ERROR_HALT 0x00200000
+#define A4XX_INT0_MISC_HANG_DETECT 0x01000000
+#define A4XX_INT0_UCHE_OOB_ACCESS 0x02000000
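+
+/*
+ * The A4XX_INT0_* values are single-bit flags.  A plausible sketch of how a
+ * driver would arm a subset of them, using only registers defined in this
+ * file (error handling not shown):
+ *
+ *	gpu_write(gpu, REG_A4XX_RBBM_INT_0_MASK,
+ *		  A4XX_INT0_RBBM_AHB_ERROR | A4XX_INT0_CP_HW_FAULT);
+ *
+ * with pending bits read back from REG_A4XX_RBBM_INT_0_STATUS and
+ * acknowledged via REG_A4XX_RBBM_INT_CLEAR_CMD.
+ */
+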
+#define REG_A4XX_RB_GMEM_BASE_ADDR 0x00000cc0
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_0 0x00000cc7
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_1 0x00000cc8
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_2 0x00000cc9
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_3 0x00000cca
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_4 0x00000ccb
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_5 0x00000ccc
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_6 0x00000ccd
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_7 0x00000cce
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_0 0x00000ccf
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_1 0x00000cd0
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_2 0x00000cd1
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_3 0x00000cd2
+
+#define REG_A4XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0
+#define A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK 0x00003fff
+#define A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT 0
+static inline uint32_t A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val)
+{
+ return ((val) << A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK;
+}
+#define A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK 0x3fff0000
+#define A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT 16
+static inline uint32_t A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val)
+{
+ return ((val) << A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK;
+}
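+
+/*
+ * WIDTH and HEIGHT are 14-bit pixel counts packed into a single word; a
+ * sketch with illustrative dimensions:
+ *
+ *	gpu_write(gpu, REG_A4XX_RB_FRAME_BUFFER_DIMENSION,
+ *		  A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(1920) |
+ *		  A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(1080));
+ */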
+
+#define REG_A4XX_RB_CLEAR_COLOR_DW0 0x000020cc
+
+#define REG_A4XX_RB_CLEAR_COLOR_DW1 0x000020cd
+
+#define REG_A4XX_RB_CLEAR_COLOR_DW2 0x000020ce
+
+#define REG_A4XX_RB_CLEAR_COLOR_DW3 0x000020cf
+
+#define REG_A4XX_RB_MODE_CONTROL 0x000020a0
+#define A4XX_RB_MODE_CONTROL_WIDTH__MASK 0x0000003f
+#define A4XX_RB_MODE_CONTROL_WIDTH__SHIFT 0
+static inline uint32_t A4XX_RB_MODE_CONTROL_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A4XX_RB_MODE_CONTROL_WIDTH__SHIFT) & A4XX_RB_MODE_CONTROL_WIDTH__MASK;
+}
+#define A4XX_RB_MODE_CONTROL_HEIGHT__MASK 0x00003f00
+#define A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT 8
+static inline uint32_t A4XX_RB_MODE_CONTROL_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK;
+}
+#define A4XX_RB_MODE_CONTROL_ENABLE_GMEM 0x00010000
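+
+/*
+ * Unlike most helpers in this file, the WIDTH/HEIGHT helpers above shift
+ * their argument right by 5 before packing, i.e. the caller passes a size
+ * in pixels and the field stores it in units of 32 pixels (presumably the
+ * binning granularity).
+ */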
+
+#define REG_A4XX_RB_RENDER_CONTROL 0x000020a1
+#define A4XX_RB_RENDER_CONTROL_BINNING_PASS 0x00000001
+#define A4XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00000020
+
+#define REG_A4XX_RB_MSAA_CONTROL 0x000020a2
+#define A4XX_RB_MSAA_CONTROL_DISABLE 0x00001000
+#define A4XX_RB_MSAA_CONTROL_SAMPLES__MASK 0x0000e000
+#define A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT 13
+static inline uint32_t A4XX_RB_MSAA_CONTROL_SAMPLES(uint32_t val)
+{
+ return ((val) << A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A4XX_RB_MSAA_CONTROL_SAMPLES__MASK;
+}
+
+#define REG_A4XX_RB_RENDER_CONTROL2 0x000020a3
+#define A4XX_RB_RENDER_CONTROL2_COORD_MASK__MASK 0x0000000f
+#define A4XX_RB_RENDER_CONTROL2_COORD_MASK__SHIFT 0
+static inline uint32_t A4XX_RB_RENDER_CONTROL2_COORD_MASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_CONTROL2_COORD_MASK__SHIFT) & A4XX_RB_RENDER_CONTROL2_COORD_MASK__MASK;
+}
+#define A4XX_RB_RENDER_CONTROL2_SAMPLEMASK 0x00000010
+#define A4XX_RB_RENDER_CONTROL2_FACENESS 0x00000020
+#define A4XX_RB_RENDER_CONTROL2_SAMPLEID 0x00000040
+#define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK 0x00000380
+#define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT 7
+static inline uint32_t A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT) & A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK;
+}
+#define A4XX_RB_RENDER_CONTROL2_SAMPLEID_HR 0x00000800
+#define A4XX_RB_RENDER_CONTROL2_IJ_PERSP_PIXEL 0x00001000
+#define A4XX_RB_RENDER_CONTROL2_IJ_PERSP_CENTROID 0x00002000
+#define A4XX_RB_RENDER_CONTROL2_IJ_PERSP_SAMPLE 0x00004000
+#define A4XX_RB_RENDER_CONTROL2_SIZE 0x00008000
+
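+/*
+ * Per-render-target (MRT) state: each target occupies a group of five
+ * consecutive dwords starting at 0x20a4, so the indexed helpers below
+ * return CONTROL, BUF_INFO, BASE, CONTROL3 and BLEND_CONTROL at offsets
+ * +0 through +4 within the group selected by i0.
+ */
+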
+static inline uint32_t REG_A4XX_RB_MRT(uint32_t i0) { return 0x000020a4 + 0x5*i0; }
+
+static inline uint32_t REG_A4XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020a4 + 0x5*i0; }
+#define A4XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
+#define A4XX_RB_MRT_CONTROL_BLEND 0x00000010
+#define A4XX_RB_MRT_CONTROL_BLEND2 0x00000020
+#define A4XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000040
+#define A4XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00
+#define A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8
+static inline uint32_t A4XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+ return ((val) << A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A4XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
+#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000
+#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24
+static inline uint32_t A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A4XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020a5 + 0x5*i0; }
+#define A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000003f
+#define A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a4xx_color_fmt val)
+{
+ return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x000000c0
+#define A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 6
+static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a4xx_tile_mode val)
+{
+ return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK 0x00000600
+#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT 9
+static inline uint32_t A4XX_RB_MRT_BUF_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT) & A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK;
+}
+#define A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00001800
+#define A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 11
+static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+#define A4XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00002000
+#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xffffc000
+#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 14
+static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
+{
+ return ((val >> 4) << A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
+}
+
+static inline uint32_t REG_A4XX_RB_MRT_BASE(uint32_t i0) { return 0x000020a6 + 0x5*i0; }
+
+static inline uint32_t REG_A4XX_RB_MRT_CONTROL3(uint32_t i0) { return 0x000020a7 + 0x5*i0; }
+#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK 0x03fffff8
+#define A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT 3
+static inline uint32_t A4XX_RB_MRT_CONTROL3_STRIDE(uint32_t val)
+{
+ return ((val) << A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT) & A4XX_RB_MRT_CONTROL3_STRIDE__MASK;
+}
+
+static inline uint32_t REG_A4XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020a8 + 0x5*i0; }
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
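+
+/*
+ * A sketch of composing one MRT's blend word from the helpers above; the
+ * factor/opcode names come from the shared adreno/a3xx enums and the choice
+ * of classic source-alpha blending here is purely illustrative:
+ *
+ *	uint32_t blend =
+ *		A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(FACTOR_SRC_ALPHA) |
+ *		A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(BLEND_DST_PLUS_SRC) |
+ *		A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(FACTOR_ONE_MINUS_SRC_ALPHA);
+ */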
+
+#define REG_A4XX_RB_BLEND_RED 0x000020f0
+#define A4XX_RB_BLEND_RED_UINT__MASK 0x000000ff
+#define A4XX_RB_BLEND_RED_UINT__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_RED_UINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_RED_UINT__SHIFT) & A4XX_RB_BLEND_RED_UINT__MASK;
+}
+#define A4XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_RED_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_RED_SINT__SHIFT) & A4XX_RB_BLEND_RED_SINT__MASK;
+}
+#define A4XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
+#define A4XX_RB_BLEND_RED_FLOAT__SHIFT 16
+static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_RED_FLOAT__SHIFT) & A4XX_RB_BLEND_RED_FLOAT__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_RED_F32 0x000020f1
+#define A4XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_RED_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_RED_F32__SHIFT) & A4XX_RB_BLEND_RED_F32__MASK;
+}
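+
+/*
+ * Each blend-color channel is programmed in two encodings: the FLOAT field
+ * above packs a half-float (via _mesa_float_to_half()) into the high half
+ * of the UINT/SINT register, while the separate *_F32 register takes the
+ * raw IEEE-754 bits of the full-precision value (fui() reinterprets a
+ * float as uint32_t).  The same pattern repeats for green, blue and alpha
+ * below.
+ */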
+
+#define REG_A4XX_RB_BLEND_GREEN 0x000020f2
+#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
+#define A4XX_RB_BLEND_GREEN_UINT__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_GREEN_UINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_GREEN_UINT__SHIFT) & A4XX_RB_BLEND_GREEN_UINT__MASK;
+}
+#define A4XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_GREEN_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_GREEN_SINT__SHIFT) & A4XX_RB_BLEND_GREEN_SINT__MASK;
+}
+#define A4XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
+#define A4XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
+static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A4XX_RB_BLEND_GREEN_FLOAT__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_GREEN_F32 0x000020f3
+#define A4XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_GREEN_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_GREEN_F32__SHIFT) & A4XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_BLUE 0x000020f4
+#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
+#define A4XX_RB_BLEND_BLUE_UINT__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_BLUE_UINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_BLUE_UINT__SHIFT) & A4XX_RB_BLEND_BLUE_UINT__MASK;
+}
+#define A4XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_BLUE_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_BLUE_SINT__SHIFT) & A4XX_RB_BLEND_BLUE_SINT__MASK;
+}
+#define A4XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
+#define A4XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
+static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A4XX_RB_BLEND_BLUE_FLOAT__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_BLUE_F32 0x000020f5
+#define A4XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_BLUE_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_BLUE_F32__SHIFT) & A4XX_RB_BLEND_BLUE_F32__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_ALPHA 0x000020f6
+#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
+#define A4XX_RB_BLEND_ALPHA_UINT__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_ALPHA_UINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_ALPHA_UINT__SHIFT) & A4XX_RB_BLEND_ALPHA_UINT__MASK;
+}
+#define A4XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_ALPHA_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_ALPHA_SINT__SHIFT) & A4XX_RB_BLEND_ALPHA_SINT__MASK;
+}
+#define A4XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
+#define A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
+static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A4XX_RB_BLEND_ALPHA_FLOAT__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_ALPHA_F32 0x000020f7
+#define A4XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_ALPHA_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_ALPHA_F32__SHIFT) & A4XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
+#define REG_A4XX_RB_ALPHA_CONTROL 0x000020f8
+#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
+#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
+static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
+{
+ return ((val) << A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
+}
+#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
+#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
+#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
+static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A4XX_RB_FS_OUTPUT 0x000020f9
+#define A4XX_RB_FS_OUTPUT_ENABLE_BLEND__MASK 0x000000ff
+#define A4XX_RB_FS_OUTPUT_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A4XX_RB_FS_OUTPUT_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A4XX_RB_FS_OUTPUT_ENABLE_BLEND__SHIFT) & A4XX_RB_FS_OUTPUT_ENABLE_BLEND__MASK;
+}
+#define A4XX_RB_FS_OUTPUT_INDEPENDENT_BLEND 0x00000100
+#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK 0xffff0000
+#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT) & A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK;
+}
+
+#define REG_A4XX_RB_SAMPLE_COUNT_CONTROL 0x000020fa
+#define A4XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
+#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK 0xfffffffc
+#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT 2
+static inline uint32_t A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR(uint32_t val)
+{
+ return ((val >> 2) << A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT) & A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK;
+}
+
+#define REG_A4XX_RB_RENDER_COMPONENTS 0x000020fb
+#define A4XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f
+#define A4XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT0(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT0__MASK;
+}
+#define A4XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0
+#define A4XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT1(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT1__MASK;
+}
+#define A4XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00
+#define A4XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT2(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT2__MASK;
+}
+#define A4XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000
+#define A4XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT3(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT3__MASK;
+}
+#define A4XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000
+#define A4XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT4(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT4__MASK;
+}
+#define A4XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000
+#define A4XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT5(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT5__MASK;
+}
+#define A4XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000
+#define A4XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT6(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT6__MASK;
+}
+#define A4XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000
+#define A4XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT7(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT7__MASK;
+}
+
+#define REG_A4XX_RB_COPY_CONTROL 0x000020fc
+#define A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003
+#define A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT 0
+static inline uint32_t A4XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val)
+{
+ return ((val) << A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK;
+}
+#define A4XX_RB_COPY_CONTROL_MODE__MASK 0x00000070
+#define A4XX_RB_COPY_CONTROL_MODE__SHIFT 4
+static inline uint32_t A4XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
+{
+ return ((val) << A4XX_RB_COPY_CONTROL_MODE__SHIFT) & A4XX_RB_COPY_CONTROL_MODE__MASK;
+}
+#define A4XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00
+#define A4XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8
+static inline uint32_t A4XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
+{
+ return ((val) << A4XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A4XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
+}
+#define A4XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
+#define A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
+static inline uint32_t A4XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
+{
+ return ((val >> 14) << A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A4XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
+}
+
+#define REG_A4XX_RB_COPY_DEST_BASE 0x000020fd
+#define A4XX_RB_COPY_DEST_BASE_BASE__MASK 0xffffffe0
+#define A4XX_RB_COPY_DEST_BASE_BASE__SHIFT 5
+static inline uint32_t A4XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
+{
+ return ((val >> 5) << A4XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A4XX_RB_COPY_DEST_BASE_BASE__MASK;
+}
+
+#define REG_A4XX_RB_COPY_DEST_PITCH 0x000020fe
+#define A4XX_RB_COPY_DEST_PITCH_PITCH__MASK 0xffffffff
+#define A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0
+static inline uint32_t A4XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A4XX_RB_COPY_DEST_PITCH_PITCH__MASK;
+}
+
+#define REG_A4XX_RB_COPY_DEST_INFO 0x000020ff
+#define A4XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000fc
+#define A4XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 2
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_FORMAT(enum a4xx_color_fmt val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A4XX_RB_COPY_DEST_INFO_FORMAT__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
+#define A4XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A4XX_RB_COPY_DEST_INFO_SWAP__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
+#define A4XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A4XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000
+#define A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_ENDIAN__MASK 0x001c0000
+#define A4XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT 18
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A4XX_RB_COPY_DEST_INFO_ENDIAN__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_TILE__MASK 0x03000000
+#define A4XX_RB_COPY_DEST_INFO_TILE__SHIFT 24
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_TILE(enum a4xx_tile_mode val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A4XX_RB_COPY_DEST_INFO_TILE__MASK;
+}
+
+#define REG_A4XX_RB_FS_OUTPUT_REG 0x00002100
+#define A4XX_RB_FS_OUTPUT_REG_MRT__MASK 0x0000000f
+#define A4XX_RB_FS_OUTPUT_REG_MRT__SHIFT 0
+static inline uint32_t A4XX_RB_FS_OUTPUT_REG_MRT(uint32_t val)
+{
+ return ((val) << A4XX_RB_FS_OUTPUT_REG_MRT__SHIFT) & A4XX_RB_FS_OUTPUT_REG_MRT__MASK;
+}
+#define A4XX_RB_FS_OUTPUT_REG_FRAG_WRITES_Z 0x00000020
+
+#define REG_A4XX_RB_DEPTH_CONTROL 0x00002101
+#define A4XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z 0x00000001
+#define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x00000002
+#define A4XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
+#define A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070
+#define A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4
+static inline uint32_t A4XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
+}
+#define A4XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080
+#define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00010000
+#define A4XX_RB_DEPTH_CONTROL_FORCE_FRAGZ_TO_FS 0x00020000
+#define A4XX_RB_DEPTH_CONTROL_Z_READ_ENABLE 0x80000000
+
+#define REG_A4XX_RB_DEPTH_CLEAR 0x00002102
+
+#define REG_A4XX_RB_DEPTH_INFO 0x00002103
+#define A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000003
+#define A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum a4xx_depth_format val)
+{
+ return ((val) << A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
+}
+#define A4XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff000
+#define A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
+static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
+{
+ return ((val >> 12) << A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A4XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+}
+
+#define REG_A4XX_RB_DEPTH_PITCH 0x00002104
+#define A4XX_RB_DEPTH_PITCH__MASK 0xffffffff
+#define A4XX_RB_DEPTH_PITCH__SHIFT 0
+static inline uint32_t A4XX_RB_DEPTH_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A4XX_RB_DEPTH_PITCH__SHIFT) & A4XX_RB_DEPTH_PITCH__MASK;
+}
+
+#define REG_A4XX_RB_DEPTH_PITCH2 0x00002105
+#define A4XX_RB_DEPTH_PITCH2__MASK 0xffffffff
+#define A4XX_RB_DEPTH_PITCH2__SHIFT 0
+static inline uint32_t A4XX_RB_DEPTH_PITCH2(uint32_t val)
+{
+ return ((val >> 5) << A4XX_RB_DEPTH_PITCH2__SHIFT) & A4XX_RB_DEPTH_PITCH2__MASK;
+}
+
+#define REG_A4XX_RB_STENCIL_CONTROL 0x00002106
+#define A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A4XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A4XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A4XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A4XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A4XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A4XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A4XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A4XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A4XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A4XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A4XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
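+
+/*
+ * Front-face stencil state (FUNC/FAIL/ZPASS/ZFAIL) occupies bits 8..19 and
+ * is mirrored for back faces by the *_BF fields in bits 20..31, so
+ * two-sided stencil is built by OR-ing both sets into the one control word.
+ */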
+
+#define REG_A4XX_RB_STENCIL_CONTROL2 0x00002107
+#define A4XX_RB_STENCIL_CONTROL2_STENCIL_BUFFER 0x00000001
+
+#define REG_A4XX_RB_STENCIL_INFO 0x00002108
+#define A4XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
+#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK 0xfffff000
+#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 12
+static inline uint32_t A4XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
+{
+ return ((val >> 12) << A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
+}
+
+#define REG_A4XX_RB_STENCIL_PITCH 0x00002109
+#define A4XX_RB_STENCIL_PITCH__MASK 0xffffffff
+#define A4XX_RB_STENCIL_PITCH__SHIFT 0
+static inline uint32_t A4XX_RB_STENCIL_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A4XX_RB_STENCIL_PITCH__SHIFT) & A4XX_RB_STENCIL_PITCH__MASK;
+}
+
+#define REG_A4XX_RB_STENCILREFMASK 0x0000210b
+#define A4XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A4XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A4XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A4XX_RB_STENCILREFMASK_BF 0x0000210c
+#define A4XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
+#define A4XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
+static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
+}
+#define A4XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
+#define A4XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
+static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
+}
+#define A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
+#define A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A4XX_RB_BIN_OFFSET 0x0000210d
+#define A4XX_RB_BIN_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_RB_BIN_OFFSET_X__MASK 0x00007fff
+#define A4XX_RB_BIN_OFFSET_X__SHIFT 0
+static inline uint32_t A4XX_RB_BIN_OFFSET_X(uint32_t val)
+{
+ return ((val) << A4XX_RB_BIN_OFFSET_X__SHIFT) & A4XX_RB_BIN_OFFSET_X__MASK;
+}
+#define A4XX_RB_BIN_OFFSET_Y__MASK 0x7fff0000
+#define A4XX_RB_BIN_OFFSET_Y__SHIFT 16
+static inline uint32_t A4XX_RB_BIN_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A4XX_RB_BIN_OFFSET_Y__SHIFT) & A4XX_RB_BIN_OFFSET_Y__MASK;
+}
+
+static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP(uint32_t i0) { return 0x00002120 + 0x2*i0; }
+
+static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP_MIN(uint32_t i0) { return 0x00002120 + 0x2*i0; }
+
+static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP_MAX(uint32_t i0) { return 0x00002121 + 0x2*i0; }
+
+#define REG_A4XX_RBBM_HW_VERSION 0x00000000
+
+#define REG_A4XX_RBBM_HW_CONFIGURATION 0x00000002
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_TP(uint32_t i0) { return 0x00000004 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_TP_REG(uint32_t i0) { return 0x00000004 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_TP(uint32_t i0) { return 0x00000008 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_TP_REG(uint32_t i0) { return 0x00000008 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_TP(uint32_t i0) { return 0x0000000c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_TP_REG(uint32_t i0) { return 0x0000000c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP(uint32_t i0) { return 0x00000010 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP_REG(uint32_t i0) { return 0x00000010 + 0x1*i0; }
+
+#define REG_A4XX_RBBM_CLOCK_CTL_UCHE 0x00000014
+
+#define REG_A4XX_RBBM_CLOCK_CTL2_UCHE 0x00000015
+
+#define REG_A4XX_RBBM_CLOCK_CTL3_UCHE 0x00000016
+
+#define REG_A4XX_RBBM_CLOCK_CTL4_UCHE 0x00000017
+
+#define REG_A4XX_RBBM_CLOCK_HYST_UCHE 0x00000018
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_UCHE 0x00000019
+
+#define REG_A4XX_RBBM_CLOCK_MODE_GPC 0x0000001a
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_GPC 0x0000001b
+
+#define REG_A4XX_RBBM_CLOCK_HYST_GPC 0x0000001c
+
+#define REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM 0x0000001d
+
+#define REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x0000001e
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x0000001f
+
+#define REG_A4XX_RBBM_CLOCK_CTL 0x00000020
+
+#define REG_A4XX_RBBM_SP_HYST_CNT 0x00000021
+
+#define REG_A4XX_RBBM_SW_RESET_CMD 0x00000022
+
+#define REG_A4XX_RBBM_AHB_CTL0 0x00000023
+
+#define REG_A4XX_RBBM_AHB_CTL1 0x00000024
+
+#define REG_A4XX_RBBM_AHB_CMD 0x00000025
+
+#define REG_A4XX_RBBM_RB_SUB_BLOCK_SEL_CTL 0x00000026
+
+#define REG_A4XX_RBBM_RAM_ACC_63_32 0x00000028
+
+#define REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x0000002b
+
+#define REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL 0x0000002f
+
+#define REG_A4XX_RBBM_INTERFACE_HANG_MASK_CTL4 0x00000034
+
+#define REG_A4XX_RBBM_INT_CLEAR_CMD 0x00000036
+
+#define REG_A4XX_RBBM_INT_0_MASK 0x00000037
+
+#define REG_A4XX_RBBM_RBBM_CTL 0x0000003e
+
+#define REG_A4XX_RBBM_AHB_DEBUG_CTL 0x0000003f
+
+#define REG_A4XX_RBBM_VBIF_DEBUG_CTL 0x00000041
+
+#define REG_A4XX_RBBM_CLOCK_CTL2 0x00000042
+
+#define REG_A4XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
+
+#define REG_A4XX_RBBM_RESET_CYCLES 0x00000047
+
+#define REG_A4XX_RBBM_EXT_TRACE_BUS_CTL 0x00000049
+
+#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_A 0x0000004a
+
+#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_B 0x0000004b
+
+#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_C 0x0000004c
+
+#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_D 0x0000004d
+
+#define REG_A4XX_RBBM_POWER_CNTL_IP 0x00000098
+#define A4XX_RBBM_POWER_CNTL_IP_SW_COLLAPSE 0x00000001
+#define A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON 0x00100000
+
+#define REG_A4XX_RBBM_PERFCTR_CP_0_LO 0x0000009c
+
+#define REG_A4XX_RBBM_PERFCTR_CP_0_HI 0x0000009d
+
+#define REG_A4XX_RBBM_PERFCTR_CP_1_LO 0x0000009e
+
+#define REG_A4XX_RBBM_PERFCTR_CP_1_HI 0x0000009f
+
+#define REG_A4XX_RBBM_PERFCTR_CP_2_LO 0x000000a0
+
+#define REG_A4XX_RBBM_PERFCTR_CP_2_HI 0x000000a1
+
+#define REG_A4XX_RBBM_PERFCTR_CP_3_LO 0x000000a2
+
+#define REG_A4XX_RBBM_PERFCTR_CP_3_HI 0x000000a3
+
+#define REG_A4XX_RBBM_PERFCTR_CP_4_LO 0x000000a4
+
+#define REG_A4XX_RBBM_PERFCTR_CP_4_HI 0x000000a5
+
+#define REG_A4XX_RBBM_PERFCTR_CP_5_LO 0x000000a6
+
+#define REG_A4XX_RBBM_PERFCTR_CP_5_HI 0x000000a7
+
+#define REG_A4XX_RBBM_PERFCTR_CP_6_LO 0x000000a8
+
+#define REG_A4XX_RBBM_PERFCTR_CP_6_HI 0x000000a9
+
+#define REG_A4XX_RBBM_PERFCTR_CP_7_LO 0x000000aa
+
+#define REG_A4XX_RBBM_PERFCTR_CP_7_HI 0x000000ab
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_0_LO 0x000000ac
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_0_HI 0x000000ad
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_1_LO 0x000000ae
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_1_HI 0x000000af
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_2_LO 0x000000b0
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_2_HI 0x000000b1
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_3_LO 0x000000b2
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_3_HI 0x000000b3
+
+#define REG_A4XX_RBBM_PERFCTR_PC_0_LO 0x000000b4
+
+#define REG_A4XX_RBBM_PERFCTR_PC_0_HI 0x000000b5
+
+#define REG_A4XX_RBBM_PERFCTR_PC_1_LO 0x000000b6
+
+#define REG_A4XX_RBBM_PERFCTR_PC_1_HI 0x000000b7
+
+#define REG_A4XX_RBBM_PERFCTR_PC_2_LO 0x000000b8
+
+#define REG_A4XX_RBBM_PERFCTR_PC_2_HI 0x000000b9
+
+#define REG_A4XX_RBBM_PERFCTR_PC_3_LO 0x000000ba
+
+#define REG_A4XX_RBBM_PERFCTR_PC_3_HI 0x000000bb
+
+#define REG_A4XX_RBBM_PERFCTR_PC_4_LO 0x000000bc
+
+#define REG_A4XX_RBBM_PERFCTR_PC_4_HI 0x000000bd
+
+#define REG_A4XX_RBBM_PERFCTR_PC_5_LO 0x000000be
+
+#define REG_A4XX_RBBM_PERFCTR_PC_5_HI 0x000000bf
+
+#define REG_A4XX_RBBM_PERFCTR_PC_6_LO 0x000000c0
+
+#define REG_A4XX_RBBM_PERFCTR_PC_6_HI 0x000000c1
+
+#define REG_A4XX_RBBM_PERFCTR_PC_7_LO 0x000000c2
+
+#define REG_A4XX_RBBM_PERFCTR_PC_7_HI 0x000000c3
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_0_LO 0x000000c4
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_0_HI 0x000000c5
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_1_LO 0x000000c6
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_1_HI 0x000000c7
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_2_LO 0x000000c8
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_2_HI 0x000000c9
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_3_LO 0x000000ca
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_3_HI 0x000000cb
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_4_LO 0x000000cc
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_4_HI 0x000000cd
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_5_LO 0x000000ce
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_5_HI 0x000000cf
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_6_LO 0x000000d0
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_6_HI 0x000000d1
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_7_LO 0x000000d2
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_7_HI 0x000000d3
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000d4
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000d5
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000d6
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000d7
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000d8
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000d9
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000da
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000db
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000dc
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000dd
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000de
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000df
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_LO 0x000000e0
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_HI 0x000000e1
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_LO 0x000000e2
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_HI 0x000000e3
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_0_LO 0x000000e4
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_0_HI 0x000000e5
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_1_LO 0x000000e6
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_1_HI 0x000000e7
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_2_LO 0x000000e8
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_2_HI 0x000000e9
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_3_LO 0x000000ea
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_3_HI 0x000000eb
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_0_LO 0x000000ec
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_0_HI 0x000000ed
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_1_LO 0x000000ee
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_1_HI 0x000000ef
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_2_LO 0x000000f0
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_2_HI 0x000000f1
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_3_LO 0x000000f2
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_3_HI 0x000000f3
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_0_LO 0x000000f4
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_0_HI 0x000000f5
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_1_LO 0x000000f6
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_1_HI 0x000000f7
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_2_LO 0x000000f8
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_2_HI 0x000000f9
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_3_LO 0x000000fa
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_3_HI 0x000000fb
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_0_LO 0x000000fc
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_0_HI 0x000000fd
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_1_LO 0x000000fe
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_1_HI 0x000000ff
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_2_LO 0x00000100
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_2_HI 0x00000101
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_3_LO 0x00000102
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_3_HI 0x00000103
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_0_LO 0x00000104
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_0_HI 0x00000105
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_1_LO 0x00000106
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_1_HI 0x00000107
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_2_LO 0x00000108
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_2_HI 0x00000109
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_3_LO 0x0000010a
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_3_HI 0x0000010b
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_4_LO 0x0000010c
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_4_HI 0x0000010d
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_5_LO 0x0000010e
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_5_HI 0x0000010f
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_6_LO 0x00000110
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_6_HI 0x00000111
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_7_LO 0x00000112
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_7_HI 0x00000113
+
+#define REG_A4XX_RBBM_PERFCTR_TP_0_LO 0x00000114
+
+#define REG_A4XX_RBBM_PERFCTR_TP_0_HI 0x00000115
+
+#define REG_A4XX_RBBM_PERFCTR_TP_1_LO 0x00000116
+
+#define REG_A4XX_RBBM_PERFCTR_TP_1_HI 0x00000117
+
+#define REG_A4XX_RBBM_PERFCTR_TP_2_LO 0x00000118
+
+#define REG_A4XX_RBBM_PERFCTR_TP_2_HI 0x00000119
+
+#define REG_A4XX_RBBM_PERFCTR_TP_3_LO 0x0000011a
+
+#define REG_A4XX_RBBM_PERFCTR_TP_3_HI 0x0000011b
+
+#define REG_A4XX_RBBM_PERFCTR_TP_4_LO 0x0000011c
+
+#define REG_A4XX_RBBM_PERFCTR_TP_4_HI 0x0000011d
+
+#define REG_A4XX_RBBM_PERFCTR_TP_5_LO 0x0000011e
+
+#define REG_A4XX_RBBM_PERFCTR_TP_5_HI 0x0000011f
+
+#define REG_A4XX_RBBM_PERFCTR_TP_6_LO 0x00000120
+
+#define REG_A4XX_RBBM_PERFCTR_TP_6_HI 0x00000121
+
+#define REG_A4XX_RBBM_PERFCTR_TP_7_LO 0x00000122
+
+#define REG_A4XX_RBBM_PERFCTR_TP_7_HI 0x00000123
+
+#define REG_A4XX_RBBM_PERFCTR_SP_0_LO 0x00000124
+
+#define REG_A4XX_RBBM_PERFCTR_SP_0_HI 0x00000125
+
+#define REG_A4XX_RBBM_PERFCTR_SP_1_LO 0x00000126
+
+#define REG_A4XX_RBBM_PERFCTR_SP_1_HI 0x00000127
+
+#define REG_A4XX_RBBM_PERFCTR_SP_2_LO 0x00000128
+
+#define REG_A4XX_RBBM_PERFCTR_SP_2_HI 0x00000129
+
+#define REG_A4XX_RBBM_PERFCTR_SP_3_LO 0x0000012a
+
+#define REG_A4XX_RBBM_PERFCTR_SP_3_HI 0x0000012b
+
+#define REG_A4XX_RBBM_PERFCTR_SP_4_LO 0x0000012c
+
+#define REG_A4XX_RBBM_PERFCTR_SP_4_HI 0x0000012d
+
+#define REG_A4XX_RBBM_PERFCTR_SP_5_LO 0x0000012e
+
+#define REG_A4XX_RBBM_PERFCTR_SP_5_HI 0x0000012f
+
+#define REG_A4XX_RBBM_PERFCTR_SP_6_LO 0x00000130
+
+#define REG_A4XX_RBBM_PERFCTR_SP_6_HI 0x00000131
+
+#define REG_A4XX_RBBM_PERFCTR_SP_7_LO 0x00000132
+
+#define REG_A4XX_RBBM_PERFCTR_SP_7_HI 0x00000133
+
+#define REG_A4XX_RBBM_PERFCTR_SP_8_LO 0x00000134
+
+#define REG_A4XX_RBBM_PERFCTR_SP_8_HI 0x00000135
+
+#define REG_A4XX_RBBM_PERFCTR_SP_9_LO 0x00000136
+
+#define REG_A4XX_RBBM_PERFCTR_SP_9_HI 0x00000137
+
+#define REG_A4XX_RBBM_PERFCTR_SP_10_LO 0x00000138
+
+#define REG_A4XX_RBBM_PERFCTR_SP_10_HI 0x00000139
+
+#define REG_A4XX_RBBM_PERFCTR_SP_11_LO 0x0000013a
+
+#define REG_A4XX_RBBM_PERFCTR_SP_11_HI 0x0000013b
+
+#define REG_A4XX_RBBM_PERFCTR_RB_0_LO 0x0000013c
+
+#define REG_A4XX_RBBM_PERFCTR_RB_0_HI 0x0000013d
+
+#define REG_A4XX_RBBM_PERFCTR_RB_1_LO 0x0000013e
+
+#define REG_A4XX_RBBM_PERFCTR_RB_1_HI 0x0000013f
+
+#define REG_A4XX_RBBM_PERFCTR_RB_2_LO 0x00000140
+
+#define REG_A4XX_RBBM_PERFCTR_RB_2_HI 0x00000141
+
+#define REG_A4XX_RBBM_PERFCTR_RB_3_LO 0x00000142
+
+#define REG_A4XX_RBBM_PERFCTR_RB_3_HI 0x00000143
+
+#define REG_A4XX_RBBM_PERFCTR_RB_4_LO 0x00000144
+
+#define REG_A4XX_RBBM_PERFCTR_RB_4_HI 0x00000145
+
+#define REG_A4XX_RBBM_PERFCTR_RB_5_LO 0x00000146
+
+#define REG_A4XX_RBBM_PERFCTR_RB_5_HI 0x00000147
+
+#define REG_A4XX_RBBM_PERFCTR_RB_6_LO 0x00000148
+
+#define REG_A4XX_RBBM_PERFCTR_RB_6_HI 0x00000149
+
+#define REG_A4XX_RBBM_PERFCTR_RB_7_LO 0x0000014a
+
+#define REG_A4XX_RBBM_PERFCTR_RB_7_HI 0x0000014b
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_0_LO 0x0000014c
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_0_HI 0x0000014d
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_1_LO 0x0000014e
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_1_HI 0x0000014f
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_0_LO 0x00000166
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_0_HI 0x00000167
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO 0x00000168
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_1_HI 0x00000169
+
+#define REG_A4XX_RBBM_ALWAYSON_COUNTER_LO 0x0000016e
+
+#define REG_A4XX_RBBM_ALWAYSON_COUNTER_HI 0x0000016f
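+
+/*
+ * The REG_A4XX_RBBM_PERFCTR_*_LO/_HI pairs above are, going by their
+ * naming, the two 32-bit halves of 64-bit counters; a reader would combine
+ * them as ((uint64_t)hi << 32) | lo.
+ */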
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP(uint32_t i0) { return 0x00000068 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP_REG(uint32_t i0) { return 0x00000068 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_SP(uint32_t i0) { return 0x0000006c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_SP_REG(uint32_t i0) { return 0x0000006c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_SP(uint32_t i0) { return 0x00000070 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_SP_REG(uint32_t i0) { return 0x00000070 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_SP(uint32_t i0) { return 0x00000074 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_SP_REG(uint32_t i0) { return 0x00000074 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_RB(uint32_t i0) { return 0x00000078 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_RB_REG(uint32_t i0) { return 0x00000078 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_RB(uint32_t i0) { return 0x0000007c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_RB_REG(uint32_t i0) { return 0x0000007c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(uint32_t i0) { return 0x00000082 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU_REG(uint32_t i0) { return 0x00000082 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(uint32_t i0) { return 0x00000086 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU_REG(uint32_t i0) { return 0x00000086 + 0x1*i0; }
+
+#define REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM 0x00000080
+
+#define REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM 0x00000081
+
+#define REG_A4XX_RBBM_CLOCK_CTL_HLSQ 0x0000008a
+
+#define REG_A4XX_RBBM_CLOCK_HYST_HLSQ 0x0000008b
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_HLSQ 0x0000008c
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM 0x0000008d
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(uint32_t i0) { return 0x0000008e + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) { return 0x0000008e + 0x1*i0; }
+
+#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0 0x00000099
+
+#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1 0x0000009a
+
+#define REG_A4XX_RBBM_PERFCTR_CTL 0x00000170
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD0 0x00000171
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD1 0x00000172
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD2 0x00000173
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000174
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000175
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_0 0x00000176
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_1 0x00000177
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_2 0x00000178
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_3 0x00000179
+
+#define REG_A4XX_RBBM_GPU_BUSY_MASKED 0x0000017a
+
+#define REG_A4XX_RBBM_INT_0_STATUS 0x0000017d
+
+#define REG_A4XX_RBBM_CLOCK_STATUS 0x00000182
+
+#define REG_A4XX_RBBM_AHB_STATUS 0x00000189
+
+#define REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS 0x0000018c
+
+#define REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS 0x0000018d
+
+#define REG_A4XX_RBBM_AHB_ERROR_STATUS 0x0000018f
+
+#define REG_A4XX_RBBM_STATUS 0x00000191
+#define A4XX_RBBM_STATUS_HI_BUSY 0x00000001
+#define A4XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
+#define A4XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
+#define A4XX_RBBM_STATUS_CP_NRT_BUSY 0x00004000
+#define A4XX_RBBM_STATUS_VBIF_BUSY 0x00008000
+#define A4XX_RBBM_STATUS_TSE_BUSY 0x00010000
+#define A4XX_RBBM_STATUS_RAS_BUSY 0x00020000
+#define A4XX_RBBM_STATUS_RB_BUSY 0x00040000
+#define A4XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
+#define A4XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
+#define A4XX_RBBM_STATUS_VFD_BUSY 0x00200000
+#define A4XX_RBBM_STATUS_VPC_BUSY 0x00400000
+#define A4XX_RBBM_STATUS_UCHE_BUSY 0x00800000
+#define A4XX_RBBM_STATUS_SP_BUSY 0x01000000
+#define A4XX_RBBM_STATUS_TPL1_BUSY 0x02000000
+#define A4XX_RBBM_STATUS_MARB_BUSY 0x04000000
+#define A4XX_RBBM_STATUS_VSC_BUSY 0x08000000
+#define A4XX_RBBM_STATUS_ARB_BUSY 0x10000000
+#define A4XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
+#define A4XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000
+#define A4XX_RBBM_STATUS_GPU_BUSY 0x80000000
+
+#define REG_A4XX_RBBM_INTERFACE_RRDY_STATUS5 0x0000019f
+
+#define REG_A4XX_RBBM_POWER_STATUS 0x000001b0
+#define A4XX_RBBM_POWER_STATUS_SP_TP_PWR_ON 0x00100000
+
+#define REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2 0x000001b8
+
+#define REG_A4XX_CP_SCRATCH_UMASK 0x00000228
+
+#define REG_A4XX_CP_SCRATCH_ADDR 0x00000229
+
+#define REG_A4XX_CP_RB_BASE 0x00000200
+
+#define REG_A4XX_CP_RB_CNTL 0x00000201
+
+#define REG_A4XX_CP_RB_WPTR 0x00000205
+
+#define REG_A4XX_CP_RB_RPTR_ADDR 0x00000203
+
+#define REG_A4XX_CP_RB_RPTR 0x00000204
+
+#define REG_A4XX_CP_IB1_BASE 0x00000206
+
+#define REG_A4XX_CP_IB1_BUFSZ 0x00000207
+
+#define REG_A4XX_CP_IB2_BASE 0x00000208
+
+#define REG_A4XX_CP_IB2_BUFSZ 0x00000209
+
+#define REG_A4XX_CP_ME_NRT_ADDR 0x0000020c
+
+#define REG_A4XX_CP_ME_NRT_DATA 0x0000020d
+
+#define REG_A4XX_CP_ME_RB_DONE_DATA 0x00000217
+
+#define REG_A4XX_CP_QUEUE_THRESH2 0x00000219
+
+#define REG_A4XX_CP_MERCIU_SIZE 0x0000021b
+
+#define REG_A4XX_CP_ROQ_ADDR 0x0000021c
+
+#define REG_A4XX_CP_ROQ_DATA 0x0000021d
+
+#define REG_A4XX_CP_MEQ_ADDR 0x0000021e
+
+#define REG_A4XX_CP_MEQ_DATA 0x0000021f
+
+#define REG_A4XX_CP_MERCIU_ADDR 0x00000220
+
+#define REG_A4XX_CP_MERCIU_DATA 0x00000221
+
+#define REG_A4XX_CP_MERCIU_DATA2 0x00000222
+
+#define REG_A4XX_CP_PFP_UCODE_ADDR 0x00000223
+
+#define REG_A4XX_CP_PFP_UCODE_DATA 0x00000224
+
+#define REG_A4XX_CP_ME_RAM_WADDR 0x00000225
+
+#define REG_A4XX_CP_ME_RAM_RADDR 0x00000226
+
+#define REG_A4XX_CP_ME_RAM_DATA 0x00000227
+
+#define REG_A4XX_CP_PREEMPT 0x0000022a
+
+#define REG_A4XX_CP_CNTL 0x0000022c
+
+#define REG_A4XX_CP_ME_CNTL 0x0000022d
+
+#define REG_A4XX_CP_DEBUG 0x0000022e
+
+#define REG_A4XX_CP_DEBUG_ECO_CONTROL 0x00000231
+
+#define REG_A4XX_CP_DRAW_STATE_ADDR 0x00000232
+
+static inline uint32_t REG_A4XX_CP_PROTECT(uint32_t i0) { return 0x00000240 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 + 0x1*i0; }
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A4XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A4XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A4XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000
+#define A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24
+static inline uint32_t A4XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A4XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A4XX_CP_PROTECT_REG_TRAP_WRITE__MASK 0x20000000
+#define A4XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT 29
+static inline uint32_t A4XX_CP_PROTECT_REG_TRAP_WRITE(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT) & A4XX_CP_PROTECT_REG_TRAP_WRITE__MASK;
+}
+#define A4XX_CP_PROTECT_REG_TRAP_READ__MASK 0x40000000
+#define A4XX_CP_PROTECT_REG_TRAP_READ__SHIFT 30
+static inline uint32_t A4XX_CP_PROTECT_REG_TRAP_READ(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_TRAP_READ__SHIFT) & A4XX_CP_PROTECT_REG_TRAP_READ__MASK;
+}
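+
+/*
+ * Each CP_PROTECT_REG entry appears to describe one protected register
+ * range: BASE_ADDR is the first dword offset, MASK_LEN sizes the range as
+ * a power of two, and TRAP_READ/TRAP_WRITE select which accesses fault.
+ * An illustrative entry covering 0x40 registers starting at 0xcc0:
+ *
+ *	A4XX_CP_PROTECT_REG_BASE_ADDR(0xcc0) |
+ *	A4XX_CP_PROTECT_REG_MASK_LEN(6) |
+ *	A4XX_CP_PROTECT_REG_TRAP_WRITE(1)
+ */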
+
+#define REG_A4XX_CP_PROTECT_CTRL 0x00000250
+
+#define REG_A4XX_CP_ST_BASE 0x000004c0
+
+#define REG_A4XX_CP_STQ_AVAIL 0x000004ce
+
+#define REG_A4XX_CP_MERCIU_STAT 0x000004d0
+
+#define REG_A4XX_CP_WFI_PEND_CTR 0x000004d2
+
+#define REG_A4XX_CP_HW_FAULT 0x000004d8
+
+#define REG_A4XX_CP_PROTECT_STATUS 0x000004da
+
+#define REG_A4XX_CP_EVENTS_IN_FLIGHT 0x000004dd
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_0 0x00000500
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_1 0x00000501
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_2 0x00000502
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_3 0x00000503
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_4 0x00000504
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_5 0x00000505
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_6 0x00000506
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_7 0x00000507
+
+#define REG_A4XX_CP_PERFCOMBINER_SELECT 0x0000050b
+
+static inline uint32_t REG_A4XX_CP_SCRATCH(uint32_t i0) { return 0x00000578 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000578 + 0x1*i0; }
+
+#define REG_A4XX_SP_VS_STATUS 0x00000ec0
+
+#define REG_A4XX_SP_MODE_CONTROL 0x00000ec3
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_0 0x00000ec4
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_1 0x00000ec5
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_2 0x00000ec6
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_3 0x00000ec7
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_4 0x00000ec8
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_5 0x00000ec9
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_6 0x00000eca
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_7 0x00000ecb
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_8 0x00000ecc
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_9 0x00000ecd
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_10 0x00000ece
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_11 0x00000ecf
+
+#define REG_A4XX_SP_SP_CTRL_REG 0x000022c0
+#define A4XX_SP_SP_CTRL_REG_BINNING_PASS 0x00080000
+
+#define REG_A4XX_SP_INSTR_CACHE_CTRL 0x000022c1
+#define A4XX_SP_INSTR_CACHE_CTRL_VS_BUFFER 0x00000080
+#define A4XX_SP_INSTR_CACHE_CTRL_FS_BUFFER 0x00000100
+#define A4XX_SP_INSTR_CACHE_CTRL_INSTR_BUFFER 0x00000400
+
+#define REG_A4XX_SP_VS_CTRL_REG0 0x000022c4
+#define A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A4XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_VARYING 0x00000002
+#define A4XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
+#define A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A4XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A4XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000
+
+#define REG_A4XX_SP_VS_CTRL_REG1 0x000022c5
+#define A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x7f000000
+#define A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24
+static inline uint32_t A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK;
+}
+
+#define REG_A4XX_SP_VS_PARAM_REG 0x000022c6
+#define A4XX_SP_VS_PARAM_REG_POSREGID__MASK 0x000000ff
+#define A4XX_SP_VS_PARAM_REG_POSREGID__SHIFT 0
+static inline uint32_t A4XX_SP_VS_PARAM_REG_POSREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_VS_PARAM_REG_POSREGID__MASK;
+}
+#define A4XX_SP_VS_PARAM_REG_PSIZEREGID__MASK 0x0000ff00
+#define A4XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT 8
+static inline uint32_t A4XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A4XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
+}
+#define A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0xfff00000
+#define A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20
+static inline uint32_t A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+#define A4XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff
+#define A4XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A4XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A4XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00
+#define A4XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9
+static inline uint32_t A4XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A4XX_SP_VS_OUT_REG_B_REGID__MASK 0x01ff0000
+#define A4XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A4XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A4XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
+#define A4XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
+static inline uint32_t A4XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d8 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d8 + 0x1*i0; }
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A4XX_SP_VS_OBJ_OFFSET_REG 0x000022e0
+#define A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_VS_OBJ_START 0x000022e1
+
+#define REG_A4XX_SP_VS_PVT_MEM_PARAM 0x000022e2
+
+#define REG_A4XX_SP_VS_PVT_MEM_ADDR 0x000022e3
+
+#define REG_A4XX_SP_VS_LENGTH_REG 0x000022e5
+
+#define REG_A4XX_SP_FS_CTRL_REG0 0x000022e8
+#define A4XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A4XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_VARYING 0x00000002
+#define A4XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
+#define A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A4XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A4XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
+
+#define REG_A4XX_SP_FS_CTRL_REG1 0x000022e9
+#define A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG1_FACENESS 0x00080000
+#define A4XX_SP_FS_CTRL_REG1_VARYING 0x00100000
+#define A4XX_SP_FS_CTRL_REG1_FRAGCOORD 0x00200000
+
+#define REG_A4XX_SP_FS_OBJ_OFFSET_REG 0x000022ea
+#define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_FS_OBJ_START 0x000022eb
+
+#define REG_A4XX_SP_FS_PVT_MEM_PARAM 0x000022ec
+
+#define REG_A4XX_SP_FS_PVT_MEM_ADDR 0x000022ed
+
+#define REG_A4XX_SP_FS_LENGTH_REG 0x000022ef
+
+#define REG_A4XX_SP_FS_OUTPUT_REG 0x000022f0
+#define A4XX_SP_FS_OUTPUT_REG_MRT__MASK 0x0000000f
+#define A4XX_SP_FS_OUTPUT_REG_MRT__SHIFT 0
+static inline uint32_t A4XX_SP_FS_OUTPUT_REG_MRT(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_OUTPUT_REG_MRT__SHIFT) & A4XX_SP_FS_OUTPUT_REG_MRT__MASK;
+}
+#define A4XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080
+#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00
+#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8
+static inline uint32_t A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK;
+}
+#define A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__MASK 0xff000000
+#define A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__SHIFT 24
+static inline uint32_t A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__SHIFT) & A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_FS_MRT(uint32_t i0) { return 0x000022f1 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f1 + 0x1*i0; }
+#define A4XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff
+#define A4XX_SP_FS_MRT_REG_REGID__SHIFT 0
+static inline uint32_t A4XX_SP_FS_MRT_REG_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_MRT_REG_REGID__SHIFT) & A4XX_SP_FS_MRT_REG_REGID__MASK;
+}
+#define A4XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100
+#define A4XX_SP_FS_MRT_REG_COLOR_SINT 0x00000400
+#define A4XX_SP_FS_MRT_REG_COLOR_UINT 0x00000800
+#define A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK 0x0003f000
+#define A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT 12
+static inline uint32_t A4XX_SP_FS_MRT_REG_MRTFORMAT(enum a4xx_color_fmt val)
+{
+ return ((val) << A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT) & A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK;
+}
+#define A4XX_SP_FS_MRT_REG_COLOR_SRGB 0x00040000
+
+#define REG_A4XX_SP_CS_CTRL_REG0 0x00002300
+#define A4XX_SP_CS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A4XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A4XX_SP_CS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A4XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_CS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A4XX_SP_CS_CTRL_REG0_VARYING 0x00000002
+#define A4XX_SP_CS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
+#define A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
+static inline uint32_t A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
+{
+ return ((val) << A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP__MASK;
+}
+#define A4XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A4XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A4XX_SP_CS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A4XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_CS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A4XX_SP_CS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A4XX_SP_CS_CTRL_REG0_PIXLODENABLE 0x00400000
+
+#define REG_A4XX_SP_CS_OBJ_OFFSET_REG 0x00002301
+
+#define REG_A4XX_SP_CS_OBJ_START 0x00002302
+
+#define REG_A4XX_SP_CS_PVT_MEM_PARAM 0x00002303
+
+#define REG_A4XX_SP_CS_PVT_MEM_ADDR 0x00002304
+
+#define REG_A4XX_SP_CS_PVT_MEM_SIZE 0x00002305
+
+#define REG_A4XX_SP_CS_LENGTH_REG 0x00002306
+
+#define REG_A4XX_SP_HS_OBJ_OFFSET_REG 0x0000230d
+#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_HS_OBJ_START 0x0000230e
+
+#define REG_A4XX_SP_HS_PVT_MEM_PARAM 0x0000230f
+
+#define REG_A4XX_SP_HS_PVT_MEM_ADDR 0x00002310
+
+#define REG_A4XX_SP_HS_LENGTH_REG 0x00002312
+
+#define REG_A4XX_SP_DS_PARAM_REG 0x0000231a
+#define A4XX_SP_DS_PARAM_REG_POSREGID__MASK 0x000000ff
+#define A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT 0
+static inline uint32_t A4XX_SP_DS_PARAM_REG_POSREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_DS_PARAM_REG_POSREGID__MASK;
+}
+#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK 0xfff00000
+#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT 20
+static inline uint32_t A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_DS_OUT(uint32_t i0) { return 0x0000231b + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_DS_OUT_REG(uint32_t i0) { return 0x0000231b + 0x1*i0; }
+#define A4XX_SP_DS_OUT_REG_A_REGID__MASK 0x000001ff
+#define A4XX_SP_DS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A4XX_SP_DS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_A_REGID__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK 0x00001e00
+#define A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT 9
+static inline uint32_t A4XX_SP_DS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_B_REGID__MASK 0x01ff0000
+#define A4XX_SP_DS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A4XX_SP_DS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_B_REGID__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK 0x1e000000
+#define A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT 25
+static inline uint32_t A4XX_SP_DS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_DS_VPC_DST(uint32_t i0) { return 0x0000232c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_DS_VPC_DST_REG(uint32_t i0) { return 0x0000232c + 0x1*i0; }
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A4XX_SP_DS_OBJ_OFFSET_REG 0x00002334
+#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_DS_OBJ_START 0x00002335
+
+#define REG_A4XX_SP_DS_PVT_MEM_PARAM 0x00002336
+
+#define REG_A4XX_SP_DS_PVT_MEM_ADDR 0x00002337
+
+#define REG_A4XX_SP_DS_LENGTH_REG 0x00002339
+
+#define REG_A4XX_SP_GS_PARAM_REG 0x00002341
+#define A4XX_SP_GS_PARAM_REG_POSREGID__MASK 0x000000ff
+#define A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT 0
+static inline uint32_t A4XX_SP_GS_PARAM_REG_POSREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_POSREGID__MASK;
+}
+#define A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK 0x0000ff00
+#define A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT 8
+static inline uint32_t A4XX_SP_GS_PARAM_REG_PRIMREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK;
+}
+#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK 0xfff00000
+#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT 20
+static inline uint32_t A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_GS_OUT(uint32_t i0) { return 0x00002342 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_GS_OUT_REG(uint32_t i0) { return 0x00002342 + 0x1*i0; }
+#define A4XX_SP_GS_OUT_REG_A_REGID__MASK 0x000001ff
+#define A4XX_SP_GS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A4XX_SP_GS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_A_REGID__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK 0x00001e00
+#define A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT 9
+static inline uint32_t A4XX_SP_GS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_B_REGID__MASK 0x01ff0000
+#define A4XX_SP_GS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A4XX_SP_GS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_B_REGID__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK 0x1e000000
+#define A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT 25
+static inline uint32_t A4XX_SP_GS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_GS_VPC_DST(uint32_t i0) { return 0x00002353 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_GS_VPC_DST_REG(uint32_t i0) { return 0x00002353 + 0x1*i0; }
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A4XX_SP_GS_OBJ_OFFSET_REG 0x0000235b
+#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_GS_OBJ_START 0x0000235c
+
+#define REG_A4XX_SP_GS_PVT_MEM_PARAM 0x0000235d
+
+#define REG_A4XX_SP_GS_PVT_MEM_ADDR 0x0000235e
+
+#define REG_A4XX_SP_GS_LENGTH_REG 0x00002360
+
+#define REG_A4XX_VPC_DEBUG_RAM_SEL 0x00000e60
+
+#define REG_A4XX_VPC_DEBUG_RAM_READ 0x00000e61
+
+#define REG_A4XX_VPC_DEBUG_ECO_CONTROL 0x00000e64
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_0 0x00000e65
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_1 0x00000e66
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_2 0x00000e67
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_3 0x00000e68
+
+#define REG_A4XX_VPC_ATTR 0x00002140
+#define A4XX_VPC_ATTR_TOTALATTR__MASK 0x000001ff
+#define A4XX_VPC_ATTR_TOTALATTR__SHIFT 0
+static inline uint32_t A4XX_VPC_ATTR_TOTALATTR(uint32_t val)
+{
+ return ((val) << A4XX_VPC_ATTR_TOTALATTR__SHIFT) & A4XX_VPC_ATTR_TOTALATTR__MASK;
+}
+#define A4XX_VPC_ATTR_PSIZE 0x00000200
+#define A4XX_VPC_ATTR_THRDASSIGN__MASK 0x00003000
+#define A4XX_VPC_ATTR_THRDASSIGN__SHIFT 12
+static inline uint32_t A4XX_VPC_ATTR_THRDASSIGN(uint32_t val)
+{
+ return ((val) << A4XX_VPC_ATTR_THRDASSIGN__SHIFT) & A4XX_VPC_ATTR_THRDASSIGN__MASK;
+}
+#define A4XX_VPC_ATTR_ENABLE 0x02000000
+
+#define REG_A4XX_VPC_PACK 0x00002141
+#define A4XX_VPC_PACK_NUMBYPASSVAR__MASK 0x000000ff
+#define A4XX_VPC_PACK_NUMBYPASSVAR__SHIFT 0
+static inline uint32_t A4XX_VPC_PACK_NUMBYPASSVAR(uint32_t val)
+{
+ return ((val) << A4XX_VPC_PACK_NUMBYPASSVAR__SHIFT) & A4XX_VPC_PACK_NUMBYPASSVAR__MASK;
+}
+#define A4XX_VPC_PACK_NUMFPNONPOSVAR__MASK 0x0000ff00
+#define A4XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT 8
+static inline uint32_t A4XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t val)
+{
+ return ((val) << A4XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT) & A4XX_VPC_PACK_NUMFPNONPOSVAR__MASK;
+}
+#define A4XX_VPC_PACK_NUMNONPOSVSVAR__MASK 0x00ff0000
+#define A4XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT 16
+static inline uint32_t A4XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
+{
+ return ((val) << A4XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A4XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002142 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002142 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000214a + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000214a + 0x1*i0; }
+
+#define REG_A4XX_VPC_SO_FLUSH_WADDR_3 0x0000216e
+
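+/* The WIDTH/HEIGHT packers below shift the value right by 5 before
+ * masking, so callers pass the bin size in pixels and the register
+ * apparently stores it in units of 32 pixels, matching the a3xx
+ * VSC_BIN_SIZE encoding.
+ */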
+#define REG_A4XX_VSC_BIN_SIZE 0x00000c00
+#define A4XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
+#define A4XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A4XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A4XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A4XX_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A4XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
+#define A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5
+static inline uint32_t A4XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A4XX_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A4XX_VSC_SIZE_ADDRESS 0x00000c01
+
+#define REG_A4XX_VSC_SIZE_ADDRESS2 0x00000c02
+
+#define REG_A4XX_VSC_DEBUG_ECO_CONTROL 0x00000c03
+
+static inline uint32_t REG_A4XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c08 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000c08 + 0x1*i0; }
+#define A4XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
+#define A4XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
+{
+ return ((val) << A4XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_X__MASK;
+}
+#define A4XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00
+#define A4XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val)
+{
+ return ((val) << A4XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_Y__MASK;
+}
+#define A4XX_VSC_PIPE_CONFIG_REG_W__MASK 0x00f00000
+#define A4XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_W(uint32_t val)
+{
+ return ((val) << A4XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_W__MASK;
+}
+#define A4XX_VSC_PIPE_CONFIG_REG_H__MASK 0x0f000000
+#define A4XX_VSC_PIPE_CONFIG_REG_H__SHIFT 24
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
+{
+ return ((val) << A4XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_H__MASK;
+}
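+/* Hypothetical usage sketch (assumed caller, not part of this header):
+ * the field packers above OR together into a single register write, e.g.
+ *
+ *   gpu_write(gpu, REG_A4XX_VSC_PIPE_CONFIG_REG(i),
+ *             A4XX_VSC_PIPE_CONFIG_REG_X(x) | A4XX_VSC_PIPE_CONFIG_REG_Y(y) |
+ *             A4XX_VSC_PIPE_CONFIG_REG_W(w) | A4XX_VSC_PIPE_CONFIG_REG_H(h));
+ *
+ * where gpu_write() is the existing msm helper for 32-bit register writes.
+ */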
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_ADDRESS_REG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c18 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0x00000c18 + 0x1*i0; }
+
+#define REG_A4XX_VSC_PIPE_PARTIAL_POSN_1 0x00000c41
+
+#define REG_A4XX_VSC_PERFCTR_VSC_SEL_0 0x00000c50
+
+#define REG_A4XX_VSC_PERFCTR_VSC_SEL_1 0x00000c51
+
+#define REG_A4XX_VFD_DEBUG_CONTROL 0x00000e40
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_0 0x00000e43
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_1 0x00000e44
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_2 0x00000e45
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_3 0x00000e46
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_4 0x00000e47
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_5 0x00000e48
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_6 0x00000e49
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_7 0x00000e4a
+
+#define REG_A4XX_VGT_CL_INITIATOR 0x000021d0
+
+#define REG_A4XX_VGT_EVENT_INITIATOR 0x000021d9
+
+#define REG_A4XX_VFD_CONTROL_0 0x00002200
+#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x000000ff
+#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0
+static inline uint32_t A4XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT) & A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK;
+}
+#define A4XX_VFD_CONTROL_0_BYPASSATTROVS__MASK 0x0001fe00
+#define A4XX_VFD_CONTROL_0_BYPASSATTROVS__SHIFT 9
+static inline uint32_t A4XX_VFD_CONTROL_0_BYPASSATTROVS(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_0_BYPASSATTROVS__SHIFT) & A4XX_VFD_CONTROL_0_BYPASSATTROVS__MASK;
+}
+#define A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x03f00000
+#define A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 20
+static inline uint32_t A4XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
+}
+#define A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK 0xfc000000
+#define A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT 26
+static inline uint32_t A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT) & A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK;
+}
+
+#define REG_A4XX_VFD_CONTROL_1 0x00002201
+#define A4XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000ffff
+#define A4XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
+static inline uint32_t A4XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A4XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
+}
+#define A4XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
+#define A4XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
+static inline uint32_t A4XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A4XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+#define A4XX_VFD_CONTROL_1_REGID4INST__MASK 0xff000000
+#define A4XX_VFD_CONTROL_1_REGID4INST__SHIFT 24
+static inline uint32_t A4XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A4XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+
+#define REG_A4XX_VFD_CONTROL_2 0x00002202
+
+#define REG_A4XX_VFD_CONTROL_3 0x00002203
+#define A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK 0x0000ff00
+#define A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT 8
+static inline uint32_t A4XX_VFD_CONTROL_3_REGID_VTXCNT(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT) & A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK;
+}
+#define A4XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000
+#define A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16
+static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSX__MASK;
+}
+#define A4XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000
+#define A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24
+static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSY__MASK;
+}
+
+#define REG_A4XX_VFD_CONTROL_4 0x00002204
+
+#define REG_A4XX_VFD_INDEX_OFFSET 0x00002208
+
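+/* VFD_FETCH is an array of four-register groups (stride 0x4 per element):
+ * FETCH_INSTR_0..FETCH_INSTR_3 of element i0 sit at consecutive offsets
+ * starting at 0x220a + 4*i0.
+ */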
+static inline uint32_t REG_A4XX_VFD_FETCH(uint32_t i0) { return 0x0000220a + 0x4*i0; }
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x0000220a + 0x4*i0; }
+#define A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f
+#define A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT 0
+static inline uint32_t A4XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK;
+}
+#define A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0001ff80
+#define A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7
+static inline uint32_t A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK;
+}
+#define A4XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00080000
+#define A4XX_VFD_FETCH_INSTR_0_INSTANCED 0x00100000
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x0000220b + 0x4*i0; }
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_2(uint32_t i0) { return 0x0000220c + 0x4*i0; }
+#define A4XX_VFD_FETCH_INSTR_2_SIZE__MASK 0xffffffff
+#define A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT 0
+static inline uint32_t A4XX_VFD_FETCH_INSTR_2_SIZE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT) & A4XX_VFD_FETCH_INSTR_2_SIZE__MASK;
+}
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_3(uint32_t i0) { return 0x0000220d + 0x4*i0; }
+#define A4XX_VFD_FETCH_INSTR_3_STEPRATE__MASK 0x000001ff
+#define A4XX_VFD_FETCH_INSTR_3_STEPRATE__SHIFT 0
+static inline uint32_t A4XX_VFD_FETCH_INSTR_3_STEPRATE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_FETCH_INSTR_3_STEPRATE__SHIFT) & A4XX_VFD_FETCH_INSTR_3_STEPRATE__MASK;
+}
+
+static inline uint32_t REG_A4XX_VFD_DECODE(uint32_t i0) { return 0x0000228a + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000228a + 0x1*i0; }
+#define A4XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f
+#define A4XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A4XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A4XX_VFD_DECODE_INSTR_WRITEMASK__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_CONSTFILL 0x00000010
+#define A4XX_VFD_DECODE_INSTR_FORMAT__MASK 0x00000fc0
+#define A4XX_VFD_DECODE_INSTR_FORMAT__SHIFT 6
+static inline uint32_t A4XX_VFD_DECODE_INSTR_FORMAT(enum a4xx_vtx_fmt val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A4XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_REGID__MASK 0x000ff000
+#define A4XX_VFD_DECODE_INSTR_REGID__SHIFT 12
+static inline uint32_t A4XX_VFD_DECODE_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_REGID__SHIFT) & A4XX_VFD_DECODE_INSTR_REGID__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_INT 0x00100000
+#define A4XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000
+#define A4XX_VFD_DECODE_INSTR_SWAP__SHIFT 22
+static inline uint32_t A4XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A4XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
+#define A4XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
+static inline uint32_t A4XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A4XX_VFD_DECODE_INSTR_SHIFTCNT__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_LASTCOMPVALID 0x20000000
+#define A4XX_VFD_DECODE_INSTR_SWITCHNEXT 0x40000000
+
+#define REG_A4XX_TPL1_DEBUG_ECO_CONTROL 0x00000f00
+
+#define REG_A4XX_TPL1_TP_MODE_CONTROL 0x00000f03
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_0 0x00000f04
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_1 0x00000f05
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_2 0x00000f06
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_3 0x00000f07
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_4 0x00000f08
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_5 0x00000f09
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_6 0x00000f0a
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_7 0x00000f0b
+
+#define REG_A4XX_TPL1_TP_TEX_OFFSET 0x00002380
+
+#define REG_A4XX_TPL1_TP_TEX_COUNT 0x00002381
+#define A4XX_TPL1_TP_TEX_COUNT_VS__MASK 0x000000ff
+#define A4XX_TPL1_TP_TEX_COUNT_VS__SHIFT 0
+static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_VS(uint32_t val)
+{
+ return ((val) << A4XX_TPL1_TP_TEX_COUNT_VS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_VS__MASK;
+}
+#define A4XX_TPL1_TP_TEX_COUNT_HS__MASK 0x0000ff00
+#define A4XX_TPL1_TP_TEX_COUNT_HS__SHIFT 8
+static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_HS(uint32_t val)
+{
+ return ((val) << A4XX_TPL1_TP_TEX_COUNT_HS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_HS__MASK;
+}
+#define A4XX_TPL1_TP_TEX_COUNT_DS__MASK 0x00ff0000
+#define A4XX_TPL1_TP_TEX_COUNT_DS__SHIFT 16
+static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_DS(uint32_t val)
+{
+ return ((val) << A4XX_TPL1_TP_TEX_COUNT_DS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_DS__MASK;
+}
+#define A4XX_TPL1_TP_TEX_COUNT_GS__MASK 0xff000000
+#define A4XX_TPL1_TP_TEX_COUNT_GS__SHIFT 24
+static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_GS(uint32_t val)
+{
+ return ((val) << A4XX_TPL1_TP_TEX_COUNT_GS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_GS__MASK;
+}
+
+#define REG_A4XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR 0x00002384
+
+#define REG_A4XX_TPL1_TP_HS_BORDER_COLOR_BASE_ADDR 0x00002387
+
+#define REG_A4XX_TPL1_TP_DS_BORDER_COLOR_BASE_ADDR 0x0000238a
+
+#define REG_A4XX_TPL1_TP_GS_BORDER_COLOR_BASE_ADDR 0x0000238d
+
+#define REG_A4XX_TPL1_TP_FS_TEX_COUNT 0x000023a0
+
+#define REG_A4XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x000023a1
+
+#define REG_A4XX_TPL1_TP_CS_BORDER_COLOR_BASE_ADDR 0x000023a4
+
+#define REG_A4XX_TPL1_TP_CS_SAMPLER_BASE_ADDR 0x000023a5
+
+#define REG_A4XX_TPL1_TP_CS_TEXMEMOBJ_BASE_ADDR 0x000023a6
+
+#define REG_A4XX_GRAS_TSE_STATUS 0x00000c80
+
+#define REG_A4XX_GRAS_DEBUG_ECO_CONTROL 0x00000c81
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c88
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c89
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c8a
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c8b
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c8c
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c8d
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c8e
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c8f
+
+#define REG_A4XX_GRAS_CL_CLIP_CNTL 0x00002000
+#define A4XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00008000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZNEAR_CLIP_DISABLE 0x00010000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000
+
+#define REG_A4XX_GRAS_CNTL 0x00002003
+#define A4XX_GRAS_CNTL_IJ_PERSP 0x00000001
+#define A4XX_GRAS_CNTL_IJ_LINEAR 0x00000002
+
+#define REG_A4XX_GRAS_CL_GB_CLIP_ADJ 0x00002004
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK;
+}
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A4XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK;
+}
+
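+/* The viewport offset/scale packers below take a float and store its raw
+ * IEEE-754 bit pattern via fui(), a float-to-uint32 bit cast provided
+ * alongside these generated headers. The kernel itself avoids floating
+ * point, so these float helpers are presumably only exercised by userspace
+ * consumers of the same register database.
+ */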
+#define REG_A4XX_GRAS_CL_VPORT_XOFFSET_0 0x00002008
+#define A4XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_XSCALE_0 0x00002009
+#define A4XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_YOFFSET_0 0x0000200a
+#define A4XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_YSCALE_0 0x0000200b
+#define A4XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_ZOFFSET_0 0x0000200c
+#define A4XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_ZSCALE_0 0x0000200d
+#define A4XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
+
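+/* The point-size packers below convert to fixed point with four fractional
+ * bits (val * 16.0), i.e. sizes are encoded in 1/16-pixel steps.
+ */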
+#define REG_A4XX_GRAS_SU_POINT_MINMAX 0x00002070
+#define A4XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A4XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A4XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A4XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A4XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A4XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A4XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_POINT_SIZE 0x00002071
+#define A4XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A4XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((int32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_SIZE__SHIFT) & A4XX_GRAS_SU_POINT_SIZE__MASK;
+}
+
+#define REG_A4XX_GRAS_ALPHA_CONTROL 0x00002073
+#define A4XX_GRAS_ALPHA_CONTROL_ALPHA_TEST_ENABLE 0x00000004
+#define A4XX_GRAS_ALPHA_CONTROL_FORCE_FRAGZ_TO_FS 0x00000008
+
+#define REG_A4XX_GRAS_SU_POLY_OFFSET_SCALE 0x00002074
+#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_POLY_OFFSET_OFFSET 0x00002075
+#define A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_POLY_OFFSET_CLAMP 0x00002076
+#define A4XX_GRAS_SU_POLY_OFFSET_CLAMP__MASK 0xffffffff
+#define A4XX_GRAS_SU_POLY_OFFSET_CLAMP__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_CLAMP(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_CLAMP__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_CLAMP__MASK;
+}
+
+#define REG_A4XX_GRAS_DEPTH_CONTROL 0x00002077
+#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK 0x00000003
+#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT 0
+static inline uint32_t A4XX_GRAS_DEPTH_CONTROL_FORMAT(enum a4xx_depth_format val)
+{
+ return ((val) << A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT) & A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_MODE_CONTROL 0x00002078
+#define A4XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
+#define A4XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
+#define A4XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004
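+/* line half-width in 1/4-pixel steps (val * 4.0), 8-bit field: */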
+#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8
+#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
+{
+ return ((((int32_t)(val * 4.0))) << A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
+}
+#define A4XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
+#define A4XX_GRAS_SU_MODE_CONTROL_MSAA_ENABLE 0x00002000
+#define A4XX_GRAS_SU_MODE_CONTROL_RENDERING_PASS 0x00100000
+
+#define REG_A4XX_GRAS_SC_CONTROL 0x0000207b
+#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x0000000c
+#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 2
+static inline uint32_t A4XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
+{
+ return ((val) << A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK;
+}
+#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000380
+#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 7
+static inline uint32_t A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK;
+}
+#define A4XX_GRAS_SC_CONTROL_MSAA_DISABLE 0x00000800
+#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000
+#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12
+static inline uint32_t A4XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_TL 0x0000207c
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
+}
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_BR 0x0000207d
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
+}
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000209c
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_WINDOW_SCISSOR_TL 0x0000209d
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_BR 0x0000209e
+#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_BR_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__MASK;
+}
+#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_TL 0x0000209f
+#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__MASK;
+}
+#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__MASK;
+}
+
+#define REG_A4XX_UCHE_CACHE_MODE_CONTROL 0x00000e80
+
+#define REG_A4XX_UCHE_TRAP_BASE_LO 0x00000e83
+
+#define REG_A4XX_UCHE_TRAP_BASE_HI 0x00000e84
+
+#define REG_A4XX_UCHE_CACHE_STATUS 0x00000e88
+
+#define REG_A4XX_UCHE_INVALIDATE0 0x00000e8a
+
+#define REG_A4XX_UCHE_INVALIDATE1 0x00000e8b
+
+#define REG_A4XX_UCHE_CACHE_WAYS_VFD 0x00000e8c
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000e8e
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000e8f
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000e90
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000e91
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000e92
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000e93
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000e94
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000e95
+
+#define REG_A4XX_HLSQ_TIMEOUT_THRESHOLD 0x00000e00
+
+#define REG_A4XX_HLSQ_DEBUG_ECO_CONTROL 0x00000e04
+
+#define REG_A4XX_HLSQ_MODE_CONTROL 0x00000e05
+
+#define REG_A4XX_HLSQ_PERF_PIPE_MASK 0x00000e0e
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e06
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e07
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e08
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e09
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e0a
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e0b
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e0c
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e0d
+
+#define REG_A4XX_HLSQ_CONTROL_0_REG 0x000023c0
+#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
+#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
+static inline uint32_t A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
+#define A4XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
+#define A4XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
+#define A4XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
+#define A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000
+#define A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27
+static inline uint32_t A4XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT) & A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
+#define A4XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
+#define A4XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
+#define A4XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000
+
+#define REG_A4XX_HLSQ_CONTROL_1_REG 0x000023c1
+#define A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x00000040
+#define A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
+static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
+#define A4XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
+#define A4XX_HLSQ_CONTROL_1_REG_COORDREGID__MASK 0x00ff0000
+#define A4XX_HLSQ_CONTROL_1_REG_COORDREGID__SHIFT 16
+static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_COORDREGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_1_REG_COORDREGID__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_COORDREGID__MASK;
+}
+#define A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__MASK 0xff000000
+#define A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__SHIFT 24
+static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__MASK;
+}
+
+#define REG_A4XX_HLSQ_CONTROL_2_REG 0x000023c2
+#define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
+#define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
+static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK;
+}
+#define A4XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000003fc
+#define A4XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 2
+static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
+#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__MASK 0x0003fc00
+#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__SHIFT 10
+static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__MASK;
+}
+#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__MASK 0x03fc0000
+#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__SHIFT 18
+static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__MASK;
+}
+
+#define REG_A4XX_HLSQ_CONTROL_3_REG 0x000023c3
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK;
+}
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK;
+}
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK;
+}
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK;
+}
+
+#define REG_A4XX_HLSQ_CONTROL_4_REG 0x000023c4
+#define A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff
+#define A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK 0x0000ff00
+#define A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT 8
+static inline uint32_t A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK;
+}
+
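+/* The per-stage HLSQ_*_CONTROL_REG registers that follow (VS, FS, HS, DS,
+ * GS, and CS) share one field layout: CONSTLENGTH, CONSTOBJECTOFFSET,
+ * SSBO_ENABLE, ENABLED, SHADEROBJOFFSET and INSTRLENGTH.
+ */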
+#define REG_A4XX_HLSQ_VS_CONTROL_REG 0x000023c5
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_VS_CONTROL_REG_SSBO_ENABLE 0x00008000
+#define A4XX_HLSQ_VS_CONTROL_REG_ENABLED 0x00010000
+#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_FS_CONTROL_REG 0x000023c6
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_FS_CONTROL_REG_SSBO_ENABLE 0x00008000
+#define A4XX_HLSQ_FS_CONTROL_REG_ENABLED 0x00010000
+#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_HS_CONTROL_REG 0x000023c7
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_HS_CONTROL_REG_SSBO_ENABLE 0x00008000
+#define A4XX_HLSQ_HS_CONTROL_REG_ENABLED 0x00010000
+#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_DS_CONTROL_REG 0x000023c8
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_DS_CONTROL_REG_SSBO_ENABLE 0x00008000
+#define A4XX_HLSQ_DS_CONTROL_REG_ENABLED 0x00010000
+#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_GS_CONTROL_REG 0x000023c9
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_GS_CONTROL_REG_SSBO_ENABLE 0x00008000
+#define A4XX_HLSQ_GS_CONTROL_REG_ENABLED 0x00010000
+#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_CS_CONTROL_REG 0x000023ca
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_CS_CONTROL_REG_SSBO_ENABLE 0x00008000
+#define A4XX_HLSQ_CS_CONTROL_REG_ENABLED 0x00010000
+#define A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__MASK;
+}
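+
+/* The field builders above shift a value into position and mask it to
+ * the field width, so register words are composed by OR'ing fields
+ * together.  An illustrative (non-upstream) sketch, with constlen and
+ * obj_offset being caller-provided values:
+ *
+ *	gpu_write(gpu, REG_A4XX_HLSQ_CS_CONTROL_REG,
+ *		  A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH(constlen) |
+ *		  A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET(obj_offset) |
+ *		  A4XX_HLSQ_CS_CONTROL_REG_ENABLED);
+ */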
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_0 0x000023cd
+#define A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__MASK 0x00000003
+#define A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__MASK;
+}
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__SHIFT 2
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__MASK;
+}
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__SHIFT 12
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__MASK;
+}
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__MASK;
+}
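+
+/* A compute dispatch sketch (illustrative only; exact encodings, e.g.
+ * whether KERNELDIM/LOCALSIZE are biased by -1, are not visible from
+ * this header alone), with dim and local[] caller-provided:
+ *
+ *	gpu_write(gpu, REG_A4XX_HLSQ_CL_NDRANGE_0,
+ *		  A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM(dim) |
+ *		  A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX(local[0]) |
+ *		  A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY(local[1]) |
+ *		  A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ(local[2]));
+ */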
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_1 0x000023ce
+#define A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__MASK 0xffffffff
+#define A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_1_SIZE_X(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__SHIFT) & A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__MASK;
+}
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_2 0x000023cf
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_3 0x000023d0
+#define A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__MASK 0xffffffff
+#define A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__SHIFT) & A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__MASK;
+}
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_4 0x000023d1
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_5 0x000023d2
+#define A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__MASK 0xffffffff
+#define A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__SHIFT) & A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__MASK;
+}
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_6 0x000023d3
+
+#define REG_A4XX_HLSQ_CL_CONTROL_0 0x000023d4
+#define A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__MASK 0x00000fff
+#define A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__MASK;
+}
+#define A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID__MASK 0x00fff000
+#define A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID__SHIFT 12
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID__MASK;
+}
+#define A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__MASK 0xff000000
+#define A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__SHIFT 24
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__MASK;
+}
+
+#define REG_A4XX_HLSQ_CL_CONTROL_1 0x000023d5
+#define A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID__MASK 0x00000fff
+#define A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID__MASK;
+}
+#define A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID__MASK 0x00fff000
+#define A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID__SHIFT 12
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID__MASK;
+}
+
+#define REG_A4XX_HLSQ_CL_KERNEL_CONST 0x000023d6
+#define A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID__MASK 0x00000fff
+#define A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID__SHIFT) & A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID__MASK;
+}
+#define A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID__MASK 0x00fff000
+#define A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID__SHIFT 12
+static inline uint32_t A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID__SHIFT) & A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID__MASK;
+}
+
+#define REG_A4XX_HLSQ_CL_KERNEL_GROUP_X 0x000023d7
+
+#define REG_A4XX_HLSQ_CL_KERNEL_GROUP_Y 0x000023d8
+
+#define REG_A4XX_HLSQ_CL_KERNEL_GROUP_Z 0x000023d9
+
+#define REG_A4XX_HLSQ_CL_WG_OFFSET 0x000023da
+#define A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID__MASK 0x00000fff
+#define A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID__SHIFT) & A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID__MASK;
+}
+
+#define REG_A4XX_HLSQ_UPDATE_CONTROL 0x000023db
+
+#define REG_A4XX_PC_BINNING_COMMAND 0x00000d00
+#define A4XX_PC_BINNING_COMMAND_BINNING_ENABLE 0x00000001
+
+#define REG_A4XX_PC_TESSFACTOR_ADDR 0x00000d08
+
+#define REG_A4XX_PC_DRAWCALL_SETUP_OVERRIDE 0x00000d0c
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_0 0x00000d10
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_1 0x00000d11
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_2 0x00000d12
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_3 0x00000d13
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_4 0x00000d14
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_5 0x00000d15
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_6 0x00000d16
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_7 0x00000d17
+
+#define REG_A4XX_PC_BIN_BASE 0x000021c0
+
+#define REG_A4XX_PC_VSTREAM_CONTROL 0x000021c2
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val)
+{
+ return ((val) << A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A4XX_PC_VSTREAM_CONTROL_SIZE__MASK;
+}
+#define A4XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000
+#define A4XX_PC_VSTREAM_CONTROL_N__SHIFT 22
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_N(uint32_t val)
+{
+ return ((val) << A4XX_PC_VSTREAM_CONTROL_N__SHIFT) & A4XX_PC_VSTREAM_CONTROL_N__MASK;
+}
+
+#define REG_A4XX_PC_PRIM_VTX_CNTL 0x000021c4
+#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK 0x0000000f
+#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT 0
+static inline uint32_t A4XX_PC_PRIM_VTX_CNTL_VAROUT(uint32_t val)
+{
+ return ((val) << A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT) & A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK;
+}
+#define A4XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART 0x00100000
+#define A4XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
+#define A4XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
+
+#define REG_A4XX_PC_PRIM_VTX_CNTL2 0x000021c5
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK 0x00000007
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT 0
+static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK;
+}
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK 0x00000038
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT 3
+static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK;
+}
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_ENABLE 0x00000040
+
+#define REG_A4XX_PC_RESTART_INDEX 0x000021c6
+
+#define REG_A4XX_PC_GS_PARAM 0x000021e5
+#define A4XX_PC_GS_PARAM_MAX_VERTICES__MASK 0x000003ff
+#define A4XX_PC_GS_PARAM_MAX_VERTICES__SHIFT 0
+static inline uint32_t A4XX_PC_GS_PARAM_MAX_VERTICES(uint32_t val)
+{
+ return ((val) << A4XX_PC_GS_PARAM_MAX_VERTICES__SHIFT) & A4XX_PC_GS_PARAM_MAX_VERTICES__MASK;
+}
+#define A4XX_PC_GS_PARAM_INVOCATIONS__MASK 0x0000f800
+#define A4XX_PC_GS_PARAM_INVOCATIONS__SHIFT 11
+static inline uint32_t A4XX_PC_GS_PARAM_INVOCATIONS(uint32_t val)
+{
+ return ((val) << A4XX_PC_GS_PARAM_INVOCATIONS__SHIFT) & A4XX_PC_GS_PARAM_INVOCATIONS__MASK;
+}
+#define A4XX_PC_GS_PARAM_PRIMTYPE__MASK 0x01800000
+#define A4XX_PC_GS_PARAM_PRIMTYPE__SHIFT 23
+static inline uint32_t A4XX_PC_GS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A4XX_PC_GS_PARAM_PRIMTYPE__SHIFT) & A4XX_PC_GS_PARAM_PRIMTYPE__MASK;
+}
+#define A4XX_PC_GS_PARAM_LAYER 0x80000000
+
+#define REG_A4XX_PC_HS_PARAM 0x000021e7
+#define A4XX_PC_HS_PARAM_VERTICES_OUT__MASK 0x0000003f
+#define A4XX_PC_HS_PARAM_VERTICES_OUT__SHIFT 0
+static inline uint32_t A4XX_PC_HS_PARAM_VERTICES_OUT(uint32_t val)
+{
+ return ((val) << A4XX_PC_HS_PARAM_VERTICES_OUT__SHIFT) & A4XX_PC_HS_PARAM_VERTICES_OUT__MASK;
+}
+#define A4XX_PC_HS_PARAM_SPACING__MASK 0x00600000
+#define A4XX_PC_HS_PARAM_SPACING__SHIFT 21
+static inline uint32_t A4XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val)
+{
+ return ((val) << A4XX_PC_HS_PARAM_SPACING__SHIFT) & A4XX_PC_HS_PARAM_SPACING__MASK;
+}
+#define A4XX_PC_HS_PARAM_CW 0x00800000
+#define A4XX_PC_HS_PARAM_CONNECTED 0x01000000
+
+#define REG_A4XX_VBIF_VERSION 0x00003000
+
+#define REG_A4XX_VBIF_CLKON 0x00003001
+#define A4XX_VBIF_CLKON_FORCE_ON_TESTBUS 0x00000001
+
+#define REG_A4XX_VBIF_ABIT_SORT 0x0000301c
+
+#define REG_A4XX_VBIF_ABIT_SORT_CONF 0x0000301d
+
+#define REG_A4XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A4XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
+
+#define REG_A4XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
+
+#define REG_A4XX_VBIF_IN_WR_LIM_CONF0 0x00003030
+
+#define REG_A4XX_VBIF_IN_WR_LIM_CONF1 0x00003031
+
+#define REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
+
+#define REG_A4XX_VBIF_PERF_CNT_EN0 0x000030c0
+
+#define REG_A4XX_VBIF_PERF_CNT_EN1 0x000030c1
+
+#define REG_A4XX_VBIF_PERF_CNT_EN2 0x000030c2
+
+#define REG_A4XX_VBIF_PERF_CNT_EN3 0x000030c3
+
+#define REG_A4XX_VBIF_PERF_CNT_SEL0 0x000030d0
+
+#define REG_A4XX_VBIF_PERF_CNT_SEL1 0x000030d1
+
+#define REG_A4XX_VBIF_PERF_CNT_SEL2 0x000030d2
+
+#define REG_A4XX_VBIF_PERF_CNT_SEL3 0x000030d3
+
+#define REG_A4XX_VBIF_PERF_CNT_LOW0 0x000030d8
+
+#define REG_A4XX_VBIF_PERF_CNT_LOW1 0x000030d9
+
+#define REG_A4XX_VBIF_PERF_CNT_LOW2 0x000030da
+
+#define REG_A4XX_VBIF_PERF_CNT_LOW3 0x000030db
+
+#define REG_A4XX_VBIF_PERF_CNT_HIGH0 0x000030e0
+
+#define REG_A4XX_VBIF_PERF_CNT_HIGH1 0x000030e1
+
+#define REG_A4XX_VBIF_PERF_CNT_HIGH2 0x000030e2
+
+#define REG_A4XX_VBIF_PERF_CNT_HIGH3 0x000030e3
+
+#define REG_A4XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
+
+#define REG_A4XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
+
+#define REG_A4XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
+
+#define REG_A4XX_UNKNOWN_0CC5 0x00000cc5
+
+#define REG_A4XX_UNKNOWN_0CC6 0x00000cc6
+
+#define REG_A4XX_UNKNOWN_0D01 0x00000d01
+
+#define REG_A4XX_UNKNOWN_0E42 0x00000e42
+
+#define REG_A4XX_UNKNOWN_0EC2 0x00000ec2
+
+#define REG_A4XX_UNKNOWN_2001 0x00002001
+
+#define REG_A4XX_UNKNOWN_209B 0x0000209b
+
+#define REG_A4XX_UNKNOWN_20EF 0x000020ef
+
+#define REG_A4XX_UNKNOWN_2152 0x00002152
+
+#define REG_A4XX_UNKNOWN_2153 0x00002153
+
+#define REG_A4XX_UNKNOWN_2154 0x00002154
+
+#define REG_A4XX_UNKNOWN_2155 0x00002155
+
+#define REG_A4XX_UNKNOWN_2156 0x00002156
+
+#define REG_A4XX_UNKNOWN_2157 0x00002157
+
+#define REG_A4XX_UNKNOWN_21C3 0x000021c3
+
+#define REG_A4XX_UNKNOWN_21E6 0x000021e6
+
+#define REG_A4XX_UNKNOWN_2209 0x00002209
+
+#define REG_A4XX_UNKNOWN_22D7 0x000022d7
+
+#define REG_A4XX_UNKNOWN_2352 0x00002352
+
+#define REG_A4XX_TEX_SAMP_0 0x00000000
+#define A4XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
+#define A4XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
+#define A4XX_TEX_SAMP_0_XY_MAG__SHIFT 1
+static inline uint32_t A4XX_TEX_SAMP_0_XY_MAG(enum a4xx_tex_filter val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_XY_MAG__SHIFT) & A4XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A4XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
+#define A4XX_TEX_SAMP_0_XY_MIN__SHIFT 3
+static inline uint32_t A4XX_TEX_SAMP_0_XY_MIN(enum a4xx_tex_filter val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_XY_MIN__SHIFT) & A4XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A4XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
+#define A4XX_TEX_SAMP_0_WRAP_S__SHIFT 5
+static inline uint32_t A4XX_TEX_SAMP_0_WRAP_S(enum a4xx_tex_clamp val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_WRAP_S__SHIFT) & A4XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A4XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
+#define A4XX_TEX_SAMP_0_WRAP_T__SHIFT 8
+static inline uint32_t A4XX_TEX_SAMP_0_WRAP_T(enum a4xx_tex_clamp val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_WRAP_T__SHIFT) & A4XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A4XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
+#define A4XX_TEX_SAMP_0_WRAP_R__SHIFT 11
+static inline uint32_t A4XX_TEX_SAMP_0_WRAP_R(enum a4xx_tex_clamp val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_WRAP_R__SHIFT) & A4XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A4XX_TEX_SAMP_0_ANISO__MASK 0x0001c000
+#define A4XX_TEX_SAMP_0_ANISO__SHIFT 14
+static inline uint32_t A4XX_TEX_SAMP_0_ANISO(enum a4xx_tex_aniso val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_ANISO__SHIFT) & A4XX_TEX_SAMP_0_ANISO__MASK;
+}
+#define A4XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
+#define A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
+static inline uint32_t A4XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 256.0))) << A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A4XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
+
+#define REG_A4XX_TEX_SAMP_1 0x00000001
+#define A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
+#define A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
+static inline uint32_t A4XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A4XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
+#define A4XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
+#define A4XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
+#define A4XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
+#define A4XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
+static inline uint32_t A4XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A4XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A4XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A4XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
+#define A4XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
+static inline uint32_t A4XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A4XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A4XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
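+
+/* The LOD helpers convert a float to fixed point with 8 fractional bits
+ * ((int32_t)(val * 256.0)); LOD_BIAS is signed while MIN_LOD/MAX_LOD are
+ * unsigned.  An illustrative (non-upstream) sampler sketch, with filt a
+ * caller-chosen enum a4xx_tex_filter:
+ *
+ *	samp[0] = A4XX_TEX_SAMP_0_XY_MAG(filt) |
+ *		  A4XX_TEX_SAMP_0_XY_MIN(filt) |
+ *		  A4XX_TEX_SAMP_0_LOD_BIAS(-0.5f);
+ *	samp[1] = A4XX_TEX_SAMP_1_MIN_LOD(0.0f) |
+ *		  A4XX_TEX_SAMP_1_MAX_LOD(12.0f);
+ */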
+
+#define REG_A4XX_TEX_CONST_0 0x00000000
+#define A4XX_TEX_CONST_0_TILED 0x00000001
+#define A4XX_TEX_CONST_0_SRGB 0x00000004
+#define A4XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A4XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A4XX_TEX_CONST_0_SWIZ_X(enum a4xx_tex_swiz val)
+{
+ return ((val) << A4XX_TEX_CONST_0_SWIZ_X__SHIFT) & A4XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A4XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A4XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A4XX_TEX_CONST_0_SWIZ_Y(enum a4xx_tex_swiz val)
+{
+ return ((val) << A4XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A4XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A4XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A4XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A4XX_TEX_CONST_0_SWIZ_Z(enum a4xx_tex_swiz val)
+{
+ return ((val) << A4XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A4XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A4XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A4XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A4XX_TEX_CONST_0_SWIZ_W(enum a4xx_tex_swiz val)
+{
+ return ((val) << A4XX_TEX_CONST_0_SWIZ_W__SHIFT) & A4XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A4XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
+#define A4XX_TEX_CONST_0_MIPLVLS__SHIFT 16
+static inline uint32_t A4XX_TEX_CONST_0_MIPLVLS(uint32_t val)
+{
+ return ((val) << A4XX_TEX_CONST_0_MIPLVLS__SHIFT) & A4XX_TEX_CONST_0_MIPLVLS__MASK;
+}
+#define A4XX_TEX_CONST_0_FMT__MASK 0x1fc00000
+#define A4XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A4XX_TEX_CONST_0_FMT(enum a4xx_tex_fmt val)
+{
+ return ((val) << A4XX_TEX_CONST_0_FMT__SHIFT) & A4XX_TEX_CONST_0_FMT__MASK;
+}
+#define A4XX_TEX_CONST_0_TYPE__MASK 0xe0000000
+#define A4XX_TEX_CONST_0_TYPE__SHIFT 29
+static inline uint32_t A4XX_TEX_CONST_0_TYPE(enum a4xx_tex_type val)
+{
+ return ((val) << A4XX_TEX_CONST_0_TYPE__SHIFT) & A4XX_TEX_CONST_0_TYPE__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_1 0x00000001
+#define A4XX_TEX_CONST_1_HEIGHT__MASK 0x00007fff
+#define A4XX_TEX_CONST_1_HEIGHT__SHIFT 0
+static inline uint32_t A4XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A4XX_TEX_CONST_1_HEIGHT__SHIFT) & A4XX_TEX_CONST_1_HEIGHT__MASK;
+}
+#define A4XX_TEX_CONST_1_WIDTH__MASK 0x3fff8000
+#define A4XX_TEX_CONST_1_WIDTH__SHIFT 15
+static inline uint32_t A4XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A4XX_TEX_CONST_1_WIDTH__SHIFT) & A4XX_TEX_CONST_1_WIDTH__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_2 0x00000002
+#define A4XX_TEX_CONST_2_PITCHALIGN__MASK 0x0000000f
+#define A4XX_TEX_CONST_2_PITCHALIGN__SHIFT 0
+static inline uint32_t A4XX_TEX_CONST_2_PITCHALIGN(uint32_t val)
+{
+ return ((val) << A4XX_TEX_CONST_2_PITCHALIGN__SHIFT) & A4XX_TEX_CONST_2_PITCHALIGN__MASK;
+}
+#define A4XX_TEX_CONST_2_BUFFER 0x00000040
+#define A4XX_TEX_CONST_2_PITCH__MASK 0x3ffffe00
+#define A4XX_TEX_CONST_2_PITCH__SHIFT 9
+static inline uint32_t A4XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A4XX_TEX_CONST_2_PITCH__SHIFT) & A4XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A4XX_TEX_CONST_2_SWAP__MASK 0xc0000000
+#define A4XX_TEX_CONST_2_SWAP__SHIFT 30
+static inline uint32_t A4XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A4XX_TEX_CONST_2_SWAP__SHIFT) & A4XX_TEX_CONST_2_SWAP__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_3 0x00000003
+#define A4XX_TEX_CONST_3_LAYERSZ__MASK 0x00003fff
+#define A4XX_TEX_CONST_3_LAYERSZ__SHIFT 0
+static inline uint32_t A4XX_TEX_CONST_3_LAYERSZ(uint32_t val)
+{
+ return ((val >> 12) << A4XX_TEX_CONST_3_LAYERSZ__SHIFT) & A4XX_TEX_CONST_3_LAYERSZ__MASK;
+}
+#define A4XX_TEX_CONST_3_DEPTH__MASK 0x7ffc0000
+#define A4XX_TEX_CONST_3_DEPTH__SHIFT 18
+static inline uint32_t A4XX_TEX_CONST_3_DEPTH(uint32_t val)
+{
+ return ((val) << A4XX_TEX_CONST_3_DEPTH__SHIFT) & A4XX_TEX_CONST_3_DEPTH__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_4 0x00000004
+#define A4XX_TEX_CONST_4_LAYERSZ__MASK 0x0000000f
+#define A4XX_TEX_CONST_4_LAYERSZ__SHIFT 0
+static inline uint32_t A4XX_TEX_CONST_4_LAYERSZ(uint32_t val)
+{
+ return ((val >> 12) << A4XX_TEX_CONST_4_LAYERSZ__SHIFT) & A4XX_TEX_CONST_4_LAYERSZ__MASK;
+}
+#define A4XX_TEX_CONST_4_BASE__MASK 0xffffffe0
+#define A4XX_TEX_CONST_4_BASE__SHIFT 5
+static inline uint32_t A4XX_TEX_CONST_4_BASE(uint32_t val)
+{
+ return ((val >> 5) << A4XX_TEX_CONST_4_BASE__SHIFT) & A4XX_TEX_CONST_4_BASE__MASK;
+}
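+
+/* A4XX_TEX_CONST_4_BASE() drops the low 5 bits, so the surface address
+ * must be 32-byte aligned, and the LAYERSZ helpers encode a byte size in
+ * 4096-byte units (val >> 12); callers pass plain byte values.  An
+ * illustrative sketch, with iova and layer_size_bytes caller-provided:
+ *
+ *	texconst[4] = A4XX_TEX_CONST_4_BASE(iova) |
+ *		      A4XX_TEX_CONST_4_LAYERSZ(layer_size_bytes);
+ */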
+
+#define REG_A4XX_TEX_CONST_5 0x00000005
+
+#define REG_A4XX_TEX_CONST_6 0x00000006
+
+#define REG_A4XX_TEX_CONST_7 0x00000007
+
+#define REG_A4XX_SSBO_0_0 0x00000000
+#define A4XX_SSBO_0_0_BASE__MASK 0xffffffe0
+#define A4XX_SSBO_0_0_BASE__SHIFT 5
+static inline uint32_t A4XX_SSBO_0_0_BASE(uint32_t val)
+{
+ return ((val >> 5) << A4XX_SSBO_0_0_BASE__SHIFT) & A4XX_SSBO_0_0_BASE__MASK;
+}
+
+#define REG_A4XX_SSBO_0_1 0x00000001
+#define A4XX_SSBO_0_1_PITCH__MASK 0x003fffff
+#define A4XX_SSBO_0_1_PITCH__SHIFT 0
+static inline uint32_t A4XX_SSBO_0_1_PITCH(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_0_1_PITCH__SHIFT) & A4XX_SSBO_0_1_PITCH__MASK;
+}
+
+#define REG_A4XX_SSBO_0_2 0x00000002
+#define A4XX_SSBO_0_2_ARRAY_PITCH__MASK 0x03fff000
+#define A4XX_SSBO_0_2_ARRAY_PITCH__SHIFT 12
+static inline uint32_t A4XX_SSBO_0_2_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A4XX_SSBO_0_2_ARRAY_PITCH__SHIFT) & A4XX_SSBO_0_2_ARRAY_PITCH__MASK;
+}
+
+#define REG_A4XX_SSBO_0_3 0x00000003
+#define A4XX_SSBO_0_3_CPP__MASK 0x0000003f
+#define A4XX_SSBO_0_3_CPP__SHIFT 0
+static inline uint32_t A4XX_SSBO_0_3_CPP(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_0_3_CPP__SHIFT) & A4XX_SSBO_0_3_CPP__MASK;
+}
+
+#define REG_A4XX_SSBO_1_0 0x00000000
+#define A4XX_SSBO_1_0_CPP__MASK 0x0000001f
+#define A4XX_SSBO_1_0_CPP__SHIFT 0
+static inline uint32_t A4XX_SSBO_1_0_CPP(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_0_CPP__SHIFT) & A4XX_SSBO_1_0_CPP__MASK;
+}
+#define A4XX_SSBO_1_0_FMT__MASK 0x0000ff00
+#define A4XX_SSBO_1_0_FMT__SHIFT 8
+static inline uint32_t A4XX_SSBO_1_0_FMT(enum a4xx_color_fmt val)
+{
+ return ((val) << A4XX_SSBO_1_0_FMT__SHIFT) & A4XX_SSBO_1_0_FMT__MASK;
+}
+#define A4XX_SSBO_1_0_WIDTH__MASK 0xffff0000
+#define A4XX_SSBO_1_0_WIDTH__SHIFT 16
+static inline uint32_t A4XX_SSBO_1_0_WIDTH(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_0_WIDTH__SHIFT) & A4XX_SSBO_1_0_WIDTH__MASK;
+}
+
+#define REG_A4XX_SSBO_1_1 0x00000001
+#define A4XX_SSBO_1_1_HEIGHT__MASK 0x0000ffff
+#define A4XX_SSBO_1_1_HEIGHT__SHIFT 0
+static inline uint32_t A4XX_SSBO_1_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_1_HEIGHT__SHIFT) & A4XX_SSBO_1_1_HEIGHT__MASK;
+}
+#define A4XX_SSBO_1_1_DEPTH__MASK 0xffff0000
+#define A4XX_SSBO_1_1_DEPTH__SHIFT 16
+static inline uint32_t A4XX_SSBO_1_1_DEPTH(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_1_DEPTH__SHIFT) & A4XX_SSBO_1_1_DEPTH__MASK;
+}
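+
+/* The SSBO descriptor words follow the same unit conventions as the
+ * texture state above: SSBO_0_0_BASE drops the low 5 bits of a byte
+ * address and SSBO_0_2_ARRAY_PITCH encodes 4096-byte units.  An
+ * illustrative (non-upstream) sketch, all names caller-provided:
+ *
+ *	ssbo0[0] = A4XX_SSBO_0_0_BASE(iova);
+ *	ssbo0[1] = A4XX_SSBO_0_1_PITCH(pitch_bytes);
+ *	ssbo1[0] = A4XX_SSBO_1_0_FMT(fmt) | A4XX_SSBO_1_0_WIDTH(width);
+ *	ssbo1[1] = A4XX_SSBO_1_1_HEIGHT(height) |
+ *		   A4XX_SSBO_1_1_DEPTH(depth);
+ */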
+
+#endif /* A4XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
new file mode 100644
index 000000000..a10feb8a4
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -0,0 +1,730 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ */
+#include "a4xx_gpu.h"
+
+#define A4XX_INT0_MASK \
+ (A4XX_INT0_RBBM_AHB_ERROR | \
+ A4XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
+ A4XX_INT0_CP_T0_PACKET_IN_IB | \
+ A4XX_INT0_CP_OPCODE_ERROR | \
+ A4XX_INT0_CP_RESERVED_BIT_ERROR | \
+ A4XX_INT0_CP_HW_FAULT | \
+ A4XX_INT0_CP_IB1_INT | \
+ A4XX_INT0_CP_IB2_INT | \
+ A4XX_INT0_CP_RB_INT | \
+ A4XX_INT0_CP_REG_PROTECT_FAULT | \
+ A4XX_INT0_CP_AHB_ERROR_HALT | \
+ A4XX_INT0_CACHE_FLUSH_TS | \
+ A4XX_INT0_UCHE_OOB_ACCESS)
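+
+/* A4XX_INT0_MASK is programmed into REG_A4XX_RBBM_INT_0_MASK in
+ * a4xx_hw_init(); the enabled interrupts are acknowledged and serviced
+ * in a4xx_irq() below.
+ */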
+
+extern bool hang_debug;
+static void a4xx_dump(struct msm_gpu *gpu);
+static bool a4xx_idle(struct msm_gpu *gpu);
+
+static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+ struct msm_ringbuffer *ring = submit->ring;
+ unsigned int i;
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ /* ignore IB-targets */
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ /* ignore if there has not been a ctx switch: */
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ break;
+ fallthrough;
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFE, 2);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ OUT_PKT2(ring);
+ break;
+ }
+ }
+
+ OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
+ OUT_RING(ring, submit->seqno);
+
+ /* Flush HLSQ lazy updates to make sure there is nothing
+ * pending for indirect loads after the timestamp has
+ * passed:
+ */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, HLSQ_FLUSH);
+
+ /* wait for idle before cache flush/interrupt */
+ OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+ OUT_RING(ring, 0x00000000);
+
+ /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+ OUT_RING(ring, CACHE_FLUSH_TS | CP_EVENT_WRITE_0_IRQ);
+ OUT_RING(ring, rbmemptr(ring, fence));
+ OUT_RING(ring, submit->seqno);
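+ /* (a4xx_irq() will see the resulting CACHE_FLUSH_TS interrupt and
+ * call msm_gpu_retire() to pick up the fence value written above)
+ */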
+
+ adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);
+}
+
+/*
+ * a4xx_enable_hwcg() - Program the clock control registers
+ * @gpu: the msm GPU to program
+ */
+static void a4xx_enable_hwcg(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ unsigned int i;
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TP(i), 0x0E739CE7);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TP(i), 0x00111111);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_SP(i), 0x22222222);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_SP(i), 0x00222222);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_SP(i), 0x00000104);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_SP(i), 0x00000081);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_UCHE, 0x22222222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_UCHE, 0x02222222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL3_UCHE, 0x00000000);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL4_UCHE, 0x00000000);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_UCHE, 0x00004444);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_UCHE, 0x00001112);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_RB(i), 0x22222222);
+
+ /* Disable L1 clocking in A420 due to CCU issues with it */
+ for (i = 0; i < 4; i++) {
+ if (adreno_is_a420(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
+ 0x00002020);
+ } else {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
+ 0x00022020);
+ }
+ }
+
+ /* No CCU for A405 */
+ if (!adreno_is_a405(adreno_gpu)) {
+ for (i = 0; i < 4; i++) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i),
+ 0x00000922);
+ }
+
+ for (i = 0; i < 4; i++) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i),
+ 0x00000000);
+ }
+
+ for (i = 0; i < 4; i++) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i),
+ 0x00000001);
+ }
+ }
+
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_GPC, 0x04100104);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_GPC, 0x00022222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM, 0x00000022);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM, 0x0000010F);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM, 0x00000022);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM, 0x00222222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00004104);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_HLSQ, 0x00000000);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00220000);
+ /*
+ * Early A430s have a timing issue with SP/TP power collapse;
+ * disabling HW clock gating prevents it.
+ */
+ if (adreno_is_a430(adreno_gpu) && adreno_gpu->rev.patchid < 2)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0);
+ else
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2, 0);
+}
+
+static bool a4xx_me_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT3(ring, CP_ME_INIT, 17);
+ OUT_RING(ring, 0x000003f7);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000080);
+ OUT_RING(ring, 0x00000100);
+ OUT_RING(ring, 0x00000180);
+ OUT_RING(ring, 0x00006600);
+ OUT_RING(ring, 0x00000150);
+ OUT_RING(ring, 0x0000014e);
+ OUT_RING(ring, 0x00000154);
+ OUT_RING(ring, 0x00000001);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);
+ return a4xx_idle(gpu);
+}
+
+static int a4xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
+ uint32_t *ptr, len;
+ int i, ret;
+
+ if (adreno_is_a405(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+ } else if (adreno_is_a420(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
+ gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
+ gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
+ gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+ } else if (adreno_is_a430(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
+ gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+ } else {
+ BUG();
+ }
+
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ gpu_write(gpu, REG_A4XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);
+
+ /* Tune the hysteresis counters for SP and CP idle detection */
+ gpu_write(gpu, REG_A4XX_RBBM_SP_HYST_CNT, 0x10);
+ gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
+
+ if (adreno_is_a430(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2, 0x30);
+ }
+
+ /* Enable the RBBM error reporting bits */
+ gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL0, 0x00000001);
+
+ /* Enable AHB error reporting */
+ gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL1, 0xa6ffffff);
+
+ /* Enable power counters */
+ gpu_write(gpu, REG_A4XX_RBBM_RBBM_CTL, 0x00000030);
+
+ /*
+ * Turn on hang detection - this spews a lot of useful information
+ * into the RBBM registers on a hang:
+ */
+ gpu_write(gpu, REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL,
+ (1 << 30) | 0xFFFF);
+
+ gpu_write(gpu, REG_A4XX_RB_GMEM_BASE_ADDR,
+ (unsigned int)(a4xx_gpu->ocmem.base >> 14));
+
+ /* Turn on performance counters: */
+ gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);
+
+ /* Use the first CP counter for timestamp queries.  Userspace may set
+ * this as well, but it selects the same counter/countable:
+ */
+ gpu_write(gpu, REG_A4XX_CP_PERFCTR_CP_SEL_0, CP_ALWAYS_COUNT);
+
+ if (adreno_is_a430(adreno_gpu))
+ gpu_write(gpu, REG_A4XX_UCHE_CACHE_WAYS_VFD, 0x07);
+
+ /* Disable L2 bypass to avoid UCHE out of bounds errors */
+ gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
+ gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);
+
+ gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
+ (adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));
+
+ /*
+ * On A430 enable SP regfile sleep for power savings.
+ * TODO: downstream does this for !420, so maybe it applies to 405 too?
+ */
+ if (!adreno_is_a420(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0,
+ 0x00000441);
+ gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1,
+ 0x00000441);
+ }
+
+ a4xx_enable_hwcg(gpu);
+
+ /*
+ * For A420 set RBBM_CLOCK_DELAY_HLSQ.CGC_HLSQ_TP_EARLY_CYC >= 2
+ * due to timing issue with HLSQ_TP_CLK_EN
+ */
+ if (adreno_is_a420(adreno_gpu)) {
+ unsigned int val;
+ val = gpu_read(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ);
+ val &= ~A4XX_CGC_HLSQ_EARLY_CYC__MASK;
+ val |= 2 << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT;
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, val);
+ }
+
+ /* setup access protection: */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT_CTRL, 0x00000007);
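+ /* (each CP_PROTECT entry appears to pack a base register offset with
+ * a range size in the upper bits; an access inside a protected range
+ * raises A4XX_INT0_CP_REG_PROTECT_FAULT, decoded in a4xx_irq() below)
+ */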
+
+ /* RBBM registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(0), 0x62000010);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(1), 0x63000020);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(2), 0x64000040);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(3), 0x65000080);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(4), 0x66000100);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(5), 0x64000200);
+
+ /* CP registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(6), 0x67000800);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(7), 0x64001600);
+
+ /* RB registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(8), 0x60003300);
+
+ /* HLSQ registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(9), 0x60003800);
+
+ /* VPC registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(10), 0x61003980);
+
+ /* SMMU registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(11), 0x6e010000);
+
+ gpu_write(gpu, REG_A4XX_RBBM_INT_0_MASK, A4XX_INT0_MASK);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ /*
+ * Use the default ringbuffer size and block size but disable the RPTR
+ * shadow
+ */
+ gpu_write(gpu, REG_A4XX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ /* Set the ringbuffer address */
+ gpu_write(gpu, REG_A4XX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
+ /* Load PM4: */
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
+ DBG("loading PM4 ucode version: %u", ptr[0]);
+ gpu_write(gpu, REG_A4XX_CP_ME_RAM_WADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_A4XX_CP_ME_RAM_DATA, ptr[i]);
+
+ /* Load PFP: */
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
+ DBG("loading PFP ucode version: %u", ptr[0]);
+
+ gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_ADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_DATA, ptr[i]);
+
+ /* clear ME_HALT to start micro engine */
+ gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);
+
+ return a4xx_me_init(gpu) ? 0 : -EINVAL;
+}
+
+static void a4xx_recover(struct msm_gpu *gpu)
+{
+ int i;
+
+ adreno_dump_info(gpu);
+
+ for (i = 0; i < 8; i++) {
+ printk("CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
+ }
+
+ /* dump registers before resetting gpu, if enabled: */
+ if (hang_debug)
+ a4xx_dump(gpu);
+
+ gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 1);
+ gpu_read(gpu, REG_A4XX_RBBM_SW_RESET_CMD);
+ gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 0);
+ adreno_recover(gpu);
+}
+
+static void a4xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ adreno_gpu_cleanup(adreno_gpu);
+
+ adreno_gpu_ocmem_cleanup(&a4xx_gpu->ocmem);
+
+ kfree(a4xx_gpu);
+}
+
+static bool a4xx_idle(struct msm_gpu *gpu)
+{
+ /* wait for ringbuffer to drain: */
+ if (!adreno_idle(gpu, gpu->rb[0]))
+ return false;
+
+ /* then wait for GPU to finish: */
+ if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
+ A4XX_RBBM_STATUS_GPU_BUSY))) {
+ DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ return false;
+ }
+
+ return true;
+}
+
+static irqreturn_t a4xx_irq(struct msm_gpu *gpu)
+{
+ uint32_t status;
+
+ status = gpu_read(gpu, REG_A4XX_RBBM_INT_0_STATUS);
+ DBG("%s: Int status %08x", gpu->name, status);
+
+ if (status & A4XX_INT0_CP_REG_PROTECT_FAULT) {
+ uint32_t reg = gpu_read(gpu, REG_A4XX_CP_PROTECT_STATUS);
+ printk("CP | Protected mode error| %s | addr=%x\n",
+ reg & (1 << 24) ? "WRITE" : "READ",
+ (reg & 0xFFFFF) >> 2);
+ }
+
+ gpu_write(gpu, REG_A4XX_RBBM_INT_CLEAR_CMD, status);
+
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
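+/* Register dump tables: inclusive (start, end) register ranges
+ * terminated by a ~0 sentinel, consumed by the common adreno dump and
+ * snapshot code via adreno_gpu->registers (set in a4xx_gpu_init()).
+ */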
+static const unsigned int a4xx_registers[] = {
+ /* RBBM */
+ 0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026,
+ 0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066,
+ 0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF,
+ /* CP */
+ 0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B,
+ 0x0578, 0x058F,
+ /* VSC */
+ 0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51,
+ /* GRAS */
+ 0x0C80, 0x0C81, 0x0C88, 0x0C8F,
+ /* RB */
+ 0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2,
+ /* PC */
+ 0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
+ /* VFD */
+ 0x0E40, 0x0E4A,
+ /* VPC */
+ 0x0E60, 0x0E61, 0x0E63, 0x0E68,
+ /* UCHE */
+ 0x0E80, 0x0E84, 0x0E88, 0x0E95,
+ /* VMIDMT */
+ 0x1000, 0x1000, 0x1002, 0x1002, 0x1004, 0x1004, 0x1008, 0x100A,
+ 0x100C, 0x100D, 0x100F, 0x1010, 0x1012, 0x1016, 0x1024, 0x1024,
+ 0x1027, 0x1027, 0x1100, 0x1100, 0x1102, 0x1102, 0x1104, 0x1104,
+ 0x1110, 0x1110, 0x1112, 0x1116, 0x1124, 0x1124, 0x1300, 0x1300,
+ 0x1380, 0x1380,
+ /* GRAS CTX 0 */
+ 0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E,
+ /* PC CTX 0 */
+ 0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7,
+ /* VFD CTX 0 */
+ 0x2200, 0x2204, 0x2208, 0x22A9,
+ /* GRAS CTX 1 */
+ 0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E,
+ /* PC CTX 1 */
+ 0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7,
+ /* VFD CTX 1 */
+ 0x2600, 0x2604, 0x2608, 0x26A9,
+ /* XPU */
+ 0x2C00, 0x2C01, 0x2C10, 0x2C10, 0x2C12, 0x2C16, 0x2C1D, 0x2C20,
+ 0x2C28, 0x2C28, 0x2C30, 0x2C30, 0x2C32, 0x2C36, 0x2C40, 0x2C40,
+ 0x2C50, 0x2C50, 0x2C52, 0x2C56, 0x2C80, 0x2C80, 0x2C94, 0x2C95,
+ /* VBIF */
+ 0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x301D, 0x3020, 0x3022,
+ 0x3024, 0x3026, 0x3028, 0x302A, 0x302C, 0x302D, 0x3030, 0x3031,
+ 0x3034, 0x3036, 0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040,
+ 0x3049, 0x3049, 0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068,
+ 0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
+ 0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
+ 0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
+ 0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
+ 0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x330C, 0x330C,
+ 0x3310, 0x3310, 0x3400, 0x3401, 0x3410, 0x3410, 0x3412, 0x3416,
+ 0x341D, 0x3420, 0x3428, 0x3428, 0x3430, 0x3430, 0x3432, 0x3436,
+ 0x3440, 0x3440, 0x3450, 0x3450, 0x3452, 0x3456, 0x3480, 0x3480,
+ 0x3494, 0x3495, 0x4000, 0x4000, 0x4002, 0x4002, 0x4004, 0x4004,
+ 0x4008, 0x400A, 0x400C, 0x400D, 0x400F, 0x4012, 0x4014, 0x4016,
+ 0x401D, 0x401D, 0x4020, 0x4027, 0x4060, 0x4062, 0x4200, 0x4200,
+ 0x4300, 0x4300, 0x4400, 0x4400, 0x4500, 0x4500, 0x4800, 0x4802,
+ 0x480F, 0x480F, 0x4811, 0x4811, 0x4813, 0x4813, 0x4815, 0x4816,
+ 0x482B, 0x482B, 0x4857, 0x4857, 0x4883, 0x4883, 0x48AF, 0x48AF,
+ 0x48C5, 0x48C5, 0x48E5, 0x48E5, 0x4905, 0x4905, 0x4925, 0x4925,
+ 0x4945, 0x4945, 0x4950, 0x4950, 0x495B, 0x495B, 0x4980, 0x498E,
+ 0x4B00, 0x4B00, 0x4C00, 0x4C00, 0x4D00, 0x4D00, 0x4E00, 0x4E00,
+ 0x4E80, 0x4E80, 0x4F00, 0x4F00, 0x4F08, 0x4F08, 0x4F10, 0x4F10,
+ 0x4F18, 0x4F18, 0x4F20, 0x4F20, 0x4F30, 0x4F30, 0x4F60, 0x4F60,
+ 0x4F80, 0x4F81, 0x4F88, 0x4F89, 0x4FEE, 0x4FEE, 0x4FF3, 0x4FF3,
+ 0x6000, 0x6001, 0x6008, 0x600F, 0x6014, 0x6016, 0x6018, 0x601B,
+ 0x61FD, 0x61FD, 0x623C, 0x623C, 0x6380, 0x6380, 0x63A0, 0x63A0,
+ 0x63C0, 0x63C1, 0x63C8, 0x63C9, 0x63D0, 0x63D4, 0x63D6, 0x63D6,
+ 0x63EE, 0x63EE, 0x6400, 0x6401, 0x6408, 0x640F, 0x6414, 0x6416,
+ 0x6418, 0x641B, 0x65FD, 0x65FD, 0x663C, 0x663C, 0x6780, 0x6780,
+ 0x67A0, 0x67A0, 0x67C0, 0x67C1, 0x67C8, 0x67C9, 0x67D0, 0x67D4,
+ 0x67D6, 0x67D6, 0x67EE, 0x67EE, 0x6800, 0x6801, 0x6808, 0x680F,
+ 0x6814, 0x6816, 0x6818, 0x681B, 0x69FD, 0x69FD, 0x6A3C, 0x6A3C,
+ 0x6B80, 0x6B80, 0x6BA0, 0x6BA0, 0x6BC0, 0x6BC1, 0x6BC8, 0x6BC9,
+ 0x6BD0, 0x6BD4, 0x6BD6, 0x6BD6, 0x6BEE, 0x6BEE,
+ ~0 /* sentinel */
+};
+
+static const unsigned int a405_registers[] = {
+ /* RBBM */
+ 0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026,
+ 0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066,
+ 0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF,
+ /* CP */
+ 0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B,
+ 0x0578, 0x058F,
+ /* VSC */
+ 0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51,
+ /* GRAS */
+ 0x0C80, 0x0C81, 0x0C88, 0x0C8F,
+ /* RB */
+ 0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2,
+ /* PC */
+ 0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
+ /* VFD */
+ 0x0E40, 0x0E4A,
+ /* VPC */
+ 0x0E60, 0x0E61, 0x0E63, 0x0E68,
+ /* UCHE */
+ 0x0E80, 0x0E84, 0x0E88, 0x0E95,
+ /* GRAS CTX 0 */
+ 0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E,
+ /* PC CTX 0 */
+ 0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7,
+ /* VFD CTX 0 */
+ 0x2200, 0x2204, 0x2208, 0x22A9,
+ /* GRAS CTX 1 */
+ 0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E,
+ /* PC CTX 1 */
+ 0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7,
+ /* VFD CTX 1 */
+ 0x2600, 0x2604, 0x2608, 0x26A9,
+ /* VBIF version 0x20050000 */
+ 0x3000, 0x3007, 0x302C, 0x302C, 0x3030, 0x3030, 0x3034, 0x3036,
+ 0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040, 0x3049, 0x3049,
+ 0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068, 0x306C, 0x306D,
+ 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094, 0x3098, 0x3098,
+ 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8, 0x30D0, 0x30D0,
+ 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100, 0x3108, 0x3108,
+ 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120, 0x3124, 0x3125,
+ 0x3129, 0x3129, 0x340C, 0x340C, 0x3410, 0x3410,
+ ~0 /* sentinel */
+};
+
+static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu_state_get(gpu, state);
+
+ state->rbbm_status = gpu_read(gpu, REG_A4XX_RBBM_STATUS);
+
+ return state;
+}
+
+static void a4xx_dump(struct msm_gpu *gpu)
+{
+ printk("status: %08x\n",
+ gpu_read(gpu, REG_A4XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+static int a4xx_pm_resume(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
+ ret = msm_gpu_pm_resume(gpu);
+ if (ret)
+ return ret;
+
+ if (adreno_is_a430(adreno_gpu)) {
+ unsigned int reg;
+ /* Set the default register values; set SW_COLLAPSE to 0 */
+ gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778000);
+ do {
+ udelay(5);
+ reg = gpu_read(gpu, REG_A4XX_RBBM_POWER_STATUS);
+ } while (!(reg & A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON));
+ }
+ return 0;
+}
+
+static int a4xx_pm_suspend(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
+ ret = msm_gpu_pm_suspend(gpu);
+ if (ret)
+ return ret;
+
+ if (adreno_is_a430(adreno_gpu)) {
+ /* Set the default register values; set SW_COLLAPSE to 1 */
+ gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778001);
+ }
+ return 0;
+}
+
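+/* Returns the 64-bit CP counter that a4xx_hw_init() pointed at
+ * CP_ALWAYS_COUNT via REG_A4XX_CP_PERFCTR_CP_SEL_0, i.e. a free-running
+ * GPU timestamp.
+ */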
+static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ *value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO);
+
+ return 0;
+}
+
+static u32 a4xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ ring->memptrs->rptr = gpu_read(gpu, REG_A4XX_CP_RB_RPTR);
+ return ring->memptrs->rptr;
+}
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a4xx_hw_init,
+ .pm_suspend = a4xx_pm_suspend,
+ .pm_resume = a4xx_pm_resume,
+ .recover = a4xx_recover,
+ .submit = a4xx_submit,
+ .active_ring = adreno_active_ring,
+ .irq = a4xx_irq,
+ .destroy = a4xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
+#endif
+ .gpu_state_get = a4xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
+ .create_address_space = adreno_iommu_create_address_space,
+ .get_rptr = a4xx_get_rptr,
+ },
+ .get_timestamp = a4xx_get_timestamp,
+};
+
+struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
+{
+ struct a4xx_gpu *a4xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct icc_path *ocmem_icc_path;
+ struct icc_path *icc_path;
+ int ret;
+
+ if (!pdev) {
+ DRM_DEV_ERROR(dev->dev, "no a4xx device\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ a4xx_gpu = kzalloc(sizeof(*a4xx_gpu), GFP_KERNEL);
+ if (!a4xx_gpu) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ adreno_gpu = &a4xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ gpu->perfcntrs = NULL;
+ gpu->num_perfcntrs = 0;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ if (ret)
+ goto fail;
+
+ adreno_gpu->registers = adreno_is_a405(adreno_gpu) ? a405_registers :
+ a4xx_registers;
+
+ /* if needed, allocate gmem: */
+ ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu,
+ &a4xx_gpu->ocmem);
+ if (ret)
+ goto fail;
+
+ if (!gpu->aspace) {
+ /* TODO we think it is possible to configure the GPU to
+ * restrict access to VRAM carveout. But the required
+ * registers are unknown. For now just bail out and
+ * limp along with just modesetting. If it turns out
+ * to not be possible to restrict access, then we must
+ * implement a cmdstream validator.
+ */
+ DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
+ }
+
+ icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
+ if (IS_ERR(icc_path)) {
+ ret = PTR_ERR(icc_path);
+ goto fail;
+ }
+
+ ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem");
+ if (IS_ERR(ocmem_icc_path)) {
+ ret = PTR_ERR(ocmem_icc_path);
+ /* allow -ENODATA, ocmem icc is optional */
+ if (ret != -ENODATA)
+ goto fail;
+ ocmem_icc_path = NULL;
+ }
+
+ /*
+ * Set the ICC path to maximum speed for now by multiplying the fastest
+ * frequency by the bus width (8). We'll want to scale this later on to
+ * improve battery life.
+ */
+ icc_set_bw(icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
+ icc_set_bw(ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
+
+ return gpu;
+
+fail:
+ if (a4xx_gpu)
+ a4xx_destroy(&a4xx_gpu->base.base);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
new file mode 100644
index 000000000..a01448cba
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ */
+#ifndef __A4XX_GPU_H__
+#define __A4XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a4xx.xml.h"
+
+struct a4xx_gpu {
+ struct adreno_gpu base;
+
+ /* if OCMEM is used for GMEM: */
+ struct adreno_ocmem ocmem;
+};
+#define to_a4xx_gpu(x) container_of(x, struct a4xx_gpu, base)
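+
+/* to_a4xx_gpu() recovers the wrapping a4xx_gpu from the embedded
+ * adreno_gpu, e.g. as used in a4xx_destroy():
+ *
+ *	struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(to_adreno_gpu(gpu));
+ */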
+
+#endif /* __A4XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
new file mode 100644
index 000000000..2505b4e43
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -0,0 +1,5492 @@
+#ifndef A5XX_XML
+#define A5XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56)
+
+Copyright (C) 2013-2022 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a5xx_color_fmt {
+ RB5_A8_UNORM = 2,
+ RB5_R8_UNORM = 3,
+ RB5_R8_SNORM = 4,
+ RB5_R8_UINT = 5,
+ RB5_R8_SINT = 6,
+ RB5_R4G4B4A4_UNORM = 8,
+ RB5_R5G5B5A1_UNORM = 10,
+ RB5_R5G6B5_UNORM = 14,
+ RB5_R8G8_UNORM = 15,
+ RB5_R8G8_SNORM = 16,
+ RB5_R8G8_UINT = 17,
+ RB5_R8G8_SINT = 18,
+ RB5_R16_UNORM = 21,
+ RB5_R16_SNORM = 22,
+ RB5_R16_FLOAT = 23,
+ RB5_R16_UINT = 24,
+ RB5_R16_SINT = 25,
+ RB5_R8G8B8A8_UNORM = 48,
+ RB5_R8G8B8_UNORM = 49,
+ RB5_R8G8B8A8_SNORM = 50,
+ RB5_R8G8B8A8_UINT = 51,
+ RB5_R8G8B8A8_SINT = 52,
+ RB5_R10G10B10A2_UNORM = 55,
+ RB5_R10G10B10A2_UINT = 58,
+ RB5_R11G11B10_FLOAT = 66,
+ RB5_R16G16_UNORM = 67,
+ RB5_R16G16_SNORM = 68,
+ RB5_R16G16_FLOAT = 69,
+ RB5_R16G16_UINT = 70,
+ RB5_R16G16_SINT = 71,
+ RB5_R32_FLOAT = 74,
+ RB5_R32_UINT = 75,
+ RB5_R32_SINT = 76,
+ RB5_R16G16B16A16_UNORM = 96,
+ RB5_R16G16B16A16_SNORM = 97,
+ RB5_R16G16B16A16_FLOAT = 98,
+ RB5_R16G16B16A16_UINT = 99,
+ RB5_R16G16B16A16_SINT = 100,
+ RB5_R32G32_FLOAT = 103,
+ RB5_R32G32_UINT = 104,
+ RB5_R32G32_SINT = 105,
+ RB5_R32G32B32A32_FLOAT = 130,
+ RB5_R32G32B32A32_UINT = 131,
+ RB5_R32G32B32A32_SINT = 132,
+ RB5_NONE = 255,
+};
+
+enum a5xx_tile_mode {
+ TILE5_LINEAR = 0,
+ TILE5_2 = 2,
+ TILE5_3 = 3,
+};
+
+enum a5xx_vtx_fmt {
+ VFMT5_8_UNORM = 3,
+ VFMT5_8_SNORM = 4,
+ VFMT5_8_UINT = 5,
+ VFMT5_8_SINT = 6,
+ VFMT5_8_8_UNORM = 15,
+ VFMT5_8_8_SNORM = 16,
+ VFMT5_8_8_UINT = 17,
+ VFMT5_8_8_SINT = 18,
+ VFMT5_16_UNORM = 21,
+ VFMT5_16_SNORM = 22,
+ VFMT5_16_FLOAT = 23,
+ VFMT5_16_UINT = 24,
+ VFMT5_16_SINT = 25,
+ VFMT5_8_8_8_UNORM = 33,
+ VFMT5_8_8_8_SNORM = 34,
+ VFMT5_8_8_8_UINT = 35,
+ VFMT5_8_8_8_SINT = 36,
+ VFMT5_8_8_8_8_UNORM = 48,
+ VFMT5_8_8_8_8_SNORM = 50,
+ VFMT5_8_8_8_8_UINT = 51,
+ VFMT5_8_8_8_8_SINT = 52,
+ VFMT5_10_10_10_2_UNORM = 54,
+ VFMT5_10_10_10_2_SNORM = 57,
+ VFMT5_10_10_10_2_UINT = 58,
+ VFMT5_10_10_10_2_SINT = 59,
+ VFMT5_11_11_10_FLOAT = 66,
+ VFMT5_16_16_UNORM = 67,
+ VFMT5_16_16_SNORM = 68,
+ VFMT5_16_16_FLOAT = 69,
+ VFMT5_16_16_UINT = 70,
+ VFMT5_16_16_SINT = 71,
+ VFMT5_32_UNORM = 72,
+ VFMT5_32_SNORM = 73,
+ VFMT5_32_FLOAT = 74,
+ VFMT5_32_UINT = 75,
+ VFMT5_32_SINT = 76,
+ VFMT5_32_FIXED = 77,
+ VFMT5_16_16_16_UNORM = 88,
+ VFMT5_16_16_16_SNORM = 89,
+ VFMT5_16_16_16_FLOAT = 90,
+ VFMT5_16_16_16_UINT = 91,
+ VFMT5_16_16_16_SINT = 92,
+ VFMT5_16_16_16_16_UNORM = 96,
+ VFMT5_16_16_16_16_SNORM = 97,
+ VFMT5_16_16_16_16_FLOAT = 98,
+ VFMT5_16_16_16_16_UINT = 99,
+ VFMT5_16_16_16_16_SINT = 100,
+ VFMT5_32_32_UNORM = 101,
+ VFMT5_32_32_SNORM = 102,
+ VFMT5_32_32_FLOAT = 103,
+ VFMT5_32_32_UINT = 104,
+ VFMT5_32_32_SINT = 105,
+ VFMT5_32_32_FIXED = 106,
+ VFMT5_32_32_32_UNORM = 112,
+ VFMT5_32_32_32_SNORM = 113,
+ VFMT5_32_32_32_UINT = 114,
+ VFMT5_32_32_32_SINT = 115,
+ VFMT5_32_32_32_FLOAT = 116,
+ VFMT5_32_32_32_FIXED = 117,
+ VFMT5_32_32_32_32_UNORM = 128,
+ VFMT5_32_32_32_32_SNORM = 129,
+ VFMT5_32_32_32_32_FLOAT = 130,
+ VFMT5_32_32_32_32_UINT = 131,
+ VFMT5_32_32_32_32_SINT = 132,
+ VFMT5_32_32_32_32_FIXED = 133,
+ VFMT5_NONE = 255,
+};
+
+enum a5xx_tex_fmt {
+ TFMT5_A8_UNORM = 2,
+ TFMT5_8_UNORM = 3,
+ TFMT5_8_SNORM = 4,
+ TFMT5_8_UINT = 5,
+ TFMT5_8_SINT = 6,
+ TFMT5_4_4_4_4_UNORM = 8,
+ TFMT5_5_5_5_1_UNORM = 10,
+ TFMT5_5_6_5_UNORM = 14,
+ TFMT5_8_8_UNORM = 15,
+ TFMT5_8_8_SNORM = 16,
+ TFMT5_8_8_UINT = 17,
+ TFMT5_8_8_SINT = 18,
+ TFMT5_L8_A8_UNORM = 19,
+ TFMT5_16_UNORM = 21,
+ TFMT5_16_SNORM = 22,
+ TFMT5_16_FLOAT = 23,
+ TFMT5_16_UINT = 24,
+ TFMT5_16_SINT = 25,
+ TFMT5_8_8_8_8_UNORM = 48,
+ TFMT5_8_8_8_UNORM = 49,
+ TFMT5_8_8_8_8_SNORM = 50,
+ TFMT5_8_8_8_8_UINT = 51,
+ TFMT5_8_8_8_8_SINT = 52,
+ TFMT5_9_9_9_E5_FLOAT = 53,
+ TFMT5_10_10_10_2_UNORM = 54,
+ TFMT5_10_10_10_2_UINT = 58,
+ TFMT5_11_11_10_FLOAT = 66,
+ TFMT5_16_16_UNORM = 67,
+ TFMT5_16_16_SNORM = 68,
+ TFMT5_16_16_FLOAT = 69,
+ TFMT5_16_16_UINT = 70,
+ TFMT5_16_16_SINT = 71,
+ TFMT5_32_FLOAT = 74,
+ TFMT5_32_UINT = 75,
+ TFMT5_32_SINT = 76,
+ TFMT5_16_16_16_16_UNORM = 96,
+ TFMT5_16_16_16_16_SNORM = 97,
+ TFMT5_16_16_16_16_FLOAT = 98,
+ TFMT5_16_16_16_16_UINT = 99,
+ TFMT5_16_16_16_16_SINT = 100,
+ TFMT5_32_32_FLOAT = 103,
+ TFMT5_32_32_UINT = 104,
+ TFMT5_32_32_SINT = 105,
+ TFMT5_32_32_32_UINT = 114,
+ TFMT5_32_32_32_SINT = 115,
+ TFMT5_32_32_32_FLOAT = 116,
+ TFMT5_32_32_32_32_FLOAT = 130,
+ TFMT5_32_32_32_32_UINT = 131,
+ TFMT5_32_32_32_32_SINT = 132,
+ TFMT5_X8Z24_UNORM = 160,
+ TFMT5_ETC2_RG11_UNORM = 171,
+ TFMT5_ETC2_RG11_SNORM = 172,
+ TFMT5_ETC2_R11_UNORM = 173,
+ TFMT5_ETC2_R11_SNORM = 174,
+ TFMT5_ETC1 = 175,
+ TFMT5_ETC2_RGB8 = 176,
+ TFMT5_ETC2_RGBA8 = 177,
+ TFMT5_ETC2_RGB8A1 = 178,
+ TFMT5_DXT1 = 179,
+ TFMT5_DXT3 = 180,
+ TFMT5_DXT5 = 181,
+ TFMT5_RGTC1_UNORM = 183,
+ TFMT5_RGTC1_SNORM = 184,
+ TFMT5_RGTC2_UNORM = 187,
+ TFMT5_RGTC2_SNORM = 188,
+ TFMT5_BPTC_UFLOAT = 190,
+ TFMT5_BPTC_FLOAT = 191,
+ TFMT5_BPTC = 192,
+ TFMT5_ASTC_4x4 = 193,
+ TFMT5_ASTC_5x4 = 194,
+ TFMT5_ASTC_5x5 = 195,
+ TFMT5_ASTC_6x5 = 196,
+ TFMT5_ASTC_6x6 = 197,
+ TFMT5_ASTC_8x5 = 198,
+ TFMT5_ASTC_8x6 = 199,
+ TFMT5_ASTC_8x8 = 200,
+ TFMT5_ASTC_10x5 = 201,
+ TFMT5_ASTC_10x6 = 202,
+ TFMT5_ASTC_10x8 = 203,
+ TFMT5_ASTC_10x10 = 204,
+ TFMT5_ASTC_12x10 = 205,
+ TFMT5_ASTC_12x12 = 206,
+ TFMT5_NONE = 255,
+};
+
+enum a5xx_depth_format {
+ DEPTH5_NONE = 0,
+ DEPTH5_16 = 1,
+ DEPTH5_24_8 = 2,
+ DEPTH5_32 = 4,
+};
+
+enum a5xx_blit_buf {
+ BLIT_MRT0 = 0,
+ BLIT_MRT1 = 1,
+ BLIT_MRT2 = 2,
+ BLIT_MRT3 = 3,
+ BLIT_MRT4 = 4,
+ BLIT_MRT5 = 5,
+ BLIT_MRT6 = 6,
+ BLIT_MRT7 = 7,
+ BLIT_ZS = 8,
+ BLIT_S = 9,
+};
+
+enum a5xx_cp_perfcounter_select {
+ PERF_CP_ALWAYS_COUNT = 0,
+ PERF_CP_BUSY_GFX_CORE_IDLE = 1,
+ PERF_CP_BUSY_CYCLES = 2,
+ PERF_CP_PFP_IDLE = 3,
+ PERF_CP_PFP_BUSY_WORKING = 4,
+ PERF_CP_PFP_STALL_CYCLES_ANY = 5,
+ PERF_CP_PFP_STARVE_CYCLES_ANY = 6,
+ PERF_CP_PFP_ICACHE_MISS = 7,
+ PERF_CP_PFP_ICACHE_HIT = 8,
+ PERF_CP_PFP_MATCH_PM4_PKT_PROFILE = 9,
+ PERF_CP_ME_BUSY_WORKING = 10,
+ PERF_CP_ME_IDLE = 11,
+ PERF_CP_ME_STARVE_CYCLES_ANY = 12,
+ PERF_CP_ME_FIFO_EMPTY_PFP_IDLE = 13,
+ PERF_CP_ME_FIFO_EMPTY_PFP_BUSY = 14,
+ PERF_CP_ME_FIFO_FULL_ME_BUSY = 15,
+ PERF_CP_ME_FIFO_FULL_ME_NON_WORKING = 16,
+ PERF_CP_ME_STALL_CYCLES_ANY = 17,
+ PERF_CP_ME_ICACHE_MISS = 18,
+ PERF_CP_ME_ICACHE_HIT = 19,
+ PERF_CP_NUM_PREEMPTIONS = 20,
+ PERF_CP_PREEMPTION_REACTION_DELAY = 21,
+ PERF_CP_PREEMPTION_SWITCH_OUT_TIME = 22,
+ PERF_CP_PREEMPTION_SWITCH_IN_TIME = 23,
+ PERF_CP_DEAD_DRAWS_IN_BIN_RENDER = 24,
+ PERF_CP_PREDICATED_DRAWS_KILLED = 25,
+ PERF_CP_MODE_SWITCH = 26,
+ PERF_CP_ZPASS_DONE = 27,
+ PERF_CP_CONTEXT_DONE = 28,
+ PERF_CP_CACHE_FLUSH = 29,
+ PERF_CP_LONG_PREEMPTIONS = 30,
+};
+
+enum a5xx_rbbm_perfcounter_select {
+ PERF_RBBM_ALWAYS_COUNT = 0,
+ PERF_RBBM_ALWAYS_ON = 1,
+ PERF_RBBM_TSE_BUSY = 2,
+ PERF_RBBM_RAS_BUSY = 3,
+ PERF_RBBM_PC_DCALL_BUSY = 4,
+ PERF_RBBM_PC_VSD_BUSY = 5,
+ PERF_RBBM_STATUS_MASKED = 6,
+ PERF_RBBM_COM_BUSY = 7,
+ PERF_RBBM_DCOM_BUSY = 8,
+ PERF_RBBM_VBIF_BUSY = 9,
+ PERF_RBBM_VSC_BUSY = 10,
+ PERF_RBBM_TESS_BUSY = 11,
+ PERF_RBBM_UCHE_BUSY = 12,
+ PERF_RBBM_HLSQ_BUSY = 13,
+};
+
+enum a5xx_pc_perfcounter_select {
+ PERF_PC_BUSY_CYCLES = 0,
+ PERF_PC_WORKING_CYCLES = 1,
+ PERF_PC_STALL_CYCLES_VFD = 2,
+ PERF_PC_STALL_CYCLES_TSE = 3,
+ PERF_PC_STALL_CYCLES_VPC = 4,
+ PERF_PC_STALL_CYCLES_UCHE = 5,
+ PERF_PC_STALL_CYCLES_TESS = 6,
+ PERF_PC_STALL_CYCLES_TSE_ONLY = 7,
+ PERF_PC_STALL_CYCLES_VPC_ONLY = 8,
+ PERF_PC_PASS1_TF_STALL_CYCLES = 9,
+ PERF_PC_STARVE_CYCLES_FOR_INDEX = 10,
+ PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR = 11,
+ PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM = 12,
+ PERF_PC_STARVE_CYCLES_FOR_POSITION = 13,
+ PERF_PC_STARVE_CYCLES_DI = 14,
+ PERF_PC_VIS_STREAMS_LOADED = 15,
+ PERF_PC_INSTANCES = 16,
+ PERF_PC_VPC_PRIMITIVES = 17,
+ PERF_PC_DEAD_PRIM = 18,
+ PERF_PC_LIVE_PRIM = 19,
+ PERF_PC_VERTEX_HITS = 20,
+ PERF_PC_IA_VERTICES = 21,
+ PERF_PC_IA_PRIMITIVES = 22,
+ PERF_PC_GS_PRIMITIVES = 23,
+ PERF_PC_HS_INVOCATIONS = 24,
+ PERF_PC_DS_INVOCATIONS = 25,
+ PERF_PC_VS_INVOCATIONS = 26,
+ PERF_PC_GS_INVOCATIONS = 27,
+ PERF_PC_DS_PRIMITIVES = 28,
+ PERF_PC_VPC_POS_DATA_TRANSACTION = 29,
+ PERF_PC_3D_DRAWCALLS = 30,
+ PERF_PC_2D_DRAWCALLS = 31,
+ PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS = 32,
+ PERF_TESS_BUSY_CYCLES = 33,
+ PERF_TESS_WORKING_CYCLES = 34,
+ PERF_TESS_STALL_CYCLES_PC = 35,
+ PERF_TESS_STARVE_CYCLES_PC = 36,
+};
+
+enum a5xx_vfd_perfcounter_select {
+ PERF_VFD_BUSY_CYCLES = 0,
+ PERF_VFD_STALL_CYCLES_UCHE = 1,
+ PERF_VFD_STALL_CYCLES_VPC_ALLOC = 2,
+ PERF_VFD_STALL_CYCLES_MISS_VB = 3,
+ PERF_VFD_STALL_CYCLES_MISS_Q = 4,
+ PERF_VFD_STALL_CYCLES_SP_INFO = 5,
+ PERF_VFD_STALL_CYCLES_SP_ATTR = 6,
+ PERF_VFD_STALL_CYCLES_VFDP_VB = 7,
+ PERF_VFD_STALL_CYCLES_VFDP_Q = 8,
+ PERF_VFD_DECODER_PACKER_STALL = 9,
+ PERF_VFD_STARVE_CYCLES_UCHE = 10,
+ PERF_VFD_RBUFFER_FULL = 11,
+ PERF_VFD_ATTR_INFO_FIFO_FULL = 12,
+ PERF_VFD_DECODED_ATTRIBUTE_BYTES = 13,
+ PERF_VFD_NUM_ATTRIBUTES = 14,
+ PERF_VFD_INSTRUCTIONS = 15,
+ PERF_VFD_UPPER_SHADER_FIBERS = 16,
+ PERF_VFD_LOWER_SHADER_FIBERS = 17,
+ PERF_VFD_MODE_0_FIBERS = 18,
+ PERF_VFD_MODE_1_FIBERS = 19,
+ PERF_VFD_MODE_2_FIBERS = 20,
+ PERF_VFD_MODE_3_FIBERS = 21,
+ PERF_VFD_MODE_4_FIBERS = 22,
+ PERF_VFD_TOTAL_VERTICES = 23,
+ PERF_VFD_NUM_ATTR_MISS = 24,
+ PERF_VFD_1_BURST_REQ = 25,
+ PERF_VFDP_STALL_CYCLES_VFD = 26,
+ PERF_VFDP_STALL_CYCLES_VFD_INDEX = 27,
+ PERF_VFDP_STALL_CYCLES_VFD_PROG = 28,
+ PERF_VFDP_STARVE_CYCLES_PC = 29,
+ PERF_VFDP_VS_STAGE_32_WAVES = 30,
+};
+
+enum a5xx_hlsq_perfcounter_select {
+ PERF_HLSQ_BUSY_CYCLES = 0,
+ PERF_HLSQ_STALL_CYCLES_UCHE = 1,
+ PERF_HLSQ_STALL_CYCLES_SP_STATE = 2,
+ PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE = 3,
+ PERF_HLSQ_UCHE_LATENCY_CYCLES = 4,
+ PERF_HLSQ_UCHE_LATENCY_COUNT = 5,
+ PERF_HLSQ_FS_STAGE_32_WAVES = 6,
+ PERF_HLSQ_FS_STAGE_64_WAVES = 7,
+ PERF_HLSQ_QUADS = 8,
+ PERF_HLSQ_SP_STATE_COPY_TRANS_FS_STAGE = 9,
+ PERF_HLSQ_SP_STATE_COPY_TRANS_VS_STAGE = 10,
+ PERF_HLSQ_TP_STATE_COPY_TRANS_FS_STAGE = 11,
+ PERF_HLSQ_TP_STATE_COPY_TRANS_VS_STAGE = 12,
+ PERF_HLSQ_CS_INVOCATIONS = 13,
+ PERF_HLSQ_COMPUTE_DRAWCALLS = 14,
+};
+
+enum a5xx_vpc_perfcounter_select {
+ PERF_VPC_BUSY_CYCLES = 0,
+ PERF_VPC_WORKING_CYCLES = 1,
+ PERF_VPC_STALL_CYCLES_UCHE = 2,
+ PERF_VPC_STALL_CYCLES_VFD_WACK = 3,
+ PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC = 4,
+ PERF_VPC_STALL_CYCLES_PC = 5,
+ PERF_VPC_STALL_CYCLES_SP_LM = 6,
+ PERF_VPC_POS_EXPORT_STALL_CYCLES = 7,
+ PERF_VPC_STARVE_CYCLES_SP = 8,
+ PERF_VPC_STARVE_CYCLES_LRZ = 9,
+ PERF_VPC_PC_PRIMITIVES = 10,
+ PERF_VPC_SP_COMPONENTS = 11,
+ PERF_VPC_SP_LM_PRIMITIVES = 12,
+ PERF_VPC_SP_LM_COMPONENTS = 13,
+ PERF_VPC_SP_LM_DWORDS = 14,
+ PERF_VPC_STREAMOUT_COMPONENTS = 15,
+ PERF_VPC_GRANT_PHASES = 16,
+};
+
+enum a5xx_tse_perfcounter_select {
+ PERF_TSE_BUSY_CYCLES = 0,
+ PERF_TSE_CLIPPING_CYCLES = 1,
+ PERF_TSE_STALL_CYCLES_RAS = 2,
+ PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE = 3,
+ PERF_TSE_STALL_CYCLES_LRZ_ZPLANE = 4,
+ PERF_TSE_STARVE_CYCLES_PC = 5,
+ PERF_TSE_INPUT_PRIM = 6,
+ PERF_TSE_INPUT_NULL_PRIM = 7,
+ PERF_TSE_TRIVAL_REJ_PRIM = 8,
+ PERF_TSE_CLIPPED_PRIM = 9,
+ PERF_TSE_ZERO_AREA_PRIM = 10,
+ PERF_TSE_FACENESS_CULLED_PRIM = 11,
+ PERF_TSE_ZERO_PIXEL_PRIM = 12,
+ PERF_TSE_OUTPUT_NULL_PRIM = 13,
+ PERF_TSE_OUTPUT_VISIBLE_PRIM = 14,
+ PERF_TSE_CINVOCATION = 15,
+ PERF_TSE_CPRIMITIVES = 16,
+ PERF_TSE_2D_INPUT_PRIM = 17,
+ PERF_TSE_2D_ALIVE_CLCLES = 18,
+};
+
+enum a5xx_ras_perfcounter_select {
+ PERF_RAS_BUSY_CYCLES = 0,
+ PERF_RAS_SUPERTILE_ACTIVE_CYCLES = 1,
+ PERF_RAS_STALL_CYCLES_LRZ = 2,
+ PERF_RAS_STARVE_CYCLES_TSE = 3,
+ PERF_RAS_SUPER_TILES = 4,
+ PERF_RAS_8X4_TILES = 5,
+ PERF_RAS_MASKGEN_ACTIVE = 6,
+ PERF_RAS_FULLY_COVERED_SUPER_TILES = 7,
+ PERF_RAS_FULLY_COVERED_8X4_TILES = 8,
+ PERF_RAS_PRIM_KILLED_INVISILBE = 9,
+};
+
+enum a5xx_lrz_perfcounter_select {
+ PERF_LRZ_BUSY_CYCLES = 0,
+ PERF_LRZ_STARVE_CYCLES_RAS = 1,
+ PERF_LRZ_STALL_CYCLES_RB = 2,
+ PERF_LRZ_STALL_CYCLES_VSC = 3,
+ PERF_LRZ_STALL_CYCLES_VPC = 4,
+ PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH = 5,
+ PERF_LRZ_STALL_CYCLES_UCHE = 6,
+ PERF_LRZ_LRZ_READ = 7,
+ PERF_LRZ_LRZ_WRITE = 8,
+ PERF_LRZ_READ_LATENCY = 9,
+ PERF_LRZ_MERGE_CACHE_UPDATING = 10,
+ PERF_LRZ_PRIM_KILLED_BY_MASKGEN = 11,
+ PERF_LRZ_PRIM_KILLED_BY_LRZ = 12,
+ PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ = 13,
+ PERF_LRZ_FULL_8X8_TILES = 14,
+ PERF_LRZ_PARTIAL_8X8_TILES = 15,
+ PERF_LRZ_TILE_KILLED = 16,
+ PERF_LRZ_TOTAL_PIXEL = 17,
+ PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ = 18,
+};
+
+enum a5xx_uche_perfcounter_select {
+ PERF_UCHE_BUSY_CYCLES = 0,
+ PERF_UCHE_STALL_CYCLES_VBIF = 1,
+ PERF_UCHE_VBIF_LATENCY_CYCLES = 2,
+ PERF_UCHE_VBIF_LATENCY_SAMPLES = 3,
+ PERF_UCHE_VBIF_READ_BEATS_TP = 4,
+ PERF_UCHE_VBIF_READ_BEATS_VFD = 5,
+ PERF_UCHE_VBIF_READ_BEATS_HLSQ = 6,
+ PERF_UCHE_VBIF_READ_BEATS_LRZ = 7,
+ PERF_UCHE_VBIF_READ_BEATS_SP = 8,
+ PERF_UCHE_READ_REQUESTS_TP = 9,
+ PERF_UCHE_READ_REQUESTS_VFD = 10,
+ PERF_UCHE_READ_REQUESTS_HLSQ = 11,
+ PERF_UCHE_READ_REQUESTS_LRZ = 12,
+ PERF_UCHE_READ_REQUESTS_SP = 13,
+ PERF_UCHE_WRITE_REQUESTS_LRZ = 14,
+ PERF_UCHE_WRITE_REQUESTS_SP = 15,
+ PERF_UCHE_WRITE_REQUESTS_VPC = 16,
+ PERF_UCHE_WRITE_REQUESTS_VSC = 17,
+ PERF_UCHE_EVICTS = 18,
+ PERF_UCHE_BANK_REQ0 = 19,
+ PERF_UCHE_BANK_REQ1 = 20,
+ PERF_UCHE_BANK_REQ2 = 21,
+ PERF_UCHE_BANK_REQ3 = 22,
+ PERF_UCHE_BANK_REQ4 = 23,
+ PERF_UCHE_BANK_REQ5 = 24,
+ PERF_UCHE_BANK_REQ6 = 25,
+ PERF_UCHE_BANK_REQ7 = 26,
+ PERF_UCHE_VBIF_READ_BEATS_CH0 = 27,
+ PERF_UCHE_VBIF_READ_BEATS_CH1 = 28,
+ PERF_UCHE_GMEM_READ_BEATS = 29,
+ PERF_UCHE_FLAG_COUNT = 30,
+};
+
+enum a5xx_tp_perfcounter_select {
+ PERF_TP_BUSY_CYCLES = 0,
+ PERF_TP_STALL_CYCLES_UCHE = 1,
+ PERF_TP_LATENCY_CYCLES = 2,
+ PERF_TP_LATENCY_TRANS = 3,
+ PERF_TP_FLAG_CACHE_REQUEST_SAMPLES = 4,
+ PERF_TP_FLAG_CACHE_REQUEST_LATENCY = 5,
+ PERF_TP_L1_CACHELINE_REQUESTS = 6,
+ PERF_TP_L1_CACHELINE_MISSES = 7,
+ PERF_TP_SP_TP_TRANS = 8,
+ PERF_TP_TP_SP_TRANS = 9,
+ PERF_TP_OUTPUT_PIXELS = 10,
+ PERF_TP_FILTER_WORKLOAD_16BIT = 11,
+ PERF_TP_FILTER_WORKLOAD_32BIT = 12,
+ PERF_TP_QUADS_RECEIVED = 13,
+ PERF_TP_QUADS_OFFSET = 14,
+ PERF_TP_QUADS_SHADOW = 15,
+ PERF_TP_QUADS_ARRAY = 16,
+ PERF_TP_QUADS_GRADIENT = 17,
+ PERF_TP_QUADS_1D = 18,
+ PERF_TP_QUADS_2D = 19,
+ PERF_TP_QUADS_BUFFER = 20,
+ PERF_TP_QUADS_3D = 21,
+ PERF_TP_QUADS_CUBE = 22,
+ PERF_TP_STATE_CACHE_REQUESTS = 23,
+ PERF_TP_STATE_CACHE_MISSES = 24,
+ PERF_TP_DIVERGENT_QUADS_RECEIVED = 25,
+ PERF_TP_BINDLESS_STATE_CACHE_REQUESTS = 26,
+ PERF_TP_BINDLESS_STATE_CACHE_MISSES = 27,
+ PERF_TP_PRT_NON_RESIDENT_EVENTS = 28,
+ PERF_TP_OUTPUT_PIXELS_POINT = 29,
+ PERF_TP_OUTPUT_PIXELS_BILINEAR = 30,
+ PERF_TP_OUTPUT_PIXELS_MIP = 31,
+ PERF_TP_OUTPUT_PIXELS_ANISO = 32,
+ PERF_TP_OUTPUT_PIXELS_ZERO_LOD = 33,
+ PERF_TP_FLAG_CACHE_REQUESTS = 34,
+ PERF_TP_FLAG_CACHE_MISSES = 35,
+ PERF_TP_L1_5_L2_REQUESTS = 36,
+ PERF_TP_2D_OUTPUT_PIXELS = 37,
+ PERF_TP_2D_OUTPUT_PIXELS_POINT = 38,
+ PERF_TP_2D_OUTPUT_PIXELS_BILINEAR = 39,
+ PERF_TP_2D_FILTER_WORKLOAD_16BIT = 40,
+ PERF_TP_2D_FILTER_WORKLOAD_32BIT = 41,
+};
+
+enum a5xx_sp_perfcounter_select {
+ PERF_SP_BUSY_CYCLES = 0,
+ PERF_SP_ALU_WORKING_CYCLES = 1,
+ PERF_SP_EFU_WORKING_CYCLES = 2,
+ PERF_SP_STALL_CYCLES_VPC = 3,
+ PERF_SP_STALL_CYCLES_TP = 4,
+ PERF_SP_STALL_CYCLES_UCHE = 5,
+ PERF_SP_STALL_CYCLES_RB = 6,
+ PERF_SP_SCHEDULER_NON_WORKING = 7,
+ PERF_SP_WAVE_CONTEXTS = 8,
+ PERF_SP_WAVE_CONTEXT_CYCLES = 9,
+ PERF_SP_FS_STAGE_WAVE_CYCLES = 10,
+ PERF_SP_FS_STAGE_WAVE_SAMPLES = 11,
+ PERF_SP_VS_STAGE_WAVE_CYCLES = 12,
+ PERF_SP_VS_STAGE_WAVE_SAMPLES = 13,
+ PERF_SP_FS_STAGE_DURATION_CYCLES = 14,
+ PERF_SP_VS_STAGE_DURATION_CYCLES = 15,
+ PERF_SP_WAVE_CTRL_CYCLES = 16,
+ PERF_SP_WAVE_LOAD_CYCLES = 17,
+ PERF_SP_WAVE_EMIT_CYCLES = 18,
+ PERF_SP_WAVE_NOP_CYCLES = 19,
+ PERF_SP_WAVE_WAIT_CYCLES = 20,
+ PERF_SP_WAVE_FETCH_CYCLES = 21,
+ PERF_SP_WAVE_IDLE_CYCLES = 22,
+ PERF_SP_WAVE_END_CYCLES = 23,
+ PERF_SP_WAVE_LONG_SYNC_CYCLES = 24,
+ PERF_SP_WAVE_SHORT_SYNC_CYCLES = 25,
+ PERF_SP_WAVE_JOIN_CYCLES = 26,
+ PERF_SP_LM_LOAD_INSTRUCTIONS = 27,
+ PERF_SP_LM_STORE_INSTRUCTIONS = 28,
+ PERF_SP_LM_ATOMICS = 29,
+ PERF_SP_GM_LOAD_INSTRUCTIONS = 30,
+ PERF_SP_GM_STORE_INSTRUCTIONS = 31,
+ PERF_SP_GM_ATOMICS = 32,
+ PERF_SP_VS_STAGE_TEX_INSTRUCTIONS = 33,
+ PERF_SP_VS_STAGE_CFLOW_INSTRUCTIONS = 34,
+ PERF_SP_VS_STAGE_EFU_INSTRUCTIONS = 35,
+ PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 36,
+ PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 37,
+ PERF_SP_FS_STAGE_TEX_INSTRUCTIONS = 38,
+ PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS = 39,
+ PERF_SP_FS_STAGE_EFU_INSTRUCTIONS = 40,
+ PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 41,
+ PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 42,
+ PERF_SP_FS_STAGE_BARY_INSTRUCTIONS = 43,
+ PERF_SP_VS_INSTRUCTIONS = 44,
+ PERF_SP_FS_INSTRUCTIONS = 45,
+ PERF_SP_ADDR_LOCK_COUNT = 46,
+ PERF_SP_UCHE_READ_TRANS = 47,
+ PERF_SP_UCHE_WRITE_TRANS = 48,
+ PERF_SP_EXPORT_VPC_TRANS = 49,
+ PERF_SP_EXPORT_RB_TRANS = 50,
+ PERF_SP_PIXELS_KILLED = 51,
+ PERF_SP_ICL1_REQUESTS = 52,
+ PERF_SP_ICL1_MISSES = 53,
+ PERF_SP_ICL0_REQUESTS = 54,
+ PERF_SP_ICL0_MISSES = 55,
+ PERF_SP_HS_INSTRUCTIONS = 56,
+ PERF_SP_DS_INSTRUCTIONS = 57,
+ PERF_SP_GS_INSTRUCTIONS = 58,
+ PERF_SP_CS_INSTRUCTIONS = 59,
+ PERF_SP_GPR_READ = 60,
+ PERF_SP_GPR_WRITE = 61,
+ PERF_SP_LM_CH0_REQUESTS = 62,
+ PERF_SP_LM_CH1_REQUESTS = 63,
+ PERF_SP_LM_BANK_CONFLICTS = 64,
+};
+
+enum a5xx_rb_perfcounter_select {
+ PERF_RB_BUSY_CYCLES = 0,
+ PERF_RB_STALL_CYCLES_CCU = 1,
+ PERF_RB_STALL_CYCLES_HLSQ = 2,
+ PERF_RB_STALL_CYCLES_FIFO0_FULL = 3,
+ PERF_RB_STALL_CYCLES_FIFO1_FULL = 4,
+ PERF_RB_STALL_CYCLES_FIFO2_FULL = 5,
+ PERF_RB_STARVE_CYCLES_SP = 6,
+ PERF_RB_STARVE_CYCLES_LRZ_TILE = 7,
+ PERF_RB_STARVE_CYCLES_CCU = 8,
+ PERF_RB_STARVE_CYCLES_Z_PLANE = 9,
+ PERF_RB_STARVE_CYCLES_BARY_PLANE = 10,
+ PERF_RB_Z_WORKLOAD = 11,
+ PERF_RB_HLSQ_ACTIVE = 12,
+ PERF_RB_Z_READ = 13,
+ PERF_RB_Z_WRITE = 14,
+ PERF_RB_C_READ = 15,
+ PERF_RB_C_WRITE = 16,
+ PERF_RB_TOTAL_PASS = 17,
+ PERF_RB_Z_PASS = 18,
+ PERF_RB_Z_FAIL = 19,
+ PERF_RB_S_FAIL = 20,
+ PERF_RB_BLENDED_FXP_COMPONENTS = 21,
+ PERF_RB_BLENDED_FP16_COMPONENTS = 22,
+ RB_RESERVED = 23,
+ PERF_RB_2D_ALIVE_CYCLES = 24,
+ PERF_RB_2D_STALL_CYCLES_A2D = 25,
+ PERF_RB_2D_STARVE_CYCLES_SRC = 26,
+ PERF_RB_2D_STARVE_CYCLES_SP = 27,
+ PERF_RB_2D_STARVE_CYCLES_DST = 28,
+ PERF_RB_2D_VALID_PIXELS = 29,
+};
+
+enum a5xx_rb_samples_perfcounter_select {
+ TOTAL_SAMPLES = 0,
+ ZPASS_SAMPLES = 1,
+ ZFAIL_SAMPLES = 2,
+ SFAIL_SAMPLES = 3,
+};
+
+enum a5xx_vsc_perfcounter_select {
+ PERF_VSC_BUSY_CYCLES = 0,
+ PERF_VSC_WORKING_CYCLES = 1,
+ PERF_VSC_STALL_CYCLES_UCHE = 2,
+ PERF_VSC_EOT_NUM = 3,
+};
+
+enum a5xx_ccu_perfcounter_select {
+ PERF_CCU_BUSY_CYCLES = 0,
+ PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN = 1,
+ PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN = 2,
+ PERF_CCU_STARVE_CYCLES_FLAG_RETURN = 3,
+ PERF_CCU_DEPTH_BLOCKS = 4,
+ PERF_CCU_COLOR_BLOCKS = 5,
+ PERF_CCU_DEPTH_BLOCK_HIT = 6,
+ PERF_CCU_COLOR_BLOCK_HIT = 7,
+ PERF_CCU_PARTIAL_BLOCK_READ = 8,
+ PERF_CCU_GMEM_READ = 9,
+ PERF_CCU_GMEM_WRITE = 10,
+ PERF_CCU_DEPTH_READ_FLAG0_COUNT = 11,
+ PERF_CCU_DEPTH_READ_FLAG1_COUNT = 12,
+ PERF_CCU_DEPTH_READ_FLAG2_COUNT = 13,
+ PERF_CCU_DEPTH_READ_FLAG3_COUNT = 14,
+ PERF_CCU_DEPTH_READ_FLAG4_COUNT = 15,
+ PERF_CCU_COLOR_READ_FLAG0_COUNT = 16,
+ PERF_CCU_COLOR_READ_FLAG1_COUNT = 17,
+ PERF_CCU_COLOR_READ_FLAG2_COUNT = 18,
+ PERF_CCU_COLOR_READ_FLAG3_COUNT = 19,
+ PERF_CCU_COLOR_READ_FLAG4_COUNT = 20,
+ PERF_CCU_2D_BUSY_CYCLES = 21,
+ PERF_CCU_2D_RD_REQ = 22,
+ PERF_CCU_2D_WR_REQ = 23,
+ PERF_CCU_2D_REORDER_STARVE_CYCLES = 24,
+ PERF_CCU_2D_PIXELS = 25,
+};
+
+enum a5xx_cmp_perfcounter_select {
+ PERF_CMPDECMP_STALL_CYCLES_VBIF = 0,
+ PERF_CMPDECMP_VBIF_LATENCY_CYCLES = 1,
+ PERF_CMPDECMP_VBIF_LATENCY_SAMPLES = 2,
+ PERF_CMPDECMP_VBIF_READ_DATA_CCU = 3,
+ PERF_CMPDECMP_VBIF_WRITE_DATA_CCU = 4,
+ PERF_CMPDECMP_VBIF_READ_REQUEST = 5,
+ PERF_CMPDECMP_VBIF_WRITE_REQUEST = 6,
+ PERF_CMPDECMP_VBIF_READ_DATA = 7,
+ PERF_CMPDECMP_VBIF_WRITE_DATA = 8,
+ PERF_CMPDECMP_FLAG_FETCH_CYCLES = 9,
+ PERF_CMPDECMP_FLAG_FETCH_SAMPLES = 10,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT = 11,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT = 12,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT = 13,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT = 14,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT = 15,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT = 16,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT = 17,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT = 18,
+ PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ = 19,
+ PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR = 20,
+ PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN = 21,
+ PERF_CMPDECMP_2D_RD_DATA = 22,
+ PERF_CMPDECMP_2D_WR_DATA = 23,
+};
+
+enum a5xx_vbif_perfcounter_select {
+ AXI_READ_REQUESTS_ID_0 = 0,
+ AXI_READ_REQUESTS_ID_1 = 1,
+ AXI_READ_REQUESTS_ID_2 = 2,
+ AXI_READ_REQUESTS_ID_3 = 3,
+ AXI_READ_REQUESTS_ID_4 = 4,
+ AXI_READ_REQUESTS_ID_5 = 5,
+ AXI_READ_REQUESTS_ID_6 = 6,
+ AXI_READ_REQUESTS_ID_7 = 7,
+ AXI_READ_REQUESTS_ID_8 = 8,
+ AXI_READ_REQUESTS_ID_9 = 9,
+ AXI_READ_REQUESTS_ID_10 = 10,
+ AXI_READ_REQUESTS_ID_11 = 11,
+ AXI_READ_REQUESTS_ID_12 = 12,
+ AXI_READ_REQUESTS_ID_13 = 13,
+ AXI_READ_REQUESTS_ID_14 = 14,
+ AXI_READ_REQUESTS_ID_15 = 15,
+ AXI0_READ_REQUESTS_TOTAL = 16,
+ AXI1_READ_REQUESTS_TOTAL = 17,
+ AXI2_READ_REQUESTS_TOTAL = 18,
+ AXI3_READ_REQUESTS_TOTAL = 19,
+ AXI_READ_REQUESTS_TOTAL = 20,
+ AXI_WRITE_REQUESTS_ID_0 = 21,
+ AXI_WRITE_REQUESTS_ID_1 = 22,
+ AXI_WRITE_REQUESTS_ID_2 = 23,
+ AXI_WRITE_REQUESTS_ID_3 = 24,
+ AXI_WRITE_REQUESTS_ID_4 = 25,
+ AXI_WRITE_REQUESTS_ID_5 = 26,
+ AXI_WRITE_REQUESTS_ID_6 = 27,
+ AXI_WRITE_REQUESTS_ID_7 = 28,
+ AXI_WRITE_REQUESTS_ID_8 = 29,
+ AXI_WRITE_REQUESTS_ID_9 = 30,
+ AXI_WRITE_REQUESTS_ID_10 = 31,
+ AXI_WRITE_REQUESTS_ID_11 = 32,
+ AXI_WRITE_REQUESTS_ID_12 = 33,
+ AXI_WRITE_REQUESTS_ID_13 = 34,
+ AXI_WRITE_REQUESTS_ID_14 = 35,
+ AXI_WRITE_REQUESTS_ID_15 = 36,
+ AXI0_WRITE_REQUESTS_TOTAL = 37,
+ AXI1_WRITE_REQUESTS_TOTAL = 38,
+ AXI2_WRITE_REQUESTS_TOTAL = 39,
+ AXI3_WRITE_REQUESTS_TOTAL = 40,
+ AXI_WRITE_REQUESTS_TOTAL = 41,
+ AXI_TOTAL_REQUESTS = 42,
+ AXI_READ_DATA_BEATS_ID_0 = 43,
+ AXI_READ_DATA_BEATS_ID_1 = 44,
+ AXI_READ_DATA_BEATS_ID_2 = 45,
+ AXI_READ_DATA_BEATS_ID_3 = 46,
+ AXI_READ_DATA_BEATS_ID_4 = 47,
+ AXI_READ_DATA_BEATS_ID_5 = 48,
+ AXI_READ_DATA_BEATS_ID_6 = 49,
+ AXI_READ_DATA_BEATS_ID_7 = 50,
+ AXI_READ_DATA_BEATS_ID_8 = 51,
+ AXI_READ_DATA_BEATS_ID_9 = 52,
+ AXI_READ_DATA_BEATS_ID_10 = 53,
+ AXI_READ_DATA_BEATS_ID_11 = 54,
+ AXI_READ_DATA_BEATS_ID_12 = 55,
+ AXI_READ_DATA_BEATS_ID_13 = 56,
+ AXI_READ_DATA_BEATS_ID_14 = 57,
+ AXI_READ_DATA_BEATS_ID_15 = 58,
+ AXI0_READ_DATA_BEATS_TOTAL = 59,
+ AXI1_READ_DATA_BEATS_TOTAL = 60,
+ AXI2_READ_DATA_BEATS_TOTAL = 61,
+ AXI3_READ_DATA_BEATS_TOTAL = 62,
+ AXI_READ_DATA_BEATS_TOTAL = 63,
+ AXI_WRITE_DATA_BEATS_ID_0 = 64,
+ AXI_WRITE_DATA_BEATS_ID_1 = 65,
+ AXI_WRITE_DATA_BEATS_ID_2 = 66,
+ AXI_WRITE_DATA_BEATS_ID_3 = 67,
+ AXI_WRITE_DATA_BEATS_ID_4 = 68,
+ AXI_WRITE_DATA_BEATS_ID_5 = 69,
+ AXI_WRITE_DATA_BEATS_ID_6 = 70,
+ AXI_WRITE_DATA_BEATS_ID_7 = 71,
+ AXI_WRITE_DATA_BEATS_ID_8 = 72,
+ AXI_WRITE_DATA_BEATS_ID_9 = 73,
+ AXI_WRITE_DATA_BEATS_ID_10 = 74,
+ AXI_WRITE_DATA_BEATS_ID_11 = 75,
+ AXI_WRITE_DATA_BEATS_ID_12 = 76,
+ AXI_WRITE_DATA_BEATS_ID_13 = 77,
+ AXI_WRITE_DATA_BEATS_ID_14 = 78,
+ AXI_WRITE_DATA_BEATS_ID_15 = 79,
+ AXI0_WRITE_DATA_BEATS_TOTAL = 80,
+ AXI1_WRITE_DATA_BEATS_TOTAL = 81,
+ AXI2_WRITE_DATA_BEATS_TOTAL = 82,
+ AXI3_WRITE_DATA_BEATS_TOTAL = 83,
+ AXI_WRITE_DATA_BEATS_TOTAL = 84,
+ AXI_DATA_BEATS_TOTAL = 85,
+};
+
+enum a5xx_tex_filter {
+ A5XX_TEX_NEAREST = 0,
+ A5XX_TEX_LINEAR = 1,
+ A5XX_TEX_ANISO = 2,
+};
+
+enum a5xx_tex_clamp {
+ A5XX_TEX_REPEAT = 0,
+ A5XX_TEX_CLAMP_TO_EDGE = 1,
+ A5XX_TEX_MIRROR_REPEAT = 2,
+ A5XX_TEX_CLAMP_TO_BORDER = 3,
+ A5XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a5xx_tex_aniso {
+ A5XX_TEX_ANISO_1 = 0,
+ A5XX_TEX_ANISO_2 = 1,
+ A5XX_TEX_ANISO_4 = 2,
+ A5XX_TEX_ANISO_8 = 3,
+ A5XX_TEX_ANISO_16 = 4,
+};
+
+enum a5xx_tex_swiz {
+ A5XX_TEX_X = 0,
+ A5XX_TEX_Y = 1,
+ A5XX_TEX_Z = 2,
+ A5XX_TEX_W = 3,
+ A5XX_TEX_ZERO = 4,
+ A5XX_TEX_ONE = 5,
+};
+
+enum a5xx_tex_type {
+ A5XX_TEX_1D = 0,
+ A5XX_TEX_2D = 1,
+ A5XX_TEX_CUBE = 2,
+ A5XX_TEX_3D = 3,
+ A5XX_TEX_BUFFER = 4,
+};
+
+#define A5XX_INT0_RBBM_GPU_IDLE 0x00000001
+#define A5XX_INT0_RBBM_AHB_ERROR 0x00000002
+#define A5XX_INT0_RBBM_TRANSFER_TIMEOUT 0x00000004
+#define A5XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A5XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A5XX_INT0_RBBM_ETS_MS_TIMEOUT 0x00000020
+#define A5XX_INT0_RBBM_ATB_ASYNC_OVERFLOW 0x00000040
+#define A5XX_INT0_RBBM_GPC_ERROR 0x00000080
+#define A5XX_INT0_CP_SW 0x00000100
+#define A5XX_INT0_CP_HW_ERROR 0x00000200
+#define A5XX_INT0_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A5XX_INT0_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A5XX_INT0_CP_CCU_RESOLVE_TS 0x00001000
+#define A5XX_INT0_CP_IB2 0x00002000
+#define A5XX_INT0_CP_IB1 0x00004000
+#define A5XX_INT0_CP_RB 0x00008000
+#define A5XX_INT0_CP_UNUSED_1 0x00010000
+#define A5XX_INT0_CP_RB_DONE_TS 0x00020000
+#define A5XX_INT0_CP_WT_DONE_TS 0x00040000
+#define A5XX_INT0_UNKNOWN_1 0x00080000
+#define A5XX_INT0_CP_CACHE_FLUSH_TS 0x00100000
+#define A5XX_INT0_UNUSED_2 0x00200000
+#define A5XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A5XX_INT0_MISC_HANG_DETECT 0x00800000
+#define A5XX_INT0_UCHE_OOB_ACCESS 0x01000000
+#define A5XX_INT0_UCHE_TRAP_INTR 0x02000000
+#define A5XX_INT0_DEBBUS_INTR_0 0x04000000
+#define A5XX_INT0_DEBBUS_INTR_1 0x08000000
+#define A5XX_INT0_GPMU_VOLTAGE_DROOP 0x10000000
+#define A5XX_INT0_GPMU_FIRMWARE 0x20000000
+#define A5XX_INT0_ISDB_CPU_IRQ 0x40000000
+#define A5XX_INT0_ISDB_UNDER_DEBUG 0x80000000
+#define A5XX_CP_INT_CP_OPCODE_ERROR 0x00000001
+#define A5XX_CP_INT_CP_RESERVED_BIT_ERROR 0x00000002
+#define A5XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004
+#define A5XX_CP_INT_CP_DMA_ERROR 0x00000008
+#define A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010
+#define A5XX_CP_INT_CP_AHB_ERROR 0x00000020
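+
+/*
+ * The CP interrupt bits above are one-hot flags, so a handler would
+ * normally test them directly against the read status word. A minimal
+ * sketch, assuming the driver's usual gpu_read() register accessor:
+ *
+ *	u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
+ *	if (status & A5XX_CP_INT_CP_OPCODE_ERROR)
+ *		/* handle an invalid PM4 opcode */;
+ */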
+#define REG_A5XX_CP_RB_BASE 0x00000800
+
+#define REG_A5XX_CP_RB_BASE_HI 0x00000801
+
+#define REG_A5XX_CP_RB_CNTL 0x00000802
+
+#define REG_A5XX_CP_RB_RPTR_ADDR 0x00000804
+
+#define REG_A5XX_CP_RB_RPTR_ADDR_HI 0x00000805
+
+#define REG_A5XX_CP_RB_RPTR 0x00000806
+
+#define REG_A5XX_CP_RB_WPTR 0x00000807
+
+#define REG_A5XX_CP_PFP_STAT_ADDR 0x00000808
+
+#define REG_A5XX_CP_PFP_STAT_DATA 0x00000809
+
+#define REG_A5XX_CP_DRAW_STATE_ADDR 0x0000080b
+
+#define REG_A5XX_CP_DRAW_STATE_DATA 0x0000080c
+
+#define REG_A5XX_CP_ME_NRT_ADDR_LO 0x0000080d
+
+#define REG_A5XX_CP_ME_NRT_ADDR_HI 0x0000080e
+
+#define REG_A5XX_CP_ME_NRT_DATA 0x00000810
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_LO 0x00000817
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_HI 0x00000818
+
+#define REG_A5XX_CP_CRASH_DUMP_CNTL 0x00000819
+
+#define REG_A5XX_CP_ME_STAT_ADDR 0x0000081a
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_1 0x0000081f
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_2 0x00000820
+
+#define REG_A5XX_CP_ROQ_DBG_ADDR 0x00000821
+
+#define REG_A5XX_CP_ROQ_DBG_DATA 0x00000822
+
+#define REG_A5XX_CP_MEQ_DBG_ADDR 0x00000823
+
+#define REG_A5XX_CP_MEQ_DBG_DATA 0x00000824
+
+#define REG_A5XX_CP_MEQ_THRESHOLDS 0x00000825
+
+#define REG_A5XX_CP_MERCIU_SIZE 0x00000826
+
+#define REG_A5XX_CP_MERCIU_DBG_ADDR 0x00000827
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_1 0x00000828
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_2 0x00000829
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_ADDR 0x0000082a
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_DATA 0x0000082b
+
+#define REG_A5XX_CP_ME_UCODE_DBG_ADDR 0x0000082f
+
+#define REG_A5XX_CP_ME_UCODE_DBG_DATA 0x00000830
+
+#define REG_A5XX_CP_CNTL 0x00000831
+
+#define REG_A5XX_CP_PFP_ME_CNTL 0x00000832
+
+#define REG_A5XX_CP_CHICKEN_DBG 0x00000833
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_LO 0x00000835
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_HI 0x00000836
+
+#define REG_A5XX_CP_ME_INSTR_BASE_LO 0x00000838
+
+#define REG_A5XX_CP_ME_INSTR_BASE_HI 0x00000839
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_CNTL 0x0000083b
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO 0x0000083c
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI 0x0000083d
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO 0x0000083e
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_HI 0x0000083f
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO 0x00000840
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI 0x00000841
+
+#define REG_A5XX_CP_ADDR_MODE_CNTL 0x00000860
+
+#define REG_A5XX_CP_ME_STAT_DATA 0x00000b14
+
+#define REG_A5XX_CP_WFI_PEND_CTR 0x00000b15
+
+#define REG_A5XX_CP_INTERRUPT_STATUS 0x00000b18
+
+#define REG_A5XX_CP_HW_FAULT 0x00000b1a
+
+#define REG_A5XX_CP_PROTECT_STATUS 0x00000b1c
+
+#define REG_A5XX_CP_IB1_BASE 0x00000b1f
+
+#define REG_A5XX_CP_IB1_BASE_HI 0x00000b20
+
+#define REG_A5XX_CP_IB1_BUFSZ 0x00000b21
+
+#define REG_A5XX_CP_IB2_BASE 0x00000b22
+
+#define REG_A5XX_CP_IB2_BASE_HI 0x00000b23
+
+#define REG_A5XX_CP_IB2_BUFSZ 0x00000b24
+
+static inline uint32_t REG_A5XX_CP_SCRATCH(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
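+
+/*
+ * The REG_..._REG(i0) inline helpers follow the generated pattern for
+ * register arrays: a base offset plus a per-element stride (0x1 here).
+ * REG_A5XX_CP_SCRATCH_REG(2), for instance, evaluates to 0x00000b7a,
+ * the third scratch register.
+ */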
+
+static inline uint32_t REG_A5XX_CP_PROTECT(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A5XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A5XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A5XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000
+#define A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24
+static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A5XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A5XX_CP_PROTECT_REG_TRAP_WRITE__MASK 0x20000000
+#define A5XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT 29
+static inline uint32_t A5XX_CP_PROTECT_REG_TRAP_WRITE(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT) & A5XX_CP_PROTECT_REG_TRAP_WRITE__MASK;
+}
+#define A5XX_CP_PROTECT_REG_TRAP_READ__MASK 0x40000000
+#define A5XX_CP_PROTECT_REG_TRAP_READ__SHIFT 30
+static inline uint32_t A5XX_CP_PROTECT_REG_TRAP_READ(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_TRAP_READ__SHIFT) & A5XX_CP_PROTECT_REG_TRAP_READ__MASK;
+}
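+
+/*
+ * A protect-table entry is built by OR-ing the packed fields above.
+ * Illustrative only (the base address and length are arbitrary, and
+ * MASK_LEN is assumed to select a power-of-two register range, as the
+ * name suggests):
+ *
+ *	u32 entry = A5XX_CP_PROTECT_REG_BASE_ADDR(0x800) |
+ *		    A5XX_CP_PROTECT_REG_MASK_LEN(6) |
+ *		    A5XX_CP_PROTECT_REG_TRAP_WRITE(1) |
+ *		    A5XX_CP_PROTECT_REG_TRAP_READ(1);
+ */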
+
+#define REG_A5XX_CP_PROTECT_CNTL 0x000008a0
+
+#define REG_A5XX_CP_AHB_FAULT 0x00000b1b
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_0 0x00000bb0
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_1 0x00000bb1
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_2 0x00000bb2
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_3 0x00000bb3
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_4 0x00000bb4
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_5 0x00000bb5
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_6 0x00000bb6
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_7 0x00000bb7
+
+#define REG_A5XX_VSC_ADDR_MODE_CNTL 0x00000bc1
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_0 0x00000bba
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_1 0x00000bbb
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_2 0x00000bbc
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_3 0x00000bbd
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_A 0x00000004
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_B 0x00000005
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_C 0x00000006
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_D 0x00000007
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLT 0x00000008
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLM 0x00000009
+
+#define REG_A5XX_RBBM_CFG_DEBBUS_CTLTM_ENABLE_SHIFT 0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPL 0x0000000a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPE 0x0000000b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_0 0x0000000c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_1 0x0000000d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_2 0x0000000e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_3 0x0000000f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_0 0x00000010
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_1 0x00000011
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_2 0x00000012
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_3 0x00000013
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_0 0x00000014
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_1 0x00000015
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_0 0x00000016
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_1 0x00000017
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_2 0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_3 0x00000019
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_0 0x0000001a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_1 0x0000001b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_2 0x0000001c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_3 0x0000001d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_NIBBLEE 0x0000001e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC0 0x0000001f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC1 0x00000020
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADREG 0x00000021
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IDX 0x00000022
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CLRC 0x00000023
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADIVT 0x00000024
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000002f
+
+#define REG_A5XX_RBBM_INT_CLEAR_CMD 0x00000037
+
+#define REG_A5XX_RBBM_INT_0_MASK 0x00000038
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001
+#define A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR 0x00000002
+#define A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT 0x00000004
+#define A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT 0x00000020
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW 0x00000040
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080
+#define A5XX_RBBM_INT_0_MASK_CP_SW 0x00000100
+#define A5XX_RBBM_INT_0_MASK_CP_HW_ERROR 0x00000200
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS 0x00001000
+#define A5XX_RBBM_INT_0_MASK_CP_IB2 0x00002000
+#define A5XX_RBBM_INT_0_MASK_CP_IB1 0x00004000
+#define A5XX_RBBM_INT_0_MASK_CP_RB 0x00008000
+#define A5XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000
+#define A5XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000
+#define A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT 0x00800000
+#define A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000
+#define A5XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP 0x10000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_FIRMWARE 0x20000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000
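+
+/*
+ * Sketch of unmasking a subset of RBBM interrupts with the bits above;
+ * gpu_write() stands in for the driver's register accessor and is an
+ * assumption of the example:
+ *
+ *	gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK,
+ *		  A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR |
+ *		  A5XX_RBBM_INT_0_MASK_CP_HW_ERROR |
+ *		  A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
+ */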
+
+#define REG_A5XX_RBBM_AHB_DBG_CNTL 0x0000003f
+
+#define REG_A5XX_RBBM_EXT_VBIF_DBG_CNTL 0x00000041
+
+#define REG_A5XX_RBBM_SW_RESET_CMD 0x00000043
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046
+
+#define REG_A5XX_RBBM_DBG_LO_HI_GPIO 0x00000048
+
+#define REG_A5XX_RBBM_EXT_TRACE_BUS_CNTL 0x00000049
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP0 0x0000004a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP1 0x0000004b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP2 0x0000004c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP3 0x0000004d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP0 0x0000004e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP1 0x0000004f
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP2 0x00000050
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP3 0x00000051
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP0 0x00000052
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP1 0x00000053
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP2 0x00000054
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP3 0x00000055
+
+#define REG_A5XX_RBBM_READ_AHB_THROUGH_DBG 0x00000059
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_UCHE 0x0000005a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_UCHE 0x0000005b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_UCHE 0x0000005c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL4_UCHE 0x0000005d
+
+#define REG_A5XX_RBBM_CLOCK_HYST_UCHE 0x0000005e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_UCHE 0x0000005f
+
+#define REG_A5XX_RBBM_CLOCK_MODE_GPC 0x00000060
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPC 0x00000061
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPC 0x00000062
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00000063
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x00000064
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00000065
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_HLSQ 0x00000066
+
+#define REG_A5XX_RBBM_CLOCK_CNTL 0x00000067
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP0 0x00000068
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP1 0x00000069
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP2 0x0000006a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP3 0x0000006b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP0 0x0000006c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP1 0x0000006d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP2 0x0000006e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP3 0x0000006f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP0 0x00000070
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP1 0x00000071
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP2 0x00000072
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP3 0x00000073
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP0 0x00000074
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP1 0x00000075
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP2 0x00000076
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP3 0x00000077
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB0 0x00000078
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB1 0x00000079
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB2 0x0000007a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB3 0x0000007b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB0 0x0000007c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB1 0x0000007d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB2 0x0000007e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB3 0x0000007f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RAC 0x00000080
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RAC 0x00000081
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU0 0x00000082
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU1 0x00000083
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU2 0x00000084
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU3 0x00000085
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0 0x00000086
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1 0x00000087
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2 0x00000088
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3 0x00000089
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RAC 0x0000008a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RAC 0x0000008b
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0 0x0000008c
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1 0x0000008d
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2 0x0000008e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3 0x0000008f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_VFD 0x00000090
+
+#define REG_A5XX_RBBM_CLOCK_MODE_VFD 0x00000091
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_VFD 0x00000092
+
+#define REG_A5XX_RBBM_AHB_CNTL0 0x00000093
+
+#define REG_A5XX_RBBM_AHB_CNTL1 0x00000094
+
+#define REG_A5XX_RBBM_AHB_CNTL2 0x00000095
+
+#define REG_A5XX_RBBM_AHB_CMD 0x00000096
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11 0x0000009c
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12 0x0000009d
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13 0x0000009e
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14 0x0000009f
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15 0x000000a0
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16 0x000000a1
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17 0x000000a2
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18 0x000000a3
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP0 0x000000a4
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP1 0x000000a5
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP2 0x000000a6
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP3 0x000000a7
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP0 0x000000a8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP1 0x000000a9
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP2 0x000000aa
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP3 0x000000ab
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP0 0x000000ac
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP1 0x000000ad
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP2 0x000000ae
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP3 0x000000af
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP0 0x000000b0
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP1 0x000000b1
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP2 0x000000b2
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP3 0x000000b3
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP0 0x000000b4
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP1 0x000000b5
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP2 0x000000b6
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP3 0x000000b7
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP0 0x000000b8
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP1 0x000000b9
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP2 0x000000ba
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP3 0x000000bb
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_GPMU 0x000000c8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPMU 0x000000c9
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPMU 0x000000ca
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_LO 0x000003a0
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_HI 0x000003a1
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_LO 0x000003a2
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_HI 0x000003a3
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_LO 0x000003a4
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_HI 0x000003a5
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_LO 0x000003a6
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_HI 0x000003a7
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_LO 0x000003a8
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_HI 0x000003a9
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_LO 0x000003aa
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_HI 0x000003ab
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_LO 0x000003ac
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_HI 0x000003ad
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_LO 0x000003ae
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_HI 0x000003af
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_LO 0x000003b0
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_HI 0x000003b1
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_LO 0x000003b2
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_HI 0x000003b3
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_LO 0x000003b4
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_HI 0x000003b5
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_LO 0x000003b6
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_HI 0x000003b7
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_LO 0x000003b8
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_HI 0x000003b9
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_LO 0x000003ba
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_HI 0x000003bb
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_LO 0x000003bc
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_HI 0x000003bd
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_LO 0x000003be
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_HI 0x000003bf
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_LO 0x000003c0
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_HI 0x000003c1
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_LO 0x000003c2
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_HI 0x000003c3
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_LO 0x000003c4
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_HI 0x000003c5
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_LO 0x000003c6
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_HI 0x000003c7
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_LO 0x000003c8
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_HI 0x000003c9
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_LO 0x000003ca
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_HI 0x000003cb
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_LO 0x000003cc
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_HI 0x000003cd
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_LO 0x000003ce
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_HI 0x000003cf
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_LO 0x000003d0
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_HI 0x000003d1
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_LO 0x000003d2
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_HI 0x000003d3
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_LO 0x000003d4
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_HI 0x000003d5
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_LO 0x000003d6
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_HI 0x000003d7
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_LO 0x000003d8
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_HI 0x000003d9
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_LO 0x000003da
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_HI 0x000003db
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_LO 0x000003dc
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_HI 0x000003dd
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_LO 0x000003de
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_HI 0x000003df
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_LO 0x000003e0
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_HI 0x000003e1
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_LO 0x000003e2
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_HI 0x000003e3
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_LO 0x000003e4
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_HI 0x000003e5
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_LO 0x000003e6
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_HI 0x000003e7
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_LO 0x000003e8
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_HI 0x000003e9
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_LO 0x000003ea
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_HI 0x000003eb
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_LO 0x000003ec
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_HI 0x000003ed
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_LO 0x000003ee
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_HI 0x000003ef
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_LO 0x000003f0
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_HI 0x000003f1
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_LO 0x000003f2
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_HI 0x000003f3
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_LO 0x000003f4
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_HI 0x000003f5
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_LO 0x000003f6
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_HI 0x000003f7
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_LO 0x000003f8
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_HI 0x000003f9
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_LO 0x000003fa
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_HI 0x000003fb
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_LO 0x000003fc
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_HI 0x000003fd
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_LO 0x000003fe
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_HI 0x000003ff
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_LO 0x00000400
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_HI 0x00000401
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_LO 0x00000402
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_HI 0x00000403
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_LO 0x00000404
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_HI 0x00000405
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_LO 0x00000406
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_HI 0x00000407
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_LO 0x00000408
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_HI 0x00000409
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_LO 0x0000040a
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_HI 0x0000040b
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_LO 0x0000040c
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_HI 0x0000040d
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_LO 0x0000040e
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_HI 0x0000040f
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_LO 0x00000410
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_HI 0x00000411
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_LO 0x00000412
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_HI 0x00000413
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_LO 0x00000414
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_HI 0x00000415
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_LO 0x00000416
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_HI 0x00000417
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_LO 0x00000418
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_HI 0x00000419
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_LO 0x0000041a
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_HI 0x0000041b
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_LO 0x0000041c
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_HI 0x0000041d
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_LO 0x0000041e
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_HI 0x0000041f
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_LO 0x00000420
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_HI 0x00000421
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_LO 0x00000422
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_HI 0x00000423
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_LO 0x00000424
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_HI 0x00000425
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_LO 0x00000426
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_HI 0x00000427
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_LO 0x00000428
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_HI 0x00000429
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_LO 0x0000042a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_HI 0x0000042b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_LO 0x0000042c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_HI 0x0000042d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_LO 0x0000042e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_HI 0x0000042f
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_LO 0x00000430
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_HI 0x00000431
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_LO 0x00000432
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_HI 0x00000433
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_LO 0x00000434
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_HI 0x00000435
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_LO 0x00000436
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_HI 0x00000437
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_LO 0x00000438
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_HI 0x00000439
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_LO 0x0000043a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_HI 0x0000043b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_LO 0x0000043c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_HI 0x0000043d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_LO 0x0000043e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_HI 0x0000043f
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_LO 0x00000440
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_HI 0x00000441
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_LO 0x00000442
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_HI 0x00000443
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_LO 0x00000444
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_HI 0x00000445
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_LO 0x00000446
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_HI 0x00000447
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_LO 0x00000448
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_HI 0x00000449
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_LO 0x0000044a
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_HI 0x0000044b
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_LO 0x0000044c
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_HI 0x0000044d
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_LO 0x0000044e
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_HI 0x0000044f
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_LO 0x00000450
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_HI 0x00000451
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_LO 0x00000452
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_HI 0x00000453
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_LO 0x00000454
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_HI 0x00000455
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_LO 0x00000456
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_HI 0x00000457
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_LO 0x00000458
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_HI 0x00000459
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_LO 0x0000045a
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_HI 0x0000045b
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_LO 0x0000045c
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_HI 0x0000045d
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_LO 0x0000045e
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_HI 0x0000045f
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_LO 0x00000460
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_HI 0x00000461
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_LO 0x00000462
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_HI 0x00000463
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 0x0000046b
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 0x0000046c
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 0x0000046d
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000046e
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_LO 0x000004d2
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_HI 0x000004d3
+
+#define REG_A5XX_RBBM_STATUS 0x000004f5
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__MASK 0x80000000
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__SHIFT 31
+static inline uint32_t A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__SHIFT) & A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__MASK;
+}
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__MASK 0x40000000
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__SHIFT 30
+static inline uint32_t A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__SHIFT) & A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__MASK;
+}
+#define A5XX_RBBM_STATUS_HLSQ_BUSY__MASK 0x20000000
+#define A5XX_RBBM_STATUS_HLSQ_BUSY__SHIFT 29
+static inline uint32_t A5XX_RBBM_STATUS_HLSQ_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_HLSQ_BUSY__SHIFT) & A5XX_RBBM_STATUS_HLSQ_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VSC_BUSY__MASK 0x10000000
+#define A5XX_RBBM_STATUS_VSC_BUSY__SHIFT 28
+static inline uint32_t A5XX_RBBM_STATUS_VSC_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_VSC_BUSY__SHIFT) & A5XX_RBBM_STATUS_VSC_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_TPL1_BUSY__MASK 0x08000000
+#define A5XX_RBBM_STATUS_TPL1_BUSY__SHIFT 27
+static inline uint32_t A5XX_RBBM_STATUS_TPL1_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_TPL1_BUSY__SHIFT) & A5XX_RBBM_STATUS_TPL1_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_SP_BUSY__MASK 0x04000000
+#define A5XX_RBBM_STATUS_SP_BUSY__SHIFT 26
+static inline uint32_t A5XX_RBBM_STATUS_SP_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_SP_BUSY__SHIFT) & A5XX_RBBM_STATUS_SP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_UCHE_BUSY__MASK 0x02000000
+#define A5XX_RBBM_STATUS_UCHE_BUSY__SHIFT 25
+static inline uint32_t A5XX_RBBM_STATUS_UCHE_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_UCHE_BUSY__SHIFT) & A5XX_RBBM_STATUS_UCHE_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VPC_BUSY__MASK 0x01000000
+#define A5XX_RBBM_STATUS_VPC_BUSY__SHIFT 24
+static inline uint32_t A5XX_RBBM_STATUS_VPC_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_VPC_BUSY__SHIFT) & A5XX_RBBM_STATUS_VPC_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VFDP_BUSY__MASK 0x00800000
+#define A5XX_RBBM_STATUS_VFDP_BUSY__SHIFT 23
+static inline uint32_t A5XX_RBBM_STATUS_VFDP_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_VFDP_BUSY__SHIFT) & A5XX_RBBM_STATUS_VFDP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VFD_BUSY__MASK 0x00400000
+#define A5XX_RBBM_STATUS_VFD_BUSY__SHIFT 22
+static inline uint32_t A5XX_RBBM_STATUS_VFD_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_VFD_BUSY__SHIFT) & A5XX_RBBM_STATUS_VFD_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_TESS_BUSY__MASK 0x00200000
+#define A5XX_RBBM_STATUS_TESS_BUSY__SHIFT 21
+static inline uint32_t A5XX_RBBM_STATUS_TESS_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_TESS_BUSY__SHIFT) & A5XX_RBBM_STATUS_TESS_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_PC_VSD_BUSY__MASK 0x00100000
+#define A5XX_RBBM_STATUS_PC_VSD_BUSY__SHIFT 20
+static inline uint32_t A5XX_RBBM_STATUS_PC_VSD_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_PC_VSD_BUSY__SHIFT) & A5XX_RBBM_STATUS_PC_VSD_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_PC_DCALL_BUSY__MASK 0x00080000
+#define A5XX_RBBM_STATUS_PC_DCALL_BUSY__SHIFT 19
+static inline uint32_t A5XX_RBBM_STATUS_PC_DCALL_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_PC_DCALL_BUSY__SHIFT) & A5XX_RBBM_STATUS_PC_DCALL_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__MASK 0x00040000
+#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__SHIFT 18
+static inline uint32_t A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__SHIFT) & A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_DCOM_BUSY__MASK 0x00020000
+#define A5XX_RBBM_STATUS_DCOM_BUSY__SHIFT 17
+static inline uint32_t A5XX_RBBM_STATUS_DCOM_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_DCOM_BUSY__SHIFT) & A5XX_RBBM_STATUS_DCOM_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_COM_BUSY__MASK 0x00010000
+#define A5XX_RBBM_STATUS_COM_BUSY__SHIFT 16
+static inline uint32_t A5XX_RBBM_STATUS_COM_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_COM_BUSY__SHIFT) & A5XX_RBBM_STATUS_COM_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_LRZ_BUZY__MASK 0x00008000
+#define A5XX_RBBM_STATUS_LRZ_BUZY__SHIFT 15
+static inline uint32_t A5XX_RBBM_STATUS_LRZ_BUZY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_LRZ_BUZY__SHIFT) & A5XX_RBBM_STATUS_LRZ_BUZY__MASK;
+}
+#define A5XX_RBBM_STATUS_A2D_DSP_BUSY__MASK 0x00004000
+#define A5XX_RBBM_STATUS_A2D_DSP_BUSY__SHIFT 14
+static inline uint32_t A5XX_RBBM_STATUS_A2D_DSP_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_A2D_DSP_BUSY__SHIFT) & A5XX_RBBM_STATUS_A2D_DSP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CCUFCHE_BUSY__MASK 0x00002000
+#define A5XX_RBBM_STATUS_CCUFCHE_BUSY__SHIFT 13
+static inline uint32_t A5XX_RBBM_STATUS_CCUFCHE_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_CCUFCHE_BUSY__SHIFT) & A5XX_RBBM_STATUS_CCUFCHE_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_RB_BUSY__MASK 0x00001000
+#define A5XX_RBBM_STATUS_RB_BUSY__SHIFT 12
+static inline uint32_t A5XX_RBBM_STATUS_RB_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_RB_BUSY__SHIFT) & A5XX_RBBM_STATUS_RB_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_RAS_BUSY__MASK 0x00000800
+#define A5XX_RBBM_STATUS_RAS_BUSY__SHIFT 11
+static inline uint32_t A5XX_RBBM_STATUS_RAS_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_RAS_BUSY__SHIFT) & A5XX_RBBM_STATUS_RAS_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_TSE_BUSY__MASK 0x00000400
+#define A5XX_RBBM_STATUS_TSE_BUSY__SHIFT 10
+static inline uint32_t A5XX_RBBM_STATUS_TSE_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_TSE_BUSY__SHIFT) & A5XX_RBBM_STATUS_TSE_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VBIF_BUSY__MASK 0x00000200
+#define A5XX_RBBM_STATUS_VBIF_BUSY__SHIFT 9
+static inline uint32_t A5XX_RBBM_STATUS_VBIF_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_VBIF_BUSY__SHIFT) & A5XX_RBBM_STATUS_VBIF_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__MASK 0x00000100
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__SHIFT 8
+static inline uint32_t A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__SHIFT) & A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__MASK 0x00000080
+#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__SHIFT 7
+static inline uint32_t A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__SHIFT) & A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_BUSY__MASK 0x00000040
+#define A5XX_RBBM_STATUS_CP_BUSY__SHIFT 6
+static inline uint32_t A5XX_RBBM_STATUS_CP_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_CP_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__MASK 0x00000020
+#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__SHIFT 5
+static inline uint32_t A5XX_RBBM_STATUS_GPMU_MASTER_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__SHIFT) & A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_CRASH_BUSY__MASK 0x00000010
+#define A5XX_RBBM_STATUS_CP_CRASH_BUSY__SHIFT 4
+static inline uint32_t A5XX_RBBM_STATUS_CP_CRASH_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_CP_CRASH_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_CRASH_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_ETS_BUSY__MASK 0x00000008
+#define A5XX_RBBM_STATUS_CP_ETS_BUSY__SHIFT 3
+static inline uint32_t A5XX_RBBM_STATUS_CP_ETS_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_CP_ETS_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_ETS_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_PFP_BUSY__MASK 0x00000004
+#define A5XX_RBBM_STATUS_CP_PFP_BUSY__SHIFT 2
+static inline uint32_t A5XX_RBBM_STATUS_CP_PFP_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_CP_PFP_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_PFP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_ME_BUSY__MASK 0x00000002
+#define A5XX_RBBM_STATUS_CP_ME_BUSY__SHIFT 1
+static inline uint32_t A5XX_RBBM_STATUS_CP_ME_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_CP_ME_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_ME_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_HI_BUSY 0x00000001
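+
+/*
+ * RBBM_STATUS is a read-only status register, so the pack helpers above
+ * chiefly document the bit layout; reads are decoded against the __MASK
+ * values instead. One illustrative idle test (treating everything but
+ * the host-interface bit as GPU activity):
+ *
+ *	u32 status = gpu_read(gpu, REG_A5XX_RBBM_STATUS);
+ *	bool idle = !(status & ~A5XX_RBBM_STATUS_HI_BUSY);
+ */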
+
+#define REG_A5XX_RBBM_STATUS3 0x00000530
+#define A5XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT 0x01000000
+
+#define REG_A5XX_RBBM_INT_0_STATUS 0x000004e1
+
+#define REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS 0x000004f0
+
+#define REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS 0x000004f1
+
+#define REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS 0x000004f3
+
+#define REG_A5XX_RBBM_AHB_ERROR_STATUS 0x000004f4
+
+#define REG_A5XX_RBBM_PERFCTR_CNTL 0x00000464
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD0 0x00000465
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD1 0x00000466
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD2 0x00000467
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD3 0x00000468
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000469
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x0000046a
+
+#define REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000046f
+
+#define REG_A5XX_RBBM_AHB_ERROR 0x000004ed
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_EVENT_LOGIC 0x00000504
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OVER 0x00000505
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT0 0x00000506
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT1 0x00000507
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT2 0x00000508
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT3 0x00000509
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT4 0x0000050a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT5 0x0000050b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_ADDR 0x0000050c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF0 0x0000050d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF1 0x0000050e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF2 0x0000050f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF3 0x00000510
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF4 0x00000511
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR0 0x00000512
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR1 0x00000513
+
+#define REG_A5XX_RBBM_ISDB_CNT 0x00000533
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CONFIG 0x0000f000
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CNTL 0x0000f400
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO 0x0000f800
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI 0x0000f801
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0x0000f802
+
+#define REG_A5XX_RBBM_SECVID_TSB_CNTL 0x0000f803
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_LO 0x0000f804
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_HI 0x0000f805
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_LO 0x0000f806
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_HI 0x0000f807
+
+#define REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810
+
+#define REG_A5XX_VSC_BIN_SIZE 0x00000bc2
+#define A5XX_VSC_BIN_SIZE_WIDTH__MASK 0x000000ff
+#define A5XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A5XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A5XX_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A5XX_VSC_BIN_SIZE_HEIGHT__MASK 0x0001fe00
+#define A5XX_VSC_BIN_SIZE_HEIGHT__SHIFT 9
+static inline uint32_t A5XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A5XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A5XX_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
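+/* Note: the A5XX_VSC_BIN_SIZE_WIDTH/HEIGHT helpers above store
+ * val >> 5, i.e. the hardware fields count in units of 32; callers
+ * pass the full bin dimension and the low five bits are dropped.
+ */
+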
+#define REG_A5XX_VSC_SIZE_ADDRESS_LO 0x00000bc3
+
+#define REG_A5XX_VSC_SIZE_ADDRESS_HI 0x00000bc4
+
+#define REG_A5XX_UNKNOWN_0BC5 0x00000bc5
+
+#define REG_A5XX_UNKNOWN_0BC6 0x00000bc6
+
+static inline uint32_t REG_A5XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000bd0 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000bd0 + 0x1*i0; }
+#define A5XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
+#define A5XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0
+static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
+{
+ return ((val) << A5XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_X__MASK;
+}
+#define A5XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00
+#define A5XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10
+static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val)
+{
+ return ((val) << A5XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_Y__MASK;
+}
+#define A5XX_VSC_PIPE_CONFIG_REG_W__MASK 0x00f00000
+#define A5XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20
+static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_W(uint32_t val)
+{
+ return ((val) << A5XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_W__MASK;
+}
+#define A5XX_VSC_PIPE_CONFIG_REG_H__MASK 0x0f000000
+#define A5XX_VSC_PIPE_CONFIG_REG_H__SHIFT 24
+static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
+{
+ return ((val) << A5XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_H__MASK;
+}
+
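+/* Illustrative sketch (hypothetical helper): packing one VSC pipe
+ * config word from the field builders above.
+ */
+static inline uint32_t a5xx_vsc_pipe_config_pack_example(uint32_t x, uint32_t y,
+  uint32_t w, uint32_t h)
+{
+ return A5XX_VSC_PIPE_CONFIG_REG_X(x) |
+        A5XX_VSC_PIPE_CONFIG_REG_Y(y) |
+        A5XX_VSC_PIPE_CONFIG_REG_W(w) |
+        A5XX_VSC_PIPE_CONFIG_REG_H(h);
+}
+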
+static inline uint32_t REG_A5XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000be0 + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VSC_PIPE_DATA_ADDRESS_LO(uint32_t i0) { return 0x00000be0 + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VSC_PIPE_DATA_ADDRESS_HI(uint32_t i0) { return 0x00000be1 + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c00 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0x00000c00 + 0x1*i0; }
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_0 0x00000c60
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_1 0x00000c61
+
+#define REG_A5XX_VSC_RESOLVE_CNTL 0x00000cdd
+#define A5XX_VSC_RESOLVE_CNTL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_VSC_RESOLVE_CNTL_X__MASK 0x00007fff
+#define A5XX_VSC_RESOLVE_CNTL_X__SHIFT 0
+static inline uint32_t A5XX_VSC_RESOLVE_CNTL_X(uint32_t val)
+{
+ return ((val) << A5XX_VSC_RESOLVE_CNTL_X__SHIFT) & A5XX_VSC_RESOLVE_CNTL_X__MASK;
+}
+#define A5XX_VSC_RESOLVE_CNTL_Y__MASK 0x7fff0000
+#define A5XX_VSC_RESOLVE_CNTL_Y__SHIFT 16
+static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
+{
+ return ((val) << A5XX_VSC_RESOLVE_CNTL_Y__SHIFT) & A5XX_VSC_RESOLVE_CNTL_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_ADDR_MODE_CNTL 0x00000c81
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c90
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c91
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c92
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c93
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c94
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c95
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c96
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c97
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_0 0x00000c98
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_1 0x00000c99
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_2 0x00000c9a
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_3 0x00000c9b
+
+#define REG_A5XX_RB_DBG_ECO_CNTL 0x00000cc4
+
+#define REG_A5XX_RB_ADDR_MODE_CNTL 0x00000cc5
+
+#define REG_A5XX_RB_MODE_CNTL 0x00000cc6
+
+#define REG_A5XX_RB_CCU_CNTL 0x00000cc7
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_0 0x00000cd0
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_1 0x00000cd1
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_2 0x00000cd2
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_3 0x00000cd3
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_4 0x00000cd4
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_5 0x00000cd5
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_6 0x00000cd6
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_7 0x00000cd7
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_0 0x00000cd8
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_1 0x00000cd9
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_2 0x00000cda
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_3 0x00000cdb
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_0 0x00000ce0
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_1 0x00000ce1
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_2 0x00000ce2
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_3 0x00000ce3
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_0 0x00000ce4
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_1 0x00000ce5
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_0 0x00000cec
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_1 0x00000ced
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_2 0x00000cee
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_3 0x00000cef
+
+#define REG_A5XX_PC_DBG_ECO_CNTL 0x00000d00
+#define A5XX_PC_DBG_ECO_CNTL_TWOPASSUSEWFI 0x00000100
+
+#define REG_A5XX_PC_ADDR_MODE_CNTL 0x00000d01
+
+#define REG_A5XX_PC_MODE_CNTL 0x00000d02
+
+#define REG_A5XX_PC_INDEX_BUF_LO 0x00000d04
+
+#define REG_A5XX_PC_INDEX_BUF_HI 0x00000d05
+
+#define REG_A5XX_PC_START_INDEX 0x00000d06
+
+#define REG_A5XX_PC_MAX_INDEX 0x00000d07
+
+#define REG_A5XX_PC_TESSFACTOR_ADDR_LO 0x00000d08
+
+#define REG_A5XX_PC_TESSFACTOR_ADDR_HI 0x00000d09
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_0 0x00000d10
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_1 0x00000d11
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_2 0x00000d12
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_3 0x00000d13
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_4 0x00000d14
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_5 0x00000d15
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_6 0x00000d16
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_7 0x00000d17
+
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_0 0x00000e00
+
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_1 0x00000e01
+
+#define REG_A5XX_HLSQ_DBG_ECO_CNTL 0x00000e04
+
+#define REG_A5XX_HLSQ_ADDR_MODE_CNTL 0x00000e05
+
+#define REG_A5XX_HLSQ_MODE_CNTL 0x00000e06
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e10
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e11
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e12
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e13
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e14
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e15
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e16
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e17
+
+#define REG_A5XX_HLSQ_SPTP_RDSEL 0x00000f08
+
+#define REG_A5XX_HLSQ_DBG_READ_SEL 0x0000bc00
+
+#define REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000a000
+
+#define REG_A5XX_VFD_ADDR_MODE_CNTL 0x00000e41
+
+#define REG_A5XX_VFD_MODE_CNTL 0x00000e42
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_0 0x00000e50
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_1 0x00000e51
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_2 0x00000e52
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_3 0x00000e53
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_4 0x00000e54
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_5 0x00000e55
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_6 0x00000e56
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_7 0x00000e57
+
+#define REG_A5XX_VPC_DBG_ECO_CNTL 0x00000e60
+#define A5XX_VPC_DBG_ECO_CNTL_ALLFLATOPTDIS 0x00000400
+
+#define REG_A5XX_VPC_ADDR_MODE_CNTL 0x00000e61
+
+#define REG_A5XX_VPC_MODE_CNTL 0x00000e62
+#define A5XX_VPC_MODE_CNTL_BINNING_PASS 0x00000001
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_0 0x00000e64
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_1 0x00000e65
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_2 0x00000e66
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_3 0x00000e67
+
+#define REG_A5XX_UCHE_ADDR_MODE_CNTL 0x00000e80
+
+#define REG_A5XX_UCHE_MODE_CNTL 0x00000e81
+
+#define REG_A5XX_UCHE_SVM_CNTL 0x00000e82
+
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_LO 0x00000e87
+
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_HI 0x00000e88
+
+#define REG_A5XX_UCHE_TRAP_BASE_LO 0x00000e89
+
+#define REG_A5XX_UCHE_TRAP_BASE_HI 0x00000e8a
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_LO 0x00000e8b
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_HI 0x00000e8c
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_LO 0x00000e8d
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_HI 0x00000e8e
+
+#define REG_A5XX_UCHE_DBG_ECO_CNTL_2 0x00000e8f
+
+#define REG_A5XX_UCHE_DBG_ECO_CNTL 0x00000e90
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_LO 0x00000e91
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_HI 0x00000e92
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_LO 0x00000e93
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_HI 0x00000e94
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE 0x00000e95
+
+#define REG_A5XX_UCHE_CACHE_WAYS 0x00000e96
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000ea0
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000ea1
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000ea2
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000ea3
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000ea4
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000ea5
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000ea6
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000ea7
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_0 0x00000ea8
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_1 0x00000ea9
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_2 0x00000eaa
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_3 0x00000eab
+
+#define REG_A5XX_UCHE_TRAP_LOG_LO 0x00000eb1
+
+#define REG_A5XX_UCHE_TRAP_LOG_HI 0x00000eb2
+
+#define REG_A5XX_SP_DBG_ECO_CNTL 0x00000ec0
+
+#define REG_A5XX_SP_ADDR_MODE_CNTL 0x00000ec1
+
+#define REG_A5XX_SP_MODE_CNTL 0x00000ec2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_0 0x00000ed0
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_1 0x00000ed1
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_2 0x00000ed2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_3 0x00000ed3
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_4 0x00000ed4
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_5 0x00000ed5
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_6 0x00000ed6
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_7 0x00000ed7
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_8 0x00000ed8
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_9 0x00000ed9
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_10 0x00000eda
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_11 0x00000edb
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_0 0x00000edc
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_1 0x00000edd
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_2 0x00000ede
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_3 0x00000edf
+
+#define REG_A5XX_TPL1_ADDR_MODE_CNTL 0x00000f01
+
+#define REG_A5XX_TPL1_MODE_CNTL 0x00000f02
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_0 0x00000f10
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_1 0x00000f11
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_2 0x00000f12
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_3 0x00000f13
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_4 0x00000f14
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_5 0x00000f15
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_6 0x00000f16
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_7 0x00000f17
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_0 0x00000f18
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_1 0x00000f19
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_2 0x00000f1a
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_3 0x00000f1b
+
+#define REG_A5XX_VBIF_VERSION 0x00003000
+
+#define REG_A5XX_VBIF_CLKON 0x00003001
+
+#define REG_A5XX_VBIF_ABIT_SORT 0x00003028
+
+#define REG_A5XX_VBIF_ABIT_SORT_CONF 0x00003029
+
+#define REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
+
+#define REG_A5XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL0 0x00003080
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL1 0x00003081
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT_CTRL 0x00003084
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL0 0x00003085
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL1 0x00003086
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL0 0x00003087
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL1 0x00003088
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT 0x0000308c
+
+#define REG_A5XX_VBIF_PERF_CNT_EN0 0x000030c0
+
+#define REG_A5XX_VBIF_PERF_CNT_EN1 0x000030c1
+
+#define REG_A5XX_VBIF_PERF_CNT_EN2 0x000030c2
+
+#define REG_A5XX_VBIF_PERF_CNT_EN3 0x000030c3
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR0 0x000030c8
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR1 0x000030c9
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR2 0x000030ca
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR3 0x000030cb
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL0 0x000030d0
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL1 0x000030d1
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL2 0x000030d2
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL3 0x000030d3
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW0 0x000030d8
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW1 0x000030d9
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW2 0x000030da
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW3 0x000030db
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH0 0x000030e0
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH1 0x000030e1
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH2 0x000030e2
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH3 0x000030e3
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW1 0x00003111
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW2 0x00003112
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH0 0x00003118
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
+
+#define REG_A5XX_GPMU_INST_RAM_BASE 0x00008800
+
+#define REG_A5XX_GPMU_DATA_RAM_BASE 0x00009800
+
+#define REG_A5XX_GPMU_SP_POWER_CNTL 0x0000a881
+
+#define REG_A5XX_GPMU_RBCCU_CLOCK_CNTL 0x0000a886
+
+#define REG_A5XX_GPMU_RBCCU_POWER_CNTL 0x0000a887
+
+#define REG_A5XX_GPMU_SP_PWR_CLK_STATUS 0x0000a88b
+#define A5XX_GPMU_SP_PWR_CLK_STATUS_PWR_ON 0x00100000
+
+#define REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS 0x0000a88d
+#define A5XX_GPMU_RBCCU_PWR_CLK_STATUS_PWR_ON 0x00100000
+
+#define REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY 0x0000a891
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL 0x0000a892
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST 0x0000a893
+
+#define REG_A5XX_GPMU_PWR_COL_BINNING_CTRL 0x0000a894
+
+#define REG_A5XX_GPMU_WFI_CONFIG 0x0000a8c1
+
+#define REG_A5XX_GPMU_RBBM_INTR_INFO 0x0000a8d6
+
+#define REG_A5XX_GPMU_CM3_SYSRESET 0x0000a8d8
+
+#define REG_A5XX_GPMU_GENERAL_0 0x0000a8e0
+
+#define REG_A5XX_GPMU_GENERAL_1 0x0000a8e1
+
+#define REG_A5XX_SP_POWER_COUNTER_0_LO 0x0000a840
+
+#define REG_A5XX_SP_POWER_COUNTER_0_HI 0x0000a841
+
+#define REG_A5XX_SP_POWER_COUNTER_1_LO 0x0000a842
+
+#define REG_A5XX_SP_POWER_COUNTER_1_HI 0x0000a843
+
+#define REG_A5XX_SP_POWER_COUNTER_2_LO 0x0000a844
+
+#define REG_A5XX_SP_POWER_COUNTER_2_HI 0x0000a845
+
+#define REG_A5XX_SP_POWER_COUNTER_3_LO 0x0000a846
+
+#define REG_A5XX_SP_POWER_COUNTER_3_HI 0x0000a847
+
+#define REG_A5XX_TP_POWER_COUNTER_0_LO 0x0000a848
+
+#define REG_A5XX_TP_POWER_COUNTER_0_HI 0x0000a849
+
+#define REG_A5XX_TP_POWER_COUNTER_1_LO 0x0000a84a
+
+#define REG_A5XX_TP_POWER_COUNTER_1_HI 0x0000a84b
+
+#define REG_A5XX_TP_POWER_COUNTER_2_LO 0x0000a84c
+
+#define REG_A5XX_TP_POWER_COUNTER_2_HI 0x0000a84d
+
+#define REG_A5XX_TP_POWER_COUNTER_3_LO 0x0000a84e
+
+#define REG_A5XX_TP_POWER_COUNTER_3_HI 0x0000a84f
+
+#define REG_A5XX_RB_POWER_COUNTER_0_LO 0x0000a850
+
+#define REG_A5XX_RB_POWER_COUNTER_0_HI 0x0000a851
+
+#define REG_A5XX_RB_POWER_COUNTER_1_LO 0x0000a852
+
+#define REG_A5XX_RB_POWER_COUNTER_1_HI 0x0000a853
+
+#define REG_A5XX_RB_POWER_COUNTER_2_LO 0x0000a854
+
+#define REG_A5XX_RB_POWER_COUNTER_2_HI 0x0000a855
+
+#define REG_A5XX_RB_POWER_COUNTER_3_LO 0x0000a856
+
+#define REG_A5XX_RB_POWER_COUNTER_3_HI 0x0000a857
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_LO 0x0000a858
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_HI 0x0000a859
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_LO 0x0000a85a
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_HI 0x0000a85b
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_LO 0x0000a85c
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_HI 0x0000a85d
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_LO 0x0000a85e
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_HI 0x0000a85f
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_LO 0x0000a860
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_HI 0x0000a861
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_LO 0x0000a862
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_HI 0x0000a863
+
+#define REG_A5XX_CP_POWER_COUNTER_0_LO 0x0000a864
+
+#define REG_A5XX_CP_POWER_COUNTER_0_HI 0x0000a865
+
+#define REG_A5XX_CP_POWER_COUNTER_1_LO 0x0000a866
+
+#define REG_A5XX_CP_POWER_COUNTER_1_HI 0x0000a867
+
+#define REG_A5XX_CP_POWER_COUNTER_2_LO 0x0000a868
+
+#define REG_A5XX_CP_POWER_COUNTER_2_HI 0x0000a869
+
+#define REG_A5XX_CP_POWER_COUNTER_3_LO 0x0000a86a
+
+#define REG_A5XX_CP_POWER_COUNTER_3_HI 0x0000a86b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_LO 0x0000a86c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_HI 0x0000a86d
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_LO 0x0000a86e
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_HI 0x0000a86f
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_LO 0x0000a870
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_HI 0x0000a871
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_LO 0x0000a872
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_HI 0x0000a873
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_LO 0x0000a874
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_HI 0x0000a875
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_LO 0x0000a876
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_HI 0x0000a877
+
+#define REG_A5XX_GPMU_POWER_COUNTER_ENABLE 0x0000a878
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_LO 0x0000a879
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_HI 0x0000a87a
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_RESET 0x0000a87b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_0 0x0000a87c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_1 0x0000a87d
+
+#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL 0x0000a8a3
+
+#define REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL 0x0000a8a8
+
+#define REG_A5XX_GPMU_TEMP_SENSOR_ID 0x0000ac00
+
+#define REG_A5XX_GPMU_TEMP_SENSOR_CONFIG 0x0000ac01
+
+#define REG_A5XX_GPMU_TEMP_VAL 0x0000ac02
+
+#define REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD 0x0000ac03
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_STATUS 0x0000ac05
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK 0x0000ac06
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_0_1 0x0000ac40
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_2_3 0x0000ac41
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_0_1 0x0000ac42
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_2_3 0x0000ac43
+
+#define REG_A5XX_GPMU_BASE_LEAKAGE 0x0000ac46
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE 0x0000ac60
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_STATUS 0x0000ac61
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK 0x0000ac62
+
+#define REG_A5XX_GPMU_GPMU_PWR_THRESHOLD 0x0000ac80
+
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL 0x0000acc4
+
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS 0x0000acc5
+
+#define REG_A5XX_GDPM_CONFIG1 0x0000b80c
+
+#define REG_A5XX_GDPM_CONFIG2 0x0000b80d
+
+#define REG_A5XX_GDPM_INT_EN 0x0000b80f
+
+#define REG_A5XX_GDPM_INT_MASK 0x0000b811
+
+#define REG_A5XX_GPMU_BEC_ENABLE 0x0000b9a0
+
+#define REG_A5XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000c41a
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x0000c41d
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x0000c41f
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x0000c421
+
+#define REG_A5XX_GPU_CS_ENABLE_REG 0x0000c520
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x0000c557
+
+#define REG_A5XX_GRAS_CL_CNTL 0x0000e000
+#define A5XX_GRAS_CL_CNTL_ZERO_GB_SCALE_Z 0x00000040
+
+#define REG_A5XX_GRAS_VS_CL_CNTL 0x0000e001
+#define A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A5XX_GRAS_VS_CL_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT) & A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK;
+}
+#define A5XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK 0x0000ff00
+#define A5XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT 8
+static inline uint32_t A5XX_GRAS_VS_CL_CNTL_CULL_MASK(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT) & A5XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E004 0x0000e004
+
+#define REG_A5XX_GRAS_CNTL 0x0000e005
+#define A5XX_GRAS_CNTL_IJ_PERSP_PIXEL 0x00000001
+#define A5XX_GRAS_CNTL_IJ_PERSP_CENTROID 0x00000002
+#define A5XX_GRAS_CNTL_IJ_PERSP_SAMPLE 0x00000004
+#define A5XX_GRAS_CNTL_IJ_LINEAR_PIXEL 0x00000008
+#define A5XX_GRAS_CNTL_IJ_LINEAR_CENTROID 0x00000010
+#define A5XX_GRAS_CNTL_IJ_LINEAR_SAMPLE 0x00000020
+#define A5XX_GRAS_CNTL_COORD_MASK__MASK 0x000003c0
+#define A5XX_GRAS_CNTL_COORD_MASK__SHIFT 6
+static inline uint32_t A5XX_GRAS_CNTL_COORD_MASK(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_CNTL_COORD_MASK__SHIFT) & A5XX_GRAS_CNTL_COORD_MASK__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x0000e006
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
+}
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_XOFFSET_0 0x0000e010
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_XSCALE_0 0x0000e011
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YOFFSET_0 0x0000e012
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YSCALE_0 0x0000e013
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZOFFSET_0 0x0000e014
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZSCALE_0 0x0000e015
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
+
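+/* Note: the GRAS_CL_VPORT_* fields are full 32-bit IEEE floats; the
+ * helpers above pass the value through fui(), a float-to-uint32
+ * bit-cast helper declared elsewhere, outside this generated file.
+ */
+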
+#define REG_A5XX_GRAS_SU_CNTL 0x0000e090
+#define A5XX_GRAS_SU_CNTL_CULL_FRONT 0x00000001
+#define A5XX_GRAS_SU_CNTL_CULL_BACK 0x00000002
+#define A5XX_GRAS_SU_CNTL_FRONT_CW 0x00000004
+#define A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK 0x000007f8
+#define A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A5XX_GRAS_SU_CNTL_LINEHALFWIDTH(float val)
+{
+ return ((((int32_t)(val * 4.0))) << A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT) & A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
+}
+#define A5XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800
+#define A5XX_GRAS_SU_CNTL_LINE_MODE__MASK 0x00002000
+#define A5XX_GRAS_SU_CNTL_LINE_MODE__SHIFT 13
+static inline uint32_t A5XX_GRAS_SU_CNTL_LINE_MODE(enum a5xx_line_mode val)
+{
+ return ((val) << A5XX_GRAS_SU_CNTL_LINE_MODE__SHIFT) & A5XX_GRAS_SU_CNTL_LINE_MODE__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POINT_MINMAX 0x0000e091
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POINT_SIZE 0x0000e092
+#define A5XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A5XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((int32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_SIZE__SHIFT) & A5XX_GRAS_SU_POINT_SIZE__MASK;
+}
+
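+/* Note: line and point dimensions are fixed-point: LINEHALFWIDTH is
+ * scaled by 4.0 (two fractional bits) and POINT_MINMAX/POINT_SIZE by
+ * 16.0 (four fractional bits) before being masked into the register.
+ */
+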
+#define REG_A5XX_GRAS_SU_LAYERED 0x0000e093
+
+#define REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL 0x0000e094
+#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
+#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_UNK1 0x00000002
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000e095
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000e096
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP 0x0000e097
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_DEPTH_BUFFER_INFO 0x0000e098
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+ return ((val) << A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_CONSERVATIVE_RAS_CNTL 0x0000e099
+
+#define REG_A5XX_GRAS_SC_CNTL 0x0000e0a0
+#define A5XX_GRAS_SC_CNTL_BINNING_PASS 0x00000001
+#define A5XX_GRAS_SC_CNTL_SAMPLES_PASSED 0x00008000
+
+#define REG_A5XX_GRAS_SC_BIN_CNTL 0x0000e0a1
+
+#define REG_A5XX_GRAS_SC_RAS_MSAA_CNTL 0x0000e0a2
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_DEST_MSAA_CNTL 0x0000e0a3
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_CNTL 0x0000e0a4
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0 0x0000e0aa
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0 0x0000e0ab
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0 0x0000e0ca
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0 0x0000e0cb
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL 0x0000e0ea
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000e0eb
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_LRZ_CNTL 0x0000e100
+#define A5XX_GRAS_LRZ_CNTL_ENABLE 0x00000001
+#define A5XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002
+#define A5XX_GRAS_LRZ_CNTL_GREATER 0x00000004
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_LO 0x0000e101
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_HI 0x0000e102
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_PITCH 0x0000e103
+#define A5XX_GRAS_LRZ_BUFFER_PITCH__MASK 0xffffffff
+#define A5XX_GRAS_LRZ_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A5XX_GRAS_LRZ_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_GRAS_LRZ_BUFFER_PITCH__SHIFT) & A5XX_GRAS_LRZ_BUFFER_PITCH__MASK;
+}
+
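+/* Note: as with the VSC bin size above, the LRZ buffer pitch helper
+ * stores val >> 5, so the pitch is programmed in units of 32.
+ */
+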
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO 0x0000e104
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x0000e105
+
+#define REG_A5XX_RB_CNTL 0x0000e140
+#define A5XX_RB_CNTL_WIDTH__MASK 0x000000ff
+#define A5XX_RB_CNTL_WIDTH__SHIFT 0
+static inline uint32_t A5XX_RB_CNTL_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_CNTL_WIDTH__SHIFT) & A5XX_RB_CNTL_WIDTH__MASK;
+}
+#define A5XX_RB_CNTL_HEIGHT__MASK 0x0001fe00
+#define A5XX_RB_CNTL_HEIGHT__SHIFT 9
+static inline uint32_t A5XX_RB_CNTL_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_CNTL_HEIGHT__SHIFT) & A5XX_RB_CNTL_HEIGHT__MASK;
+}
+#define A5XX_RB_CNTL_BYPASS 0x00020000
+
+#define REG_A5XX_RB_RENDER_CNTL 0x0000e141
+#define A5XX_RB_RENDER_CNTL_BINNING_PASS 0x00000001
+#define A5XX_RB_RENDER_CNTL_SAMPLES_PASSED 0x00000040
+#define A5XX_RB_RENDER_CNTL_DISABLE_COLOR_PIPE 0x00000080
+#define A5XX_RB_RENDER_CNTL_FLAG_DEPTH 0x00004000
+#define A5XX_RB_RENDER_CNTL_FLAG_DEPTH2 0x00008000
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS__MASK 0x00ff0000
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT 16
+static inline uint32_t A5XX_RB_RENDER_CNTL_FLAG_MRTS(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT) & A5XX_RB_RENDER_CNTL_FLAG_MRTS__MASK;
+}
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS2__MASK 0xff000000
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS2__SHIFT 24
+static inline uint32_t A5XX_RB_RENDER_CNTL_FLAG_MRTS2(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_CNTL_FLAG_MRTS2__SHIFT) & A5XX_RB_RENDER_CNTL_FLAG_MRTS2__MASK;
+}
+
+#define REG_A5XX_RB_RAS_MSAA_CNTL 0x0000e142
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_RB_DEST_MSAA_CNTL 0x0000e143
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_RB_RENDER_CONTROL0 0x0000e144
+#define A5XX_RB_RENDER_CONTROL0_IJ_PERSP_PIXEL 0x00000001
+#define A5XX_RB_RENDER_CONTROL0_IJ_PERSP_CENTROID 0x00000002
+#define A5XX_RB_RENDER_CONTROL0_IJ_PERSP_SAMPLE 0x00000004
+#define A5XX_RB_RENDER_CONTROL0_IJ_LINEAR_PIXEL 0x00000008
+#define A5XX_RB_RENDER_CONTROL0_IJ_LINEAR_CENTROID 0x00000010
+#define A5XX_RB_RENDER_CONTROL0_IJ_LINEAR_SAMPLE 0x00000020
+#define A5XX_RB_RENDER_CONTROL0_COORD_MASK__MASK 0x000003c0
+#define A5XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT 6
+static inline uint32_t A5XX_RB_RENDER_CONTROL0_COORD_MASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT) & A5XX_RB_RENDER_CONTROL0_COORD_MASK__MASK;
+}
+
+#define REG_A5XX_RB_RENDER_CONTROL1 0x0000e145
+#define A5XX_RB_RENDER_CONTROL1_SAMPLEMASK 0x00000001
+#define A5XX_RB_RENDER_CONTROL1_FACENESS 0x00000002
+#define A5XX_RB_RENDER_CONTROL1_SAMPLEID 0x00000004
+
+#define REG_A5XX_RB_FS_OUTPUT_CNTL 0x0000e146
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT 0
+static inline uint32_t A5XX_RB_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+ return ((val) << A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_RB_FS_OUTPUT_CNTL_FRAG_WRITES_Z 0x00000020
+
+#define REG_A5XX_RB_RENDER_COMPONENTS 0x0000e147
+#define A5XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f
+#define A5XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT0(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT0__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0
+#define A5XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT1(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT1__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00
+#define A5XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT2(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT2__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000
+#define A5XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT3(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT3__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000
+#define A5XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT4(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT4__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000
+#define A5XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT5(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT5__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000
+#define A5XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT6(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT6__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000
+#define A5XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT7(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT7__MASK;
+}
+
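+/* Illustrative sketch (hypothetical helper): the per-render-target
+ * component-enable nibbles OR together, e.g. enabling all four
+ * components on RT0 and RT1 only.
+ */
+static inline uint32_t a5xx_render_components_rt01_example(void)
+{
+ return A5XX_RB_RENDER_COMPONENTS_RT0(0xf) |
+        A5XX_RB_RENDER_COMPONENTS_RT1(0xf);
+}
+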
+static inline uint32_t REG_A5XX_RB_MRT(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_CONTROL(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+#define A5XX_RB_MRT_CONTROL_BLEND 0x00000001
+#define A5XX_RB_MRT_CONTROL_BLEND2 0x00000002
+#define A5XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000004
+#define A5XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000078
+#define A5XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 3
+static inline uint32_t A5XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+ return ((val) << A5XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A5XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
+static inline uint32_t A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x0000e151 + 0x7*i0; }
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+
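+/* Illustrative sketch (hypothetical helper): conventional src-alpha /
+ * one-minus-src-alpha blending packed from the builders above; the
+ * FACTOR_* and BLEND_* enum values come from the shared adreno/a3xx
+ * generated headers.
+ */
+static inline uint32_t a5xx_alpha_blend_example(void)
+{
+ return A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(FACTOR_SRC_ALPHA) |
+        A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(BLEND_DST_PLUS_SRC) |
+        A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(FACTOR_ONE_MINUS_SRC_ALPHA) |
+        A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(FACTOR_SRC_ALPHA) |
+        A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(BLEND_DST_PLUS_SRC) |
+        A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(FACTOR_ONE_MINUS_SRC_ALPHA);
+}
+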
+static inline uint32_t REG_A5XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x0000e152 + 0x7*i0; }
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK 0x00001800
+#define A5XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT 11
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00008000
+
+static inline uint32_t REG_A5XX_RB_MRT_PITCH(uint32_t i0) { return 0x0000e153 + 0x7*i0; }
+#define A5XX_RB_MRT_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_PITCH__SHIFT) & A5XX_RB_MRT_PITCH__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x0000e154 + 0x7*i0; }
+#define A5XX_RB_MRT_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_ARRAY_PITCH__SHIFT) & A5XX_RB_MRT_ARRAY_PITCH__MASK;
+}
+
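+/* Note: the MRT pitch helpers store val >> 6, i.e. surface and array
+ * pitches are programmed in 64-byte units (the depth and stencil
+ * pitch helpers further below do the same).
+ */
+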
+static inline uint32_t REG_A5XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x0000e155 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_BASE_HI(uint32_t i0) { return 0x0000e156 + 0x7*i0; }
+
+#define REG_A5XX_RB_BLEND_RED 0x0000e1a0
+#define A5XX_RB_BLEND_RED_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_RED_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_RED_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_RED_UINT__SHIFT) & A5XX_RB_BLEND_RED_UINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_RED_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_RED_SINT__SHIFT) & A5XX_RB_BLEND_RED_SINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_RED_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_RED_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_RED_FLOAT__SHIFT) & A5XX_RB_BLEND_RED_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_RED_F32 0x0000e1a1
+#define A5XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_RED_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_RED_F32__SHIFT) & A5XX_RB_BLEND_RED_F32__MASK;
+}
+
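+/* Note: each blend-color channel is expressed two ways: the *_FLOAT
+ * field holds a 16-bit half-float via _mesa_float_to_half() (declared
+ * outside this generated file), while the *_F32 register holds the
+ * full 32-bit value.
+ */
+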
+#define REG_A5XX_RB_BLEND_GREEN 0x0000e1a2
+#define A5XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_GREEN_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_GREEN_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_GREEN_UINT__SHIFT) & A5XX_RB_BLEND_GREEN_UINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_GREEN_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_GREEN_SINT__SHIFT) & A5XX_RB_BLEND_GREEN_SINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_GREEN_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A5XX_RB_BLEND_GREEN_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_GREEN_F32 0x0000e1a3
+#define A5XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_GREEN_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_GREEN_F32__SHIFT) & A5XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE 0x0000e1a4
+#define A5XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_BLUE_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_BLUE_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_BLUE_UINT__SHIFT) & A5XX_RB_BLEND_BLUE_UINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_BLUE_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_BLUE_SINT__SHIFT) & A5XX_RB_BLEND_BLUE_SINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_BLUE_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A5XX_RB_BLEND_BLUE_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE_F32 0x0000e1a5
+#define A5XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_BLUE_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_BLUE_F32__SHIFT) & A5XX_RB_BLEND_BLUE_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA 0x0000e1a6
+#define A5XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_ALPHA_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_ALPHA_UINT__SHIFT) & A5XX_RB_BLEND_ALPHA_UINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_ALPHA_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_ALPHA_SINT__SHIFT) & A5XX_RB_BLEND_ALPHA_SINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_ALPHA_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A5XX_RB_BLEND_ALPHA_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA_F32 0x0000e1a7
+#define A5XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_ALPHA_F32__SHIFT) & A5XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
+#define REG_A5XX_RB_ALPHA_CONTROL 0x0000e1a8
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
+{
+ return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
+}
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_CNTL 0x0000e1a9
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A5XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A5XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_PLANE_CNTL 0x0000e1b0
+#define A5XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
+#define A5XX_RB_DEPTH_PLANE_CNTL_UNK1 0x00000002
+
+#define REG_A5XX_RB_DEPTH_CNTL 0x0000e1b1
+#define A5XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000001
+#define A5XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__MASK 0x0000001c
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT 2
+static inline uint32_t A5XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A5XX_RB_DEPTH_CNTL_ZFUNC__MASK;
+}
+#define A5XX_RB_DEPTH_CNTL_Z_READ_ENABLE 0x00000040
+
+#define REG_A5XX_RB_DEPTH_BUFFER_INFO 0x0000e1b2
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+ return ((val) << A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_LO 0x0000e1b3
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_HI 0x0000e1b4
+
+#define REG_A5XX_RB_DEPTH_BUFFER_PITCH 0x0000e1b5
+#define A5XX_RB_DEPTH_BUFFER_PITCH__MASK 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x0000e1b6
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_CONTROL 0x0000e1c0
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A5XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_INFO 0x0000e1c1
+#define A5XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
+
+#define REG_A5XX_RB_STENCIL_BASE_LO 0x0000e1c2
+
+#define REG_A5XX_RB_STENCIL_BASE_HI 0x0000e1c3
+
+#define REG_A5XX_RB_STENCIL_PITCH 0x0000e1c4
+#define A5XX_RB_STENCIL_PITCH__MASK 0xffffffff
+#define A5XX_RB_STENCIL_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_STENCIL_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_STENCIL_PITCH__SHIFT) & A5XX_RB_STENCIL_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_ARRAY_PITCH 0x0000e1c5
+#define A5XX_RB_STENCIL_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_STENCIL_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_STENCIL_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_STENCIL_ARRAY_PITCH__SHIFT) & A5XX_RB_STENCIL_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCILREFMASK 0x0000e1c6
+#define A5XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A5XX_RB_STENCILREFMASK_BF 0x0000e1c7
+#define A5XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
+#define A5XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
+static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
+#define A5XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
+static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
+#define A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A5XX_RB_WINDOW_OFFSET 0x0000e1d0
+#define A5XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A5XX_RB_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_WINDOW_OFFSET_X__SHIFT) & A5XX_RB_WINDOW_OFFSET_X__MASK;
+}
+#define A5XX_RB_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A5XX_RB_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_WINDOW_OFFSET_Y__SHIFT) & A5XX_RB_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A5XX_RB_SAMPLE_COUNT_CONTROL 0x0000e1d1
+#define A5XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
+
+#define REG_A5XX_RB_BLIT_CNTL 0x0000e210
+#define A5XX_RB_BLIT_CNTL_BUF__MASK 0x0000000f
+#define A5XX_RB_BLIT_CNTL_BUF__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_CNTL_BUF(enum a5xx_blit_buf val)
+{
+ return ((val) << A5XX_RB_BLIT_CNTL_BUF__SHIFT) & A5XX_RB_BLIT_CNTL_BUF__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_1 0x0000e211
+#define A5XX_RB_RESOLVE_CNTL_1_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_RESOLVE_CNTL_1_X__MASK 0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_1_X__SHIFT 0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_1_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_1_Y__MASK 0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT 16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_Y__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_2 0x0000e212
+#define A5XX_RB_RESOLVE_CNTL_2_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_RESOLVE_CNTL_2_X__MASK 0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_2_X__SHIFT 0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_2_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_2_Y__MASK 0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT 16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_Y__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_3 0x0000e213
+#define A5XX_RB_RESOLVE_CNTL_3_TILED 0x00000001
+
+#define REG_A5XX_RB_BLIT_DST_LO 0x0000e214
+
+#define REG_A5XX_RB_BLIT_DST_HI 0x0000e215
+
+#define REG_A5XX_RB_BLIT_DST_PITCH 0x0000e216
+#define A5XX_RB_BLIT_DST_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_DST_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_DST_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_BLIT_DST_PITCH__SHIFT) & A5XX_RB_BLIT_DST_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_BLIT_DST_ARRAY_PITCH 0x0000e217
+#define A5XX_RB_BLIT_DST_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_DST_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW0 0x0000e218
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW1 0x0000e219
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW2 0x0000e21a
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW3 0x0000e21b
+
+#define REG_A5XX_RB_CLEAR_CNTL 0x0000e21c
+#define A5XX_RB_CLEAR_CNTL_FAST_CLEAR 0x00000002
+#define A5XX_RB_CLEAR_CNTL_MSAA_RESOLVE 0x00000004
+#define A5XX_RB_CLEAR_CNTL_MASK__MASK 0x000000f0
+#define A5XX_RB_CLEAR_CNTL_MASK__SHIFT 4
+static inline uint32_t A5XX_RB_CLEAR_CNTL_MASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_CLEAR_CNTL_MASK__SHIFT) & A5XX_RB_CLEAR_CNTL_MASK__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_LO 0x0000e240
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_HI 0x0000e241
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x0000e242
+
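+/* Per-instance register blocks are generated as index helpers that
+ * return base + stride*i0. Here each render target's flag buffer
+ * occupies a 4-dword block, so e.g.
+ * REG_A5XX_RB_MRT_FLAG_BUFFER_ADDR_LO(2) = 0xe243 + 0x4*2 = 0xe24b. */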
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER(uint32_t i0) { return 0x0000e243 + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ADDR_LO(uint32_t i0) { return 0x0000e243 + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ADDR_HI(uint32_t i0) { return 0x0000e244 + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t i0) { return 0x0000e245 + 0x4*i0; }
+#define A5XX_RB_MRT_FLAG_BUFFER_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_FLAG_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_FLAG_BUFFER_PITCH__SHIFT) & A5XX_RB_MRT_FLAG_BUFFER_PITCH__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(uint32_t i0) { return 0x0000e246 + 0x4*i0; }
+#define A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_LO 0x0000e263
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_HI 0x0000e264
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_PITCH 0x0000e265
+#define A5XX_RB_BLIT_FLAG_DST_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_FLAG_DST_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_FLAG_DST_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_BLIT_FLAG_DST_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH 0x0000e266
+#define A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_SAMPLE_COUNT_ADDR_LO 0x0000e267
+
+#define REG_A5XX_RB_SAMPLE_COUNT_ADDR_HI 0x0000e268
+
+#define REG_A5XX_VPC_CNTL_0 0x0000e280
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK 0x0000007f
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_VPC_CNTL_0_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT) & A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK;
+}
+#define A5XX_VPC_CNTL_0_VARYING 0x00000800
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+#define REG_A5XX_UNKNOWN_E292 0x0000e292
+
+#define REG_A5XX_UNKNOWN_E293 0x0000e293
+
+static inline uint32_t REG_A5XX_VPC_VAR(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+#define REG_A5XX_VPC_GS_SIV_CNTL 0x0000e298
+
+#define REG_A5XX_VPC_CLIP_CNTL 0x0000e29a
+#define A5XX_VPC_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A5XX_VPC_CLIP_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A5XX_VPC_CLIP_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A5XX_VPC_CLIP_CNTL_CLIP_MASK__SHIFT) & A5XX_VPC_CLIP_CNTL_CLIP_MASK__MASK;
+}
+#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00
+#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8
+static inline uint32_t A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val)
+{
+ return ((val) << A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__MASK;
+}
+#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000
+#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16
+static inline uint32_t A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val)
+{
+ return ((val) << A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__MASK;
+}
+
+#define REG_A5XX_VPC_PACK 0x0000e29d
+#define A5XX_VPC_PACK_NUMNONPOSVAR__MASK 0x000000ff
+#define A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT 0
+static inline uint32_t A5XX_VPC_PACK_NUMNONPOSVAR(uint32_t val)
+{
+ return ((val) << A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A5XX_VPC_PACK_NUMNONPOSVAR__MASK;
+}
+#define A5XX_VPC_PACK_PSIZELOC__MASK 0x0000ff00
+#define A5XX_VPC_PACK_PSIZELOC__SHIFT 8
+static inline uint32_t A5XX_VPC_PACK_PSIZELOC(uint32_t val)
+{
+ return ((val) << A5XX_VPC_PACK_PSIZELOC__SHIFT) & A5XX_VPC_PACK_PSIZELOC__MASK;
+}
+
+#define REG_A5XX_VPC_FS_PRIMITIVEID_CNTL 0x0000e2a0
+
+#define REG_A5XX_VPC_SO_BUF_CNTL 0x0000e2a1
+#define A5XX_VPC_SO_BUF_CNTL_BUF0 0x00000001
+#define A5XX_VPC_SO_BUF_CNTL_BUF1 0x00000008
+#define A5XX_VPC_SO_BUF_CNTL_BUF2 0x00000040
+#define A5XX_VPC_SO_BUF_CNTL_BUF3 0x00000200
+#define A5XX_VPC_SO_BUF_CNTL_ENABLE 0x00008000
+
+#define REG_A5XX_VPC_SO_OVERRIDE 0x0000e2a2
+#define A5XX_VPC_SO_OVERRIDE_SO_DISABLE 0x00000001
+
+#define REG_A5XX_VPC_SO_CNTL 0x0000e2a3
+#define A5XX_VPC_SO_CNTL_ENABLE 0x00010000
+
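+/* VPC_SO_PROG packs two stream-out components (A and B) per dword;
+ * the _OFF packers drop the low two bits of the byte offset before
+ * placing it, so offsets are presumably required to be dword-aligned. */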
+#define REG_A5XX_VPC_SO_PROG 0x0000e2a4
+#define A5XX_VPC_SO_PROG_A_BUF__MASK 0x00000003
+#define A5XX_VPC_SO_PROG_A_BUF__SHIFT 0
+static inline uint32_t A5XX_VPC_SO_PROG_A_BUF(uint32_t val)
+{
+ return ((val) << A5XX_VPC_SO_PROG_A_BUF__SHIFT) & A5XX_VPC_SO_PROG_A_BUF__MASK;
+}
+#define A5XX_VPC_SO_PROG_A_OFF__MASK 0x000007fc
+#define A5XX_VPC_SO_PROG_A_OFF__SHIFT 2
+static inline uint32_t A5XX_VPC_SO_PROG_A_OFF(uint32_t val)
+{
+ return ((val >> 2) << A5XX_VPC_SO_PROG_A_OFF__SHIFT) & A5XX_VPC_SO_PROG_A_OFF__MASK;
+}
+#define A5XX_VPC_SO_PROG_A_EN 0x00000800
+#define A5XX_VPC_SO_PROG_B_BUF__MASK 0x00003000
+#define A5XX_VPC_SO_PROG_B_BUF__SHIFT 12
+static inline uint32_t A5XX_VPC_SO_PROG_B_BUF(uint32_t val)
+{
+ return ((val) << A5XX_VPC_SO_PROG_B_BUF__SHIFT) & A5XX_VPC_SO_PROG_B_BUF__MASK;
+}
+#define A5XX_VPC_SO_PROG_B_OFF__MASK 0x007fc000
+#define A5XX_VPC_SO_PROG_B_OFF__SHIFT 14
+static inline uint32_t A5XX_VPC_SO_PROG_B_OFF(uint32_t val)
+{
+ return ((val >> 2) << A5XX_VPC_SO_PROG_B_OFF__SHIFT) & A5XX_VPC_SO_PROG_B_OFF__MASK;
+}
+#define A5XX_VPC_SO_PROG_B_EN 0x00800000
+
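+/* Each of the four stream-out buffers gets a 7-dword state block
+ * (base lo/hi, size, component count, offset, flush base lo/hi),
+ * hence the 0x7 stride in the index helpers below. */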
+static inline uint32_t REG_A5XX_VPC_SO(uint32_t i0) { return 0x0000e2a7 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_VPC_SO_BUFFER_BASE_LO(uint32_t i0) { return 0x0000e2a7 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_VPC_SO_BUFFER_BASE_HI(uint32_t i0) { return 0x0000e2a8 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_VPC_SO_BUFFER_SIZE(uint32_t i0) { return 0x0000e2a9 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_VPC_SO_NCOMP(uint32_t i0) { return 0x0000e2aa + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_VPC_SO_BUFFER_OFFSET(uint32_t i0) { return 0x0000e2ab + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_VPC_SO_FLUSH_BASE_LO(uint32_t i0) { return 0x0000e2ac + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_VPC_SO_FLUSH_BASE_HI(uint32_t i0) { return 0x0000e2ad + 0x7*i0; }
+
+#define REG_A5XX_PC_PRIMITIVE_CNTL 0x0000e384
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK 0x0000007f
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define A5XX_PC_PRIMITIVE_CNTL_PRIMITIVE_RESTART 0x00000100
+#define A5XX_PC_PRIMITIVE_CNTL_COUNT_PRIMITIVES 0x00000200
+#define A5XX_PC_PRIMITIVE_CNTL_PROVOKING_VTX_LAST 0x00000400
+
+#define REG_A5XX_PC_PRIM_VTX_CNTL 0x0000e385
+#define A5XX_PC_PRIM_VTX_CNTL_PSIZE 0x00000800
+
+#define REG_A5XX_PC_RASTER_CNTL 0x0000e388
+#define A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__MASK 0x00000007
+#define A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__SHIFT 0
+static inline uint32_t A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__SHIFT) & A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__MASK;
+}
+#define A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__MASK 0x00000038
+#define A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__SHIFT 3
+static inline uint32_t A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__MASK;
+}
+#define A5XX_PC_RASTER_CNTL_POLYMODE_ENABLE 0x00000040
+
+#define REG_A5XX_PC_CLIP_CNTL 0x0000e389
+#define A5XX_PC_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A5XX_PC_CLIP_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A5XX_PC_CLIP_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A5XX_PC_CLIP_CNTL_CLIP_MASK__SHIFT) & A5XX_PC_CLIP_CNTL_CLIP_MASK__MASK;
+}
+
+#define REG_A5XX_PC_RESTART_INDEX 0x0000e38c
+
+#define REG_A5XX_PC_GS_LAYERED 0x0000e38d
+
+#define REG_A5XX_PC_GS_PARAM 0x0000e38e
+#define A5XX_PC_GS_PARAM_MAX_VERTICES__MASK 0x000003ff
+#define A5XX_PC_GS_PARAM_MAX_VERTICES__SHIFT 0
+static inline uint32_t A5XX_PC_GS_PARAM_MAX_VERTICES(uint32_t val)
+{
+ return ((val) << A5XX_PC_GS_PARAM_MAX_VERTICES__SHIFT) & A5XX_PC_GS_PARAM_MAX_VERTICES__MASK;
+}
+#define A5XX_PC_GS_PARAM_INVOCATIONS__MASK 0x0000f800
+#define A5XX_PC_GS_PARAM_INVOCATIONS__SHIFT 11
+static inline uint32_t A5XX_PC_GS_PARAM_INVOCATIONS(uint32_t val)
+{
+ return ((val) << A5XX_PC_GS_PARAM_INVOCATIONS__SHIFT) & A5XX_PC_GS_PARAM_INVOCATIONS__MASK;
+}
+#define A5XX_PC_GS_PARAM_PRIMTYPE__MASK 0x01800000
+#define A5XX_PC_GS_PARAM_PRIMTYPE__SHIFT 23
+static inline uint32_t A5XX_PC_GS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A5XX_PC_GS_PARAM_PRIMTYPE__SHIFT) & A5XX_PC_GS_PARAM_PRIMTYPE__MASK;
+}
+
+#define REG_A5XX_PC_HS_PARAM 0x0000e38f
+#define A5XX_PC_HS_PARAM_VERTICES_OUT__MASK 0x0000003f
+#define A5XX_PC_HS_PARAM_VERTICES_OUT__SHIFT 0
+static inline uint32_t A5XX_PC_HS_PARAM_VERTICES_OUT(uint32_t val)
+{
+ return ((val) << A5XX_PC_HS_PARAM_VERTICES_OUT__SHIFT) & A5XX_PC_HS_PARAM_VERTICES_OUT__MASK;
+}
+#define A5XX_PC_HS_PARAM_SPACING__MASK 0x00600000
+#define A5XX_PC_HS_PARAM_SPACING__SHIFT 21
+static inline uint32_t A5XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val)
+{
+ return ((val) << A5XX_PC_HS_PARAM_SPACING__SHIFT) & A5XX_PC_HS_PARAM_SPACING__MASK;
+}
+#define A5XX_PC_HS_PARAM_CW 0x00800000
+#define A5XX_PC_HS_PARAM_CONNECTED 0x01000000
+
+#define REG_A5XX_PC_POWER_CNTL 0x0000e3b0
+
+#define REG_A5XX_VFD_CONTROL_0 0x0000e400
+#define A5XX_VFD_CONTROL_0_VTXCNT__MASK 0x0000003f
+#define A5XX_VFD_CONTROL_0_VTXCNT__SHIFT 0
+static inline uint32_t A5XX_VFD_CONTROL_0_VTXCNT(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_0_VTXCNT__SHIFT) & A5XX_VFD_CONTROL_0_VTXCNT__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_1 0x0000e401
+#define A5XX_VFD_CONTROL_1_REGID4VTX__MASK 0x000000ff
+#define A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT 0
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A5XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+#define A5XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00
+#define A5XX_VFD_CONTROL_1_REGID4INST__SHIFT 8
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A5XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+#define A5XX_VFD_CONTROL_1_REGID4PRIMID__MASK 0x00ff0000
+#define A5XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT 16
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4PRIMID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT) & A5XX_VFD_CONTROL_1_REGID4PRIMID__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_2 0x0000e402
+#define A5XX_VFD_CONTROL_2_REGID_PATCHID__MASK 0x000000ff
+#define A5XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT 0
+static inline uint32_t A5XX_VFD_CONTROL_2_REGID_PATCHID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT) & A5XX_VFD_CONTROL_2_REGID_PATCHID__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_3 0x0000e403
+#define A5XX_VFD_CONTROL_3_REGID_PATCHID__MASK 0x0000ff00
+#define A5XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT 8
+static inline uint32_t A5XX_VFD_CONTROL_3_REGID_PATCHID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT) & A5XX_VFD_CONTROL_3_REGID_PATCHID__MASK;
+}
+#define A5XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000
+#define A5XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16
+static inline uint32_t A5XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A5XX_VFD_CONTROL_3_REGID_TESSX__MASK;
+}
+#define A5XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000
+#define A5XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24
+static inline uint32_t A5XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A5XX_VFD_CONTROL_3_REGID_TESSY__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_4 0x0000e404
+
+#define REG_A5XX_VFD_CONTROL_5 0x0000e405
+
+#define REG_A5XX_VFD_INDEX_OFFSET 0x0000e408
+
+#define REG_A5XX_VFD_INSTANCE_START_OFFSET 0x0000e409
+
+static inline uint32_t REG_A5XX_VFD_FETCH(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_HI(uint32_t i0) { return 0x0000e40b + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000e40c + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000e40d + 0x4*i0; }
+
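+/* Vertex fetch decode state is a 2-dword-per-attribute array: an
+ * INSTR dword (source index, format, swap, int/float interpretation)
+ * followed by the instance step rate. */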
+static inline uint32_t REG_A5XX_VFD_DECODE(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+#define A5XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f
+#define A5XX_VFD_DECODE_INSTR_IDX__SHIFT 0
+static inline uint32_t A5XX_VFD_DECODE_INSTR_IDX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_IDX__SHIFT) & A5XX_VFD_DECODE_INSTR_IDX__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_INSTANCED 0x00020000
+#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK 0x0ff00000
+#define A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
+static inline uint32_t A5XX_VFD_DECODE_INSTR_FORMAT(enum a5xx_vtx_fmt val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A5XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_SWAP__MASK 0x30000000
+#define A5XX_VFD_DECODE_INSTR_SWAP__SHIFT 28
+static inline uint32_t A5XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A5XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_UNK30 0x40000000
+#define A5XX_VFD_DECODE_INSTR_FLOAT 0x80000000
+
+static inline uint32_t REG_A5XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000e48b + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK;
+}
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK 0x00000ff0
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT 4
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK;
+}
+
+#define REG_A5XX_VFD_POWER_CNTL 0x0000e4f0
+
+#define REG_A5XX_SP_SP_CNTL 0x0000e580
+
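+/* All six shader stages (VS/FS/HS/DS/GS/CS) share one CONFIG layout:
+ * an enable bit plus constant-object and shader-object offsets. Only
+ * the register address differs, so the generator duplicates the field
+ * packers per stage. */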
+#define REG_A5XX_SP_VS_CONFIG 0x0000e584
+#define A5XX_SP_VS_CONFIG_ENABLED 0x00000001
+#define A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_VS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_FS_CONFIG 0x0000e585
+#define A5XX_SP_FS_CONFIG_ENABLED 0x00000001
+#define A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_FS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_HS_CONFIG 0x0000e586
+#define A5XX_SP_HS_CONFIG_ENABLED 0x00000001
+#define A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_HS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_DS_CONFIG 0x0000e587
+#define A5XX_SP_DS_CONFIG_ENABLED 0x00000001
+#define A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_DS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_GS_CONFIG 0x0000e588
+#define A5XX_SP_GS_CONFIG_ENABLED 0x00000001
+#define A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_GS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_CS_CONFIG 0x0000e589
+#define A5XX_SP_CS_CONFIG_ENABLED 0x00000001
+#define A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_CS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_VS_CONFIG_MAX_CONST 0x0000e58a
+
+#define REG_A5XX_SP_FS_CONFIG_MAX_CONST 0x0000e58b
+
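+/* Each stage's CTRL_REG0 likewise shares a layout: thread size,
+ * half- and full-precision register footprints, and branch stack
+ * depth. A sketch of packing the VS variant (the footprint and stack
+ * values are made up for illustration; TWO_QUADS is from the shared
+ * a3xx_threadsize enum):
+ *
+ *   uint32_t r0 = A5XX_SP_VS_CTRL_REG0_THREADSIZE(TWO_QUADS) |
+ *                 A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(8) |
+ *                 A5XX_SP_VS_CTRL_REG0_BRANCHSTACK(2);
+ */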
+#define REG_A5XX_SP_VS_CTRL_REG0 0x0000e590
+#define A5XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A5XX_SP_PRIMITIVE_CNTL 0x0000e592
+#define A5XX_SP_PRIMITIVE_CNTL_VSOUT__MASK 0x0000001f
+#define A5XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT 0
+static inline uint32_t A5XX_SP_PRIMITIVE_CNTL_VSOUT(uint32_t val)
+{
+ return ((val) << A5XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT) & A5XX_SP_PRIMITIVE_CNTL_VSOUT__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_VS_OUT(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+#define A5XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A5XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A5XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A5XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A5XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A5XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A5XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_VS_VPC_DST(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E5AB 0x0000e5ab
+
+#define REG_A5XX_SP_VS_OBJ_START_LO 0x0000e5ac
+
+#define REG_A5XX_SP_VS_OBJ_START_HI 0x0000e5ad
+
+#define REG_A5XX_SP_FS_CTRL_REG0 0x0000e5c0
+#define A5XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E5C2 0x0000e5c2
+
+#define REG_A5XX_SP_FS_OBJ_START_LO 0x0000e5c3
+
+#define REG_A5XX_SP_FS_OBJ_START_HI 0x0000e5c4
+
+#define REG_A5XX_SP_BLEND_CNTL 0x0000e5c9
+#define A5XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A5XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A5XX_SP_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A5XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A5XX_SP_BLEND_CNTL_UNK8 0x00000100
+#define A5XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
+
+#define REG_A5XX_SP_FS_OUTPUT_CNTL 0x0000e5ca
+#define A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
+#define A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT 0
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK 0x00001fe0
+#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT 5
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK 0x001fe000
+#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT 13
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+#define A5XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff
+#define A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0
+static inline uint32_t A5XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_REG_REGID__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100
+
+static inline uint32_t REG_A5XX_SP_FS_MRT(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
+}
+#define A5XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100
+#define A5XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200
+#define A5XX_SP_FS_MRT_REG_COLOR_SRGB 0x00000400
+
+#define REG_A5XX_UNKNOWN_E5DB 0x0000e5db
+
+#define REG_A5XX_SP_CS_CTRL_REG0 0x0000e5f0
+#define A5XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_CS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_CS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_CS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_CS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E5F2 0x0000e5f2
+
+#define REG_A5XX_SP_CS_OBJ_START_LO 0x0000e5f3
+
+#define REG_A5XX_SP_CS_OBJ_START_HI 0x0000e5f4
+
+#define REG_A5XX_SP_HS_CTRL_REG0 0x0000e600
+#define A5XX_SP_HS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_HS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_HS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_HS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E602 0x0000e602
+
+#define REG_A5XX_SP_HS_OBJ_START_LO 0x0000e603
+
+#define REG_A5XX_SP_HS_OBJ_START_HI 0x0000e604
+
+#define REG_A5XX_SP_DS_CTRL_REG0 0x0000e610
+#define A5XX_SP_DS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_DS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_DS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_DS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E62B 0x0000e62b
+
+#define REG_A5XX_SP_DS_OBJ_START_LO 0x0000e62c
+
+#define REG_A5XX_SP_DS_OBJ_START_HI 0x0000e62d
+
+#define REG_A5XX_SP_GS_CTRL_REG0 0x0000e640
+#define A5XX_SP_GS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_GS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_GS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_GS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E65B 0x0000e65b
+
+#define REG_A5XX_SP_GS_OBJ_START_LO 0x0000e65c
+
+#define REG_A5XX_SP_GS_OBJ_START_HI 0x0000e65d
+
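+/* The MSAA SAMPLES fields take the shared a3xx_msaa_samples enum,
+ * whose values are log2 of the sample count (MSAA_ONE=0 through
+ * MSAA_EIGHT=3), which is why a 2-bit field suffices here. */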
+#define REG_A5XX_TPL1_TP_RAS_MSAA_CNTL 0x0000e704
+#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_TPL1_TP_DEST_MSAA_CNTL 0x0000e705
+#define A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_TPL1_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_TPL1_TP_BORDER_COLOR_BASE_ADDR_LO 0x0000e706
+
+#define REG_A5XX_TPL1_TP_BORDER_COLOR_BASE_ADDR_HI 0x0000e707
+
+#define REG_A5XX_TPL1_VS_TEX_COUNT 0x0000e700
+
+#define REG_A5XX_TPL1_HS_TEX_COUNT 0x0000e701
+
+#define REG_A5XX_TPL1_DS_TEX_COUNT 0x0000e702
+
+#define REG_A5XX_TPL1_GS_TEX_COUNT 0x0000e703
+
+#define REG_A5XX_TPL1_VS_TEX_SAMP_LO 0x0000e722
+
+#define REG_A5XX_TPL1_VS_TEX_SAMP_HI 0x0000e723
+
+#define REG_A5XX_TPL1_HS_TEX_SAMP_LO 0x0000e724
+
+#define REG_A5XX_TPL1_HS_TEX_SAMP_HI 0x0000e725
+
+#define REG_A5XX_TPL1_DS_TEX_SAMP_LO 0x0000e726
+
+#define REG_A5XX_TPL1_DS_TEX_SAMP_HI 0x0000e727
+
+#define REG_A5XX_TPL1_GS_TEX_SAMP_LO 0x0000e728
+
+#define REG_A5XX_TPL1_GS_TEX_SAMP_HI 0x0000e729
+
+#define REG_A5XX_TPL1_VS_TEX_CONST_LO 0x0000e72a
+
+#define REG_A5XX_TPL1_VS_TEX_CONST_HI 0x0000e72b
+
+#define REG_A5XX_TPL1_HS_TEX_CONST_LO 0x0000e72c
+
+#define REG_A5XX_TPL1_HS_TEX_CONST_HI 0x0000e72d
+
+#define REG_A5XX_TPL1_DS_TEX_CONST_LO 0x0000e72e
+
+#define REG_A5XX_TPL1_DS_TEX_CONST_HI 0x0000e72f
+
+#define REG_A5XX_TPL1_GS_TEX_CONST_LO 0x0000e730
+
+#define REG_A5XX_TPL1_GS_TEX_CONST_HI 0x0000e731
+
+#define REG_A5XX_TPL1_FS_TEX_COUNT 0x0000e750
+
+#define REG_A5XX_TPL1_CS_TEX_COUNT 0x0000e751
+
+#define REG_A5XX_TPL1_FS_TEX_SAMP_LO 0x0000e75a
+
+#define REG_A5XX_TPL1_FS_TEX_SAMP_HI 0x0000e75b
+
+#define REG_A5XX_TPL1_CS_TEX_SAMP_LO 0x0000e75c
+
+#define REG_A5XX_TPL1_CS_TEX_SAMP_HI 0x0000e75d
+
+#define REG_A5XX_TPL1_FS_TEX_CONST_LO 0x0000e75e
+
+#define REG_A5XX_TPL1_FS_TEX_CONST_HI 0x0000e75f
+
+#define REG_A5XX_TPL1_CS_TEX_CONST_LO 0x0000e760
+
+#define REG_A5XX_TPL1_CS_TEX_CONST_HI 0x0000e761
+
+#define REG_A5XX_TPL1_TP_FS_ROTATION_CNTL 0x0000e764
+
+#define REG_A5XX_HLSQ_CONTROL_0_REG 0x0000e784
+#define A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000001
+#define A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
+}
+#define A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__MASK 0x00000004
+#define A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__SHIFT 2
+static inline uint32_t A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__SHIFT) & A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_1_REG 0x0000e785
+#define A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK 0x0000003f
+#define A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT) & A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK;
+}
+
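+/* The *REGID fields name the shader register that receives a system
+ * value (face, sample ID, sample mask, ...). Userspace drivers
+ * conventionally write an out-of-range regid when a value is unused;
+ * that convention is not spelled out in this header. */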
+#define REG_A5XX_HLSQ_CONTROL_2_REG 0x0000e786
+#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff
+#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK;
+}
+#define A5XX_HLSQ_CONTROL_2_REG_SIZE__MASK 0xff000000
+#define A5XX_HLSQ_CONTROL_2_REG_SIZE__SHIFT 24
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SIZE(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_SIZE__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SIZE__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_3_REG 0x0000e787
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK;
+}
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK;
+}
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_4_REG 0x0000e788
+#define A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff
+#define A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK;
+}
+#define A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK 0x0000ff00
+#define A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT 8
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK;
+}
+#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000
+#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000
+#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_UPDATE_CNTL 0x0000e78a
+
+#define REG_A5XX_HLSQ_VS_CONFIG 0x0000e78b
+#define A5XX_HLSQ_VS_CONFIG_ENABLED 0x00000001
+#define A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_FS_CONFIG 0x0000e78c
+#define A5XX_HLSQ_FS_CONFIG_ENABLED 0x00000001
+#define A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_HS_CONFIG 0x0000e78d
+#define A5XX_HLSQ_HS_CONFIG_ENABLED 0x00000001
+#define A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_DS_CONFIG 0x0000e78e
+#define A5XX_HLSQ_DS_CONFIG_ENABLED 0x00000001
+#define A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_GS_CONFIG 0x0000e78f
+#define A5XX_HLSQ_GS_CONFIG_ENABLED 0x00000001
+#define A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_CONFIG 0x0000e790
+#define A5XX_HLSQ_CS_CONFIG_ENABLED 0x00000001
+#define A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
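+/* Per-stage HLSQ_*_CNTL registers below pack an SSBO-enable flag into
+ * bit 0 and the shader instruction length into bits 31:1. */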
+#define REG_A5XX_HLSQ_VS_CNTL 0x0000e791
+#define A5XX_HLSQ_VS_CNTL_SSBO_ENABLE 0x00000001
+#define A5XX_HLSQ_VS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_VS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_VS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_VS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_FS_CNTL 0x0000e792
+#define A5XX_HLSQ_FS_CNTL_SSBO_ENABLE 0x00000001
+#define A5XX_HLSQ_FS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_FS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_FS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_FS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_HS_CNTL 0x0000e793
+#define A5XX_HLSQ_HS_CNTL_SSBO_ENABLE 0x00000001
+#define A5XX_HLSQ_HS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_HS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_HS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_HS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_DS_CNTL 0x0000e794
+#define A5XX_HLSQ_DS_CNTL_SSBO_ENABLE 0x00000001
+#define A5XX_HLSQ_DS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_DS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_DS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_DS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_GS_CNTL 0x0000e795
+#define A5XX_HLSQ_GS_CNTL_SSBO_ENABLE 0x00000001
+#define A5XX_HLSQ_GS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_GS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_GS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_GS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_CNTL 0x0000e796
+#define A5XX_HLSQ_CS_CNTL_SSBO_ENABLE 0x00000001
+#define A5XX_HLSQ_CS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_CS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_CS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_CS_CNTL_INSTRLEN__MASK;
+}
+
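+/* Compute dispatch state, OpenCL NDRange style: KERNEL_GROUP_X/Y/Z
+ * hold workgroup counts, NDRANGE_0 packs grid dimensionality plus the
+ * local (per-workgroup) size, and NDRANGE_1..6 hold per-axis global
+ * size and offset. A hedged sketch for a 1-D dispatch of 4 groups of
+ * 64 invocations (userspace drivers appear to program local sizes
+ * minus one; that is an assumption, not documented here):
+ *
+ *   uint32_t nd0 = A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM(1) |
+ *                  A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(64 - 1);
+ *   uint32_t nd1 = A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(4 * 64);
+ */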
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_X 0x0000e7b9
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000e7ba
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000e7bb
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_0 0x0000e7b0
+#define A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK 0x00000003
+#define A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK;
+}
+#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc
+#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT 2
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK;
+}
+#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000
+#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT 12
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK;
+}
+#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000
+#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_1 0x0000e7b1
+#define A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_2 0x0000e7b2
+#define A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_3 0x0000e7b3
+#define A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_4 0x0000e7b4
+#define A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_5 0x0000e7b5
+#define A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_6 0x0000e7b6
+#define A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_CNTL_0 0x0000e7b7
+#define A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff
+#define A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT) & A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK;
+}
+#define A5XX_HLSQ_CS_CNTL_0_UNK0__MASK 0x0000ff00
+#define A5XX_HLSQ_CS_CNTL_0_UNK0__SHIFT 8
+static inline uint32_t A5XX_HLSQ_CS_CNTL_0_UNK0(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_CNTL_0_UNK0__SHIFT) & A5XX_HLSQ_CS_CNTL_0_UNK0__MASK;
+}
+#define A5XX_HLSQ_CS_CNTL_0_UNK1__MASK 0x00ff0000
+#define A5XX_HLSQ_CS_CNTL_0_UNK1__SHIFT 16
+static inline uint32_t A5XX_HLSQ_CS_CNTL_0_UNK1(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_CNTL_0_UNK1__SHIFT) & A5XX_HLSQ_CS_CNTL_0_UNK1__MASK;
+}
+#define A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK 0xff000000
+#define A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT 24
+static inline uint32_t A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT) & A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_CNTL_1 0x0000e7b8
+
+#define REG_A5XX_UNKNOWN_E7C0 0x0000e7c0
+
+#define REG_A5XX_HLSQ_VS_CONSTLEN 0x0000e7c3
+
+#define REG_A5XX_HLSQ_VS_INSTRLEN 0x0000e7c4
+
+#define REG_A5XX_UNKNOWN_E7C5 0x0000e7c5
+
+#define REG_A5XX_HLSQ_HS_CONSTLEN 0x0000e7c8
+
+#define REG_A5XX_HLSQ_HS_INSTRLEN 0x0000e7c9
+
+#define REG_A5XX_UNKNOWN_E7CA 0x0000e7ca
+
+#define REG_A5XX_HLSQ_DS_CONSTLEN 0x0000e7cd
+
+#define REG_A5XX_HLSQ_DS_INSTRLEN 0x0000e7ce
+
+#define REG_A5XX_UNKNOWN_E7CF 0x0000e7cf
+
+#define REG_A5XX_HLSQ_GS_CONSTLEN 0x0000e7d2
+
+#define REG_A5XX_HLSQ_GS_INSTRLEN 0x0000e7d3
+
+#define REG_A5XX_UNKNOWN_E7D4 0x0000e7d4
+
+#define REG_A5XX_HLSQ_FS_CONSTLEN 0x0000e7d7
+
+#define REG_A5XX_HLSQ_FS_INSTRLEN 0x0000e7d8
+
+#define REG_A5XX_UNKNOWN_E7D9 0x0000e7d9
+
+#define REG_A5XX_HLSQ_CS_CONSTLEN 0x0000e7dc
+
+#define REG_A5XX_HLSQ_CS_INSTRLEN 0x0000e7dd
+
+#define REG_A5XX_RB_2D_BLIT_CNTL 0x00002100
+
+#define REG_A5XX_RB_2D_SRC_SOLID_DW0 0x00002101
+
+#define REG_A5XX_RB_2D_SRC_SOLID_DW1 0x00002102
+
+#define REG_A5XX_RB_2D_SRC_SOLID_DW2 0x00002103
+
+#define REG_A5XX_RB_2D_SRC_SOLID_DW3 0x00002104
+
+#define REG_A5XX_RB_2D_SRC_INFO 0x00002107
+#define A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_2D_SRC_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_2D_SRC_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_RB_2D_SRC_INFO_TILE_MODE__SHIFT) & A5XX_RB_2D_SRC_INFO_TILE_MODE__MASK;
+}
+#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK;
+}
+#define A5XX_RB_2D_SRC_INFO_FLAGS 0x00001000
+#define A5XX_RB_2D_SRC_INFO_SRGB 0x00002000
+
+#define REG_A5XX_RB_2D_SRC_LO 0x00002108
+
+#define REG_A5XX_RB_2D_SRC_HI 0x00002109
+
+#define REG_A5XX_RB_2D_SRC_SIZE 0x0000210a
+#define A5XX_RB_2D_SRC_SIZE_PITCH__MASK 0x0000ffff
+#define A5XX_RB_2D_SRC_SIZE_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_2D_SRC_SIZE_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_2D_SRC_SIZE_PITCH__SHIFT) & A5XX_RB_2D_SRC_SIZE_PITCH__MASK;
+}
+#define A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__MASK 0xffff0000
+#define A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__SHIFT 16
+static inline uint32_t A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__SHIFT) & A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__MASK;
+}
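+
+/*
+ * Note (illustration, not from the register database): the 2D pitch helpers
+ * above and below pre-shift the byte pitch right by 6, i.e. the hardware
+ * fields hold pitches in 64-byte units, so callers pass a 64-byte-aligned
+ * byte pitch. For example, a hypothetical 4096-byte pitch packs as:
+ *
+ *	A5XX_RB_2D_SRC_SIZE_PITCH(4096) == (4096 >> 6) == 0x40
+ */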
+
+#define REG_A5XX_RB_2D_DST_INFO 0x00002110
+#define A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_2D_DST_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_2D_DST_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_2D_DST_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_RB_2D_DST_INFO_TILE_MODE__SHIFT) & A5XX_RB_2D_DST_INFO_TILE_MODE__MASK;
+}
+#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK;
+}
+#define A5XX_RB_2D_DST_INFO_FLAGS 0x00001000
+#define A5XX_RB_2D_DST_INFO_SRGB 0x00002000
+
+#define REG_A5XX_RB_2D_DST_LO 0x00002111
+
+#define REG_A5XX_RB_2D_DST_HI 0x00002112
+
+#define REG_A5XX_RB_2D_DST_SIZE 0x00002113
+#define A5XX_RB_2D_DST_SIZE_PITCH__MASK 0x0000ffff
+#define A5XX_RB_2D_DST_SIZE_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_2D_DST_SIZE_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_2D_DST_SIZE_PITCH__SHIFT) & A5XX_RB_2D_DST_SIZE_PITCH__MASK;
+}
+#define A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__MASK 0xffff0000
+#define A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__SHIFT 16
+static inline uint32_t A5XX_RB_2D_DST_SIZE_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__SHIFT) & A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_2D_SRC_FLAGS_LO 0x00002140
+
+#define REG_A5XX_RB_2D_SRC_FLAGS_HI 0x00002141
+
+#define REG_A5XX_RB_2D_SRC_FLAGS_PITCH 0x00002142
+#define A5XX_RB_2D_SRC_FLAGS_PITCH__MASK 0xffffffff
+#define A5XX_RB_2D_SRC_FLAGS_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_2D_SRC_FLAGS_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_2D_SRC_FLAGS_PITCH__SHIFT) & A5XX_RB_2D_SRC_FLAGS_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_2D_DST_FLAGS_LO 0x00002143
+
+#define REG_A5XX_RB_2D_DST_FLAGS_HI 0x00002144
+
+#define REG_A5XX_RB_2D_DST_FLAGS_PITCH 0x00002145
+#define A5XX_RB_2D_DST_FLAGS_PITCH__MASK 0xffffffff
+#define A5XX_RB_2D_DST_FLAGS_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_2D_DST_FLAGS_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_2D_DST_FLAGS_PITCH__SHIFT) & A5XX_RB_2D_DST_FLAGS_PITCH__MASK;
+}
+
+#define REG_A5XX_GRAS_2D_BLIT_CNTL 0x00002180
+
+#define REG_A5XX_GRAS_2D_SRC_INFO 0x00002181
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_GRAS_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_GRAS_2D_SRC_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_GRAS_2D_SRC_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_GRAS_2D_SRC_INFO_TILE_MODE__SHIFT) & A5XX_GRAS_2D_SRC_INFO_TILE_MODE__MASK;
+}
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK;
+}
+#define A5XX_GRAS_2D_SRC_INFO_FLAGS 0x00001000
+#define A5XX_GRAS_2D_SRC_INFO_SRGB 0x00002000
+
+#define REG_A5XX_GRAS_2D_DST_INFO 0x00002182
+#define A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_GRAS_2D_DST_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_GRAS_2D_DST_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_GRAS_2D_DST_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_GRAS_2D_DST_INFO_TILE_MODE__SHIFT) & A5XX_GRAS_2D_DST_INFO_TILE_MODE__MASK;
+}
+#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK;
+}
+#define A5XX_GRAS_2D_DST_INFO_FLAGS 0x00001000
+#define A5XX_GRAS_2D_DST_INFO_SRGB 0x00002000
+
+#define REG_A5XX_UNKNOWN_2184 0x00002184
+
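+/*
+ * Note: from here down the offsets restart at zero. TEX_SAMP_*, TEX_CONST_*,
+ * SSBO_* and UBO_* describe dwords of in-memory descriptors rather than MMIO
+ * registers, which is presumably why their "register" offsets overlap.
+ */
+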
+#define REG_A5XX_TEX_SAMP_0 0x00000000
+#define A5XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
+#define A5XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
+#define A5XX_TEX_SAMP_0_XY_MAG__SHIFT 1
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MAG(enum a5xx_tex_filter val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_XY_MAG__SHIFT) & A5XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A5XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
+#define A5XX_TEX_SAMP_0_XY_MIN__SHIFT 3
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MIN(enum a5xx_tex_filter val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_XY_MIN__SHIFT) & A5XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
+#define A5XX_TEX_SAMP_0_WRAP_S__SHIFT 5
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_S(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_S__SHIFT) & A5XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
+#define A5XX_TEX_SAMP_0_WRAP_T__SHIFT 8
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_T(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_T__SHIFT) & A5XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
+#define A5XX_TEX_SAMP_0_WRAP_R__SHIFT 11
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_R(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_R__SHIFT) & A5XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A5XX_TEX_SAMP_0_ANISO__MASK 0x0001c000
+#define A5XX_TEX_SAMP_0_ANISO__SHIFT 14
+static inline uint32_t A5XX_TEX_SAMP_0_ANISO(enum a5xx_tex_aniso val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_ANISO__SHIFT) & A5XX_TEX_SAMP_0_ANISO__MASK;
+}
+#define A5XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
+#define A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
+static inline uint32_t A5XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 256.0))) << A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A5XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
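+
+/*
+ * Illustration (assumed interpretation): the LOD bias is converted to signed
+ * 8.8 fixed point before packing, so e.g. a bias of -0.5f becomes
+ * (int32_t)(-0.5 * 256.0) == -128, which is then shifted into the 13-bit
+ * field at [31:19] and truncated by the mask.
+ */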
+
+#define REG_A5XX_TEX_SAMP_1 0x00000001
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
+static inline uint32_t A5XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A5XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
+#define A5XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
+#define A5XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
+#define A5XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
+#define A5XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
+static inline uint32_t A5XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A5XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A5XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
+#define A5XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
+static inline uint32_t A5XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A5XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
+
+#define REG_A5XX_TEX_SAMP_2 0x00000002
+#define A5XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK 0xffffff80
+#define A5XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT 7
+static inline uint32_t A5XX_TEX_SAMP_2_BCOLOR_OFFSET(uint32_t val)
+{
+ return ((val) << A5XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT) & A5XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK;
+}
+
+#define REG_A5XX_TEX_SAMP_3 0x00000003
+
+#define REG_A5XX_TEX_CONST_0 0x00000000
+#define A5XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003
+#define A5XX_TEX_CONST_0_TILE_MODE__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_0_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_TEX_CONST_0_TILE_MODE__SHIFT) & A5XX_TEX_CONST_0_TILE_MODE__MASK;
+}
+#define A5XX_TEX_CONST_0_SRGB 0x00000004
+#define A5XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A5XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_X(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_X__SHIFT) & A5XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A5XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Y(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A5XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Z(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A5XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_W(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_W__SHIFT) & A5XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A5XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
+#define A5XX_TEX_CONST_0_MIPLVLS__SHIFT 16
+static inline uint32_t A5XX_TEX_CONST_0_MIPLVLS(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_0_MIPLVLS__SHIFT) & A5XX_TEX_CONST_0_MIPLVLS__MASK;
+}
+#define A5XX_TEX_CONST_0_SAMPLES__MASK 0x00300000
+#define A5XX_TEX_CONST_0_SAMPLES__SHIFT 20
+static inline uint32_t A5XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SAMPLES__SHIFT) & A5XX_TEX_CONST_0_SAMPLES__MASK;
+}
+#define A5XX_TEX_CONST_0_FMT__MASK 0x3fc00000
+#define A5XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A5XX_TEX_CONST_0_FMT(enum a5xx_tex_fmt val)
+{
+ return ((val) << A5XX_TEX_CONST_0_FMT__SHIFT) & A5XX_TEX_CONST_0_FMT__MASK;
+}
+#define A5XX_TEX_CONST_0_SWAP__MASK 0xc0000000
+#define A5XX_TEX_CONST_0_SWAP__SHIFT 30
+static inline uint32_t A5XX_TEX_CONST_0_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWAP__SHIFT) & A5XX_TEX_CONST_0_SWAP__MASK;
+}
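+
+/*
+ * Usage sketch (illustrative enum values only): a full TEX_CONST_0 dword is
+ * built by OR-ing the field helpers together, e.g.:
+ *
+ *	uint32_t dw0 = A5XX_TEX_CONST_0_TILE_MODE(TILE5_LINEAR) |
+ *		A5XX_TEX_CONST_0_SWIZ_X(A5XX_TEX_X) |
+ *		A5XX_TEX_CONST_0_FMT(fmt) |
+ *		A5XX_TEX_CONST_0_SWAP(WZYX);
+ */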
+
+#define REG_A5XX_TEX_CONST_1 0x00000001
+#define A5XX_TEX_CONST_1_WIDTH__MASK 0x00007fff
+#define A5XX_TEX_CONST_1_WIDTH__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_1_WIDTH__SHIFT) & A5XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A5XX_TEX_CONST_1_HEIGHT__MASK 0x3fff8000
+#define A5XX_TEX_CONST_1_HEIGHT__SHIFT 15
+static inline uint32_t A5XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_1_HEIGHT__SHIFT) & A5XX_TEX_CONST_1_HEIGHT__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_2 0x00000002
+#define A5XX_TEX_CONST_2_BUFFER 0x00000010
+#define A5XX_TEX_CONST_2_PITCHALIGN__MASK 0x0000000f
+#define A5XX_TEX_CONST_2_PITCHALIGN__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_2_PITCHALIGN(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_2_PITCHALIGN__SHIFT) & A5XX_TEX_CONST_2_PITCHALIGN__MASK;
+}
+#define A5XX_TEX_CONST_2_PITCH__MASK 0x1fffff80
+#define A5XX_TEX_CONST_2_PITCH__SHIFT 7
+static inline uint32_t A5XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_2_PITCH__SHIFT) & A5XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A5XX_TEX_CONST_2_TYPE__MASK 0xe0000000
+#define A5XX_TEX_CONST_2_TYPE__SHIFT 29
+static inline uint32_t A5XX_TEX_CONST_2_TYPE(enum a5xx_tex_type val)
+{
+ return ((val) << A5XX_TEX_CONST_2_TYPE__SHIFT) & A5XX_TEX_CONST_2_TYPE__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_3 0x00000003
+#define A5XX_TEX_CONST_3_ARRAY_PITCH__MASK 0x00003fff
+#define A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A5XX_TEX_CONST_3_ARRAY_PITCH__MASK;
+}
+#define A5XX_TEX_CONST_3_MIN_LAYERSZ__MASK 0x07800000
+#define A5XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT 23
+static inline uint32_t A5XX_TEX_CONST_3_MIN_LAYERSZ(uint32_t val)
+{
+ return ((val >> 12) << A5XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT) & A5XX_TEX_CONST_3_MIN_LAYERSZ__MASK;
+}
+#define A5XX_TEX_CONST_3_TILE_ALL 0x08000000
+#define A5XX_TEX_CONST_3_FLAG 0x10000000
+
+#define REG_A5XX_TEX_CONST_4 0x00000004
+#define A5XX_TEX_CONST_4_BASE_LO__MASK 0xffffffe0
+#define A5XX_TEX_CONST_4_BASE_LO__SHIFT 5
+static inline uint32_t A5XX_TEX_CONST_4_BASE_LO(uint32_t val)
+{
+ return ((val >> 5) << A5XX_TEX_CONST_4_BASE_LO__SHIFT) & A5XX_TEX_CONST_4_BASE_LO__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_5 0x00000005
+#define A5XX_TEX_CONST_5_BASE_HI__MASK 0x0001ffff
+#define A5XX_TEX_CONST_5_BASE_HI__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_5_BASE_HI(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_5_BASE_HI__SHIFT) & A5XX_TEX_CONST_5_BASE_HI__MASK;
+}
+#define A5XX_TEX_CONST_5_DEPTH__MASK 0x3ffe0000
+#define A5XX_TEX_CONST_5_DEPTH__SHIFT 17
+static inline uint32_t A5XX_TEX_CONST_5_DEPTH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_5_DEPTH__SHIFT) & A5XX_TEX_CONST_5_DEPTH__MASK;
+}
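+
+/*
+ * Illustration: the texture base address spans two dwords, and the LO helper
+ * drops the low 5 bits (so the base is assumed to be 32-byte aligned). A
+ * 64-bit iova would typically pack as:
+ *
+ *	dw4 = A5XX_TEX_CONST_4_BASE_LO(lower_32_bits(iova));
+ *	dw5 = A5XX_TEX_CONST_5_BASE_HI(upper_32_bits(iova)) |
+ *	      A5XX_TEX_CONST_5_DEPTH(depth);
+ */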
+
+#define REG_A5XX_TEX_CONST_6 0x00000006
+
+#define REG_A5XX_TEX_CONST_7 0x00000007
+
+#define REG_A5XX_TEX_CONST_8 0x00000008
+
+#define REG_A5XX_TEX_CONST_9 0x00000009
+
+#define REG_A5XX_TEX_CONST_10 0x0000000a
+
+#define REG_A5XX_TEX_CONST_11 0x0000000b
+
+#define REG_A5XX_SSBO_0_0 0x00000000
+#define A5XX_SSBO_0_0_BASE_LO__MASK 0xffffffe0
+#define A5XX_SSBO_0_0_BASE_LO__SHIFT 5
+static inline uint32_t A5XX_SSBO_0_0_BASE_LO(uint32_t val)
+{
+ return ((val >> 5) << A5XX_SSBO_0_0_BASE_LO__SHIFT) & A5XX_SSBO_0_0_BASE_LO__MASK;
+}
+
+#define REG_A5XX_SSBO_0_1 0x00000001
+#define A5XX_SSBO_0_1_PITCH__MASK 0x003fffff
+#define A5XX_SSBO_0_1_PITCH__SHIFT 0
+static inline uint32_t A5XX_SSBO_0_1_PITCH(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_0_1_PITCH__SHIFT) & A5XX_SSBO_0_1_PITCH__MASK;
+}
+
+#define REG_A5XX_SSBO_0_2 0x00000002
+#define A5XX_SSBO_0_2_ARRAY_PITCH__MASK 0x03fff000
+#define A5XX_SSBO_0_2_ARRAY_PITCH__SHIFT 12
+static inline uint32_t A5XX_SSBO_0_2_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A5XX_SSBO_0_2_ARRAY_PITCH__SHIFT) & A5XX_SSBO_0_2_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_SSBO_0_3 0x00000003
+#define A5XX_SSBO_0_3_CPP__MASK 0x0000003f
+#define A5XX_SSBO_0_3_CPP__SHIFT 0
+static inline uint32_t A5XX_SSBO_0_3_CPP(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_0_3_CPP__SHIFT) & A5XX_SSBO_0_3_CPP__MASK;
+}
+
+#define REG_A5XX_SSBO_1_0 0x00000000
+#define A5XX_SSBO_1_0_FMT__MASK 0x0000ff00
+#define A5XX_SSBO_1_0_FMT__SHIFT 8
+static inline uint32_t A5XX_SSBO_1_0_FMT(enum a5xx_tex_fmt val)
+{
+ return ((val) << A5XX_SSBO_1_0_FMT__SHIFT) & A5XX_SSBO_1_0_FMT__MASK;
+}
+#define A5XX_SSBO_1_0_WIDTH__MASK 0xffff0000
+#define A5XX_SSBO_1_0_WIDTH__SHIFT 16
+static inline uint32_t A5XX_SSBO_1_0_WIDTH(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_1_0_WIDTH__SHIFT) & A5XX_SSBO_1_0_WIDTH__MASK;
+}
+
+#define REG_A5XX_SSBO_1_1 0x00000001
+#define A5XX_SSBO_1_1_HEIGHT__MASK 0x0000ffff
+#define A5XX_SSBO_1_1_HEIGHT__SHIFT 0
+static inline uint32_t A5XX_SSBO_1_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_1_1_HEIGHT__SHIFT) & A5XX_SSBO_1_1_HEIGHT__MASK;
+}
+#define A5XX_SSBO_1_1_DEPTH__MASK 0xffff0000
+#define A5XX_SSBO_1_1_DEPTH__SHIFT 16
+static inline uint32_t A5XX_SSBO_1_1_DEPTH(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_1_1_DEPTH__SHIFT) & A5XX_SSBO_1_1_DEPTH__MASK;
+}
+
+#define REG_A5XX_SSBO_2_0 0x00000000
+#define A5XX_SSBO_2_0_BASE_LO__MASK 0xffffffff
+#define A5XX_SSBO_2_0_BASE_LO__SHIFT 0
+static inline uint32_t A5XX_SSBO_2_0_BASE_LO(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_2_0_BASE_LO__SHIFT) & A5XX_SSBO_2_0_BASE_LO__MASK;
+}
+
+#define REG_A5XX_SSBO_2_1 0x00000001
+#define A5XX_SSBO_2_1_BASE_HI__MASK 0xffffffff
+#define A5XX_SSBO_2_1_BASE_HI__SHIFT 0
+static inline uint32_t A5XX_SSBO_2_1_BASE_HI(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_2_1_BASE_HI__SHIFT) & A5XX_SSBO_2_1_BASE_HI__MASK;
+}
+
+#define REG_A5XX_UBO_0 0x00000000
+#define A5XX_UBO_0_BASE_LO__MASK 0xffffffff
+#define A5XX_UBO_0_BASE_LO__SHIFT 0
+static inline uint32_t A5XX_UBO_0_BASE_LO(uint32_t val)
+{
+ return ((val) << A5XX_UBO_0_BASE_LO__SHIFT) & A5XX_UBO_0_BASE_LO__MASK;
+}
+
+#define REG_A5XX_UBO_1 0x00000001
+#define A5XX_UBO_1_BASE_HI__MASK 0x0001ffff
+#define A5XX_UBO_1_BASE_HI__SHIFT 0
+static inline uint32_t A5XX_UBO_1_BASE_HI(uint32_t val)
+{
+ return ((val) << A5XX_UBO_1_BASE_HI__SHIFT) & A5XX_UBO_1_BASE_HI__MASK;
+}
+
+#endif /* A5XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
new file mode 100644
index 000000000..6bd397a85
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/debugfs.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
+#include "a5xx_gpu.h"
+
+static void pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
+{
+ int i;
+
+ drm_printf(p, "PFP state:\n");
+
+ for (i = 0; i < 36; i++) {
+ gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, i);
+ drm_printf(p, " %02x: %08x\n", i,
+ gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA));
+ }
+}
+
+static void me_print(struct msm_gpu *gpu, struct drm_printer *p)
+{
+ int i;
+
+ drm_printf(p, "ME state:\n");
+
+ for (i = 0; i < 29; i++) {
+ gpu_write(gpu, REG_A5XX_CP_ME_STAT_ADDR, i);
+ drm_printf(p, " %02x: %08x\n", i,
+ gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA));
+ }
+}
+
+static void meq_print(struct msm_gpu *gpu, struct drm_printer *p)
+{
+ int i;
+
+ drm_printf(p, "MEQ state:\n");
+ gpu_write(gpu, REG_A5XX_CP_MEQ_DBG_ADDR, 0);
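+	/* (the MEQ_DBG_DATA port appears to auto-increment on each read,
+	 * which is why MEQ_DBG_ADDR only needs to be seeded once; the ROQ
+	 * dump below relies on the same behaviour)
+	 */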
+
+ for (i = 0; i < 64; i++) {
+ drm_printf(p, " %02x: %08x\n", i,
+ gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));
+ }
+}
+
+static void roq_print(struct msm_gpu *gpu, struct drm_printer *p)
+{
+ int i;
+
+ drm_printf(p, "ROQ state:\n");
+ gpu_write(gpu, REG_A5XX_CP_ROQ_DBG_ADDR, 0);
+
+ for (i = 0; i < 512 / 4; i++) {
+ uint32_t val[4];
+ int j;
+ for (j = 0; j < 4; j++)
+ val[j] = gpu_read(gpu, REG_A5XX_CP_ROQ_DBG_DATA);
+ drm_printf(p, " %02x: %08x %08x %08x %08x\n", i,
+ val[0], val[1], val[2], val[3]);
+ }
+}
+
+static int show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_printer p = drm_seq_file_printer(m);
+ void (*show)(struct msm_gpu *gpu, struct drm_printer *p) =
+ node->info_ent->data;
+
+ show(priv->gpu, &p);
+ return 0;
+}
+
+#define ENT(n) { .name = #n, .show = show, .data = n ##_print }
+static struct drm_info_list a5xx_debugfs_list[] = {
+ ENT(pfp),
+ ENT(me),
+ ENT(meq),
+ ENT(roq),
+};
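+
+/*
+ * For reference: ENT(pfp) expands to
+ * { .name = "pfp", .show = show, .data = pfp_print }, pairing each debugfs
+ * file name with the matching _print() dumper above.
+ */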
+
+/* for debugfs files that can be written to, we can't use drm helper: */
+static int
+reset_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EINVAL;
+
+ /* TODO do we care about trying to make sure the GPU is idle?
+ * Since this is just a debug feature limited to CAP_SYS_ADMIN,
+ * maybe it is fine to let the user keep both pieces if they
+ * try to reset an active GPU.
+ */
+
+ mutex_lock(&gpu->lock);
+
+ release_firmware(adreno_gpu->fw[ADRENO_FW_PM4]);
+ adreno_gpu->fw[ADRENO_FW_PM4] = NULL;
+
+ release_firmware(adreno_gpu->fw[ADRENO_FW_PFP]);
+ adreno_gpu->fw[ADRENO_FW_PFP] = NULL;
+
+ if (a5xx_gpu->pm4_bo) {
+ msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ drm_gem_object_put(a5xx_gpu->pm4_bo);
+ a5xx_gpu->pm4_bo = NULL;
+ }
+
+ if (a5xx_gpu->pfp_bo) {
+ msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ drm_gem_object_put(a5xx_gpu->pfp_bo);
+ a5xx_gpu->pfp_bo = NULL;
+ }
+
+ gpu->needs_hw_init = true;
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ gpu->funcs->recover(gpu);
+
+ pm_runtime_put_sync(&gpu->pdev->dev);
+ mutex_unlock(&gpu->lock);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n");
+
+void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
+{
+ struct drm_device *dev;
+
+ if (!minor)
+ return;
+
+ dev = minor->dev;
+
+ drm_debugfs_create_files(a5xx_debugfs_list,
+ ARRAY_SIZE(a5xx_debugfs_list),
+ minor->debugfs_root, minor);
+
+ debugfs_create_file_unsafe("reset", S_IWUGO, minor->debugfs_root, dev,
+ &reset_fops);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
new file mode 100644
index 000000000..895a0e9db
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -0,0 +1,1786 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/cpumask.h>
+#include <linux/qcom_scm.h>
+#include <linux/pm_opp.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/slab.h>
+#include "msm_gem.h"
+#include "msm_mmu.h"
+#include "a5xx_gpu.h"
+
+extern bool hang_debug;
+static void a5xx_dump(struct msm_gpu *gpu);
+
+#define GPU_PAS_ID 13
+
+static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ if (a5xx_gpu->has_whereami) {
+ OUT_PKT7(ring, CP_WHERE_AM_I, 2);
+ OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
+ OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
+ }
+}
+
+void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+ bool sync)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ uint32_t wptr;
+ unsigned long flags;
+
+ /*
+ * Most flush operations need to issue a WHERE_AM_I opcode to sync up
+ * the rptr shadow
+ */
+ if (sync)
+ update_shadow_rptr(gpu, ring);
+
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
+ /* Make sure to wrap wptr if we need to */
+ wptr = get_wptr(ring);
+
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
+
+ /* Make sure everything is posted before making a decision */
+ mb();
+
+ /* Update HW if this is the current ring and we are not in preempt */
+ if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu))
+ gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
+}
+
+static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+ struct msm_ringbuffer *ring = submit->ring;
+ struct msm_gem_object *obj;
+ uint32_t *ptr, dwords;
+	unsigned int i, j;
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ break;
+ fallthrough;
+ case MSM_SUBMIT_CMD_BUF:
+ /* copy commands into RB: */
+ obj = submit->bos[submit->cmd[i].idx].obj;
+ dwords = submit->cmd[i].size;
+
+ ptr = msm_gem_get_vaddr(&obj->base);
+
+ /* _get_vaddr() shouldn't fail at this point,
+ * since we've already mapped it once in
+ * submit_reloc()
+ */
+ if (WARN_ON(IS_ERR_OR_NULL(ptr)))
+ return;
+
+			for (j = 0; j < dwords; j++) {
+ /* normally the OUT_PKTn() would wait
+ * for space for the packet. But since
+ * we just OUT_RING() the whole thing,
+ * need to call adreno_wait_ring()
+ * ourself:
+ */
+ adreno_wait_ring(ring, 1);
+				OUT_RING(ring, ptr[j]);
+ }
+
+ msm_gem_put_vaddr(&obj->base);
+
+ break;
+ }
+ }
+
+ a5xx_flush(gpu, ring, true);
+ a5xx_preempt_trigger(gpu);
+
+	/* we might not necessarily have a cmd from userspace to
+	 * trigger an event to let us know that the submit has
+	 * completed, so do this manually:
+ */
+ a5xx_idle(gpu, ring);
+ ring->memptrs->fence = submit->seqno;
+ msm_gpu_retire(gpu);
+}
+
+static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = submit->ring;
+ unsigned int i, ibs = 0;
+
+ if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
+ gpu->cur_ctx_seqno = 0;
+ a5xx_submit_in_rb(gpu, submit);
+ return;
+ }
+
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+ OUT_RING(ring, 0x02);
+
+ /* Turn off protected mode to write to special registers */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Set the save preemption record for the ring/command */
+ OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ /* Enable local preemption for finegrain preemption */
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+ OUT_RING(ring, 0x1);
+
+ /* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x02);
+
+ /* Submit the commands */
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ break;
+ fallthrough;
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ ibs++;
+ break;
+ }
+
+ /*
+		 * Periodically update the rptr shadow if needed, so that we
+		 * can see partial progress of submits with a large number of
+		 * cmds. Otherwise we could needlessly stall waiting for
+		 * ringbuffer state simply because we are looking at a shadow
+		 * rptr value that has not been updated.
+ */
+ if ((ibs % 32) == 0)
+ update_shadow_rptr(gpu, ring);
+ }
+
+ /*
+ * Write the render mode to NULL (0) to indicate to the CP that the IBs
+ * are done rendering - otherwise a lucky preemption would start
+ * replaying from the last checkpoint
+ */
+ OUT_PKT7(ring, CP_SET_RENDER_MODE, 5);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+
+ /* Turn off IB level preemptions */
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x01);
+
+ /* Write the fence to the scratch register */
+ OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
+ OUT_RING(ring, submit->seqno);
+
+ /*
+ * Execute a CACHE_FLUSH_TS event. This will ensure that the
+ * timestamp is written to the memory and then triggers the interrupt
+ */
+ OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+ OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
+ CP_EVENT_WRITE_0_IRQ);
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, submit->seqno);
+
+ /* Yield the floor on command completion */
+ OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+ /*
+	 * If dword[2:1] are non-zero, they specify an address for the CP to
+	 * write the value of dword[3] to on preemption complete. Write 0 to
+	 * skip the write.
+ */
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x00);
+ /* Data value - not used if the address above is 0 */
+ OUT_RING(ring, 0x01);
+ /* Set bit 0 to trigger an interrupt on preempt complete */
+ OUT_RING(ring, 0x01);
+
+ /* A WHERE_AM_I packet is not needed after a YIELD */
+ a5xx_flush(gpu, ring, false);
+
+ /* Check to see if we need to start preemption */
+ a5xx_preempt_trigger(gpu);
+}
+
+static const struct adreno_five_hwcg_regs {
+ u32 offset;
+ u32 value;
+} a5xx_hwcg[] = {
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
+ {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
+}, a50x_hwcg[] = {
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00FFFFF4},
+ {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+}, a512_hwcg[] = {
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
+ {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+};
+
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const struct adreno_five_hwcg_regs *regs;
+ unsigned int i, sz;
+
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu)) {
+ regs = a50x_hwcg;
+ sz = ARRAY_SIZE(a50x_hwcg);
+ } else if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu)) {
+ regs = a512_hwcg;
+ sz = ARRAY_SIZE(a512_hwcg);
+ } else {
+ regs = a5xx_hwcg;
+ sz = ARRAY_SIZE(a5xx_hwcg);
+ }
+
+ for (i = 0; i < sz; i++)
+ gpu_write(gpu, regs[i].offset,
+ state ? regs[i].value : 0);
+
+ if (adreno_is_a540(adreno_gpu)) {
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU, state ? 0x00000770 : 0);
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_HYST_GPMU, state ? 0x00000004 : 0);
+ }
+
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
+ gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
+}
+
+static int a5xx_me_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT7(ring, CP_ME_INIT, 8);
+
+ OUT_RING(ring, 0x0000002F);
+
+ /* Enable multiple hardware contexts */
+ OUT_RING(ring, 0x00000003);
+
+ /* Enable error detection */
+ OUT_RING(ring, 0x20000000);
+
+ /* Don't enable header dump */
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ /* Specify workarounds for various microcode issues */
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a530(adreno_gpu)) {
+ /* Workaround for token end syncs
+ * Force a WFI after every direct-render 3D mode draw and every
+ * 2D mode 3 draw
+ */
+ OUT_RING(ring, 0x0000000B);
+ } else if (adreno_is_a510(adreno_gpu)) {
+		/* Workaround for token end syncs */
+ OUT_RING(ring, 0x00000001);
+ } else {
+ /* No workarounds enabled */
+ OUT_RING(ring, 0x00000000);
+ }
+
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ a5xx_flush(gpu, ring, true);
+ return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+static int a5xx_preempt_start(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ if (gpu->nr_rings == 1)
+ return 0;
+
+ /* Turn off protected mode to write to special registers */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Set the save preemption record for the ring/command */
+ OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+ OUT_RING(ring, 0x00);
+
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+ OUT_RING(ring, 0x01);
+
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x01);
+
+ /* Yield the floor on command completion */
+ OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x01);
+ OUT_RING(ring, 0x01);
+
+	/* The WHERE_AM_I packet is not needed after a YIELD is issued */
+ a5xx_flush(gpu, ring, false);
+
+ return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+static void a5xx_ucode_check_version(struct a5xx_gpu *a5xx_gpu,
+ struct drm_gem_object *obj)
+{
+ u32 *buf = msm_gem_get_vaddr(obj);
+
+ if (IS_ERR(buf))
+ return;
+
+ /*
+	 * If the lowest nibble of the first dword is 0xa, this microcode has
+	 * been patched. The version lives in the third dword (buf[2]), but we
+	 * only care about the patchlevel, which is its lowest nibble.
+ */
+ if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
+ a5xx_gpu->has_whereami = true;
+
+ msm_gem_put_vaddr(obj);
+}
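+
+/*
+ * Worked example (hypothetical dwords): with buf[0] == 0x0034003a the low
+ * nibble is 0xa, i.e. patched microcode, and with buf[2] == 0x00000771 the
+ * patchlevel nibble is 1, so has_whereami would be set.
+ */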
+
+static int a5xx_ucode_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int ret;
+
+ if (!a5xx_gpu->pm4_bo) {
+ a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu,
+ adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova);
+
+ if (IS_ERR(a5xx_gpu->pm4_bo)) {
+ ret = PTR_ERR(a5xx_gpu->pm4_bo);
+ a5xx_gpu->pm4_bo = NULL;
+ DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PM4: %d\n",
+ ret);
+ return ret;
+ }
+
+ msm_gem_object_set_name(a5xx_gpu->pm4_bo, "pm4fw");
+ }
+
+ if (!a5xx_gpu->pfp_bo) {
+ a5xx_gpu->pfp_bo = adreno_fw_create_bo(gpu,
+ adreno_gpu->fw[ADRENO_FW_PFP], &a5xx_gpu->pfp_iova);
+
+ if (IS_ERR(a5xx_gpu->pfp_bo)) {
+ ret = PTR_ERR(a5xx_gpu->pfp_bo);
+ a5xx_gpu->pfp_bo = NULL;
+ DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PFP: %d\n",
+ ret);
+ return ret;
+ }
+
+ msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw");
+ a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo);
+ }
+
+ gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, a5xx_gpu->pm4_iova);
+
+ gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, a5xx_gpu->pfp_iova);
+
+ return 0;
+}
+
+#define SCM_GPU_ZAP_SHADER_RESUME 0
+
+static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
+ /*
+	 * The Adreno 506 has the CPZ retention feature and doesn't require
+	 * the zap shader to be resumed
+ */
+ if (adreno_is_a506(adreno_gpu))
+ return 0;
+
+ ret = qcom_scm_set_remote_state(SCM_GPU_ZAP_SHADER_RESUME, GPU_PAS_ID);
+ if (ret)
+ DRM_ERROR("%s: zap-shader resume failed: %d\n",
+ gpu->name, ret);
+
+ return ret;
+}
+
+static int a5xx_zap_shader_init(struct msm_gpu *gpu)
+{
+ static bool loaded;
+ int ret;
+
+ /*
+ * If the zap shader is already loaded into memory we just need to kick
+ * the remote processor to reinitialize it
+ */
+ if (loaded)
+ return a5xx_zap_shader_resume(gpu);
+
+ ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
+
+ loaded = !ret;
+ return ret;
+}
+
+#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
+ A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
+ A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
+ A5XX_RBBM_INT_0_MASK_CP_SW | \
+ A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+ A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+ A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
+
+static int a5xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ u32 regbit;
+ int ret;
+
+ gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+
+ if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
+ adreno_is_a540(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
+
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
+
+ /* Enable RBBM error reporting bits */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);
+
+ if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
+ /*
+ * Mask out the activity signals from RB1-3 to avoid false
+ * positives
+ */
+
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
+ 0xF0000000);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
+ 0xFFFFFFFF);
+ }
+
+ /* Enable fault detection */
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
+ (1 << 30) | 0xFFFF);
+
+ /* Turn on performance counters */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);
+
+ /* Select CP0 to always count cycles */
+ gpu_write(gpu, REG_A5XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
+
+ /* Select RBBM0 to countable 6 to get the busy status for devfreq */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
+
+ /* Increase VFD cache access so LRZ and other data gets evicted less */
+ gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
+
+ /* Disable L2 bypass in the UCHE */
+ gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
+ gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
+ gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
+ gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);
+
+ /* Set the GMEM VA range (0 to gpu->gmem) */
+ gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
+ gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00000000);
+ gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
+ 0x00100000 + adreno_gpu->gmem - 1);
+ gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
+
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
+ adreno_is_a510(adreno_gpu)) {
+ gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20);
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
+ else
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A);
+ } else {
+ gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
+ if (adreno_is_a530(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
+ else
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
+ }
+
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
+ (0x100 << 11 | 0x100 << 22));
+ else if (adreno_is_a509(adreno_gpu) || adreno_is_a510(adreno_gpu) ||
+ adreno_is_a512(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
+ (0x200 << 11 | 0x200 << 22));
+ else
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
+ (0x400 << 11 | 0x300 << 22));
+
+ if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
+ gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
+
+ /*
+ * Disable the RB sampler datapath DP2 clock gating optimization
+ * for 1-SP GPUs, as it is enabled by default.
+ */
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
+ adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu))
+ gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, 0, (1 << 9));
+
+ /* Disable UCHE global filter as SP can invalidate/flush independently */
+ gpu_write(gpu, REG_A5XX_UCHE_MODE_CNTL, BIT(29));
+
+ /* Enable USE_RETENTION_FLOPS */
+ gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
+
+ /* Enable ME/PFP split notification */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
+
+ /*
+	 * On A5xx, the CCU can send the context_done event for a particular
+	 * context to the UCHE, and ultimately the CP, while a valid
+	 * transaction for that context is still inside the CCU. This lets the
+	 * CP program config registers, which changes how the pending "valid
+	 * transaction" in the CCU is interpreted and can cause a GPU fault.
+	 * The bug is fixed in the latest A510 revision; to enable the fix,
+	 * bit[11] of RB_DBG_ECO_CNTL needs to be set to 0 (the default is 1,
+	 * i.e. disabled). On older A510 revisions this bit is unused.
+ */
+ if (adreno_is_a510(adreno_gpu))
+ gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, (1 << 11), 0);
+
+ /* Enable HWCG */
+ a5xx_set_hwcg(gpu, true);
+
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
+
+ /* Set the highest bank bit */
+ if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu))
+ regbit = 2;
+ else
+ regbit = 1;
+
+ gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, regbit << 7);
+ gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, regbit << 1);
+
+ if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
+ adreno_is_a540(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, regbit);
+
+ /* Disable All flat shading optimization (ALLFLATOPTDIS) */
+ gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, (1 << 10));
+
+ /* Protect registers from the CP */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
+
+ /* RBBM */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));
+
+ /* Content protect */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
+ ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+ 16));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
+ ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));
+
+ /* CP */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));
+
+ /* RB */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));
+
+ /* VPC */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 16));
+
+ /* UCHE */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
+
+ /* SMMU */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
+ ADRENO_PROTECT_RW(0x10000, 0x8000));
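+
+	/*
+	 * (Aside: all of the ADRENO_PROTECT_RW() ranges above have
+	 * power-of-two lengths, consistent with the helper packing a base
+	 * register and a log2 length into each protect entry.)
+	 */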
+
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
+ /*
+	 * Disable the trusted memory range - we don't actually support secure
+ * memory rendering at this point in time and we don't want to block off
+ * part of the virtual memory space.
+ */
+ gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 0x00000000);
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+
+ /* Put the GPU into 64 bit by default */
+ gpu_write(gpu, REG_A5XX_CP_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_VSC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_GRAS_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_RB_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_PC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_HLSQ_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_VFD_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_VPC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_UCHE_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_SP_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_TPL1_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
+
+ /*
+ * VPC corner case with local memory load kill leads to corrupt
+ * internal state. Normal Disable does not work for all a5x chips.
+ * So do the following setting to disable it.
+ */
+ if (adreno_gpu->info->quirks & ADRENO_QUIRK_LMLOADKILL_DISABLE) {
+ gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, BIT(23));
+ gpu_rmw(gpu, REG_A5XX_HLSQ_DBG_ECO_CNTL, BIT(18), 0);
+ }
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ if (adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))
+ a5xx_gpmu_ucode_init(gpu);
+
+ ret = a5xx_ucode_init(gpu);
+ if (ret)
+ return ret;
+
+ /* Set the ringbuffer address */
+ gpu_write64(gpu, REG_A5XX_CP_RB_BASE, gpu->rb[0]->iova);
+
+ /*
+ * If the microcode supports the WHERE_AM_I opcode then we can use that
+ * in lieu of the RPTR shadow and enable preemption. Otherwise, we
+ * can't safely use the RPTR shadow or preemption. In either case, the
+ * RPTR shadow should be disabled in hardware.
+ */
+ gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ /* Create a privileged buffer for the RPTR shadow */
+ if (a5xx_gpu->has_whereami) {
+ if (!a5xx_gpu->shadow_bo) {
+ a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
+ sizeof(u32) * gpu->nr_rings,
+ MSM_BO_WC | MSM_BO_MAP_PRIV,
+ gpu->aspace, &a5xx_gpu->shadow_bo,
+ &a5xx_gpu->shadow_iova);
+
+ if (IS_ERR(a5xx_gpu->shadow))
+ return PTR_ERR(a5xx_gpu->shadow);
+
+ msm_gem_object_set_name(a5xx_gpu->shadow_bo, "shadow");
+ }
+
+ gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
+ shadowptr(a5xx_gpu, gpu->rb[0]));
+ } else if (gpu->nr_rings > 1) {
+ /* Disable preemption if WHERE_AM_I isn't available */
+ a5xx_preempt_fini(gpu);
+ gpu->nr_rings = 1;
+ }
+
+ a5xx_preempt_hw_init(gpu);
+
+	/* Set the interrupt mask before starting the micro engine */
+ gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
+
+ /* Clear ME_HALT to start the micro engine */
+ gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
+ ret = a5xx_me_init(gpu);
+ if (ret)
+ return ret;
+
+ ret = a5xx_power_init(gpu);
+ if (ret)
+ return ret;
+
+ /*
+ * Send a pipeline event stat to get misbehaving counters to start
+ * ticking correctly
+ */
+ if (adreno_is_a530(adreno_gpu)) {
+ OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
+ OUT_RING(gpu->rb[0], CP_EVENT_WRITE_0_EVENT(STAT_EVENT));
+
+ a5xx_flush(gpu, gpu->rb[0], true);
+ if (!a5xx_idle(gpu, gpu->rb[0]))
+ return -EINVAL;
+ }
+
+ /*
+	 * If the chip that we are using does support loading a zap shader, then
+	 * try to load one into the secure world. If successful we can use the
+	 * CP to switch out of secure mode. If not then we have no recourse but
+	 * to try to switch ourselves out manually. If we guessed wrong then
+	 * access to the RBBM_SECVID_TRUST_CNTL register will be blocked and a
+	 * permissions violation will soon follow.
+ */
+ ret = a5xx_zap_shader_init(gpu);
+ if (!ret) {
+ OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
+ OUT_RING(gpu->rb[0], 0x00000000);
+
+ a5xx_flush(gpu, gpu->rb[0], true);
+ if (!a5xx_idle(gpu, gpu->rb[0]))
+ return -EINVAL;
+ } else if (ret == -ENODEV) {
+ /*
+		 * This device does not use a zap shader (but print a warning
+		 * just in case someone got their DT wrong... hopefully they
+		 * have a debug UART to realize the error of their ways...
+		 * if you mess this up you are about to crash horribly)
+ */
+ dev_warn_once(gpu->dev->dev,
+ "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+ } else {
+ return ret;
+ }
+
+ /* Last step - yield the ringbuffer */
+ a5xx_preempt_start(gpu);
+
+ return 0;
+}
+
+static void a5xx_recover(struct msm_gpu *gpu)
+{
+ int i;
+
+ adreno_dump_info(gpu);
+
+ for (i = 0; i < 8; i++) {
+ printk("CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(i)));
+ }
+
+ if (hang_debug)
+ a5xx_dump(gpu);
+
+ gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
+ gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
+ gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);
+ adreno_recover(gpu);
+}
+
+static void a5xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ a5xx_preempt_fini(gpu);
+
+ if (a5xx_gpu->pm4_bo) {
+ msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ drm_gem_object_put(a5xx_gpu->pm4_bo);
+ }
+
+ if (a5xx_gpu->pfp_bo) {
+ msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ drm_gem_object_put(a5xx_gpu->pfp_bo);
+ }
+
+ if (a5xx_gpu->gpmu_bo) {
+ msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+ drm_gem_object_put(a5xx_gpu->gpmu_bo);
+ }
+
+ if (a5xx_gpu->shadow_bo) {
+ msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace);
+ drm_gem_object_put(a5xx_gpu->shadow_bo);
+ }
+
+ adreno_gpu_cleanup(adreno_gpu);
+ kfree(a5xx_gpu);
+}
+
+static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
+{
+ if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY)
+ return false;
+
+ /*
+ * Nearly every abnormality ends up pausing the GPU and triggering a
+ * fault so we can safely just watch for this one interrupt to fire
+ */
+ return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) &
+ A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
+}
+
+bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ if (ring != a5xx_gpu->cur_ring) {
+ WARN(1, "Tried to idle a non-current ringbuffer\n");
+ return false;
+ }
+
+ /* wait for CP to drain ringbuffer: */
+ if (!adreno_idle(gpu, ring))
+ return false;
+
+ if (spin_until(_a5xx_check_idle(gpu))) {
+ DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
+ gpu->name, __builtin_return_address(0),
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS),
+ gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A5XX_CP_RB_WPTR));
+ return false;
+ }
+
+ return true;
+}
+
+static int a5xx_fault_handler(void *arg, unsigned long iova, int flags, void *data)
+{
+ struct msm_gpu *gpu = arg;
+ pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
+ iova, flags,
+ gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)),
+ gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)),
+ gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)),
+ gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7)));
+
+ return 0;
+}
+
+static void a5xx_cp_err_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
+
+ if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
+ u32 val;
+
+ gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);
+
+ /*
+ * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
+ * read it twice
+ */
+
+ gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+ val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+
+ dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n",
+ val);
+ }
+
+ if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR)
+ dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n",
+ gpu_read(gpu, REG_A5XX_CP_HW_FAULT));
+
+ if (status & A5XX_CP_INT_CP_DMA_ERROR)
+ dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n");
+
+ if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
+ u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS);
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
+ val & (1 << 24) ? "WRITE" : "READ",
+ (val & 0xFFFFF) >> 2, val);
+ }
+
+ if (status & A5XX_CP_INT_CP_AHB_ERROR) {
+ u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT);
+ const char *access[16] = { "reserved", "reserved",
+ "timestamp lo", "timestamp hi", "pfp read", "pfp write",
+ "", "", "me read", "me write", "", "", "crashdump read",
+ "crashdump write" };
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n",
+ status & 0xFFFFF, access[(status >> 24) & 0xF],
+			!!(status & (1 << 31)), status);
+ }
+}
+
+static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
+{
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
+ u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n",
+ val & (1 << 28) ? "WRITE" : "READ",
+ (val & 0xFFFFF) >> 2, (val >> 20) & 0x3,
+ (val >> 24) & 0xF);
+
+ /* Clear the error */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));
+
+ /* Clear the interrupt */
+ gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
+ A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
+ }
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n");
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n");
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n");
+}
+
+static void a5xx_uche_err_irq(struct msm_gpu *gpu)
+{
+ uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI);
+
+ addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO);
+
+ dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n",
+ addr);
+}
+
+static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
+{
+ dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
+}
+
+static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+
+ /*
+ * If stalled on SMMU fault, we could trip the GPU's hang detection,
+ * but the fault handler will trigger the devcore dump, and we want
+ * to otherwise resume normally rather than killing the submit, so
+ * just bail.
+ */
+ if (gpu_read(gpu, REG_A5XX_RBBM_STATUS3) & BIT(24))
+ return;
+
+ DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+ ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
+ gpu_read64(gpu, REG_A5XX_CP_IB1_BASE),
+ gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
+ gpu_read64(gpu, REG_A5XX_CP_IB2_BASE),
+ gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
+
+ /* Turn off the hangcheck timer to keep it from bothering us */
+ del_timer(&gpu->hangcheck_timer);
+
+ kthread_queue_work(gpu->worker, &gpu->recover_work);
+}
+
+#define RBBM_ERROR_MASK \
+ (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
+ A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
+
+static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
+
+ /*
+ * Clear all the interrupts except RBBM_AHB_ERROR - if we clear it
+ * before the source is cleared the interrupt will storm.
+ */
+ gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
+ status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
+
+ if (priv->disable_err_irq) {
+ status &= A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS |
+ A5XX_RBBM_INT_0_MASK_CP_SW;
+ }
+
+ /* Pass status to a5xx_rbbm_err_irq because we've already cleared it */
+ if (status & RBBM_ERROR_MASK)
+ a5xx_rbbm_err_irq(gpu, status);
+
+ if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
+ a5xx_cp_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT)
+ a5xx_fault_detect_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
+ a5xx_uche_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
+ a5xx_gpmu_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) {
+ a5xx_preempt_trigger(gpu);
+ msm_gpu_retire(gpu);
+ }
+
+ if (status & A5XX_RBBM_INT_0_MASK_CP_SW)
+ a5xx_preempt_irq(gpu);
+
+ return IRQ_HANDLED;
+}
+
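+/*
+ * Register ranges captured in GPU state dumps. Each pair of values below is
+ * an inclusive [start, end] dword-offset range, and the list is terminated
+ * with ~0, which is how the common adreno dump code is expected to walk
+ * adreno_gpu->registers.
+ */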
+static const u32 a5xx_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
+ 0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
+ 0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
+ 0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
+ 0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
+ 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
+ 0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
+ 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
+ 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
+ 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
+ 0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
+ 0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
+ 0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
+ 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
+ 0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
+ 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
+ 0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
+ 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
+ 0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
+ 0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
+ 0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
+ 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
+ 0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
+ 0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
+ 0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
+ 0xEAA5, 0xEAC2, 0xA800, 0xA800, 0xA820, 0xA828, 0xA840, 0xA87D,
+ 0XA880, 0xA88D, 0xA890, 0xA8A3, 0xA8D0, 0xA8D8, 0xA8E0, 0xA8F5,
+ 0xAC60, 0xAC60, ~0,
+};
+
+static void a5xx_dump(struct msm_gpu *gpu)
+{
+ DRM_DEV_INFO(gpu->dev->dev, "status: %08x\n",
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+static int a5xx_pm_resume(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
+ /* Turn on the core power */
+ ret = msm_gpu_pm_resume(gpu);
+ if (ret)
+ return ret;
+
+	/* Adreno 506, 508, 509, 510 and 512 need manual RBBM suspend/resume control */
+ if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) {
+ /* Halt the sp_input_clk at HM level */
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055);
+ a5xx_set_hwcg(gpu, true);
+ /* Turn on sp_input_clk at HM level */
+ gpu_rmw(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xff, 0);
+ return 0;
+ }
+
+	/* Turn on the RBCCU domain first to limit the chances of voltage droop */
+ gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
+
+ /* Wait 3 usecs before polling */
+ udelay(3);
+
+ ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
+ (1 << 20), (1 << 20));
+ if (ret) {
+ DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
+ gpu->name,
+ gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
+ return ret;
+ }
+
+ /* Turn on the SP domain */
+ gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
+ ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
+ (1 << 20), (1 << 20));
+ if (ret)
+ DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
+ gpu->name);
+
+ return ret;
+}
+
+static int a5xx_pm_suspend(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ u32 mask = 0xf;
+ int i, ret;
+
+ /* A506, A508, A510 have 3 XIN ports in VBIF */
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
+ adreno_is_a510(adreno_gpu))
+ mask = 0x7;
+
+ /* Clear the VBIF pipe before shutting down */
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, mask);
+ spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) &
+ mask) == mask);
+
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ /*
+	 * Reset the VBIF before power collapse to avoid an issue with FIFO
+	 * entries on Adreno A510 and A530 (the other chips tend to lock up
+	 * if reset here)
+ */
+ if (adreno_is_a510(adreno_gpu) || adreno_is_a530(adreno_gpu)) {
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
+ }
+
+ ret = msm_gpu_pm_suspend(gpu);
+ if (ret)
+ return ret;
+
+ if (a5xx_gpu->has_whereami)
+ for (i = 0; i < gpu->nr_rings; i++)
+ a5xx_gpu->shadow[i] = 0;
+
+ return 0;
+}
+
+static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO);
+
+ return 0;
+}
+
+struct a5xx_crashdumper {
+ void *ptr;
+ struct drm_gem_object *bo;
+ u64 iova;
+};
+
+struct a5xx_gpu_state {
+ struct msm_gpu_state base;
+ u32 *hlsqregs;
+};
+
+static int a5xx_crashdumper_init(struct msm_gpu *gpu,
+ struct a5xx_crashdumper *dumper)
+{
+ dumper->ptr = msm_gem_kernel_new(gpu->dev,
+ SZ_1M, MSM_BO_WC, gpu->aspace,
+ &dumper->bo, &dumper->iova);
+
+ if (!IS_ERR(dumper->ptr))
+ msm_gem_object_set_name(dumper->bo, "crashdump");
+
+ return PTR_ERR_OR_ZERO(dumper->ptr);
+}
+
+static int a5xx_crashdumper_run(struct msm_gpu *gpu,
+ struct a5xx_crashdumper *dumper)
+{
+ u32 val;
+
+ if (IS_ERR_OR_NULL(dumper->ptr))
+ return -EINVAL;
+
+ gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO, dumper->iova);
+
+ gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);
+
+ return gpu_poll_timeout(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, val,
+ val & 0x04, 100, 10000);
+}
+
+/*
+ * This is a list of the registers that need to be read through the HLSQ
+ * aperture by the crashdumper. These are not normally accessible from
+ * the CPU on a secure platform.
+ */
+static const struct {
+ u32 type;
+ u32 regoffset;
+ u32 count;
+} a5xx_hlsq_aperture_regs[] = {
+	{ 0x35, 0xe00, 0x32 },   /* HLSQ non-context */
+ { 0x31, 0x2080, 0x1 }, /* HLSQ 2D context 0 */
+ { 0x33, 0x2480, 0x1 }, /* HLSQ 2D context 1 */
+ { 0x32, 0xe780, 0x62 }, /* HLSQ 3D context 0 */
+ { 0x34, 0xef80, 0x62 }, /* HLSQ 3D context 1 */
+ { 0x3f, 0x0ec0, 0x40 }, /* SP non-context */
+ { 0x3d, 0x2040, 0x1 }, /* SP 2D context 0 */
+ { 0x3b, 0x2440, 0x1 }, /* SP 2D context 1 */
+ { 0x3e, 0xe580, 0x170 }, /* SP 3D context 0 */
+ { 0x3c, 0xed80, 0x170 }, /* SP 3D context 1 */
+ { 0x3a, 0x0f00, 0x1c }, /* TP non-context */
+ { 0x38, 0x2000, 0xa }, /* TP 2D context 0 */
+ { 0x36, 0x2400, 0xa }, /* TP 2D context 1 */
+ { 0x39, 0xe700, 0x80 }, /* TP 3D context 0 */
+ { 0x37, 0xef00, 0x80 }, /* TP 3D context 1 */
+};
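+
+/*
+ * A rough sketch of the crashdump script format, as inferred from the loop in
+ * a5xx_gpu_state_get_hlsq_regs() below. The script is a sequence of two-qword
+ * entries:
+ *
+ *   { data, (reg << 44) | (1 << 21) | 1 }  write 'data' to register 'reg'
+ *   { iova, (reg << 44) | count }          read 'count' dwords from 'reg'
+ *                                          into memory at 'iova'
+ *
+ * terminated by a pair of zero qwords. The bit-21 "write" flag and the field
+ * positions are assumptions based purely on how the script is assembled here.
+ */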
+
+static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
+ struct a5xx_gpu_state *a5xx_state)
+{
+ struct a5xx_crashdumper dumper = { 0 };
+ u32 offset, count = 0;
+ u64 *ptr;
+ int i;
+
+ if (a5xx_crashdumper_init(gpu, &dumper))
+ return;
+
+ /* The script will be written at offset 0 */
+ ptr = dumper.ptr;
+
+ /* Start writing the data at offset 256k */
+ offset = dumper.iova + (256 * SZ_1K);
+
+ /* Count how many additional registers to get from the HLSQ aperture */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++)
+ count += a5xx_hlsq_aperture_regs[i].count;
+
+ a5xx_state->hlsqregs = kcalloc(count, sizeof(u32), GFP_KERNEL);
+ if (!a5xx_state->hlsqregs)
+ return;
+
+ /* Build the crashdump script */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
+ u32 type = a5xx_hlsq_aperture_regs[i].type;
+ u32 c = a5xx_hlsq_aperture_regs[i].count;
+
+ /* Write the register to select the desired bank */
+ *ptr++ = ((u64) type << 8);
+ *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_READ_SEL) << 44) |
+ (1 << 21) | 1;
+
+ *ptr++ = offset;
+ *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE) << 44)
+ | c;
+
+ offset += c * sizeof(u32);
+ }
+
+ /* Write two zeros to close off the script */
+ *ptr++ = 0;
+ *ptr++ = 0;
+
+ if (a5xx_crashdumper_run(gpu, &dumper)) {
+ kfree(a5xx_state->hlsqregs);
+ msm_gem_kernel_put(dumper.bo, gpu->aspace);
+ return;
+ }
+
+ /* Copy the data from the crashdumper to the state */
+ memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
+ count * sizeof(u32));
+
+ msm_gem_kernel_put(dumper.bo, gpu->aspace);
+}
+
+static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct a5xx_gpu_state *a5xx_state = kzalloc(sizeof(*a5xx_state),
+ GFP_KERNEL);
+ bool stalled = !!(gpu_read(gpu, REG_A5XX_RBBM_STATUS3) & BIT(24));
+
+ if (!a5xx_state)
+ return ERR_PTR(-ENOMEM);
+
+ /* Temporarily disable hardware clock gating before reading the hw */
+ a5xx_set_hwcg(gpu, false);
+
+ /* First get the generic state from the adreno core */
+ adreno_gpu_state_get(gpu, &(a5xx_state->base));
+
+ a5xx_state->base.rbbm_status = gpu_read(gpu, REG_A5XX_RBBM_STATUS);
+
+ /*
+ * Get the HLSQ regs with the help of the crashdumper, but only if
+ * we are not stalled in an iommu fault (in which case the crashdumper
+ * would not have access to memory)
+ */
+ if (!stalled)
+ a5xx_gpu_state_get_hlsq_regs(gpu, a5xx_state);
+
+ a5xx_set_hwcg(gpu, true);
+
+ return &a5xx_state->base;
+}
+
+static void a5xx_gpu_state_destroy(struct kref *kref)
+{
+ struct msm_gpu_state *state = container_of(kref,
+ struct msm_gpu_state, ref);
+ struct a5xx_gpu_state *a5xx_state = container_of(state,
+ struct a5xx_gpu_state, base);
+
+ kfree(a5xx_state->hlsqregs);
+
+ adreno_gpu_state_destroy(state);
+ kfree(a5xx_state);
+}
+
+static int a5xx_gpu_state_put(struct msm_gpu_state *state)
+{
+ if (IS_ERR_OR_NULL(state))
+ return 1;
+
+ return kref_put(&state->ref, a5xx_gpu_state_destroy);
+}
+
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+static void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ int i, j;
+ u32 pos = 0;
+ struct a5xx_gpu_state *a5xx_state = container_of(state,
+ struct a5xx_gpu_state, base);
+
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ adreno_show(gpu, state, p);
+
+ /* Dump the additional a5xx HLSQ registers */
+ if (!a5xx_state->hlsqregs)
+ return;
+
+ drm_printf(p, "registers-hlsq:\n");
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
+ u32 o = a5xx_hlsq_aperture_regs[i].regoffset;
+ u32 c = a5xx_hlsq_aperture_regs[i].count;
+
+ for (j = 0; j < c; j++, pos++, o++) {
+ /*
+ * To keep the crashdump simple we pull the entire range
+ * for each register type but not all of the registers
+ * in the range are valid. Fortunately invalid registers
+ * stick out like a sore thumb with a value of
+ * 0xdeadbeef
+ */
+ if (a5xx_state->hlsqregs[pos] == 0xdeadbeef)
+ continue;
+
+ drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
+ o << 2, a5xx_state->hlsqregs[pos]);
+ }
+ }
+}
+#endif
+
+static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ return a5xx_gpu->cur_ring;
+}
+
+static u64 a5xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
+{
+ u64 busy_cycles;
+
+ busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO);
+ *out_sample_rate = clk_get_rate(gpu->core_clk);
+
+ return busy_cycles;
+}
+
+static uint32_t a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ if (a5xx_gpu->has_whereami)
+ return a5xx_gpu->shadow[ring->id];
+
+ return ring->memptrs->rptr = gpu_read(gpu, REG_A5XX_CP_RB_RPTR);
+}
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a5xx_hw_init,
+ .pm_suspend = a5xx_pm_suspend,
+ .pm_resume = a5xx_pm_resume,
+ .recover = a5xx_recover,
+ .submit = a5xx_submit,
+ .active_ring = a5xx_active_ring,
+ .irq = a5xx_irq,
+ .destroy = a5xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = a5xx_show,
+#endif
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = a5xx_debugfs_init,
+#endif
+ .gpu_busy = a5xx_gpu_busy,
+ .gpu_state_get = a5xx_gpu_state_get,
+ .gpu_state_put = a5xx_gpu_state_put,
+ .create_address_space = adreno_iommu_create_address_space,
+ .get_rptr = a5xx_get_rptr,
+ },
+ .get_timestamp = a5xx_get_timestamp,
+};
+
+static void check_speed_bin(struct device *dev)
+{
+ struct nvmem_cell *cell;
+ u32 val;
+
+ /*
+	 * If the OPP table specifies an opp-supported-hw property then we have
+	 * to set something with dev_pm_opp_set_supported_hw() or the table
+	 * doesn't get populated, so pick an arbitrary value that should
+	 * ensure the default frequencies are selected but not conflict with any
+	 * actual bins.
+ */
+ val = 0x80;
+
+ cell = nvmem_cell_get(dev, "speed_bin");
+
+ if (!IS_ERR(cell)) {
+ void *buf = nvmem_cell_read(cell, NULL);
+
+ if (!IS_ERR(buf)) {
+ u8 bin = *((u8 *) buf);
+
+ val = (1 << bin);
+ kfree(buf);
+ }
+
+ nvmem_cell_put(cell);
+ }
+
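+	/*
+	 * At this point val is either the 0x80 fallback or BIT(bin); e.g. a
+	 * fused speed bin of 3 yields val = 0x8, which should enable only the
+	 * OPP entries whose opp-supported-hw mask has bit 3 set.
+	 */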
+ devm_pm_opp_set_supported_hw(dev, &val, 1);
+}
+
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct adreno_platform_config *config = pdev->dev.platform_data;
+ struct a5xx_gpu *a5xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ unsigned int nr_rings;
+ int ret;
+
+ if (!pdev) {
+ DRM_DEV_ERROR(dev->dev, "No A5XX device is defined\n");
+ return ERR_PTR(-ENXIO);
+ }
+
+ a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
+ if (!a5xx_gpu)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu = &a5xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ adreno_gpu->registers = a5xx_registers;
+
+ a5xx_gpu->lm_leakage = 0x4E001A;
+
+ check_speed_bin(&pdev->dev);
+
+ nr_rings = 4;
+
+ if (adreno_cmp_rev(ADRENO_REV(5, 1, 0, ANY_ID), config->rev))
+ nr_rings = 1;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, nr_rings);
+ if (ret) {
+ a5xx_destroy(&(a5xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ if (gpu->aspace)
+ msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);
+
+ /* Set up the preemption specific bits and pieces for each ringbuffer */
+ a5xx_preempt_init(gpu);
+
+ return gpu;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
new file mode 100644
index 000000000..c7187bcc5
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ */
+#ifndef __A5XX_GPU_H__
+#define __A5XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* Bringing over the hack from the previous targets */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a5xx.xml.h"
+
+struct a5xx_gpu {
+ struct adreno_gpu base;
+
+ struct drm_gem_object *pm4_bo;
+ uint64_t pm4_iova;
+
+ struct drm_gem_object *pfp_bo;
+ uint64_t pfp_iova;
+
+ struct drm_gem_object *gpmu_bo;
+ uint64_t gpmu_iova;
+ uint32_t gpmu_dwords;
+
+ uint32_t lm_leakage;
+
+ struct msm_ringbuffer *cur_ring;
+ struct msm_ringbuffer *next_ring;
+
+ struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
+ struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS];
+ struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
+ uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
+
+ atomic_t preempt_state;
+ struct timer_list preempt_timer;
+
+ struct drm_gem_object *shadow_bo;
+ uint64_t shadow_iova;
+ uint32_t *shadow;
+
+ /* True if the microcode supports the WHERE_AM_I opcode */
+ bool has_whereami;
+};
+
+#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
+
+#ifdef CONFIG_DEBUG_FS
+void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
+#endif
+
+/*
+ * In order to do lockless preemption we use a simple state machine to progress
+ * through the process (see the illustrative sequence below the enum).
+ *
+ * PREEMPT_NONE - No preemption in progress. Next state: START.
+ * PREEMPT_START - The trigger is evaluating if preemption is possible. Next
+ * states: TRIGGERED, NONE.
+ * PREEMPT_ABORT - An intermediate state before moving back to NONE. Next
+ * state: NONE.
+ * PREEMPT_TRIGGERED - A preemption has been executed on the hardware. Next
+ * states: FAULTED, PENDING.
+ * PREEMPT_FAULTED - A preemption timed out (never completed). This will
+ * trigger recovery. Next state: N/A.
+ * PREEMPT_PENDING - Preemption complete interrupt fired - the callback is
+ * checking the success of the operation. Next states: FAULTED, NONE.
+ */
+
+enum preempt_state {
+ PREEMPT_NONE = 0,
+ PREEMPT_START,
+ PREEMPT_ABORT,
+ PREEMPT_TRIGGERED,
+ PREEMPT_FAULTED,
+ PREEMPT_PENDING,
+};
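+
+/*
+ * A minimal sketch of the happy-path transitions, assuming the trigger/irq
+ * flow implemented in a5xx_preempt.c:
+ *
+ *   try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START);
+ *   ... pick the next ring, write the incoming record address ...
+ *   set_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED);
+ *   ... CP_SW interrupt fires ...
+ *   try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING);
+ *   set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+ */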
+
+/*
+ * struct a5xx_preempt_record is a shared buffer between the microcode and the
+ * CPU to store the state for preemption. The record itself is much larger
+ * (64k) but most of that is used by the CP for storage.
+ *
+ * There is a preemption record assigned per ringbuffer. When the CPU triggers a
+ * preemption, it fills out the record with the useful information (wptr, ring
+ * base, etc) and the microcode uses that information to set up the CP following
+ * the preemption. When a ring is switched out, the CP will save the ringbuffer
+ * state back to the record. In this way, once the records are properly set up
+ * the CPU can quickly switch back and forth between ringbuffers by only
+ * updating a few registers (often only the wptr).
+ *
+ * These are the CPU aware registers in the record:
+ * @magic: Must always be 0x27C4BAFC
+ * @info: Type of the record - written 0 by the CPU, updated by the CP
+ * @data: Data field from SET_RENDER_MODE or a checkpoint. Written and used by
+ * the CP
+ * @cntl: Value of RB_CNTL written by CPU, save/restored by CP
+ * @rptr: Value of RB_RPTR written by CPU, save/restored by CP
+ * @wptr: Value of RB_WPTR written by CPU, save/restored by CP
+ * @rptr_addr: Value of RB_RPTR_ADDR written by CPU, save/restored by CP
+ * @rbase: Value of RB_BASE written by CPU, save/restored by CP
+ * @counter: GPU address of the storage area for the performance counters
+ */
+struct a5xx_preempt_record {
+ uint32_t magic;
+ uint32_t info;
+ uint32_t data;
+ uint32_t cntl;
+ uint32_t rptr;
+ uint32_t wptr;
+ uint64_t rptr_addr;
+ uint64_t rbase;
+ uint64_t counter;
+};
+
+/* Magic identifier for the preemption record */
+#define A5XX_PREEMPT_RECORD_MAGIC 0x27C4BAFCUL
+
+/*
+ * Even though the structure above is only a few bytes, we need a full 64k to
+ * store the entire preemption record from the CP
+ */
+#define A5XX_PREEMPT_RECORD_SIZE (64 * 1024)
+
+/*
+ * The preemption counter block is a storage area for the value of the
+ * preemption counters that are saved immediately before context switch. We
+ * append it on to the end of the allocation for the preemption record.
+ */
+#define A5XX_PREEMPT_COUNTER_SIZE (16 * 4)
+
+
+int a5xx_power_init(struct msm_gpu *gpu);
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
+
+static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
+ uint32_t reg, uint32_t mask, uint32_t value)
+{
+ while (usecs--) {
+ udelay(1);
+ if ((gpu_read(gpu, reg) & mask) == value)
+ return 0;
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
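+
+/*
+ * For example, the a5xx resume path uses spin_usecs() to poll for a GDSC to
+ * come up:
+ *
+ *   ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
+ *           (1 << 20), (1 << 20));
+ *
+ * which busy-waits for up to 20 usecs for bit 20 of the status register to
+ * latch and returns -ETIMEDOUT otherwise.
+ */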
+
+#define shadowptr(a5xx_gpu, ring) ((a5xx_gpu)->shadow_iova + \
+ ((ring)->id * sizeof(uint32_t)))
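+
+/*
+ * The RPTR shadow buffer holds one u32 per ring (it is allocated with size
+ * sizeof(u32) * gpu->nr_rings in a5xx_hw_init()), so shadowptr() for ring 2,
+ * for example, resolves to shadow_iova + 8.
+ */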
+
+bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
+
+void a5xx_preempt_init(struct msm_gpu *gpu);
+void a5xx_preempt_hw_init(struct msm_gpu *gpu);
+void a5xx_preempt_trigger(struct msm_gpu *gpu);
+void a5xx_preempt_irq(struct msm_gpu *gpu);
+void a5xx_preempt_fini(struct msm_gpu *gpu);
+
+void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, bool sync);
+
+/* Return true if we are in a preempt state */
+static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
+{
+ int preempt_state = atomic_read(&a5xx_gpu->preempt_state);
+
+ return !(preempt_state == PREEMPT_NONE ||
+ preempt_state == PREEMPT_ABORT);
+}
+
+#endif /* __A5XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
new file mode 100644
index 000000000..0e63a1429
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/pm_opp.h>
+#include "a5xx_gpu.h"
+
+/*
+ * The GPMU data block is a block of shared registers that can be used to
+ * communicate back and forth. These "registers" are defined by convention
+ * with the GPMU firmware and are not bound to any specific hardware design.
+ */
+
+#define AGC_INIT_BASE REG_A5XX_GPMU_DATA_RAM_BASE
+#define AGC_INIT_MSG_MAGIC (AGC_INIT_BASE + 5)
+#define AGC_MSG_BASE (AGC_INIT_BASE + 7)
+
+#define AGC_MSG_STATE (AGC_MSG_BASE + 0)
+#define AGC_MSG_COMMAND (AGC_MSG_BASE + 1)
+#define AGC_MSG_PAYLOAD_SIZE (AGC_MSG_BASE + 3)
+#define AGC_MSG_PAYLOAD(_o) ((AGC_MSG_BASE + 5) + (_o))
+
+#define AGC_POWER_CONFIG_PRODUCTION_ID 1
+#define AGC_INIT_MSG_VALUE 0xBABEFACE
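+
+/*
+ * A rough layout of the AGC mailbox in GPMU data RAM, as implied by the
+ * offsets above (dword offsets relative to AGC_INIT_BASE):
+ *
+ *   +5   AGC_INIT_MSG_MAGIC    written last, with 0xBABEFACE, to commit
+ *   +7   AGC_MSG_STATE
+ *   +8   AGC_MSG_COMMAND
+ *   +10  AGC_MSG_PAYLOAD_SIZE  payload size in bytes
+ *   +12  AGC_MSG_PAYLOAD(0)    first payload dword
+ */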
+
+/* AGC_LM_CONFIG (A540+) */
+#define AGC_LM_CONFIG (136/4)
+#define AGC_LM_CONFIG_GPU_VERSION_SHIFT 17
+#define AGC_LM_CONFIG_ENABLE_GPMU_ADAPTIVE 1
+#define AGC_LM_CONFIG_THROTTLE_DISABLE (2 << 8)
+#define AGC_LM_CONFIG_ISENSE_ENABLE (1 << 4)
+#define AGC_LM_CONFIG_ENABLE_ERROR (3 << 4)
+#define AGC_LM_CONFIG_LLM_ENABLED (1 << 16)
+#define AGC_LM_CONFIG_BCL_DISABLED (1 << 24)
+
+#define AGC_LEVEL_CONFIG (140/4)
+
+static struct {
+ uint32_t reg;
+ uint32_t value;
+} a5xx_sequence_regs[] = {
+ { 0xB9A1, 0x00010303 },
+ { 0xB9A2, 0x13000000 },
+ { 0xB9A3, 0x00460020 },
+ { 0xB9A4, 0x10000000 },
+ { 0xB9A5, 0x040A1707 },
+ { 0xB9A6, 0x00010000 },
+ { 0xB9A7, 0x0E000904 },
+ { 0xB9A8, 0x10000000 },
+ { 0xB9A9, 0x01165000 },
+ { 0xB9AA, 0x000E0002 },
+ { 0xB9AB, 0x03884141 },
+ { 0xB9AC, 0x10000840 },
+ { 0xB9AD, 0x572A5000 },
+ { 0xB9AE, 0x00000003 },
+ { 0xB9AF, 0x00000000 },
+ { 0xB9B0, 0x10000000 },
+ { 0xB828, 0x6C204010 },
+ { 0xB829, 0x6C204011 },
+ { 0xB82A, 0x6C204012 },
+ { 0xB82B, 0x6C204013 },
+ { 0xB82C, 0x6C204014 },
+ { 0xB90F, 0x00000004 },
+ { 0xB910, 0x00000002 },
+ { 0xB911, 0x00000002 },
+ { 0xB912, 0x00000002 },
+ { 0xB913, 0x00000002 },
+ { 0xB92F, 0x00000004 },
+ { 0xB930, 0x00000005 },
+ { 0xB931, 0x00000005 },
+ { 0xB932, 0x00000005 },
+ { 0xB933, 0x00000005 },
+ { 0xB96F, 0x00000001 },
+ { 0xB970, 0x00000003 },
+ { 0xB94F, 0x00000004 },
+ { 0xB950, 0x0000000B },
+ { 0xB951, 0x0000000B },
+ { 0xB952, 0x0000000B },
+ { 0xB953, 0x0000000B },
+ { 0xB907, 0x00000019 },
+ { 0xB927, 0x00000019 },
+ { 0xB947, 0x00000019 },
+ { 0xB967, 0x00000019 },
+ { 0xB987, 0x00000019 },
+ { 0xB906, 0x00220001 },
+ { 0xB926, 0x00220001 },
+ { 0xB946, 0x00220001 },
+ { 0xB966, 0x00220001 },
+ { 0xB986, 0x00300000 },
+ { 0xAC40, 0x0340FF41 },
+ { 0xAC41, 0x03BEFED0 },
+ { 0xAC42, 0x00331FED },
+ { 0xAC43, 0x021FFDD3 },
+ { 0xAC44, 0x5555AAAA },
+ { 0xAC45, 0x5555AAAA },
+ { 0xB9BA, 0x00000008 },
+};
+
+/*
+ * Get the actual voltage value for the operating point at the specified
+ * frequency
+ */
+static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct dev_pm_opp *opp;
+ u32 ret = 0;
+
+ opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true);
+
+ if (!IS_ERR(opp)) {
+ ret = dev_pm_opp_get_voltage(opp) / 1000;
+ dev_pm_opp_put(opp);
+ }
+
+ return ret;
+}
+
+/* Setup thermal limit management */
+static void a530_lm_setup(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ unsigned int i;
+
+ /* Write the block of sequence registers */
+ for (i = 0; i < ARRAY_SIZE(a5xx_sequence_regs); i++)
+ gpu_write(gpu, a5xx_sequence_regs[i].reg,
+ a5xx_sequence_regs[i].value);
+
+ /* Hard code the A530 GPU thermal sensor ID for the GPMU */
+ gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, 0x60007);
+ gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
+ gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);
+
+ /* Until we get clock scaling 0 is always the active power level */
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);
+
+ gpu_write(gpu, REG_A5XX_GPMU_BASE_LEAKAGE, a5xx_gpu->lm_leakage);
+
+ /* The threshold is fixed at 6000 for A530 */
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000);
+
+ gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+ gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x00201FF1);
+
+ /* Write the voltage table */
+ gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+ gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x201FF1);
+
+ gpu_write(gpu, AGC_MSG_STATE, 1);
+ gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
+
+ /* Write the max power - hard coded to 5448 for A530 */
+ gpu_write(gpu, AGC_MSG_PAYLOAD(0), 5448);
+ gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
+
+ /*
+ * For now just write the one voltage level - we will do more when we
+ * can do scaling
+ */
+ gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));
+ gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);
+
+ gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, 4 * sizeof(uint32_t));
+ gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
+}
+
+#define PAYLOAD_SIZE(_size) ((_size) * sizeof(u32))
+#define LM_DCVS_LIMIT 1
+#define LEVEL_CONFIG ~(0x303)
+
+static void a540_lm_setup(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ u32 config;
+
+ /* The battery current limiter isn't enabled for A540 */
+ config = AGC_LM_CONFIG_BCL_DISABLED;
+ config |= adreno_gpu->rev.patchid << AGC_LM_CONFIG_GPU_VERSION_SHIFT;
+
+ /* For now disable GPMU side throttling */
+ config |= AGC_LM_CONFIG_THROTTLE_DISABLE;
+
+ /* Until we get clock scaling 0 is always the active power level */
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);
+
+ /* Fixed at 6000 for now */
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000);
+
+ gpu_write(gpu, AGC_MSG_STATE, 0x80000001);
+ gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
+
+ gpu_write(gpu, AGC_MSG_PAYLOAD(0), 5448);
+ gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
+
+ gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));
+ gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);
+
+ gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LM_CONFIG), config);
+ gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LEVEL_CONFIG), LEVEL_CONFIG);
+ gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE,
+ PAYLOAD_SIZE(AGC_LEVEL_CONFIG + 1));
+
+ gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
+}
+
+/* Enable SP/TP power collapse */
+static void a5xx_pc_init(struct msm_gpu *gpu)
+{
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x7F);
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_BINNING_CTRL, 0);
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0xA0080);
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x600040);
+}
+
+/* Enable the GPMU microcontroller */
+static int a5xx_gpmu_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ if (!a5xx_gpu->gpmu_dwords)
+ return 0;
+
+ /* Turn off protected mode for this operation */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Kick off the IB to load the GPMU microcode */
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova));
+ OUT_RING(ring, a5xx_gpu->gpmu_dwords);
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ a5xx_flush(gpu, ring, true);
+
+ if (!a5xx_idle(gpu, ring)) {
+ DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",
+ gpu->name);
+ return -EINVAL;
+ }
+
+ if (adreno_is_a530(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);
+
+ /* Kick off the GPMU */
+ gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);
+
+ /*
+	 * Wait for the GPMU to respond. It isn't fatal if it doesn't; we just
+ * won't have advanced power collapse.
+ */
+ if (spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF,
+ 0xBABEFACE))
+ DRM_ERROR("%s: GPMU firmware initialization timed out\n",
+ gpu->name);
+
+ if (!adreno_is_a530(adreno_gpu)) {
+ u32 val = gpu_read(gpu, REG_A5XX_GPMU_GENERAL_1);
+
+ if (val)
+ DRM_ERROR("%s: GPMU firmware initialization failed: %d\n",
+ gpu->name, val);
+ }
+
+ return 0;
+}
+
+/* Enable limits management */
+static void a5xx_lm_enable(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ /* This init sequence only applies to A530 */
+ if (!adreno_is_a530(adreno_gpu))
+ return;
+
+ gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
+ gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
+ gpu_write(gpu, REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK, 0x50000);
+ gpu_write(gpu, REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL, 0x30000);
+
+ gpu_write(gpu, REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL, 0x011);
+}
+
+int a5xx_power_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
+ /* Not all A5xx chips have a GPMU */
+ if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu)))
+ return 0;
+
+ /* Set up the limits management */
+ if (adreno_is_a530(adreno_gpu))
+ a530_lm_setup(gpu);
+ else if (adreno_is_a540(adreno_gpu))
+ a540_lm_setup(gpu);
+
+	/* Set up SP/TP power collapse */
+ a5xx_pc_init(gpu);
+
+ /* Start the GPMU */
+ ret = a5xx_gpmu_init(gpu);
+ if (ret)
+ return ret;
+
+ /* Start the limits management */
+ a5xx_lm_enable(gpu);
+
+ return 0;
+}
+
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct drm_device *drm = gpu->dev;
+ uint32_t dwords = 0, offset = 0, bosize;
+ unsigned int *data, *ptr, *cmds;
+ unsigned int cmds_size;
+
+ if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu)))
+ return;
+
+ if (a5xx_gpu->gpmu_bo)
+ return;
+
+ data = (unsigned int *) adreno_gpu->fw[ADRENO_FW_GPMU]->data;
+
+ /*
+ * The first dword is the size of the remaining data in dwords. Use it
+ * as a checksum of sorts and make sure it matches the actual size of
+ * the firmware that we read
+ */
+
+ if (adreno_gpu->fw[ADRENO_FW_GPMU]->size < 8 ||
+ (data[0] < 2) || (data[0] >=
+ (adreno_gpu->fw[ADRENO_FW_GPMU]->size >> 2)))
+ return;
+
+ /* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
+ if (data[1] != 2)
+ return;
+
+ cmds = data + data[2] + 3;
+ cmds_size = data[0] - data[2] - 2;
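+
+	/*
+	 * At this point the assumed firmware layout, inferred from the checks
+	 * above, is:
+	 *
+	 *   data[0]  size of the remaining data, in dwords
+	 *   data[1]  block ID (2 == GPMU firmware)
+	 *   data[2]  header length; the command stream starts at
+	 *            data + data[2] + 3 and runs for cmds_size dwords
+	 */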
+
+ /*
+	 * A single type4 opcode can only have so many values attached, so
+	 * add enough opcodes to load all of the commands
+ */
+ bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
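+
+	/*
+	 * Worked example for the bosize computation above, assuming (for
+	 * illustration only) a TYPE4_MAX_PAYLOAD of 127: 300 command dwords
+	 * need 300/127 + 1 = 3 packet headers, so bosize = (300 + 3) << 2 =
+	 * 1212 bytes.
+	 */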
+
+ ptr = msm_gem_kernel_new(drm, bosize,
+ MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace,
+ &a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
+ if (IS_ERR(ptr))
+ return;
+
+ msm_gem_object_set_name(a5xx_gpu->gpmu_bo, "gpmufw");
+
+ while (cmds_size > 0) {
+ int i;
+ uint32_t _size = cmds_size > TYPE4_MAX_PAYLOAD ?
+ TYPE4_MAX_PAYLOAD : cmds_size;
+
+ ptr[dwords++] = PKT4(REG_A5XX_GPMU_INST_RAM_BASE + offset,
+ _size);
+
+ for (i = 0; i < _size; i++)
+ ptr[dwords++] = *cmds++;
+
+ offset += _size;
+ cmds_size -= _size;
+ }
+
+ msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
+ a5xx_gpu->gpmu_dwords = dwords;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
new file mode 100644
index 000000000..f58dd564d
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ */
+
+#include "msm_gem.h"
+#include "a5xx_gpu.h"
+
+/*
+ * Try to transition the preemption state from old to new. Return
+ * true on success or false if the original state wasn't 'old'
+ */
+static inline bool try_preempt_state(struct a5xx_gpu *a5xx_gpu,
+ enum preempt_state old, enum preempt_state new)
+{
+ enum preempt_state cur = atomic_cmpxchg(&a5xx_gpu->preempt_state,
+ old, new);
+
+ return (cur == old);
+}
+
+/*
+ * Force the preemption state to the specified state. This is used in cases
+ * where the current state is known and won't change
+ */
+static inline void set_preempt_state(struct a5xx_gpu *gpu,
+ enum preempt_state new)
+{
+ /*
+ * preempt_state may be read by other cores trying to trigger a
+ * preemption or in the interrupt handler so barriers are needed
+ * before...
+ */
+ smp_mb__before_atomic();
+ atomic_set(&gpu->preempt_state, new);
+	/* ... and after */
+ smp_mb__after_atomic();
+}
+
+/* Write the most recent wptr for the given ring into the hardware */
+static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ unsigned long flags;
+ uint32_t wptr;
+
+ if (!ring)
+ return;
+
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+ wptr = get_wptr(ring);
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
+
+ gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
+}
+
+/* Return the highest priority ringbuffer with something in it */
+static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
+{
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ bool empty;
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+ empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
+
+ if (!empty)
+ return ring;
+ }
+
+ return NULL;
+}
+
+static void a5xx_preempt_timer(struct timer_list *t)
+{
+ struct a5xx_gpu *a5xx_gpu = from_timer(a5xx_gpu, t, preempt_timer);
+ struct msm_gpu *gpu = &a5xx_gpu->base.base;
+ struct drm_device *dev = gpu->dev;
+
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
+ return;
+
+ DRM_DEV_ERROR(dev->dev, "%s: preemption timed out\n", gpu->name);
+ kthread_queue_work(gpu->worker, &gpu->recover_work);
+}
+
+/* Try to trigger a preemption switch */
+void a5xx_preempt_trigger(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ unsigned long flags;
+ struct msm_ringbuffer *ring;
+
+ if (gpu->nr_rings == 1)
+ return;
+
+ /*
+ * Try to start preemption by moving from NONE to START. If
+ * unsuccessful, a preemption is already in flight
+ */
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
+ return;
+
+ /* Get the next ring to preempt to */
+ ring = get_next_ring(gpu);
+
+ /*
+ * If no ring is populated or the highest priority ring is the current
+	 * one, do nothing except update the wptr to the latest and greatest
+ */
+ if (!ring || (a5xx_gpu->cur_ring == ring)) {
+ /*
+		 * It's possible that while a preemption request is in progress
+ * from an irq context, a user context trying to submit might
+ * fail to update the write pointer, because it determines
+ * that the preempt state is not PREEMPT_NONE.
+ *
+ * Close the race by introducing an intermediate
+ * state PREEMPT_ABORT to let the submit path
+ * know that the ringbuffer is not going to change
+ * and can safely update the write pointer.
+ */
+
+ set_preempt_state(a5xx_gpu, PREEMPT_ABORT);
+ update_wptr(gpu, a5xx_gpu->cur_ring);
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+ return;
+ }
+
+ /* Make sure the wptr doesn't update while we're in motion */
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+ a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
+
+ /* Set the address of the incoming preemption record */
+ gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
+ a5xx_gpu->preempt_iova[ring->id]);
+
+ a5xx_gpu->next_ring = ring;
+
+ /* Start a timer to catch a stuck preemption */
+ mod_timer(&a5xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000));
+
+ /* Set the preemption state to triggered */
+ set_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED);
+
+ /* Make sure everything is written before hitting the button */
+ wmb();
+
+ /* And actually start the preemption */
+ gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
+}
+
+void a5xx_preempt_irq(struct msm_gpu *gpu)
+{
+ uint32_t status;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct drm_device *dev = gpu->dev;
+
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING))
+ return;
+
+ /* Delete the preemption watchdog timer */
+ del_timer(&a5xx_gpu->preempt_timer);
+
+ /*
+ * The hardware should be setting CP_CONTEXT_SWITCH_CNTL to zero before
+	 * firing the interrupt, but there is a non-zero chance of a hardware
+	 * condition or a software race that could set it again before we have a
+	 * chance to finish. If that happens, log it and go for recovery.
+ */
+ status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
+ if (unlikely(status)) {
+ set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
+ DRM_DEV_ERROR(dev->dev, "%s: Preemption failed to complete\n",
+ gpu->name);
+ kthread_queue_work(gpu->worker, &gpu->recover_work);
+ return;
+ }
+
+ a5xx_gpu->cur_ring = a5xx_gpu->next_ring;
+ a5xx_gpu->next_ring = NULL;
+
+ update_wptr(gpu, a5xx_gpu->cur_ring);
+
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+}
+
+void a5xx_preempt_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int i;
+
+ /* Always come up on rb 0 */
+ a5xx_gpu->cur_ring = gpu->rb[0];
+
+ /* No preemption if we only have one ring */
+ if (gpu->nr_rings == 1)
+ return;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ a5xx_gpu->preempt[i]->wptr = 0;
+ a5xx_gpu->preempt[i]->rptr = 0;
+ a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
+ a5xx_gpu->preempt[i]->rptr_addr = shadowptr(a5xx_gpu, gpu->rb[i]);
+ }
+
+ /* Write a 0 to signal that we aren't switching pagetables */
+ gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO, 0);
+
+ /* Reset the preemption state */
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+}
+
+static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
+ struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = &a5xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct a5xx_preempt_record *ptr;
+ void *counters;
+ struct drm_gem_object *bo = NULL, *counters_bo = NULL;
+ u64 iova = 0, counters_iova = 0;
+
+ ptr = msm_gem_kernel_new(gpu->dev,
+ A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
+ MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
+
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+
+ /* The buffer to store counters needs to be unprivileged */
+ counters = msm_gem_kernel_new(gpu->dev,
+ A5XX_PREEMPT_COUNTER_SIZE,
+ MSM_BO_WC, gpu->aspace, &counters_bo, &counters_iova);
+ if (IS_ERR(counters)) {
+ msm_gem_kernel_put(bo, gpu->aspace);
+ return PTR_ERR(counters);
+ }
+
+ msm_gem_object_set_name(bo, "preempt");
+ msm_gem_object_set_name(counters_bo, "preempt_counters");
+
+ a5xx_gpu->preempt_bo[ring->id] = bo;
+ a5xx_gpu->preempt_counters_bo[ring->id] = counters_bo;
+ a5xx_gpu->preempt_iova[ring->id] = iova;
+ a5xx_gpu->preempt[ring->id] = ptr;
+
+ /* Set up the defaults on the preemption record */
+
+ ptr->magic = A5XX_PREEMPT_RECORD_MAGIC;
+ ptr->info = 0;
+ ptr->data = 0;
+ ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE;
+
+ ptr->counter = counters_iova;
+
+ return 0;
+}
+
+void a5xx_preempt_fini(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace);
+ msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i], gpu->aspace);
+ }
+}
+
+void a5xx_preempt_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int i;
+
+ /* No preemption if we only have one ring */
+ if (gpu->nr_rings <= 1)
+ return;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ if (preempt_init_ring(a5xx_gpu, gpu->rb[i])) {
+ /*
+ * On any failure our adventure is over. Clean up and
+ * set nr_rings to 1 to force preemption off
+ */
+ a5xx_preempt_fini(gpu);
+ gpu->nr_rings = 1;
+
+ return;
+ }
+ }
+
+ timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
new file mode 100644
index 000000000..beea4a7fc
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -0,0 +1,7780 @@
+#ifndef A6XX_XML
+#define A6XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56)
+
+Copyright (C) 2013-2022 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a6xx_tile_mode {
+ TILE6_LINEAR = 0,
+ TILE6_2 = 2,
+ TILE6_3 = 3,
+};
+
+enum a6xx_format {
+ FMT6_A8_UNORM = 2,
+ FMT6_8_UNORM = 3,
+ FMT6_8_SNORM = 4,
+ FMT6_8_UINT = 5,
+ FMT6_8_SINT = 6,
+ FMT6_4_4_4_4_UNORM = 8,
+ FMT6_5_5_5_1_UNORM = 10,
+ FMT6_1_5_5_5_UNORM = 12,
+ FMT6_5_6_5_UNORM = 14,
+ FMT6_8_8_UNORM = 15,
+ FMT6_8_8_SNORM = 16,
+ FMT6_8_8_UINT = 17,
+ FMT6_8_8_SINT = 18,
+ FMT6_L8_A8_UNORM = 19,
+ FMT6_16_UNORM = 21,
+ FMT6_16_SNORM = 22,
+ FMT6_16_FLOAT = 23,
+ FMT6_16_UINT = 24,
+ FMT6_16_SINT = 25,
+ FMT6_8_8_8_UNORM = 33,
+ FMT6_8_8_8_SNORM = 34,
+ FMT6_8_8_8_UINT = 35,
+ FMT6_8_8_8_SINT = 36,
+ FMT6_8_8_8_8_UNORM = 48,
+ FMT6_8_8_8_X8_UNORM = 49,
+ FMT6_8_8_8_8_SNORM = 50,
+ FMT6_8_8_8_8_UINT = 51,
+ FMT6_8_8_8_8_SINT = 52,
+ FMT6_9_9_9_E5_FLOAT = 53,
+ FMT6_10_10_10_2_UNORM = 54,
+ FMT6_10_10_10_2_UNORM_DEST = 55,
+ FMT6_10_10_10_2_SNORM = 57,
+ FMT6_10_10_10_2_UINT = 58,
+ FMT6_10_10_10_2_SINT = 59,
+ FMT6_11_11_10_FLOAT = 66,
+ FMT6_16_16_UNORM = 67,
+ FMT6_16_16_SNORM = 68,
+ FMT6_16_16_FLOAT = 69,
+ FMT6_16_16_UINT = 70,
+ FMT6_16_16_SINT = 71,
+ FMT6_32_UNORM = 72,
+ FMT6_32_SNORM = 73,
+ FMT6_32_FLOAT = 74,
+ FMT6_32_UINT = 75,
+ FMT6_32_SINT = 76,
+ FMT6_32_FIXED = 77,
+ FMT6_16_16_16_UNORM = 88,
+ FMT6_16_16_16_SNORM = 89,
+ FMT6_16_16_16_FLOAT = 90,
+ FMT6_16_16_16_UINT = 91,
+ FMT6_16_16_16_SINT = 92,
+ FMT6_16_16_16_16_UNORM = 96,
+ FMT6_16_16_16_16_SNORM = 97,
+ FMT6_16_16_16_16_FLOAT = 98,
+ FMT6_16_16_16_16_UINT = 99,
+ FMT6_16_16_16_16_SINT = 100,
+ FMT6_32_32_UNORM = 101,
+ FMT6_32_32_SNORM = 102,
+ FMT6_32_32_FLOAT = 103,
+ FMT6_32_32_UINT = 104,
+ FMT6_32_32_SINT = 105,
+ FMT6_32_32_FIXED = 106,
+ FMT6_32_32_32_UNORM = 112,
+ FMT6_32_32_32_SNORM = 113,
+ FMT6_32_32_32_UINT = 114,
+ FMT6_32_32_32_SINT = 115,
+ FMT6_32_32_32_FLOAT = 116,
+ FMT6_32_32_32_FIXED = 117,
+ FMT6_32_32_32_32_UNORM = 128,
+ FMT6_32_32_32_32_SNORM = 129,
+ FMT6_32_32_32_32_FLOAT = 130,
+ FMT6_32_32_32_32_UINT = 131,
+ FMT6_32_32_32_32_SINT = 132,
+ FMT6_32_32_32_32_FIXED = 133,
+ FMT6_G8R8B8R8_422_UNORM = 140,
+ FMT6_R8G8R8B8_422_UNORM = 141,
+ FMT6_R8_G8B8_2PLANE_420_UNORM = 142,
+ FMT6_NV21 = 143,
+ FMT6_R8_G8_B8_3PLANE_420_UNORM = 144,
+ FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8 = 145,
+ FMT6_NV12_Y = 148,
+ FMT6_NV12_UV = 149,
+ FMT6_NV12_VU = 150,
+ FMT6_NV12_4R = 151,
+ FMT6_NV12_4R_Y = 152,
+ FMT6_NV12_4R_UV = 153,
+ FMT6_P010 = 154,
+ FMT6_P010_Y = 155,
+ FMT6_P010_UV = 156,
+ FMT6_TP10 = 157,
+ FMT6_TP10_Y = 158,
+ FMT6_TP10_UV = 159,
+ FMT6_Z24_UNORM_S8_UINT = 160,
+ FMT6_ETC2_RG11_UNORM = 171,
+ FMT6_ETC2_RG11_SNORM = 172,
+ FMT6_ETC2_R11_UNORM = 173,
+ FMT6_ETC2_R11_SNORM = 174,
+ FMT6_ETC1 = 175,
+ FMT6_ETC2_RGB8 = 176,
+ FMT6_ETC2_RGBA8 = 177,
+ FMT6_ETC2_RGB8A1 = 178,
+ FMT6_DXT1 = 179,
+ FMT6_DXT3 = 180,
+ FMT6_DXT5 = 181,
+ FMT6_RGTC1_UNORM = 183,
+ FMT6_RGTC1_SNORM = 184,
+ FMT6_RGTC2_UNORM = 187,
+ FMT6_RGTC2_SNORM = 188,
+ FMT6_BPTC_UFLOAT = 190,
+ FMT6_BPTC_FLOAT = 191,
+ FMT6_BPTC = 192,
+ FMT6_ASTC_4x4 = 193,
+ FMT6_ASTC_5x4 = 194,
+ FMT6_ASTC_5x5 = 195,
+ FMT6_ASTC_6x5 = 196,
+ FMT6_ASTC_6x6 = 197,
+ FMT6_ASTC_8x5 = 198,
+ FMT6_ASTC_8x6 = 199,
+ FMT6_ASTC_8x8 = 200,
+ FMT6_ASTC_10x5 = 201,
+ FMT6_ASTC_10x6 = 202,
+ FMT6_ASTC_10x8 = 203,
+ FMT6_ASTC_10x10 = 204,
+ FMT6_ASTC_12x10 = 205,
+ FMT6_ASTC_12x12 = 206,
+ FMT6_Z24_UINT_S8_UINT = 234,
+ FMT6_NONE = 255,
+};
+
+enum a6xx_polygon_mode {
+ POLYMODE6_POINTS = 1,
+ POLYMODE6_LINES = 2,
+ POLYMODE6_TRIANGLES = 3,
+};
+
+enum a6xx_depth_format {
+ DEPTH6_NONE = 0,
+ DEPTH6_16 = 1,
+ DEPTH6_24_8 = 2,
+ DEPTH6_32 = 4,
+};
+
+enum a6xx_shader_id {
+ A6XX_TP0_TMO_DATA = 9,
+ A6XX_TP0_SMO_DATA = 10,
+ A6XX_TP0_MIPMAP_BASE_DATA = 11,
+ A6XX_TP1_TMO_DATA = 25,
+ A6XX_TP1_SMO_DATA = 26,
+ A6XX_TP1_MIPMAP_BASE_DATA = 27,
+ A6XX_SP_INST_DATA = 41,
+ A6XX_SP_LB_0_DATA = 42,
+ A6XX_SP_LB_1_DATA = 43,
+ A6XX_SP_LB_2_DATA = 44,
+ A6XX_SP_LB_3_DATA = 45,
+ A6XX_SP_LB_4_DATA = 46,
+ A6XX_SP_LB_5_DATA = 47,
+ A6XX_SP_CB_BINDLESS_DATA = 48,
+ A6XX_SP_CB_LEGACY_DATA = 49,
+ A6XX_SP_UAV_DATA = 50,
+ A6XX_SP_INST_TAG = 51,
+ A6XX_SP_CB_BINDLESS_TAG = 52,
+ A6XX_SP_TMO_UMO_TAG = 53,
+ A6XX_SP_SMO_TAG = 54,
+ A6XX_SP_STATE_DATA = 55,
+ A6XX_HLSQ_CHUNK_CVS_RAM = 73,
+ A6XX_HLSQ_CHUNK_CPS_RAM = 74,
+ A6XX_HLSQ_CHUNK_CVS_RAM_TAG = 75,
+ A6XX_HLSQ_CHUNK_CPS_RAM_TAG = 76,
+ A6XX_HLSQ_ICB_CVS_CB_BASE_TAG = 77,
+ A6XX_HLSQ_ICB_CPS_CB_BASE_TAG = 78,
+ A6XX_HLSQ_CVS_MISC_RAM = 80,
+ A6XX_HLSQ_CPS_MISC_RAM = 81,
+ A6XX_HLSQ_INST_RAM = 82,
+ A6XX_HLSQ_GFX_CVS_CONST_RAM = 83,
+ A6XX_HLSQ_GFX_CPS_CONST_RAM = 84,
+ A6XX_HLSQ_CVS_MISC_RAM_TAG = 85,
+ A6XX_HLSQ_CPS_MISC_RAM_TAG = 86,
+ A6XX_HLSQ_INST_RAM_TAG = 87,
+ A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 88,
+ A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 89,
+ A6XX_HLSQ_PWR_REST_RAM = 90,
+ A6XX_HLSQ_PWR_REST_TAG = 91,
+ A6XX_HLSQ_DATAPATH_META = 96,
+ A6XX_HLSQ_FRONTEND_META = 97,
+ A6XX_HLSQ_INDIRECT_META = 98,
+ A6XX_HLSQ_BACKEND_META = 99,
+};
+
+enum a6xx_debugbus_id {
+ A6XX_DBGBUS_CP = 1,
+ A6XX_DBGBUS_RBBM = 2,
+ A6XX_DBGBUS_VBIF = 3,
+ A6XX_DBGBUS_HLSQ = 4,
+ A6XX_DBGBUS_UCHE = 5,
+ A6XX_DBGBUS_DPM = 6,
+ A6XX_DBGBUS_TESS = 7,
+ A6XX_DBGBUS_PC = 8,
+ A6XX_DBGBUS_VFDP = 9,
+ A6XX_DBGBUS_VPC = 10,
+ A6XX_DBGBUS_TSE = 11,
+ A6XX_DBGBUS_RAS = 12,
+ A6XX_DBGBUS_VSC = 13,
+ A6XX_DBGBUS_COM = 14,
+ A6XX_DBGBUS_LRZ = 16,
+ A6XX_DBGBUS_A2D = 17,
+ A6XX_DBGBUS_CCUFCHE = 18,
+ A6XX_DBGBUS_GMU_CX = 19,
+ A6XX_DBGBUS_RBP = 20,
+ A6XX_DBGBUS_DCS = 21,
+ A6XX_DBGBUS_DBGC = 22,
+ A6XX_DBGBUS_CX = 23,
+ A6XX_DBGBUS_GMU_GX = 24,
+ A6XX_DBGBUS_TPFCHE = 25,
+ A6XX_DBGBUS_GBIF_GX = 26,
+ A6XX_DBGBUS_GPC = 29,
+ A6XX_DBGBUS_LARC = 30,
+ A6XX_DBGBUS_HLSQ_SPTP = 31,
+ A6XX_DBGBUS_RB_0 = 32,
+ A6XX_DBGBUS_RB_1 = 33,
+ A6XX_DBGBUS_UCHE_WRAPPER = 36,
+ A6XX_DBGBUS_CCU_0 = 40,
+ A6XX_DBGBUS_CCU_1 = 41,
+ A6XX_DBGBUS_VFD_0 = 56,
+ A6XX_DBGBUS_VFD_1 = 57,
+ A6XX_DBGBUS_VFD_2 = 58,
+ A6XX_DBGBUS_VFD_3 = 59,
+ A6XX_DBGBUS_SP_0 = 64,
+ A6XX_DBGBUS_SP_1 = 65,
+ A6XX_DBGBUS_TPL1_0 = 72,
+ A6XX_DBGBUS_TPL1_1 = 73,
+ A6XX_DBGBUS_TPL1_2 = 74,
+ A6XX_DBGBUS_TPL1_3 = 75,
+};
+
+enum a6xx_cp_perfcounter_select {
+ PERF_CP_ALWAYS_COUNT = 0,
+ PERF_CP_BUSY_GFX_CORE_IDLE = 1,
+ PERF_CP_BUSY_CYCLES = 2,
+ PERF_CP_NUM_PREEMPTIONS = 3,
+ PERF_CP_PREEMPTION_REACTION_DELAY = 4,
+ PERF_CP_PREEMPTION_SWITCH_OUT_TIME = 5,
+ PERF_CP_PREEMPTION_SWITCH_IN_TIME = 6,
+ PERF_CP_DEAD_DRAWS_IN_BIN_RENDER = 7,
+ PERF_CP_PREDICATED_DRAWS_KILLED = 8,
+ PERF_CP_MODE_SWITCH = 9,
+ PERF_CP_ZPASS_DONE = 10,
+ PERF_CP_CONTEXT_DONE = 11,
+ PERF_CP_CACHE_FLUSH = 12,
+ PERF_CP_LONG_PREEMPTIONS = 13,
+ PERF_CP_SQE_I_CACHE_STARVE = 14,
+ PERF_CP_SQE_IDLE = 15,
+ PERF_CP_SQE_PM4_STARVE_RB_IB = 16,
+ PERF_CP_SQE_PM4_STARVE_SDS = 17,
+ PERF_CP_SQE_MRB_STARVE = 18,
+ PERF_CP_SQE_RRB_STARVE = 19,
+ PERF_CP_SQE_VSD_STARVE = 20,
+ PERF_CP_VSD_DECODE_STARVE = 21,
+ PERF_CP_SQE_PIPE_OUT_STALL = 22,
+ PERF_CP_SQE_SYNC_STALL = 23,
+ PERF_CP_SQE_PM4_WFI_STALL = 24,
+ PERF_CP_SQE_SYS_WFI_STALL = 25,
+ PERF_CP_SQE_T4_EXEC = 26,
+ PERF_CP_SQE_LOAD_STATE_EXEC = 27,
+ PERF_CP_SQE_SAVE_SDS_STATE = 28,
+ PERF_CP_SQE_DRAW_EXEC = 29,
+ PERF_CP_SQE_CTXT_REG_BUNCH_EXEC = 30,
+ PERF_CP_SQE_EXEC_PROFILED = 31,
+ PERF_CP_MEMORY_POOL_EMPTY = 32,
+ PERF_CP_MEMORY_POOL_SYNC_STALL = 33,
+ PERF_CP_MEMORY_POOL_ABOVE_THRESH = 34,
+ PERF_CP_AHB_WR_STALL_PRE_DRAWS = 35,
+ PERF_CP_AHB_STALL_SQE_GMU = 36,
+ PERF_CP_AHB_STALL_SQE_WR_OTHER = 37,
+ PERF_CP_AHB_STALL_SQE_RD_OTHER = 38,
+ PERF_CP_CLUSTER0_EMPTY = 39,
+ PERF_CP_CLUSTER1_EMPTY = 40,
+ PERF_CP_CLUSTER2_EMPTY = 41,
+ PERF_CP_CLUSTER3_EMPTY = 42,
+ PERF_CP_CLUSTER4_EMPTY = 43,
+ PERF_CP_CLUSTER5_EMPTY = 44,
+ PERF_CP_PM4_DATA = 45,
+ PERF_CP_PM4_HEADERS = 46,
+ PERF_CP_VBIF_READ_BEATS = 47,
+ PERF_CP_VBIF_WRITE_BEATS = 48,
+ PERF_CP_SQE_INSTR_COUNTER = 49,
+};
+
+enum a6xx_rbbm_perfcounter_select {
+ PERF_RBBM_ALWAYS_COUNT = 0,
+ PERF_RBBM_ALWAYS_ON = 1,
+ PERF_RBBM_TSE_BUSY = 2,
+ PERF_RBBM_RAS_BUSY = 3,
+ PERF_RBBM_PC_DCALL_BUSY = 4,
+ PERF_RBBM_PC_VSD_BUSY = 5,
+ PERF_RBBM_STATUS_MASKED = 6,
+ PERF_RBBM_COM_BUSY = 7,
+ PERF_RBBM_DCOM_BUSY = 8,
+ PERF_RBBM_VBIF_BUSY = 9,
+ PERF_RBBM_VSC_BUSY = 10,
+ PERF_RBBM_TESS_BUSY = 11,
+ PERF_RBBM_UCHE_BUSY = 12,
+ PERF_RBBM_HLSQ_BUSY = 13,
+};
+
+enum a6xx_pc_perfcounter_select {
+ PERF_PC_BUSY_CYCLES = 0,
+ PERF_PC_WORKING_CYCLES = 1,
+ PERF_PC_STALL_CYCLES_VFD = 2,
+ PERF_PC_STALL_CYCLES_TSE = 3,
+ PERF_PC_STALL_CYCLES_VPC = 4,
+ PERF_PC_STALL_CYCLES_UCHE = 5,
+ PERF_PC_STALL_CYCLES_TESS = 6,
+ PERF_PC_STALL_CYCLES_TSE_ONLY = 7,
+ PERF_PC_STALL_CYCLES_VPC_ONLY = 8,
+ PERF_PC_PASS1_TF_STALL_CYCLES = 9,
+ PERF_PC_STARVE_CYCLES_FOR_INDEX = 10,
+ PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR = 11,
+ PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM = 12,
+ PERF_PC_STARVE_CYCLES_FOR_POSITION = 13,
+ PERF_PC_STARVE_CYCLES_DI = 14,
+ PERF_PC_VIS_STREAMS_LOADED = 15,
+ PERF_PC_INSTANCES = 16,
+ PERF_PC_VPC_PRIMITIVES = 17,
+ PERF_PC_DEAD_PRIM = 18,
+ PERF_PC_LIVE_PRIM = 19,
+ PERF_PC_VERTEX_HITS = 20,
+ PERF_PC_IA_VERTICES = 21,
+ PERF_PC_IA_PRIMITIVES = 22,
+ PERF_PC_GS_PRIMITIVES = 23,
+ PERF_PC_HS_INVOCATIONS = 24,
+ PERF_PC_DS_INVOCATIONS = 25,
+ PERF_PC_VS_INVOCATIONS = 26,
+ PERF_PC_GS_INVOCATIONS = 27,
+ PERF_PC_DS_PRIMITIVES = 28,
+ PERF_PC_VPC_POS_DATA_TRANSACTION = 29,
+ PERF_PC_3D_DRAWCALLS = 30,
+ PERF_PC_2D_DRAWCALLS = 31,
+ PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS = 32,
+ PERF_TESS_BUSY_CYCLES = 33,
+ PERF_TESS_WORKING_CYCLES = 34,
+ PERF_TESS_STALL_CYCLES_PC = 35,
+ PERF_TESS_STARVE_CYCLES_PC = 36,
+ PERF_PC_TSE_TRANSACTION = 37,
+ PERF_PC_TSE_VERTEX = 38,
+ PERF_PC_TESS_PC_UV_TRANS = 39,
+ PERF_PC_TESS_PC_UV_PATCHES = 40,
+ PERF_PC_TESS_FACTOR_TRANS = 41,
+};
+
+enum a6xx_vfd_perfcounter_select {
+ PERF_VFD_BUSY_CYCLES = 0,
+ PERF_VFD_STALL_CYCLES_UCHE = 1,
+ PERF_VFD_STALL_CYCLES_VPC_ALLOC = 2,
+ PERF_VFD_STALL_CYCLES_SP_INFO = 3,
+ PERF_VFD_STALL_CYCLES_SP_ATTR = 4,
+ PERF_VFD_STARVE_CYCLES_UCHE = 5,
+ PERF_VFD_RBUFFER_FULL = 6,
+ PERF_VFD_ATTR_INFO_FIFO_FULL = 7,
+ PERF_VFD_DECODED_ATTRIBUTE_BYTES = 8,
+ PERF_VFD_NUM_ATTRIBUTES = 9,
+ PERF_VFD_UPPER_SHADER_FIBERS = 10,
+ PERF_VFD_LOWER_SHADER_FIBERS = 11,
+ PERF_VFD_MODE_0_FIBERS = 12,
+ PERF_VFD_MODE_1_FIBERS = 13,
+ PERF_VFD_MODE_2_FIBERS = 14,
+ PERF_VFD_MODE_3_FIBERS = 15,
+ PERF_VFD_MODE_4_FIBERS = 16,
+ PERF_VFD_TOTAL_VERTICES = 17,
+ PERF_VFDP_STALL_CYCLES_VFD = 18,
+ PERF_VFDP_STALL_CYCLES_VFD_INDEX = 19,
+ PERF_VFDP_STALL_CYCLES_VFD_PROG = 20,
+ PERF_VFDP_STARVE_CYCLES_PC = 21,
+ PERF_VFDP_VS_STAGE_WAVES = 22,
+};
+
+enum a6xx_hlsq_perfcounter_select {
+ PERF_HLSQ_BUSY_CYCLES = 0,
+ PERF_HLSQ_STALL_CYCLES_UCHE = 1,
+ PERF_HLSQ_STALL_CYCLES_SP_STATE = 2,
+ PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE = 3,
+ PERF_HLSQ_UCHE_LATENCY_CYCLES = 4,
+ PERF_HLSQ_UCHE_LATENCY_COUNT = 5,
+ PERF_HLSQ_FS_STAGE_1X_WAVES = 6,
+ PERF_HLSQ_FS_STAGE_2X_WAVES = 7,
+ PERF_HLSQ_QUADS = 8,
+ PERF_HLSQ_CS_INVOCATIONS = 9,
+ PERF_HLSQ_COMPUTE_DRAWCALLS = 10,
+ PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING = 11,
+ PERF_HLSQ_DUAL_FS_PROG_ACTIVE = 12,
+ PERF_HLSQ_DUAL_VS_PROG_ACTIVE = 13,
+ PERF_HLSQ_FS_BATCH_COUNT_ZERO = 14,
+ PERF_HLSQ_VS_BATCH_COUNT_ZERO = 15,
+ PERF_HLSQ_WAVE_PENDING_NO_QUAD = 16,
+ PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE = 17,
+ PERF_HLSQ_STALL_CYCLES_VPC = 18,
+ PERF_HLSQ_PIXELS = 19,
+ PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC = 20,
+};
+
+enum a6xx_vpc_perfcounter_select {
+ PERF_VPC_BUSY_CYCLES = 0,
+ PERF_VPC_WORKING_CYCLES = 1,
+ PERF_VPC_STALL_CYCLES_UCHE = 2,
+ PERF_VPC_STALL_CYCLES_VFD_WACK = 3,
+ PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC = 4,
+ PERF_VPC_STALL_CYCLES_PC = 5,
+ PERF_VPC_STALL_CYCLES_SP_LM = 6,
+ PERF_VPC_STARVE_CYCLES_SP = 7,
+ PERF_VPC_STARVE_CYCLES_LRZ = 8,
+ PERF_VPC_PC_PRIMITIVES = 9,
+ PERF_VPC_SP_COMPONENTS = 10,
+ PERF_VPC_STALL_CYCLES_VPCRAM_POS = 11,
+ PERF_VPC_LRZ_ASSIGN_PRIMITIVES = 12,
+ PERF_VPC_RB_VISIBLE_PRIMITIVES = 13,
+ PERF_VPC_LM_TRANSACTION = 14,
+ PERF_VPC_STREAMOUT_TRANSACTION = 15,
+ PERF_VPC_VS_BUSY_CYCLES = 16,
+ PERF_VPC_PS_BUSY_CYCLES = 17,
+ PERF_VPC_VS_WORKING_CYCLES = 18,
+ PERF_VPC_PS_WORKING_CYCLES = 19,
+ PERF_VPC_STARVE_CYCLES_RB = 20,
+ PERF_VPC_NUM_VPCRAM_READ_POS = 21,
+ PERF_VPC_WIT_FULL_CYCLES = 22,
+ PERF_VPC_VPCRAM_FULL_CYCLES = 23,
+ PERF_VPC_LM_FULL_WAIT_FOR_INTP_END = 24,
+ PERF_VPC_NUM_VPCRAM_WRITE = 25,
+ PERF_VPC_NUM_VPCRAM_READ_SO = 26,
+ PERF_VPC_NUM_ATTR_REQ_LM = 27,
+};
+
+enum a6xx_tse_perfcounter_select {
+ PERF_TSE_BUSY_CYCLES = 0,
+ PERF_TSE_CLIPPING_CYCLES = 1,
+ PERF_TSE_STALL_CYCLES_RAS = 2,
+ PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE = 3,
+ PERF_TSE_STALL_CYCLES_LRZ_ZPLANE = 4,
+ PERF_TSE_STARVE_CYCLES_PC = 5,
+ PERF_TSE_INPUT_PRIM = 6,
+ PERF_TSE_INPUT_NULL_PRIM = 7,
+ PERF_TSE_TRIVAL_REJ_PRIM = 8,
+ PERF_TSE_CLIPPED_PRIM = 9,
+ PERF_TSE_ZERO_AREA_PRIM = 10,
+ PERF_TSE_FACENESS_CULLED_PRIM = 11,
+ PERF_TSE_ZERO_PIXEL_PRIM = 12,
+ PERF_TSE_OUTPUT_NULL_PRIM = 13,
+ PERF_TSE_OUTPUT_VISIBLE_PRIM = 14,
+ PERF_TSE_CINVOCATION = 15,
+ PERF_TSE_CPRIMITIVES = 16,
+ PERF_TSE_2D_INPUT_PRIM = 17,
+ PERF_TSE_2D_ALIVE_CYCLES = 18,
+ PERF_TSE_CLIP_PLANES = 19,
+};
+
+enum a6xx_ras_perfcounter_select {
+ PERF_RAS_BUSY_CYCLES = 0,
+ PERF_RAS_SUPERTILE_ACTIVE_CYCLES = 1,
+ PERF_RAS_STALL_CYCLES_LRZ = 2,
+ PERF_RAS_STARVE_CYCLES_TSE = 3,
+ PERF_RAS_SUPER_TILES = 4,
+ PERF_RAS_8X4_TILES = 5,
+ PERF_RAS_MASKGEN_ACTIVE = 6,
+ PERF_RAS_FULLY_COVERED_SUPER_TILES = 7,
+ PERF_RAS_FULLY_COVERED_8X4_TILES = 8,
+ PERF_RAS_PRIM_KILLED_INVISILBE = 9,
+ PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES = 10,
+ PERF_RAS_LRZ_INTF_WORKING_CYCLES = 11,
+ PERF_RAS_BLOCKS = 12,
+};
+
+enum a6xx_uche_perfcounter_select {
+ PERF_UCHE_BUSY_CYCLES = 0,
+ PERF_UCHE_STALL_CYCLES_ARBITER = 1,
+ PERF_UCHE_VBIF_LATENCY_CYCLES = 2,
+ PERF_UCHE_VBIF_LATENCY_SAMPLES = 3,
+ PERF_UCHE_VBIF_READ_BEATS_TP = 4,
+ PERF_UCHE_VBIF_READ_BEATS_VFD = 5,
+ PERF_UCHE_VBIF_READ_BEATS_HLSQ = 6,
+ PERF_UCHE_VBIF_READ_BEATS_LRZ = 7,
+ PERF_UCHE_VBIF_READ_BEATS_SP = 8,
+ PERF_UCHE_READ_REQUESTS_TP = 9,
+ PERF_UCHE_READ_REQUESTS_VFD = 10,
+ PERF_UCHE_READ_REQUESTS_HLSQ = 11,
+ PERF_UCHE_READ_REQUESTS_LRZ = 12,
+ PERF_UCHE_READ_REQUESTS_SP = 13,
+ PERF_UCHE_WRITE_REQUESTS_LRZ = 14,
+ PERF_UCHE_WRITE_REQUESTS_SP = 15,
+ PERF_UCHE_WRITE_REQUESTS_VPC = 16,
+ PERF_UCHE_WRITE_REQUESTS_VSC = 17,
+ PERF_UCHE_EVICTS = 18,
+ PERF_UCHE_BANK_REQ0 = 19,
+ PERF_UCHE_BANK_REQ1 = 20,
+ PERF_UCHE_BANK_REQ2 = 21,
+ PERF_UCHE_BANK_REQ3 = 22,
+ PERF_UCHE_BANK_REQ4 = 23,
+ PERF_UCHE_BANK_REQ5 = 24,
+ PERF_UCHE_BANK_REQ6 = 25,
+ PERF_UCHE_BANK_REQ7 = 26,
+ PERF_UCHE_VBIF_READ_BEATS_CH0 = 27,
+ PERF_UCHE_VBIF_READ_BEATS_CH1 = 28,
+ PERF_UCHE_GMEM_READ_BEATS = 29,
+ PERF_UCHE_TPH_REF_FULL = 30,
+ PERF_UCHE_TPH_VICTIM_FULL = 31,
+ PERF_UCHE_TPH_EXT_FULL = 32,
+ PERF_UCHE_VBIF_STALL_WRITE_DATA = 33,
+ PERF_UCHE_DCMP_LATENCY_SAMPLES = 34,
+ PERF_UCHE_DCMP_LATENCY_CYCLES = 35,
+ PERF_UCHE_VBIF_READ_BEATS_PC = 36,
+ PERF_UCHE_READ_REQUESTS_PC = 37,
+ PERF_UCHE_RAM_READ_REQ = 38,
+ PERF_UCHE_RAM_WRITE_REQ = 39,
+};
+
+enum a6xx_tp_perfcounter_select {
+ PERF_TP_BUSY_CYCLES = 0,
+ PERF_TP_STALL_CYCLES_UCHE = 1,
+ PERF_TP_LATENCY_CYCLES = 2,
+ PERF_TP_LATENCY_TRANS = 3,
+ PERF_TP_FLAG_CACHE_REQUEST_SAMPLES = 4,
+ PERF_TP_FLAG_CACHE_REQUEST_LATENCY = 5,
+ PERF_TP_L1_CACHELINE_REQUESTS = 6,
+ PERF_TP_L1_CACHELINE_MISSES = 7,
+ PERF_TP_SP_TP_TRANS = 8,
+ PERF_TP_TP_SP_TRANS = 9,
+ PERF_TP_OUTPUT_PIXELS = 10,
+ PERF_TP_FILTER_WORKLOAD_16BIT = 11,
+ PERF_TP_FILTER_WORKLOAD_32BIT = 12,
+ PERF_TP_QUADS_RECEIVED = 13,
+ PERF_TP_QUADS_OFFSET = 14,
+ PERF_TP_QUADS_SHADOW = 15,
+ PERF_TP_QUADS_ARRAY = 16,
+ PERF_TP_QUADS_GRADIENT = 17,
+ PERF_TP_QUADS_1D = 18,
+ PERF_TP_QUADS_2D = 19,
+ PERF_TP_QUADS_BUFFER = 20,
+ PERF_TP_QUADS_3D = 21,
+ PERF_TP_QUADS_CUBE = 22,
+ PERF_TP_DIVERGENT_QUADS_RECEIVED = 23,
+ PERF_TP_PRT_NON_RESIDENT_EVENTS = 24,
+ PERF_TP_OUTPUT_PIXELS_POINT = 25,
+ PERF_TP_OUTPUT_PIXELS_BILINEAR = 26,
+ PERF_TP_OUTPUT_PIXELS_MIP = 27,
+ PERF_TP_OUTPUT_PIXELS_ANISO = 28,
+ PERF_TP_OUTPUT_PIXELS_ZERO_LOD = 29,
+ PERF_TP_FLAG_CACHE_REQUESTS = 30,
+ PERF_TP_FLAG_CACHE_MISSES = 31,
+ PERF_TP_L1_5_L2_REQUESTS = 32,
+ PERF_TP_2D_OUTPUT_PIXELS = 33,
+ PERF_TP_2D_OUTPUT_PIXELS_POINT = 34,
+ PERF_TP_2D_OUTPUT_PIXELS_BILINEAR = 35,
+ PERF_TP_2D_FILTER_WORKLOAD_16BIT = 36,
+ PERF_TP_2D_FILTER_WORKLOAD_32BIT = 37,
+ PERF_TP_TPA2TPC_TRANS = 38,
+ PERF_TP_L1_MISSES_ASTC_1TILE = 39,
+ PERF_TP_L1_MISSES_ASTC_2TILE = 40,
+ PERF_TP_L1_MISSES_ASTC_4TILE = 41,
+ PERF_TP_L1_5_L2_COMPRESS_REQS = 42,
+ PERF_TP_L1_5_L2_COMPRESS_MISS = 43,
+ PERF_TP_L1_BANK_CONFLICT = 44,
+ PERF_TP_L1_5_MISS_LATENCY_CYCLES = 45,
+ PERF_TP_L1_5_MISS_LATENCY_TRANS = 46,
+ PERF_TP_QUADS_CONSTANT_MULTIPLIED = 47,
+ PERF_TP_FRONTEND_WORKING_CYCLES = 48,
+ PERF_TP_L1_TAG_WORKING_CYCLES = 49,
+ PERF_TP_L1_DATA_WRITE_WORKING_CYCLES = 50,
+ PERF_TP_PRE_L1_DECOM_WORKING_CYCLES = 51,
+ PERF_TP_BACKEND_WORKING_CYCLES = 52,
+ PERF_TP_FLAG_CACHE_WORKING_CYCLES = 53,
+ PERF_TP_L1_5_CACHE_WORKING_CYCLES = 54,
+ PERF_TP_STARVE_CYCLES_SP = 55,
+ PERF_TP_STARVE_CYCLES_UCHE = 56,
+};
+
+enum a6xx_sp_perfcounter_select {
+ PERF_SP_BUSY_CYCLES = 0,
+ PERF_SP_ALU_WORKING_CYCLES = 1,
+ PERF_SP_EFU_WORKING_CYCLES = 2,
+ PERF_SP_STALL_CYCLES_VPC = 3,
+ PERF_SP_STALL_CYCLES_TP = 4,
+ PERF_SP_STALL_CYCLES_UCHE = 5,
+ PERF_SP_STALL_CYCLES_RB = 6,
+ PERF_SP_NON_EXECUTION_CYCLES = 7,
+ PERF_SP_WAVE_CONTEXTS = 8,
+ PERF_SP_WAVE_CONTEXT_CYCLES = 9,
+ PERF_SP_FS_STAGE_WAVE_CYCLES = 10,
+ PERF_SP_FS_STAGE_WAVE_SAMPLES = 11,
+ PERF_SP_VS_STAGE_WAVE_CYCLES = 12,
+ PERF_SP_VS_STAGE_WAVE_SAMPLES = 13,
+ PERF_SP_FS_STAGE_DURATION_CYCLES = 14,
+ PERF_SP_VS_STAGE_DURATION_CYCLES = 15,
+ PERF_SP_WAVE_CTRL_CYCLES = 16,
+ PERF_SP_WAVE_LOAD_CYCLES = 17,
+ PERF_SP_WAVE_EMIT_CYCLES = 18,
+ PERF_SP_WAVE_NOP_CYCLES = 19,
+ PERF_SP_WAVE_WAIT_CYCLES = 20,
+ PERF_SP_WAVE_FETCH_CYCLES = 21,
+ PERF_SP_WAVE_IDLE_CYCLES = 22,
+ PERF_SP_WAVE_END_CYCLES = 23,
+ PERF_SP_WAVE_LONG_SYNC_CYCLES = 24,
+ PERF_SP_WAVE_SHORT_SYNC_CYCLES = 25,
+ PERF_SP_WAVE_JOIN_CYCLES = 26,
+ PERF_SP_LM_LOAD_INSTRUCTIONS = 27,
+ PERF_SP_LM_STORE_INSTRUCTIONS = 28,
+ PERF_SP_LM_ATOMICS = 29,
+ PERF_SP_GM_LOAD_INSTRUCTIONS = 30,
+ PERF_SP_GM_STORE_INSTRUCTIONS = 31,
+ PERF_SP_GM_ATOMICS = 32,
+ PERF_SP_VS_STAGE_TEX_INSTRUCTIONS = 33,
+ PERF_SP_VS_STAGE_EFU_INSTRUCTIONS = 34,
+ PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 35,
+ PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 36,
+ PERF_SP_FS_STAGE_TEX_INSTRUCTIONS = 37,
+ PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS = 38,
+ PERF_SP_FS_STAGE_EFU_INSTRUCTIONS = 39,
+ PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 40,
+ PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 41,
+ PERF_SP_FS_STAGE_BARY_INSTRUCTIONS = 42,
+ PERF_SP_VS_INSTRUCTIONS = 43,
+ PERF_SP_FS_INSTRUCTIONS = 44,
+ PERF_SP_ADDR_LOCK_COUNT = 45,
+ PERF_SP_UCHE_READ_TRANS = 46,
+ PERF_SP_UCHE_WRITE_TRANS = 47,
+ PERF_SP_EXPORT_VPC_TRANS = 48,
+ PERF_SP_EXPORT_RB_TRANS = 49,
+ PERF_SP_PIXELS_KILLED = 50,
+ PERF_SP_ICL1_REQUESTS = 51,
+ PERF_SP_ICL1_MISSES = 52,
+ PERF_SP_HS_INSTRUCTIONS = 53,
+ PERF_SP_DS_INSTRUCTIONS = 54,
+ PERF_SP_GS_INSTRUCTIONS = 55,
+ PERF_SP_CS_INSTRUCTIONS = 56,
+ PERF_SP_GPR_READ = 57,
+ PERF_SP_GPR_WRITE = 58,
+ PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS = 59,
+ PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS = 60,
+ PERF_SP_LM_BANK_CONFLICTS = 61,
+ PERF_SP_TEX_CONTROL_WORKING_CYCLES = 62,
+ PERF_SP_LOAD_CONTROL_WORKING_CYCLES = 63,
+ PERF_SP_FLOW_CONTROL_WORKING_CYCLES = 64,
+ PERF_SP_LM_WORKING_CYCLES = 65,
+ PERF_SP_DISPATCHER_WORKING_CYCLES = 66,
+ PERF_SP_SEQUENCER_WORKING_CYCLES = 67,
+ PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP = 68,
+ PERF_SP_STARVE_CYCLES_HLSQ = 69,
+ PERF_SP_NON_EXECUTION_LS_CYCLES = 70,
+ PERF_SP_WORKING_EU = 71,
+ PERF_SP_ANY_EU_WORKING = 72,
+ PERF_SP_WORKING_EU_FS_STAGE = 73,
+ PERF_SP_ANY_EU_WORKING_FS_STAGE = 74,
+ PERF_SP_WORKING_EU_VS_STAGE = 75,
+ PERF_SP_ANY_EU_WORKING_VS_STAGE = 76,
+ PERF_SP_WORKING_EU_CS_STAGE = 77,
+ PERF_SP_ANY_EU_WORKING_CS_STAGE = 78,
+ PERF_SP_GPR_READ_PREFETCH = 79,
+ PERF_SP_GPR_READ_CONFLICT = 80,
+ PERF_SP_GPR_WRITE_CONFLICT = 81,
+ PERF_SP_GM_LOAD_LATENCY_CYCLES = 82,
+ PERF_SP_GM_LOAD_LATENCY_SAMPLES = 83,
+ PERF_SP_EXECUTABLE_WAVES = 84,
+};
+
+enum a6xx_rb_perfcounter_select {
+ PERF_RB_BUSY_CYCLES = 0,
+ PERF_RB_STALL_CYCLES_HLSQ = 1,
+ PERF_RB_STALL_CYCLES_FIFO0_FULL = 2,
+ PERF_RB_STALL_CYCLES_FIFO1_FULL = 3,
+ PERF_RB_STALL_CYCLES_FIFO2_FULL = 4,
+ PERF_RB_STARVE_CYCLES_SP = 5,
+ PERF_RB_STARVE_CYCLES_LRZ_TILE = 6,
+ PERF_RB_STARVE_CYCLES_CCU = 7,
+ PERF_RB_STARVE_CYCLES_Z_PLANE = 8,
+ PERF_RB_STARVE_CYCLES_BARY_PLANE = 9,
+ PERF_RB_Z_WORKLOAD = 10,
+ PERF_RB_HLSQ_ACTIVE = 11,
+ PERF_RB_Z_READ = 12,
+ PERF_RB_Z_WRITE = 13,
+ PERF_RB_C_READ = 14,
+ PERF_RB_C_WRITE = 15,
+ PERF_RB_TOTAL_PASS = 16,
+ PERF_RB_Z_PASS = 17,
+ PERF_RB_Z_FAIL = 18,
+ PERF_RB_S_FAIL = 19,
+ PERF_RB_BLENDED_FXP_COMPONENTS = 20,
+ PERF_RB_BLENDED_FP16_COMPONENTS = 21,
+ PERF_RB_PS_INVOCATIONS = 22,
+ PERF_RB_2D_ALIVE_CYCLES = 23,
+ PERF_RB_2D_STALL_CYCLES_A2D = 24,
+ PERF_RB_2D_STARVE_CYCLES_SRC = 25,
+ PERF_RB_2D_STARVE_CYCLES_SP = 26,
+ PERF_RB_2D_STARVE_CYCLES_DST = 27,
+ PERF_RB_2D_VALID_PIXELS = 28,
+ PERF_RB_3D_PIXELS = 29,
+ PERF_RB_BLENDER_WORKING_CYCLES = 30,
+ PERF_RB_ZPROC_WORKING_CYCLES = 31,
+ PERF_RB_CPROC_WORKING_CYCLES = 32,
+ PERF_RB_SAMPLER_WORKING_CYCLES = 33,
+ PERF_RB_STALL_CYCLES_CCU_COLOR_READ = 34,
+ PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE = 35,
+ PERF_RB_STALL_CYCLES_CCU_DEPTH_READ = 36,
+ PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE = 37,
+ PERF_RB_STALL_CYCLES_VPC = 38,
+ PERF_RB_2D_INPUT_TRANS = 39,
+ PERF_RB_2D_OUTPUT_RB_DST_TRANS = 40,
+ PERF_RB_2D_OUTPUT_RB_SRC_TRANS = 41,
+ PERF_RB_BLENDED_FP32_COMPONENTS = 42,
+ PERF_RB_COLOR_PIX_TILES = 43,
+ PERF_RB_STALL_CYCLES_CCU = 44,
+ PERF_RB_EARLY_Z_ARB3_GRANT = 45,
+ PERF_RB_LATE_Z_ARB3_GRANT = 46,
+ PERF_RB_EARLY_Z_SKIP_GRANT = 47,
+};
+
+enum a6xx_vsc_perfcounter_select {
+ PERF_VSC_BUSY_CYCLES = 0,
+ PERF_VSC_WORKING_CYCLES = 1,
+ PERF_VSC_STALL_CYCLES_UCHE = 2,
+ PERF_VSC_EOT_NUM = 3,
+ PERF_VSC_INPUT_TILES = 4,
+};
+
+enum a6xx_ccu_perfcounter_select {
+ PERF_CCU_BUSY_CYCLES = 0,
+ PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN = 1,
+ PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN = 2,
+ PERF_CCU_STARVE_CYCLES_FLAG_RETURN = 3,
+ PERF_CCU_DEPTH_BLOCKS = 4,
+ PERF_CCU_COLOR_BLOCKS = 5,
+ PERF_CCU_DEPTH_BLOCK_HIT = 6,
+ PERF_CCU_COLOR_BLOCK_HIT = 7,
+ PERF_CCU_PARTIAL_BLOCK_READ = 8,
+ PERF_CCU_GMEM_READ = 9,
+ PERF_CCU_GMEM_WRITE = 10,
+ PERF_CCU_DEPTH_READ_FLAG0_COUNT = 11,
+ PERF_CCU_DEPTH_READ_FLAG1_COUNT = 12,
+ PERF_CCU_DEPTH_READ_FLAG2_COUNT = 13,
+ PERF_CCU_DEPTH_READ_FLAG3_COUNT = 14,
+ PERF_CCU_DEPTH_READ_FLAG4_COUNT = 15,
+ PERF_CCU_DEPTH_READ_FLAG5_COUNT = 16,
+ PERF_CCU_DEPTH_READ_FLAG6_COUNT = 17,
+ PERF_CCU_DEPTH_READ_FLAG8_COUNT = 18,
+ PERF_CCU_COLOR_READ_FLAG0_COUNT = 19,
+ PERF_CCU_COLOR_READ_FLAG1_COUNT = 20,
+ PERF_CCU_COLOR_READ_FLAG2_COUNT = 21,
+ PERF_CCU_COLOR_READ_FLAG3_COUNT = 22,
+ PERF_CCU_COLOR_READ_FLAG4_COUNT = 23,
+ PERF_CCU_COLOR_READ_FLAG5_COUNT = 24,
+ PERF_CCU_COLOR_READ_FLAG6_COUNT = 25,
+ PERF_CCU_COLOR_READ_FLAG8_COUNT = 26,
+ PERF_CCU_2D_RD_REQ = 27,
+ PERF_CCU_2D_WR_REQ = 28,
+};
+
+enum a6xx_lrz_perfcounter_select {
+ PERF_LRZ_BUSY_CYCLES = 0,
+ PERF_LRZ_STARVE_CYCLES_RAS = 1,
+ PERF_LRZ_STALL_CYCLES_RB = 2,
+ PERF_LRZ_STALL_CYCLES_VSC = 3,
+ PERF_LRZ_STALL_CYCLES_VPC = 4,
+ PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH = 5,
+ PERF_LRZ_STALL_CYCLES_UCHE = 6,
+ PERF_LRZ_LRZ_READ = 7,
+ PERF_LRZ_LRZ_WRITE = 8,
+ PERF_LRZ_READ_LATENCY = 9,
+ PERF_LRZ_MERGE_CACHE_UPDATING = 10,
+ PERF_LRZ_PRIM_KILLED_BY_MASKGEN = 11,
+ PERF_LRZ_PRIM_KILLED_BY_LRZ = 12,
+ PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ = 13,
+ PERF_LRZ_FULL_8X8_TILES = 14,
+ PERF_LRZ_PARTIAL_8X8_TILES = 15,
+ PERF_LRZ_TILE_KILLED = 16,
+ PERF_LRZ_TOTAL_PIXEL = 17,
+ PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ = 18,
+ PERF_LRZ_FULLY_COVERED_TILES = 19,
+ PERF_LRZ_PARTIAL_COVERED_TILES = 20,
+ PERF_LRZ_FEEDBACK_ACCEPT = 21,
+ PERF_LRZ_FEEDBACK_DISCARD = 22,
+ PERF_LRZ_FEEDBACK_STALL = 23,
+ PERF_LRZ_STALL_CYCLES_RB_ZPLANE = 24,
+ PERF_LRZ_STALL_CYCLES_RB_BPLANE = 25,
+ PERF_LRZ_STALL_CYCLES_VC = 26,
+ PERF_LRZ_RAS_MASK_TRANS = 27,
+};
+
+enum a6xx_cmp_perfcounter_select {
+ PERF_CMPDECMP_STALL_CYCLES_ARB = 0,
+ PERF_CMPDECMP_VBIF_LATENCY_CYCLES = 1,
+ PERF_CMPDECMP_VBIF_LATENCY_SAMPLES = 2,
+ PERF_CMPDECMP_VBIF_READ_DATA_CCU = 3,
+ PERF_CMPDECMP_VBIF_WRITE_DATA_CCU = 4,
+ PERF_CMPDECMP_VBIF_READ_REQUEST = 5,
+ PERF_CMPDECMP_VBIF_WRITE_REQUEST = 6,
+ PERF_CMPDECMP_VBIF_READ_DATA = 7,
+ PERF_CMPDECMP_VBIF_WRITE_DATA = 8,
+ PERF_CMPDECMP_FLAG_FETCH_CYCLES = 9,
+ PERF_CMPDECMP_FLAG_FETCH_SAMPLES = 10,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT = 11,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT = 12,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT = 13,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT = 14,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT = 15,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT = 16,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT = 17,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT = 18,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT = 19,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT = 20,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT = 21,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT = 22,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT = 23,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT = 24,
+ PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ = 25,
+ PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR = 26,
+ PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN = 27,
+ PERF_CMPDECMP_2D_RD_DATA = 28,
+ PERF_CMPDECMP_2D_WR_DATA = 29,
+ PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0 = 30,
+ PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1 = 31,
+ PERF_CMPDECMP_2D_OUTPUT_TRANS = 32,
+ PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE = 33,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT = 34,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT = 35,
+ PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT = 36,
+ PERF_CMPDECMP_2D_BUSY_CYCLES = 37,
+ PERF_CMPDECMP_2D_REORDER_STARVE_CYCLES = 38,
+ PERF_CMPDECMP_2D_PIXELS = 39,
+};
+
+enum a6xx_2d_ifmt {
+ R2D_UNORM8 = 16,
+ R2D_INT32 = 7,
+ R2D_INT16 = 6,
+ R2D_INT8 = 5,
+ R2D_FLOAT32 = 4,
+ R2D_FLOAT16 = 3,
+ R2D_UNORM8_SRGB = 1,
+ R2D_RAW = 0,
+};
+
+enum a6xx_ztest_mode {
+ A6XX_EARLY_Z = 0,
+ A6XX_LATE_Z = 1,
+ A6XX_EARLY_LRZ_LATE_Z = 2,
+};
+
+enum a6xx_sequenced_thread_dist {
+ DIST_SCREEN_COORD = 0,
+ DIST_ALL_TO_RB0 = 1,
+};
+
+enum a6xx_single_prim_mode {
+ NO_FLUSH = 0,
+ FLUSH_PER_OVERLAP_AND_OVERWRITE = 1,
+ FLUSH_PER_OVERLAP = 3,
+};
+
+enum a6xx_raster_mode {
+ TYPE_TILED = 0,
+ TYPE_WRITER = 1,
+};
+
+enum a6xx_raster_direction {
+ LR_TB = 0,
+ RL_TB = 1,
+ LR_BT = 2,
+ RB_BT = 3,
+};
+
+enum a6xx_render_mode {
+ RENDERING_PASS = 0,
+ BINNING_PASS = 1,
+};
+
+enum a6xx_buffers_location {
+ BUFFERS_IN_GMEM = 0,
+ BUFFERS_IN_SYSMEM = 3,
+};
+
+enum a6xx_fragcoord_sample_mode {
+ FRAGCOORD_CENTER = 0,
+ FRAGCOORD_SAMPLE = 3,
+};
+
+enum a6xx_rotation {
+ ROTATE_0 = 0,
+ ROTATE_90 = 1,
+ ROTATE_180 = 2,
+ ROTATE_270 = 3,
+ ROTATE_HFLIP = 4,
+ ROTATE_VFLIP = 5,
+};
+
+enum a6xx_tess_spacing {
+ TESS_EQUAL = 0,
+ TESS_FRACTIONAL_ODD = 2,
+ TESS_FRACTIONAL_EVEN = 3,
+};
+
+enum a6xx_tess_output {
+ TESS_POINTS = 0,
+ TESS_LINES = 1,
+ TESS_CW_TRIS = 2,
+ TESS_CCW_TRIS = 3,
+};
+
+enum a6xx_threadsize {
+ THREAD64 = 0,
+ THREAD128 = 1,
+};
+
+enum a6xx_isam_mode {
+ ISAMMODE_GL = 2,
+};
+
+enum a6xx_tex_filter {
+ A6XX_TEX_NEAREST = 0,
+ A6XX_TEX_LINEAR = 1,
+ A6XX_TEX_ANISO = 2,
+ A6XX_TEX_CUBIC = 3,
+};
+
+enum a6xx_tex_clamp {
+ A6XX_TEX_REPEAT = 0,
+ A6XX_TEX_CLAMP_TO_EDGE = 1,
+ A6XX_TEX_MIRROR_REPEAT = 2,
+ A6XX_TEX_CLAMP_TO_BORDER = 3,
+ A6XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a6xx_tex_aniso {
+ A6XX_TEX_ANISO_1 = 0,
+ A6XX_TEX_ANISO_2 = 1,
+ A6XX_TEX_ANISO_4 = 2,
+ A6XX_TEX_ANISO_8 = 3,
+ A6XX_TEX_ANISO_16 = 4,
+};
+
+enum a6xx_reduction_mode {
+ A6XX_REDUCTION_MODE_AVERAGE = 0,
+ A6XX_REDUCTION_MODE_MIN = 1,
+ A6XX_REDUCTION_MODE_MAX = 2,
+};
+
+enum a6xx_tex_swiz {
+ A6XX_TEX_X = 0,
+ A6XX_TEX_Y = 1,
+ A6XX_TEX_Z = 2,
+ A6XX_TEX_W = 3,
+ A6XX_TEX_ZERO = 4,
+ A6XX_TEX_ONE = 5,
+};
+
+enum a6xx_tex_type {
+ A6XX_TEX_1D = 0,
+ A6XX_TEX_2D = 1,
+ A6XX_TEX_CUBE = 2,
+ A6XX_TEX_3D = 3,
+ A6XX_TEX_BUFFER = 4,
+};
+
+#define A6XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001
+#define A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR 0x00000002
+#define A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW 0x00000040
+#define A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080
+#define A6XX_RBBM_INT_0_MASK_CP_SW 0x00000100
+#define A6XX_RBBM_INT_0_MASK_CP_HW_ERROR 0x00000200
+#define A6XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A6XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A6XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS 0x00001000
+#define A6XX_RBBM_INT_0_MASK_CP_IB2 0x00002000
+#define A6XX_RBBM_INT_0_MASK_CP_IB1 0x00004000
+#define A6XX_RBBM_INT_0_MASK_CP_RB 0x00008000
+#define A6XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000
+#define A6XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000
+#define A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000
+#define A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT 0x00800000
+#define A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000
+#define A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000
+#define A6XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000
+#define A6XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000
+#define A6XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000
+#define A6XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000
+#define A6XX_CP_INT_CP_OPCODE_ERROR 0x00000001
+#define A6XX_CP_INT_CP_UCODE_ERROR 0x00000002
+#define A6XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004
+#define A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010
+#define A6XX_CP_INT_CP_AHB_ERROR 0x00000020
+#define A6XX_CP_INT_CP_VSD_PARITY_ERROR 0x00000040
+#define A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR 0x00000080
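+
+/* Illustrative sketch (editor's note, not part of the generated header):
+ * the A6XX_RBBM_INT_0_MASK_* bits above are single-bit flags meant to be
+ * OR'd together and written to REG_A6XX_RBBM_INT_0_MASK (defined further
+ * below) to unmask the corresponding interrupt sources. A hypothetical
+ * minimal mask, assuming the gpu_write() helper from msm_gpu.h:
+ *
+ *   u32 demo_int_mask = A6XX_RBBM_INT_0_MASK_CP_HW_ERROR |
+ *                       A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT |
+ *                       A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS;
+ *   gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, demo_int_mask);
+ */
+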
+#define REG_A6XX_CP_RB_BASE 0x00000800
+
+#define REG_A6XX_CP_RB_BASE_HI 0x00000801
+
+#define REG_A6XX_CP_RB_CNTL 0x00000802
+
+#define REG_A6XX_CP_RB_RPTR_ADDR_LO 0x00000804
+
+#define REG_A6XX_CP_RB_RPTR_ADDR_HI 0x00000805
+
+#define REG_A6XX_CP_RB_RPTR 0x00000806
+
+#define REG_A6XX_CP_RB_WPTR 0x00000807
+
+#define REG_A6XX_CP_SQE_CNTL 0x00000808
+
+#define REG_A6XX_CP_CP2GMU_STATUS 0x00000812
+#define A6XX_CP_CP2GMU_STATUS_IFPC 0x00000001
+
+#define REG_A6XX_CP_HW_FAULT 0x00000821
+
+#define REG_A6XX_CP_INTERRUPT_STATUS 0x00000823
+
+#define REG_A6XX_CP_PROTECT_STATUS 0x00000824
+
+#define REG_A6XX_CP_SQE_INSTR_BASE 0x00000830
+
+#define REG_A6XX_CP_MISC_CNTL 0x00000840
+
+#define REG_A6XX_CP_APRIV_CNTL 0x00000844
+
+#define REG_A6XX_CP_ROQ_THRESHOLDS_1 0x000008c1
+#define A6XX_CP_ROQ_THRESHOLDS_1_RB_LO__MASK 0x000000ff
+#define A6XX_CP_ROQ_THRESHOLDS_1_RB_LO__SHIFT 0
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_RB_LO(uint32_t val)
+{
+ return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_RB_LO__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_RB_LO__MASK;
+}
+#define A6XX_CP_ROQ_THRESHOLDS_1_RB_HI__MASK 0x0000ff00
+#define A6XX_CP_ROQ_THRESHOLDS_1_RB_HI__SHIFT 8
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_RB_HI(uint32_t val)
+{
+ return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_RB_HI__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_RB_HI__MASK;
+}
+#define A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__MASK 0x00ff0000
+#define A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__SHIFT 16
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_IB1_START(uint32_t val)
+{
+ return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__MASK;
+}
+#define A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__MASK 0xff000000
+#define A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__SHIFT 24
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_IB2_START(uint32_t val)
+{
+ return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__MASK;
+}
+
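+/* Illustrative note (editor's note, not part of the generated header):
+ * every bitfield in this file follows the same pattern: a __MASK define,
+ * a __SHIFT define, and a static inline packing helper that shifts the
+ * caller's value into place and masks it. The ROQ threshold helpers
+ * above additionally pre-shift the input right by two, so the value
+ * stored in the hardware field is the caller's value divided by four.
+ * A hedged example combining two fields of the same register, assuming
+ * gpu_write() from msm_gpu.h:
+ *
+ *   u32 val = A6XX_CP_ROQ_THRESHOLDS_1_RB_LO(0x20) |
+ *             A6XX_CP_ROQ_THRESHOLDS_1_RB_HI(0x60);
+ *   gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, val);
+ */
+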
+#define REG_A6XX_CP_ROQ_THRESHOLDS_2 0x000008c2
+#define A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__MASK 0x000001ff
+#define A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__SHIFT 0
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_2_SDS_START(uint32_t val)
+{
+ return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__MASK;
+}
+#define A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__MASK 0xffff0000
+#define A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__SHIFT 16
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE(uint32_t val)
+{
+ return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__MASK;
+}
+
+#define REG_A6XX_CP_MEM_POOL_SIZE 0x000008c3
+
+#define REG_A6XX_CP_CHICKEN_DBG 0x00000841
+
+#define REG_A6XX_CP_ADDR_MODE_CNTL 0x00000842
+
+#define REG_A6XX_CP_DBG_ECO_CNTL 0x00000843
+
+#define REG_A6XX_CP_PROTECT_CNTL 0x0000084f
+
+static inline uint32_t REG_A6XX_CP_SCRATCH(uint32_t i0) { return 0x00000883 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000883 + 0x1*i0; }
+
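+/* Illustrative sketch (editor's note, not part of the generated header):
+ * helpers taking an i0 argument describe register arrays; the return
+ * value is the offset of element i0 (here with a stride of one
+ * register). E.g. writing the third CP scratch register, assuming
+ * gpu_write() from msm_gpu.h:
+ *
+ *   gpu_write(gpu, REG_A6XX_CP_SCRATCH_REG(2), 0xdeadbeef);
+ */
+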
+static inline uint32_t REG_A6XX_CP_PROTECT(uint32_t i0) { return 0x00000850 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000850 + 0x1*i0; }
+#define A6XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0003ffff
+#define A6XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A6XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A6XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A6XX_CP_PROTECT_REG_MASK_LEN__MASK 0x7ffc0000
+#define A6XX_CP_PROTECT_REG_MASK_LEN__SHIFT 18
+static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A6XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A6XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A6XX_CP_PROTECT_REG_READ 0x80000000
+
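+/* Illustrative sketch (editor's note, not part of the generated header):
+ * a CP_PROTECT entry packs a base register offset into BASE_ADDR, a
+ * range length into MASK_LEN (the precise length encoding follows the
+ * hardware documentation), plus the A6XX_CP_PROTECT_REG_READ flag bit.
+ * A hypothetical entry for a range starting at register offset 0x800,
+ * assuming gpu_write() from msm_gpu.h:
+ *
+ *   u32 entry = A6XX_CP_PROTECT_REG_BASE_ADDR(0x800) |
+ *               A6XX_CP_PROTECT_REG_MASK_LEN(0x100) |
+ *               A6XX_CP_PROTECT_REG_READ;
+ *   gpu_write(gpu, REG_A6XX_CP_PROTECT_REG(0), entry);
+ */
+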
+#define REG_A6XX_CP_CONTEXT_SWITCH_CNTL 0x000008a0
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO 0x000008a1
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI 0x000008a2
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO 0x000008a3
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI 0x000008a4
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO 0x000008a5
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI 0x000008a6
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO 0x000008a7
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI 0x000008a8
+
+static inline uint32_t REG_A6XX_CP_PERFCTR_CP_SEL(uint32_t i0) { return 0x000008d0 + 0x1*i0; }
+
+#define REG_A6XX_CP_CRASH_SCRIPT_BASE_LO 0x00000900
+
+#define REG_A6XX_CP_CRASH_SCRIPT_BASE_HI 0x00000901
+
+#define REG_A6XX_CP_CRASH_DUMP_CNTL 0x00000902
+
+#define REG_A6XX_CP_CRASH_DUMP_STATUS 0x00000903
+
+#define REG_A6XX_CP_SQE_STAT_ADDR 0x00000908
+
+#define REG_A6XX_CP_SQE_STAT_DATA 0x00000909
+
+#define REG_A6XX_CP_DRAW_STATE_ADDR 0x0000090a
+
+#define REG_A6XX_CP_DRAW_STATE_DATA 0x0000090b
+
+#define REG_A6XX_CP_ROQ_DBG_ADDR 0x0000090c
+
+#define REG_A6XX_CP_ROQ_DBG_DATA 0x0000090d
+
+#define REG_A6XX_CP_MEM_POOL_DBG_ADDR 0x0000090e
+
+#define REG_A6XX_CP_MEM_POOL_DBG_DATA 0x0000090f
+
+#define REG_A6XX_CP_SQE_UCODE_DBG_ADDR 0x00000910
+
+#define REG_A6XX_CP_SQE_UCODE_DBG_DATA 0x00000911
+
+#define REG_A6XX_CP_IB1_BASE 0x00000928
+
+#define REG_A6XX_CP_IB1_BASE_HI 0x00000929
+
+#define REG_A6XX_CP_IB1_REM_SIZE 0x0000092a
+
+#define REG_A6XX_CP_IB2_BASE 0x0000092b
+
+#define REG_A6XX_CP_IB2_BASE_HI 0x0000092c
+
+#define REG_A6XX_CP_IB2_REM_SIZE 0x0000092d
+
+#define REG_A6XX_CP_SDS_BASE 0x0000092e
+
+#define REG_A6XX_CP_SDS_BASE_HI 0x0000092f
+
+#define REG_A6XX_CP_SDS_REM_SIZE 0x00000930
+
+#define REG_A6XX_CP_MRB_BASE 0x00000931
+
+#define REG_A6XX_CP_MRB_BASE_HI 0x00000932
+
+#define REG_A6XX_CP_MRB_REM_SIZE 0x00000933
+
+#define REG_A6XX_CP_VSD_BASE 0x00000934
+
+#define REG_A6XX_CP_VSD_BASE_HI 0x00000935
+
+#define REG_A6XX_CP_MRB_DWORDS 0x00000946
+
+#define REG_A6XX_CP_VSD_DWORDS 0x00000947
+
+#define REG_A6XX_CP_CSQ_IB1_STAT 0x00000949
+#define A6XX_CP_CSQ_IB1_STAT_REM__MASK 0xffff0000
+#define A6XX_CP_CSQ_IB1_STAT_REM__SHIFT 16
+static inline uint32_t A6XX_CP_CSQ_IB1_STAT_REM(uint32_t val)
+{
+ return ((val) << A6XX_CP_CSQ_IB1_STAT_REM__SHIFT) & A6XX_CP_CSQ_IB1_STAT_REM__MASK;
+}
+
+#define REG_A6XX_CP_CSQ_IB2_STAT 0x0000094a
+#define A6XX_CP_CSQ_IB2_STAT_REM__MASK 0xffff0000
+#define A6XX_CP_CSQ_IB2_STAT_REM__SHIFT 16
+static inline uint32_t A6XX_CP_CSQ_IB2_STAT_REM(uint32_t val)
+{
+ return ((val) << A6XX_CP_CSQ_IB2_STAT_REM__SHIFT) & A6XX_CP_CSQ_IB2_STAT_REM__MASK;
+}
+
+#define REG_A6XX_CP_MRQ_MRB_STAT 0x0000094c
+#define A6XX_CP_MRQ_MRB_STAT_REM__MASK 0xffff0000
+#define A6XX_CP_MRQ_MRB_STAT_REM__SHIFT 16
+static inline uint32_t A6XX_CP_MRQ_MRB_STAT_REM(uint32_t val)
+{
+ return ((val) << A6XX_CP_MRQ_MRB_STAT_REM__SHIFT) & A6XX_CP_MRQ_MRB_STAT_REM__MASK;
+}
+
+#define REG_A6XX_CP_ALWAYS_ON_COUNTER_LO 0x00000980
+
+#define REG_A6XX_CP_ALWAYS_ON_COUNTER_HI 0x00000981
+
+#define REG_A6XX_CP_AHB_CNTL 0x0000098d
+
+#define REG_A6XX_CP_APERTURE_CNTL_HOST 0x00000a00
+
+#define REG_A6XX_CP_APERTURE_CNTL_CD 0x00000a03
+
+#define REG_A6XX_CP_LPAC_PROG_FIFO_SIZE 0x00000b34
+
+#define REG_A6XX_CP_LPAC_SQE_INSTR_BASE 0x00000b82
+
+#define REG_A6XX_VSC_ADDR_MODE_CNTL 0x00000c01
+
+#define REG_A6XX_RBBM_INT_0_STATUS 0x00000201
+
+#define REG_A6XX_RBBM_STATUS 0x00000210
+#define A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB 0x00800000
+#define A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP 0x00400000
+#define A6XX_RBBM_STATUS_HLSQ_BUSY 0x00200000
+#define A6XX_RBBM_STATUS_VSC_BUSY 0x00100000
+#define A6XX_RBBM_STATUS_TPL1_BUSY 0x00080000
+#define A6XX_RBBM_STATUS_SP_BUSY 0x00040000
+#define A6XX_RBBM_STATUS_UCHE_BUSY 0x00020000
+#define A6XX_RBBM_STATUS_VPC_BUSY 0x00010000
+#define A6XX_RBBM_STATUS_VFD_BUSY 0x00008000
+#define A6XX_RBBM_STATUS_TESS_BUSY 0x00004000
+#define A6XX_RBBM_STATUS_PC_VSD_BUSY 0x00002000
+#define A6XX_RBBM_STATUS_PC_DCALL_BUSY 0x00001000
+#define A6XX_RBBM_STATUS_COM_DCOM_BUSY 0x00000800
+#define A6XX_RBBM_STATUS_LRZ_BUSY 0x00000400
+#define A6XX_RBBM_STATUS_A2D_BUSY 0x00000200
+#define A6XX_RBBM_STATUS_CCU_BUSY 0x00000100
+#define A6XX_RBBM_STATUS_RB_BUSY 0x00000080
+#define A6XX_RBBM_STATUS_RAS_BUSY 0x00000040
+#define A6XX_RBBM_STATUS_TSE_BUSY 0x00000020
+#define A6XX_RBBM_STATUS_VBIF_BUSY 0x00000010
+#define A6XX_RBBM_STATUS_GFX_DBGC_BUSY 0x00000008
+#define A6XX_RBBM_STATUS_CP_BUSY 0x00000004
+#define A6XX_RBBM_STATUS_CP_AHB_BUSY_CP_MASTER 0x00000002
+#define A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER 0x00000001
+
+#define REG_A6XX_RBBM_STATUS3 0x00000213
+#define A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT 0x01000000
+
+#define REG_A6XX_RBBM_VBIF_GX_RESET_STATUS 0x00000215
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_CP(uint32_t i0) { return 0x00000400 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM(uint32_t i0) { return 0x0000041c + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_PC(uint32_t i0) { return 0x00000424 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_VFD(uint32_t i0) { return 0x00000434 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_HLSQ(uint32_t i0) { return 0x00000444 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_VPC(uint32_t i0) { return 0x00000450 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_CCU(uint32_t i0) { return 0x0000045c + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_TSE(uint32_t i0) { return 0x00000466 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_RAS(uint32_t i0) { return 0x0000046e + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_UCHE(uint32_t i0) { return 0x00000476 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_TP(uint32_t i0) { return 0x0000048e + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_SP(uint32_t i0) { return 0x000004a6 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_RB(uint32_t i0) { return 0x000004d6 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_VSC(uint32_t i0) { return 0x000004e6 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_LRZ(uint32_t i0) { return 0x000004ea + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_CMP(uint32_t i0) { return 0x000004f2 + 0x2*i0; }
+
+#define REG_A6XX_RBBM_PERFCTR_CNTL 0x00000500
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD0 0x00000501
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD1 0x00000502
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD2 0x00000503
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD3 0x00000504
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000505
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000506
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00000507 + 0x1*i0; }
+
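+/* Illustrative sketch (editor's note, not part of the generated header):
+ * to sample a CP performance counter, a select code from enum
+ * a6xx_cp_perfcounter_select is written to one of the
+ * REG_A6XX_CP_PERFCTR_CP_SEL(i) registers, and the 64-bit result is read
+ * back from the matching REG_A6XX_RBBM_PERFCTR_CP(i) LO/HI pair (note
+ * the 0x2 stride above). A hedged example, assuming gpu_write() and
+ * gpu_read() from msm_gpu.h:
+ *
+ *   u32 lo, hi;
+ *   u64 busy;
+ *   gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL(0), PERF_CP_BUSY_CYCLES);
+ *   lo = gpu_read(gpu, REG_A6XX_RBBM_PERFCTR_CP(0));
+ *   hi = gpu_read(gpu, REG_A6XX_RBBM_PERFCTR_CP(0) + 1);
+ *   busy = ((u64)hi << 32) | lo;
+ */
+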
+#define REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000050b
+
+#define REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD 0x0000050e
+
+#define REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS 0x0000050f
+
+#define REG_A6XX_RBBM_ISDB_CNT 0x00000533
+
+#define REG_A6XX_RBBM_PRIMCTR_0_LO 0x00000540
+
+#define REG_A6XX_RBBM_PRIMCTR_0_HI 0x00000541
+
+#define REG_A6XX_RBBM_PRIMCTR_1_LO 0x00000542
+
+#define REG_A6XX_RBBM_PRIMCTR_1_HI 0x00000543
+
+#define REG_A6XX_RBBM_PRIMCTR_2_LO 0x00000544
+
+#define REG_A6XX_RBBM_PRIMCTR_2_HI 0x00000545
+
+#define REG_A6XX_RBBM_PRIMCTR_3_LO 0x00000546
+
+#define REG_A6XX_RBBM_PRIMCTR_3_HI 0x00000547
+
+#define REG_A6XX_RBBM_PRIMCTR_4_LO 0x00000548
+
+#define REG_A6XX_RBBM_PRIMCTR_4_HI 0x00000549
+
+#define REG_A6XX_RBBM_PRIMCTR_5_LO 0x0000054a
+
+#define REG_A6XX_RBBM_PRIMCTR_5_HI 0x0000054b
+
+#define REG_A6XX_RBBM_PRIMCTR_6_LO 0x0000054c
+
+#define REG_A6XX_RBBM_PRIMCTR_6_HI 0x0000054d
+
+#define REG_A6XX_RBBM_PRIMCTR_7_LO 0x0000054e
+
+#define REG_A6XX_RBBM_PRIMCTR_7_HI 0x0000054f
+
+#define REG_A6XX_RBBM_PRIMCTR_8_LO 0x00000550
+
+#define REG_A6XX_RBBM_PRIMCTR_8_HI 0x00000551
+
+#define REG_A6XX_RBBM_PRIMCTR_9_LO 0x00000552
+
+#define REG_A6XX_RBBM_PRIMCTR_9_HI 0x00000553
+
+#define REG_A6XX_RBBM_PRIMCTR_10_LO 0x00000554
+
+#define REG_A6XX_RBBM_PRIMCTR_10_HI 0x00000555
+
+#define REG_A6XX_RBBM_SECVID_TRUST_CNTL 0x0000f400
+
+#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO 0x0000f800
+
+#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI 0x0000f801
+
+#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0x0000f802
+
+#define REG_A6XX_RBBM_SECVID_TSB_CNTL 0x0000f803
+
+#define REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810
+
+#define REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL 0x00000010
+
+#define REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL 0x00000011
+
+#define REG_A6XX_RBBM_GBIF_HALT 0x00000016
+
+#define REG_A6XX_RBBM_GBIF_HALT_ACK 0x00000017
+
+#define REG_A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD 0x0000001c
+#define A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD_WAIT_GPU_IDLE 0x00000001
+
+#define REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000001f
+
+#define REG_A6XX_RBBM_INT_CLEAR_CMD 0x00000037
+
+#define REG_A6XX_RBBM_INT_0_MASK 0x00000038
+
+#define REG_A6XX_RBBM_SP_HYST_CNT 0x00000042
+
+#define REG_A6XX_RBBM_SW_RESET_CMD 0x00000043
+
+#define REG_A6XX_RBBM_RAC_THRESHOLD_CNT 0x00000044
+
+#define REG_A6XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
+
+#define REG_A6XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046
+
+#define REG_A6XX_RBBM_CLOCK_CNTL 0x000000ae
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP0 0x000000b0
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP1 0x000000b1
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP2 0x000000b2
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP3 0x000000b3
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP0 0x000000b4
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP1 0x000000b5
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP2 0x000000b6
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP3 0x000000b7
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP0 0x000000b8
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP1 0x000000b9
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP2 0x000000ba
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP3 0x000000bb
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP0 0x000000bc
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP1 0x000000bd
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP2 0x000000be
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP3 0x000000bf
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP0 0x000000c0
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP1 0x000000c1
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP2 0x000000c2
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP3 0x000000c3
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP0 0x000000c4
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP1 0x000000c5
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP2 0x000000c6
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP3 0x000000c7
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP0 0x000000c8
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP1 0x000000c9
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP2 0x000000ca
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP3 0x000000cb
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP0 0x000000cc
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP1 0x000000cd
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP2 0x000000ce
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP3 0x000000cf
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP0 0x000000d0
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP1 0x000000d1
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP2 0x000000d2
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP3 0x000000d3
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP0 0x000000d4
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP1 0x000000d5
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP2 0x000000d6
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP3 0x000000d7
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP0 0x000000d8
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP1 0x000000d9
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP2 0x000000da
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP3 0x000000db
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP0 0x000000dc
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP1 0x000000dd
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP2 0x000000de
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP3 0x000000df
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP0 0x000000e0
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP1 0x000000e1
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP2 0x000000e2
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP3 0x000000e3
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP0 0x000000e4
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP1 0x000000e5
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP2 0x000000e6
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP3 0x000000e7
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP0 0x000000e8
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP1 0x000000e9
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP2 0x000000ea
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP3 0x000000eb
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP0 0x000000ec
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP1 0x000000ed
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP2 0x000000ee
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP3 0x000000ef
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB0 0x000000f0
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB1 0x000000f1
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB2 0x000000f2
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB3 0x000000f3
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB0 0x000000f4
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB1 0x000000f5
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB2 0x000000f6
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB3 0x000000f7
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU0 0x000000f8
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU1 0x000000f9
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU2 0x000000fa
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU3 0x000000fb
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0 0x00000100
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1 0x00000101
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2 0x00000102
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3 0x00000103
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RAC 0x00000104
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RAC 0x00000105
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_RAC 0x00000106
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RAC 0x00000107
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00000108
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00000109
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x0000010a
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_UCHE 0x0000010b
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_UCHE 0x0000010c
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_UCHE 0x0000010d
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_UCHE 0x0000010e
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_UCHE 0x0000010f
+
+#define REG_A6XX_RBBM_CLOCK_HYST_UCHE 0x00000110
+
+#define REG_A6XX_RBBM_CLOCK_MODE_VFD 0x00000111
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_VFD 0x00000112
+
+#define REG_A6XX_RBBM_CLOCK_HYST_VFD 0x00000113
+
+#define REG_A6XX_RBBM_CLOCK_MODE_GPC 0x00000114
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_GPC 0x00000115
+
+#define REG_A6XX_RBBM_CLOCK_HYST_GPC 0x00000116
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2 0x00000117
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX 0x00000118
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX 0x00000119
+
+#define REG_A6XX_RBBM_CLOCK_HYST_GMU_GX 0x0000011a
+
+#define REG_A6XX_RBBM_CLOCK_MODE_HLSQ 0x0000011b
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_HLSQ 0x0000011c
+
+#define REG_A6XX_RBBM_CLOCK_HYST_HLSQ 0x0000011d
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE 0x00000120
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE 0x00000121
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE 0x00000122
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_A 0x00000600
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_B 0x00000601
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_C 0x00000602
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_D 0x00000603
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK 0x000000ff
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT) & A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK 0x0000ff00
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT 8
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT) & A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_CNTLT 0x00000604
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_CNTLM 0x00000605
+#define A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
+#define A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0 0x00000608
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1 0x00000609
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2 0x0000060a
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3 0x0000060b
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0 0x0000060c
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1 0x0000060d
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2 0x0000060e
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3 0x0000060f
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0 0x00000610
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1 0x00000611
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0000062f
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000630
+
+static inline uint32_t REG_A6XX_VSC_PERFCTR_VSC_SEL(uint32_t i0) { return 0x00000cd8 + 0x1*i0; }
+
+#define REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000c800
+
+#define REG_A6XX_HLSQ_DBG_READ_SEL 0x0000d000
+
+#define REG_A6XX_UCHE_ADDR_MODE_CNTL 0x00000e00
+
+#define REG_A6XX_UCHE_MODE_CNTL 0x00000e01
+
+#define REG_A6XX_UCHE_WRITE_RANGE_MAX_LO 0x00000e05
+
+#define REG_A6XX_UCHE_WRITE_RANGE_MAX_HI 0x00000e06
+
+#define REG_A6XX_UCHE_WRITE_THRU_BASE_LO 0x00000e07
+
+#define REG_A6XX_UCHE_WRITE_THRU_BASE_HI 0x00000e08
+
+#define REG_A6XX_UCHE_TRAP_BASE_LO 0x00000e09
+
+#define REG_A6XX_UCHE_TRAP_BASE_HI 0x00000e0a
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MIN_LO 0x00000e0b
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MIN_HI 0x00000e0c
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MAX_LO 0x00000e0d
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MAX_HI 0x00000e0e
+
+#define REG_A6XX_UCHE_CACHE_WAYS 0x00000e17
+
+#define REG_A6XX_UCHE_FILTER_CNTL 0x00000e18
+
+#define REG_A6XX_UCHE_CLIENT_PF 0x00000e19
+#define A6XX_UCHE_CLIENT_PF_PERFSEL__MASK 0x000000ff
+#define A6XX_UCHE_CLIENT_PF_PERFSEL__SHIFT 0
+static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val)
+{
+ return ((val) << A6XX_UCHE_CLIENT_PF_PERFSEL__SHIFT) & A6XX_UCHE_CLIENT_PF_PERFSEL__MASK;
+}
+
+static inline uint32_t REG_A6XX_UCHE_PERFCTR_UCHE_SEL(uint32_t i0) { return 0x00000e1c + 0x1*i0; }
+
+#define REG_A6XX_UCHE_CMDQ_CONFIG 0x00000e3c
+
+#define REG_A6XX_VBIF_VERSION 0x00003000
+
+#define REG_A6XX_VBIF_CLKON 0x00003001
+#define A6XX_VBIF_CLKON_FORCE_ON_TESTBUS 0x00000002
+
+#define REG_A6XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A6XX_VBIF_XIN_HALT_CTRL0 0x00003080
+
+#define REG_A6XX_VBIF_XIN_HALT_CTRL1 0x00003081
+
+#define REG_A6XX_VBIF_TEST_BUS_OUT_CTRL 0x00003084
+
+#define REG_A6XX_VBIF_TEST_BUS1_CTRL0 0x00003085
+
+#define REG_A6XX_VBIF_TEST_BUS1_CTRL1 0x00003086
+#define A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__MASK 0x0000000f
+#define A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__SHIFT 0
+static inline uint32_t A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL(uint32_t val)
+{
+ return ((val) << A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__SHIFT) & A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__MASK;
+}
+
+#define REG_A6XX_VBIF_TEST_BUS2_CTRL0 0x00003087
+
+#define REG_A6XX_VBIF_TEST_BUS2_CTRL1 0x00003088
+#define A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__MASK 0x000001ff
+#define A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__SHIFT 0
+static inline uint32_t A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL(uint32_t val)
+{
+ return ((val) << A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__SHIFT) & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__MASK;
+}
+
+#define REG_A6XX_VBIF_TEST_BUS_OUT 0x0000308c
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL0 0x000030d0
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL1 0x000030d1
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL2 0x000030d2
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL3 0x000030d3
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW0 0x000030d8
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW1 0x000030d9
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW2 0x000030da
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW3 0x000030db
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH0 0x000030e0
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH1 0x000030e1
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH2 0x000030e2
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH3 0x000030e3
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW1 0x00003111
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW2 0x00003112
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH0 0x00003118
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
+
+#define REG_A6XX_GBIF_SCACHE_CNTL0 0x00003c01
+
+#define REG_A6XX_GBIF_SCACHE_CNTL1 0x00003c02
+
+#define REG_A6XX_GBIF_QSB_SIDE0 0x00003c03
+
+#define REG_A6XX_GBIF_QSB_SIDE1 0x00003c04
+
+#define REG_A6XX_GBIF_QSB_SIDE2 0x00003c05
+
+#define REG_A6XX_GBIF_QSB_SIDE3 0x00003c06
+
+#define REG_A6XX_GBIF_HALT 0x00003c45
+
+#define REG_A6XX_GBIF_HALT_ACK 0x00003c46
+
+#define REG_A6XX_GBIF_PERF_PWR_CNT_EN 0x00003cc0
+
+#define REG_A6XX_GBIF_PERF_CNT_SEL 0x00003cc2
+
+#define REG_A6XX_GBIF_PERF_PWR_CNT_SEL 0x00003cc3
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW0 0x00003cc4
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW1 0x00003cc5
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW2 0x00003cc6
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW3 0x00003cc7
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH0 0x00003cc8
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH1 0x00003cc9
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH2 0x00003cca
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH3 0x00003ccb
+
+#define REG_A6XX_GBIF_PWR_CNT_LOW0 0x00003ccc
+
+#define REG_A6XX_GBIF_PWR_CNT_LOW1 0x00003ccd
+
+#define REG_A6XX_GBIF_PWR_CNT_LOW2 0x00003cce
+
+#define REG_A6XX_GBIF_PWR_CNT_HIGH0 0x00003ccf
+
+#define REG_A6XX_GBIF_PWR_CNT_HIGH1 0x00003cd0
+
+#define REG_A6XX_GBIF_PWR_CNT_HIGH2 0x00003cd1
+
+#define REG_A6XX_VSC_DBG_ECO_CNTL 0x00000c00
+
+#define REG_A6XX_VSC_BIN_SIZE 0x00000c02
+#define A6XX_VSC_BIN_SIZE_WIDTH__MASK 0x000000ff
+#define A6XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A6XX_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A6XX_VSC_BIN_SIZE_HEIGHT__MASK 0x0001ff00
+#define A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT 8
+static inline uint32_t A6XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 4) << A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A6XX_VSC_BIN_SIZE_HEIGHT__MASK;
+}
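+
+/*
+ * Editorial note: the WIDTH/HEIGHT helpers above take the bin size in
+ * pixels and shift right by 5/4 because the hardware fields are stored in
+ * units of 32 and 16 pixels, so callers pass aligned pixel dimensions:
+ *
+ *   uint32_t bin_size = A6XX_VSC_BIN_SIZE_WIDTH(binw) |
+ *                       A6XX_VSC_BIN_SIZE_HEIGHT(binh);
+ *
+ * where binw is a multiple of 32 and binh a multiple of 16 pixels.
+ */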
+
+#define REG_A6XX_VSC_DRAW_STRM_SIZE_ADDRESS 0x00000c03
+
+#define REG_A6XX_VSC_BIN_COUNT 0x00000c06
+#define A6XX_VSC_BIN_COUNT_NX__MASK 0x000007fe
+#define A6XX_VSC_BIN_COUNT_NX__SHIFT 1
+static inline uint32_t A6XX_VSC_BIN_COUNT_NX(uint32_t val)
+{
+ return ((val) << A6XX_VSC_BIN_COUNT_NX__SHIFT) & A6XX_VSC_BIN_COUNT_NX__MASK;
+}
+#define A6XX_VSC_BIN_COUNT_NY__MASK 0x001ff800
+#define A6XX_VSC_BIN_COUNT_NY__SHIFT 11
+static inline uint32_t A6XX_VSC_BIN_COUNT_NY(uint32_t val)
+{
+ return ((val) << A6XX_VSC_BIN_COUNT_NY__SHIFT) & A6XX_VSC_BIN_COUNT_NY__MASK;
+}
+
+static inline uint32_t REG_A6XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+#define A6XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
+#define A6XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_X__MASK;
+}
+#define A6XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00
+#define A6XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_Y__MASK;
+}
+#define A6XX_VSC_PIPE_CONFIG_REG_W__MASK 0x03f00000
+#define A6XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_W(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_W__MASK;
+}
+#define A6XX_VSC_PIPE_CONFIG_REG_H__MASK 0xfc000000
+#define A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT 26
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_H__MASK;
+}
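+
+/*
+ * Editorial note: each VSC_PIPE_CONFIG_REG entry above describes one
+ * visibility-stream pipe as a bin-grid position plus a width/height in
+ * bins; e.g. a pipe covering a 4x4 block of bins at bin (0,0) would be:
+ *
+ *   uint32_t cfg = A6XX_VSC_PIPE_CONFIG_REG_X(0) |
+ *                  A6XX_VSC_PIPE_CONFIG_REG_Y(0) |
+ *                  A6XX_VSC_PIPE_CONFIG_REG_W(4) |
+ *                  A6XX_VSC_PIPE_CONFIG_REG_H(4);
+ */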
+
+#define REG_A6XX_VSC_PRIM_STRM_ADDRESS 0x00000c30
+
+#define REG_A6XX_VSC_PRIM_STRM_PITCH 0x00000c32
+
+#define REG_A6XX_VSC_PRIM_STRM_LIMIT 0x00000c33
+
+#define REG_A6XX_VSC_DRAW_STRM_ADDRESS 0x00000c34
+
+#define REG_A6XX_VSC_DRAW_STRM_PITCH 0x00000c36
+
+#define REG_A6XX_VSC_DRAW_STRM_LIMIT 0x00000c37
+
+static inline uint32_t REG_A6XX_VSC_STATE(uint32_t i0) { return 0x00000c38 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_STATE_REG(uint32_t i0) { return 0x00000c38 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_PRIM_STRM_SIZE(uint32_t i0) { return 0x00000c58 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_PRIM_STRM_SIZE_REG(uint32_t i0) { return 0x00000c58 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_DRAW_STRM_SIZE(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_DRAW_STRM_SIZE_REG(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
+
+#define REG_A6XX_UCHE_UNKNOWN_0E12 0x00000e12
+
+#define REG_A6XX_GRAS_CL_CNTL 0x00008000
+#define A6XX_GRAS_CL_CNTL_CLIP_DISABLE 0x00000001
+#define A6XX_GRAS_CL_CNTL_ZNEAR_CLIP_DISABLE 0x00000002
+#define A6XX_GRAS_CL_CNTL_ZFAR_CLIP_DISABLE 0x00000004
+#define A6XX_GRAS_CL_CNTL_UNK5 0x00000020
+#define A6XX_GRAS_CL_CNTL_ZERO_GB_SCALE_Z 0x00000040
+#define A6XX_GRAS_CL_CNTL_VP_CLIP_CODE_IGNORE 0x00000080
+#define A6XX_GRAS_CL_CNTL_VP_XFORM_DISABLE 0x00000100
+#define A6XX_GRAS_CL_CNTL_PERSP_DIVISION_DISABLE 0x00000200
+
+#define REG_A6XX_GRAS_VS_CL_CNTL 0x00008001
+#define A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_GRAS_VS_CL_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT) & A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK 0x0000ff00
+#define A6XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT 8
+static inline uint32_t A6XX_GRAS_VS_CL_CNTL_CULL_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT) & A6XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK;
+}
+
+#define REG_A6XX_GRAS_DS_CL_CNTL 0x00008002
+#define A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_GRAS_DS_CL_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__SHIFT) & A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_GRAS_DS_CL_CNTL_CULL_MASK__MASK 0x0000ff00
+#define A6XX_GRAS_DS_CL_CNTL_CULL_MASK__SHIFT 8
+static inline uint32_t A6XX_GRAS_DS_CL_CNTL_CULL_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_DS_CL_CNTL_CULL_MASK__SHIFT) & A6XX_GRAS_DS_CL_CNTL_CULL_MASK__MASK;
+}
+
+#define REG_A6XX_GRAS_GS_CL_CNTL 0x00008003
+#define A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_GRAS_GS_CL_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__SHIFT) & A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_GRAS_GS_CL_CNTL_CULL_MASK__MASK 0x0000ff00
+#define A6XX_GRAS_GS_CL_CNTL_CULL_MASK__SHIFT 8
+static inline uint32_t A6XX_GRAS_GS_CL_CNTL_CULL_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_GS_CL_CNTL_CULL_MASK__SHIFT) & A6XX_GRAS_GS_CL_CNTL_CULL_MASK__MASK;
+}
+
+#define REG_A6XX_GRAS_MAX_LAYER_INDEX 0x00008004
+
+#define REG_A6XX_GRAS_CNTL 0x00008005
+#define A6XX_GRAS_CNTL_IJ_PERSP_PIXEL 0x00000001
+#define A6XX_GRAS_CNTL_IJ_PERSP_CENTROID 0x00000002
+#define A6XX_GRAS_CNTL_IJ_PERSP_SAMPLE 0x00000004
+#define A6XX_GRAS_CNTL_IJ_LINEAR_PIXEL 0x00000008
+#define A6XX_GRAS_CNTL_IJ_LINEAR_CENTROID 0x00000010
+#define A6XX_GRAS_CNTL_IJ_LINEAR_SAMPLE 0x00000020
+#define A6XX_GRAS_CNTL_COORD_MASK__MASK 0x000003c0
+#define A6XX_GRAS_CNTL_COORD_MASK__SHIFT 6
+static inline uint32_t A6XX_GRAS_CNTL_COORD_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_CNTL_COORD_MASK__SHIFT) & A6XX_GRAS_CNTL_COORD_MASK__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x00008006
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000001ff
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
+}
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x0007fc00
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT(uint32_t i0) { return 0x00008010 + 0x6*i0; }
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_XOFFSET(uint32_t i0) { return 0x00008010 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_XOFFSET__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_XOFFSET__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_XOFFSET(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_XOFFSET__SHIFT) & A6XX_GRAS_CL_VPORT_XOFFSET__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_XSCALE(uint32_t i0) { return 0x00008011 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_XSCALE__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_XSCALE__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_XSCALE(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_XSCALE__SHIFT) & A6XX_GRAS_CL_VPORT_XSCALE__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_YOFFSET(uint32_t i0) { return 0x00008012 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_YOFFSET__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_YOFFSET__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_YOFFSET(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_YOFFSET__SHIFT) & A6XX_GRAS_CL_VPORT_YOFFSET__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_YSCALE(uint32_t i0) { return 0x00008013 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_YSCALE__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_YSCALE__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_YSCALE(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_YSCALE__SHIFT) & A6XX_GRAS_CL_VPORT_YSCALE__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_ZOFFSET(uint32_t i0) { return 0x00008014 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_ZOFFSET__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_ZOFFSET__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_ZOFFSET(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZOFFSET__SHIFT) & A6XX_GRAS_CL_VPORT_ZOFFSET__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_ZSCALE(uint32_t i0) { return 0x00008015 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_ZSCALE__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_ZSCALE__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_ZSCALE(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZSCALE__SHIFT) & A6XX_GRAS_CL_VPORT_ZSCALE__MASK;
+}
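+
+/*
+ * Editorial note: the viewport helpers above take floats and use fui()
+ * (the driver's float-to-uint32 bit cast) because these registers hold
+ * raw IEEE-754 values.  Packing one viewport's X transform, with
+ * hypothetical x/w:
+ *
+ *   uint32_t xoff = A6XX_GRAS_CL_VPORT_XOFFSET(x + w / 2.0f);
+ *   uint32_t xscl = A6XX_GRAS_CL_VPORT_XSCALE(w / 2.0f);
+ */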
+
+static inline uint32_t REG_A6XX_GRAS_CL_Z_CLAMP(uint32_t i0) { return 0x00008070 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_GRAS_CL_Z_CLAMP_MIN(uint32_t i0) { return 0x00008070 + 0x2*i0; }
+#define A6XX_GRAS_CL_Z_CLAMP_MIN__MASK 0xffffffff
+#define A6XX_GRAS_CL_Z_CLAMP_MIN__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_Z_CLAMP_MIN(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_Z_CLAMP_MIN__SHIFT) & A6XX_GRAS_CL_Z_CLAMP_MIN__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_Z_CLAMP_MAX(uint32_t i0) { return 0x00008071 + 0x2*i0; }
+#define A6XX_GRAS_CL_Z_CLAMP_MAX__MASK 0xffffffff
+#define A6XX_GRAS_CL_Z_CLAMP_MAX__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_Z_CLAMP_MAX(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_Z_CLAMP_MAX__SHIFT) & A6XX_GRAS_CL_Z_CLAMP_MAX__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_CNTL 0x00008090
+#define A6XX_GRAS_SU_CNTL_CULL_FRONT 0x00000001
+#define A6XX_GRAS_SU_CNTL_CULL_BACK 0x00000002
+#define A6XX_GRAS_SU_CNTL_FRONT_CW 0x00000004
+#define A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK 0x000007f8
+#define A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(float val)
+{
+ return ((((int32_t)(val * 4.0))) << A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT) & A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
+}
+#define A6XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800
+#define A6XX_GRAS_SU_CNTL_UNK12__MASK 0x00001000
+#define A6XX_GRAS_SU_CNTL_UNK12__SHIFT 12
+static inline uint32_t A6XX_GRAS_SU_CNTL_UNK12(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SU_CNTL_UNK12__SHIFT) & A6XX_GRAS_SU_CNTL_UNK12__MASK;
+}
+#define A6XX_GRAS_SU_CNTL_LINE_MODE__MASK 0x00002000
+#define A6XX_GRAS_SU_CNTL_LINE_MODE__SHIFT 13
+static inline uint32_t A6XX_GRAS_SU_CNTL_LINE_MODE(enum a5xx_line_mode val)
+{
+ return ((val) << A6XX_GRAS_SU_CNTL_LINE_MODE__SHIFT) & A6XX_GRAS_SU_CNTL_LINE_MODE__MASK;
+}
+#define A6XX_GRAS_SU_CNTL_UNK15__MASK 0x00018000
+#define A6XX_GRAS_SU_CNTL_UNK15__SHIFT 15
+static inline uint32_t A6XX_GRAS_SU_CNTL_UNK15(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SU_CNTL_UNK15__SHIFT) & A6XX_GRAS_SU_CNTL_UNK15__MASK;
+}
+#define A6XX_GRAS_SU_CNTL_UNK17 0x00020000
+#define A6XX_GRAS_SU_CNTL_MULTIVIEW_ENABLE 0x00040000
+#define A6XX_GRAS_SU_CNTL_UNK19__MASK 0x00780000
+#define A6XX_GRAS_SU_CNTL_UNK19__SHIFT 19
+static inline uint32_t A6XX_GRAS_SU_CNTL_UNK19(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SU_CNTL_UNK19__SHIFT) & A6XX_GRAS_SU_CNTL_UNK19__MASK;
+}
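+
+/*
+ * Editorial note: LINEHALFWIDTH above is unsigned fixed point with two
+ * fractional bits (the helper scales by 4.0) and, per its name, holds
+ * half the line width, so a 1.0-pixel line would be packed as:
+ *
+ *   uint32_t su_cntl = A6XX_GRAS_SU_CNTL_CULL_BACK |
+ *                      A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(1.0f / 2.0f);
+ */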
+
+#define REG_A6XX_GRAS_SU_POINT_MINMAX 0x00008091
+#define A6XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A6XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A6XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A6XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A6XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A6XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A6XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POINT_SIZE 0x00008092
+#define A6XX_GRAS_SU_POINT_SIZE__MASK 0x0000ffff
+#define A6XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_SIZE__SHIFT) & A6XX_GRAS_SU_POINT_SIZE__MASK;
+}
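+
+/*
+ * Editorial note: POINT_MINMAX and POINT_SIZE above are 12.4 fixed point
+ * (the helpers scale by 16.0); e.g. clamping point sprites to [1, 64]
+ * pixels with a default size of one pixel:
+ *
+ *   uint32_t minmax = A6XX_GRAS_SU_POINT_MINMAX_MIN(1.0f) |
+ *                     A6XX_GRAS_SU_POINT_MINMAX_MAX(64.0f);
+ *   uint32_t size   = A6XX_GRAS_SU_POINT_SIZE(1.0f);
+ */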
+
+#define REG_A6XX_GRAS_SU_DEPTH_PLANE_CNTL 0x00008094
+#define A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__MASK 0x00000003
+#define A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE(enum a6xx_ztest_mode val)
+{
+ return ((val) << A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__SHIFT) & A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE 0x00008095
+#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POLY_OFFSET_OFFSET 0x00008096
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP 0x00008097
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK 0xffffffff
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO 0x00008098
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_format val)
+{
+ return ((val) << A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__MASK 0x00000008
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__SHIFT 3
+static inline uint32_t A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__SHIFT) & A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL 0x00008099
+#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_CONSERVATIVERASEN 0x00000001
+#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT__MASK 0x00000006
+#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT__SHIFT 1
+static inline uint32_t A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT__SHIFT) & A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT__MASK;
+}
+#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_INNERCONSERVATIVERASEN 0x00000008
+#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4__MASK 0x00000030
+#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4__SHIFT 4
+static inline uint32_t A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4__SHIFT) & A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_PATH_RENDERING_CNTL 0x0000809a
+#define A6XX_GRAS_SU_PATH_RENDERING_CNTL_UNK0 0x00000001
+#define A6XX_GRAS_SU_PATH_RENDERING_CNTL_LINELENGTHEN 0x00000002
+
+#define REG_A6XX_GRAS_VS_LAYER_CNTL 0x0000809b
+#define A6XX_GRAS_VS_LAYER_CNTL_WRITES_LAYER 0x00000001
+#define A6XX_GRAS_VS_LAYER_CNTL_WRITES_VIEW 0x00000002
+
+#define REG_A6XX_GRAS_GS_LAYER_CNTL 0x0000809c
+#define A6XX_GRAS_GS_LAYER_CNTL_WRITES_LAYER 0x00000001
+#define A6XX_GRAS_GS_LAYER_CNTL_WRITES_VIEW 0x00000002
+
+#define REG_A6XX_GRAS_DS_LAYER_CNTL 0x0000809d
+#define A6XX_GRAS_DS_LAYER_CNTL_WRITES_LAYER 0x00000001
+#define A6XX_GRAS_DS_LAYER_CNTL_WRITES_VIEW 0x00000002
+
+#define REG_A6XX_GRAS_SC_CNTL 0x000080a0
+#define A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE__MASK 0x00000007
+#define A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE__SHIFT) & A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE__MASK;
+}
+#define A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE__MASK 0x00000018
+#define A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE__SHIFT 3
+static inline uint32_t A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE(enum a6xx_single_prim_mode val)
+{
+ return ((val) << A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE__SHIFT) & A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE__MASK;
+}
+#define A6XX_GRAS_SC_CNTL_RASTER_MODE__MASK 0x00000020
+#define A6XX_GRAS_SC_CNTL_RASTER_MODE__SHIFT 5
+static inline uint32_t A6XX_GRAS_SC_CNTL_RASTER_MODE(enum a6xx_raster_mode val)
+{
+ return ((val) << A6XX_GRAS_SC_CNTL_RASTER_MODE__SHIFT) & A6XX_GRAS_SC_CNTL_RASTER_MODE__MASK;
+}
+#define A6XX_GRAS_SC_CNTL_RASTER_DIRECTION__MASK 0x000000c0
+#define A6XX_GRAS_SC_CNTL_RASTER_DIRECTION__SHIFT 6
+static inline uint32_t A6XX_GRAS_SC_CNTL_RASTER_DIRECTION(enum a6xx_raster_direction val)
+{
+ return ((val) << A6XX_GRAS_SC_CNTL_RASTER_DIRECTION__SHIFT) & A6XX_GRAS_SC_CNTL_RASTER_DIRECTION__MASK;
+}
+#define A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__MASK 0x00000100
+#define A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__SHIFT 8
+static inline uint32_t A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION(enum a6xx_sequenced_thread_dist val)
+{
+ return ((val) << A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__SHIFT) & A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__MASK;
+}
+#define A6XX_GRAS_SC_CNTL_UNK9__MASK 0x00000e00
+#define A6XX_GRAS_SC_CNTL_UNK9__SHIFT 9
+static inline uint32_t A6XX_GRAS_SC_CNTL_UNK9(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_CNTL_UNK9__SHIFT) & A6XX_GRAS_SC_CNTL_UNK9__MASK;
+}
+#define A6XX_GRAS_SC_CNTL_EARLYVIZOUTEN 0x00001000
+
+#define REG_A6XX_GRAS_BIN_CONTROL 0x000080a1
+#define A6XX_GRAS_BIN_CONTROL_BINW__MASK 0x0000003f
+#define A6XX_GRAS_BIN_CONTROL_BINW__SHIFT 0
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINW(uint32_t val)
+{
+ return ((val >> 5) << A6XX_GRAS_BIN_CONTROL_BINW__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINW__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_BINH__MASK 0x00007f00
+#define A6XX_GRAS_BIN_CONTROL_BINH__SHIFT 8
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_GRAS_BIN_CONTROL_BINH__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINH__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_RENDER_MODE__MASK 0x001c0000
+#define A6XX_GRAS_BIN_CONTROL_RENDER_MODE__SHIFT 18
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_RENDER_MODE(enum a6xx_render_mode val)
+{
+ return ((val) << A6XX_GRAS_BIN_CONTROL_RENDER_MODE__SHIFT) & A6XX_GRAS_BIN_CONTROL_RENDER_MODE__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_FORCE_LRZ_WRITE_DIS 0x00200000
+#define A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION__MASK 0x00c00000
+#define A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION__SHIFT 22
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION(enum a6xx_buffers_location val)
+{
+ return ((val) << A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION__SHIFT) & A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK 0x07000000
+#define A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT 24
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT) & A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_UNK27__MASK 0x08000000
+#define A6XX_GRAS_BIN_CONTROL_UNK27__SHIFT 27
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_UNK27(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_BIN_CONTROL_UNK27__SHIFT) & A6XX_GRAS_BIN_CONTROL_UNK27__MASK;
+}
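+
+/*
+ * Editorial note: BINW/BINH above use the same 32-/16-pixel granularity
+ * as A6XX_VSC_BIN_SIZE (hence the >> 5 / >> 4 in the helpers); for binned
+ * GMEM rendering the same bin dimensions are normally programmed here, in
+ * REG_A6XX_RB_BIN_CONTROL and in REG_A6XX_VSC_BIN_SIZE.
+ */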
+
+#define REG_A6XX_GRAS_RAS_MSAA_CNTL 0x000080a2
+#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_GRAS_RAS_MSAA_CNTL_UNK2__MASK 0x00000004
+#define A6XX_GRAS_RAS_MSAA_CNTL_UNK2__SHIFT 2
+static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_UNK2(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_UNK2__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_UNK2__MASK;
+}
+#define A6XX_GRAS_RAS_MSAA_CNTL_UNK3__MASK 0x00000008
+#define A6XX_GRAS_RAS_MSAA_CNTL_UNK3__SHIFT 3
+static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_UNK3(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_UNK3__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_UNK3__MASK;
+}
+
+#define REG_A6XX_GRAS_DEST_MSAA_CNTL 0x000080a3
+#define A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A6XX_GRAS_SAMPLE_CONFIG 0x000080a4
+#define A6XX_GRAS_SAMPLE_CONFIG_UNK0 0x00000001
+#define A6XX_GRAS_SAMPLE_CONFIG_LOCATION_ENABLE 0x00000002
+
+#define REG_A6XX_GRAS_SAMPLE_LOCATION_0 0x000080a5
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK 0x0000000f
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK 0x000000f0
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT 4
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK 0x00000f00
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT 8
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK 0x0000f000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT 12
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK 0x000f0000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT 16
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK 0x00f00000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT 20
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK 0x0f000000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT 24
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK 0xf0000000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT 28
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SAMPLE_LOCATION_1 0x000080a6
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK 0x0000000f
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK 0x000000f0
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT 4
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK 0x00000f00
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT 8
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK 0x0000f000
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT 12
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK 0x000f0000
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT 16
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK 0x00f00000
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT 20
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK 0x0f000000
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT 24
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK 0xf0000000
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT 28
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_UNKNOWN_80AF 0x000080af
+
+static inline uint32_t REG_A6XX_GRAS_SC_SCREEN_SCISSOR(uint32_t i0) { return 0x000080b0 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_GRAS_SC_SCREEN_SCISSOR_TL(uint32_t i0) { return 0x000080b0 + 0x2*i0; }
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x0000ffff
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
+}
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0xffff0000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_SC_SCREEN_SCISSOR_BR(uint32_t i0) { return 0x000080b1 + 0x2*i0; }
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x0000ffff
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
+}
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0xffff0000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR(uint32_t i0) { return 0x000080d0 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL(uint32_t i0) { return 0x000080d0 + 0x2*i0; }
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__MASK 0x0000ffff
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__MASK;
+}
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__MASK 0xffff0000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR(uint32_t i0) { return 0x000080d1 + 0x2*i0; }
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__MASK 0x0000ffff
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__MASK;
+}
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__MASK 0xffff0000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL 0x000080f0
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00003fff
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x3fff0000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_WINDOW_SCISSOR_BR 0x000080f1
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00003fff
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x3fff0000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
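+
+/*
+ * Editorial note: each scissor register pair above packs an X/Y corner
+ * into one word; a sketch for the window scissor with hypothetical
+ * x1/y1/x2/y2 bounds (the BR corner is typically inclusive, hence -1):
+ *
+ *   uint32_t tl = A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(x1) |
+ *                 A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(y1);
+ *   uint32_t br = A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(x2 - 1) |
+ *                 A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(y2 - 1);
+ */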
+
+#define REG_A6XX_GRAS_LRZ_CNTL 0x00008100
+#define A6XX_GRAS_LRZ_CNTL_ENABLE 0x00000001
+#define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002
+#define A6XX_GRAS_LRZ_CNTL_GREATER 0x00000004
+#define A6XX_GRAS_LRZ_CNTL_FC_ENABLE 0x00000008
+#define A6XX_GRAS_LRZ_CNTL_Z_TEST_ENABLE 0x00000010
+#define A6XX_GRAS_LRZ_CNTL_Z_BOUNDS_ENABLE 0x00000020
+#define A6XX_GRAS_LRZ_CNTL_UNK6__MASK 0x000003c0
+#define A6XX_GRAS_LRZ_CNTL_UNK6__SHIFT 6
+static inline uint32_t A6XX_GRAS_LRZ_CNTL_UNK6(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_LRZ_CNTL_UNK6__SHIFT) & A6XX_GRAS_LRZ_CNTL_UNK6__MASK;
+}
+
+#define REG_A6XX_GRAS_LRZ_PS_INPUT_CNTL 0x00008101
+#define A6XX_GRAS_LRZ_PS_INPUT_CNTL_SAMPLEID 0x00000001
+#define A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__MASK 0x00000006
+#define A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__SHIFT 1
+static inline uint32_t A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE(enum a6xx_fragcoord_sample_mode val)
+{
+ return ((val) << A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__SHIFT) & A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__MASK;
+}
+
+#define REG_A6XX_GRAS_LRZ_MRT_BUF_INFO_0 0x00008102
+#define A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT__SHIFT) & A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT__MASK;
+}
+
+#define REG_A6XX_GRAS_LRZ_BUFFER_BASE 0x00008103
+#define A6XX_GRAS_LRZ_BUFFER_BASE__MASK 0xffffffff
+#define A6XX_GRAS_LRZ_BUFFER_BASE__SHIFT 0
+static inline uint32_t A6XX_GRAS_LRZ_BUFFER_BASE(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_LRZ_BUFFER_BASE__SHIFT) & A6XX_GRAS_LRZ_BUFFER_BASE__MASK;
+}
+
+#define REG_A6XX_GRAS_LRZ_BUFFER_PITCH 0x00008105
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK 0x000000ff
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT 0
+static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK;
+}
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK 0x1ffffc00
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT 10
+static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK;
+}
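+
+/*
+ * Editorial note: the PITCH/ARRAY_PITCH helpers above drop the low 5/4
+ * bits, i.e. the hardware fields hold the pitch divided by 32/16, so
+ * callers pass pitches already aligned to those granules:
+ *
+ *   uint32_t pitch = A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH(lrz_pitch) |
+ *                    A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(lrz_layer_size);
+ *
+ * lrz_pitch and lrz_layer_size are hypothetical, suitably aligned values.
+ */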
+
+#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE 0x00008106
+#define A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__MASK 0xffffffff
+#define A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__SHIFT 0
+static inline uint32_t A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__SHIFT) & A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__MASK;
+}
+
+#define REG_A6XX_GRAS_SAMPLE_CNTL 0x00008109
+#define A6XX_GRAS_SAMPLE_CNTL_PER_SAMP_MODE 0x00000001
+
+#define REG_A6XX_GRAS_UNKNOWN_810A 0x0000810a
+#define A6XX_GRAS_UNKNOWN_810A_UNK0__MASK 0x000007ff
+#define A6XX_GRAS_UNKNOWN_810A_UNK0__SHIFT 0
+static inline uint32_t A6XX_GRAS_UNKNOWN_810A_UNK0(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_UNKNOWN_810A_UNK0__SHIFT) & A6XX_GRAS_UNKNOWN_810A_UNK0__MASK;
+}
+#define A6XX_GRAS_UNKNOWN_810A_UNK16__MASK 0x07ff0000
+#define A6XX_GRAS_UNKNOWN_810A_UNK16__SHIFT 16
+static inline uint32_t A6XX_GRAS_UNKNOWN_810A_UNK16(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_UNKNOWN_810A_UNK16__SHIFT) & A6XX_GRAS_UNKNOWN_810A_UNK16__MASK;
+}
+#define A6XX_GRAS_UNKNOWN_810A_UNK28__MASK 0xf0000000
+#define A6XX_GRAS_UNKNOWN_810A_UNK28__SHIFT 28
+static inline uint32_t A6XX_GRAS_UNKNOWN_810A_UNK28(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_UNKNOWN_810A_UNK28__SHIFT) & A6XX_GRAS_UNKNOWN_810A_UNK28__MASK;
+}
+
+#define REG_A6XX_GRAS_UNKNOWN_8110 0x00008110
+
+#define REG_A6XX_GRAS_2D_BLIT_CNTL 0x00008400
+#define A6XX_GRAS_2D_BLIT_CNTL_ROTATE__MASK 0x00000007
+#define A6XX_GRAS_2D_BLIT_CNTL_ROTATE__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_ROTATE(enum a6xx_rotation val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_ROTATE__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_ROTATE__MASK;
+}
+#define A6XX_GRAS_2D_BLIT_CNTL_OVERWRITEEN 0x00000008
+#define A6XX_GRAS_2D_BLIT_CNTL_UNK4__MASK 0x00000070
+#define A6XX_GRAS_2D_BLIT_CNTL_UNK4__SHIFT 4
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_UNK4(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_UNK4__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_UNK4__MASK;
+}
+#define A6XX_GRAS_2D_BLIT_CNTL_SOLID_COLOR 0x00000080
+#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK 0x0000ff00
+#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
+}
+#define A6XX_GRAS_2D_BLIT_CNTL_SCISSOR 0x00010000
+#define A6XX_GRAS_2D_BLIT_CNTL_UNK17__MASK 0x00060000
+#define A6XX_GRAS_2D_BLIT_CNTL_UNK17__SHIFT 17
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_UNK17(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_UNK17__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_UNK17__MASK;
+}
+#define A6XX_GRAS_2D_BLIT_CNTL_D24S8 0x00080000
+#define A6XX_GRAS_2D_BLIT_CNTL_MASK__MASK 0x00f00000
+#define A6XX_GRAS_2D_BLIT_CNTL_MASK__SHIFT 20
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_MASK__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_MASK__MASK;
+}
+#define A6XX_GRAS_2D_BLIT_CNTL_IFMT__MASK 0x1f000000
+#define A6XX_GRAS_2D_BLIT_CNTL_IFMT__SHIFT 24
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_IFMT(enum a6xx_2d_ifmt val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_IFMT__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_IFMT__MASK;
+}
+#define A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE__MASK 0x20000000
+#define A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE__SHIFT 29
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE(enum a6xx_raster_mode val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE__MASK;
+}
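+
+/*
+ * Editorial note: a sketch of composing a plain (unrotated, full write
+ * mask) 2D blit control word from the helpers above; the format/ifmt
+ * enum values are stand-ins for whatever a real caller would select:
+ *
+ *   uint32_t blit_cntl = A6XX_GRAS_2D_BLIT_CNTL_ROTATE(ROTATE_0) |
+ *                        A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(FMT6_8_8_8_8_UNORM) |
+ *                        A6XX_GRAS_2D_BLIT_CNTL_MASK(0xf) |
+ *                        A6XX_GRAS_2D_BLIT_CNTL_IFMT(R2D_UNORM8);
+ */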
+
+#define REG_A6XX_GRAS_2D_SRC_TL_X 0x00008401
+
+#define REG_A6XX_GRAS_2D_SRC_BR_X 0x00008402
+
+#define REG_A6XX_GRAS_2D_SRC_TL_Y 0x00008403
+
+#define REG_A6XX_GRAS_2D_SRC_BR_Y 0x00008404
+
+#define REG_A6XX_GRAS_2D_DST_TL 0x00008405
+#define A6XX_GRAS_2D_DST_TL_X__MASK 0x00003fff
+#define A6XX_GRAS_2D_DST_TL_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_DST_TL_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_TL_X__SHIFT) & A6XX_GRAS_2D_DST_TL_X__MASK;
+}
+#define A6XX_GRAS_2D_DST_TL_Y__MASK 0x3fff0000
+#define A6XX_GRAS_2D_DST_TL_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_2D_DST_TL_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_TL_Y__SHIFT) & A6XX_GRAS_2D_DST_TL_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_DST_BR 0x00008406
+#define A6XX_GRAS_2D_DST_BR_X__MASK 0x00003fff
+#define A6XX_GRAS_2D_DST_BR_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_DST_BR_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_BR_X__SHIFT) & A6XX_GRAS_2D_DST_BR_X__MASK;
+}
+#define A6XX_GRAS_2D_DST_BR_Y__MASK 0x3fff0000
+#define A6XX_GRAS_2D_DST_BR_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_2D_DST_BR_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_BR_Y__SHIFT) & A6XX_GRAS_2D_DST_BR_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_UNKNOWN_8407 0x00008407
+
+#define REG_A6XX_GRAS_2D_UNKNOWN_8408 0x00008408
+
+#define REG_A6XX_GRAS_2D_UNKNOWN_8409 0x00008409
+
+#define REG_A6XX_GRAS_2D_RESOLVE_CNTL_1 0x0000840a
+#define A6XX_GRAS_2D_RESOLVE_CNTL_1_X__MASK 0x00003fff
+#define A6XX_GRAS_2D_RESOLVE_CNTL_1_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_1_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_1_X__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_1_X__MASK;
+}
+#define A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__MASK 0x3fff0000
+#define A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_1_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_RESOLVE_CNTL_2 0x0000840b
+#define A6XX_GRAS_2D_RESOLVE_CNTL_2_X__MASK 0x00003fff
+#define A6XX_GRAS_2D_RESOLVE_CNTL_2_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_2_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_2_X__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_2_X__MASK;
+}
+#define A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__MASK 0x3fff0000
+#define A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_2_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_DBG_ECO_CNTL 0x00008600
+#define A6XX_GRAS_DBG_ECO_CNTL_UNK7 0x00000080
+#define A6XX_GRAS_DBG_ECO_CNTL_LRZCACHELOCKDIS 0x00000800
+
+#define REG_A6XX_GRAS_ADDR_MODE_CNTL 0x00008601
+
+static inline uint32_t REG_A6XX_GRAS_PERFCTR_TSE_SEL(uint32_t i0) { return 0x00008610 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_GRAS_PERFCTR_RAS_SEL(uint32_t i0) { return 0x00008614 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_GRAS_PERFCTR_LRZ_SEL(uint32_t i0) { return 0x00008618 + 0x1*i0; }
+
+#define REG_A6XX_RB_BIN_CONTROL 0x00008800
+#define A6XX_RB_BIN_CONTROL_BINW__MASK 0x0000003f
+#define A6XX_RB_BIN_CONTROL_BINW__SHIFT 0
+static inline uint32_t A6XX_RB_BIN_CONTROL_BINW(uint32_t val)
+{
+ return ((val >> 5) << A6XX_RB_BIN_CONTROL_BINW__SHIFT) & A6XX_RB_BIN_CONTROL_BINW__MASK;
+}
+#define A6XX_RB_BIN_CONTROL_BINH__MASK 0x00007f00
+#define A6XX_RB_BIN_CONTROL_BINH__SHIFT 8
+static inline uint32_t A6XX_RB_BIN_CONTROL_BINH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_RB_BIN_CONTROL_BINH__SHIFT) & A6XX_RB_BIN_CONTROL_BINH__MASK;
+}
+#define A6XX_RB_BIN_CONTROL_RENDER_MODE__MASK 0x001c0000
+#define A6XX_RB_BIN_CONTROL_RENDER_MODE__SHIFT 18
+static inline uint32_t A6XX_RB_BIN_CONTROL_RENDER_MODE(enum a6xx_render_mode val)
+{
+ return ((val) << A6XX_RB_BIN_CONTROL_RENDER_MODE__SHIFT) & A6XX_RB_BIN_CONTROL_RENDER_MODE__MASK;
+}
+#define A6XX_RB_BIN_CONTROL_FORCE_LRZ_WRITE_DIS 0x00200000
+#define A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION__MASK 0x00c00000
+#define A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION__SHIFT 22
+static inline uint32_t A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION(enum a6xx_buffers_location val)
+{
+ return ((val) << A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION__SHIFT) & A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION__MASK;
+}
+#define A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK 0x07000000
+#define A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT 24
+static inline uint32_t A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT) & A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK;
+}
+
+#define REG_A6XX_RB_RENDER_CNTL 0x00008801
+#define A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__MASK 0x00000038
+#define A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__SHIFT 3
+static inline uint32_t A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__SHIFT) & A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__MASK;
+}
+#define A6XX_RB_RENDER_CNTL_EARLYVIZOUTEN 0x00000040
+#define A6XX_RB_RENDER_CNTL_BINNING 0x00000080
+#define A6XX_RB_RENDER_CNTL_UNK8__MASK 0x00000700
+#define A6XX_RB_RENDER_CNTL_UNK8__SHIFT 8
+static inline uint32_t A6XX_RB_RENDER_CNTL_UNK8(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_CNTL_UNK8__SHIFT) & A6XX_RB_RENDER_CNTL_UNK8__MASK;
+}
+#define A6XX_RB_RENDER_CNTL_RASTER_MODE__MASK 0x00000100
+#define A6XX_RB_RENDER_CNTL_RASTER_MODE__SHIFT 8
+static inline uint32_t A6XX_RB_RENDER_CNTL_RASTER_MODE(enum a6xx_raster_mode val)
+{
+ return ((val) << A6XX_RB_RENDER_CNTL_RASTER_MODE__SHIFT) & A6XX_RB_RENDER_CNTL_RASTER_MODE__MASK;
+}
+#define A6XX_RB_RENDER_CNTL_RASTER_DIRECTION__MASK 0x00000600
+#define A6XX_RB_RENDER_CNTL_RASTER_DIRECTION__SHIFT 9
+static inline uint32_t A6XX_RB_RENDER_CNTL_RASTER_DIRECTION(enum a6xx_raster_direction val)
+{
+ return ((val) << A6XX_RB_RENDER_CNTL_RASTER_DIRECTION__SHIFT) & A6XX_RB_RENDER_CNTL_RASTER_DIRECTION__MASK;
+}
+#define A6XX_RB_RENDER_CNTL_CONSERVATIVERASEN 0x00000800
+#define A6XX_RB_RENDER_CNTL_INNERCONSERVATIVERASEN 0x00001000
+#define A6XX_RB_RENDER_CNTL_FLAG_DEPTH 0x00004000
+#define A6XX_RB_RENDER_CNTL_FLAG_MRTS__MASK 0x00ff0000
+#define A6XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT 16
+static inline uint32_t A6XX_RB_RENDER_CNTL_FLAG_MRTS(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT) & A6XX_RB_RENDER_CNTL_FLAG_MRTS__MASK;
+}
+
+#define REG_A6XX_RB_RAS_MSAA_CNTL 0x00008802
+#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_RB_RAS_MSAA_CNTL_UNK2__MASK 0x00000004
+#define A6XX_RB_RAS_MSAA_CNTL_UNK2__SHIFT 2
+static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_UNK2(uint32_t val)
+{
+ return ((val) << A6XX_RB_RAS_MSAA_CNTL_UNK2__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_UNK2__MASK;
+}
+#define A6XX_RB_RAS_MSAA_CNTL_UNK3__MASK 0x00000008
+#define A6XX_RB_RAS_MSAA_CNTL_UNK3__SHIFT 3
+static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_UNK3(uint32_t val)
+{
+ return ((val) << A6XX_RB_RAS_MSAA_CNTL_UNK3__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_UNK3__MASK;
+}
+
+#define REG_A6XX_RB_DEST_MSAA_CNTL 0x00008803
+#define A6XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A6XX_RB_SAMPLE_CONFIG 0x00008804
+#define A6XX_RB_SAMPLE_CONFIG_UNK0 0x00000001
+#define A6XX_RB_SAMPLE_CONFIG_LOCATION_ENABLE 0x00000002
+
+#define REG_A6XX_RB_SAMPLE_LOCATION_0 0x00008805
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK 0x0000000f
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT 0
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK 0x000000f0
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT 4
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK 0x00000f00
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT 8
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK 0x0000f000
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT 12
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK 0x000f0000
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT 16
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK 0x00f00000
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT 20
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK 0x0f000000
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT 24
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK 0xf0000000
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT 28
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK;
+}
+
+#define REG_A6XX_RB_SAMPLE_LOCATION_1 0x00008806
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK 0x0000000f
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT 0
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK 0x000000f0
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT 4
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK 0x00000f00
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT 8
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK 0x0000f000
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT 12
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK 0x000f0000
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT 16
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK 0x00f00000
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT 20
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK 0x0f000000
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT 24
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK 0xf0000000
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT 28
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
+}
+
+#define REG_A6XX_RB_RENDER_CONTROL0 0x00008809
+#define A6XX_RB_RENDER_CONTROL0_IJ_PERSP_PIXEL 0x00000001
+#define A6XX_RB_RENDER_CONTROL0_IJ_PERSP_CENTROID 0x00000002
+#define A6XX_RB_RENDER_CONTROL0_IJ_PERSP_SAMPLE 0x00000004
+#define A6XX_RB_RENDER_CONTROL0_IJ_LINEAR_PIXEL 0x00000008
+#define A6XX_RB_RENDER_CONTROL0_IJ_LINEAR_CENTROID 0x00000010
+#define A6XX_RB_RENDER_CONTROL0_IJ_LINEAR_SAMPLE 0x00000020
+#define A6XX_RB_RENDER_CONTROL0_COORD_MASK__MASK 0x000003c0
+#define A6XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT 6
+static inline uint32_t A6XX_RB_RENDER_CONTROL0_COORD_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT) & A6XX_RB_RENDER_CONTROL0_COORD_MASK__MASK;
+}
+#define A6XX_RB_RENDER_CONTROL0_UNK10 0x00000400
+
+#define REG_A6XX_RB_RENDER_CONTROL1 0x0000880a
+#define A6XX_RB_RENDER_CONTROL1_SAMPLEMASK 0x00000001
+#define A6XX_RB_RENDER_CONTROL1_UNK1 0x00000002
+#define A6XX_RB_RENDER_CONTROL1_FACENESS 0x00000004
+#define A6XX_RB_RENDER_CONTROL1_SAMPLEID 0x00000008
+#define A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__MASK 0x00000030
+#define A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__SHIFT 4
+static inline uint32_t A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE(enum a6xx_fragcoord_sample_mode val)
+{
+ return ((val) << A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__SHIFT) & A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__MASK;
+}
+#define A6XX_RB_RENDER_CONTROL1_SIZE 0x00000040
+#define A6XX_RB_RENDER_CONTROL1_LINELENGTHEN 0x00000080
+#define A6XX_RB_RENDER_CONTROL1_FOVEATION 0x00000100
+
+#define REG_A6XX_RB_FS_OUTPUT_CNTL0 0x0000880b
+#define A6XX_RB_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE 0x00000001
+#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z 0x00000002
+#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_SAMPMASK 0x00000004
+#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_STENCILREF 0x00000008
+
+#define REG_A6XX_RB_FS_OUTPUT_CNTL1 0x0000880c
+#define A6XX_RB_FS_OUTPUT_CNTL1_MRT__MASK 0x0000000f
+#define A6XX_RB_FS_OUTPUT_CNTL1_MRT__SHIFT 0
+static inline uint32_t A6XX_RB_FS_OUTPUT_CNTL1_MRT(uint32_t val)
+{
+ return ((val) << A6XX_RB_FS_OUTPUT_CNTL1_MRT__SHIFT) & A6XX_RB_FS_OUTPUT_CNTL1_MRT__MASK;
+}
+
+#define REG_A6XX_RB_RENDER_COMPONENTS 0x0000880d
+#define A6XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f
+#define A6XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT0(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT0__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0
+#define A6XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT1(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT1__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00
+#define A6XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT2(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT2__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000
+#define A6XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT3(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT3__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000
+#define A6XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT4(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT4__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000
+#define A6XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT5(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT5__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000
+#define A6XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT6(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT6__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000
+#define A6XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT7(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT7__MASK;
+}
+
+#define REG_A6XX_RB_DITHER_CNTL 0x0000880e
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__MASK 0x00000003
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__SHIFT 0
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__MASK 0x0000000c
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__SHIFT 2
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__MASK 0x00000030
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__SHIFT 4
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__MASK 0x000000c0
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__SHIFT 6
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__MASK 0x00000300
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__SHIFT 8
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__MASK 0x00000c00
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__SHIFT 10
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__MASK 0x00003000
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__SHIFT 12
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__MASK 0x0000c000
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__SHIFT 14
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__MASK;
+}
+
+#define REG_A6XX_RB_SRGB_CNTL 0x0000880f
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT0 0x00000001
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT1 0x00000002
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT2 0x00000004
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT3 0x00000008
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT4 0x00000010
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT5 0x00000020
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT6 0x00000040
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT7 0x00000080
+
+#define REG_A6XX_RB_SAMPLE_CNTL 0x00008810
+#define A6XX_RB_SAMPLE_CNTL_PER_SAMP_MODE 0x00000001
+
+#define REG_A6XX_RB_UNKNOWN_8811 0x00008811
+
+#define REG_A6XX_RB_UNKNOWN_8818 0x00008818
+
+#define REG_A6XX_RB_UNKNOWN_8819 0x00008819
+
+#define REG_A6XX_RB_UNKNOWN_881A 0x0000881a
+
+#define REG_A6XX_RB_UNKNOWN_881B 0x0000881b
+
+#define REG_A6XX_RB_UNKNOWN_881C 0x0000881c
+
+#define REG_A6XX_RB_UNKNOWN_881D 0x0000881d
+
+#define REG_A6XX_RB_UNKNOWN_881E 0x0000881e
+
+static inline uint32_t REG_A6XX_RB_MRT(uint32_t i0) { return 0x00008820 + 0x8*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_CONTROL(uint32_t i0) { return 0x00008820 + 0x8*i0; }
+#define A6XX_RB_MRT_CONTROL_BLEND 0x00000001
+#define A6XX_RB_MRT_CONTROL_BLEND2 0x00000002
+#define A6XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000004
+#define A6XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000078
+#define A6XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 3
+static inline uint32_t A6XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+ return ((val) << A6XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A6XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
+#define A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
+#define A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
+static inline uint32_t A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
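+/* REG_A6XX_RB_MRT(i0) and the helpers that follow form an indexed array:
+ * each render target's registers sit at a stride of 0x8 dwords from
+ * 0x8820. A minimal sketch of building the CONTROL value for MRT 0,
+ * assuming a hypothetical emit_reg(offset, value) ring writer:
+ *
+ *   emit_reg(REG_A6XX_RB_MRT_CONTROL(0),
+ *            A6XX_RB_MRT_CONTROL_BLEND |
+ *            A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE(0xf)); // write RGBA
+ */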
+
+static inline uint32_t REG_A6XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x00008821 + 0x8*i0; }
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
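+/* Sketch of a conventional src-alpha-over blend packed with the helpers
+ * above; the FACTOR_* and BLEND_* enum values are assumed to come from
+ * adreno_common.xml.h and a3xx.xml.h:
+ *
+ *   uint32_t bc =
+ *       A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(FACTOR_SRC_ALPHA) |
+ *       A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(BLEND_DST_PLUS_SRC) |
+ *       A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(FACTOR_ONE_MINUS_SRC_ALPHA) |
+ *       A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(FACTOR_ONE) |
+ *       A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(BLEND_DST_PLUS_SRC) |
+ *       A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(FACTOR_ONE_MINUS_SRC_ALPHA);
+ */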
+
+static inline uint32_t REG_A6XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x00008822 + 0x8*i0; }
+#define A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300
+#define A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A6XX_RB_MRT_BUF_INFO_UNK10__MASK 0x00000400
+#define A6XX_RB_MRT_BUF_INFO_UNK10__SHIFT 10
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_UNK10(uint32_t val)
+{
+ return ((val) << A6XX_RB_MRT_BUF_INFO_UNK10__SHIFT) & A6XX_RB_MRT_BUF_INFO_UNK10__MASK;
+}
+#define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
+#define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_PITCH(uint32_t i0) { return 0x00008823 + 0x8*i0; }
+#define A6XX_RB_MRT_PITCH__MASK 0x0000ffff
+#define A6XX_RB_MRT_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_MRT_PITCH__SHIFT) & A6XX_RB_MRT_PITCH__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x00008824 + 0x8*i0; }
+#define A6XX_RB_MRT_ARRAY_PITCH__MASK 0x1fffffff
+#define A6XX_RB_MRT_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_MRT_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_ARRAY_PITCH__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_BASE(uint32_t i0) { return 0x00008825 + 0x8*i0; }
+#define A6XX_RB_MRT_BASE__MASK 0xffffffff
+#define A6XX_RB_MRT_BASE__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_BASE(uint32_t val)
+{
+ return ((val) << A6XX_RB_MRT_BASE__SHIFT) & A6XX_RB_MRT_BASE__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_BASE_GMEM(uint32_t i0) { return 0x00008827 + 0x8*i0; }
+#define A6XX_RB_MRT_BASE_GMEM__MASK 0xfffff000
+#define A6XX_RB_MRT_BASE_GMEM__SHIFT 12
+static inline uint32_t A6XX_RB_MRT_BASE_GMEM(uint32_t val)
+{
+ return ((val >> 12) << A6XX_RB_MRT_BASE_GMEM__SHIFT) & A6XX_RB_MRT_BASE_GMEM__MASK;
+}
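+/* Note the input granularity of the helpers above: PITCH and ARRAY_PITCH
+ * take byte pitches and store them in 64-byte units (val >> 6), while
+ * BASE_GMEM keeps byte units but masks to 4 KiB alignment
+ * ((val >> 12) << 12). Callers pass plain, suitably aligned byte values;
+ * e.g. A6XX_RB_MRT_PITCH(256) encodes as 4.
+ */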
+
+#define REG_A6XX_RB_BLEND_RED_F32 0x00008860
+#define A6XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_RED_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_RED_F32__SHIFT) & A6XX_RB_BLEND_RED_F32__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_GREEN_F32 0x00008861
+#define A6XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_GREEN_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_GREEN_F32__SHIFT) & A6XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_BLUE_F32 0x00008862
+#define A6XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_BLUE_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_BLUE_F32__SHIFT) & A6XX_RB_BLEND_BLUE_F32__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_ALPHA_F32 0x00008863
+#define A6XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_ALPHA_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_ALPHA_F32__SHIFT) & A6XX_RB_BLEND_ALPHA_F32__MASK;
+}
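+/* The *_F32 helpers depend on fui() (defined elsewhere in the driver) to
+ * reinterpret a float's IEEE-754 bit pattern as uint32_t: the blend
+ * constant registers store the raw bits, not a converted integer, so
+ * A6XX_RB_BLEND_RED_F32(1.0f) yields 0x3f800000.
+ */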
+
+#define REG_A6XX_RB_ALPHA_CONTROL 0x00008864
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
+static inline uint32_t A6XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
+{
+ return ((val) << A6XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A6XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
+}
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
+static inline uint32_t A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_CNTL 0x00008865
+#define A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A6XX_RB_BLEND_CNTL_DUAL_COLOR_IN_ENABLE 0x00000200
+#define A6XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
+#define A6XX_RB_BLEND_CNTL_ALPHA_TO_ONE 0x00000800
+#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
+#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_PLANE_CNTL 0x00008870
+#define A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__MASK 0x00000003
+#define A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE(enum a6xx_ztest_mode val)
+{
+ return ((val) << A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__SHIFT) & A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_CNTL 0x00008871
+#define A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000001
+#define A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002
+#define A6XX_RB_DEPTH_CNTL_ZFUNC__MASK 0x0000001c
+#define A6XX_RB_DEPTH_CNTL_ZFUNC__SHIFT 2
+static inline uint32_t A6XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A6XX_RB_DEPTH_CNTL_ZFUNC__MASK;
+}
+#define A6XX_RB_DEPTH_CNTL_Z_CLAMP_ENABLE 0x00000020
+#define A6XX_RB_DEPTH_CNTL_Z_READ_ENABLE 0x00000040
+#define A6XX_RB_DEPTH_CNTL_Z_BOUNDS_ENABLE 0x00000080
+
+#define REG_A6XX_RB_DEPTH_BUFFER_INFO 0x00008872
+#define A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_format val)
+{
+ return ((val) << A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+#define A6XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK 0x00000018
+#define A6XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT 3
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_INFO_UNK3(uint32_t val)
+{
+ return ((val) << A6XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT) & A6XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_BUFFER_PITCH 0x00008873
+#define A6XX_RB_DEPTH_BUFFER_PITCH__MASK 0x00003fff
+#define A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x00008874
+#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0x0fffffff
+#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_BUFFER_BASE 0x00008875
+#define A6XX_RB_DEPTH_BUFFER_BASE__MASK 0xffffffff
+#define A6XX_RB_DEPTH_BUFFER_BASE__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_BASE(uint32_t val)
+{
+ return ((val) << A6XX_RB_DEPTH_BUFFER_BASE__SHIFT) & A6XX_RB_DEPTH_BUFFER_BASE__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_BUFFER_BASE_GMEM 0x00008877
+#define A6XX_RB_DEPTH_BUFFER_BASE_GMEM__MASK 0xfffff000
+#define A6XX_RB_DEPTH_BUFFER_BASE_GMEM__SHIFT 12
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_BASE_GMEM(uint32_t val)
+{
+ return ((val >> 12) << A6XX_RB_DEPTH_BUFFER_BASE_GMEM__SHIFT) & A6XX_RB_DEPTH_BUFFER_BASE_GMEM__MASK;
+}
+
+#define REG_A6XX_RB_Z_BOUNDS_MIN 0x00008878
+#define A6XX_RB_Z_BOUNDS_MIN__MASK 0xffffffff
+#define A6XX_RB_Z_BOUNDS_MIN__SHIFT 0
+static inline uint32_t A6XX_RB_Z_BOUNDS_MIN(float val)
+{
+ return ((fui(val)) << A6XX_RB_Z_BOUNDS_MIN__SHIFT) & A6XX_RB_Z_BOUNDS_MIN__MASK;
+}
+
+#define REG_A6XX_RB_Z_BOUNDS_MAX 0x00008879
+#define A6XX_RB_Z_BOUNDS_MAX__MASK 0xffffffff
+#define A6XX_RB_Z_BOUNDS_MAX__SHIFT 0
+static inline uint32_t A6XX_RB_Z_BOUNDS_MAX(float val)
+{
+ return ((fui(val)) << A6XX_RB_Z_BOUNDS_MAX__SHIFT) & A6XX_RB_Z_BOUNDS_MAX__MASK;
+}
+
+#define REG_A6XX_RB_STENCIL_CONTROL 0x00008880
+#define A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A6XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A6XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A6XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A6XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A6XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A6XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A6XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A6XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A6XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A6XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
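+/* Sketch of a two-sided "always pass, replace on front faces" stencil
+ * state packed with the helpers above; FUNC_* and STENCIL_* enum values
+ * are assumed from adreno_common.xml.h:
+ *
+ *   uint32_t sc = A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE |
+ *                 A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF |
+ *                 A6XX_RB_STENCIL_CONTROL_FUNC(FUNC_ALWAYS) |
+ *                 A6XX_RB_STENCIL_CONTROL_ZPASS(STENCIL_REPLACE) |
+ *                 A6XX_RB_STENCIL_CONTROL_FUNC_BF(FUNC_ALWAYS) |
+ *                 A6XX_RB_STENCIL_CONTROL_ZPASS_BF(STENCIL_KEEP);
+ */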
+
+#define REG_A6XX_RB_STENCIL_INFO 0x00008881
+#define A6XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
+#define A6XX_RB_STENCIL_INFO_UNK1 0x00000002
+
+#define REG_A6XX_RB_STENCIL_BUFFER_PITCH 0x00008882
+#define A6XX_RB_STENCIL_BUFFER_PITCH__MASK 0x00000fff
+#define A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_STENCIL_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH 0x00008883
+#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK 0x00ffffff
+#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_STENCIL_BUFFER_BASE 0x00008884
+#define A6XX_RB_STENCIL_BUFFER_BASE__MASK 0xffffffff
+#define A6XX_RB_STENCIL_BUFFER_BASE__SHIFT 0
+static inline uint32_t A6XX_RB_STENCIL_BUFFER_BASE(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCIL_BUFFER_BASE__SHIFT) & A6XX_RB_STENCIL_BUFFER_BASE__MASK;
+}
+
+#define REG_A6XX_RB_STENCIL_BUFFER_BASE_GMEM 0x00008886
+#define A6XX_RB_STENCIL_BUFFER_BASE_GMEM__MASK 0xfffff000
+#define A6XX_RB_STENCIL_BUFFER_BASE_GMEM__SHIFT 12
+static inline uint32_t A6XX_RB_STENCIL_BUFFER_BASE_GMEM(uint32_t val)
+{
+ return ((val >> 12) << A6XX_RB_STENCIL_BUFFER_BASE_GMEM__SHIFT) & A6XX_RB_STENCIL_BUFFER_BASE_GMEM__MASK;
+}
+
+#define REG_A6XX_RB_STENCILREF 0x00008887
+#define A6XX_RB_STENCILREF_REF__MASK 0x000000ff
+#define A6XX_RB_STENCILREF_REF__SHIFT 0
+static inline uint32_t A6XX_RB_STENCILREF_REF(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILREF_REF__SHIFT) & A6XX_RB_STENCILREF_REF__MASK;
+}
+#define A6XX_RB_STENCILREF_BFREF__MASK 0x0000ff00
+#define A6XX_RB_STENCILREF_BFREF__SHIFT 8
+static inline uint32_t A6XX_RB_STENCILREF_BFREF(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILREF_BFREF__SHIFT) & A6XX_RB_STENCILREF_BFREF__MASK;
+}
+
+#define REG_A6XX_RB_STENCILMASK 0x00008888
+#define A6XX_RB_STENCILMASK_MASK__MASK 0x000000ff
+#define A6XX_RB_STENCILMASK_MASK__SHIFT 0
+static inline uint32_t A6XX_RB_STENCILMASK_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILMASK_MASK__SHIFT) & A6XX_RB_STENCILMASK_MASK__MASK;
+}
+#define A6XX_RB_STENCILMASK_BFMASK__MASK 0x0000ff00
+#define A6XX_RB_STENCILMASK_BFMASK__SHIFT 8
+static inline uint32_t A6XX_RB_STENCILMASK_BFMASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILMASK_BFMASK__SHIFT) & A6XX_RB_STENCILMASK_BFMASK__MASK;
+}
+
+#define REG_A6XX_RB_STENCILWRMASK 0x00008889
+#define A6XX_RB_STENCILWRMASK_WRMASK__MASK 0x000000ff
+#define A6XX_RB_STENCILWRMASK_WRMASK__SHIFT 0
+static inline uint32_t A6XX_RB_STENCILWRMASK_WRMASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILWRMASK_WRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_WRMASK__MASK;
+}
+#define A6XX_RB_STENCILWRMASK_BFWRMASK__MASK 0x0000ff00
+#define A6XX_RB_STENCILWRMASK_BFWRMASK__SHIFT 8
+static inline uint32_t A6XX_RB_STENCILWRMASK_BFWRMASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILWRMASK_BFWRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_BFWRMASK__MASK;
+}
+
+#define REG_A6XX_RB_WINDOW_OFFSET 0x00008890
+#define A6XX_RB_WINDOW_OFFSET_X__MASK 0x00003fff
+#define A6XX_RB_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_RB_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_RB_WINDOW_OFFSET_X__SHIFT) & A6XX_RB_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_RB_WINDOW_OFFSET_Y__MASK 0x3fff0000
+#define A6XX_RB_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_RB_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_RB_WINDOW_OFFSET_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A6XX_RB_SAMPLE_COUNT_CONTROL 0x00008891
+#define A6XX_RB_SAMPLE_COUNT_CONTROL_UNK0 0x00000001
+#define A6XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
+
+#define REG_A6XX_RB_LRZ_CNTL 0x00008898
+#define A6XX_RB_LRZ_CNTL_ENABLE 0x00000001
+
+#define REG_A6XX_RB_Z_CLAMP_MIN 0x000088c0
+#define A6XX_RB_Z_CLAMP_MIN__MASK 0xffffffff
+#define A6XX_RB_Z_CLAMP_MIN__SHIFT 0
+static inline uint32_t A6XX_RB_Z_CLAMP_MIN(float val)
+{
+ return ((fui(val)) << A6XX_RB_Z_CLAMP_MIN__SHIFT) & A6XX_RB_Z_CLAMP_MIN__MASK;
+}
+
+#define REG_A6XX_RB_Z_CLAMP_MAX 0x000088c1
+#define A6XX_RB_Z_CLAMP_MAX__MASK 0xffffffff
+#define A6XX_RB_Z_CLAMP_MAX__SHIFT 0
+static inline uint32_t A6XX_RB_Z_CLAMP_MAX(float val)
+{
+ return ((fui(val)) << A6XX_RB_Z_CLAMP_MAX__SHIFT) & A6XX_RB_Z_CLAMP_MAX__MASK;
+}
+
+#define REG_A6XX_RB_UNKNOWN_88D0 0x000088d0
+#define A6XX_RB_UNKNOWN_88D0_UNK0__MASK 0x00001fff
+#define A6XX_RB_UNKNOWN_88D0_UNK0__SHIFT 0
+static inline uint32_t A6XX_RB_UNKNOWN_88D0_UNK0(uint32_t val)
+{
+ return ((val) << A6XX_RB_UNKNOWN_88D0_UNK0__SHIFT) & A6XX_RB_UNKNOWN_88D0_UNK0__MASK;
+}
+#define A6XX_RB_UNKNOWN_88D0_UNK16__MASK 0x07ff0000
+#define A6XX_RB_UNKNOWN_88D0_UNK16__SHIFT 16
+static inline uint32_t A6XX_RB_UNKNOWN_88D0_UNK16(uint32_t val)
+{
+ return ((val) << A6XX_RB_UNKNOWN_88D0_UNK16__SHIFT) & A6XX_RB_UNKNOWN_88D0_UNK16__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_SCISSOR_TL 0x000088d1
+#define A6XX_RB_BLIT_SCISSOR_TL_X__MASK 0x00003fff
+#define A6XX_RB_BLIT_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_TL_X__SHIFT) & A6XX_RB_BLIT_SCISSOR_TL_X__MASK;
+}
+#define A6XX_RB_BLIT_SCISSOR_TL_Y__MASK 0x3fff0000
+#define A6XX_RB_BLIT_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_TL_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_SCISSOR_BR 0x000088d2
+#define A6XX_RB_BLIT_SCISSOR_BR_X__MASK 0x00003fff
+#define A6XX_RB_BLIT_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_BR_X__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_X__MASK;
+}
+#define A6XX_RB_BLIT_SCISSOR_BR_Y__MASK 0x3fff0000
+#define A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A6XX_RB_BIN_CONTROL2 0x000088d3
+#define A6XX_RB_BIN_CONTROL2_BINW__MASK 0x0000003f
+#define A6XX_RB_BIN_CONTROL2_BINW__SHIFT 0
+static inline uint32_t A6XX_RB_BIN_CONTROL2_BINW(uint32_t val)
+{
+ return ((val >> 5) << A6XX_RB_BIN_CONTROL2_BINW__SHIFT) & A6XX_RB_BIN_CONTROL2_BINW__MASK;
+}
+#define A6XX_RB_BIN_CONTROL2_BINH__MASK 0x00007f00
+#define A6XX_RB_BIN_CONTROL2_BINH__SHIFT 8
+static inline uint32_t A6XX_RB_BIN_CONTROL2_BINH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_RB_BIN_CONTROL2_BINH__SHIFT) & A6XX_RB_BIN_CONTROL2_BINH__MASK;
+}
+
+#define REG_A6XX_RB_WINDOW_OFFSET2 0x000088d4
+#define A6XX_RB_WINDOW_OFFSET2_X__MASK 0x00003fff
+#define A6XX_RB_WINDOW_OFFSET2_X__SHIFT 0
+static inline uint32_t A6XX_RB_WINDOW_OFFSET2_X(uint32_t val)
+{
+ return ((val) << A6XX_RB_WINDOW_OFFSET2_X__SHIFT) & A6XX_RB_WINDOW_OFFSET2_X__MASK;
+}
+#define A6XX_RB_WINDOW_OFFSET2_Y__MASK 0x3fff0000
+#define A6XX_RB_WINDOW_OFFSET2_Y__SHIFT 16
+static inline uint32_t A6XX_RB_WINDOW_OFFSET2_Y(uint32_t val)
+{
+ return ((val) << A6XX_RB_WINDOW_OFFSET2_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET2_Y__MASK;
+}
+
+#define REG_A6XX_RB_MSAA_CNTL 0x000088d5
+#define A6XX_RB_MSAA_CNTL_SAMPLES__MASK 0x00000018
+#define A6XX_RB_MSAA_CNTL_SAMPLES__SHIFT 3
+static inline uint32_t A6XX_RB_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_BASE_GMEM 0x000088d6
+#define A6XX_RB_BLIT_BASE_GMEM__MASK 0xfffff000
+#define A6XX_RB_BLIT_BASE_GMEM__SHIFT 12
+static inline uint32_t A6XX_RB_BLIT_BASE_GMEM(uint32_t val)
+{
+ return ((val >> 12) << A6XX_RB_BLIT_BASE_GMEM__SHIFT) & A6XX_RB_BLIT_BASE_GMEM__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_DST_INFO 0x000088d7
+#define A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK 0x00000003
+#define A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK;
+}
+#define A6XX_RB_BLIT_DST_INFO_FLAGS 0x00000004
+#define A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK 0x00000018
+#define A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT 3
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT) & A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK;
+}
+#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK 0x00000060
+#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT 5
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK;
+}
+#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK 0x00007f80
+#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT 7
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_RB_BLIT_DST_INFO_UNK15 0x00008000
+
+#define REG_A6XX_RB_BLIT_DST 0x000088d8
+#define A6XX_RB_BLIT_DST__MASK 0xffffffff
+#define A6XX_RB_BLIT_DST__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_DST(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_DST__SHIFT) & A6XX_RB_BLIT_DST__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_DST_PITCH 0x000088da
+#define A6XX_RB_BLIT_DST_PITCH__MASK 0x0000ffff
+#define A6XX_RB_BLIT_DST_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_DST_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_BLIT_DST_PITCH__SHIFT) & A6XX_RB_BLIT_DST_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_DST_ARRAY_PITCH 0x000088db
+#define A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK 0x1fffffff
+#define A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_FLAG_DST 0x000088dc
+#define A6XX_RB_BLIT_FLAG_DST__MASK 0xffffffff
+#define A6XX_RB_BLIT_FLAG_DST__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_FLAG_DST(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_FLAG_DST__SHIFT) & A6XX_RB_BLIT_FLAG_DST__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_FLAG_DST_PITCH 0x000088de
+#define A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__MASK 0x000007ff
+#define A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__SHIFT) & A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__MASK;
+}
+#define A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__MASK 0x0ffff800
+#define A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__SHIFT 11
+static inline uint32_t A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 7) << A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0 0x000088df
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW1 0x000088e0
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW2 0x000088e1
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW3 0x000088e2
+
+#define REG_A6XX_RB_BLIT_INFO 0x000088e3
+#define A6XX_RB_BLIT_INFO_UNK0 0x00000001
+#define A6XX_RB_BLIT_INFO_GMEM 0x00000002
+#define A6XX_RB_BLIT_INFO_SAMPLE_0 0x00000004
+#define A6XX_RB_BLIT_INFO_DEPTH 0x00000008
+#define A6XX_RB_BLIT_INFO_CLEAR_MASK__MASK 0x000000f0
+#define A6XX_RB_BLIT_INFO_CLEAR_MASK__SHIFT 4
+static inline uint32_t A6XX_RB_BLIT_INFO_CLEAR_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_INFO_CLEAR_MASK__SHIFT) & A6XX_RB_BLIT_INFO_CLEAR_MASK__MASK;
+}
+#define A6XX_RB_BLIT_INFO_UNK8__MASK 0x00000300
+#define A6XX_RB_BLIT_INFO_UNK8__SHIFT 8
+static inline uint32_t A6XX_RB_BLIT_INFO_UNK8(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_INFO_UNK8__SHIFT) & A6XX_RB_BLIT_INFO_UNK8__MASK;
+}
+#define A6XX_RB_BLIT_INFO_UNK12__MASK 0x0000f000
+#define A6XX_RB_BLIT_INFO_UNK12__SHIFT 12
+static inline uint32_t A6XX_RB_BLIT_INFO_UNK12(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_INFO_UNK12__SHIFT) & A6XX_RB_BLIT_INFO_UNK12__MASK;
+}
+
+#define REG_A6XX_RB_UNKNOWN_88F0 0x000088f0
+
+#define REG_A6XX_RB_UNK_FLAG_BUFFER_BASE 0x000088f1
+#define A6XX_RB_UNK_FLAG_BUFFER_BASE__MASK 0xffffffff
+#define A6XX_RB_UNK_FLAG_BUFFER_BASE__SHIFT 0
+static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_BASE(uint32_t val)
+{
+ return ((val) << A6XX_RB_UNK_FLAG_BUFFER_BASE__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_BASE__MASK;
+}
+
+#define REG_A6XX_RB_UNK_FLAG_BUFFER_PITCH 0x000088f3
+#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__MASK 0x000007ff
+#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__MASK;
+}
+#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x00fff800
+#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
+static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 7) << A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_UNKNOWN_88F4 0x000088f4
+
+#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE 0x00008900
+#define A6XX_RB_DEPTH_FLAG_BUFFER_BASE__MASK 0xffffffff
+#define A6XX_RB_DEPTH_FLAG_BUFFER_BASE__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_BASE(uint32_t val)
+{
+ return ((val) << A6XX_RB_DEPTH_FLAG_BUFFER_BASE__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_BASE__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x00008902
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__MASK 0x0000007f
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__MASK;
+}
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__MASK 0x00000700
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__SHIFT 8
+static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8(uint32_t val)
+{
+ return ((val) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__MASK;
+}
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x0ffff800
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
+static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 7) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER(uint32_t i0) { return 0x00008903 + 0x3*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR(uint32_t i0) { return 0x00008903 + 0x3*i0; }
+#define A6XX_RB_MRT_FLAG_BUFFER_ADDR__MASK 0xffffffff
+#define A6XX_RB_MRT_FLAG_BUFFER_ADDR__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_RB_MRT_FLAG_BUFFER_ADDR__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_ADDR__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t i0) { return 0x00008905 + 0x3*i0; }
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK 0x000007ff
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK;
+}
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x1ffff800
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
+static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 7) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_SAMPLE_COUNT_ADDR 0x00008927
+#define A6XX_RB_SAMPLE_COUNT_ADDR__MASK 0xffffffff
+#define A6XX_RB_SAMPLE_COUNT_ADDR__SHIFT 0
+static inline uint32_t A6XX_RB_SAMPLE_COUNT_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_RB_SAMPLE_COUNT_ADDR__SHIFT) & A6XX_RB_SAMPLE_COUNT_ADDR__MASK;
+}
+
+#define REG_A6XX_RB_UNKNOWN_8A00 0x00008a00
+
+#define REG_A6XX_RB_UNKNOWN_8A10 0x00008a10
+
+#define REG_A6XX_RB_UNKNOWN_8A20 0x00008a20
+
+#define REG_A6XX_RB_UNKNOWN_8A30 0x00008a30
+
+#define REG_A6XX_RB_2D_BLIT_CNTL 0x00008c00
+#define A6XX_RB_2D_BLIT_CNTL_ROTATE__MASK 0x00000007
+#define A6XX_RB_2D_BLIT_CNTL_ROTATE__SHIFT 0
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_ROTATE(enum a6xx_rotation val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_ROTATE__SHIFT) & A6XX_RB_2D_BLIT_CNTL_ROTATE__MASK;
+}
+#define A6XX_RB_2D_BLIT_CNTL_OVERWRITEEN 0x00000008
+#define A6XX_RB_2D_BLIT_CNTL_UNK4__MASK 0x00000070
+#define A6XX_RB_2D_BLIT_CNTL_UNK4__SHIFT 4
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_UNK4(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_UNK4__SHIFT) & A6XX_RB_2D_BLIT_CNTL_UNK4__MASK;
+}
+#define A6XX_RB_2D_BLIT_CNTL_SOLID_COLOR 0x00000080
+#define A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK 0x0000ff00
+#define A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT 8
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
+}
+#define A6XX_RB_2D_BLIT_CNTL_SCISSOR 0x00010000
+#define A6XX_RB_2D_BLIT_CNTL_UNK17__MASK 0x00060000
+#define A6XX_RB_2D_BLIT_CNTL_UNK17__SHIFT 17
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_UNK17(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_UNK17__SHIFT) & A6XX_RB_2D_BLIT_CNTL_UNK17__MASK;
+}
+#define A6XX_RB_2D_BLIT_CNTL_D24S8 0x00080000
+#define A6XX_RB_2D_BLIT_CNTL_MASK__MASK 0x00f00000
+#define A6XX_RB_2D_BLIT_CNTL_MASK__SHIFT 20
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_MASK__SHIFT) & A6XX_RB_2D_BLIT_CNTL_MASK__MASK;
+}
+#define A6XX_RB_2D_BLIT_CNTL_IFMT__MASK 0x1f000000
+#define A6XX_RB_2D_BLIT_CNTL_IFMT__SHIFT 24
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_IFMT(enum a6xx_2d_ifmt val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_IFMT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_IFMT__MASK;
+}
+#define A6XX_RB_2D_BLIT_CNTL_RASTER_MODE__MASK 0x20000000
+#define A6XX_RB_2D_BLIT_CNTL_RASTER_MODE__SHIFT 29
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_RASTER_MODE(enum a6xx_raster_mode val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_RASTER_MODE__SHIFT) & A6XX_RB_2D_BLIT_CNTL_RASTER_MODE__MASK;
+}
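+/* Sketch of a plain (unrotated, all-channels) 2D blit control word; fmt
+ * and ifmt stand in for caller-chosen a6xx_format / a6xx_2d_ifmt values:
+ *
+ *   uint32_t cntl = A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(fmt) |
+ *                   A6XX_RB_2D_BLIT_CNTL_MASK(0xf) |
+ *                   A6XX_RB_2D_BLIT_CNTL_IFMT(ifmt);
+ */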
+
+#define REG_A6XX_RB_2D_UNKNOWN_8C01 0x00008c01
+
+#define REG_A6XX_RB_2D_DST_INFO 0x00008c17
+#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_TILE_MODE__MASK 0x00000300
+#define A6XX_RB_2D_DST_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A6XX_RB_2D_DST_INFO_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_2D_DST_INFO_TILE_MODE__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A6XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_2D_DST_INFO_COLOR_SWAP__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_FLAGS 0x00001000
+#define A6XX_RB_2D_DST_INFO_SRGB 0x00002000
+#define A6XX_RB_2D_DST_INFO_SAMPLES__MASK 0x0000c000
+#define A6XX_RB_2D_DST_INFO_SAMPLES__SHIFT 14
+static inline uint32_t A6XX_RB_2D_DST_INFO_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_SAMPLES__SHIFT) & A6XX_RB_2D_DST_INFO_SAMPLES__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_FILTER 0x00010000
+#define A6XX_RB_2D_DST_INFO_UNK17 0x00020000
+#define A6XX_RB_2D_DST_INFO_SAMPLES_AVERAGE 0x00040000
+#define A6XX_RB_2D_DST_INFO_UNK19 0x00080000
+#define A6XX_RB_2D_DST_INFO_UNK20 0x00100000
+#define A6XX_RB_2D_DST_INFO_UNK21 0x00200000
+#define A6XX_RB_2D_DST_INFO_UNK22 0x00400000
+#define A6XX_RB_2D_DST_INFO_UNK23__MASK 0x07800000
+#define A6XX_RB_2D_DST_INFO_UNK23__SHIFT 23
+static inline uint32_t A6XX_RB_2D_DST_INFO_UNK23(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_UNK23__SHIFT) & A6XX_RB_2D_DST_INFO_UNK23__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_UNK28 0x10000000
+
+#define REG_A6XX_RB_2D_DST 0x00008c18
+#define A6XX_RB_2D_DST__MASK 0xffffffff
+#define A6XX_RB_2D_DST__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_DST__SHIFT) & A6XX_RB_2D_DST__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_PITCH 0x00008c1a
+#define A6XX_RB_2D_DST_PITCH__MASK 0x0000ffff
+#define A6XX_RB_2D_DST_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_2D_DST_PITCH__SHIFT) & A6XX_RB_2D_DST_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_PLANE1 0x00008c1b
+#define A6XX_RB_2D_DST_PLANE1__MASK 0xffffffff
+#define A6XX_RB_2D_DST_PLANE1__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_PLANE1(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_DST_PLANE1__SHIFT) & A6XX_RB_2D_DST_PLANE1__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_PLANE_PITCH 0x00008c1d
+#define A6XX_RB_2D_DST_PLANE_PITCH__MASK 0x0000ffff
+#define A6XX_RB_2D_DST_PLANE_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_PLANE_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_2D_DST_PLANE_PITCH__SHIFT) & A6XX_RB_2D_DST_PLANE_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_PLANE2 0x00008c1e
+#define A6XX_RB_2D_DST_PLANE2__MASK 0xffffffff
+#define A6XX_RB_2D_DST_PLANE2__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_PLANE2(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_DST_PLANE2__SHIFT) & A6XX_RB_2D_DST_PLANE2__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_FLAGS 0x00008c20
+#define A6XX_RB_2D_DST_FLAGS__MASK 0xffffffff
+#define A6XX_RB_2D_DST_FLAGS__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_FLAGS(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_DST_FLAGS__SHIFT) & A6XX_RB_2D_DST_FLAGS__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_FLAGS_PITCH 0x00008c22
+#define A6XX_RB_2D_DST_FLAGS_PITCH__MASK 0x000000ff
+#define A6XX_RB_2D_DST_FLAGS_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_FLAGS_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_2D_DST_FLAGS_PITCH__SHIFT) & A6XX_RB_2D_DST_FLAGS_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_FLAGS_PLANE 0x00008c23
+#define A6XX_RB_2D_DST_FLAGS_PLANE__MASK 0xffffffff
+#define A6XX_RB_2D_DST_FLAGS_PLANE__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_FLAGS_PLANE(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_DST_FLAGS_PLANE__SHIFT) & A6XX_RB_2D_DST_FLAGS_PLANE__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_FLAGS_PLANE_PITCH 0x00008c25
+#define A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__MASK 0x000000ff
+#define A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_FLAGS_PLANE_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__SHIFT) & A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C0 0x00008c2c
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C1 0x00008c2d
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C2 0x00008c2e
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C3 0x00008c2f
+
+#define REG_A6XX_RB_UNKNOWN_8E01 0x00008e01
+
+#define REG_A6XX_RB_UNKNOWN_8E04 0x00008e04
+
+#define REG_A6XX_RB_ADDR_MODE_CNTL 0x00008e05
+
+#define REG_A6XX_RB_CCU_CNTL 0x00008e07
+#define A6XX_RB_CCU_CNTL_COLOR_OFFSET__MASK 0xff800000
+#define A6XX_RB_CCU_CNTL_COLOR_OFFSET__SHIFT 23
+static inline uint32_t A6XX_RB_CCU_CNTL_COLOR_OFFSET(uint32_t val)
+{
+ return ((val >> 12) << A6XX_RB_CCU_CNTL_COLOR_OFFSET__SHIFT) & A6XX_RB_CCU_CNTL_COLOR_OFFSET__MASK;
+}
+#define A6XX_RB_CCU_CNTL_DEPTH_OFFSET__MASK 0x001ff000
+#define A6XX_RB_CCU_CNTL_DEPTH_OFFSET__SHIFT 12
+static inline uint32_t A6XX_RB_CCU_CNTL_DEPTH_OFFSET(uint32_t val)
+{
+ return ((val >> 12) << A6XX_RB_CCU_CNTL_DEPTH_OFFSET__SHIFT) & A6XX_RB_CCU_CNTL_DEPTH_OFFSET__MASK;
+}
+#define A6XX_RB_CCU_CNTL_GMEM 0x00400000
+#define A6XX_RB_CCU_CNTL_UNK2 0x00000004
+
+#define REG_A6XX_RB_NC_MODE_CNTL 0x00008e08
+#define A6XX_RB_NC_MODE_CNTL_MODE 0x00000001
+#define A6XX_RB_NC_MODE_CNTL_LOWER_BIT__MASK 0x00000006
+#define A6XX_RB_NC_MODE_CNTL_LOWER_BIT__SHIFT 1
+static inline uint32_t A6XX_RB_NC_MODE_CNTL_LOWER_BIT(uint32_t val)
+{
+ return ((val) << A6XX_RB_NC_MODE_CNTL_LOWER_BIT__SHIFT) & A6XX_RB_NC_MODE_CNTL_LOWER_BIT__MASK;
+}
+#define A6XX_RB_NC_MODE_CNTL_MIN_ACCESS_LENGTH 0x00000008
+#define A6XX_RB_NC_MODE_CNTL_AMSBC 0x00000010
+#define A6XX_RB_NC_MODE_CNTL_UPPER_BIT__MASK 0x00000400
+#define A6XX_RB_NC_MODE_CNTL_UPPER_BIT__SHIFT 10
+static inline uint32_t A6XX_RB_NC_MODE_CNTL_UPPER_BIT(uint32_t val)
+{
+ return ((val) << A6XX_RB_NC_MODE_CNTL_UPPER_BIT__SHIFT) & A6XX_RB_NC_MODE_CNTL_UPPER_BIT__MASK;
+}
+#define A6XX_RB_NC_MODE_CNTL_RGB565_PREDICATOR 0x00000800
+#define A6XX_RB_NC_MODE_CNTL_UNK12__MASK 0x00003000
+#define A6XX_RB_NC_MODE_CNTL_UNK12__SHIFT 12
+static inline uint32_t A6XX_RB_NC_MODE_CNTL_UNK12(uint32_t val)
+{
+ return ((val) << A6XX_RB_NC_MODE_CNTL_UNK12__SHIFT) & A6XX_RB_NC_MODE_CNTL_UNK12__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_PERFCTR_RB_SEL(uint32_t i0) { return 0x00008e10 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_RB_PERFCTR_CCU_SEL(uint32_t i0) { return 0x00008e18 + 0x1*i0; }
+
+#define REG_A6XX_RB_UNKNOWN_8E28 0x00008e28
+
+static inline uint32_t REG_A6XX_RB_PERFCTR_CMP_SEL(uint32_t i0) { return 0x00008e2c + 0x1*i0; }
+
+#define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST 0x00008e3b
+
+#define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD 0x00008e3d
+
+#define REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE 0x00008e50
+
+#define REG_A6XX_RB_UNKNOWN_8E51 0x00008e51
+#define A6XX_RB_UNKNOWN_8E51__MASK 0xffffffff
+#define A6XX_RB_UNKNOWN_8E51__SHIFT 0
+static inline uint32_t A6XX_RB_UNKNOWN_8E51(uint32_t val)
+{
+ return ((val) << A6XX_RB_UNKNOWN_8E51__SHIFT) & A6XX_RB_UNKNOWN_8E51__MASK;
+}
+
+#define REG_A6XX_VPC_GS_PARAM 0x00009100
+#define A6XX_VPC_GS_PARAM_LINELENGTHLOC__MASK 0x000000ff
+#define A6XX_VPC_GS_PARAM_LINELENGTHLOC__SHIFT 0
+static inline uint32_t A6XX_VPC_GS_PARAM_LINELENGTHLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_PARAM_LINELENGTHLOC__SHIFT) & A6XX_VPC_GS_PARAM_LINELENGTHLOC__MASK;
+}
+
+#define REG_A6XX_VPC_VS_CLIP_CNTL 0x00009101
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8
+static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK;
+}
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16
+static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK;
+}
+
+#define REG_A6XX_VPC_GS_CLIP_CNTL 0x00009102
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8
+static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK;
+}
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16
+static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK;
+}
+
+#define REG_A6XX_VPC_DS_CLIP_CNTL 0x00009103
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8
+static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK;
+}
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16
+static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK;
+}
+
+#define REG_A6XX_VPC_VS_LAYER_CNTL 0x00009104
+#define A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__MASK 0x000000ff
+#define A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__SHIFT 0
+static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_LAYERLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__MASK;
+}
+#define A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__MASK 0x0000ff00
+#define A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_VIEWLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__MASK;
+}
+
+#define REG_A6XX_VPC_GS_LAYER_CNTL 0x00009105
+#define A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__MASK 0x000000ff
+#define A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__SHIFT 0
+static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_LAYERLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__MASK;
+}
+#define A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__MASK 0x0000ff00
+#define A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_VIEWLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__MASK;
+}
+
+#define REG_A6XX_VPC_DS_LAYER_CNTL 0x00009106
+#define A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__MASK 0x000000ff
+#define A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__SHIFT 0
+static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_LAYERLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__MASK;
+}
+#define A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__MASK 0x0000ff00
+#define A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_VIEWLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__MASK;
+}
+
+#define REG_A6XX_VPC_UNKNOWN_9107 0x00009107
+#define A6XX_VPC_UNKNOWN_9107_RASTER_DISCARD 0x00000001
+#define A6XX_VPC_UNKNOWN_9107_UNK2 0x00000004
+
+#define REG_A6XX_VPC_POLYGON_MODE 0x00009108
+#define A6XX_VPC_POLYGON_MODE_MODE__MASK 0x00000003
+#define A6XX_VPC_POLYGON_MODE_MODE__SHIFT 0
+static inline uint32_t A6XX_VPC_POLYGON_MODE_MODE(enum a6xx_polygon_mode val)
+{
+ return ((val) << A6XX_VPC_POLYGON_MODE_MODE__SHIFT) & A6XX_VPC_POLYGON_MODE_MODE__MASK;
+}
+
+static inline uint32_t REG_A6XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00009200 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00009200 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00009208 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00009208 + 0x1*i0; }
+
+#define REG_A6XX_VPC_UNKNOWN_9210 0x00009210
+
+#define REG_A6XX_VPC_UNKNOWN_9211 0x00009211
+
+static inline uint32_t REG_A6XX_VPC_VAR(uint32_t i0) { return 0x00009212 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x00009212 + 0x1*i0; }
+
+#define REG_A6XX_VPC_SO_CNTL 0x00009216
+#define A6XX_VPC_SO_CNTL_ADDR__MASK 0x000000ff
+#define A6XX_VPC_SO_CNTL_ADDR__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_CNTL_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_CNTL_ADDR__SHIFT) & A6XX_VPC_SO_CNTL_ADDR__MASK;
+}
+#define A6XX_VPC_SO_CNTL_RESET 0x00010000
+
+#define REG_A6XX_VPC_SO_PROG 0x00009217
+#define A6XX_VPC_SO_PROG_A_BUF__MASK 0x00000003
+#define A6XX_VPC_SO_PROG_A_BUF__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_PROG_A_BUF(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_PROG_A_BUF__SHIFT) & A6XX_VPC_SO_PROG_A_BUF__MASK;
+}
+#define A6XX_VPC_SO_PROG_A_OFF__MASK 0x000007fc
+#define A6XX_VPC_SO_PROG_A_OFF__SHIFT 2
+static inline uint32_t A6XX_VPC_SO_PROG_A_OFF(uint32_t val)
+{
+ return ((val >> 2) << A6XX_VPC_SO_PROG_A_OFF__SHIFT) & A6XX_VPC_SO_PROG_A_OFF__MASK;
+}
+#define A6XX_VPC_SO_PROG_A_EN 0x00000800
+#define A6XX_VPC_SO_PROG_B_BUF__MASK 0x00003000
+#define A6XX_VPC_SO_PROG_B_BUF__SHIFT 12
+static inline uint32_t A6XX_VPC_SO_PROG_B_BUF(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_PROG_B_BUF__SHIFT) & A6XX_VPC_SO_PROG_B_BUF__MASK;
+}
+#define A6XX_VPC_SO_PROG_B_OFF__MASK 0x007fc000
+#define A6XX_VPC_SO_PROG_B_OFF__SHIFT 14
+static inline uint32_t A6XX_VPC_SO_PROG_B_OFF(uint32_t val)
+{
+ return ((val >> 2) << A6XX_VPC_SO_PROG_B_OFF__SHIFT) & A6XX_VPC_SO_PROG_B_OFF__MASK;
+}
+#define A6XX_VPC_SO_PROG_B_EN 0x00800000
+
+#define REG_A6XX_VPC_SO_STREAM_COUNTS 0x00009218
+#define A6XX_VPC_SO_STREAM_COUNTS__MASK 0xffffffff
+#define A6XX_VPC_SO_STREAM_COUNTS__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_STREAM_COUNTS(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_STREAM_COUNTS__SHIFT) & A6XX_VPC_SO_STREAM_COUNTS__MASK;
+}
+
+static inline uint32_t REG_A6XX_VPC_SO(uint32_t i0) { return 0x0000921a + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_BASE(uint32_t i0) { return 0x0000921a + 0x7*i0; }
+#define A6XX_VPC_SO_BUFFER_BASE__MASK 0xffffffff
+#define A6XX_VPC_SO_BUFFER_BASE__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_BUFFER_BASE(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_BUFFER_BASE__SHIFT) & A6XX_VPC_SO_BUFFER_BASE__MASK;
+}
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_SIZE(uint32_t i0) { return 0x0000921c + 0x7*i0; }
+#define A6XX_VPC_SO_BUFFER_SIZE__MASK 0xfffffffc
+#define A6XX_VPC_SO_BUFFER_SIZE__SHIFT 2
+static inline uint32_t A6XX_VPC_SO_BUFFER_SIZE(uint32_t val)
+{
+ return ((val >> 2) << A6XX_VPC_SO_BUFFER_SIZE__SHIFT) & A6XX_VPC_SO_BUFFER_SIZE__MASK;
+}
+
+static inline uint32_t REG_A6XX_VPC_SO_NCOMP(uint32_t i0) { return 0x0000921d + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_OFFSET(uint32_t i0) { return 0x0000921e + 0x7*i0; }
+#define A6XX_VPC_SO_BUFFER_OFFSET__MASK 0xfffffffc
+#define A6XX_VPC_SO_BUFFER_OFFSET__SHIFT 2
+static inline uint32_t A6XX_VPC_SO_BUFFER_OFFSET(uint32_t val)
+{
+ return ((val >> 2) << A6XX_VPC_SO_BUFFER_OFFSET__SHIFT) & A6XX_VPC_SO_BUFFER_OFFSET__MASK;
+}
+
+static inline uint32_t REG_A6XX_VPC_SO_FLUSH_BASE(uint32_t i0) { return 0x0000921f + 0x7*i0; }
+#define A6XX_VPC_SO_FLUSH_BASE__MASK 0xffffffff
+#define A6XX_VPC_SO_FLUSH_BASE__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_FLUSH_BASE(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_FLUSH_BASE__SHIFT) & A6XX_VPC_SO_FLUSH_BASE__MASK;
+}
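+/* The VPC_SO registers form an indexed array with a stride of 0x7 dwords
+ * per stream-out buffer. BUFFER_SIZE and BUFFER_OFFSET keep byte units
+ * but clear the low two bits, so sizes must be 4-byte aligned. A minimal
+ * sketch of programming buffer i, assuming hypothetical emit_reg64() and
+ * emit_reg() writers:
+ *
+ *   emit_reg64(REG_A6XX_VPC_SO_BUFFER_BASE(i), bo_iova);
+ *   emit_reg(REG_A6XX_VPC_SO_BUFFER_SIZE(i),
+ *            A6XX_VPC_SO_BUFFER_SIZE(size_in_bytes));
+ */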
+
+#define REG_A6XX_VPC_POINT_COORD_INVERT 0x00009236
+#define A6XX_VPC_POINT_COORD_INVERT_INVERT 0x00000001
+
+#define REG_A6XX_VPC_UNKNOWN_9300 0x00009300
+
+#define REG_A6XX_VPC_VS_PACK 0x00009301
+#define A6XX_VPC_VS_PACK_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_VPC_VS_PACK_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_VPC_VS_PACK_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_VS_PACK_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_VPC_VS_PACK_POSITIONLOC__MASK 0x0000ff00
+#define A6XX_VPC_VS_PACK_POSITIONLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_VS_PACK_POSITIONLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_PACK_POSITIONLOC__SHIFT) & A6XX_VPC_VS_PACK_POSITIONLOC__MASK;
+}
+#define A6XX_VPC_VS_PACK_PSIZELOC__MASK 0x00ff0000
+#define A6XX_VPC_VS_PACK_PSIZELOC__SHIFT 16
+static inline uint32_t A6XX_VPC_VS_PACK_PSIZELOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_VS_PACK_PSIZELOC__MASK;
+}
+#define A6XX_VPC_VS_PACK_EXTRAPOS__MASK 0x0f000000
+#define A6XX_VPC_VS_PACK_EXTRAPOS__SHIFT 24
+static inline uint32_t A6XX_VPC_VS_PACK_EXTRAPOS(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_PACK_EXTRAPOS__SHIFT) & A6XX_VPC_VS_PACK_EXTRAPOS__MASK;
+}
+
+#define REG_A6XX_VPC_GS_PACK 0x00009302
+#define A6XX_VPC_GS_PACK_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_VPC_GS_PACK_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_VPC_GS_PACK_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_GS_PACK_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_VPC_GS_PACK_POSITIONLOC__MASK 0x0000ff00
+#define A6XX_VPC_GS_PACK_POSITIONLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_GS_PACK_POSITIONLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_PACK_POSITIONLOC__SHIFT) & A6XX_VPC_GS_PACK_POSITIONLOC__MASK;
+}
+#define A6XX_VPC_GS_PACK_PSIZELOC__MASK 0x00ff0000
+#define A6XX_VPC_GS_PACK_PSIZELOC__SHIFT 16
+static inline uint32_t A6XX_VPC_GS_PACK_PSIZELOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_GS_PACK_PSIZELOC__MASK;
+}
+#define A6XX_VPC_GS_PACK_EXTRAPOS__MASK 0x0f000000
+#define A6XX_VPC_GS_PACK_EXTRAPOS__SHIFT 24
+static inline uint32_t A6XX_VPC_GS_PACK_EXTRAPOS(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_PACK_EXTRAPOS__SHIFT) & A6XX_VPC_GS_PACK_EXTRAPOS__MASK;
+}
+
+#define REG_A6XX_VPC_DS_PACK 0x00009303
+#define A6XX_VPC_DS_PACK_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_VPC_DS_PACK_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_VPC_DS_PACK_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_DS_PACK_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_VPC_DS_PACK_POSITIONLOC__MASK 0x0000ff00
+#define A6XX_VPC_DS_PACK_POSITIONLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_DS_PACK_POSITIONLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_PACK_POSITIONLOC__SHIFT) & A6XX_VPC_DS_PACK_POSITIONLOC__MASK;
+}
+#define A6XX_VPC_DS_PACK_PSIZELOC__MASK 0x00ff0000
+#define A6XX_VPC_DS_PACK_PSIZELOC__SHIFT 16
+static inline uint32_t A6XX_VPC_DS_PACK_PSIZELOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_DS_PACK_PSIZELOC__MASK;
+}
+#define A6XX_VPC_DS_PACK_EXTRAPOS__MASK 0x0f000000
+#define A6XX_VPC_DS_PACK_EXTRAPOS__SHIFT 24
+static inline uint32_t A6XX_VPC_DS_PACK_EXTRAPOS(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_PACK_EXTRAPOS__SHIFT) & A6XX_VPC_DS_PACK_EXTRAPOS__MASK;
+}
+
+#define REG_A6XX_VPC_CNTL_0 0x00009304
+#define A6XX_VPC_CNTL_0_NUMNONPOSVAR__MASK 0x000000ff
+#define A6XX_VPC_CNTL_0_NUMNONPOSVAR__SHIFT 0
+static inline uint32_t A6XX_VPC_CNTL_0_NUMNONPOSVAR(uint32_t val)
+{
+ return ((val) << A6XX_VPC_CNTL_0_NUMNONPOSVAR__SHIFT) & A6XX_VPC_CNTL_0_NUMNONPOSVAR__MASK;
+}
+#define A6XX_VPC_CNTL_0_PRIMIDLOC__MASK 0x0000ff00
+#define A6XX_VPC_CNTL_0_PRIMIDLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_CNTL_0_PRIMIDLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_CNTL_0_PRIMIDLOC__SHIFT) & A6XX_VPC_CNTL_0_PRIMIDLOC__MASK;
+}
+#define A6XX_VPC_CNTL_0_VARYING 0x00010000
+#define A6XX_VPC_CNTL_0_VIEWIDLOC__MASK 0xff000000
+#define A6XX_VPC_CNTL_0_VIEWIDLOC__SHIFT 24
+static inline uint32_t A6XX_VPC_CNTL_0_VIEWIDLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_CNTL_0_VIEWIDLOC__SHIFT) & A6XX_VPC_CNTL_0_VIEWIDLOC__MASK;
+}
+
+#define REG_A6XX_VPC_SO_STREAM_CNTL 0x00009305
+#define A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__MASK 0x00000007
+#define A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__MASK;
+}
+#define A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__MASK 0x00000038
+#define A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__SHIFT 3
+static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__MASK;
+}
+#define A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__MASK 0x000001c0
+#define A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__SHIFT 6
+static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__MASK;
+}
+#define A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__MASK 0x00000e00
+#define A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__SHIFT 9
+static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__MASK;
+}
+#define A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__MASK 0x00078000
+#define A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__SHIFT 15
+static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__MASK;
+}
+
+#define REG_A6XX_VPC_SO_DISABLE 0x00009306
+#define A6XX_VPC_SO_DISABLE_DISABLE 0x00000001
+
+#define REG_A6XX_VPC_UNKNOWN_9600 0x00009600
+
+#define REG_A6XX_VPC_ADDR_MODE_CNTL 0x00009601
+
+#define REG_A6XX_VPC_UNKNOWN_9602 0x00009602
+
+#define REG_A6XX_VPC_UNKNOWN_9603 0x00009603
+
+static inline uint32_t REG_A6XX_VPC_PERFCTR_VPC_SEL(uint32_t i0) { return 0x00009604 + 0x1*i0; }
+
+#define REG_A6XX_PC_TESS_NUM_VERTEX 0x00009800
+
+#define REG_A6XX_PC_HS_INPUT_SIZE 0x00009801
+#define A6XX_PC_HS_INPUT_SIZE_SIZE__MASK 0x000007ff
+#define A6XX_PC_HS_INPUT_SIZE_SIZE__SHIFT 0
+static inline uint32_t A6XX_PC_HS_INPUT_SIZE_SIZE(uint32_t val)
+{
+ return ((val) << A6XX_PC_HS_INPUT_SIZE_SIZE__SHIFT) & A6XX_PC_HS_INPUT_SIZE_SIZE__MASK;
+}
+#define A6XX_PC_HS_INPUT_SIZE_UNK13__MASK 0x00002000
+#define A6XX_PC_HS_INPUT_SIZE_UNK13__SHIFT 13
+static inline uint32_t A6XX_PC_HS_INPUT_SIZE_UNK13(uint32_t val)
+{
+ return ((val) << A6XX_PC_HS_INPUT_SIZE_UNK13__SHIFT) & A6XX_PC_HS_INPUT_SIZE_UNK13__MASK;
+}
+
+#define REG_A6XX_PC_TESS_CNTL 0x00009802
+#define A6XX_PC_TESS_CNTL_SPACING__MASK 0x00000003
+#define A6XX_PC_TESS_CNTL_SPACING__SHIFT 0
+static inline uint32_t A6XX_PC_TESS_CNTL_SPACING(enum a6xx_tess_spacing val)
+{
+ return ((val) << A6XX_PC_TESS_CNTL_SPACING__SHIFT) & A6XX_PC_TESS_CNTL_SPACING__MASK;
+}
+#define A6XX_PC_TESS_CNTL_OUTPUT__MASK 0x0000000c
+#define A6XX_PC_TESS_CNTL_OUTPUT__SHIFT 2
+static inline uint32_t A6XX_PC_TESS_CNTL_OUTPUT(enum a6xx_tess_output val)
+{
+ return ((val) << A6XX_PC_TESS_CNTL_OUTPUT__SHIFT) & A6XX_PC_TESS_CNTL_OUTPUT__MASK;
+}
+
+#define REG_A6XX_PC_RESTART_INDEX 0x00009803
+
+#define REG_A6XX_PC_MODE_CNTL 0x00009804
+
+#define REG_A6XX_PC_POWER_CNTL 0x00009805
+
+#define REG_A6XX_PC_PRIMID_PASSTHRU 0x00009806
+
+#define REG_A6XX_PC_SO_STREAM_CNTL 0x00009808
+#define A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE 0x00008000
+
+#define REG_A6XX_PC_DGEN_SU_CONSERVATIVE_RAS_CNTL 0x0000980a
+#define A6XX_PC_DGEN_SU_CONSERVATIVE_RAS_CNTL_CONSERVATIVERASEN 0x00000001
+
+#define REG_A6XX_PC_DRAW_CMD 0x00009840
+#define A6XX_PC_DRAW_CMD_STATE_ID__MASK 0x000000ff
+#define A6XX_PC_DRAW_CMD_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_PC_DRAW_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_PC_DRAW_CMD_STATE_ID__SHIFT) & A6XX_PC_DRAW_CMD_STATE_ID__MASK;
+}
+
+#define REG_A6XX_PC_DISPATCH_CMD 0x00009841
+#define A6XX_PC_DISPATCH_CMD_STATE_ID__MASK 0x000000ff
+#define A6XX_PC_DISPATCH_CMD_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_PC_DISPATCH_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_PC_DISPATCH_CMD_STATE_ID__SHIFT) & A6XX_PC_DISPATCH_CMD_STATE_ID__MASK;
+}
+
+#define REG_A6XX_PC_EVENT_CMD 0x00009842
+#define A6XX_PC_EVENT_CMD_STATE_ID__MASK 0x00ff0000
+#define A6XX_PC_EVENT_CMD_STATE_ID__SHIFT 16
+static inline uint32_t A6XX_PC_EVENT_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_PC_EVENT_CMD_STATE_ID__SHIFT) & A6XX_PC_EVENT_CMD_STATE_ID__MASK;
+}
+#define A6XX_PC_EVENT_CMD_EVENT__MASK 0x0000007f
+#define A6XX_PC_EVENT_CMD_EVENT__SHIFT 0
+static inline uint32_t A6XX_PC_EVENT_CMD_EVENT(enum vgt_event_type val)
+{
+ return ((val) << A6XX_PC_EVENT_CMD_EVENT__SHIFT) & A6XX_PC_EVENT_CMD_EVENT__MASK;
+}
+
+#define REG_A6XX_PC_MARKER 0x00009880
+
+#define REG_A6XX_PC_POLYGON_MODE 0x00009981
+#define A6XX_PC_POLYGON_MODE_MODE__MASK 0x00000003
+#define A6XX_PC_POLYGON_MODE_MODE__SHIFT 0
+static inline uint32_t A6XX_PC_POLYGON_MODE_MODE(enum a6xx_polygon_mode val)
+{
+ return ((val) << A6XX_PC_POLYGON_MODE_MODE__SHIFT) & A6XX_PC_POLYGON_MODE_MODE__MASK;
+}
+
+#define REG_A6XX_PC_RASTER_CNTL 0x00009980
+#define A6XX_PC_RASTER_CNTL_STREAM__MASK 0x00000003
+#define A6XX_PC_RASTER_CNTL_STREAM__SHIFT 0
+static inline uint32_t A6XX_PC_RASTER_CNTL_STREAM(uint32_t val)
+{
+ return ((val) << A6XX_PC_RASTER_CNTL_STREAM__SHIFT) & A6XX_PC_RASTER_CNTL_STREAM__MASK;
+}
+#define A6XX_PC_RASTER_CNTL_DISCARD 0x00000004
+
+#define REG_A6XX_PC_PRIMITIVE_CNTL_0 0x00009b00
+#define A6XX_PC_PRIMITIVE_CNTL_0_PRIMITIVE_RESTART 0x00000001
+#define A6XX_PC_PRIMITIVE_CNTL_0_PROVOKING_VTX_LAST 0x00000002
+#define A6XX_PC_PRIMITIVE_CNTL_0_TESS_UPPER_LEFT_DOMAIN_ORIGIN 0x00000004
+#define A6XX_PC_PRIMITIVE_CNTL_0_UNK3 0x00000008
+
+#define REG_A6XX_PC_VS_OUT_CNTL 0x00009b01
+#define A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_PC_VS_OUT_CNTL_PSIZE 0x00000100
+#define A6XX_PC_VS_OUT_CNTL_LAYER 0x00000200
+#define A6XX_PC_VS_OUT_CNTL_VIEW 0x00000400
+#define A6XX_PC_VS_OUT_CNTL_PRIMITIVE_ID 0x00000800
+#define A6XX_PC_VS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000
+#define A6XX_PC_VS_OUT_CNTL_CLIP_MASK__SHIFT 16
+static inline uint32_t A6XX_PC_VS_OUT_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_PC_VS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_VS_OUT_CNTL_CLIP_MASK__MASK;
+}
+
+#define REG_A6XX_PC_GS_OUT_CNTL 0x00009b02
+#define A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_PC_GS_OUT_CNTL_PSIZE 0x00000100
+#define A6XX_PC_GS_OUT_CNTL_LAYER 0x00000200
+#define A6XX_PC_GS_OUT_CNTL_VIEW 0x00000400
+#define A6XX_PC_GS_OUT_CNTL_PRIMITIVE_ID 0x00000800
+#define A6XX_PC_GS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000
+#define A6XX_PC_GS_OUT_CNTL_CLIP_MASK__SHIFT 16
+static inline uint32_t A6XX_PC_GS_OUT_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_PC_GS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_GS_OUT_CNTL_CLIP_MASK__MASK;
+}
+
+#define REG_A6XX_PC_HS_OUT_CNTL 0x00009b03
+#define A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_PC_HS_OUT_CNTL_PSIZE 0x00000100
+#define A6XX_PC_HS_OUT_CNTL_LAYER 0x00000200
+#define A6XX_PC_HS_OUT_CNTL_VIEW 0x00000400
+#define A6XX_PC_HS_OUT_CNTL_PRIMITIVE_ID 0x00000800
+#define A6XX_PC_HS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000
+#define A6XX_PC_HS_OUT_CNTL_CLIP_MASK__SHIFT 16
+static inline uint32_t A6XX_PC_HS_OUT_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_PC_HS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_HS_OUT_CNTL_CLIP_MASK__MASK;
+}
+
+#define REG_A6XX_PC_DS_OUT_CNTL 0x00009b04
+#define A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_PC_DS_OUT_CNTL_PSIZE 0x00000100
+#define A6XX_PC_DS_OUT_CNTL_LAYER 0x00000200
+#define A6XX_PC_DS_OUT_CNTL_VIEW 0x00000400
+#define A6XX_PC_DS_OUT_CNTL_PRIMITIVE_ID 0x00000800
+#define A6XX_PC_DS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000
+#define A6XX_PC_DS_OUT_CNTL_CLIP_MASK__SHIFT 16
+static inline uint32_t A6XX_PC_DS_OUT_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_PC_DS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_DS_OUT_CNTL_CLIP_MASK__MASK;
+}
+
+#define REG_A6XX_PC_PRIMITIVE_CNTL_5 0x00009b05
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__MASK 0x000000ff
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__SHIFT 0
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT(uint32_t val)
+{
+ return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__MASK;
+}
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__MASK 0x00007c00
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__SHIFT 10
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS(uint32_t val)
+{
+ return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__MASK;
+}
+#define A6XX_PC_PRIMITIVE_CNTL_5_LINELENGTHEN 0x00008000
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__MASK 0x00030000
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__SHIFT 16
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT(enum a6xx_tess_output val)
+{
+ return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__MASK;
+}
+#define A6XX_PC_PRIMITIVE_CNTL_5_UNK18__MASK 0x00040000
+#define A6XX_PC_PRIMITIVE_CNTL_5_UNK18__SHIFT 18
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_UNK18(uint32_t val)
+{
+ return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_UNK18__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_UNK18__MASK;
+}
+
+#define REG_A6XX_PC_PRIMITIVE_CNTL_6 0x00009b06
+#define A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__MASK 0x000007ff
+#define A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__MASK;
+}
+
+#define REG_A6XX_PC_MULTIVIEW_CNTL 0x00009b07
+#define A6XX_PC_MULTIVIEW_CNTL_ENABLE 0x00000001
+#define A6XX_PC_MULTIVIEW_CNTL_DISABLEMULTIPOS 0x00000002
+#define A6XX_PC_MULTIVIEW_CNTL_VIEWS__MASK 0x0000007c
+#define A6XX_PC_MULTIVIEW_CNTL_VIEWS__SHIFT 2
+static inline uint32_t A6XX_PC_MULTIVIEW_CNTL_VIEWS(uint32_t val)
+{
+ return ((val) << A6XX_PC_MULTIVIEW_CNTL_VIEWS__SHIFT) & A6XX_PC_MULTIVIEW_CNTL_VIEWS__MASK;
+}
+
+#define REG_A6XX_PC_MULTIVIEW_MASK 0x00009b08
+
+#define REG_A6XX_PC_2D_EVENT_CMD 0x00009c00
+#define A6XX_PC_2D_EVENT_CMD_EVENT__MASK 0x0000007f
+#define A6XX_PC_2D_EVENT_CMD_EVENT__SHIFT 0
+static inline uint32_t A6XX_PC_2D_EVENT_CMD_EVENT(enum vgt_event_type val)
+{
+ return ((val) << A6XX_PC_2D_EVENT_CMD_EVENT__SHIFT) & A6XX_PC_2D_EVENT_CMD_EVENT__MASK;
+}
+#define A6XX_PC_2D_EVENT_CMD_STATE_ID__MASK 0x0000ff00
+#define A6XX_PC_2D_EVENT_CMD_STATE_ID__SHIFT 8
+static inline uint32_t A6XX_PC_2D_EVENT_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_PC_2D_EVENT_CMD_STATE_ID__SHIFT) & A6XX_PC_2D_EVENT_CMD_STATE_ID__MASK;
+}
+
+#define REG_A6XX_PC_DBG_ECO_CNTL 0x00009e00
+
+#define REG_A6XX_PC_ADDR_MODE_CNTL 0x00009e01
+
+#define REG_A6XX_PC_DRAW_INDX_BASE 0x00009e04
+
+#define REG_A6XX_PC_DRAW_FIRST_INDX 0x00009e06
+
+#define REG_A6XX_PC_DRAW_MAX_INDICES 0x00009e07
+
+#define REG_A6XX_PC_TESSFACTOR_ADDR 0x00009e08
+#define A6XX_PC_TESSFACTOR_ADDR__MASK 0xffffffff
+#define A6XX_PC_TESSFACTOR_ADDR__SHIFT 0
+static inline uint32_t A6XX_PC_TESSFACTOR_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_PC_TESSFACTOR_ADDR__SHIFT) & A6XX_PC_TESSFACTOR_ADDR__MASK;
+}
+
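+/* PC_DRAW_INITIATOR mirrors the draw-initiator dword of the CP draw
+ * packets (note the shared pc_di_* enums), so it is presumably filled in
+ * by the CP when a draw is kicked off rather than programmed directly.
+ */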
+#define REG_A6XX_PC_DRAW_INITIATOR 0x00009e0b
+#define A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f
+#define A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0
+static inline uint32_t A6XX_PC_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__MASK;
+}
+#define A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0
+#define A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__MASK;
+}
+#define A6XX_PC_DRAW_INITIATOR_VIS_CULL__MASK 0x00000300
+#define A6XX_PC_DRAW_INITIATOR_VIS_CULL__SHIFT 8
+static inline uint32_t A6XX_PC_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A6XX_PC_DRAW_INITIATOR_VIS_CULL__SHIFT) & A6XX_PC_DRAW_INITIATOR_VIS_CULL__MASK;
+}
+#define A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000c00
+#define A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__SHIFT 10
+static inline uint32_t A6XX_PC_DRAW_INITIATOR_INDEX_SIZE(enum a4xx_index_size val)
+{
+ return ((val) << A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__MASK;
+}
+#define A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__MASK 0x00003000
+#define A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__SHIFT 12
+static inline uint32_t A6XX_PC_DRAW_INITIATOR_PATCH_TYPE(enum a6xx_patch_type val)
+{
+ return ((val) << A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__SHIFT) & A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__MASK;
+}
+#define A6XX_PC_DRAW_INITIATOR_GS_ENABLE 0x00010000
+#define A6XX_PC_DRAW_INITIATOR_TESS_ENABLE 0x00020000
+
+#define REG_A6XX_PC_DRAW_NUM_INSTANCES 0x00009e0c
+
+#define REG_A6XX_PC_DRAW_NUM_INDICES 0x00009e0d
+
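+/* These registers appear to configure the visibility stream consumed
+ * during tiled rendering: VSC_SIZE and VSC_N presumably select the
+ * per-bin stream pitch and the bin index for the current tile.
+ */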
+#define REG_A6XX_PC_VSTREAM_CONTROL 0x00009e11
+#define A6XX_PC_VSTREAM_CONTROL_UNK0__MASK 0x0000ffff
+#define A6XX_PC_VSTREAM_CONTROL_UNK0__SHIFT 0
+static inline uint32_t A6XX_PC_VSTREAM_CONTROL_UNK0(uint32_t val)
+{
+ return ((val) << A6XX_PC_VSTREAM_CONTROL_UNK0__SHIFT) & A6XX_PC_VSTREAM_CONTROL_UNK0__MASK;
+}
+#define A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__MASK 0x003f0000
+#define A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__SHIFT 16
+static inline uint32_t A6XX_PC_VSTREAM_CONTROL_VSC_SIZE(uint32_t val)
+{
+ return ((val) << A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__SHIFT) & A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__MASK;
+}
+#define A6XX_PC_VSTREAM_CONTROL_VSC_N__MASK 0x07c00000
+#define A6XX_PC_VSTREAM_CONTROL_VSC_N__SHIFT 22
+static inline uint32_t A6XX_PC_VSTREAM_CONTROL_VSC_N(uint32_t val)
+{
+ return ((val) << A6XX_PC_VSTREAM_CONTROL_VSC_N__SHIFT) & A6XX_PC_VSTREAM_CONTROL_VSC_N__MASK;
+}
+
+#define REG_A6XX_PC_BIN_PRIM_STRM 0x00009e12
+#define A6XX_PC_BIN_PRIM_STRM__MASK 0xffffffff
+#define A6XX_PC_BIN_PRIM_STRM__SHIFT 0
+static inline uint32_t A6XX_PC_BIN_PRIM_STRM(uint32_t val)
+{
+ return ((val) << A6XX_PC_BIN_PRIM_STRM__SHIFT) & A6XX_PC_BIN_PRIM_STRM__MASK;
+}
+
+#define REG_A6XX_PC_BIN_DRAW_STRM 0x00009e14
+#define A6XX_PC_BIN_DRAW_STRM__MASK 0xffffffff
+#define A6XX_PC_BIN_DRAW_STRM__SHIFT 0
+static inline uint32_t A6XX_PC_BIN_DRAW_STRM(uint32_t val)
+{
+ return ((val) << A6XX_PC_BIN_DRAW_STRM__SHIFT) & A6XX_PC_BIN_DRAW_STRM__MASK;
+}
+
+#define REG_A6XX_PC_VISIBILITY_OVERRIDE 0x00009e1c
+#define A6XX_PC_VISIBILITY_OVERRIDE_OVERRIDE 0x00000001
+
+static inline uint32_t REG_A6XX_PC_PERFCTR_PC_SEL(uint32_t i0) { return 0x00009e34 + 0x1*i0; }
+
+#define REG_A6XX_PC_UNKNOWN_9E72 0x00009e72
+
+#define REG_A6XX_VFD_CONTROL_0 0x0000a000
+#define A6XX_VFD_CONTROL_0_FETCH_CNT__MASK 0x0000003f
+#define A6XX_VFD_CONTROL_0_FETCH_CNT__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_0_FETCH_CNT(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_0_FETCH_CNT__SHIFT) & A6XX_VFD_CONTROL_0_FETCH_CNT__MASK;
+}
+#define A6XX_VFD_CONTROL_0_DECODE_CNT__MASK 0x00003f00
+#define A6XX_VFD_CONTROL_0_DECODE_CNT__SHIFT 8
+static inline uint32_t A6XX_VFD_CONTROL_0_DECODE_CNT(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_0_DECODE_CNT__SHIFT) & A6XX_VFD_CONTROL_0_DECODE_CNT__MASK;
+}
+
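+/* The REGID4* fields name the shader registers that receive the vertex
+ * index, instance index, primitive ID and view ID sysvals; an out-of-range
+ * regid appears to be written when a given sysval is unused.
+ */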
+#define REG_A6XX_VFD_CONTROL_1 0x0000a001
+#define A6XX_VFD_CONTROL_1_REGID4VTX__MASK 0x000000ff
+#define A6XX_VFD_CONTROL_1_REGID4VTX__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A6XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+#define A6XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00
+#define A6XX_VFD_CONTROL_1_REGID4INST__SHIFT 8
+static inline uint32_t A6XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A6XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+#define A6XX_VFD_CONTROL_1_REGID4PRIMID__MASK 0x00ff0000
+#define A6XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT 16
+static inline uint32_t A6XX_VFD_CONTROL_1_REGID4PRIMID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT) & A6XX_VFD_CONTROL_1_REGID4PRIMID__MASK;
+}
+#define A6XX_VFD_CONTROL_1_REGID4VIEWID__MASK 0xff000000
+#define A6XX_VFD_CONTROL_1_REGID4VIEWID__SHIFT 24
+static inline uint32_t A6XX_VFD_CONTROL_1_REGID4VIEWID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_1_REGID4VIEWID__SHIFT) & A6XX_VFD_CONTROL_1_REGID4VIEWID__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_2 0x0000a002
+#define A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID__MASK 0x000000ff
+#define A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID__SHIFT) & A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID__MASK;
+}
+#define A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__MASK 0x0000ff00
+#define A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__SHIFT 8
+static inline uint32_t A6XX_VFD_CONTROL_2_REGID_INVOCATIONID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__SHIFT) & A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_3 0x0000a003
+#define A6XX_VFD_CONTROL_3_REGID_DSPRIMID__MASK 0x000000ff
+#define A6XX_VFD_CONTROL_3_REGID_DSPRIMID__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_3_REGID_DSPRIMID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_3_REGID_DSPRIMID__SHIFT) & A6XX_VFD_CONTROL_3_REGID_DSPRIMID__MASK;
+}
+#define A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID__MASK 0x0000ff00
+#define A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID__SHIFT 8
+static inline uint32_t A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID__SHIFT) & A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID__MASK;
+}
+#define A6XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000
+#define A6XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16
+static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A6XX_VFD_CONTROL_3_REGID_TESSX__MASK;
+}
+#define A6XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000
+#define A6XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24
+static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A6XX_VFD_CONTROL_3_REGID_TESSY__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_4 0x0000a004
+#define A6XX_VFD_CONTROL_4_UNK0__MASK 0x000000ff
+#define A6XX_VFD_CONTROL_4_UNK0__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_4_UNK0(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_4_UNK0__SHIFT) & A6XX_VFD_CONTROL_4_UNK0__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_5 0x0000a005
+#define A6XX_VFD_CONTROL_5_REGID_GSHEADER__MASK 0x000000ff
+#define A6XX_VFD_CONTROL_5_REGID_GSHEADER__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_5_REGID_GSHEADER(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_5_REGID_GSHEADER__SHIFT) & A6XX_VFD_CONTROL_5_REGID_GSHEADER__MASK;
+}
+#define A6XX_VFD_CONTROL_5_UNK8__MASK 0x0000ff00
+#define A6XX_VFD_CONTROL_5_UNK8__SHIFT 8
+static inline uint32_t A6XX_VFD_CONTROL_5_UNK8(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_5_UNK8__SHIFT) & A6XX_VFD_CONTROL_5_UNK8__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_6 0x0000a006
+#define A6XX_VFD_CONTROL_6_PRIMID_PASSTHRU 0x00000001
+
+#define REG_A6XX_VFD_MODE_CNTL 0x0000a007
+#define A6XX_VFD_MODE_CNTL_RENDER_MODE__MASK 0x00000007
+#define A6XX_VFD_MODE_CNTL_RENDER_MODE__SHIFT 0
+static inline uint32_t A6XX_VFD_MODE_CNTL_RENDER_MODE(enum a6xx_render_mode val)
+{
+ return ((val) << A6XX_VFD_MODE_CNTL_RENDER_MODE__SHIFT) & A6XX_VFD_MODE_CNTL_RENDER_MODE__MASK;
+}
+
+#define REG_A6XX_VFD_MULTIVIEW_CNTL 0x0000a008
+#define A6XX_VFD_MULTIVIEW_CNTL_ENABLE 0x00000001
+#define A6XX_VFD_MULTIVIEW_CNTL_DISABLEMULTIPOS 0x00000002
+#define A6XX_VFD_MULTIVIEW_CNTL_VIEWS__MASK 0x0000007c
+#define A6XX_VFD_MULTIVIEW_CNTL_VIEWS__SHIFT 2
+static inline uint32_t A6XX_VFD_MULTIVIEW_CNTL_VIEWS(uint32_t val)
+{
+ return ((val) << A6XX_VFD_MULTIVIEW_CNTL_VIEWS__SHIFT) & A6XX_VFD_MULTIVIEW_CNTL_VIEWS__MASK;
+}
+
+#define REG_A6XX_VFD_ADD_OFFSET 0x0000a009
+#define A6XX_VFD_ADD_OFFSET_VERTEX 0x00000001
+#define A6XX_VFD_ADD_OFFSET_INSTANCE 0x00000002
+
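+/* When the A6XX_VFD_ADD_OFFSET_* bits above are set, the two registers
+ * below presumably hold the first-index and first-instance values added
+ * to the fetched vertex/instance indices.
+ */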
+#define REG_A6XX_VFD_INDEX_OFFSET 0x0000a00e
+
+#define REG_A6XX_VFD_INSTANCE_START_OFFSET 0x0000a00f
+
+static inline uint32_t REG_A6XX_VFD_FETCH(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_FETCH_BASE(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
+#define A6XX_VFD_FETCH_BASE__MASK 0xffffffff
+#define A6XX_VFD_FETCH_BASE__SHIFT 0
+static inline uint32_t A6XX_VFD_FETCH_BASE(uint32_t val)
+{
+ return ((val) << A6XX_VFD_FETCH_BASE__SHIFT) & A6XX_VFD_FETCH_BASE__MASK;
+}
+
+static inline uint32_t REG_A6XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000a012 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000a013 + 0x4*i0; }
+
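+/* Indexed per-attribute group (stride 0x2 dwords): VFD_DECODE_INSTR
+ * describes how attribute i0 is fetched and decoded (buffer index, offset,
+ * format, swap), and VFD_DECODE_STEP_RATE presumably holds its instance
+ * step rate.
+ */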
+static inline uint32_t REG_A6XX_VFD_DECODE(uint32_t i0) { return 0x0000a090 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000a090 + 0x2*i0; }
+#define A6XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f
+#define A6XX_VFD_DECODE_INSTR_IDX__SHIFT 0
+static inline uint32_t A6XX_VFD_DECODE_INSTR_IDX(uint32_t val)
+{
+ return ((val) << A6XX_VFD_DECODE_INSTR_IDX__SHIFT) & A6XX_VFD_DECODE_INSTR_IDX__MASK;
+}
+#define A6XX_VFD_DECODE_INSTR_OFFSET__MASK 0x0001ffe0
+#define A6XX_VFD_DECODE_INSTR_OFFSET__SHIFT 5
+static inline uint32_t A6XX_VFD_DECODE_INSTR_OFFSET(uint32_t val)
+{
+ return ((val) << A6XX_VFD_DECODE_INSTR_OFFSET__SHIFT) & A6XX_VFD_DECODE_INSTR_OFFSET__MASK;
+}
+#define A6XX_VFD_DECODE_INSTR_INSTANCED 0x00020000
+#define A6XX_VFD_DECODE_INSTR_FORMAT__MASK 0x0ff00000
+#define A6XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
+static inline uint32_t A6XX_VFD_DECODE_INSTR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A6XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A6XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A6XX_VFD_DECODE_INSTR_SWAP__MASK 0x30000000
+#define A6XX_VFD_DECODE_INSTR_SWAP__SHIFT 28
+static inline uint32_t A6XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A6XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
+#define A6XX_VFD_DECODE_INSTR_UNK30 0x40000000
+#define A6XX_VFD_DECODE_INSTR_FLOAT 0x80000000
+
+static inline uint32_t REG_A6XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000a091 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_VFD_DEST_CNTL(uint32_t i0) { return 0x0000a0d0 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000a0d0 + 0x1*i0; }
+#define A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f
+#define A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK;
+}
+#define A6XX_VFD_DEST_CNTL_INSTR_REGID__MASK 0x00000ff0
+#define A6XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT 4
+static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A6XX_VFD_DEST_CNTL_INSTR_REGID__MASK;
+}
+
+#define REG_A6XX_VFD_POWER_CNTL 0x0000a0f8
+
+#define REG_A6XX_VFD_ADDR_MODE_CNTL 0x0000a601
+
+static inline uint32_t REG_A6XX_VFD_PERFCTR_VFD_SEL(uint32_t i0) { return 0x0000a610 + 0x1*i0; }
+
+#define REG_A6XX_SP_VS_CTRL_REG0 0x0000a800
+#define A6XX_SP_VS_CTRL_REG0_MERGEDREGS 0x00100000
+#define A6XX_SP_VS_CTRL_REG0_UNK21 0x00200000
+#define A6XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_UNK13 0x00002000
+#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A6XX_SP_VS_BRANCH_COND 0x0000a801
+
+#define REG_A6XX_SP_VS_PRIMITIVE_CNTL 0x0000a802
+#define A6XX_SP_VS_PRIMITIVE_CNTL_OUT__MASK 0x0000003f
+#define A6XX_SP_VS_PRIMITIVE_CNTL_OUT__SHIFT 0
+static inline uint32_t A6XX_SP_VS_PRIMITIVE_CNTL_OUT(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_PRIMITIVE_CNTL_OUT__SHIFT) & A6XX_SP_VS_PRIMITIVE_CNTL_OUT__MASK;
+}
+#define A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__MASK 0x00003fc0
+#define A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT 6
+static inline uint32_t A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT) & A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__MASK;
+}
+
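+/* Each SP_VS_OUT_REG packs two varying outputs: output A in bits 0..11
+ * (regid plus component mask) and output B in bits 16..27, so consecutive
+ * outputs presumably map pairwise onto this register array.
+ */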
+static inline uint32_t REG_A6XX_SP_VS_OUT(uint32_t i0) { return 0x0000a803 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000a803 + 0x1*i0; }
+#define A6XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A6XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A6XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A6XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A6XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A6XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A6XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A6XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A6XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A6XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A6XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_VS_VPC_DST(uint32_t i0) { return 0x0000a813 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000a813 + 0x1*i0; }
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A6XX_SP_VS_OBJ_FIRST_EXEC_OFFSET 0x0000a81b
+
+#define REG_A6XX_SP_VS_OBJ_START 0x0000a81c
+#define A6XX_SP_VS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_VS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_VS_OBJ_START(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OBJ_START__SHIFT) & A6XX_SP_VS_OBJ_START__MASK;
+}
+
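+/* The private-memory helpers take byte sizes and store them pre-scaled:
+ * MEMSIZEPERITEM in 512-byte units (val >> 9), TOTALPVTMEMSIZE in 4 KiB
+ * units (val >> 12) and the HW_STACK_OFFSET in 2 KiB units (val >> 11).
+ * The same layout repeats for the other shader stages below.
+ */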
+#define REG_A6XX_SP_VS_PVT_MEM_PARAM 0x0000a81e
+#define A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val >> 9) << A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_VS_PVT_MEM_ADDR 0x0000a81f
+#define A6XX_SP_VS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_VS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_VS_PVT_MEM_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_VS_PVT_MEM_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_VS_PVT_MEM_SIZE 0x0000a821
+#define A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ return ((val >> 12) << A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_VS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
+
+#define REG_A6XX_SP_VS_TEX_COUNT 0x0000a822
+
+#define REG_A6XX_SP_VS_CONFIG 0x0000a823
+#define A6XX_SP_VS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_VS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_VS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_VS_CONFIG_BINDLESS_UBO 0x00000008
+#define A6XX_SP_VS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_VS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_VS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_VS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CONFIG_NTEX__SHIFT) & A6XX_SP_VS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_VS_CONFIG_NSAMP__MASK 0x003e0000
+#define A6XX_SP_VS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_VS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CONFIG_NSAMP__SHIFT) & A6XX_SP_VS_CONFIG_NSAMP__MASK;
+}
+#define A6XX_SP_VS_CONFIG_NIBO__MASK 0x1fc00000
+#define A6XX_SP_VS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_VS_CONFIG_NIBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CONFIG_NIBO__SHIFT) & A6XX_SP_VS_CONFIG_NIBO__MASK;
+}
+
+#define REG_A6XX_SP_VS_INSTRLEN 0x0000a824
+
+#define REG_A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET 0x0000a825
+#define A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
+{
+ return ((val >> 11) << A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_SP_HS_CTRL_REG0 0x0000a830
+#define A6XX_SP_HS_CTRL_REG0_UNK20 0x00100000
+#define A6XX_SP_HS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_HS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_HS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_UNK13 0x00002000
+#define A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A6XX_SP_HS_WAVE_INPUT_SIZE 0x0000a831
+
+#define REG_A6XX_SP_HS_BRANCH_COND 0x0000a832
+
+#define REG_A6XX_SP_HS_OBJ_FIRST_EXEC_OFFSET 0x0000a833
+
+#define REG_A6XX_SP_HS_OBJ_START 0x0000a834
+#define A6XX_SP_HS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_HS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_HS_OBJ_START(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_OBJ_START__SHIFT) & A6XX_SP_HS_OBJ_START__MASK;
+}
+
+#define REG_A6XX_SP_HS_PVT_MEM_PARAM 0x0000a836
+#define A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val >> 9) << A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_HS_PVT_MEM_ADDR 0x0000a837
+#define A6XX_SP_HS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_HS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_HS_PVT_MEM_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_HS_PVT_MEM_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_HS_PVT_MEM_SIZE 0x0000a839
+#define A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ return ((val >> 12) << A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_HS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
+
+#define REG_A6XX_SP_HS_TEX_COUNT 0x0000a83a
+
+#define REG_A6XX_SP_HS_CONFIG 0x0000a83b
+#define A6XX_SP_HS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_HS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_HS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_HS_CONFIG_BINDLESS_UBO 0x00000008
+#define A6XX_SP_HS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_HS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_HS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_HS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CONFIG_NTEX__SHIFT) & A6XX_SP_HS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_HS_CONFIG_NSAMP__MASK 0x003e0000
+#define A6XX_SP_HS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_HS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CONFIG_NSAMP__SHIFT) & A6XX_SP_HS_CONFIG_NSAMP__MASK;
+}
+#define A6XX_SP_HS_CONFIG_NIBO__MASK 0x1fc00000
+#define A6XX_SP_HS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_HS_CONFIG_NIBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CONFIG_NIBO__SHIFT) & A6XX_SP_HS_CONFIG_NIBO__MASK;
+}
+
+#define REG_A6XX_SP_HS_INSTRLEN 0x0000a83c
+
+#define REG_A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET 0x0000a83d
+#define A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
+{
+ return ((val >> 11) << A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_SP_DS_CTRL_REG0 0x0000a840
+#define A6XX_SP_DS_CTRL_REG0_MERGEDREGS 0x00100000
+#define A6XX_SP_DS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_DS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_DS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_UNK13 0x00002000
+#define A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A6XX_SP_DS_BRANCH_COND 0x0000a841
+
+#define REG_A6XX_SP_DS_PRIMITIVE_CNTL 0x0000a842
+#define A6XX_SP_DS_PRIMITIVE_CNTL_OUT__MASK 0x0000003f
+#define A6XX_SP_DS_PRIMITIVE_CNTL_OUT__SHIFT 0
+static inline uint32_t A6XX_SP_DS_PRIMITIVE_CNTL_OUT(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_PRIMITIVE_CNTL_OUT__SHIFT) & A6XX_SP_DS_PRIMITIVE_CNTL_OUT__MASK;
+}
+#define A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__MASK 0x00003fc0
+#define A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT 6
+static inline uint32_t A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT) & A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_DS_OUT(uint32_t i0) { return 0x0000a843 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_DS_OUT_REG(uint32_t i0) { return 0x0000a843 + 0x1*i0; }
+#define A6XX_SP_DS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A6XX_SP_DS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_DS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_DS_OUT_REG_A_REGID__MASK;
+}
+#define A6XX_SP_DS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A6XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A6XX_SP_DS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_DS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A6XX_SP_DS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A6XX_SP_DS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A6XX_SP_DS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_DS_OUT_REG_B_REGID__MASK;
+}
+#define A6XX_SP_DS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A6XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A6XX_SP_DS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_DS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_DS_VPC_DST(uint32_t i0) { return 0x0000a853 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_DS_VPC_DST_REG(uint32_t i0) { return 0x0000a853 + 0x1*i0; }
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A6XX_SP_DS_OBJ_FIRST_EXEC_OFFSET 0x0000a85b
+
+#define REG_A6XX_SP_DS_OBJ_START 0x0000a85c
+#define A6XX_SP_DS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_DS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_DS_OBJ_START(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_OBJ_START__SHIFT) & A6XX_SP_DS_OBJ_START__MASK;
+}
+
+#define REG_A6XX_SP_DS_PVT_MEM_PARAM 0x0000a85e
+#define A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val >> 9) << A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_DS_PVT_MEM_ADDR 0x0000a85f
+#define A6XX_SP_DS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_DS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_DS_PVT_MEM_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_DS_PVT_MEM_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_DS_PVT_MEM_SIZE 0x0000a861
+#define A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ return ((val >> 12) << A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_DS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
+
+#define REG_A6XX_SP_DS_TEX_COUNT 0x0000a862
+
+#define REG_A6XX_SP_DS_CONFIG 0x0000a863
+#define A6XX_SP_DS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_DS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_DS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_DS_CONFIG_BINDLESS_UBO 0x00000008
+#define A6XX_SP_DS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_DS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_DS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_DS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CONFIG_NTEX__SHIFT) & A6XX_SP_DS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_DS_CONFIG_NSAMP__MASK 0x003e0000
+#define A6XX_SP_DS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_DS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CONFIG_NSAMP__SHIFT) & A6XX_SP_DS_CONFIG_NSAMP__MASK;
+}
+#define A6XX_SP_DS_CONFIG_NIBO__MASK 0x1fc00000
+#define A6XX_SP_DS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_DS_CONFIG_NIBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CONFIG_NIBO__SHIFT) & A6XX_SP_DS_CONFIG_NIBO__MASK;
+}
+
+#define REG_A6XX_SP_DS_INSTRLEN 0x0000a864
+
+#define REG_A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET 0x0000a865
+#define A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
+{
+ return ((val >> 11) << A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_SP_GS_CTRL_REG0 0x0000a870
+#define A6XX_SP_GS_CTRL_REG0_UNK20 0x00100000
+#define A6XX_SP_GS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_GS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_GS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_UNK13 0x00002000
+#define A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A6XX_SP_GS_PRIM_SIZE 0x0000a871
+
+#define REG_A6XX_SP_GS_BRANCH_COND 0x0000a872
+
+#define REG_A6XX_SP_GS_PRIMITIVE_CNTL 0x0000a873
+#define A6XX_SP_GS_PRIMITIVE_CNTL_OUT__MASK 0x0000003f
+#define A6XX_SP_GS_PRIMITIVE_CNTL_OUT__SHIFT 0
+static inline uint32_t A6XX_SP_GS_PRIMITIVE_CNTL_OUT(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_PRIMITIVE_CNTL_OUT__SHIFT) & A6XX_SP_GS_PRIMITIVE_CNTL_OUT__MASK;
+}
+#define A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__MASK 0x00003fc0
+#define A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT 6
+static inline uint32_t A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT) & A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_GS_OUT(uint32_t i0) { return 0x0000a874 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_GS_OUT_REG(uint32_t i0) { return 0x0000a874 + 0x1*i0; }
+#define A6XX_SP_GS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A6XX_SP_GS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_GS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_GS_OUT_REG_A_REGID__MASK;
+}
+#define A6XX_SP_GS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A6XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A6XX_SP_GS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_GS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A6XX_SP_GS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A6XX_SP_GS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A6XX_SP_GS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_GS_OUT_REG_B_REGID__MASK;
+}
+#define A6XX_SP_GS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A6XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A6XX_SP_GS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_GS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_GS_VPC_DST(uint32_t i0) { return 0x0000a884 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_GS_VPC_DST_REG(uint32_t i0) { return 0x0000a884 + 0x1*i0; }
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A6XX_SP_GS_OBJ_FIRST_EXEC_OFFSET 0x0000a88c
+
+#define REG_A6XX_SP_GS_OBJ_START 0x0000a88d
+#define A6XX_SP_GS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_GS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_GS_OBJ_START(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_OBJ_START__SHIFT) & A6XX_SP_GS_OBJ_START__MASK;
+}
+
+#define REG_A6XX_SP_GS_PVT_MEM_PARAM 0x0000a88f
+#define A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val >> 9) << A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_GS_PVT_MEM_ADDR 0x0000a890
+#define A6XX_SP_GS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_GS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_GS_PVT_MEM_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_GS_PVT_MEM_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_GS_PVT_MEM_SIZE 0x0000a892
+#define A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ return ((val >> 12) << A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_GS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
+
+#define REG_A6XX_SP_GS_TEX_COUNT 0x0000a893
+
+#define REG_A6XX_SP_GS_CONFIG 0x0000a894
+#define A6XX_SP_GS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_GS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_GS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_GS_CONFIG_BINDLESS_UBO 0x00000008
+#define A6XX_SP_GS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_GS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_GS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_GS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CONFIG_NTEX__SHIFT) & A6XX_SP_GS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_GS_CONFIG_NSAMP__MASK 0x003e0000
+#define A6XX_SP_GS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_GS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CONFIG_NSAMP__SHIFT) & A6XX_SP_GS_CONFIG_NSAMP__MASK;
+}
+#define A6XX_SP_GS_CONFIG_NIBO__MASK 0x1fc00000
+#define A6XX_SP_GS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_GS_CONFIG_NIBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CONFIG_NIBO__SHIFT) & A6XX_SP_GS_CONFIG_NIBO__MASK;
+}
+
+#define REG_A6XX_SP_GS_INSTRLEN 0x0000a895
+
+#define REG_A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET 0x0000a896
+#define A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
+{
+ return ((val >> 11) << A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+}
+
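+/* The *_TEX_SAMP and *_TEX_CONST registers each span two dwords and
+ * presumably hold 64-bit GPU addresses of the per-stage sampler and
+ * texture descriptor tables.
+ */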
+#define REG_A6XX_SP_VS_TEX_SAMP 0x0000a8a0
+#define A6XX_SP_VS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_VS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_VS_TEX_SAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_TEX_SAMP__SHIFT) & A6XX_SP_VS_TEX_SAMP__MASK;
+}
+
+#define REG_A6XX_SP_HS_TEX_SAMP 0x0000a8a2
+#define A6XX_SP_HS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_HS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_HS_TEX_SAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_TEX_SAMP__SHIFT) & A6XX_SP_HS_TEX_SAMP__MASK;
+}
+
+#define REG_A6XX_SP_DS_TEX_SAMP 0x0000a8a4
+#define A6XX_SP_DS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_DS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_DS_TEX_SAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_TEX_SAMP__SHIFT) & A6XX_SP_DS_TEX_SAMP__MASK;
+}
+
+#define REG_A6XX_SP_GS_TEX_SAMP 0x0000a8a6
+#define A6XX_SP_GS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_GS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_GS_TEX_SAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_TEX_SAMP__SHIFT) & A6XX_SP_GS_TEX_SAMP__MASK;
+}
+
+#define REG_A6XX_SP_VS_TEX_CONST 0x0000a8a8
+#define A6XX_SP_VS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_VS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_VS_TEX_CONST(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_TEX_CONST__SHIFT) & A6XX_SP_VS_TEX_CONST__MASK;
+}
+
+#define REG_A6XX_SP_HS_TEX_CONST 0x0000a8aa
+#define A6XX_SP_HS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_HS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_HS_TEX_CONST(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_TEX_CONST__SHIFT) & A6XX_SP_HS_TEX_CONST__MASK;
+}
+
+#define REG_A6XX_SP_DS_TEX_CONST 0x0000a8ac
+#define A6XX_SP_DS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_DS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_DS_TEX_CONST(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_TEX_CONST__SHIFT) & A6XX_SP_DS_TEX_CONST__MASK;
+}
+
+#define REG_A6XX_SP_GS_TEX_CONST 0x0000a8ae
+#define A6XX_SP_GS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_GS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_GS_TEX_CONST(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_TEX_CONST__SHIFT) & A6XX_SP_GS_TEX_CONST__MASK;
+}
+
+#define REG_A6XX_SP_FS_CTRL_REG0 0x0000a980
+#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a6xx_threadsize val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_UNK21 0x00200000
+#define A6XX_SP_FS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_FS_CTRL_REG0_DIFF_FINE 0x00800000
+#define A6XX_SP_FS_CTRL_REG0_UNK24 0x01000000
+#define A6XX_SP_FS_CTRL_REG0_UNK25 0x02000000
+#define A6XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_FS_CTRL_REG0_UNK27__MASK 0x18000000
+#define A6XX_SP_FS_CTRL_REG0_UNK27__SHIFT 27
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_UNK27(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_UNK27__SHIFT) & A6XX_SP_FS_CTRL_REG0_UNK27__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_MERGEDREGS 0x80000000
+#define A6XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_UNK13 0x00002000
+#define A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A6XX_SP_FS_BRANCH_COND 0x0000a981
+
+#define REG_A6XX_SP_FS_OBJ_FIRST_EXEC_OFFSET 0x0000a982
+
+#define REG_A6XX_SP_FS_OBJ_START 0x0000a983
+#define A6XX_SP_FS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_FS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_FS_OBJ_START(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OBJ_START__SHIFT) & A6XX_SP_FS_OBJ_START__MASK;
+}
+
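+/* In the PVT_MEM_PARAM registers, MEMSIZEPERITEM is packed in 512-byte units
+ * (val >> 9), while HWSTACKSIZEPERTHREAD in bits 24..31 is taken as-is.
+ */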
+#define REG_A6XX_SP_FS_PVT_MEM_PARAM 0x0000a985
+#define A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val >> 9) << A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_FS_PVT_MEM_ADDR 0x0000a986
+#define A6XX_SP_FS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_FS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PVT_MEM_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_FS_PVT_MEM_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_FS_PVT_MEM_SIZE 0x0000a988
+#define A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ return ((val >> 12) << A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_FS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
+
+#define REG_A6XX_SP_BLEND_CNTL 0x0000a989
+#define A6XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A6XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A6XX_SP_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A6XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A6XX_SP_BLEND_CNTL_UNK8 0x00000100
+#define A6XX_SP_BLEND_CNTL_DUAL_COLOR_IN_ENABLE 0x00000200
+#define A6XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
+
+#define REG_A6XX_SP_SRGB_CNTL 0x0000a98a
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT0 0x00000001
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT1 0x00000002
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT2 0x00000004
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT3 0x00000008
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT4 0x00000010
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT5 0x00000020
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT6 0x00000040
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT7 0x00000080
+
+#define REG_A6XX_SP_FS_RENDER_COMPONENTS 0x0000a98b
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT0__MASK 0x0000000f
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT0(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT0__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT0__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT1__MASK 0x000000f0
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT1(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT1__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT1__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT2__MASK 0x00000f00
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT2(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT2__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT2__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT3__MASK 0x0000f000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT3(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT3__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT3__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT4__MASK 0x000f0000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT4(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT4__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT4__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT5__MASK 0x00f00000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT5(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT5__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT5__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT6__MASK 0x0f000000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT6(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT6__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT6__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT7__MASK 0xf0000000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT7(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT7__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT7__MASK;
+}
+
+#define REG_A6XX_SP_FS_OUTPUT_CNTL0 0x0000a98c
+#define A6XX_SP_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE 0x00000001
+#define A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__MASK 0x0000ff00
+#define A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__SHIFT 8
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__MASK;
+}
+#define A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__MASK 0x00ff0000
+#define A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__SHIFT 16
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__MASK;
+}
+#define A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__MASK 0xff000000
+#define A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__SHIFT 24
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__MASK;
+}
+
+#define REG_A6XX_SP_FS_OUTPUT_CNTL1 0x0000a98d
+#define A6XX_SP_FS_OUTPUT_CNTL1_MRT__MASK 0x0000000f
+#define A6XX_SP_FS_OUTPUT_CNTL1_MRT__SHIFT 0
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL1_MRT(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_CNTL1_MRT__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL1_MRT__MASK;
+}
+
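+/* Indexed register helpers: REG_A6XX_SP_FS_OUTPUT(i0) names element i0 of a
+ * register array (base + stride * i0), and REG_A6XX_SP_FS_OUTPUT_REG(i0)
+ * names the register within that element; with a one-dword stride the two
+ * addresses coincide.
+ */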
+static inline uint32_t REG_A6XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
+#define A6XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff
+#define A6XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_REG_REGID__MASK;
+}
+#define A6XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100
+
+static inline uint32_t REG_A6XX_SP_FS_MRT(uint32_t i0) { return 0x0000a996 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000a996 + 0x1*i0; }
+#define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A6XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
+}
+#define A6XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100
+#define A6XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200
+#define A6XX_SP_FS_MRT_REG_UNK10 0x00000400
+
+#define REG_A6XX_SP_FS_PREFETCH_CNTL 0x0000a99e
+#define A6XX_SP_FS_PREFETCH_CNTL_COUNT__MASK 0x00000007
+#define A6XX_SP_FS_PREFETCH_CNTL_COUNT__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_COUNT(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CNTL_COUNT__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_COUNT__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CNTL_UNK3 0x00000008
+#define A6XX_SP_FS_PREFETCH_CNTL_UNK4__MASK 0x00000ff0
+#define A6XX_SP_FS_PREFETCH_CNTL_UNK4__SHIFT 4
+static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_UNK4(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CNTL_UNK4__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_UNK4__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CNTL_UNK12__MASK 0x00007000
+#define A6XX_SP_FS_PREFETCH_CNTL_UNK12__SHIFT 12
+static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_UNK12(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CNTL_UNK12__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_UNK12__MASK;
+}
+
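+/* The SP_FS_PREFETCH_CMD array appears to describe per-slot texture fetches
+ * issued ahead of fragment-shader execution; the roles of SRC, SAMP_ID,
+ * TEX_ID, DST, WRMASK and CMD are inferred from the field names rather than
+ * documented in this header.
+ */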
+static inline uint32_t REG_A6XX_SP_FS_PREFETCH(uint32_t i0) { return 0x0000a99f + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_PREFETCH_CMD(uint32_t i0) { return 0x0000a99f + 0x1*i0; }
+#define A6XX_SP_FS_PREFETCH_CMD_SRC__MASK 0x0000007f
+#define A6XX_SP_FS_PREFETCH_CMD_SRC__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_SRC(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CMD_SRC__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_SRC__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__MASK 0x00000780
+#define A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__SHIFT 7
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_SAMP_ID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_TEX_ID__MASK 0x0000f800
+#define A6XX_SP_FS_PREFETCH_CMD_TEX_ID__SHIFT 11
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_TEX_ID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CMD_TEX_ID__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_TEX_ID__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_DST__MASK 0x003f0000
+#define A6XX_SP_FS_PREFETCH_CMD_DST__SHIFT 16
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_DST(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CMD_DST__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_DST__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_WRMASK__MASK 0x03c00000
+#define A6XX_SP_FS_PREFETCH_CMD_WRMASK__SHIFT 22
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_WRMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CMD_WRMASK__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_WRMASK__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_HALF 0x04000000
+#define A6XX_SP_FS_PREFETCH_CMD_CMD__MASK 0xf8000000
+#define A6XX_SP_FS_PREFETCH_CMD_CMD__SHIFT 27
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_CMD(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CMD_CMD__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_CMD__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_FS_BINDLESS_PREFETCH(uint32_t i0) { return 0x0000a9a3 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_BINDLESS_PREFETCH_CMD(uint32_t i0) { return 0x0000a9a3 + 0x1*i0; }
+#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__MASK 0x0000ffff
+#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__SHIFT 0
+static inline uint32_t A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__SHIFT) & A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__MASK;
+}
+#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__MASK 0xffff0000
+#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__SHIFT 16
+static inline uint32_t A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__SHIFT) & A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__MASK;
+}
+
+#define REG_A6XX_SP_FS_TEX_COUNT 0x0000a9a7
+
+#define REG_A6XX_SP_UNKNOWN_A9A8 0x0000a9a8
+
+#define REG_A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET 0x0000a9a9
+#define A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
+{
+ return ((val >> 11) << A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_SP_CS_CTRL_REG0 0x0000a9b0
+#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADSIZE(enum a6xx_threadsize val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_UNK21 0x00200000
+#define A6XX_SP_CS_CTRL_REG0_UNK22 0x00400000
+#define A6XX_SP_CS_CTRL_REG0_SEPARATEPROLOG 0x00800000
+#define A6XX_SP_CS_CTRL_REG0_MERGEDREGS 0x80000000
+#define A6XX_SP_CS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_CS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_UNK13 0x00002000
+#define A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A6XX_SP_CS_UNKNOWN_A9B1 0x0000a9b1
+#define A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__MASK 0x0000001f
+#define A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__SHIFT 0
+static inline uint32_t A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__SHIFT) & A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__MASK;
+}
+#define A6XX_SP_CS_UNKNOWN_A9B1_UNK5 0x00000020
+#define A6XX_SP_CS_UNKNOWN_A9B1_UNK6 0x00000040
+
+#define REG_A6XX_SP_CS_BRANCH_COND 0x0000a9b2
+
+#define REG_A6XX_SP_CS_OBJ_FIRST_EXEC_OFFSET 0x0000a9b3
+
+#define REG_A6XX_SP_CS_OBJ_START 0x0000a9b4
+#define A6XX_SP_CS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_CS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_CS_OBJ_START(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_OBJ_START__SHIFT) & A6XX_SP_CS_OBJ_START__MASK;
+}
+
+#define REG_A6XX_SP_CS_PVT_MEM_PARAM 0x0000a9b6
+#define A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val >> 9) << A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_CS_PVT_MEM_ADDR 0x0000a9b7
+#define A6XX_SP_CS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_CS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_CS_PVT_MEM_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_CS_PVT_MEM_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_CS_PVT_MEM_SIZE 0x0000a9b9
+#define A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ return ((val >> 12) << A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_CS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
+
+#define REG_A6XX_SP_CS_TEX_COUNT 0x0000a9ba
+
+#define REG_A6XX_SP_CS_CONFIG 0x0000a9bb
+#define A6XX_SP_CS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_CS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_CS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_CS_CONFIG_BINDLESS_UBO 0x00000008
+#define A6XX_SP_CS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_CS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_CS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_CS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CONFIG_NTEX__SHIFT) & A6XX_SP_CS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_CS_CONFIG_NSAMP__MASK 0x003e0000
+#define A6XX_SP_CS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_CS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CONFIG_NSAMP__SHIFT) & A6XX_SP_CS_CONFIG_NSAMP__MASK;
+}
+#define A6XX_SP_CS_CONFIG_NIBO__MASK 0x1fc00000
+#define A6XX_SP_CS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_CS_CONFIG_NIBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CONFIG_NIBO__SHIFT) & A6XX_SP_CS_CONFIG_NIBO__MASK;
+}
+
+#define REG_A6XX_SP_CS_INSTRLEN 0x0000a9bc
+
+#define REG_A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET 0x0000a9bd
+#define A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
+{
+ return ((val >> 11) << A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_SP_CS_CNTL_0 0x0000a9c2
+#define A6XX_SP_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff
+#define A6XX_SP_CS_CNTL_0_WGIDCONSTID__SHIFT 0
+static inline uint32_t A6XX_SP_CS_CNTL_0_WGIDCONSTID(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CNTL_0_WGIDCONSTID__SHIFT) & A6XX_SP_CS_CNTL_0_WGIDCONSTID__MASK;
+}
+#define A6XX_SP_CS_CNTL_0_WGSIZECONSTID__MASK 0x0000ff00
+#define A6XX_SP_CS_CNTL_0_WGSIZECONSTID__SHIFT 8
+static inline uint32_t A6XX_SP_CS_CNTL_0_WGSIZECONSTID(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CNTL_0_WGSIZECONSTID__SHIFT) & A6XX_SP_CS_CNTL_0_WGSIZECONSTID__MASK;
+}
+#define A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID__MASK 0x00ff0000
+#define A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID__SHIFT 16
+static inline uint32_t A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID__SHIFT) & A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID__MASK;
+}
+#define A6XX_SP_CS_CNTL_0_LOCALIDREGID__MASK 0xff000000
+#define A6XX_SP_CS_CNTL_0_LOCALIDREGID__SHIFT 24
+static inline uint32_t A6XX_SP_CS_CNTL_0_LOCALIDREGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CNTL_0_LOCALIDREGID__SHIFT) & A6XX_SP_CS_CNTL_0_LOCALIDREGID__MASK;
+}
+
+#define REG_A6XX_SP_CS_CNTL_1 0x0000a9c3
+#define A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__MASK 0x000000ff
+#define A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT 0
+static inline uint32_t A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT) & A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__MASK;
+}
+#define A6XX_SP_CS_CNTL_1_SINGLE_SP_CORE 0x00000100
+#define A6XX_SP_CS_CNTL_1_THREADSIZE__MASK 0x00000200
+#define A6XX_SP_CS_CNTL_1_THREADSIZE__SHIFT 9
+static inline uint32_t A6XX_SP_CS_CNTL_1_THREADSIZE(enum a6xx_threadsize val)
+{
+ return ((val) << A6XX_SP_CS_CNTL_1_THREADSIZE__SHIFT) & A6XX_SP_CS_CNTL_1_THREADSIZE__MASK;
+}
+#define A6XX_SP_CS_CNTL_1_THREADSIZE_SCALAR 0x00000400
+
+#define REG_A6XX_SP_FS_TEX_SAMP 0x0000a9e0
+#define A6XX_SP_FS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_FS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_FS_TEX_SAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_TEX_SAMP__SHIFT) & A6XX_SP_FS_TEX_SAMP__MASK;
+}
+
+#define REG_A6XX_SP_CS_TEX_SAMP 0x0000a9e2
+#define A6XX_SP_CS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_CS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_CS_TEX_SAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_TEX_SAMP__SHIFT) & A6XX_SP_CS_TEX_SAMP__MASK;
+}
+
+#define REG_A6XX_SP_FS_TEX_CONST 0x0000a9e4
+#define A6XX_SP_FS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_FS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_FS_TEX_CONST(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_TEX_CONST__SHIFT) & A6XX_SP_FS_TEX_CONST__MASK;
+}
+
+#define REG_A6XX_SP_CS_TEX_CONST 0x0000a9e6
+#define A6XX_SP_CS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_CS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_CS_TEX_CONST(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_TEX_CONST__SHIFT) & A6XX_SP_CS_TEX_CONST__MASK;
+}
+
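+/* BINDLESS_BASE entries use a stride of 2 dwords, presumably because each
+ * entry holds a 64-bit descriptor-set base address.
+ */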
+static inline uint32_t REG_A6XX_SP_CS_BINDLESS_BASE(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_SP_CS_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; }
+
+#define REG_A6XX_SP_CS_IBO 0x0000a9f2
+#define A6XX_SP_CS_IBO__MASK 0xffffffff
+#define A6XX_SP_CS_IBO__SHIFT 0
+static inline uint32_t A6XX_SP_CS_IBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_IBO__SHIFT) & A6XX_SP_CS_IBO__MASK;
+}
+
+#define REG_A6XX_SP_CS_IBO_COUNT 0x0000aa00
+
+#define REG_A6XX_SP_MODE_CONTROL 0x0000ab00
+#define A6XX_SP_MODE_CONTROL_CONSTANT_DEMOTION_ENABLE 0x00000001
+#define A6XX_SP_MODE_CONTROL_ISAMMODE__MASK 0x00000006
+#define A6XX_SP_MODE_CONTROL_ISAMMODE__SHIFT 1
+static inline uint32_t A6XX_SP_MODE_CONTROL_ISAMMODE(enum a6xx_isam_mode val)
+{
+ return ((val) << A6XX_SP_MODE_CONTROL_ISAMMODE__SHIFT) & A6XX_SP_MODE_CONTROL_ISAMMODE__MASK;
+}
+#define A6XX_SP_MODE_CONTROL_SHARED_CONSTS_ENABLE 0x00000008
+
+#define REG_A6XX_SP_FS_CONFIG 0x0000ab04
+#define A6XX_SP_FS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_FS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_FS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_FS_CONFIG_BINDLESS_UBO 0x00000008
+#define A6XX_SP_FS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_FS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_FS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_FS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CONFIG_NTEX__SHIFT) & A6XX_SP_FS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_FS_CONFIG_NSAMP__MASK 0x003e0000
+#define A6XX_SP_FS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CONFIG_NSAMP__SHIFT) & A6XX_SP_FS_CONFIG_NSAMP__MASK;
+}
+#define A6XX_SP_FS_CONFIG_NIBO__MASK 0x1fc00000
+#define A6XX_SP_FS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_FS_CONFIG_NIBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CONFIG_NIBO__SHIFT) & A6XX_SP_FS_CONFIG_NIBO__MASK;
+}
+
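+/* A minimal usage sketch (ntex and nsamp are placeholder counts, not names
+ * from this file): the bitfield helpers compose with OR, e.g.
+ *
+ *   uint32_t cfg = A6XX_SP_FS_CONFIG_ENABLED |
+ *                  A6XX_SP_FS_CONFIG_NTEX(ntex) |
+ *                  A6XX_SP_FS_CONFIG_NSAMP(nsamp);
+ *
+ * Each helper masks its own field, so out-of-range values are silently
+ * truncated rather than rejected.
+ */
+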
+#define REG_A6XX_SP_FS_INSTRLEN 0x0000ab05
+
+static inline uint32_t REG_A6XX_SP_BINDLESS_BASE(uint32_t i0) { return 0x0000ab10 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_SP_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000ab10 + 0x2*i0; }
+
+#define REG_A6XX_SP_IBO 0x0000ab1a
+#define A6XX_SP_IBO__MASK 0xffffffff
+#define A6XX_SP_IBO__SHIFT 0
+static inline uint32_t A6XX_SP_IBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_IBO__SHIFT) & A6XX_SP_IBO__MASK;
+}
+
+#define REG_A6XX_SP_IBO_COUNT 0x0000ab20
+
+#define REG_A6XX_SP_2D_DST_FORMAT 0x0000acc0
+#define A6XX_SP_2D_DST_FORMAT_NORM 0x00000001
+#define A6XX_SP_2D_DST_FORMAT_SINT 0x00000002
+#define A6XX_SP_2D_DST_FORMAT_UINT 0x00000004
+#define A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__MASK 0x000007f8
+#define A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__SHIFT 3
+static inline uint32_t A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__SHIFT) & A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__MASK;
+}
+#define A6XX_SP_2D_DST_FORMAT_SRGB 0x00000800
+#define A6XX_SP_2D_DST_FORMAT_MASK__MASK 0x0000f000
+#define A6XX_SP_2D_DST_FORMAT_MASK__SHIFT 12
+static inline uint32_t A6XX_SP_2D_DST_FORMAT_MASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_2D_DST_FORMAT_MASK__SHIFT) & A6XX_SP_2D_DST_FORMAT_MASK__MASK;
+}
+
+#define REG_A6XX_SP_UNKNOWN_AE00 0x0000ae00
+
+#define REG_A6XX_SP_ADDR_MODE_CNTL 0x0000ae01
+
+#define REG_A6XX_SP_NC_MODE_CNTL 0x0000ae02
+
+#define REG_A6XX_SP_CHICKEN_BITS 0x0000ae03
+
+#define REG_A6XX_SP_FLOAT_CNTL 0x0000ae04
+#define A6XX_SP_FLOAT_CNTL_F16_NO_INF 0x00000008
+
+#define REG_A6XX_SP_PERFCTR_ENABLE 0x0000ae0f
+#define A6XX_SP_PERFCTR_ENABLE_VS 0x00000001
+#define A6XX_SP_PERFCTR_ENABLE_HS 0x00000002
+#define A6XX_SP_PERFCTR_ENABLE_DS 0x00000004
+#define A6XX_SP_PERFCTR_ENABLE_GS 0x00000008
+#define A6XX_SP_PERFCTR_ENABLE_FS 0x00000010
+#define A6XX_SP_PERFCTR_ENABLE_CS 0x00000020
+
+static inline uint32_t REG_A6XX_SP_PERFCTR_SP_SEL(uint32_t i0) { return 0x0000ae10 + 0x1*i0; }
+
+#define REG_A6XX_SP_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE 0x0000be22
+
+#define REG_A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR 0x0000b180
+#define A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR__MASK 0xffffffff
+#define A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR__SHIFT) & A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_UNKNOWN_B182 0x0000b182
+
+#define REG_A6XX_SP_UNKNOWN_B183 0x0000b183
+
+#define REG_A6XX_SP_UNKNOWN_B190 0x0000b190
+
+#define REG_A6XX_SP_UNKNOWN_B191 0x0000b191
+
+#define REG_A6XX_SP_TP_RAS_MSAA_CNTL 0x0000b300
+#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__MASK 0x0000000c
+#define A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__SHIFT 2
+static inline uint32_t A6XX_SP_TP_RAS_MSAA_CNTL_UNK2(uint32_t val)
+{
+ return ((val) << A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__SHIFT) & A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__MASK;
+}
+
+#define REG_A6XX_SP_TP_DEST_MSAA_CNTL 0x0000b301
+#define A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR 0x0000b302
+#define A6XX_SP_TP_BORDER_COLOR_BASE_ADDR__MASK 0xffffffff
+#define A6XX_SP_TP_BORDER_COLOR_BASE_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_TP_BORDER_COLOR_BASE_ADDR__SHIFT) & A6XX_SP_TP_BORDER_COLOR_BASE_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_TP_SAMPLE_CONFIG 0x0000b304
+#define A6XX_SP_TP_SAMPLE_CONFIG_UNK0 0x00000001
+#define A6XX_SP_TP_SAMPLE_CONFIG_LOCATION_ENABLE 0x00000002
+
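+/* The sample locations in SAMPLE_LOCATION_0/1 are 4-bit nibbles (the int32_t
+ * cast in the helpers suggests they are signed). Note the generated helpers
+ * multiply by 1.0 and truncate, so the caller must pass values already
+ * scaled to the register's fixed-point units.
+ */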
+#define REG_A6XX_SP_TP_SAMPLE_LOCATION_0 0x0000b305
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK 0x0000000f
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT 0
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK 0x000000f0
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT 4
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK 0x00000f00
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT 8
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK 0x0000f000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT 12
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK 0x000f0000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT 16
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK 0x00f00000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT 20
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK 0x0f000000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT 24
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK 0xf0000000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT 28
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK;
+}
+
+#define REG_A6XX_SP_TP_SAMPLE_LOCATION_1 0x0000b306
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK 0x0000000f
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT 0
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK 0x000000f0
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT 4
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK 0x00000f00
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT 8
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK 0x0000f000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT 12
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK 0x000f0000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT 16
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK 0x00f00000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT 20
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK 0x0f000000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT 24
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK 0xf0000000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT 28
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
+}
+
+#define REG_A6XX_SP_TP_WINDOW_OFFSET 0x0000b307
+#define A6XX_SP_TP_WINDOW_OFFSET_X__MASK 0x00003fff
+#define A6XX_SP_TP_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_SP_TP_WINDOW_OFFSET_X__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_SP_TP_WINDOW_OFFSET_Y__MASK 0x3fff0000
+#define A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A6XX_SP_TP_MODE_CNTL 0x0000b309
+#define A6XX_SP_TP_MODE_CNTL_ISAMMODE__MASK 0x00000003
+#define A6XX_SP_TP_MODE_CNTL_ISAMMODE__SHIFT 0
+static inline uint32_t A6XX_SP_TP_MODE_CNTL_ISAMMODE(enum a6xx_isam_mode val)
+{
+ return ((val) << A6XX_SP_TP_MODE_CNTL_ISAMMODE__SHIFT) & A6XX_SP_TP_MODE_CNTL_ISAMMODE__MASK;
+}
+#define A6XX_SP_TP_MODE_CNTL_UNK3__MASK 0x000000fc
+#define A6XX_SP_TP_MODE_CNTL_UNK3__SHIFT 2
+static inline uint32_t A6XX_SP_TP_MODE_CNTL_UNK3(uint32_t val)
+{
+ return ((val) << A6XX_SP_TP_MODE_CNTL_UNK3__SHIFT) & A6XX_SP_TP_MODE_CNTL_UNK3__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC_INFO 0x0000b4c0
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
+#define A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_FLAGS 0x00001000
+#define A6XX_SP_PS_2D_SRC_INFO_SRGB 0x00002000
+#define A6XX_SP_PS_2D_SRC_INFO_SAMPLES__MASK 0x0000c000
+#define A6XX_SP_PS_2D_SRC_INFO_SAMPLES__SHIFT 14
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_SAMPLES__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_SAMPLES__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_FILTER 0x00010000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK17 0x00020000
+#define A6XX_SP_PS_2D_SRC_INFO_SAMPLES_AVERAGE 0x00040000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK19 0x00080000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK20 0x00100000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK21 0x00200000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK22 0x00400000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK23__MASK 0x07800000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK23__SHIFT 23
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_UNK23(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_UNK23__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_UNK23__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_UNK28 0x10000000
+
+#define REG_A6XX_SP_PS_2D_SRC_SIZE 0x0000b4c1
+#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK 0x00007fff
+#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK 0x3fff8000
+#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT 15
+static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC 0x0000b4c2
+#define A6XX_SP_PS_2D_SRC__MASK 0xffffffff
+#define A6XX_SP_PS_2D_SRC__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC__SHIFT) & A6XX_SP_PS_2D_SRC__MASK;
+}
+
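+/* The PITCH fields in the 2D-source registers below are programmed in
+ * 64-byte units: each helper shifts the byte pitch right by 6 before
+ * packing.
+ */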
+#define REG_A6XX_SP_PS_2D_SRC_PITCH 0x0000b4c4
+#define A6XX_SP_PS_2D_SRC_PITCH_UNK0__MASK 0x000001ff
+#define A6XX_SP_PS_2D_SRC_PITCH_UNK0__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_UNK0(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_PITCH_UNK0__SHIFT) & A6XX_SP_PS_2D_SRC_PITCH_UNK0__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK 0x00fffe00
+#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT 9
+static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC_PLANE1 0x0000b4c5
+#define A6XX_SP_PS_2D_SRC_PLANE1__MASK 0xffffffff
+#define A6XX_SP_PS_2D_SRC_PLANE1__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_PLANE1(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_PLANE1__SHIFT) & A6XX_SP_PS_2D_SRC_PLANE1__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC_PLANE_PITCH 0x0000b4c7
+#define A6XX_SP_PS_2D_SRC_PLANE_PITCH__MASK 0x00000fff
+#define A6XX_SP_PS_2D_SRC_PLANE_PITCH__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_PLANE_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_SP_PS_2D_SRC_PLANE_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PLANE_PITCH__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC_PLANE2 0x0000b4c8
+#define A6XX_SP_PS_2D_SRC_PLANE2__MASK 0xffffffff
+#define A6XX_SP_PS_2D_SRC_PLANE2__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_PLANE2(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_PLANE2__SHIFT) & A6XX_SP_PS_2D_SRC_PLANE2__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC_FLAGS 0x0000b4ca
+#define A6XX_SP_PS_2D_SRC_FLAGS__MASK 0xffffffff
+#define A6XX_SP_PS_2D_SRC_FLAGS__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_FLAGS(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_FLAGS__SHIFT) & A6XX_SP_PS_2D_SRC_FLAGS__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC_FLAGS_PITCH 0x0000b4cc
+#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH__MASK 0x000000ff
+#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_FLAGS_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_SP_PS_2D_SRC_FLAGS_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_FLAGS_PITCH__MASK;
+}
+
+#define REG_A6XX_SP_PS_UNKNOWN_B4CD 0x0000b4cd
+
+#define REG_A6XX_SP_PS_UNKNOWN_B4CE 0x0000b4ce
+
+#define REG_A6XX_SP_PS_UNKNOWN_B4CF 0x0000b4cf
+
+#define REG_A6XX_SP_PS_UNKNOWN_B4D0 0x0000b4d0
+
+#define REG_A6XX_SP_WINDOW_OFFSET 0x0000b4d1
+#define A6XX_SP_WINDOW_OFFSET_X__MASK 0x00003fff
+#define A6XX_SP_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_SP_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_SP_WINDOW_OFFSET_X__SHIFT) & A6XX_SP_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_SP_WINDOW_OFFSET_Y__MASK 0x3fff0000
+#define A6XX_SP_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_SP_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_SP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A6XX_TPL1_DBG_ECO_CNTL 0x0000b600
+
+#define REG_A6XX_TPL1_ADDR_MODE_CNTL 0x0000b601
+
+#define REG_A6XX_TPL1_UNKNOWN_B602 0x0000b602
+
+#define REG_A6XX_TPL1_NC_MODE_CNTL 0x0000b604
+#define A6XX_TPL1_NC_MODE_CNTL_MODE 0x00000001
+#define A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__MASK 0x00000006
+#define A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__SHIFT 1
+static inline uint32_t A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT(uint32_t val)
+{
+ return ((val) << A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__SHIFT) & A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__MASK;
+}
+#define A6XX_TPL1_NC_MODE_CNTL_MIN_ACCESS_LENGTH 0x00000008
+#define A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__MASK 0x00000010
+#define A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__SHIFT 4
+static inline uint32_t A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT(uint32_t val)
+{
+ return ((val) << A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__SHIFT) & A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__MASK;
+}
+#define A6XX_TPL1_NC_MODE_CNTL_UNK6__MASK 0x000000c0
+#define A6XX_TPL1_NC_MODE_CNTL_UNK6__SHIFT 6
+static inline uint32_t A6XX_TPL1_NC_MODE_CNTL_UNK6(uint32_t val)
+{
+ return ((val) << A6XX_TPL1_NC_MODE_CNTL_UNK6__SHIFT) & A6XX_TPL1_NC_MODE_CNTL_UNK6__MASK;
+}
+
+#define REG_A6XX_TPL1_UNKNOWN_B605 0x0000b605
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0 0x0000b608
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1 0x0000b609
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2 0x0000b60a
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3 0x0000b60b
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4 0x0000b60c
+
+static inline uint32_t REG_A6XX_TPL1_PERFCTR_TP_SEL(uint32_t i0) { return 0x0000b610 + 0x1*i0; }
+
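+/* The HLSQ_*_CNTL CONSTLEN fields are packed divided by 4 (val >> 2), so the
+ * value passed in is effectively expected to be a multiple of 4; anything
+ * below that granularity is truncated by the helper.
+ */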
+#define REG_A6XX_HLSQ_VS_CNTL 0x0000b800
+#define A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_VS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK;
+}
+#define A6XX_HLSQ_VS_CNTL_ENABLED 0x00000100
+
+#define REG_A6XX_HLSQ_HS_CNTL 0x0000b801
+#define A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_HS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK;
+}
+#define A6XX_HLSQ_HS_CNTL_ENABLED 0x00000100
+
+#define REG_A6XX_HLSQ_DS_CNTL 0x0000b802
+#define A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_DS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK;
+}
+#define A6XX_HLSQ_DS_CNTL_ENABLED 0x00000100
+
+#define REG_A6XX_HLSQ_GS_CNTL 0x0000b803
+#define A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_GS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK;
+}
+#define A6XX_HLSQ_GS_CNTL_ENABLED 0x00000100
+
+#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_CMD 0x0000b820
+
+#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR 0x0000b821
+#define A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR__MASK 0xffffffff
+#define A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR__SHIFT 0
+static inline uint32_t A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR__SHIFT) & A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR__MASK;
+}
+
+#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_DATA 0x0000b823
+
+#define REG_A6XX_HLSQ_FS_CNTL_0 0x0000b980
+#define A6XX_HLSQ_FS_CNTL_0_THREADSIZE__MASK 0x00000001
+#define A6XX_HLSQ_FS_CNTL_0_THREADSIZE__SHIFT 0
+static inline uint32_t A6XX_HLSQ_FS_CNTL_0_THREADSIZE(enum a6xx_threadsize val)
+{
+ return ((val) << A6XX_HLSQ_FS_CNTL_0_THREADSIZE__SHIFT) & A6XX_HLSQ_FS_CNTL_0_THREADSIZE__MASK;
+}
+#define A6XX_HLSQ_FS_CNTL_0_VARYINGS 0x00000002
+#define A6XX_HLSQ_FS_CNTL_0_UNK2__MASK 0x00000ffc
+#define A6XX_HLSQ_FS_CNTL_0_UNK2__SHIFT 2
+static inline uint32_t A6XX_HLSQ_FS_CNTL_0_UNK2(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_FS_CNTL_0_UNK2__SHIFT) & A6XX_HLSQ_FS_CNTL_0_UNK2__MASK;
+}
+
+#define REG_A6XX_HLSQ_UNKNOWN_B981 0x0000b981
+
+#define REG_A6XX_HLSQ_CONTROL_1_REG 0x0000b982
+
+#define REG_A6XX_HLSQ_CONTROL_2_REG 0x0000b983
+#define A6XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff
+#define A6XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16
+static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK;
+}
+#define A6XX_HLSQ_CONTROL_2_REG_SIZE__MASK 0xff000000
+#define A6XX_HLSQ_CONTROL_2_REG_SIZE__SHIFT 24
+static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SIZE(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_2_REG_SIZE__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SIZE__MASK;
+}
+
+#define REG_A6XX_HLSQ_CONTROL_3_REG 0x0000b984
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK;
+}
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK;
+}
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16
+static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24
+static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK;
+}
+
+#define REG_A6XX_HLSQ_CONTROL_4_REG 0x0000b985
+#define A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff
+#define A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK;
+}
+#define A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK 0x0000ff00
+#define A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK;
+}
+#define A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000
+#define A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16
+static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000
+#define A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24
+static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
+}
+
+#define REG_A6XX_HLSQ_CONTROL_5_REG 0x0000b986
+#define A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK 0x000000ff
+#define A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT) & A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK 0x0000ff00
+#define A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT) & A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_CNTL 0x0000b987
+#define A6XX_HLSQ_CS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_CS_CNTL_CONSTLEN__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_ENABLED 0x00000100
+
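+/* HLSQ_CS_NDRANGE_0 packs the kernel dimensionality and per-axis local
+ * (workgroup) sizes; NDRANGE_1..6 then alternate global size and global
+ * offset for X, Y and Z. An illustrative sketch with placeholder sizes:
+ *
+ *   uint32_t nd0 = A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM(3) |
+ *                  A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(local_x) |
+ *                  A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(local_y) |
+ *                  A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(local_z);
+ *
+ * (local_x/y/z are placeholders; whether the hardware expects the raw size
+ * or size minus one is not stated by this header.)
+ */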
+#define REG_A6XX_HLSQ_CS_NDRANGE_0 0x0000b990
+#define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK 0x00000003
+#define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK;
+}
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT 2
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK;
+}
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT 12
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK;
+}
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_1 0x0000b991
+#define A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT) & A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_2 0x0000b992
+#define A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT) & A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_3 0x0000b993
+#define A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT) & A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_4 0x0000b994
+#define A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT) & A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_5 0x0000b995
+#define A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT) & A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_6 0x0000b996
+#define A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT) & A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_CNTL_0 0x0000b997
+#define A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff
+#define A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__MASK 0x0000ff00
+#define A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__MASK 0x00ff0000
+#define A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__SHIFT 16
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK 0xff000000
+#define A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT 24
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_CNTL_1 0x0000b998
+#define A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__MASK 0x000000ff
+#define A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT) & A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_1_SINGLE_SP_CORE 0x00000100
+#define A6XX_HLSQ_CS_CNTL_1_THREADSIZE__MASK 0x00000200
+#define A6XX_HLSQ_CS_CNTL_1_THREADSIZE__SHIFT 9
+static inline uint32_t A6XX_HLSQ_CS_CNTL_1_THREADSIZE(enum a6xx_threadsize val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_1_THREADSIZE__SHIFT) & A6XX_HLSQ_CS_CNTL_1_THREADSIZE__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_1_THREADSIZE_SCALAR 0x00000400
+
+#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_X 0x0000b999
+
+#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000b99a
+
+#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000b99b
+
+#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_CMD 0x0000b9a0
+
+#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR 0x0000b9a1
+#define A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR__MASK 0xffffffff
+#define A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR__SHIFT 0
+static inline uint32_t A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR__SHIFT) & A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR__MASK;
+}
+
+#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_DATA 0x0000b9a3
+
+static inline uint32_t REG_A6XX_HLSQ_CS_BINDLESS_BASE(uint32_t i0) { return 0x0000b9c0 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_HLSQ_CS_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000b9c0 + 0x2*i0; }
+
+#define REG_A6XX_HLSQ_CS_UNKNOWN_B9D0 0x0000b9d0
+#define A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE__MASK 0x0000001f
+#define A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE__SHIFT) & A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE__MASK;
+}
+#define A6XX_HLSQ_CS_UNKNOWN_B9D0_UNK5 0x00000020
+#define A6XX_HLSQ_CS_UNKNOWN_B9D0_UNK6 0x00000040
+
+#define REG_A6XX_HLSQ_DRAW_CMD 0x0000bb00
+#define A6XX_HLSQ_DRAW_CMD_STATE_ID__MASK 0x000000ff
+#define A6XX_HLSQ_DRAW_CMD_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_DRAW_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_DRAW_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_DRAW_CMD_STATE_ID__MASK;
+}
+
+#define REG_A6XX_HLSQ_DISPATCH_CMD 0x0000bb01
+#define A6XX_HLSQ_DISPATCH_CMD_STATE_ID__MASK 0x000000ff
+#define A6XX_HLSQ_DISPATCH_CMD_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_DISPATCH_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_DISPATCH_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_DISPATCH_CMD_STATE_ID__MASK;
+}
+
+#define REG_A6XX_HLSQ_EVENT_CMD 0x0000bb02
+#define A6XX_HLSQ_EVENT_CMD_STATE_ID__MASK 0x00ff0000
+#define A6XX_HLSQ_EVENT_CMD_STATE_ID__SHIFT 16
+static inline uint32_t A6XX_HLSQ_EVENT_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_EVENT_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_EVENT_CMD_STATE_ID__MASK;
+}
+#define A6XX_HLSQ_EVENT_CMD_EVENT__MASK 0x0000007f
+#define A6XX_HLSQ_EVENT_CMD_EVENT__SHIFT 0
+static inline uint32_t A6XX_HLSQ_EVENT_CMD_EVENT(enum vgt_event_type val)
+{
+ return ((val) << A6XX_HLSQ_EVENT_CMD_EVENT__SHIFT) & A6XX_HLSQ_EVENT_CMD_EVENT__MASK;
+}
+
+#define REG_A6XX_HLSQ_INVALIDATE_CMD 0x0000bb08
+#define A6XX_HLSQ_INVALIDATE_CMD_VS_STATE 0x00000001
+#define A6XX_HLSQ_INVALIDATE_CMD_HS_STATE 0x00000002
+#define A6XX_HLSQ_INVALIDATE_CMD_DS_STATE 0x00000004
+#define A6XX_HLSQ_INVALIDATE_CMD_GS_STATE 0x00000008
+#define A6XX_HLSQ_INVALIDATE_CMD_FS_STATE 0x00000010
+#define A6XX_HLSQ_INVALIDATE_CMD_CS_STATE 0x00000020
+#define A6XX_HLSQ_INVALIDATE_CMD_CS_IBO 0x00000040
+#define A6XX_HLSQ_INVALIDATE_CMD_GFX_IBO 0x00000080
+#define A6XX_HLSQ_INVALIDATE_CMD_CS_SHARED_CONST 0x00080000
+#define A6XX_HLSQ_INVALIDATE_CMD_GFX_SHARED_CONST 0x00000100
+#define A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__MASK 0x00003e00
+#define A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__SHIFT 9
+static inline uint32_t A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__SHIFT) & A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__MASK;
+}
+#define A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__MASK 0x0007c000
+#define A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__SHIFT 14
+static inline uint32_t A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__SHIFT) & A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__MASK;
+}
+
+#define REG_A6XX_HLSQ_FS_CNTL 0x0000bb10
+#define A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_FS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK;
+}
+#define A6XX_HLSQ_FS_CNTL_ENABLED 0x00000100
+
+#define REG_A6XX_HLSQ_SHARED_CONSTS 0x0000bb11
+#define A6XX_HLSQ_SHARED_CONSTS_ENABLE 0x00000001
+
+static inline uint32_t REG_A6XX_HLSQ_BINDLESS_BASE(uint32_t i0) { return 0x0000bb20 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_HLSQ_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000bb20 + 0x2*i0; }
+
+#define REG_A6XX_HLSQ_2D_EVENT_CMD 0x0000bd80
+#define A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__MASK 0x0000ff00
+#define A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__SHIFT 8
+static inline uint32_t A6XX_HLSQ_2D_EVENT_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__MASK;
+}
+#define A6XX_HLSQ_2D_EVENT_CMD_EVENT__MASK 0x0000007f
+#define A6XX_HLSQ_2D_EVENT_CMD_EVENT__SHIFT 0
+static inline uint32_t A6XX_HLSQ_2D_EVENT_CMD_EVENT(enum vgt_event_type val)
+{
+ return ((val) << A6XX_HLSQ_2D_EVENT_CMD_EVENT__SHIFT) & A6XX_HLSQ_2D_EVENT_CMD_EVENT__MASK;
+}
+
+#define REG_A6XX_HLSQ_UNKNOWN_BE00 0x0000be00
+
+#define REG_A6XX_HLSQ_UNKNOWN_BE01 0x0000be01
+
+#define REG_A6XX_HLSQ_UNKNOWN_BE04 0x0000be04
+
+#define REG_A6XX_HLSQ_ADDR_MODE_CNTL 0x0000be05
+
+#define REG_A6XX_HLSQ_UNKNOWN_BE08 0x0000be08
+
+static inline uint32_t REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL(uint32_t i0) { return 0x0000be10 + 0x1*i0; }
+
+#define REG_A6XX_HLSQ_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE 0x0000be22
+
+#define REG_A6XX_CP_EVENT_START 0x0000d600
+#define A6XX_CP_EVENT_START_STATE_ID__MASK 0x000000ff
+#define A6XX_CP_EVENT_START_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_CP_EVENT_START_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_CP_EVENT_START_STATE_ID__SHIFT) & A6XX_CP_EVENT_START_STATE_ID__MASK;
+}
+
+#define REG_A6XX_CP_EVENT_END 0x0000d601
+#define A6XX_CP_EVENT_END_STATE_ID__MASK 0x000000ff
+#define A6XX_CP_EVENT_END_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_CP_EVENT_END_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_CP_EVENT_END_STATE_ID__SHIFT) & A6XX_CP_EVENT_END_STATE_ID__MASK;
+}
+
+#define REG_A6XX_CP_2D_EVENT_START 0x0000d700
+#define A6XX_CP_2D_EVENT_START_STATE_ID__MASK 0x000000ff
+#define A6XX_CP_2D_EVENT_START_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_CP_2D_EVENT_START_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_CP_2D_EVENT_START_STATE_ID__SHIFT) & A6XX_CP_2D_EVENT_START_STATE_ID__MASK;
+}
+
+#define REG_A6XX_CP_2D_EVENT_END 0x0000d701
+#define A6XX_CP_2D_EVENT_END_STATE_ID__MASK 0x000000ff
+#define A6XX_CP_2D_EVENT_END_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_CP_2D_EVENT_END_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_CP_2D_EVENT_END_STATE_ID__SHIFT) & A6XX_CP_2D_EVENT_END_STATE_ID__MASK;
+}
+
+#define REG_A6XX_TEX_SAMP_0 0x00000000
+#define A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
+#define A6XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
+#define A6XX_TEX_SAMP_0_XY_MAG__SHIFT 1
+static inline uint32_t A6XX_TEX_SAMP_0_XY_MAG(enum a6xx_tex_filter val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_XY_MAG__SHIFT) & A6XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A6XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
+#define A6XX_TEX_SAMP_0_XY_MIN__SHIFT 3
+static inline uint32_t A6XX_TEX_SAMP_0_XY_MIN(enum a6xx_tex_filter val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_XY_MIN__SHIFT) & A6XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A6XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
+#define A6XX_TEX_SAMP_0_WRAP_S__SHIFT 5
+static inline uint32_t A6XX_TEX_SAMP_0_WRAP_S(enum a6xx_tex_clamp val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_WRAP_S__SHIFT) & A6XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A6XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
+#define A6XX_TEX_SAMP_0_WRAP_T__SHIFT 8
+static inline uint32_t A6XX_TEX_SAMP_0_WRAP_T(enum a6xx_tex_clamp val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_WRAP_T__SHIFT) & A6XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A6XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
+#define A6XX_TEX_SAMP_0_WRAP_R__SHIFT 11
+static inline uint32_t A6XX_TEX_SAMP_0_WRAP_R(enum a6xx_tex_clamp val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_WRAP_R__SHIFT) & A6XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A6XX_TEX_SAMP_0_ANISO__MASK 0x0001c000
+#define A6XX_TEX_SAMP_0_ANISO__SHIFT 14
+static inline uint32_t A6XX_TEX_SAMP_0_ANISO(enum a6xx_tex_aniso val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_ANISO__SHIFT) & A6XX_TEX_SAMP_0_ANISO__MASK;
+}
+#define A6XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
+#define A6XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
+static inline uint32_t A6XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 256.0))) << A6XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A6XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
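+
+/*
+ * Note: the LOD-related fields here and in TEX_SAMP_1 below are fixed-point
+ * values with 8 fractional bits (hence the "val * 256.0"). For example, a
+ * LOD bias of 1.5 encodes as (int32_t)(1.5 * 256.0) = 384 (0x180) before
+ * being shifted into place.
+ */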
+
+#define REG_A6XX_TEX_SAMP_1 0x00000001
+#define A6XX_TEX_SAMP_1_CLAMPENABLE 0x00000001
+#define A6XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
+#define A6XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
+static inline uint32_t A6XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A6XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
+#define A6XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
+#define A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
+#define A6XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
+#define A6XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
+static inline uint32_t A6XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A6XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A6XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A6XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
+#define A6XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
+static inline uint32_t A6XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A6XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A6XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
+
+#define REG_A6XX_TEX_SAMP_2 0x00000002
+#define A6XX_TEX_SAMP_2_REDUCTION_MODE__MASK 0x00000003
+#define A6XX_TEX_SAMP_2_REDUCTION_MODE__SHIFT 0
+static inline uint32_t A6XX_TEX_SAMP_2_REDUCTION_MODE(enum a6xx_reduction_mode val)
+{
+ return ((val) << A6XX_TEX_SAMP_2_REDUCTION_MODE__SHIFT) & A6XX_TEX_SAMP_2_REDUCTION_MODE__MASK;
+}
+#define A6XX_TEX_SAMP_2_CHROMA_LINEAR 0x00000020
+#define A6XX_TEX_SAMP_2_BCOLOR__MASK 0xffffff80
+#define A6XX_TEX_SAMP_2_BCOLOR__SHIFT 7
+static inline uint32_t A6XX_TEX_SAMP_2_BCOLOR(uint32_t val)
+{
+ return ((val) << A6XX_TEX_SAMP_2_BCOLOR__SHIFT) & A6XX_TEX_SAMP_2_BCOLOR__MASK;
+}
+
+#define REG_A6XX_TEX_SAMP_3 0x00000003
+
+#define REG_A6XX_TEX_CONST_0 0x00000000
+#define A6XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003
+#define A6XX_TEX_CONST_0_TILE_MODE__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_0_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_TEX_CONST_0_TILE_MODE__SHIFT) & A6XX_TEX_CONST_0_TILE_MODE__MASK;
+}
+#define A6XX_TEX_CONST_0_SRGB 0x00000004
+#define A6XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A6XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_X(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_X__SHIFT) & A6XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A6XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A6XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_Y(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A6XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A6XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A6XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_Z(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A6XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A6XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A6XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_W(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_W__SHIFT) & A6XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A6XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
+#define A6XX_TEX_CONST_0_MIPLVLS__SHIFT 16
+static inline uint32_t A6XX_TEX_CONST_0_MIPLVLS(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_0_MIPLVLS__SHIFT) & A6XX_TEX_CONST_0_MIPLVLS__MASK;
+}
+#define A6XX_TEX_CONST_0_CHROMA_MIDPOINT_X 0x00010000
+#define A6XX_TEX_CONST_0_CHROMA_MIDPOINT_Y 0x00040000
+#define A6XX_TEX_CONST_0_SAMPLES__MASK 0x00300000
+#define A6XX_TEX_CONST_0_SAMPLES__SHIFT 20
+static inline uint32_t A6XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SAMPLES__SHIFT) & A6XX_TEX_CONST_0_SAMPLES__MASK;
+}
+#define A6XX_TEX_CONST_0_FMT__MASK 0x3fc00000
+#define A6XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A6XX_TEX_CONST_0_FMT(enum a6xx_format val)
+{
+ return ((val) << A6XX_TEX_CONST_0_FMT__SHIFT) & A6XX_TEX_CONST_0_FMT__MASK;
+}
+#define A6XX_TEX_CONST_0_SWAP__MASK 0xc0000000
+#define A6XX_TEX_CONST_0_SWAP__SHIFT 30
+static inline uint32_t A6XX_TEX_CONST_0_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWAP__SHIFT) & A6XX_TEX_CONST_0_SWAP__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_1 0x00000001
+#define A6XX_TEX_CONST_1_WIDTH__MASK 0x00007fff
+#define A6XX_TEX_CONST_1_WIDTH__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_1_WIDTH__SHIFT) & A6XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A6XX_TEX_CONST_1_HEIGHT__MASK 0x3fff8000
+#define A6XX_TEX_CONST_1_HEIGHT__SHIFT 15
+static inline uint32_t A6XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_1_HEIGHT__SHIFT) & A6XX_TEX_CONST_1_HEIGHT__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_2 0x00000002
+#define A6XX_TEX_CONST_2_BUFFER 0x00000010
+#define A6XX_TEX_CONST_2_PITCHALIGN__MASK 0x0000000f
+#define A6XX_TEX_CONST_2_PITCHALIGN__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_2_PITCHALIGN(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_2_PITCHALIGN__SHIFT) & A6XX_TEX_CONST_2_PITCHALIGN__MASK;
+}
+#define A6XX_TEX_CONST_2_PITCH__MASK 0x1fffff80
+#define A6XX_TEX_CONST_2_PITCH__SHIFT 7
+static inline uint32_t A6XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_2_PITCH__SHIFT) & A6XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A6XX_TEX_CONST_2_TYPE__MASK 0xe0000000
+#define A6XX_TEX_CONST_2_TYPE__SHIFT 29
+static inline uint32_t A6XX_TEX_CONST_2_TYPE(enum a6xx_tex_type val)
+{
+ return ((val) << A6XX_TEX_CONST_2_TYPE__SHIFT) & A6XX_TEX_CONST_2_TYPE__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_3 0x00000003
+#define A6XX_TEX_CONST_3_ARRAY_PITCH__MASK 0x00003fff
+#define A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A6XX_TEX_CONST_3_ARRAY_PITCH__MASK;
+}
+#define A6XX_TEX_CONST_3_MIN_LAYERSZ__MASK 0x07800000
+#define A6XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT 23
+static inline uint32_t A6XX_TEX_CONST_3_MIN_LAYERSZ(uint32_t val)
+{
+ return ((val >> 12) << A6XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT) & A6XX_TEX_CONST_3_MIN_LAYERSZ__MASK;
+}
+#define A6XX_TEX_CONST_3_TILE_ALL 0x08000000
+#define A6XX_TEX_CONST_3_FLAG 0x10000000
+
+#define REG_A6XX_TEX_CONST_4 0x00000004
+#define A6XX_TEX_CONST_4_BASE_LO__MASK 0xffffffe0
+#define A6XX_TEX_CONST_4_BASE_LO__SHIFT 5
+static inline uint32_t A6XX_TEX_CONST_4_BASE_LO(uint32_t val)
+{
+ return ((val >> 5) << A6XX_TEX_CONST_4_BASE_LO__SHIFT) & A6XX_TEX_CONST_4_BASE_LO__MASK;
+}
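+
+/*
+ * Note: helpers that shift the argument down first ("val >> N"), like this
+ * one and A6XX_TEX_CONST_3_ARRAY_PITCH() above, take a byte value and store
+ * it in units of 1 << N - e.g. the base address here must be 32-byte
+ * aligned, and the array pitch is stored in 4 KiB units.
+ */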
+
+#define REG_A6XX_TEX_CONST_5 0x00000005
+#define A6XX_TEX_CONST_5_BASE_HI__MASK 0x0001ffff
+#define A6XX_TEX_CONST_5_BASE_HI__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_5_BASE_HI(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_5_BASE_HI__SHIFT) & A6XX_TEX_CONST_5_BASE_HI__MASK;
+}
+#define A6XX_TEX_CONST_5_DEPTH__MASK 0x3ffe0000
+#define A6XX_TEX_CONST_5_DEPTH__SHIFT 17
+static inline uint32_t A6XX_TEX_CONST_5_DEPTH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_5_DEPTH__SHIFT) & A6XX_TEX_CONST_5_DEPTH__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_6 0x00000006
+#define A6XX_TEX_CONST_6_PLANE_PITCH__MASK 0xffffff00
+#define A6XX_TEX_CONST_6_PLANE_PITCH__SHIFT 8
+static inline uint32_t A6XX_TEX_CONST_6_PLANE_PITCH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_6_PLANE_PITCH__SHIFT) & A6XX_TEX_CONST_6_PLANE_PITCH__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_7 0x00000007
+#define A6XX_TEX_CONST_7_FLAG_LO__MASK 0xffffffe0
+#define A6XX_TEX_CONST_7_FLAG_LO__SHIFT 5
+static inline uint32_t A6XX_TEX_CONST_7_FLAG_LO(uint32_t val)
+{
+ return ((val >> 5) << A6XX_TEX_CONST_7_FLAG_LO__SHIFT) & A6XX_TEX_CONST_7_FLAG_LO__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_8 0x00000008
+#define A6XX_TEX_CONST_8_FLAG_HI__MASK 0x0001ffff
+#define A6XX_TEX_CONST_8_FLAG_HI__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_8_FLAG_HI(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_8_FLAG_HI__SHIFT) & A6XX_TEX_CONST_8_FLAG_HI__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_9 0x00000009
+#define A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__MASK 0x0001ffff
+#define A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_10 0x0000000a
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__MASK 0x0000007f
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__MASK;
+}
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__MASK 0x00000f00
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__SHIFT 8
+static inline uint32_t A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__MASK;
+}
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__MASK 0x0000f000
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__SHIFT 12
+static inline uint32_t A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_11 0x0000000b
+
+#define REG_A6XX_TEX_CONST_12 0x0000000c
+
+#define REG_A6XX_TEX_CONST_13 0x0000000d
+
+#define REG_A6XX_TEX_CONST_14 0x0000000e
+
+#define REG_A6XX_TEX_CONST_15 0x0000000f
+
+#define REG_A6XX_UBO_0 0x00000000
+#define A6XX_UBO_0_BASE_LO__MASK 0xffffffff
+#define A6XX_UBO_0_BASE_LO__SHIFT 0
+static inline uint32_t A6XX_UBO_0_BASE_LO(uint32_t val)
+{
+ return ((val) << A6XX_UBO_0_BASE_LO__SHIFT) & A6XX_UBO_0_BASE_LO__MASK;
+}
+
+#define REG_A6XX_UBO_1 0x00000001
+#define A6XX_UBO_1_BASE_HI__MASK 0x0001ffff
+#define A6XX_UBO_1_BASE_HI__SHIFT 0
+static inline uint32_t A6XX_UBO_1_BASE_HI(uint32_t val)
+{
+ return ((val) << A6XX_UBO_1_BASE_HI__SHIFT) & A6XX_UBO_1_BASE_HI__MASK;
+}
+#define A6XX_UBO_1_SIZE__MASK 0xfffe0000
+#define A6XX_UBO_1_SIZE__SHIFT 17
+static inline uint32_t A6XX_UBO_1_SIZE(uint32_t val)
+{
+ return ((val) << A6XX_UBO_1_SIZE__SHIFT) & A6XX_UBO_1_SIZE__MASK;
+}
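+
+/*
+ * Illustrative sketch (not part of the original header): a driver would
+ * pack a two-dword UBO descriptor with the helpers above roughly as
+ *
+ *	dwords[0] = A6XX_UBO_0_BASE_LO(lower_32_bits(iova));
+ *	dwords[1] = A6XX_UBO_1_BASE_HI(upper_32_bits(iova)) |
+ *		    A6XX_UBO_1_SIZE(size);
+ */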
+
+#define REG_A6XX_PDC_GPU_ENABLE_PDC 0x00001140
+
+#define REG_A6XX_PDC_GPU_SEQ_START_ADDR 0x00001148
+
+#define REG_A6XX_PDC_GPU_TCS0_CONTROL 0x00001540
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD_ENABLE_BANK 0x00001541
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD_WAIT_FOR_CMPL_BANK 0x00001542
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_MSGID 0x00001543
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_ADDR 0x00001544
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_DATA 0x00001545
+
+#define REG_A6XX_PDC_GPU_TCS1_CONTROL 0x00001572
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK 0x00001573
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK 0x00001574
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID 0x00001575
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR 0x00001576
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_DATA 0x00001577
+
+#define REG_A6XX_PDC_GPU_TCS2_CONTROL 0x000015a4
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD_ENABLE_BANK 0x000015a5
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD_WAIT_FOR_CMPL_BANK 0x000015a6
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_MSGID 0x000015a7
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_ADDR 0x000015a8
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_DATA 0x000015a9
+
+#define REG_A6XX_PDC_GPU_TCS3_CONTROL 0x000015d6
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK 0x000015d7
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK 0x000015d8
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID 0x000015d9
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR 0x000015da
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_DATA 0x000015db
+
+#define REG_A6XX_PDC_GPU_SEQ_MEM_0 0x00000000
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A 0x00000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__MASK 0x000000ff
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK 0x0000ff00
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B 0x00000001
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C 0x00000002
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D 0x00000003
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT 0x00000004
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM 0x00000005
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0 0x00000008
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1 0x00000009
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2 0x0000000a
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3 0x0000000b
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0 0x0000000c
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1 0x0000000d
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2 0x0000000e
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3 0x0000000f
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0 0x00000010
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1 0x00000011
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0000002f
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000030
+
+#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0 0x00000001
+
+#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1 0x00000002
+
+
+#endif /* A6XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
new file mode 100644
index 000000000..870252bef
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -0,0 +1,1658 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
+
+#include <linux/clk.h>
+#include <linux/interconnect.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <soc/qcom/cmd-db.h>
+#include <drm/drm_gem.h>
+
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.xml.h"
+#include "msm_gem.h"
+#include "msm_gpu_trace.h"
+#include "msm_mmu.h"
+
+static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ /* FIXME: add a banner here */
+ gmu->hung = true;
+
+ /* Turn off the hangcheck timer while we are resetting */
+ del_timer(&gpu->hangcheck_timer);
+
+ /* Queue the GPU handler because we need to treat this as a recovery */
+ kthread_queue_work(gpu->worker, &gpu->recover_work);
+}
+
+static irqreturn_t a6xx_gmu_irq(int irq, void *data)
+{
+ struct a6xx_gmu *gmu = data;
+ u32 status;
+
+ status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);
+
+ if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
+ dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
+
+ a6xx_gmu_fault(gmu);
+ }
+
+ if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
+ dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");
+
+ if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
+ dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
+ gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t a6xx_hfi_irq(int irq, void *data)
+{
+ struct a6xx_gmu *gmu = data;
+ u32 status;
+
+ status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);
+
+ if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
+ dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
+
+ a6xx_gmu_fault(gmu);
+ }
+
+ return IRQ_HANDLED;
+}
+
+bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
+{
+ u32 val;
+
+ /* This can be called from gpu state code so make sure GMU is valid */
+ if (!gmu->initialized)
+ return false;
+
+ val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+
+ return !(val &
+ (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
+ A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
+}
+
+/* Check to see if the GX rail is still powered */
+bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
+{
+ u32 val;
+
+ /* This can be called from gpu state code so make sure GMU is valid */
+ if (!gmu->initialized)
+ return false;
+
+ val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+
+ return !(val &
+ (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
+ A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
+}
+
+void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
+ bool suspended)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u32 perf_index;
+ unsigned long gpu_freq;
+ int ret = 0;
+
+ gpu_freq = dev_pm_opp_get_freq(opp);
+
+ if (gpu_freq == gmu->freq)
+ return;
+
+ for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
+ if (gpu_freq == gmu->gpu_freqs[perf_index])
+ break;
+
+ gmu->current_perf_index = perf_index;
+ gmu->freq = gmu->gpu_freqs[perf_index];
+
+ trace_msm_gmu_freq_change(gmu->freq, perf_index);
+
+ /*
+ * This can get called from devfreq while the hardware is idle. Don't
+ * bring up the power if it isn't already active. All we're doing here
+ * is updating the frequency so that when we come back online we're at
+ * the right rate.
+ */
+ if (suspended)
+ return;
+
+ if (!gmu->legacy) {
+ a6xx_hfi_set_freq(gmu, perf_index);
+ dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
+ return;
+ }
+
+ gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
+
+ gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
+ ((3 & 0xf) << 28) | perf_index);
+
+ /*
+ * Send an invalid index as a vote for the bus bandwidth and let the
+ * firmware decide on the right vote
+ */
+ gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);
+
+ /* Set and clear the OOB for DCVS to trigger the GMU */
+ a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);
+
+ ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
+ if (ret)
+ dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
+
+ dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
+}
+
+unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+
+ return gmu->freq;
+}
+
+static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
+{
+ u32 val;
+ int local = gmu->idle_level;
+
+ /* SPTP and IFPC both report as IFPC */
+ if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
+ local = GMU_IDLE_STATE_IFPC;
+
+ val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+
+ if (val == local) {
+ if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
+ !a6xx_gmu_gx_is_on(gmu))
+ return true;
+ }
+
+ return false;
+}
+
+/* Wait for the GMU to get to its most idle state */
+int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
+{
+ return spin_until(a6xx_gmu_check_idle_level(gmu));
+}
+
+static int a6xx_gmu_start(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+ u32 mask, reset_val;
+
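+	/*
+	 * The word near the end of DTCM appears to hold the firmware version:
+	 * firmware <= 0x20010004 signals successful init with the 0xbabeface
+	 * magic, while newer firmware sets bit 8 of a 9-bit status field.
+	 */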
+ val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
+ if (val <= 0x20010004) {
+ mask = 0xffffffff;
+ reset_val = 0xbabeface;
+ } else {
+ mask = 0x1ff;
+ reset_val = 0x100;
+ }
+
+ gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
+
+	/*
+	 * Set the log wptr index.
+	 * Note: downstream saves the value at poweroff and restores it here.
+	 */
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
+
+ gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
+ (val & mask) == reset_val, 100, 10000);
+
+ if (ret)
+ DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
+
+ return ret;
+}
+
+static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
+{
+ u32 val;
+ int ret;
+
+ gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
+ val & 1, 100, 10000);
+ if (ret)
+ DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");
+
+ return ret;
+}
+
+struct a6xx_gmu_oob_bits {
+ int set, ack, set_new, ack_new, clear, clear_new;
+ const char *name;
+};
+
+/*
+ * These are the interrupt / ack bits for each OOB request that are set
+ * in a6xx_gmu_set_oob() and a6xx_gmu_clear_oob()
+ */
+static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
+ [GMU_OOB_GPU_SET] = {
+ .name = "GPU_SET",
+ .set = 16,
+ .ack = 24,
+ .set_new = 30,
+ .ack_new = 31,
+ .clear = 24,
+ .clear_new = 31,
+ },
+
+ [GMU_OOB_PERFCOUNTER_SET] = {
+ .name = "PERFCOUNTER",
+ .set = 17,
+ .ack = 25,
+ .set_new = 28,
+ .ack_new = 30,
+ .clear = 25,
+ .clear_new = 29,
+ },
+
+ [GMU_OOB_BOOT_SLUMBER] = {
+ .name = "BOOT_SLUMBER",
+ .set = 22,
+ .ack = 30,
+ .clear = 30,
+ },
+
+ [GMU_OOB_DCVS_SET] = {
+ .name = "GPU_DCVS",
+ .set = 23,
+ .ack = 31,
+ .clear = 31,
+ },
+};
+
+/* Trigger an OOB (out of band) request to the GMU */
+int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+{
+ int ret;
+ u32 val;
+ int request, ack;
+
+ WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+
+ if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
+ return -EINVAL;
+
+ if (gmu->legacy) {
+ request = a6xx_gmu_oob_bits[state].set;
+ ack = a6xx_gmu_oob_bits[state].ack;
+ } else {
+ request = a6xx_gmu_oob_bits[state].set_new;
+ ack = a6xx_gmu_oob_bits[state].ack_new;
+ if (!request || !ack) {
+ DRM_DEV_ERROR(gmu->dev,
+ "Invalid non-legacy GMU request %s\n",
+ a6xx_gmu_oob_bits[state].name);
+ return -EINVAL;
+ }
+ }
+
+	/* Trigger the requested OOB operation */
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
+
+ /* Wait for the acknowledge interrupt */
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
+ val & (1 << ack), 100, 10000);
+
+ if (ret)
+ DRM_DEV_ERROR(gmu->dev,
+ "Timeout waiting for GMU OOB set %s: 0x%x\n",
+ a6xx_gmu_oob_bits[state].name,
+ gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
+
+ /* Clear the acknowledge interrupt */
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);
+
+ return ret;
+}
+
+/* Clear a pending OOB state in the GMU */
+void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+{
+ int bit;
+
+ WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+
+ if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
+ return;
+
+ if (gmu->legacy)
+ bit = a6xx_gmu_oob_bits[state].clear;
+ else
+ bit = a6xx_gmu_oob_bits[state].clear_new;
+
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
+}
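+
+/*
+ * A minimal usage sketch (illustrative; it mirrors how callers elsewhere
+ * in the driver use the OOB interface). Requests are made with gmu->lock
+ * held and paired with a clear once the CPU is done:
+ *
+ *	mutex_lock(&gmu->lock);
+ *	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_PERFCOUNTER_SET);
+ *	if (!ret) {
+ *		... access the protected registers ...
+ *		a6xx_gmu_clear_oob(gmu, GMU_OOB_PERFCOUNTER_SET);
+ *	}
+ *	mutex_unlock(&gmu->lock);
+ */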
+
+/* Enable CPU control of SPTP power collapse */
+static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+
+ if (!gmu->legacy)
+ return 0;
+
+ gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
+ (val & 0x38) == 0x28, 1, 100);
+
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
+ gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
+ }
+
+ return 0;
+}
+
+/* Disable CPU control of SPTP power collapse */
+static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
+{
+ u32 val;
+ int ret;
+
+ if (!gmu->legacy)
+ return;
+
+ /* Make sure retention is on */
+ gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));
+
+ gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
+ (val & 0x04), 100, 10000);
+
+ if (ret)
+ DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
+ gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
+}
+
+/* Let the GMU know we are starting a boot sequence */
+static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
+{
+ u32 vote;
+
+ /* Let the GMU know we are getting ready for boot */
+ gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);
+
+ /* Choose the "default" power level as the highest available */
+ vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];
+
+ gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
+ gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);
+
+ /* Let the GMU know the boot sequence has started */
+ return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+}
+
+/* Let the GMU know that we are about to go into slumber */
+static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
+{
+ int ret;
+
+ /* Disable the power counter so the GMU isn't busy */
+ gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
+
+ /* Disable SPTP_PC if the CPU is responsible for it */
+ if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
+ a6xx_sptprac_disable(gmu);
+
+ if (!gmu->legacy) {
+ ret = a6xx_hfi_send_prep_slumber(gmu);
+ goto out;
+ }
+
+ /* Tell the GMU to get ready to slumber */
+ gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);
+
+ ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+
+ if (!ret) {
+ /* Check to see if the GMU really did slumber */
+ if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
+ != 0x0f) {
+ DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
+ ret = -ETIMEDOUT;
+ }
+ }
+
+out:
+ /* Put fence into allow mode */
+ gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+ return ret;
+}
+
+static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
+ /* Wait for the register to finish posting */
+ wmb();
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
+ val & (1 << 1), 100, 10000);
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
+ return ret;
+ }
+
+ ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
+ !val, 100, 10000);
+
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
+ return ret;
+ }
+
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+
+ /* Set up CX GMU counter 0 to count busy ticks */
+ gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
+ gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);
+
+ /* Enable the power counter */
+ gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
+ return 0;
+}
+
+static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
+
+ ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
+ val, val & (1 << 16), 100, 10000);
+ if (ret)
+ DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
+
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+}
+
+static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
+{
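+	/* offset is in dword units; convert it to a byte offset */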
+ msm_writel(value, ptr + (offset << 2));
+}
+
+static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
+ const char *name);
+
+static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct platform_device *pdev = to_platform_device(gmu->dev);
+ void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
+ void __iomem *seqptr = NULL;
+ uint32_t pdc_address_offset;
+ bool pdc_in_aop = false;
+
+ if (IS_ERR(pdcptr))
+ goto err;
+
+ if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
+ pdc_in_aop = true;
+ else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
+ pdc_address_offset = 0x30090;
+ else if (adreno_is_a619(adreno_gpu))
+ pdc_address_offset = 0x300a0;
+ else
+ pdc_address_offset = 0x30080;
+
+ if (!pdc_in_aop) {
+ seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
+ if (IS_ERR(seqptr))
+ goto err;
+ }
+
+ /* Disable SDE clock gating */
+ gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
+
+ /* Setup RSC PDC handshake for sleep and wakeup */
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
+
+ /* Load RSC sequencer uCode for sleep and wakeup */
+ if (adreno_is_a650_family(adreno_gpu)) {
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad);
+ } else {
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
+ }
+
+ if (pdc_in_aop)
+ goto setup_pdc;
+
+ /* Load PDC sequencer uCode for power up and power down sequence */
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
+
+ /* Set TCS commands used by PDC sequence for low power modes */
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
+
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
+
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
+
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
+ if (adreno_is_a618(adreno_gpu) || adreno_is_a619(adreno_gpu) ||
+ adreno_is_a650_family(adreno_gpu))
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
+ else
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
+
+ /* Setup GPU PDC */
+setup_pdc:
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
+
+	/* Make sure the uCode is fully written before anything else happens */
+ wmb();
+
+err:
+ if (!IS_ERR_OR_NULL(pdcptr))
+ iounmap(pdcptr);
+ if (!IS_ERR_OR_NULL(seqptr))
+ iounmap(seqptr);
+}
+
+/*
+ * The lowest 16 bits of this value are the number of XO clock cycles for the
+ * main hysteresis, which is set at 0x1680 cycles (300 us). The higher 16 bits
+ * are for the shorter hysteresis that happens after the main one - this is
+ * 0xa (0.5 us)
+ */
+
+#define GMU_PWR_COL_HYST 0x000a1680
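+
+/*
+ * Worked out (assuming the usual 19.2 MHz XO clock on these SoCs):
+ * 0x1680 = 5760 cycles / 19.2 MHz = 300 us for the main hysteresis, and
+ * 0xa = 10 cycles / 19.2 MHz = ~0.5 us for the short one.
+ */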
+
+/* Set up the idle state for the GMU */
+static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
+{
+ /* Disable GMU WB/RB buffer */
+ gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
+ gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
+ gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);
+
+ gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
+
+ switch (gmu->idle_level) {
+ case GMU_IDLE_STATE_IFPC:
+ gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
+ GMU_PWR_COL_HYST);
+ gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
+ fallthrough;
+ case GMU_IDLE_STATE_SPTP:
+ gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
+ GMU_PWR_COL_HYST);
+ gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
+ }
+
+ /* Enable RPMh GPU client */
+ gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
+ A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
+}
+
+struct block_header {
+ u32 addr;
+ u32 size;
+ u32 type;
+ u32 value;
+ u32 data[];
+};
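+
+/*
+ * The non-legacy GMU firmware image is a sequence of these headers, each
+ * immediately followed by 'size' bytes of payload in data[]; the loader
+ * below steps to the next block via &blk->data[blk->size >> 2].
+ */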
+
+/* this should be a general kernel helper */
+static int in_range(u32 addr, u32 start, u32 size)
+{
+ return addr >= start && addr < start + size;
+}
+
+static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
+{
+ if (!in_range(blk->addr, bo->iova, bo->size))
+ return false;
+
+ memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size);
+ return true;
+}
+
+static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
+ const struct block_header *blk;
+ u32 reg_offset;
+
+ u32 itcm_base = 0x00000000;
+ u32 dtcm_base = 0x00040000;
+
+ if (adreno_is_a650_family(adreno_gpu))
+ dtcm_base = 0x10004000;
+
+ if (gmu->legacy) {
+ /* Sanity check the size of the firmware that was loaded */
+ if (fw_image->size > 0x8000) {
+ DRM_DEV_ERROR(gmu->dev,
+ "GMU firmware is bigger than the available region\n");
+ return -EINVAL;
+ }
+
+ gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START,
+ (u32*) fw_image->data, fw_image->size);
+ return 0;
+ }
+
+
+ for (blk = (const struct block_header *) fw_image->data;
+ (const u8*) blk < fw_image->data + fw_image->size;
+ blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
+ if (blk->size == 0)
+ continue;
+
+ if (in_range(blk->addr, itcm_base, SZ_16K)) {
+ reg_offset = (blk->addr - itcm_base) >> 2;
+ gmu_write_bulk(gmu,
+ REG_A6XX_GMU_CM3_ITCM_START + reg_offset,
+ blk->data, blk->size);
+ } else if (in_range(blk->addr, dtcm_base, SZ_16K)) {
+ reg_offset = (blk->addr - dtcm_base) >> 2;
+ gmu_write_bulk(gmu,
+ REG_A6XX_GMU_CM3_DTCM_START + reg_offset,
+ blk->data, blk->size);
+ } else if (!fw_block_mem(&gmu->icache, blk) &&
+ !fw_block_mem(&gmu->dcache, blk) &&
+ !fw_block_mem(&gmu->dummy, blk)) {
+ DRM_DEV_ERROR(gmu->dev,
+ "failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n",
+ blk->addr, blk->size, blk->data[0]);
+ }
+ }
+
+ return 0;
+}
+
+static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
+{
+ static bool rpmh_init;
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ int ret;
+ u32 chipid;
+
+ if (adreno_is_a650_family(adreno_gpu)) {
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
+ }
+
+ if (state == GMU_WARM_BOOT) {
+ ret = a6xx_rpmh_start(gmu);
+ if (ret)
+ return ret;
+ } else {
+ if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
+ "GMU firmware is not loaded\n"))
+ return -ENOENT;
+
+ /* Turn on register retention */
+ gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
+
+ /* We only need to load the RPMh microcode once */
+ if (!rpmh_init) {
+ a6xx_gmu_rpmh_init(gmu);
+ rpmh_init = true;
+ } else {
+ ret = a6xx_rpmh_start(gmu);
+ if (ret)
+ return ret;
+ }
+
+ ret = a6xx_gmu_fw_load(gmu);
+ if (ret)
+ return ret;
+ }
+
+ gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
+ gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
+
+ /* Write the iova of the HFI table */
+ gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
+ gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
+
+ gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
+ (1 << 31) | (0xa << 18) | (0xa0));
+
+ chipid = adreno_gpu->rev.core << 24;
+ chipid |= adreno_gpu->rev.major << 16;
+ chipid |= adreno_gpu->rev.minor << 12;
+ chipid |= adreno_gpu->rev.patchid << 8;
+
+ gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
+
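+	/* Pass the GMU log buffer: its iova with (size in 4K pages - 1) in the low bits */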
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
+ gmu->log.iova | (gmu->log.size / SZ_4K - 1));
+
+ /* Set up the lowest idle level on the GMU */
+ a6xx_gmu_power_config(gmu);
+
+ ret = a6xx_gmu_start(gmu);
+ if (ret)
+ return ret;
+
+ if (gmu->legacy) {
+ ret = a6xx_gmu_gfx_rail_on(gmu);
+ if (ret)
+ return ret;
+ }
+
+ /* Enable SPTP_PC if the CPU is responsible for it */
+ if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
+ ret = a6xx_sptprac_enable(gmu);
+ if (ret)
+ return ret;
+ }
+
+ ret = a6xx_gmu_hfi_start(gmu);
+ if (ret)
+ return ret;
+
+ /* FIXME: Do we need this wmb() here? */
+ wmb();
+
+ return 0;
+}
+
+#define A6XX_HFI_IRQ_MASK \
+ (A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
+
+#define A6XX_GMU_IRQ_MASK \
+ (A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
+ A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
+ A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
+
+static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
+{
+ disable_irq(gmu->gmu_irq);
+ disable_irq(gmu->hfi_irq);
+
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
+}
+
+static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
+{
+ u32 val;
+
+ /* Make sure there are no outstanding RPMh votes */
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
+ (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
+ (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
+ (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
+ (val & 1), 100, 1000);
+}
+
+#define GBIF_CLIENT_HALT_MASK BIT(0)
+#define GBIF_ARB_HALT_MASK BIT(1)
+
+static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu,
+ bool gx_off)
+{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ if (!a6xx_has_gbif(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+ spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
+ 0xf) == 0xf);
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ return;
+ }
+
+ if (gx_off) {
+ /* Halt the gx side of GBIF */
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
+ spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
+ }
+
+ /* Halt new client requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
+
+ /* Halt all AXI requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
+
+ /* The GBIF halt needs to be explicitly cleared */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+}
+
+/* Force the GMU off in case it isn't responsive */
+static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ /* Flush all the queues */
+ a6xx_hfi_stop(gmu);
+
+ /* Stop the interrupts */
+ a6xx_gmu_irq_disable(gmu);
+
+ /* Force off SPTP in case the GMU is managing it */
+ a6xx_sptprac_disable(gmu);
+
+ /* Make sure there are no outstanding RPMh votes */
+ a6xx_gmu_rpmh_off(gmu);
+
+ /* Halt the gmu cm3 core */
+ gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
+
+ a6xx_bus_clear_pending_transactions(adreno_gpu, true);
+
+ /* Reset GPU core blocks */
+ gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, 1);
+ udelay(100);
+}
+
+static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
+{
+ struct dev_pm_opp *gpu_opp;
+ unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
+
+ gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
+ if (IS_ERR(gpu_opp))
+ return;
+
+ gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */
+ a6xx_gmu_set_freq(gpu, gpu_opp, false);
+ dev_pm_opp_put(gpu_opp);
+}
+
+static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
+{
+ struct dev_pm_opp *gpu_opp;
+ unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
+
+ gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
+ if (IS_ERR(gpu_opp))
+ return;
+
+ dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp);
+ dev_pm_opp_put(gpu_opp);
+}
+
+int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
+{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int status, ret;
+
+ if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
+ return 0;
+
+ gmu->hung = false;
+
+ /* Turn on the resources */
+ pm_runtime_get_sync(gmu->dev);
+
+ /*
+ * "enable" the GX power domain which won't actually do anything but it
+ * will make sure that the refcounting is correct in case we need to
+ * bring down the GX after a GMU failure
+ */
+ if (!IS_ERR_OR_NULL(gmu->gxpd))
+ pm_runtime_get_sync(gmu->gxpd);
+
+ /* Use a known rate to bring up the GMU */
+ clk_set_rate(gmu->core_clk, 200000000);
+ clk_set_rate(gmu->hub_clk, 150000000);
+ ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
+ if (ret) {
+ pm_runtime_put(gmu->gxpd);
+ pm_runtime_put(gmu->dev);
+ return ret;
+ }
+
+ /* Set the bus quota to a reasonable value for boot */
+ a6xx_gmu_set_initial_bw(gpu, gmu);
+
+ /* Enable the GMU interrupt */
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
+ enable_irq(gmu->gmu_irq);
+
+ /* Check to see if we are doing a cold or warm boot */
+ status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
+ GMU_WARM_BOOT : GMU_COLD_BOOT;
+
+ /*
+ * The warm boot path does not work on newer GPUs, presumably because
+ * the icache/dcache regions must be restored
+ */
+ if (!gmu->legacy)
+ status = GMU_COLD_BOOT;
+
+ ret = a6xx_gmu_fw_start(gmu, status);
+ if (ret)
+ goto out;
+
+ ret = a6xx_hfi_start(gmu, status);
+ if (ret)
+ goto out;
+
+ /*
+ * Turn on the GMU firmware fault interrupt after we know the boot
+ * sequence is successful
+ */
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
+ enable_irq(gmu->hfi_irq);
+
+ /* Set the GPU to the current freq */
+ a6xx_gmu_set_initial_freq(gpu, gmu);
+
+out:
+ /* On failure, shut down the GMU to leave it in a good state */
+ if (ret) {
+ disable_irq(gmu->gmu_irq);
+ a6xx_rpmh_stop(gmu);
+ pm_runtime_put(gmu->gxpd);
+ pm_runtime_put(gmu->dev);
+ }
+
+ return ret;
+}
+
+bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
+{
+ u32 reg;
+
+ if (!gmu->initialized)
+ return true;
+
+ reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);
+
+ if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
+ return false;
+
+ return true;
+}
+
+/* Gracefully try to shut down the GMU and by extension the GPU */
+static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ u32 val;
+
+ /*
+ * The GMU may still be in slumber if the GPU never started, so check
+ * and skip putting it back into slumber in that case
+ */
+ val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+
+ if (val != 0xf) {
+ int ret = a6xx_gmu_wait_for_idle(gmu);
+
+ /* If the GMU isn't responding assume it is hung */
+ if (ret) {
+ a6xx_gmu_force_off(gmu);
+ return;
+ }
+
+ a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
+
+ /* Tell the GMU we want to slumber */
+ ret = a6xx_gmu_notify_slumber(gmu);
+ if (ret) {
+ a6xx_gmu_force_off(gmu);
+ return;
+ }
+
+ ret = gmu_poll_timeout(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
+ !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
+ 100, 10000);
+
+ /*
+ * Let the user know we failed to slumber but don't worry too
+ * much because we are powering down anyway
+ */
+
+ if (ret)
+ DRM_DEV_ERROR(gmu->dev,
+ "Unable to slumber GMU: status = 0%x/0%x\n",
+ gmu_read(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
+ gmu_read(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
+ }
+
+ /* Turn off HFI */
+ a6xx_hfi_stop(gmu);
+
+ /* Stop the interrupts and mask the hardware */
+ a6xx_gmu_irq_disable(gmu);
+
+ /* Tell RPMh to power off the GPU */
+ a6xx_rpmh_stop(gmu);
+}
+
+int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ struct msm_gpu *gpu = &a6xx_gpu->base.base;
+
+ if (!pm_runtime_active(gmu->dev))
+ return 0;
+
+ /*
+ * Force the GMU off if we detected a hang, otherwise try to shut it
+ * down gracefully
+ */
+ if (gmu->hung)
+ a6xx_gmu_force_off(gmu);
+ else
+ a6xx_gmu_shutdown(gmu);
+
+ /* Remove the bus vote */
+ dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
+
+ /*
+ * Make sure the GX domain is off before turning off the GMU (CX)
+ * domain. Usually the GMU does this but only if the shutdown sequence
+ * was successful
+ */
+ if (!IS_ERR_OR_NULL(gmu->gxpd))
+ pm_runtime_put_sync(gmu->gxpd);
+
+ clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
+
+ pm_runtime_put_sync(gmu->dev);
+
+ return 0;
+}
+
+static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
+{
+ msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace);
+ msm_gem_kernel_put(gmu->debug.obj, gmu->aspace);
+ msm_gem_kernel_put(gmu->icache.obj, gmu->aspace);
+ msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace);
+ msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace);
+ msm_gem_kernel_put(gmu->log.obj, gmu->aspace);
+
+ gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
+ msm_gem_address_space_put(gmu->aspace);
+}
+
+static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
+ size_t size, u64 iova, const char *name)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct drm_device *dev = a6xx_gpu->base.base.dev;
+ uint32_t flags = MSM_BO_WC;
+ u64 range_start, range_end;
+ int ret;
+
+ size = PAGE_ALIGN(size);
+ if (!iova) {
+ /* no fixed address - use GMU's uncached range */
+ range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */
+ range_end = 0x80000000;
+ } else {
+ /* range for fixed address */
+ range_start = iova;
+ range_end = iova + size;
+ /* use IOMMU_PRIV for icache/dcache */
+ flags |= MSM_BO_MAP_PRIV;
+ }
+
+ bo->obj = msm_gem_new(dev, size, flags);
+ if (IS_ERR(bo->obj))
+ return PTR_ERR(bo->obj);
+
+ ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
+ range_start, range_end);
+ if (ret) {
+ drm_gem_object_put(bo->obj);
+ return ret;
+ }
+
+ bo->virt = msm_gem_get_vaddr(bo->obj);
+ bo->size = size;
+
+ msm_gem_object_set_name(bo->obj, name);
+
+ return 0;
+}
+
+static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
+{
+ struct iommu_domain *domain;
+ struct msm_mmu *mmu;
+
+ domain = iommu_domain_alloc(&platform_bus_type);
+ if (!domain)
+ return -ENODEV;
+
+ mmu = msm_iommu_new(gmu->dev, domain);
+ gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
+ if (IS_ERR(gmu->aspace)) {
+ iommu_domain_free(domain);
+ return PTR_ERR(gmu->aspace);
+ }
+
+ return 0;
+}
+
+/* Return the 'arc-level' for the given frequency */
+static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
+ unsigned long freq)
+{
+ struct dev_pm_opp *opp;
+ unsigned int val;
+
+ if (!freq)
+ return 0;
+
+ opp = dev_pm_opp_find_freq_exact(dev, freq, true);
+ if (IS_ERR(opp))
+ return 0;
+
+ val = dev_pm_opp_get_level(opp);
+
+ dev_pm_opp_put(opp);
+
+ return val;
+}
+
+static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
+ unsigned long *freqs, int freqs_count, const char *id)
+{
+ int i, j;
+ const u16 *pri, *sec;
+ size_t pri_count, sec_count;
+
+ pri = cmd_db_read_aux_data(id, &pri_count);
+ if (IS_ERR(pri))
+ return PTR_ERR(pri);
+ /*
+ * The data comes back as an array of unsigned shorts so adjust the
+ * count accordingly
+ */
+ pri_count >>= 1;
+ if (!pri_count)
+ return -EINVAL;
+
+ sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
+ if (IS_ERR(sec))
+ return PTR_ERR(sec);
+
+ sec_count >>= 1;
+ if (!sec_count)
+ return -EINVAL;
+
+ /* Construct a vote for each frequency */
+ for (i = 0; i < freqs_count; i++) {
+ u8 pindex = 0, sindex = 0;
+ unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);
+
+ /* Get the primary index that matches the arc level */
+ for (j = 0; j < pri_count; j++) {
+ if (pri[j] >= level) {
+ pindex = j;
+ break;
+ }
+ }
+
+ if (j == pri_count) {
+ DRM_DEV_ERROR(dev,
+ "Level %u not found in the RPMh list\n",
+ level);
+ DRM_DEV_ERROR(dev, "Available levels:\n");
+ for (j = 0; j < pri_count; j++)
+ DRM_DEV_ERROR(dev, " %u\n", pri[j]);
+
+ return -EINVAL;
+ }
+
+ /*
+ * Look for a level in the secondary list that matches. If
+ * nothing fits, use the maximum nonzero vote
+ */
+
+ for (j = 0; j < sec_count; j++) {
+ if (sec[j] >= level) {
+ sindex = j;
+ break;
+ } else if (sec[j]) {
+ sindex = j;
+ }
+ }
+
+ /* Construct the vote */
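+ /*
+ * Layout: bits 31:16 hold the primary level value, bits 15:8 the
+ * secondary (mx) table index and bits 7:0 the primary table index.
+ */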
+ votes[i] = ((pri[pindex] & 0xffff) << 16) |
+ (sindex << 8) | pindex;
+ }
+
+ return 0;
+}
+
+/*
+ * The GMU votes with RPMh for itself and on behalf of the GPU, but we need
+ * to construct the list of votes on the CPU and send it over. Query the RPMh
+ * voltage levels and build the votes
+ */
+
+static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ int ret;
+
+ /* Build the GX votes */
+ ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
+ gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");
+
+ /* Build the CX votes */
+ ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
+ gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");
+
+ return ret;
+}
+
+static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
+ u32 size)
+{
+ int count = dev_pm_opp_get_opp_count(dev);
+ struct dev_pm_opp *opp;
+ int i, index = 0;
+ unsigned long freq = 1;
+
+ /*
+ * The OPP table doesn't contain the "off" frequency level so we need to
+ * add 1 to the table size to account for it
+ */
+
+ if (WARN(count + 1 > size,
+ "The GMU frequency table is being truncated\n"))
+ count = size - 1;
+
+ /* Set the "off" frequency */
+ freqs[index++] = 0;
+
+ for (i = 0; i < count; i++) {
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp))
+ break;
+
+ dev_pm_opp_put(opp);
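+ /*
+ * find_freq_ceil() wrote the matched rate back into freq; record
+ * it, then bump by 1 Hz so the next lookup finds the next level.
+ */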
+ freqs[index++] = freq++;
+ }
+
+ return index;
+}
+
+static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ int ret = 0;
+
+ /*
+ * The GMU handles its own frequency switching so build a list of
+ * available frequencies to send during initialization
+ */
+ ret = devm_pm_opp_of_add_table(gmu->dev);
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
+ return ret;
+ }
+
+ gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
+ gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));
+
+ /*
+ * The GMU also handles GPU frequency switching so build a list
+ * from the GPU OPP table
+ */
+ gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
+ gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));
+
+ gmu->current_perf_index = gmu->nr_gpu_freqs - 1;
+
+ /* Build the list of RPMh votes that we'll send to the GMU */
+ return a6xx_gmu_rpmh_votes_init(gmu);
+}
+
+static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
+{
+ int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
+
+ if (ret < 1)
+ return ret;
+
+ gmu->nr_clocks = ret;
+
+ gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
+ gmu->nr_clocks, "gmu");
+
+ gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks,
+ gmu->nr_clocks, "hub");
+
+ return 0;
+}
+
+static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
+ const char *name)
+{
+ void __iomem *ret;
+ struct resource *res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, name);
+
+ if (!res) {
+ DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = ioremap(res->start, resource_size(res));
+ if (!ret) {
+ DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return ret;
+}
+
+static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
+ const char *name, irq_handler_t handler)
+{
+ int irq, ret;
+
+ irq = platform_get_irq_byname(pdev, name);
+
+ ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
+ name, ret);
+ return ret;
+ }
+
+ disable_irq(irq);
+
+ return irq;
+}
+
+void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ struct platform_device *pdev = to_platform_device(gmu->dev);
+
+ if (!gmu->initialized)
+ return;
+
+ pm_runtime_force_suspend(gmu->dev);
+
+ if (!IS_ERR_OR_NULL(gmu->gxpd)) {
+ pm_runtime_disable(gmu->gxpd);
+ dev_pm_domain_detach(gmu->gxpd, false);
+ }
+
+ iounmap(gmu->mmio);
+ if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
+ iounmap(gmu->rscc);
+ gmu->mmio = NULL;
+ gmu->rscc = NULL;
+
+ a6xx_gmu_memory_free(gmu);
+
+ free_irq(gmu->gmu_irq, gmu);
+ free_irq(gmu->hfi_irq, gmu);
+
+ /* Drop reference taken in of_find_device_by_node */
+ put_device(gmu->dev);
+
+ gmu->initialized = false;
+}
+
+int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
+{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ struct platform_device *pdev = of_find_device_by_node(node);
+ int ret;
+
+ if (!pdev)
+ return -ENODEV;
+
+ mutex_init(&gmu->lock);
+
+ gmu->dev = &pdev->dev;
+
+ of_dma_configure(gmu->dev, node, true);
+
+ /* For now, don't do anything fancy until we get our feet under us */
+ gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
+
+ pm_runtime_enable(gmu->dev);
+
+ /* Get the list of clocks */
+ ret = a6xx_gmu_clocks_probe(gmu);
+ if (ret)
+ goto err_put_device;
+
+ ret = a6xx_gmu_memory_probe(gmu);
+ if (ret)
+ goto err_put_device;
+
+ /* A660 now requires handling "prealloc requests" in GMU firmware
+ * For now just hardcode allocations based on the known firmware.
+ * note: there is no indication that these correspond to "dummy" or
+ * "debug" regions, but this "guess" allows reusing these BOs which
+ * are otherwise unused by a660.
+ */
+ gmu->dummy.size = SZ_4K;
+ if (adreno_is_a660_family(adreno_gpu)) {
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
+ 0x60400000, "debug");
+ if (ret)
+ goto err_memory;
+
+ gmu->dummy.size = SZ_8K;
+ }
+
+ /* Allocate memory for the GMU dummy page */
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size,
+ 0x60000000, "dummy");
+ if (ret)
+ goto err_memory;
+
+ /* Note that a650 family also includes a660 family: */
+ if (adreno_is_a650_family(adreno_gpu)) {
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
+ SZ_16M - SZ_16K, 0x04000, "icache");
+ if (ret)
+ goto err_memory;
+ /*
+ * NOTE: when porting legacy ("pre-650-family") GPUs you may be tempted to add a condition
+ * to allocate icache/dcache here, as per downstream code flow, but it may not actually be
+ * necessary. If you omit this step and you don't get random pagefaults, you are likely
+ * good to go without this!
+ */
+ } else if (adreno_is_a640_family(adreno_gpu)) {
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
+ SZ_256K - SZ_16K, 0x04000, "icache");
+ if (ret)
+ goto err_memory;
+
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
+ SZ_256K - SZ_16K, 0x44000, "dcache");
+ if (ret)
+ goto err_memory;
+ } else if (adreno_is_a630(adreno_gpu) || adreno_is_a615_family(adreno_gpu)) {
+ /* HFI v1, has sptprac */
+ gmu->legacy = true;
+
+ /* Allocate memory for the GMU debug region */
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug");
+ if (ret)
+ goto err_memory;
+ }
+
+ /* Allocate memory for the HFI queues */
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi");
+ if (ret)
+ goto err_memory;
+
+ /* Allocate memory for the GMU log region */
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0, "log");
+ if (ret)
+ goto err_memory;
+
+ /* Map the GMU registers */
+ gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
+ if (IS_ERR(gmu->mmio)) {
+ ret = PTR_ERR(gmu->mmio);
+ goto err_memory;
+ }
+
+ if (adreno_is_a650_family(adreno_gpu)) {
+ gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
+ if (IS_ERR(gmu->rscc))
+ goto err_mmio;
+ } else {
+ gmu->rscc = gmu->mmio + 0x23000;
+ }
+
+ /* Get the HFI and GMU interrupts */
+ gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
+ gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
+
+ if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
+ goto err_mmio;
+
+ /*
+ * Get a link to the GX power domain to reset the GPU in case of GMU
+ * crash
+ */
+ gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
+
+ /* Get the power levels for the GMU and GPU */
+ a6xx_gmu_pwrlevels_probe(gmu);
+
+ /* Set up the HFI queues */
+ a6xx_hfi_init(gmu);
+
+ gmu->initialized = true;
+
+ return 0;
+
+err_mmio:
+ iounmap(gmu->mmio);
+ if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
+ iounmap(gmu->rscc);
+ free_irq(gmu->gmu_irq, gmu);
+ free_irq(gmu->hfi_irq, gmu);
+
+ ret = -ENODEV;
+
+err_memory:
+ a6xx_gmu_memory_free(gmu);
+err_put_device:
+ /* Drop reference taken in of_find_device_by_node */
+ put_device(gmu->dev);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
new file mode 100644
index 000000000..e034935b3
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
+
+#ifndef _A6XX_GMU_H_
+#define _A6XX_GMU_H_
+
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include "msm_drv.h"
+#include "a6xx_hfi.h"
+
+struct a6xx_gmu_bo {
+ struct drm_gem_object *obj;
+ void *virt;
+ size_t size;
+ u64 iova;
+};
+
+/*
+ * These define the different GMU wake up options - i.e. how both the CPU
+ * and the GMU bring up the hardware
+ */
+
+/* The GMU has already been booted and the retention registers are active */
+#define GMU_WARM_BOOT 0
+
+/* The GMU is coming up for the first time or back from a power collapse */
+#define GMU_COLD_BOOT 1
+
+/*
+ * These define the level of control that the GMU has - the higher the number
+ * the more things that the GMU hardware controls on its own.
+ */
+
+/* The GMU does not do any idle state management */
+#define GMU_IDLE_STATE_ACTIVE 0
+
+/* The GMU manages SPTP power collapse */
+#define GMU_IDLE_STATE_SPTP 2
+
+/* The GMU does automatic IFPC (intra-frame power collapse) */
+#define GMU_IDLE_STATE_IFPC 3
+
+struct a6xx_gmu {
+ struct device *dev;
+
+ /* For serializing communication with the GMU: */
+ struct mutex lock;
+
+ struct msm_gem_address_space *aspace;
+
+ void __iomem *mmio;
+ void __iomem *rscc;
+
+ int hfi_irq;
+ int gmu_irq;
+
+ struct device *gxpd;
+
+ int idle_level;
+
+ struct a6xx_gmu_bo hfi;
+ struct a6xx_gmu_bo debug;
+ struct a6xx_gmu_bo icache;
+ struct a6xx_gmu_bo dcache;
+ struct a6xx_gmu_bo dummy;
+ struct a6xx_gmu_bo log;
+
+ int nr_clocks;
+ struct clk_bulk_data *clocks;
+ struct clk *core_clk;
+ struct clk *hub_clk;
+
+ /* current performance index set externally */
+ int current_perf_index;
+
+ int nr_gpu_freqs;
+ unsigned long gpu_freqs[16];
+ u32 gx_arc_votes[16];
+
+ int nr_gmu_freqs;
+ unsigned long gmu_freqs[4];
+ u32 cx_arc_votes[4];
+
+ unsigned long freq;
+
+ struct a6xx_hfi_queue queues[2];
+
+ bool initialized;
+ bool hung;
+ bool legacy; /* a618 or a630 */
+};
+
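+/* Register offsets are in units of dwords, so convert to a byte offset */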
+static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
+{
+ return msm_readl(gmu->mmio + (offset << 2));
+}
+
+static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
+{
+ msm_writel(value, gmu->mmio + (offset << 2));
+}
+
+static inline void
+gmu_write_bulk(struct a6xx_gmu *gmu, u32 offset, const u32 *data, u32 size)
+{
+ memcpy_toio(gmu->mmio + (offset << 2), data, size);
+ wmb();
+}
+
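+/* Read-modify-write: clear the bits in @mask, then OR in @or */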
+static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
+{
+ u32 val = gmu_read(gmu, reg);
+
+ val &= ~mask;
+
+ gmu_write(gmu, reg, val | or);
+}
+
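+/* Read a 64-bit register pair as two 32-bit halves, low dword first */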
+static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
+{
+ u64 val;
+
+ val = (u64) msm_readl(gmu->mmio + (lo << 2));
+ val |= ((u64) msm_readl(gmu->mmio + (hi << 2)) << 32);
+
+ return val;
+}
+
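+/*
+ * Poll a GMU register until @cond is true; @interval and @timeout are in
+ * microseconds, as with readl_poll_timeout()
+ */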
+#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
+ interval, timeout)
+
+static inline u32 gmu_read_rscc(struct a6xx_gmu *gmu, u32 offset)
+{
+ return msm_readl(gmu->rscc + (offset << 2));
+}
+
+static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value)
+{
+ msm_writel(value, gmu->rscc + (offset << 2));
+}
+
+#define gmu_poll_timeout_rscc(gmu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout((gmu)->rscc + ((addr) << 2), val, cond, \
+ interval, timeout)
+
+/*
+ * These are the available OOB (out of band requests) to the GMU where "out of
+ * band" means that the CPU talks to the GMU directly and not through HFI.
+ * Normally this works by writing an ITCM/DTCM register and then triggering an
+ * interrupt (the "request" bit) and waiting for an acknowledgment (the "ack"
+ * bit). The state is cleared by writing the "clear" bit to the GMU interrupt.
+ *
+ * These are used to force the GMU/GPU to stay on during a critical sequence or
+ * for hardware workarounds.
+ */
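+
+/*
+ * A request roughly follows this pattern (see a6xx_gmu_set_oob() in
+ * a6xx_gmu.c for the actual implementation):
+ *
+ *	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
+ *	gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
+ *		val & (1 << ack), 100, 10000);
+ *	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);
+ */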
+
+enum a6xx_gmu_oob_state {
+ /*
+ * Let the GMU know that a boot or slumber operation has started. The value in
+ * REG_A6XX_GMU_BOOT_SLUMBER_OPTION lets the GMU know which operation we are
+ * doing
+ */
+ GMU_OOB_BOOT_SLUMBER = 0,
+ /*
+ * Let the GMU know to not turn off any GPU registers while the CPU is in a
+ * critical section
+ */
+ GMU_OOB_GPU_SET,
+ /*
+ * Set a new power level for the GPU when the CPU is doing frequency scaling
+ */
+ GMU_OOB_DCVS_SET,
+ /*
+ * Used to keep the GPU on for CPU-side reads of performance counters.
+ */
+ GMU_OOB_PERFCOUNTER_SET,
+};
+
+void a6xx_hfi_init(struct a6xx_gmu *gmu);
+int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
+void a6xx_hfi_stop(struct a6xx_gmu *gmu);
+int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu);
+int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index);
+
+bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu);
+bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu);
+
+#endif
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
new file mode 100644
index 000000000..4a3230978
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
@@ -0,0 +1,483 @@
+#ifndef A6XX_GMU_XML
+#define A6XX_GMU_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__MASK 0x00800000
+#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__SHIFT 23
+static inline uint32_t A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB(uint32_t val)
+{
+ return ((val) << A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__SHIFT) & A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__MASK;
+}
+#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__MASK 0x40000000
+#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__SHIFT 30
+static inline uint32_t A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB(uint32_t val)
+{
+ return ((val) << A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__SHIFT) & A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__MASK;
+}
+#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__MASK 0x00400000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__SHIFT 22
+static inline uint32_t A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__SHIFT) & A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__MASK;
+}
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__MASK 0x40000000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__SHIFT 30
+static inline uint32_t A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__MASK;
+}
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__MASK 0x40000000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__SHIFT 30
+static inline uint32_t A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__MASK;
+}
+#define A6XX_GMU_OOB_DCVS_SET_MASK__MASK 0x00800000
+#define A6XX_GMU_OOB_DCVS_SET_MASK__SHIFT 23
+static inline uint32_t A6XX_GMU_OOB_DCVS_SET_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_DCVS_SET_MASK__SHIFT) & A6XX_GMU_OOB_DCVS_SET_MASK__MASK;
+}
+#define A6XX_GMU_OOB_DCVS_CHECK_MASK__MASK 0x80000000
+#define A6XX_GMU_OOB_DCVS_CHECK_MASK__SHIFT 31
+static inline uint32_t A6XX_GMU_OOB_DCVS_CHECK_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_DCVS_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_DCVS_CHECK_MASK__MASK;
+}
+#define A6XX_GMU_OOB_DCVS_CLEAR_MASK__MASK 0x80000000
+#define A6XX_GMU_OOB_DCVS_CLEAR_MASK__SHIFT 31
+static inline uint32_t A6XX_GMU_OOB_DCVS_CLEAR_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_DCVS_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_DCVS_CLEAR_MASK__MASK;
+}
+#define A6XX_GMU_OOB_GPU_SET_MASK__MASK 0x00040000
+#define A6XX_GMU_OOB_GPU_SET_MASK__SHIFT 18
+static inline uint32_t A6XX_GMU_OOB_GPU_SET_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_GPU_SET_MASK__SHIFT) & A6XX_GMU_OOB_GPU_SET_MASK__MASK;
+}
+#define A6XX_GMU_OOB_GPU_CHECK_MASK__MASK 0x04000000
+#define A6XX_GMU_OOB_GPU_CHECK_MASK__SHIFT 26
+static inline uint32_t A6XX_GMU_OOB_GPU_CHECK_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_GPU_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_GPU_CHECK_MASK__MASK;
+}
+#define A6XX_GMU_OOB_GPU_CLEAR_MASK__MASK 0x04000000
+#define A6XX_GMU_OOB_GPU_CLEAR_MASK__SHIFT 26
+static inline uint32_t A6XX_GMU_OOB_GPU_CLEAR_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_GPU_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_GPU_CLEAR_MASK__MASK;
+}
+#define A6XX_GMU_OOB_PERFCNTR_SET_MASK__MASK 0x00020000
+#define A6XX_GMU_OOB_PERFCNTR_SET_MASK__SHIFT 17
+static inline uint32_t A6XX_GMU_OOB_PERFCNTR_SET_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_PERFCNTR_SET_MASK__SHIFT) & A6XX_GMU_OOB_PERFCNTR_SET_MASK__MASK;
+}
+#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__MASK 0x02000000
+#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__SHIFT 25
+static inline uint32_t A6XX_GMU_OOB_PERFCNTR_CHECK_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__MASK;
+}
+#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__MASK 0x02000000
+#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__SHIFT 25
+static inline uint32_t A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__MASK;
+}
+#define A6XX_HFI_IRQ_MSGQ_MASK 0x00000001
+#define A6XX_HFI_IRQ_DSGQ_MASK__MASK 0x00000002
+#define A6XX_HFI_IRQ_DSGQ_MASK__SHIFT 1
+static inline uint32_t A6XX_HFI_IRQ_DSGQ_MASK(uint32_t val)
+{
+ return ((val) << A6XX_HFI_IRQ_DSGQ_MASK__SHIFT) & A6XX_HFI_IRQ_DSGQ_MASK__MASK;
+}
+#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK__MASK 0x00000004
+#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK__SHIFT 2
+static inline uint32_t A6XX_HFI_IRQ_BLOCKED_MSG_MASK(uint32_t val)
+{
+ return ((val) << A6XX_HFI_IRQ_BLOCKED_MSG_MASK__SHIFT) & A6XX_HFI_IRQ_BLOCKED_MSG_MASK__MASK;
+}
+#define A6XX_HFI_IRQ_CM3_FAULT_MASK__MASK 0x00800000
+#define A6XX_HFI_IRQ_CM3_FAULT_MASK__SHIFT 23
+static inline uint32_t A6XX_HFI_IRQ_CM3_FAULT_MASK(uint32_t val)
+{
+ return ((val) << A6XX_HFI_IRQ_CM3_FAULT_MASK__SHIFT) & A6XX_HFI_IRQ_CM3_FAULT_MASK__MASK;
+}
+#define A6XX_HFI_IRQ_GMU_ERR_MASK__MASK 0x007f0000
+#define A6XX_HFI_IRQ_GMU_ERR_MASK__SHIFT 16
+static inline uint32_t A6XX_HFI_IRQ_GMU_ERR_MASK(uint32_t val)
+{
+ return ((val) << A6XX_HFI_IRQ_GMU_ERR_MASK__SHIFT) & A6XX_HFI_IRQ_GMU_ERR_MASK__MASK;
+}
+#define A6XX_HFI_IRQ_OOB_MASK__MASK 0xff000000
+#define A6XX_HFI_IRQ_OOB_MASK__SHIFT 24
+static inline uint32_t A6XX_HFI_IRQ_OOB_MASK(uint32_t val)
+{
+ return ((val) << A6XX_HFI_IRQ_OOB_MASK__SHIFT) & A6XX_HFI_IRQ_OOB_MASK__MASK;
+}
+#define A6XX_HFI_H2F_IRQ_MASK_BIT 0x00000001
+#define REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL 0x00000080
+
+#define REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL 0x00000081
+
+#define REG_A6XX_GMU_CM3_ITCM_START 0x00000c00
+
+#define REG_A6XX_GMU_CM3_DTCM_START 0x00001c00
+
+#define REG_A6XX_GMU_NMI_CONTROL_STATUS 0x000023f0
+
+#define REG_A6XX_GMU_BOOT_SLUMBER_OPTION 0x000023f8
+
+#define REG_A6XX_GMU_GX_VOTE_IDX 0x000023f9
+
+#define REG_A6XX_GMU_MX_VOTE_IDX 0x000023fa
+
+#define REG_A6XX_GMU_DCVS_ACK_OPTION 0x000023fc
+
+#define REG_A6XX_GMU_DCVS_PERF_SETTING 0x000023fd
+
+#define REG_A6XX_GMU_DCVS_BW_SETTING 0x000023fe
+
+#define REG_A6XX_GMU_DCVS_RETURN 0x000023ff
+
+#define REG_A6XX_GMU_ICACHE_CONFIG 0x00004c00
+
+#define REG_A6XX_GMU_DCACHE_CONFIG 0x00004c01
+
+#define REG_A6XX_GMU_SYS_BUS_CONFIG 0x00004c0f
+
+#define REG_A6XX_GMU_CM3_SYSRESET 0x00005000
+
+#define REG_A6XX_GMU_CM3_BOOT_CONFIG 0x00005001
+
+#define REG_A6XX_GMU_CM3_FW_BUSY 0x0000501a
+
+#define REG_A6XX_GMU_CM3_FW_INIT_RESULT 0x0000501c
+
+#define REG_A6XX_GMU_CM3_CFG 0x0000502d
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE 0x00005040
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0 0x00005041
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1 0x00005042
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L 0x00005044
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H 0x00005045
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L 0x00005046
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H 0x00005047
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L 0x00005048
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H 0x00005049
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L 0x0000504a
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H 0x0000504b
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L 0x0000504c
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H 0x0000504d
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L 0x0000504e
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H 0x0000504f
+
+#define REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL 0x000050c0
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE 0x00000001
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE 0x00000002
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE 0x00000004
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__MASK 0x00003c00
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__SHIFT 10
+static inline uint32_t A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS(uint32_t val)
+{
+ return ((val) << A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__SHIFT) & A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__MASK;
+}
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__MASK 0xffffc000
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__SHIFT 14
+static inline uint32_t A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH(uint32_t val)
+{
+ return ((val) << A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__SHIFT) & A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__MASK;
+}
+
+#define REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST 0x000050c1
+
+#define REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST 0x000050c2
+
+#define REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS 0x000050d0
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_OFF 0x00000001
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_ON 0x00000002
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF 0x00000004
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_ON 0x00000008
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF 0x00000010
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GMU_UP_POWER_STATE 0x00000020
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF 0x00000040
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF 0x00000080
+
+#define REG_A6XX_GMU_GPU_NAP_CTRL 0x000050e4
+#define A6XX_GMU_GPU_NAP_CTRL_HW_NAP_ENABLE 0x00000001
+#define A6XX_GMU_GPU_NAP_CTRL_SID__MASK 0x000001f0
+#define A6XX_GMU_GPU_NAP_CTRL_SID__SHIFT 4
+static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
+{
+ return ((val) << A6XX_GMU_GPU_NAP_CTRL_SID__SHIFT) & A6XX_GMU_GPU_NAP_CTRL_SID__MASK;
+}
+
+#define REG_A6XX_GMU_RPMH_CTRL 0x000050e8
+#define A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE 0x00000001
+#define A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE 0x00000010
+#define A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE 0x00000100
+#define A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE 0x00000200
+#define A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE 0x00000400
+#define A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE 0x00000800
+#define A6XX_GMU_RPMH_CTRL_DDR_MIN_VOTE_ENABLE 0x00001000
+#define A6XX_GMU_RPMH_CTRL_MX_MIN_VOTE_ENABLE 0x00002000
+#define A6XX_GMU_RPMH_CTRL_CX_MIN_VOTE_ENABLE 0x00004000
+#define A6XX_GMU_RPMH_CTRL_GFX_MIN_VOTE_ENABLE 0x00008000
+
+#define REG_A6XX_GMU_RPMH_HYST_CTRL 0x000050e9
+
+#define REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE 0x000050ec
+
+#define REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF 0x000050f0
+
+#define REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF 0x000050f1
+
+#define REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG 0x00005100
+
+#define REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP 0x00005101
+
+#define REG_A6XX_GMU_BOOT_KMD_LM_HANDSHAKE 0x000051f0
+
+#define REG_A6XX_GMU_LLM_GLM_SLEEP_CTRL 0x00005157
+
+#define REG_A6XX_GMU_LLM_GLM_SLEEP_STATUS 0x00005158
+
+#define REG_A6XX_GMU_ALWAYS_ON_COUNTER_L 0x00005088
+
+#define REG_A6XX_GMU_ALWAYS_ON_COUNTER_H 0x00005089
+
+#define REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE 0x000050c3
+
+#define REG_A6XX_GMU_HFI_CTRL_STATUS 0x00005180
+
+#define REG_A6XX_GMU_HFI_VERSION_INFO 0x00005181
+
+#define REG_A6XX_GMU_HFI_SFR_ADDR 0x00005182
+
+#define REG_A6XX_GMU_HFI_MMAP_ADDR 0x00005183
+
+#define REG_A6XX_GMU_HFI_QTBL_INFO 0x00005184
+
+#define REG_A6XX_GMU_HFI_QTBL_ADDR 0x00005185
+
+#define REG_A6XX_GMU_HFI_CTRL_INIT 0x00005186
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_SET 0x00005190
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_CLR 0x00005191
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_INFO 0x00005192
+#define A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ 0x00000001
+#define A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT 0x00800000
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_MASK 0x00005193
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_SET 0x00005194
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_CLR 0x00005195
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_RAW_INFO 0x00005196
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_0 0x00005197
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_1 0x00005198
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_2 0x00005199
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_3 0x0000519a
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_0 0x0000519b
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_1 0x0000519c
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_2 0x0000519d
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_3 0x0000519e
+
+#define REG_A6XX_GMU_GENERAL_1 0x000051c6
+
+#define REG_A6XX_GMU_GENERAL_7 0x000051cc
+
+#define REG_A6XX_GMU_ISENSE_CTRL 0x0000515d
+
+#define REG_A6XX_GPU_CS_ENABLE_REG 0x00008920
+
+#define REG_A6XX_GPU_GMU_CX_GMU_ISENSE_CTRL 0x0000515d
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL3 0x00008578
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL2 0x00008558
+
+#define REG_A6XX_GPU_CS_A_SENSOR_CTRL_0 0x00008580
+
+#define REG_A6XX_GPU_CS_A_SENSOR_CTRL_2 0x00027ada
+
+#define REG_A6XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000881a
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x00008957
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x0000881d
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x0000881f
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x00008821
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_DONE 0x00008965
+
+#define REG_A6XX_GPU_CS_AMP_PERIOD_CTRL 0x0000896d
+
+#define REG_A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD 0x0000514d
+
+#define REG_A6XX_GMU_AO_INTERRUPT_EN 0x00009303
+
+#define REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR 0x00009304
+
+#define REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS 0x00009305
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE 0x00000001
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_RSCC_COMP 0x00000002
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_VDROOP 0x00000004
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR 0x00000008
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_DBD_WAKEUP 0x00000010
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR 0x00000020
+
+#define REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK 0x00009306
+
+#define REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL 0x00009309
+
+#define REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL 0x0000930a
+
+#define REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL 0x0000930b
+
+#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS 0x0000930c
+#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB 0x00800000
+
+#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2 0x0000930d
+
+#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK 0x0000930e
+
+#define REG_A6XX_GMU_AO_AHB_FENCE_CTRL 0x00009310
+
+#define REG_A6XX_GMU_AHB_FENCE_STATUS 0x00009313
+
+#define REG_A6XX_GMU_RBBM_INT_UNMASKED_STATUS 0x00009315
+
+#define REG_A6XX_GMU_AO_SPARE_CNTL 0x00009316
+
+#define REG_A6XX_GMU_RSCC_CONTROL_REQ 0x00009307
+
+#define REG_A6XX_GMU_RSCC_CONTROL_ACK 0x00009308
+
+#define REG_A6XX_GMU_AHB_FENCE_RANGE_0 0x00009311
+
+#define REG_A6XX_GMU_AHB_FENCE_RANGE_1 0x00009312
+
+#define REG_A6XX_GPU_CC_GX_GDSCR 0x00009c03
+
+#define REG_A6XX_GPU_CC_GX_DOMAIN_MISC 0x00009d42
+
+#define REG_A6XX_GPU_CPR_FSM_CTL 0x0000c001
+
+#define REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0 0x00000004
+
+#define REG_A6XX_RSCC_PDC_SEQ_START_ADDR 0x00000008
+
+#define REG_A6XX_RSCC_PDC_MATCH_VALUE_LO 0x00000009
+
+#define REG_A6XX_RSCC_PDC_MATCH_VALUE_HI 0x0000000a
+
+#define REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0 0x0000000b
+
+#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR 0x0000000d
+
+#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA 0x0000000e
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0 0x00000082
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0 0x00000083
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0 0x00000089
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0 0x0000008c
+
+#define REG_A6XX_RSCC_OVERRIDE_START_ADDR 0x00000100
+
+#define REG_A6XX_RSCC_SEQ_BUSY_DRV0 0x00000101
+
+#define REG_A6XX_RSCC_SEQ_MEM_0_DRV0 0x00000180
+
+#define REG_A6XX_RSCC_TCS0_DRV0_STATUS 0x00000346
+
+#define REG_A6XX_RSCC_TCS1_DRV0_STATUS 0x000003ee
+
+#define REG_A6XX_RSCC_TCS2_DRV0_STATUS 0x00000496
+
+#define REG_A6XX_RSCC_TCS3_DRV0_STATUS 0x0000053e
+
+
+#endif /* A6XX_GMU_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
new file mode 100644
index 000000000..95e73eddc
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -0,0 +1,2078 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
+
+
+#include "msm_gem.h"
+#include "msm_mmu.h"
+#include "msm_gpu_trace.h"
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.xml.h"
+
+#include <linux/bitfield.h>
+#include <linux/devfreq.h>
+#include <linux/reset.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+
+#define GPU_PAS_ID 13
+
+static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ /* Check that the GMU is idle */
+ if (!a6xx_gmu_isidle(&a6xx_gpu->gmu))
+ return false;
+
+ /* Check that the CX master is idle */
+ if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
+ ~A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER)
+ return false;
+
+ return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
+ A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);
+}
+
+static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ /* wait for CP to drain ringbuffer: */
+ if (!adreno_idle(gpu, ring))
+ return false;
+
+ if (spin_until(_a6xx_check_idle(gpu))) {
+ DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
+ gpu->name, __builtin_return_address(0),
+ gpu_read(gpu, REG_A6XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS),
+ gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A6XX_CP_RB_WPTR));
+ return false;
+ }
+
+ return true;
+}
+
+static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ /* Expanded APRIV doesn't need to issue the WHERE_AM_I opcode */
+ if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) {
+ OUT_PKT7(ring, CP_WHERE_AM_I, 2);
+ OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
+ OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));
+ }
+}
+
+static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ uint32_t wptr;
+ unsigned long flags;
+
+ update_shadow_rptr(gpu, ring);
+
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
+ /* Make sure to wrap wptr if we need to */
+ wptr = get_wptr(ring);
+
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
+
+ /* Make sure everything is posted before making a decision */
+ mb();
+
+ gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+}
+
+static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
+ u64 iova)
+{
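+ /* Have the CP copy a 64-bit (two dword) counter register to iova */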
+ OUT_PKT7(ring, CP_REG_TO_MEM, 3);
+ OUT_RING(ring, CP_REG_TO_MEM_0_REG(counter) |
+ CP_REG_TO_MEM_0_CNT(2) |
+ CP_REG_TO_MEM_0_64B);
+ OUT_RING(ring, lower_32_bits(iova));
+ OUT_RING(ring, upper_32_bits(iova));
+}
+
+static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
+ struct msm_ringbuffer *ring, struct msm_file_private *ctx)
+{
+ bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
+ phys_addr_t ttbr;
+ u32 asid;
+ u64 memptr = rbmemptr(ring, ttbr0);
+
+ if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno)
+ return;
+
+ if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
+ return;
+
+ if (!sysprof) {
+ /* Turn off protected mode to write to special registers */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ OUT_PKT4(ring, REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
+ OUT_RING(ring, 1);
+ }
+
+ /* Execute the table update */
+ OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 4);
+ OUT_RING(ring, CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(lower_32_bits(ttbr)));
+
+ OUT_RING(ring,
+ CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(upper_32_bits(ttbr)) |
+ CP_SMMU_TABLE_UPDATE_1_ASID(asid));
+ OUT_RING(ring, CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(0));
+ OUT_RING(ring, CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(0));
+
+ /*
+ * Write the new TTBR0 to the memstore. This is good for debugging.
+ */
+ OUT_PKT7(ring, CP_MEM_WRITE, 4);
+ OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr)));
+ OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr)));
+ OUT_RING(ring, lower_32_bits(ttbr));
+ OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr));
+
+ /*
+ * And finally, trigger a uche flush to be sure there isn't anything
+ * lingering in that part of the GPU
+ */
+
+ OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, CACHE_INVALIDATE);
+
+ if (!sysprof) {
+ /*
+ * Wait for SRAM clear after the pgtable update, so the
+ * two can happen in parallel:
+ */
+ OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
+ OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ));
+ OUT_RING(ring, CP_WAIT_REG_MEM_1_POLL_ADDR_LO(
+ REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS));
+ OUT_RING(ring, CP_WAIT_REG_MEM_2_POLL_ADDR_HI(0));
+ OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(0x1));
+ OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(0x1));
+ OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0));
+
+ /* Re-enable protected mode: */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+ }
+}
+
+static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+ unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = submit->ring;
+ unsigned int i, ibs = 0;
+
+ a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
+
+ get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
+ rbmemptr_stats(ring, index, cpcycles_start));
+
+ /*
+ * For PM4 the GMU register offsets are calculated from the base of the
+ * GPU registers so we need to add 0x1a800 to the register value on A630
+ * to get the right value from PM4.
+ */
+ get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+ rbmemptr_stats(ring, index, alwayson_start));
+
+ /* Invalidate CCU depth and color */
+ OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_DEPTH));
+
+ OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_COLOR));
+
+ /* Submit the commands */
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ break;
+ fallthrough;
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ ibs++;
+ break;
+ }
+
+ /*
+ * Periodically update shadow-wptr if needed, so that we
+ * can see partial progress of submits with large # of
+ * cmds; otherwise we could needlessly stall waiting for
+ * ringbuffer state, simply due to looking at a shadow
+ * rptr value that has not been updated
+ */
+ if ((ibs % 32) == 0)
+ update_shadow_rptr(gpu, ring);
+ }
+
+ get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
+ rbmemptr_stats(ring, index, cpcycles_end));
+ get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+ rbmemptr_stats(ring, index, alwayson_end));
+
+ /* Write the fence to the scratch register */
+ OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
+ OUT_RING(ring, submit->seqno);
+
+ /*
+ * Execute a CACHE_FLUSH_TS event. This will ensure that the
+ * timestamp is written to the memory and then triggers the interrupt
+ */
+ OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+ OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
+ CP_EVENT_WRITE_0_IRQ);
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, submit->seqno);
+
+ trace_msm_gpu_submit_flush(submit,
+ gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO));
+
+ a6xx_flush(gpu, ring);
+}
+
+/* For a615 family (a615, a616, a618 and a619) */
+const struct adreno_reglist a615_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002020},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {},
+};
+
+const struct adreno_reglist a630_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP1, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP2, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP3, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP2, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP3, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP2, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP3, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {},
+};
+
+const struct adreno_reglist a640_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05222022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
+ {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+ {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {},
+};
+
+const struct adreno_reglist a650_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
+ {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+ {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {},
+};
+
+const struct adreno_reglist a660_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
+ {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+ {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {},
+};
+
+static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ const struct adreno_reglist *reg;
+ unsigned int i;
+ u32 val, clock_cntl_on;
+
+ if (!adreno_gpu->info->hwcg)
+ return;
+
+ if (adreno_is_a630(adreno_gpu))
+ clock_cntl_on = 0x8aa8aa02;
+ else
+ clock_cntl_on = 0x8aa8aa82;
+
+ val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);
+
+ /* Don't re-program the registers if they are already correct */
+ if ((!state && !val) || (state && (val == clock_cntl_on)))
+ return;
+
+ /* Disable SP clock before programming HWCG registers */
+ gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
+
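+	/*
+	 * Walk the per-target reglist; the all-zero sentinel entry ({})
+	 * at the end of each list (offset == 0) terminates the loop
+	 */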
+ for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++)
+ gpu_write(gpu, reg->offset, state ? reg->value : 0);
+
+ /* Enable SP clock */
+ gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
+
+ gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
+}
+
+/* For a615, a616, a618, a619, a630, a640 and a680 */
+static const u32 a6xx_protect[] = {
+ A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+ A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+ A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+ A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+ A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+ A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+ A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+ A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+ A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+ A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+ A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+ A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+ A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+ A6XX_PROTECT_NORDWR(0x09e70, 0x0001),
+ A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+ A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+ A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+ A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+ A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x11c00, 0x0000), /* note: infinite range */
+};
+
+/* These are for a620 and a650 */
+static const u32 a650_protect[] = {
+ A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+ A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+ A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+ A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+ A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+ A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+ A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+ A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+ A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+ A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+ A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+ A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+ A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
+ A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+ A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
+ A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+ A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+ A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0b608, 0x0007),
+ A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+ A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+ A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1a800, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
+ A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
+ A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
+ A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
+};
+
+/* These are for a635 and a660 */
+static const u32 a660_protect[] = {
+ A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+ A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+ A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+ A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+ A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+ A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+ A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+ A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+ A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+ A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+ A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+ A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+ A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
+ A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+ A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
+ A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+ A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae50, 0x012f),
+ A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0b608, 0x0006),
+ A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+ A6XX_PROTECT_NORDWR(0x0be20, 0x015f),
+ A6XX_PROTECT_NORDWR(0x0d000, 0x05ff),
+ A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1a400, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
+ A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
+ A6XX_PROTECT_NORDWR(0x1f860, 0x0000),
+ A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
+ A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
+};
+
+static void a6xx_set_cp_protect(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const u32 *regs = a6xx_protect;
+ unsigned i, count, count_max;
+
+ if (adreno_is_a650(adreno_gpu)) {
+ regs = a650_protect;
+ count = ARRAY_SIZE(a650_protect);
+ count_max = 48;
+ BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
+ } else if (adreno_is_a660_family(adreno_gpu)) {
+ regs = a660_protect;
+ count = ARRAY_SIZE(a660_protect);
+ count_max = 48;
+ BUILD_BUG_ON(ARRAY_SIZE(a660_protect) > 48);
+ } else {
+ regs = a6xx_protect;
+ count = ARRAY_SIZE(a6xx_protect);
+ count_max = 32;
+ BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32);
+ }
+
+ /*
+ * Enable access protection to privileged registers, fault on an access
+ * protect violation and select the last span to protect from the start
+ * address all the way to the end of the register address space
+ */
+ gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, BIT(0) | BIT(1) | BIT(3));
+
+ for (i = 0; i < count - 1; i++)
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]);
+	/* Program the final entry into the last slot so its "infinite" range
+	 * is the one extended to the end of the address space by BIT(3) above
+	 */
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]);
+}
+
+static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ u32 lower_bit = 2;
+ u32 amsbc = 0;
+ u32 rgb565_predicator = 0;
+ u32 uavflagprd_inv = 0;
+
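+	/*
+	 * lower_bit feeds the low field of each *_NC_MODE_CNTL register
+	 * below; judging from the DDR-type TODO it appears to select the
+	 * UBWC highest-bank-bit, but that reading is an inference rather
+	 * than a documented fact
+	 */
+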
+ /* a618 is using the hw default values */
+ if (adreno_is_a618(adreno_gpu))
+ return;
+
+ if (adreno_is_a640_family(adreno_gpu))
+ amsbc = 1;
+
+ if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) {
+ /* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
+ lower_bit = 3;
+ amsbc = 1;
+ rgb565_predicator = 1;
+ uavflagprd_inv = 2;
+ }
+
+ if (adreno_is_7c3(adreno_gpu)) {
+ lower_bit = 1;
+ amsbc = 1;
+ rgb565_predicator = 1;
+ uavflagprd_inv = 2;
+ }
+
+ gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
+ rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
+ gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
+ gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
+ uavflagprd_inv << 4 | lower_bit << 1);
+ gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, lower_bit << 21);
+}
+
+static int a6xx_cp_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT7(ring, CP_ME_INIT, 8);
+
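+	/*
+	 * Bitmask of which init ordinals below are present - a best-effort
+	 * interpretation based on downstream CP_INIT usage
+	 */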
+ OUT_RING(ring, 0x0000002f);
+
+ /* Enable multiple hardware contexts */
+ OUT_RING(ring, 0x00000003);
+
+ /* Enable error detection */
+ OUT_RING(ring, 0x20000000);
+
+ /* Don't enable header dump */
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ /* No workarounds enabled */
+ OUT_RING(ring, 0x00000000);
+
+ /* Pad rest of the cmds with 0's */
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ a6xx_flush(gpu, ring);
+ return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+/*
+ * Check that the microcode version is new enough to include several key
+ * security fixes. Return true if the ucode is safe.
+ */
+static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
+ struct drm_gem_object *obj)
+{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ const char *sqe_name = adreno_gpu->info->fw[ADRENO_FW_SQE];
+ u32 *buf = msm_gem_get_vaddr(obj);
+ bool ret = false;
+
+ if (IS_ERR(buf))
+ return false;
+
+ /*
+ * Targets up to a640 (a618, a630 and a640) need to check for a
+ * microcode version that is patched to support the whereami opcode or
+ * one that is new enough to include it by default.
+ *
+ * a650 tier targets don't need whereami but still need to be
+ * equal to or newer than 0.95 for other security fixes
+ *
+ * a660 targets have all the critical security fixes from the start
+ */
+ if (!strcmp(sqe_name, "a630_sqe.fw")) {
+ /*
+ * If the lowest nibble is 0xa that is an indication that this
+ * microcode has been patched. The actual version is in dword
+ * [3] but we only care about the patchlevel which is the lowest
+ * nibble of dword [3]
+ *
+ * Otherwise check that the firmware is greater than or equal
+ * to 1.90 which was the first version that had this fix built
+ * in
+ */
+ if ((((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) ||
+ (buf[0] & 0xfff) >= 0x190) {
+ a6xx_gpu->has_whereami = true;
+ ret = true;
+ goto out;
+ }
+
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "a630 SQE ucode is too old. Have version %x need at least %x\n",
+ buf[0] & 0xfff, 0x190);
+ } else if (!strcmp(sqe_name, "a650_sqe.fw")) {
+ if ((buf[0] & 0xfff) >= 0x095) {
+ ret = true;
+ goto out;
+ }
+
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "a650 SQE ucode is too old. Have version %x need at least %x\n",
+ buf[0] & 0xfff, 0x095);
+ } else if (!strcmp(sqe_name, "a660_sqe.fw")) {
+ ret = true;
+ } else {
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "unknown GPU, add it to a6xx_ucode_check_version()!!\n");
+ }
+out:
+ msm_gem_put_vaddr(obj);
+ return ret;
+}
+
+static int a6xx_ucode_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ if (!a6xx_gpu->sqe_bo) {
+ a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu,
+ adreno_gpu->fw[ADRENO_FW_SQE], &a6xx_gpu->sqe_iova);
+
+ if (IS_ERR(a6xx_gpu->sqe_bo)) {
+ int ret = PTR_ERR(a6xx_gpu->sqe_bo);
+
+ a6xx_gpu->sqe_bo = NULL;
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "Could not allocate SQE ucode: %d\n", ret);
+
+ return ret;
+ }
+
+ msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
+ if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) {
+ msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+ drm_gem_object_put(a6xx_gpu->sqe_bo);
+
+ a6xx_gpu->sqe_bo = NULL;
+ return -EPERM;
+ }
+ }
+
+ gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova);
+
+ return 0;
+}
+
+static int a6xx_zap_shader_init(struct msm_gpu *gpu)
+{
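+	/* The zap shader only needs to be loaded into the secure world once,
+	 * so remember whether that already happened */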
+ static bool loaded;
+ int ret;
+
+ if (loaded)
+ return 0;
+
+ ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
+
+ loaded = !ret;
+ return ret;
+}
+
+#define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A6XX_RBBM_INT_0_MASK_CP_IB2 | \
+ A6XX_RBBM_INT_0_MASK_CP_IB1 | \
+ A6XX_RBBM_INT_0_MASK_CP_RB | \
+ A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
+ A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+ A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
+
+static int hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int ret;
+
+ /* Make sure the GMU keeps the GPU on while we set it up */
+ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+
+ /* Clear GBIF halt in case GX domain was not collapsed */
+ if (a6xx_has_gbif(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0);
+
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
+
+ /*
+	 * Disable the trusted memory range - we don't actually support secure
+ * memory rendering at this point in time and we don't want to block off
+ * part of the virtual memory space.
+ */
+ gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 0x00000000);
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+
+ /* Turn on 64 bit addressing for all blocks */
+ gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
+
+ /* enable hardware clockgating */
+ a6xx_set_hwcg(gpu, true);
+
+	/* VBIF/GBIF start */
+ if (adreno_is_a640_family(adreno_gpu) ||
+ adreno_is_a650_family(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3);
+ } else {
+ gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
+ }
+
+ if (adreno_is_a630(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
+
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
+
+ /* Disable L2 bypass in the UCHE */
+ gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0);
+ gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff);
+ gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_LO, 0xfffff000);
+ gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
+ gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
+ gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);
+
+ if (!adreno_is_a650_family(adreno_gpu)) {
+ /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
+ gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
+
+ gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
+ 0x00100000 + adreno_gpu->gmem - 1);
+ }
+
+ gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
+ gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
+
+ if (adreno_is_a640_family(adreno_gpu) ||
+ adreno_is_a650_family(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
+ else
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
+
+ if (adreno_is_a660_family(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020);
+
+ /* Setting the mem pool size */
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
+
+	/* Set the primFifo threshold default values, and the
+	 * vccCacheSkipDis=1 bit (0x200) for A640 and newer
+	 */
+	if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
+		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
+	else if (adreno_is_a640_family(adreno_gpu) || adreno_is_7c3(adreno_gpu))
+		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200);
+	else
+		gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000);
+
+ /* Set the AHB default slave response to "ERROR" */
+ gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);
+
+ /* Turn on performance counters */
+ gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);
+
+ /* Select CP0 to always count cycles */
+ gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL(0), PERF_CP_ALWAYS_COUNT);
+
+ a6xx_set_ubwc_config(gpu);
+
+ /* Enable fault detection */
+ gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
+ (1 << 30) | 0x1fffff);
+
+ gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
+
+ /* Set weights for bicubic filtering */
+ if (adreno_is_a650_family(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
+ 0x3fe05ff4);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2,
+ 0x3fa0ebee);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3,
+ 0x3f5193ed);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4,
+ 0x3f0243f0);
+ }
+
+ /* Protect registers from the CP */
+ a6xx_set_cp_protect(gpu);
+
+ if (adreno_is_a660_family(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1);
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0);
+ }
+
+ /* Set dualQ + disable afull for A660 GPU */
+ if (adreno_is_a660(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
+
+ /* Enable expanded apriv for targets that support it */
+ if (gpu->hw_apriv) {
+ gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
+ (1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
+ }
+
+ /* Enable interrupts */
+ gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ goto out;
+
+ ret = a6xx_ucode_init(gpu);
+ if (ret)
+ goto out;
+
+ /* Set the ringbuffer address */
+ gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova);
+
+ /* Targets that support extended APRIV can use the RPTR shadow from
+ * hardware but all the other ones need to disable the feature. Targets
+ * that support the WHERE_AM_I opcode can use that instead
+ */
+ if (adreno_gpu->base.hw_apriv)
+ gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT);
+ else
+ gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ /*
+ * Expanded APRIV and targets that support WHERE_AM_I both need a
+ * privileged buffer to store the RPTR shadow
+ */
+ if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) {
+ if (!a6xx_gpu->shadow_bo) {
+ a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
+ sizeof(u32) * gpu->nr_rings,
+ MSM_BO_WC | MSM_BO_MAP_PRIV,
+ gpu->aspace, &a6xx_gpu->shadow_bo,
+ &a6xx_gpu->shadow_iova);
+
+ if (IS_ERR(a6xx_gpu->shadow))
+ return PTR_ERR(a6xx_gpu->shadow);
+
+ msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow");
+ }
+
+ gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO,
+ shadowptr(a6xx_gpu, gpu->rb[0]));
+ }
+
+ /* Always come up on rb 0 */
+ a6xx_gpu->cur_ring = gpu->rb[0];
+
+ gpu->cur_ctx_seqno = 0;
+
+	/* Enable the SQE to start the CP engine */
+ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
+
+ ret = a6xx_cp_init(gpu);
+ if (ret)
+ goto out;
+
+ /*
+ * Try to load a zap shader into the secure world. If successful
+ * we can use the CP to switch out of secure mode. If not then we
+	 * have no recourse but to try to switch ourselves out manually. If we
+ * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
+ * be blocked and a permissions violation will soon follow.
+ */
+ ret = a6xx_zap_shader_init(gpu);
+ if (!ret) {
+ OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
+ OUT_RING(gpu->rb[0], 0x00000000);
+
+ a6xx_flush(gpu, gpu->rb[0]);
+ if (!a6xx_idle(gpu, gpu->rb[0]))
+ return -EINVAL;
+ } else if (ret == -ENODEV) {
+ /*
+ * This device does not use zap shader (but print a warning
+ * just in case someone got their dt wrong.. hopefully they
+ * have a debug UART to realize the error of their ways...
+ * if you mess this up you are about to crash horribly)
+ */
+ dev_warn_once(gpu->dev->dev,
+ "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+ ret = 0;
+ } else {
+ return ret;
+ }
+
+out:
+ /*
+ * Tell the GMU that we are done touching the GPU and it can start power
+ * management
+ */
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+
+ if (a6xx_gpu->gmu.legacy) {
+ /* Take the GMU out of its special boot mode */
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
+ }
+
+ return ret;
+}
+
+static int a6xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int ret;
+
+ mutex_lock(&a6xx_gpu->gmu.lock);
+ ret = hw_init(gpu);
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+
+ return ret;
+}
+
+static void a6xx_dump(struct msm_gpu *gpu)
+{
+ DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n",
+ gpu_read(gpu, REG_A6XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+#define VBIF_RESET_ACK_TIMEOUT 100
+#define VBIF_RESET_ACK_MASK 0x00f0
+
+static void a6xx_recover(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int i, active_submits;
+
+ adreno_dump_info(gpu);
+
+ for (i = 0; i < 8; i++)
+ DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
+
+ if (hang_debug)
+ a6xx_dump(gpu);
+
+ /*
+	 * Set the hung flag so that recovery-specific sequences run during
+	 * the rpm suspend we are about to trigger
+ */
+ a6xx_gpu->hung = true;
+
+ /* Halt SQE first */
+ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
+
+ /*
+ * Turn off keep alive that might have been enabled by the hang
+ * interrupt
+ */
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
+
+ pm_runtime_dont_use_autosuspend(&gpu->pdev->dev);
+
+	/* active_submits won't change until we make a submission */
+ mutex_lock(&gpu->active_lock);
+ active_submits = gpu->active_submits;
+
+ /*
+ * Temporarily clear active_submits count to silence a WARN() in the
+ * runtime suspend cb
+ */
+ gpu->active_submits = 0;
+
+ /* Drop the rpm refcount from active submits */
+ if (active_submits)
+ pm_runtime_put(&gpu->pdev->dev);
+
+ /* And the final one from recover worker */
+ pm_runtime_put_sync(&gpu->pdev->dev);
+
+ /* Call into gpucc driver to poll for cx gdsc collapse */
+ reset_control_reset(gpu->cx_collapse);
+
+ pm_runtime_use_autosuspend(&gpu->pdev->dev);
+
+ if (active_submits)
+ pm_runtime_get(&gpu->pdev->dev);
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+
+ gpu->active_submits = active_submits;
+ mutex_unlock(&gpu->active_lock);
+
+ msm_gpu_hw_init(gpu);
+ a6xx_gpu->hung = false;
+}
+
+static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid)
+{
+ static const char *uche_clients[7] = {
+ "VFD", "SP", "VSC", "VPC", "HLSQ", "PC", "LRZ",
+ };
+ u32 val;
+
+ if (mid < 1 || mid > 3)
+ return "UNKNOWN";
+
+ /*
+	 * The source of the data depends on the mid ID read from FSYNR1
+	 * and the client ID read from the UCHE block
+ */
+ val = gpu_read(gpu, REG_A6XX_UCHE_CLIENT_PF);
+
+ /* mid = 3 is most precise and refers to only one block per client */
+ if (mid == 3)
+ return uche_clients[val & 7];
+
+ /* For mid=2 the source is TP or VFD except when the client id is 0 */
+ if (mid == 2)
+ return ((val & 7) == 0) ? "TP" : "TP|VFD";
+
+ /* For mid=1 just return "UCHE" as a catchall for everything else */
+ return "UCHE";
+}
+
+static const char *a6xx_fault_block(struct msm_gpu *gpu, u32 id)
+{
+ if (id == 0)
+ return "CP";
+ else if (id == 4)
+ return "CCU";
+ else if (id == 6)
+ return "CDP Prefetch";
+
+ return a6xx_uche_fault_block(gpu, id);
+}
+
+#define ARM_SMMU_FSR_TF BIT(1)
+#define ARM_SMMU_FSR_PF BIT(3)
+#define ARM_SMMU_FSR_EF BIT(4)
+
+static int a6xx_fault_handler(void *arg, unsigned long iova, int flags, void *data)
+{
+ struct msm_gpu *gpu = arg;
+ struct adreno_smmu_fault_info *info = data;
+ const char *type = "UNKNOWN";
+ const char *block;
+ bool do_devcoredump = info && !READ_ONCE(gpu->crashstate);
+
+ /*
+ * If we aren't going to be resuming later from fault_worker, then do
+ * it now.
+ */
+ if (!do_devcoredump) {
+ gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
+ }
+
+ /*
+ * Print a default message if we couldn't get the data from the
+ * adreno-smmu-priv
+ */
+ if (!info) {
+ pr_warn_ratelimited("*** gpu fault: iova=%.16lx flags=%d (%u,%u,%u,%u)\n",
+ iova, flags,
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)));
+
+ return 0;
+ }
+
+ if (info->fsr & ARM_SMMU_FSR_TF)
+ type = "TRANSLATION";
+ else if (info->fsr & ARM_SMMU_FSR_PF)
+ type = "PERMISSION";
+ else if (info->fsr & ARM_SMMU_FSR_EF)
+ type = "EXTERNAL";
+
+ block = a6xx_fault_block(gpu, info->fsynr1 & 0xff);
+
+ pr_warn_ratelimited("*** gpu fault: ttbr0=%.16llx iova=%.16lx dir=%s type=%s source=%s (%u,%u,%u,%u)\n",
+ info->ttbr0, iova,
+ flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ",
+ type, block,
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)));
+
+ if (do_devcoredump) {
+ /* Turn off the hangcheck timer to keep it from bothering us */
+ del_timer(&gpu->hangcheck_timer);
+
+ gpu->fault_info.ttbr0 = info->ttbr0;
+ gpu->fault_info.iova = iova;
+ gpu->fault_info.flags = flags;
+ gpu->fault_info.type = type;
+ gpu->fault_info.block = block;
+
+ kthread_queue_work(gpu->worker, &gpu->fault_work);
+ }
+
+ return 0;
+}
+
+static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A6XX_CP_INTERRUPT_STATUS);
+
+ if (status & A6XX_CP_INT_CP_OPCODE_ERROR) {
+ u32 val;
+
+ gpu_write(gpu, REG_A6XX_CP_SQE_STAT_ADDR, 1);
+ val = gpu_read(gpu, REG_A6XX_CP_SQE_STAT_DATA);
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP | opcode error | possible opcode=0x%8.8X\n",
+ val);
+ }
+
+ if (status & A6XX_CP_INT_CP_UCODE_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP ucode error interrupt\n");
+
+ if (status & A6XX_CP_INT_CP_HW_FAULT_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n",
+ gpu_read(gpu, REG_A6XX_CP_HW_FAULT));
+
+ if (status & A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
+ u32 val = gpu_read(gpu, REG_A6XX_CP_PROTECT_STATUS);
+
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
+ val & (1 << 20) ? "READ" : "WRITE",
+ (val & 0x3ffff), val);
+ }
+
+ if (status & A6XX_CP_INT_CP_AHB_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n");
+
+ if (status & A6XX_CP_INT_CP_VSD_PARITY_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n");
+
+ if (status & A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n");
+
+}
+
+static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+
+ /*
+ * If stalled on SMMU fault, we could trip the GPU's hang detection,
+ * but the fault handler will trigger the devcore dump, and we want
+ * to otherwise resume normally rather than killing the submit, so
+ * just bail.
+ */
+ if (gpu_read(gpu, REG_A6XX_RBBM_STATUS3) & A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT)
+ return;
+
+ /*
+ * Force the GPU to stay on until after we finish
+ * collecting information
+ */
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);
+
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+ ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
+ gpu_read(gpu, REG_A6XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
+ gpu_read64(gpu, REG_A6XX_CP_IB1_BASE),
+ gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
+ gpu_read64(gpu, REG_A6XX_CP_IB2_BASE),
+ gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE));
+
+ /* Turn off the hangcheck timer to keep it from bothering us */
+ del_timer(&gpu->hangcheck_timer);
+
+ kthread_queue_work(gpu->worker, &gpu->recover_work);
+}
+
+static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);
+
+ gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);
+
+ if (priv->disable_err_irq)
+ status &= A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS;
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT)
+ a6xx_fault_detect_irq(gpu);
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_HW_ERROR)
+ a6xx_cp_hw_err_irq(gpu);
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW)
+ dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
+ dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
+ dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
+static void a6xx_llc_rmw(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 mask, u32 or)
+{
+ return msm_rmw(a6xx_gpu->llc_mmio + (reg << 2), mask, or);
+}
+
+static void a6xx_llc_write(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 value)
+{
+ msm_writel(value, a6xx_gpu->llc_mmio + (reg << 2));
+}
+
+static void a6xx_llc_deactivate(struct a6xx_gpu *a6xx_gpu)
+{
+ llcc_slice_deactivate(a6xx_gpu->llc_slice);
+ llcc_slice_deactivate(a6xx_gpu->htw_llc_slice);
+}
+
+static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
+{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ u32 cntl1_regval = 0;
+
+ if (IS_ERR(a6xx_gpu->llc_mmio))
+ return;
+
+ if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
+ u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
+
+ gpu_scid &= 0x1f;
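+		/* Replicate the 5-bit slice ID into one field per GPU block */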
+ cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
+ (gpu_scid << 15) | (gpu_scid << 20);
+
+ /* On A660, the SCID programming for UCHE traffic is done in
+ * A6XX_GBIF_SCACHE_CNTL0[14:10]
+ */
+ if (adreno_is_a660_family(adreno_gpu))
+ gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
+ (1 << 8), (gpu_scid << 10) | (1 << 8));
+ }
+
+ /*
+	 * For targets with an MMU500, activate the slice but don't program the
+ * register. The XBL will take care of that.
+ */
+ if (!llcc_slice_activate(a6xx_gpu->htw_llc_slice)) {
+ if (!a6xx_gpu->have_mmu500) {
+ u32 gpuhtw_scid = llcc_get_slice_id(a6xx_gpu->htw_llc_slice);
+
+ gpuhtw_scid &= 0x1f;
+ cntl1_regval |= FIELD_PREP(GENMASK(29, 25), gpuhtw_scid);
+ }
+ }
+
+ if (!cntl1_regval)
+ return;
+
+ /*
+ * Program the slice IDs for the various GPU blocks and GPU MMU
+ * pagetables
+ */
+ if (!a6xx_gpu->have_mmu500) {
+ a6xx_llc_write(a6xx_gpu,
+ REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval);
+
+ /*
+ * Program cacheability overrides to not allocate cache
+ * lines on a write miss
+ */
+ a6xx_llc_rmw(a6xx_gpu,
+ REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03);
+ return;
+ }
+
+ gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
+}
+
+static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
+{
+ llcc_slice_putd(a6xx_gpu->llc_slice);
+ llcc_slice_putd(a6xx_gpu->htw_llc_slice);
+}
+
+static void a6xx_llc_slices_init(struct platform_device *pdev,
+ struct a6xx_gpu *a6xx_gpu)
+{
+ struct device_node *phandle;
+
+ /*
+ * There is a different programming path for targets with an mmu500
+ * attached, so detect if that is the case
+ */
+ phandle = of_parse_phandle(pdev->dev.of_node, "iommus", 0);
+ a6xx_gpu->have_mmu500 = (phandle &&
+ of_device_is_compatible(phandle, "arm,mmu-500"));
+ of_node_put(phandle);
+
+ if (a6xx_gpu->have_mmu500)
+ a6xx_gpu->llc_mmio = NULL;
+ else
+ a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem");
+
+ a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
+ a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
+
+ if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
+ a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL);
+}
+
+static int a6xx_pm_resume(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int ret;
+
+ gpu->needs_hw_init = true;
+
+ trace_msm_gpu_resume(0);
+
+ mutex_lock(&a6xx_gpu->gmu.lock);
+ ret = a6xx_gmu_resume(a6xx_gpu);
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+ if (ret)
+ return ret;
+
+ msm_devfreq_resume(gpu);
+
+ a6xx_llc_activate(a6xx_gpu);
+
+ return 0;
+}
+
+static int a6xx_pm_suspend(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int i, ret;
+
+ trace_msm_gpu_suspend(0);
+
+ a6xx_llc_deactivate(a6xx_gpu);
+
+ msm_devfreq_suspend(gpu);
+
+ mutex_lock(&a6xx_gpu->gmu.lock);
+ ret = a6xx_gmu_stop(a6xx_gpu);
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+ if (ret)
+ return ret;
+
+ if (a6xx_gpu->shadow_bo)
+ for (i = 0; i < gpu->nr_rings; i++)
+ a6xx_gpu->shadow[i] = 0;
+
+ gpu->suspend_count++;
+
+ return 0;
+}
+
+static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ mutex_lock(&a6xx_gpu->gmu.lock);
+
+ /* Force the GPU power on so we can read this register */
+ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+
+ *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO);
+
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+
+ return 0;
+}
+
+static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ return a6xx_gpu->cur_ring;
+}
+
+static void a6xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ if (a6xx_gpu->sqe_bo) {
+ msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+ drm_gem_object_put(a6xx_gpu->sqe_bo);
+ }
+
+ if (a6xx_gpu->shadow_bo) {
+ msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace);
+ drm_gem_object_put(a6xx_gpu->shadow_bo);
+ }
+
+ a6xx_llc_slices_destroy(a6xx_gpu);
+
+ a6xx_gmu_remove(a6xx_gpu);
+
+ adreno_gpu_cleanup(adreno_gpu);
+
+ kfree(a6xx_gpu);
+}
+
+static u64 a6xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ u64 busy_cycles;
+
+ /* 19.2MHz */
+ *out_sample_rate = 19200000;
+
+ busy_cycles = gmu_read64(&a6xx_gpu->gmu,
+ REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
+ REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);
+
+ return busy_cycles;
+}
+
+static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
+ bool suspended)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ mutex_lock(&a6xx_gpu->gmu.lock);
+ a6xx_gmu_set_freq(gpu, opp, suspended);
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+}
+
+static struct msm_gem_address_space *
+a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct iommu_domain *iommu;
+ struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
+ u64 start, size;
+
+ iommu = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu)
+ return NULL;
+
+ /*
+	 * This allows the GPU to set the bus attributes required to use system
+ * cache on behalf of the iommu page table walker.
+ */
+ if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
+ adreno_set_llc_attributes(iommu);
+
+ mmu = msm_iommu_new(&pdev->dev, iommu);
+ if (IS_ERR(mmu)) {
+ iommu_domain_free(iommu);
+ return ERR_CAST(mmu);
+ }
+
+ /*
+ * Use the aperture start or SZ_16M, whichever is greater. This will
+ * ensure that we align with the allocated pagetable range while still
+ * allowing room in the lower 32 bits for GMEM and whatnot
+ */
+ start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
+ size = iommu->geometry.aperture_end - start + 1;
+
+ aspace = msm_gem_address_space_create(mmu, "gpu",
+ start & GENMASK_ULL(48, 0), size);
+
+ if (IS_ERR(aspace) && !IS_ERR(mmu))
+ mmu->funcs->destroy(mmu);
+
+ return aspace;
+}
+
+static struct msm_gem_address_space *
+a6xx_create_private_address_space(struct msm_gpu *gpu)
+{
+ struct msm_mmu *mmu;
+
+ mmu = msm_iommu_pagetable_create(gpu->aspace->mmu);
+
+ if (IS_ERR(mmu))
+ return ERR_CAST(mmu);
+
+ return msm_gem_address_space_create(mmu,
+ "gpu", 0x100000000ULL,
+ adreno_private_address_space_size(gpu));
+}
+
+static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
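+	/* Prefer the CP-maintained shadow copy when one was set up in hw_init */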
+ if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
+ return a6xx_gpu->shadow[ring->id];
+
+ return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
+}
+
+static bool a6xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct msm_cp_state cp_state = {
+ .ib1_base = gpu_read64(gpu, REG_A6XX_CP_IB1_BASE),
+ .ib2_base = gpu_read64(gpu, REG_A6XX_CP_IB2_BASE),
+ .ib1_rem = gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
+ .ib2_rem = gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE),
+ };
+ bool progress;
+
+ /*
+ * Adjust the remaining data to account for what has already been
+ * fetched from memory, but not yet consumed by the SQE.
+ *
+ * This is not *technically* correct, the amount buffered could
+ * exceed the IB size due to hw prefetching ahead, but:
+ *
+ * (1) We aren't trying to find the exact position, just whether
+ * progress has been made
+ * (2) The CP_REG_TO_MEM at the end of a submit should be enough
+ * to prevent prefetching into an unrelated submit. (And
+ * either way, at some point the ROQ will be full.)
+ */
+ cp_state.ib1_rem += gpu_read(gpu, REG_A6XX_CP_CSQ_IB1_STAT) >> 16;
+ cp_state.ib2_rem += gpu_read(gpu, REG_A6XX_CP_CSQ_IB2_STAT) >> 16;
+
+ progress = !!memcmp(&cp_state, &ring->last_cp_state, sizeof(cp_state));
+
+ ring->last_cp_state = cp_state;
+
+ return progress;
+}
+
+static u32 a618_get_speed_bin(u32 fuse)
+{
+ if (fuse == 0)
+ return 0;
+ else if (fuse == 169)
+ return 1;
+ else if (fuse == 174)
+ return 2;
+
+ return UINT_MAX;
+}
+
+static u32 a619_get_speed_bin(u32 fuse)
+{
+ if (fuse == 0)
+ return 0;
+ else if (fuse == 120)
+ return 4;
+ else if (fuse == 138)
+ return 3;
+ else if (fuse == 169)
+ return 2;
+ else if (fuse == 180)
+ return 1;
+
+ return UINT_MAX;
+}
+
+static u32 adreno_7c3_get_speed_bin(u32 fuse)
+{
+ if (fuse == 0)
+ return 0;
+ else if (fuse == 117)
+ return 0;
+ else if (fuse == 190)
+ return 1;
+
+ return UINT_MAX;
+}
+
+static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse)
+{
+ u32 val = UINT_MAX;
+
+ if (adreno_cmp_rev(ADRENO_REV(6, 1, 8, ANY_ID), rev))
+ val = a618_get_speed_bin(fuse);
+
+ if (adreno_cmp_rev(ADRENO_REV(6, 1, 9, ANY_ID), rev))
+ val = a619_get_speed_bin(fuse);
+
+ if (adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), rev))
+ val = adreno_7c3_get_speed_bin(fuse);
+
+ if (val == UINT_MAX) {
+ DRM_DEV_ERROR(dev,
+ "missing support for speed-bin: %u. Some OPPs may not be supported by hardware\n",
+ fuse);
+ return UINT_MAX;
+ }
+
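+	/* The OPP supported-hw value is a bitmask, so turn the bin into a bit */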
+ return (1 << val);
+}
+
+static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev)
+{
+ u32 supp_hw;
+ u32 speedbin;
+ int ret;
+
+ ret = adreno_read_speedbin(dev, &speedbin);
+ /*
+ * -ENOENT means that the platform doesn't support speedbin which is
+ * fine
+ */
+ if (ret == -ENOENT) {
+ return 0;
+ } else if (ret) {
+ dev_err_probe(dev, ret,
+ "failed to read speed-bin. Some OPPs may not be supported by hardware\n");
+ return ret;
+ }
+
+ supp_hw = fuse_to_supp_hw(dev, rev, speedbin);
+
+ ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a6xx_hw_init,
+ .pm_suspend = a6xx_pm_suspend,
+ .pm_resume = a6xx_pm_resume,
+ .recover = a6xx_recover,
+ .submit = a6xx_submit,
+ .active_ring = a6xx_active_ring,
+ .irq = a6xx_irq,
+ .destroy = a6xx_destroy,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
+ .show = a6xx_show,
+#endif
+ .gpu_busy = a6xx_gpu_busy,
+ .gpu_get_freq = a6xx_gmu_get_freq,
+ .gpu_set_freq = a6xx_gpu_set_freq,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
+ .gpu_state_get = a6xx_gpu_state_get,
+ .gpu_state_put = a6xx_gpu_state_put,
+#endif
+ .create_address_space = a6xx_create_address_space,
+ .create_private_address_space = a6xx_create_private_address_space,
+ .get_rptr = a6xx_get_rptr,
+ .progress = a6xx_progress,
+ },
+ .get_timestamp = a6xx_get_timestamp,
+};
+
+struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct adreno_platform_config *config = pdev->dev.platform_data;
+ const struct adreno_info *info;
+ struct device_node *node;
+ struct a6xx_gpu *a6xx_gpu;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ int ret;
+
+ a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
+ if (!a6xx_gpu)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu = &a6xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ adreno_gpu->registers = NULL;
+
+ /*
+ * We need to know the platform type before calling into adreno_gpu_init
+ * so that the hw_apriv flag can be correctly set. Snoop into the info
+ * and grab the revision number
+ */
+ info = adreno_info(config->rev);
+
+ if (info && (info->revn == 650 || info->revn == 660 ||
+ adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), info->rev)))
+ adreno_gpu->base.hw_apriv = true;
+
+ /*
+ * For now only clamp to idle freq for devices where this is known not
+ * to cause power supply issues:
+ */
+ if (info && (info->revn == 618))
+ gpu->clamp_to_idle = true;
+
+ a6xx_llc_slices_init(pdev, a6xx_gpu);
+
+ ret = a6xx_set_supported_hw(&pdev->dev, config->rev);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ /* Check if there is a GMU phandle and set it up */
+ node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
+
+ /* FIXME: How do we gracefully handle this? */
+ BUG_ON(!node);
+
+ ret = a6xx_gmu_init(a6xx_gpu, node);
+ of_node_put(node);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ if (gpu->aspace)
+ msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
+ a6xx_fault_handler);
+
+ return gpu;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
new file mode 100644
index 000000000..eea2e60ce
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. */
+
+#ifndef __A6XX_GPU_H__
+#define __A6XX_GPU_H__
+
+
+#include "adreno_gpu.h"
+#include "a6xx.xml.h"
+
+#include "a6xx_gmu.h"
+
+extern bool hang_debug;
+
+struct a6xx_gpu {
+ struct adreno_gpu base;
+
+ struct drm_gem_object *sqe_bo;
+ uint64_t sqe_iova;
+
+ struct msm_ringbuffer *cur_ring;
+
+ struct a6xx_gmu gmu;
+
+ struct drm_gem_object *shadow_bo;
+ uint64_t shadow_iova;
+ uint32_t *shadow;
+
+ bool has_whereami;
+
+ void __iomem *llc_mmio;
+ void *llc_slice;
+ void *htw_llc_slice;
+ bool have_mmu500;
+ bool hung;
+};
+
+#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
+
+/*
+ * Given a register and a count, return a value to program into
+ * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
+ * + 1 registers starting at _reg.
+ */
+#define A6XX_PROTECT_NORDWR(_reg, _len) \
+ ((1 << 31) | \
+ (((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
+
+/*
+ * Same as above, but allow reads over the range. For areas of mixed use (such
+ * as performance counters) this allows us to protect a much larger range with a
+ * single register
+ */
+#define A6XX_PROTECT_RDONLY(_reg, _len) \
+ ((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
+
+static inline bool a6xx_has_gbif(struct adreno_gpu *gpu)
+{
+	if (adreno_is_a630(gpu))
+ return false;
+
+ return true;
+}
+
+#define shadowptr(_a6xx_gpu, _ring) ((_a6xx_gpu)->shadow_iova + \
+ ((_ring)->id * sizeof(uint32_t)))
+
+int a6xx_gmu_resume(struct a6xx_gpu *gpu);
+int a6xx_gmu_stop(struct a6xx_gpu *gpu);
+
+int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu);
+
+bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);
+
+int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
+void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
+
+int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
+void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
+
+void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
+ bool suspended);
+unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);
+
+void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p);
+
+struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu);
+int a6xx_gpu_state_put(struct msm_gpu_state *state);
+
+#endif /* __A6XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
new file mode 100644
index 000000000..a023d5f96
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -0,0 +1,1342 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */
+
+#include <linux/ascii85.h>
+#include "msm_gem.h"
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.h"
+#include "a6xx_gpu_state.h"
+#include "a6xx_gmu.xml.h"
+
+struct a6xx_gpu_state_obj {
+ const void *handle;
+ u32 *data;
+};
+
+struct a6xx_gpu_state {
+ struct msm_gpu_state base;
+
+ struct a6xx_gpu_state_obj *gmu_registers;
+ int nr_gmu_registers;
+
+ struct a6xx_gpu_state_obj *registers;
+ int nr_registers;
+
+ struct a6xx_gpu_state_obj *shaders;
+ int nr_shaders;
+
+ struct a6xx_gpu_state_obj *clusters;
+ int nr_clusters;
+
+ struct a6xx_gpu_state_obj *dbgahb_clusters;
+ int nr_dbgahb_clusters;
+
+ struct a6xx_gpu_state_obj *indexed_regs;
+ int nr_indexed_regs;
+
+ struct a6xx_gpu_state_obj *debugbus;
+ int nr_debugbus;
+
+ struct a6xx_gpu_state_obj *vbif_debugbus;
+
+ struct a6xx_gpu_state_obj *cx_debugbus;
+ int nr_cx_debugbus;
+
+ struct msm_gpu_state_bo *gmu_log;
+ struct msm_gpu_state_bo *gmu_hfi;
+ struct msm_gpu_state_bo *gmu_debug;
+
+ s32 hfi_queue_history[2][HFI_HISTORY_SZ];
+
+ struct list_head objs;
+
+ bool gpu_initialized;
+};
+
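+/*
+ * Crashdumper script entries are pairs of u64s: the first is the value to
+ * write (or the target iova for a read) and the second packs the register
+ * offset into the upper bits (<< 44) with the dword count in the low bits;
+ * bit 21 marks the entry as a write. An all-zero pair ends the script.
+ */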
+static inline int CRASHDUMP_WRITE(u64 *in, u32 reg, u32 val)
+{
+ in[0] = val;
+ in[1] = (((u64) reg) << 44 | (1 << 21) | 1);
+
+ return 2;
+}
+
+static inline int CRASHDUMP_READ(u64 *in, u32 reg, u32 dwords, u64 target)
+{
+ in[0] = target;
+ in[1] = (((u64) reg) << 44 | dwords);
+
+ return 2;
+}
+
+static inline int CRASHDUMP_FINI(u64 *in)
+{
+ in[0] = 0;
+ in[1] = 0;
+
+ return 2;
+}
+
+struct a6xx_crashdumper {
+ void *ptr;
+ struct drm_gem_object *bo;
+ u64 iova;
+};
+
+struct a6xx_state_memobj {
+ struct list_head node;
+ unsigned long long data[];
+};
+
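+/*
+ * Allocate a zeroed chunk for the snapshot and track it on the state's
+ * objs list so it can be released together with the state later
+ */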
+static void *state_kcalloc(struct a6xx_gpu_state *a6xx_state, int nr, size_t objsize)
+{
+ struct a6xx_state_memobj *obj =
+ kvzalloc((nr * objsize) + sizeof(*obj), GFP_KERNEL);
+
+ if (!obj)
+ return NULL;
+
+ list_add_tail(&obj->node, &a6xx_state->objs);
+ return &obj->data;
+}
+
+static void *state_kmemdup(struct a6xx_gpu_state *a6xx_state, void *src,
+ size_t size)
+{
+ void *dst = state_kcalloc(a6xx_state, 1, size);
+
+ if (dst)
+ memcpy(dst, src, size);
+ return dst;
+}
+
+/*
+ * Allocate 1MB for the crashdumper scratch region - 8k for the script and
+ * the rest for the data
+ */
+#define A6XX_CD_DATA_OFFSET 8192
+#define A6XX_CD_DATA_SIZE (SZ_1M - 8192)
+
+static int a6xx_crashdumper_init(struct msm_gpu *gpu,
+ struct a6xx_crashdumper *dumper)
+{
+ dumper->ptr = msm_gem_kernel_new(gpu->dev,
+ SZ_1M, MSM_BO_WC, gpu->aspace,
+ &dumper->bo, &dumper->iova);
+
+ if (!IS_ERR(dumper->ptr))
+ msm_gem_object_set_name(dumper->bo, "crashdump");
+
+ return PTR_ERR_OR_ZERO(dumper->ptr);
+}
+
+static int a6xx_crashdumper_run(struct msm_gpu *gpu,
+ struct a6xx_crashdumper *dumper)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ u32 val;
+ int ret;
+
+ if (IS_ERR_OR_NULL(dumper->ptr))
+ return -EINVAL;
+
+ if (!a6xx_gmu_sptprac_is_on(&a6xx_gpu->gmu))
+ return -EINVAL;
+
+ /* Make sure all pending memory writes are posted */
+ wmb();
+
+ gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE_LO, dumper->iova);
+
+ gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);
+
+ ret = gpu_poll_timeout(gpu, REG_A6XX_CP_CRASH_DUMP_STATUS, val,
+ val & 0x02, 100, 10000);
+
+ gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 0);
+
+ return ret;
+}
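+
+/*
+ * Illustrative sketch (not part of the driver): the crashdumper lifecycle as
+ * used by a6xx_gpu_state_get() below - allocate the scratch BO, write a
+ * script into it, kick the dump, then release the BO:
+ *
+ *	struct a6xx_crashdumper dumper = { 0 };
+ *
+ *	if (!a6xx_crashdumper_init(gpu, &dumper)) {
+ *		... write a script into dumper.ptr ...
+ *		if (!a6xx_crashdumper_run(gpu, &dumper))
+ *			... data is at dumper.ptr + A6XX_CD_DATA_OFFSET ...
+ *		msm_gem_kernel_put(dumper.bo, gpu->aspace);
+ *	}
+ */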
+
+/* read a value from the GX debug bus */
+static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
+ u32 *data)
+{
+ u32 reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
+ A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);
+
+ /* Wait 1 us to make sure the data is flowing */
+ udelay(1);
+
+ data[0] = gpu_read(gpu, REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2);
+ data[1] = gpu_read(gpu, REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1);
+
+ return 2;
+}
+
+#define cxdbg_write(ptr, offset, val) \
+ msm_writel((val), (ptr) + ((offset) << 2))
+
+#define cxdbg_read(ptr, offset) \
+ msm_readl((ptr) + ((offset) << 2))
+
+/* read a value from the CX debug bus */
+static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset,
+ u32 *data)
+{
+ u32 reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
+ A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
+
+ /* Wait 1 us to make sure the data is flowing */
+ udelay(1);
+
+ data[0] = cxdbg_read(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2);
+ data[1] = cxdbg_read(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1);
+
+ return 2;
+}
+
+/* Read a chunk of data from the VBIF debug bus */
+static int vbif_debugbus_read(struct msm_gpu *gpu, u32 ctrl0, u32 ctrl1,
+ u32 reg, int count, u32 *data)
+{
+ int i;
+
+ gpu_write(gpu, ctrl0, reg);
+
+ for (i = 0; i < count; i++) {
+ gpu_write(gpu, ctrl1, i);
+ data[i] = gpu_read(gpu, REG_A6XX_VBIF_TEST_BUS_OUT);
+ }
+
+ return count;
+}
+
+#define AXI_ARB_BLOCKS 2
+#define XIN_AXI_BLOCKS 5
+#define XIN_CORE_BLOCKS 4
+
+#define VBIF_DEBUGBUS_BLOCK_SIZE \
+ ((16 * AXI_ARB_BLOCKS) + \
+ (18 * XIN_AXI_BLOCKS) + \
+ (12 * XIN_CORE_BLOCKS))
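+
+/*
+ * Worked example: with the block counts above this evaluates to
+ * (16 * 2) + (18 * 5) + (12 * 4) = 170 dwords per VBIF dump.
+ */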
+
+static void a6xx_get_vbif_debugbus_block(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_gpu_state_obj *obj)
+{
+ u32 clk, *ptr;
+ int i;
+
+ obj->data = state_kcalloc(a6xx_state, VBIF_DEBUGBUS_BLOCK_SIZE,
+ sizeof(u32));
+ if (!obj->data)
+ return;
+
+ obj->handle = NULL;
+
+ /* Get the current clock setting */
+ clk = gpu_read(gpu, REG_A6XX_VBIF_CLKON);
+
+ /* Force on the bus so we can read it */
+ gpu_write(gpu, REG_A6XX_VBIF_CLKON,
+ clk | A6XX_VBIF_CLKON_FORCE_ON_TESTBUS);
+
+ /* We will read from BUS2 first, so disable BUS1 */
+ gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS1_CTRL0, 0);
+
+ /* Enable the VBIF bus for reading */
+ gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS_OUT_CTRL, 1);
+
+ ptr = obj->data;
+
+ for (i = 0; i < AXI_ARB_BLOCKS; i++)
+ ptr += vbif_debugbus_read(gpu,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL0,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL1,
+ 1 << (i + 16), 16, ptr);
+
+ for (i = 0; i < XIN_AXI_BLOCKS; i++)
+ ptr += vbif_debugbus_read(gpu,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL0,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL1,
+ 1 << i, 18, ptr);
+
+ /* Stop BUS2 so we can turn on BUS1 */
+ gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS2_CTRL0, 0);
+
+ for (i = 0; i < XIN_CORE_BLOCKS; i++)
+ ptr += vbif_debugbus_read(gpu,
+ REG_A6XX_VBIF_TEST_BUS1_CTRL0,
+ REG_A6XX_VBIF_TEST_BUS1_CTRL1,
+ 1 << i, 12, ptr);
+
+ /* Restore the VBIF clock setting */
+ gpu_write(gpu, REG_A6XX_VBIF_CLKON, clk);
+}
+
+static void a6xx_get_debugbus_block(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_debugbus_block *block,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i;
+ u32 *ptr;
+
+ obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
+ if (!obj->data)
+ return;
+
+ obj->handle = block;
+
+ for (ptr = obj->data, i = 0; i < block->count; i++)
+ ptr += debugbus_read(gpu, block->id, i, ptr);
+}
+
+static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_debugbus_block *block,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i;
+ u32 *ptr;
+
+ obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
+ if (!obj->data)
+ return;
+
+ obj->handle = block;
+
+ for (ptr = obj->data, i = 0; i < block->count; i++)
+ ptr += cx_debugbus_read(cxdbg, block->id, i, ptr);
+}
+
+static void a6xx_get_debugbus(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ struct resource *res;
+ void __iomem *cxdbg = NULL;
+ int nr_debugbus_blocks;
+
+ /* Set up the GX debug bus */
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT,
+ A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM,
+ A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0, 0x76543210);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1, 0xFEDCBA98);
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+
+	/*
+	 * Set up the CX debug bus - it lives elsewhere in the system so do a
+	 * temporary ioremap for the registers
+	 */
+ res = platform_get_resource_byname(gpu->pdev, IORESOURCE_MEM,
+ "cx_dbgc");
+
+ if (res)
+ cxdbg = ioremap(res->start, resource_size(res));
+
+ if (cxdbg) {
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
+ A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
+
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
+ A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
+
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
+ 0x76543210);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
+ 0xFEDCBA98);
+
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+ }
+
+ nr_debugbus_blocks = ARRAY_SIZE(a6xx_debugbus_blocks) +
+ (a6xx_has_gbif(to_adreno_gpu(gpu)) ? 1 : 0);
+
+ a6xx_state->debugbus = state_kcalloc(a6xx_state, nr_debugbus_blocks,
+ sizeof(*a6xx_state->debugbus));
+
+ if (a6xx_state->debugbus) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_debugbus_blocks); i++)
+ a6xx_get_debugbus_block(gpu,
+ a6xx_state,
+ &a6xx_debugbus_blocks[i],
+ &a6xx_state->debugbus[i]);
+
+ a6xx_state->nr_debugbus = ARRAY_SIZE(a6xx_debugbus_blocks);
+
+ /*
+		 * GBIF has the same debugbus as the other GPU blocks, so fall
+		 * back to the default path if the GPU uses GBIF; GBIF also
+		 * uses exactly the same block ID as VBIF.
+ */
+ if (a6xx_has_gbif(to_adreno_gpu(gpu))) {
+ a6xx_get_debugbus_block(gpu, a6xx_state,
+ &a6xx_gbif_debugbus_block,
+ &a6xx_state->debugbus[i]);
+
+ a6xx_state->nr_debugbus += 1;
+ }
+ }
+
+ /* Dump the VBIF debugbus on applicable targets */
+ if (!a6xx_has_gbif(to_adreno_gpu(gpu))) {
+ a6xx_state->vbif_debugbus =
+ state_kcalloc(a6xx_state, 1,
+ sizeof(*a6xx_state->vbif_debugbus));
+
+ if (a6xx_state->vbif_debugbus)
+ a6xx_get_vbif_debugbus_block(gpu, a6xx_state,
+ a6xx_state->vbif_debugbus);
+ }
+
+ if (cxdbg) {
+ a6xx_state->cx_debugbus =
+ state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_cx_debugbus_blocks),
+ sizeof(*a6xx_state->cx_debugbus));
+
+ if (a6xx_state->cx_debugbus) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_cx_debugbus_blocks); i++)
+ a6xx_get_cx_debugbus_block(cxdbg,
+ a6xx_state,
+ &a6xx_cx_debugbus_blocks[i],
+ &a6xx_state->cx_debugbus[i]);
+
+ a6xx_state->nr_cx_debugbus =
+ ARRAY_SIZE(a6xx_cx_debugbus_blocks);
+ }
+
+ iounmap(cxdbg);
+ }
+}
+
+#define RANGE(reg, a) ((reg)[(a) + 1] - (reg)[(a)] + 1)
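+
+/*
+ * Worked example: the register lists below are stored as inclusive
+ * (start, end) pairs, so for the pair { 0x8000, 0x8006 } RANGE() yields
+ * 0x8006 - 0x8000 + 1 = 7 registers to read.
+ */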
+
+/* Read a data cluster from behind the AHB aperture */
+static void a6xx_get_dbgahb_cluster(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_dbgahb_cluster *dbgahb,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ size_t datasize;
+ int i, regcount = 0;
+
+ for (i = 0; i < A6XX_NUM_CONTEXTS; i++) {
+ int j;
+
+ in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL,
+ (dbgahb->statetype + i * 2) << 8);
+
+ for (j = 0; j < dbgahb->count; j += 2) {
+ int count = RANGE(dbgahb->registers, j);
+ u32 offset = REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+ dbgahb->registers[j] - (dbgahb->base >> 2);
+
+ in += CRASHDUMP_READ(in, offset, count, out);
+
+ out += count * sizeof(u32);
+
+ if (i == 0)
+ regcount += count;
+ }
+ }
+
+ CRASHDUMP_FINI(in);
+
+ datasize = regcount * A6XX_NUM_CONTEXTS * sizeof(u32);
+
+ if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = dbgahb;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ datasize);
+}
+
+static void a6xx_get_dbgahb_clusters(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i;
+
+ a6xx_state->dbgahb_clusters = state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_dbgahb_clusters),
+ sizeof(*a6xx_state->dbgahb_clusters));
+
+ if (!a6xx_state->dbgahb_clusters)
+ return;
+
+ a6xx_state->nr_dbgahb_clusters = ARRAY_SIZE(a6xx_dbgahb_clusters);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_clusters); i++)
+ a6xx_get_dbgahb_cluster(gpu, a6xx_state,
+ &a6xx_dbgahb_clusters[i],
+ &a6xx_state->dbgahb_clusters[i], dumper);
+}
+
+/* Read a data cluster from the CP aperture with the crashdumper */
+static void a6xx_get_cluster(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_cluster *cluster,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ size_t datasize;
+ int i, regcount = 0;
+
+ /* Some clusters need a selector register to be programmed too */
+ if (cluster->sel_reg)
+ in += CRASHDUMP_WRITE(in, cluster->sel_reg, cluster->sel_val);
+
+ for (i = 0; i < A6XX_NUM_CONTEXTS; i++) {
+ int j;
+
+ in += CRASHDUMP_WRITE(in, REG_A6XX_CP_APERTURE_CNTL_CD,
+ (cluster->id << 8) | (i << 4) | i);
+
+ for (j = 0; j < cluster->count; j += 2) {
+ int count = RANGE(cluster->registers, j);
+
+ in += CRASHDUMP_READ(in, cluster->registers[j],
+ count, out);
+
+ out += count * sizeof(u32);
+
+ if (i == 0)
+ regcount += count;
+ }
+ }
+
+ CRASHDUMP_FINI(in);
+
+ datasize = regcount * A6XX_NUM_CONTEXTS * sizeof(u32);
+
+ if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = cluster;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ datasize);
+}
+
+static void a6xx_get_clusters(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i;
+
+ a6xx_state->clusters = state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_clusters), sizeof(*a6xx_state->clusters));
+
+ if (!a6xx_state->clusters)
+ return;
+
+ a6xx_state->nr_clusters = ARRAY_SIZE(a6xx_clusters);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++)
+ a6xx_get_cluster(gpu, a6xx_state, &a6xx_clusters[i],
+ &a6xx_state->clusters[i], dumper);
+}
+
+/* Read a shader / debug block from the HLSQ aperture with the crashdumper */
+static void a6xx_get_shader_block(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_shader_block *block,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ size_t datasize = block->size * A6XX_NUM_SHADER_BANKS * sizeof(u32);
+ int i;
+
+ if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+ return;
+
+ for (i = 0; i < A6XX_NUM_SHADER_BANKS; i++) {
+ in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL,
+ (block->type << 8) | i);
+
+ in += CRASHDUMP_READ(in, REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE,
+ block->size, dumper->iova + A6XX_CD_DATA_OFFSET);
+ }
+
+ CRASHDUMP_FINI(in);
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = block;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ datasize);
+}
+
+static void a6xx_get_shaders(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i;
+
+ a6xx_state->shaders = state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_shader_blocks), sizeof(*a6xx_state->shaders));
+
+ if (!a6xx_state->shaders)
+ return;
+
+ a6xx_state->nr_shaders = ARRAY_SIZE(a6xx_shader_blocks);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++)
+ a6xx_get_shader_block(gpu, a6xx_state, &a6xx_shader_blocks[i],
+ &a6xx_state->shaders[i], dumper);
+}
+
+/* Read registers from behind the HLSQ aperture with the crashdumper */
+static void a6xx_get_crashdumper_hlsq_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ int i, regcount = 0;
+
+ in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL, regs->val1);
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+ u32 offset = REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+ regs->registers[i] - (regs->val0 >> 2);
+
+ in += CRASHDUMP_READ(in, offset, count, out);
+
+ out += count * sizeof(u32);
+ regcount += count;
+ }
+
+ CRASHDUMP_FINI(in);
+
+ if (WARN_ON((regcount * sizeof(u32)) > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = regs;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ regcount * sizeof(u32));
+}
+
+/* Read a block of registers using the crashdumper */
+static void a6xx_get_crashdumper_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ int i, regcount = 0;
+
+ /* Some blocks might need to program a selector register first */
+ if (regs->val0)
+ in += CRASHDUMP_WRITE(in, regs->val0, regs->val1);
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+
+ in += CRASHDUMP_READ(in, regs->registers[i], count, out);
+
+ out += count * sizeof(u32);
+ regcount += count;
+ }
+
+ CRASHDUMP_FINI(in);
+
+ if (WARN_ON((regcount * sizeof(u32)) > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = regs;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ regcount * sizeof(u32));
+}
+
+/* Read a block of registers via AHB */
+static void a6xx_get_ahb_gpu_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i, regcount = 0, index = 0;
+
+ for (i = 0; i < regs->count; i += 2)
+ regcount += RANGE(regs->registers, i);
+
+ obj->handle = (const void *) regs;
+ obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
+ if (!obj->data)
+ return;
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+ int j;
+
+ for (j = 0; j < count; j++)
+ obj->data[index++] = gpu_read(gpu,
+ regs->registers[i] + j);
+ }
+}
+
+/* Read a block of GMU registers */
+static void _a6xx_get_gmu_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj,
+ bool rscc)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int i, regcount = 0, index = 0;
+
+ for (i = 0; i < regs->count; i += 2)
+ regcount += RANGE(regs->registers, i);
+
+ obj->handle = (const void *) regs;
+ obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
+ if (!obj->data)
+ return;
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+ int j;
+
+ for (j = 0; j < count; j++) {
+ u32 offset = regs->registers[i] + j;
+ u32 val;
+
+ if (rscc)
+ val = gmu_read_rscc(gmu, offset);
+ else
+ val = gmu_read(gmu, offset);
+
+ obj->data[index++] = val;
+ }
+ }
+}
+
+static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
+ 3, sizeof(*a6xx_state->gmu_registers));
+
+ if (!a6xx_state->gmu_registers)
+ return;
+
+ a6xx_state->nr_gmu_registers = 3;
+
+ /* Get the CX GMU registers from AHB */
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
+ &a6xx_state->gmu_registers[0], false);
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[1],
+ &a6xx_state->gmu_registers[1], true);
+
+ if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
+ return;
+
+ /* Set the fence to ALLOW mode so we can access the registers */
+ gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[2],
+ &a6xx_state->gmu_registers[2], false);
+}
+
+static struct msm_gpu_state_bo *a6xx_snapshot_gmu_bo(
+ struct a6xx_gpu_state *a6xx_state, struct a6xx_gmu_bo *bo)
+{
+ struct msm_gpu_state_bo *snapshot;
+
+ if (!bo->size)
+ return NULL;
+
+ snapshot = state_kcalloc(a6xx_state, 1, sizeof(*snapshot));
+ if (!snapshot)
+ return NULL;
+
+ snapshot->iova = bo->iova;
+ snapshot->size = bo->size;
+ snapshot->data = kvzalloc(snapshot->size, GFP_KERNEL);
+ if (!snapshot->data)
+ return NULL;
+
+ memcpy(snapshot->data, bo->virt, bo->size);
+
+ return snapshot;
+}
+
+static void a6xx_snapshot_gmu_hfi_history(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ unsigned i, j;
+
+ BUILD_BUG_ON(ARRAY_SIZE(gmu->queues) != ARRAY_SIZE(a6xx_state->hfi_queue_history));
+
+ for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
+ struct a6xx_hfi_queue *queue = &gmu->queues[i];
+ for (j = 0; j < HFI_HISTORY_SZ; j++) {
+ unsigned idx = (j + queue->history_idx) % HFI_HISTORY_SZ;
+ a6xx_state->hfi_queue_history[i][j] = queue->history[idx];
+ }
+ }
+}
+
+#define A6XX_GBIF_REGLIST_SIZE 1
+static void a6xx_get_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i, count = ARRAY_SIZE(a6xx_ahb_reglist) +
+ ARRAY_SIZE(a6xx_reglist) +
+ ARRAY_SIZE(a6xx_hlsq_reglist) + A6XX_GBIF_REGLIST_SIZE;
+ int index = 0;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ a6xx_state->registers = state_kcalloc(a6xx_state,
+ count, sizeof(*a6xx_state->registers));
+
+ if (!a6xx_state->registers)
+ return;
+
+ a6xx_state->nr_registers = count;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_ahb_reglist); i++)
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a6xx_ahb_reglist[i],
+ &a6xx_state->registers[index++]);
+
+ if (a6xx_has_gbif(adreno_gpu))
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a6xx_gbif_reglist,
+ &a6xx_state->registers[index++]);
+ else
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a6xx_vbif_reglist,
+ &a6xx_state->registers[index++]);
+ if (!dumper) {
+ /*
+ * We can't use the crashdumper when the SMMU is stalled,
+ * because the GPU has no memory access until we resume
+ * translation (but we don't want to do that until after
+ * we have captured as much useful GPU state as possible).
+ * So instead collect registers via the CPU:
+ */
+ for (i = 0; i < ARRAY_SIZE(a6xx_reglist); i++)
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a6xx_reglist[i],
+ &a6xx_state->registers[index++]);
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_reglist); i++)
+ a6xx_get_crashdumper_registers(gpu,
+ a6xx_state, &a6xx_reglist[i],
+ &a6xx_state->registers[index++],
+ dumper);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_hlsq_reglist); i++)
+ a6xx_get_crashdumper_hlsq_registers(gpu,
+ a6xx_state, &a6xx_hlsq_reglist[i],
+ &a6xx_state->registers[index++],
+ dumper);
+}
+
+/* Read a block of data from an indexed register pair */
+static void a6xx_get_indexed_regs(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_indexed_registers *indexed,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i;
+
+ obj->handle = (const void *) indexed;
+ obj->data = state_kcalloc(a6xx_state, indexed->count, sizeof(u32));
+ if (!obj->data)
+ return;
+
+ /* All the indexed banks start at address 0 */
+ gpu_write(gpu, indexed->addr, 0);
+
+ /* Read the data - each read increments the internal address by 1 */
+ for (i = 0; i < indexed->count; i++)
+ obj->data[i] = gpu_read(gpu, indexed->data);
+}
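+
+/*
+ * Illustrative sketch (not part of the driver): any indexed bank can be read
+ * the same way by hand, e.g. the first few dwords of the CP ROQ:
+ *
+ *	gpu_write(gpu, REG_A6XX_CP_ROQ_DBG_ADDR, 0);
+ *	for (i = 0; i < 4; i++)
+ *		data[i] = gpu_read(gpu, REG_A6XX_CP_ROQ_DBG_DATA);
+ */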
+
+static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ u32 mempool_size;
+ int count = ARRAY_SIZE(a6xx_indexed_reglist) + 1;
+ int i;
+
+ a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count,
+ sizeof(*a6xx_state->indexed_regs));
+ if (!a6xx_state->indexed_regs)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_indexed_reglist); i++)
+ a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_indexed_reglist[i],
+ &a6xx_state->indexed_regs[i]);
+
+ /* Set the CP mempool size to 0 to stabilize it while dumping */
+ mempool_size = gpu_read(gpu, REG_A6XX_CP_MEM_POOL_SIZE);
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 0);
+
+ /* Get the contents of the CP mempool */
+ a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_cp_mempool_indexed,
+ &a6xx_state->indexed_regs[i]);
+
+ /*
+ * Offset 0x2000 in the mempool is the size - copy the saved size over
+ * so the data is consistent
+ */
+ a6xx_state->indexed_regs[i].data[0x2000] = mempool_size;
+
+ /* Restore the size in the hardware */
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, mempool_size);
+
+ a6xx_state->nr_indexed_regs = count;
+}
+
+struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct a6xx_crashdumper _dumper = { 0 }, *dumper = NULL;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gpu_state *a6xx_state = kzalloc(sizeof(*a6xx_state),
+ GFP_KERNEL);
+ bool stalled = !!(gpu_read(gpu, REG_A6XX_RBBM_STATUS3) &
+ A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT);
+
+ if (!a6xx_state)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&a6xx_state->objs);
+
+ /* Get the generic state from the adreno core */
+ adreno_gpu_state_get(gpu, &a6xx_state->base);
+
+ a6xx_get_gmu_registers(gpu, a6xx_state);
+
+ a6xx_state->gmu_log = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.log);
+ a6xx_state->gmu_hfi = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.hfi);
+ a6xx_state->gmu_debug = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.debug);
+
+ a6xx_snapshot_gmu_hfi_history(gpu, a6xx_state);
+
+	/* If GX isn't on, the rest of the data isn't going to be accessible */
+ if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
+ return &a6xx_state->base;
+
+ /* Get the banks of indexed registers */
+ a6xx_get_indexed_registers(gpu, a6xx_state);
+
+ /*
+ * Try to initialize the crashdumper, if we are not dumping state
+ * with the SMMU stalled. The crashdumper needs memory access to
+ * write out GPU state, so we need to skip this when the SMMU is
+ * stalled in response to an iova fault
+ */
+ if (!stalled && !gpu->needs_hw_init &&
+ !a6xx_crashdumper_init(gpu, &_dumper)) {
+ dumper = &_dumper;
+ }
+
+ a6xx_get_registers(gpu, a6xx_state, dumper);
+
+ if (dumper) {
+ a6xx_get_shaders(gpu, a6xx_state, dumper);
+ a6xx_get_clusters(gpu, a6xx_state, dumper);
+ a6xx_get_dbgahb_clusters(gpu, a6xx_state, dumper);
+
+ msm_gem_kernel_put(dumper->bo, gpu->aspace);
+ }
+
+ if (snapshot_debugbus)
+ a6xx_get_debugbus(gpu, a6xx_state);
+
+ a6xx_state->gpu_initialized = !gpu->needs_hw_init;
+
+ return &a6xx_state->base;
+}
+
+static void a6xx_gpu_state_destroy(struct kref *kref)
+{
+ struct a6xx_state_memobj *obj, *tmp;
+ struct msm_gpu_state *state = container_of(kref,
+ struct msm_gpu_state, ref);
+ struct a6xx_gpu_state *a6xx_state = container_of(state,
+ struct a6xx_gpu_state, base);
+
+ if (a6xx_state->gmu_log)
+ kvfree(a6xx_state->gmu_log->data);
+
+ if (a6xx_state->gmu_hfi)
+ kvfree(a6xx_state->gmu_hfi->data);
+
+ if (a6xx_state->gmu_debug)
+ kvfree(a6xx_state->gmu_debug->data);
+
+ list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node) {
+ list_del(&obj->node);
+ kvfree(obj);
+ }
+
+ adreno_gpu_state_destroy(state);
+ kfree(a6xx_state);
+}
+
+int a6xx_gpu_state_put(struct msm_gpu_state *state)
+{
+ if (IS_ERR_OR_NULL(state))
+ return 1;
+
+ return kref_put(&state->ref, a6xx_gpu_state_destroy);
+}
+
+static void a6xx_show_registers(const u32 *registers, u32 *data, size_t count,
+ struct drm_printer *p)
+{
+ int i, index = 0;
+
+ if (!data)
+ return;
+
+ for (i = 0; i < count; i += 2) {
+ u32 count = RANGE(registers, i);
+ u32 offset = registers[i];
+ int j;
+
+ for (j = 0; j < count; index++, offset++, j++) {
+ if (data[index] == 0xdeafbead)
+ continue;
+
+ drm_printf(p, " - { offset: 0x%06x, value: 0x%08x }\n",
+ offset << 2, data[index]);
+ }
+ }
+}
+
+static void print_ascii85(struct drm_printer *p, size_t len, u32 *data)
+{
+ char out[ASCII85_BUFSZ];
+ long i, l, datalen = 0;
+
+ for (i = 0; i < len >> 2; i++) {
+ if (data[i])
+ datalen = (i + 1) << 2;
+ }
+
+ if (datalen == 0)
+ return;
+
+ drm_puts(p, " data: !!ascii85 |\n");
+ drm_puts(p, " ");
+
+ l = ascii85_encode_len(datalen);
+
+ for (i = 0; i < l; i++)
+ drm_puts(p, ascii85_encode(data[i], out));
+
+ drm_puts(p, "\n");
+}
+
+static void print_name(struct drm_printer *p, const char *fmt, const char *name)
+{
+ drm_puts(p, fmt);
+ drm_puts(p, name);
+ drm_puts(p, "\n");
+}
+
+static void a6xx_show_shader(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_shader_block *block = obj->handle;
+ int i;
+
+ if (!obj->handle)
+ return;
+
+ print_name(p, " - type: ", block->name);
+
+ for (i = 0; i < A6XX_NUM_SHADER_BANKS; i++) {
+ drm_printf(p, " - bank: %d\n", i);
+ drm_printf(p, " size: %d\n", block->size);
+
+ if (!obj->data)
+ continue;
+
+ print_ascii85(p, block->size << 2,
+ obj->data + (block->size * i));
+ }
+}
+
+static void a6xx_show_cluster_data(const u32 *registers, int size, u32 *data,
+ struct drm_printer *p)
+{
+ int ctx, index = 0;
+
+ for (ctx = 0; ctx < A6XX_NUM_CONTEXTS; ctx++) {
+ int j;
+
+ drm_printf(p, " - context: %d\n", ctx);
+
+ for (j = 0; j < size; j += 2) {
+ u32 count = RANGE(registers, j);
+ u32 offset = registers[j];
+ int k;
+
+ for (k = 0; k < count; index++, offset++, k++) {
+ if (data[index] == 0xdeafbead)
+ continue;
+
+ drm_printf(p, " - { offset: 0x%06x, value: 0x%08x }\n",
+ offset << 2, data[index]);
+ }
+ }
+ }
+}
+
+static void a6xx_show_dbgahb_cluster(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_dbgahb_cluster *dbgahb = obj->handle;
+
+ if (dbgahb) {
+ print_name(p, " - cluster-name: ", dbgahb->name);
+ a6xx_show_cluster_data(dbgahb->registers, dbgahb->count,
+ obj->data, p);
+ }
+}
+
+static void a6xx_show_cluster(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_cluster *cluster = obj->handle;
+
+ if (cluster) {
+ print_name(p, " - cluster-name: ", cluster->name);
+ a6xx_show_cluster_data(cluster->registers, cluster->count,
+ obj->data, p);
+ }
+}
+
+static void a6xx_show_indexed_regs(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_indexed_registers *indexed = obj->handle;
+
+ if (!indexed)
+ return;
+
+ print_name(p, " - regs-name: ", indexed->name);
+ drm_printf(p, " dwords: %d\n", indexed->count);
+
+ print_ascii85(p, indexed->count << 2, obj->data);
+}
+
+static void a6xx_show_debugbus_block(const struct a6xx_debugbus_block *block,
+ u32 *data, struct drm_printer *p)
+{
+ if (block) {
+ print_name(p, " - debugbus-block: ", block->name);
+
+ /*
+ * count for regular debugbus data is in quadwords,
+ * but print the size in dwords for consistency
+ */
+ drm_printf(p, " count: %d\n", block->count << 1);
+
+ print_ascii85(p, block->count << 3, data);
+ }
+}
+
+static void a6xx_show_debugbus(struct a6xx_gpu_state *a6xx_state,
+ struct drm_printer *p)
+{
+ int i;
+
+ for (i = 0; i < a6xx_state->nr_debugbus; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->debugbus[i];
+
+ a6xx_show_debugbus_block(obj->handle, obj->data, p);
+ }
+
+ if (a6xx_state->vbif_debugbus) {
+ struct a6xx_gpu_state_obj *obj = a6xx_state->vbif_debugbus;
+
+ drm_puts(p, " - debugbus-block: A6XX_DBGBUS_VBIF\n");
+ drm_printf(p, " count: %d\n", VBIF_DEBUGBUS_BLOCK_SIZE);
+
+ /* vbif debugbus data is in dwords. Confusing, huh? */
+ print_ascii85(p, VBIF_DEBUGBUS_BLOCK_SIZE << 2, obj->data);
+ }
+
+ for (i = 0; i < a6xx_state->nr_cx_debugbus; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->cx_debugbus[i];
+
+ a6xx_show_debugbus_block(obj->handle, obj->data, p);
+ }
+}
+
+void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ struct a6xx_gpu_state *a6xx_state = container_of(state,
+ struct a6xx_gpu_state, base);
+ int i;
+
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ drm_printf(p, "gpu-initialized: %d\n", a6xx_state->gpu_initialized);
+
+ adreno_show(gpu, state, p);
+
+ drm_puts(p, "gmu-log:\n");
+ if (a6xx_state->gmu_log) {
+ struct msm_gpu_state_bo *gmu_log = a6xx_state->gmu_log;
+
+ drm_printf(p, " iova: 0x%016llx\n", gmu_log->iova);
+ drm_printf(p, " size: %zu\n", gmu_log->size);
+ adreno_show_object(p, &gmu_log->data, gmu_log->size,
+ &gmu_log->encoded);
+ }
+
+ drm_puts(p, "gmu-hfi:\n");
+ if (a6xx_state->gmu_hfi) {
+ struct msm_gpu_state_bo *gmu_hfi = a6xx_state->gmu_hfi;
+ unsigned i, j;
+
+ drm_printf(p, " iova: 0x%016llx\n", gmu_hfi->iova);
+ drm_printf(p, " size: %zu\n", gmu_hfi->size);
+ for (i = 0; i < ARRAY_SIZE(a6xx_state->hfi_queue_history); i++) {
+ drm_printf(p, " queue-history[%u]:", i);
+ for (j = 0; j < HFI_HISTORY_SZ; j++) {
+ drm_printf(p, " %d", a6xx_state->hfi_queue_history[i][j]);
+ }
+ drm_printf(p, "\n");
+ }
+ adreno_show_object(p, &gmu_hfi->data, gmu_hfi->size,
+ &gmu_hfi->encoded);
+ }
+
+ drm_puts(p, "gmu-debug:\n");
+ if (a6xx_state->gmu_debug) {
+ struct msm_gpu_state_bo *gmu_debug = a6xx_state->gmu_debug;
+
+ drm_printf(p, " iova: 0x%016llx\n", gmu_debug->iova);
+ drm_printf(p, " size: %zu\n", gmu_debug->size);
+ adreno_show_object(p, &gmu_debug->data, gmu_debug->size,
+ &gmu_debug->encoded);
+ }
+
+ drm_puts(p, "registers:\n");
+ for (i = 0; i < a6xx_state->nr_registers; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->registers[i];
+ const struct a6xx_registers *regs = obj->handle;
+
+ if (!obj->handle)
+ continue;
+
+ a6xx_show_registers(regs->registers, obj->data, regs->count, p);
+ }
+
+ drm_puts(p, "registers-gmu:\n");
+ for (i = 0; i < a6xx_state->nr_gmu_registers; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->gmu_registers[i];
+ const struct a6xx_registers *regs = obj->handle;
+
+ if (!obj->handle)
+ continue;
+
+ a6xx_show_registers(regs->registers, obj->data, regs->count, p);
+ }
+
+ drm_puts(p, "indexed-registers:\n");
+ for (i = 0; i < a6xx_state->nr_indexed_regs; i++)
+ a6xx_show_indexed_regs(&a6xx_state->indexed_regs[i], p);
+
+ drm_puts(p, "shader-blocks:\n");
+ for (i = 0; i < a6xx_state->nr_shaders; i++)
+ a6xx_show_shader(&a6xx_state->shaders[i], p);
+
+ drm_puts(p, "clusters:\n");
+ for (i = 0; i < a6xx_state->nr_clusters; i++)
+ a6xx_show_cluster(&a6xx_state->clusters[i], p);
+
+ for (i = 0; i < a6xx_state->nr_dbgahb_clusters; i++)
+ a6xx_show_dbgahb_cluster(&a6xx_state->dbgahb_clusters[i], p);
+
+ drm_puts(p, "debugbus:\n");
+ a6xx_show_debugbus(a6xx_state, p);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
new file mode 100644
index 000000000..3bd2065a9
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -0,0 +1,446 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */
+
+#ifndef _A6XX_CRASH_DUMP_H_
+#define _A6XX_CRASH_DUMP_H_
+
+#include "a6xx.xml.h"
+
+#define A6XX_NUM_CONTEXTS 2
+#define A6XX_NUM_SHADER_BANKS 3
+
+static const u32 a6xx_gras_cluster[] = {
+ 0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809d, 0x80a0, 0x80a6,
+ 0x80af, 0x80f1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
+ 0x8400, 0x840b,
+};
+
+static const u32 a6xx_ps_cluster_rac[] = {
+ 0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881e, 0x8820, 0x8865,
+ 0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
+ 0x88c0, 0x88c1, 0x88d0, 0x88e3, 0x8900, 0x890c, 0x890f, 0x891a,
+ 0x8c00, 0x8c01, 0x8c08, 0x8c10, 0x8c17, 0x8c1f, 0x8c26, 0x8c33,
+};
+
+static const u32 a6xx_ps_cluster_rbp[] = {
+ 0x88f0, 0x88f3, 0x890d, 0x890e, 0x8927, 0x8928, 0x8bf0, 0x8bf1,
+ 0x8c02, 0x8c07, 0x8c11, 0x8c16, 0x8c20, 0x8c25,
+};
+
+static const u32 a6xx_ps_cluster[] = {
+ 0x9200, 0x9216, 0x9218, 0x9236, 0x9300, 0x9306,
+};
+
+static const u32 a6xx_fe_cluster[] = {
+ 0x9300, 0x9306, 0x9800, 0x9806, 0x9b00, 0x9b07, 0xa000, 0xa009,
+ 0xa00e, 0xa0ef, 0xa0f8, 0xa0f8,
+};
+
+static const u32 a6xx_pc_vs_cluster[] = {
+ 0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9b00, 0x9b07,
+};
+
+#define CLUSTER_FE 0
+#define CLUSTER_SP_VS 1
+#define CLUSTER_PC_VS 2
+#define CLUSTER_GRAS 3
+#define CLUSTER_SP_PS 4
+#define CLUSTER_PS 5
+
+#define CLUSTER(_id, _reg, _sel_reg, _sel_val) \
+ { .id = _id, .name = #_id,\
+ .registers = _reg, \
+ .count = ARRAY_SIZE(_reg), \
+ .sel_reg = _sel_reg, .sel_val = _sel_val }
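+
+/*
+ * Worked example: CLUSTER(CLUSTER_GRAS, a6xx_gras_cluster, 0, 0) expands to
+ * { .id = CLUSTER_GRAS, .name = "CLUSTER_GRAS",
+ *   .registers = a6xx_gras_cluster,
+ *   .count = ARRAY_SIZE(a6xx_gras_cluster), .sel_reg = 0, .sel_val = 0 },
+ * i.e. an entry with no selector register to program before dumping.
+ */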
+
+static const struct a6xx_cluster {
+ u32 id;
+ const char *name;
+ const u32 *registers;
+ size_t count;
+ u32 sel_reg;
+ u32 sel_val;
+} a6xx_clusters[] = {
+ CLUSTER(CLUSTER_GRAS, a6xx_gras_cluster, 0, 0),
+ CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rac, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x0),
+ CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rbp, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x9),
+ CLUSTER(CLUSTER_PS, a6xx_ps_cluster, 0, 0),
+ CLUSTER(CLUSTER_FE, a6xx_fe_cluster, 0, 0),
+ CLUSTER(CLUSTER_PC_VS, a6xx_pc_vs_cluster, 0, 0),
+};
+
+static const u32 a6xx_sp_vs_hlsq_cluster[] = {
+ 0xb800, 0xb803, 0xb820, 0xb822,
+};
+
+static const u32 a6xx_sp_vs_sp_cluster[] = {
+ 0xa800, 0xa824, 0xa830, 0xa83c, 0xa840, 0xa864, 0xa870, 0xa895,
+ 0xa8a0, 0xa8af, 0xa8c0, 0xa8c3,
+};
+
+static const u32 a6xx_hlsq_duplicate_cluster[] = {
+ 0xbb10, 0xbb11, 0xbb20, 0xbb29,
+};
+
+static const u32 a6xx_hlsq_2d_duplicate_cluster[] = {
+ 0xbd80, 0xbd80,
+};
+
+static const u32 a6xx_sp_duplicate_cluster[] = {
+ 0xab00, 0xab00, 0xab04, 0xab05, 0xab10, 0xab1b, 0xab20, 0xab20,
+};
+
+static const u32 a6xx_tp_duplicate_cluster[] = {
+ 0xb300, 0xb307, 0xb309, 0xb309, 0xb380, 0xb382,
+};
+
+static const u32 a6xx_sp_ps_hlsq_cluster[] = {
+ 0xb980, 0xb980, 0xb982, 0xb987, 0xb990, 0xb99b, 0xb9a0, 0xb9a2,
+ 0xb9c0, 0xb9c9,
+};
+
+static const u32 a6xx_sp_ps_hlsq_2d_cluster[] = {
+ 0xbd80, 0xbd80,
+};
+
+static const u32 a6xx_sp_ps_sp_cluster[] = {
+ 0xa980, 0xa9a8, 0xa9b0, 0xa9bc, 0xa9d0, 0xa9d3, 0xa9e0, 0xa9f3,
+ 0xaa00, 0xaa00, 0xaa30, 0xaa31,
+};
+
+static const u32 a6xx_sp_ps_sp_2d_cluster[] = {
+ 0xacc0, 0xacc0,
+};
+
+static const u32 a6xx_sp_ps_tp_cluster[] = {
+ 0xb180, 0xb183, 0xb190, 0xb191,
+};
+
+static const u32 a6xx_sp_ps_tp_2d_cluster[] = {
+ 0xb4c0, 0xb4d1,
+};
+
+#define CLUSTER_DBGAHB(_id, _base, _type, _reg) \
+ { .name = #_id, .statetype = _type, .base = _base, \
+ .registers = _reg, .count = ARRAY_SIZE(_reg) }
+
+static const struct a6xx_dbgahb_cluster {
+ const char *name;
+ u32 statetype;
+ u32 base;
+ const u32 *registers;
+ size_t count;
+} a6xx_dbgahb_clusters[] = {
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002e000, 0x41, a6xx_sp_vs_hlsq_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002a000, 0x21, a6xx_sp_vs_sp_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002e000, 0x41, a6xx_hlsq_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002f000, 0x45, a6xx_hlsq_2d_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002a000, 0x21, a6xx_sp_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002c000, 0x1, a6xx_tp_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002e000, 0x42, a6xx_sp_ps_hlsq_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002f000, 0x46, a6xx_sp_ps_hlsq_2d_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002a000, 0x22, a6xx_sp_ps_sp_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002b000, 0x26, a6xx_sp_ps_sp_2d_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002c000, 0x2, a6xx_sp_ps_tp_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002d000, 0x6, a6xx_sp_ps_tp_2d_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002e000, 0x42, a6xx_hlsq_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002a000, 0x22, a6xx_sp_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002c000, 0x2, a6xx_tp_duplicate_cluster),
+};
+
+static const u32 a6xx_hlsq_registers[] = {
+ 0xbe00, 0xbe01, 0xbe04, 0xbe05, 0xbe08, 0xbe09, 0xbe10, 0xbe15,
+ 0xbe20, 0xbe23,
+};
+
+static const u32 a6xx_sp_registers[] = {
+ 0xae00, 0xae04, 0xae0c, 0xae0c, 0xae0f, 0xae2b, 0xae30, 0xae32,
+ 0xae35, 0xae35, 0xae3a, 0xae3f, 0xae50, 0xae52,
+};
+
+static const u32 a6xx_tp_registers[] = {
+ 0xb600, 0xb601, 0xb604, 0xb605, 0xb610, 0xb61b, 0xb620, 0xb623,
+};
+
+struct a6xx_registers {
+ const u32 *registers;
+ size_t count;
+ u32 val0;
+ u32 val1;
+};
+
+#define HLSQ_DBG_REGS(_base, _type, _array) \
+ { .val0 = _base, .val1 = _type, .registers = _array, \
+ .count = ARRAY_SIZE(_array), }
+
+static const struct a6xx_registers a6xx_hlsq_reglist[] = {
+ HLSQ_DBG_REGS(0x0002F800, 0x40, a6xx_hlsq_registers),
+ HLSQ_DBG_REGS(0x0002B800, 0x20, a6xx_sp_registers),
+ HLSQ_DBG_REGS(0x0002D800, 0x0, a6xx_tp_registers),
+};
+
+#define SHADER(_type, _size) \
+ { .type = _type, .name = #_type, .size = _size }
+
+static const struct a6xx_shader_block {
+ const char *name;
+ u32 type;
+ u32 size;
+} a6xx_shader_blocks[] = {
+ SHADER(A6XX_TP0_TMO_DATA, 0x200),
+ SHADER(A6XX_TP0_SMO_DATA, 0x80),
+ SHADER(A6XX_TP0_MIPMAP_BASE_DATA, 0x3c0),
+ SHADER(A6XX_TP1_TMO_DATA, 0x200),
+ SHADER(A6XX_TP1_SMO_DATA, 0x80),
+ SHADER(A6XX_TP1_MIPMAP_BASE_DATA, 0x3c0),
+ SHADER(A6XX_SP_INST_DATA, 0x800),
+ SHADER(A6XX_SP_LB_0_DATA, 0x800),
+ SHADER(A6XX_SP_LB_1_DATA, 0x800),
+ SHADER(A6XX_SP_LB_2_DATA, 0x800),
+ SHADER(A6XX_SP_LB_3_DATA, 0x800),
+ SHADER(A6XX_SP_LB_4_DATA, 0x800),
+ SHADER(A6XX_SP_LB_5_DATA, 0x200),
+ SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x800),
+ SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280),
+ SHADER(A6XX_SP_UAV_DATA, 0x80),
+ SHADER(A6XX_SP_INST_TAG, 0x80),
+ SHADER(A6XX_SP_CB_BINDLESS_TAG, 0x80),
+ SHADER(A6XX_SP_TMO_UMO_TAG, 0x80),
+ SHADER(A6XX_SP_SMO_TAG, 0x80),
+ SHADER(A6XX_SP_STATE_DATA, 0x3f),
+ SHADER(A6XX_HLSQ_CHUNK_CVS_RAM, 0x1c0),
+ SHADER(A6XX_HLSQ_CHUNK_CPS_RAM, 0x280),
+ SHADER(A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40),
+ SHADER(A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40),
+ SHADER(A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4),
+ SHADER(A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4),
+ SHADER(A6XX_HLSQ_CVS_MISC_RAM, 0x1c0),
+ SHADER(A6XX_HLSQ_CPS_MISC_RAM, 0x580),
+ SHADER(A6XX_HLSQ_INST_RAM, 0x800),
+ SHADER(A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800),
+ SHADER(A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800),
+ SHADER(A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8),
+ SHADER(A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4),
+ SHADER(A6XX_HLSQ_INST_RAM_TAG, 0x80),
+ SHADER(A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xc),
+ SHADER(A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10),
+ SHADER(A6XX_HLSQ_PWR_REST_RAM, 0x28),
+ SHADER(A6XX_HLSQ_PWR_REST_TAG, 0x14),
+ SHADER(A6XX_HLSQ_DATAPATH_META, 0x40),
+ SHADER(A6XX_HLSQ_FRONTEND_META, 0x40),
+ SHADER(A6XX_HLSQ_INDIRECT_META, 0x40),
+};
+
+static const u32 a6xx_rb_rac_registers[] = {
+ 0x8e04, 0x8e05, 0x8e07, 0x8e08, 0x8e10, 0x8e1c, 0x8e20, 0x8e25,
+ 0x8e28, 0x8e28, 0x8e2c, 0x8e2f, 0x8e50, 0x8e52,
+};
+
+static const u32 a6xx_rb_rbp_registers[] = {
+ 0x8e01, 0x8e01, 0x8e0c, 0x8e0c, 0x8e3b, 0x8e3e, 0x8e40, 0x8e43,
+ 0x8e53, 0x8e5f, 0x8e70, 0x8e77,
+};
+
+static const u32 a6xx_registers[] = {
+ /* RBBM */
+ 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001b,
+ 0x001e, 0x0032, 0x0038, 0x003c, 0x0042, 0x0042, 0x0044, 0x0044,
+ 0x0047, 0x0047, 0x0056, 0x0056, 0x00ad, 0x00ae, 0x00b0, 0x00fb,
+ 0x0100, 0x011d, 0x0200, 0x020d, 0x0218, 0x023d, 0x0400, 0x04f9,
+ 0x0500, 0x0500, 0x0505, 0x050b, 0x050e, 0x0511, 0x0533, 0x0533,
+ 0x0540, 0x0555,
+ /* CP */
+ 0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
+ 0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084f, 0x086f,
+ 0x0880, 0x088a, 0x08a0, 0x08ab, 0x08c0, 0x08c4, 0x08d0, 0x08dd,
+ 0x08f0, 0x08f3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093e,
+ 0x0942, 0x094d, 0x0980, 0x0984, 0x098d, 0x0996, 0x0998, 0x099e,
+ 0x09a0, 0x09a6, 0x09a8, 0x09ae, 0x09b0, 0x09b1, 0x09c2, 0x09c8,
+ 0x0a00, 0x0a03,
+ /* VSC */
+ 0x0c00, 0x0c04, 0x0c06, 0x0c06, 0x0c10, 0x0cd9, 0x0e00, 0x0e0e,
+ /* UCHE */
+ 0x0e10, 0x0e13, 0x0e17, 0x0e19, 0x0e1c, 0x0e2b, 0x0e30, 0x0e32,
+ 0x0e38, 0x0e39,
+ /* GRAS */
+ 0x8600, 0x8601, 0x8610, 0x861b, 0x8620, 0x8620, 0x8628, 0x862b,
+ 0x8630, 0x8637,
+ /* VPC */
+ 0x9600, 0x9604, 0x9624, 0x9637,
+ /* PC */
+ 0x9e00, 0x9e01, 0x9e03, 0x9e0e, 0x9e11, 0x9e16, 0x9e19, 0x9e19,
+ 0x9e1c, 0x9e1c, 0x9e20, 0x9e23, 0x9e30, 0x9e31, 0x9e34, 0x9e34,
+ 0x9e70, 0x9e72, 0x9e78, 0x9e79, 0x9e80, 0x9fff,
+ /* VFD */
+ 0xa600, 0xa601, 0xa603, 0xa603, 0xa60a, 0xa60a, 0xa610, 0xa617,
+ 0xa630, 0xa630,
+};
+
+#define REGS(_array, _sel_reg, _sel_val) \
+ { .registers = _array, .count = ARRAY_SIZE(_array), \
+ .val0 = _sel_reg, .val1 = _sel_val }
+
+static const struct a6xx_registers a6xx_reglist[] = {
+ REGS(a6xx_registers, 0, 0),
+ REGS(a6xx_rb_rac_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0),
+ REGS(a6xx_rb_rbp_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 9),
+};
+
+static const u32 a6xx_ahb_registers[] = {
+ /* RBBM_STATUS - RBBM_STATUS3 */
+ 0x210, 0x213,
+ /* CP_STATUS_1 */
+ 0x825, 0x825,
+};
+
+static const u32 a6xx_vbif_registers[] = {
+ 0x3000, 0x3007, 0x300c, 0x3014, 0x3018, 0x302d, 0x3030, 0x3031,
+ 0x3034, 0x3036, 0x303c, 0x303d, 0x3040, 0x3040, 0x3042, 0x3042,
+ 0x3049, 0x3049, 0x3058, 0x3058, 0x305a, 0x3061, 0x3064, 0x3068,
+ 0x306c, 0x306d, 0x3080, 0x3088, 0x308b, 0x308c, 0x3090, 0x3094,
+ 0x3098, 0x3098, 0x309c, 0x309c, 0x30c0, 0x30c0, 0x30c8, 0x30c8,
+ 0x30d0, 0x30d0, 0x30d8, 0x30d8, 0x30e0, 0x30e0, 0x3100, 0x3100,
+ 0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
+ 0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
+ 0x3156, 0x3156, 0x3158, 0x3158, 0x315a, 0x315a, 0x315c, 0x315c,
+ 0x315e, 0x315e, 0x3160, 0x3160, 0x3162, 0x3162, 0x340c, 0x340c,
+ 0x3410, 0x3410, 0x3800, 0x3801,
+};
+
+static const u32 a6xx_gbif_registers[] = {
+ 0x3C00, 0X3C0B, 0X3C40, 0X3C47, 0X3CC0, 0X3CD1, 0xE3A, 0xE3A,
+};
+
+static const struct a6xx_registers a6xx_ahb_reglist[] = {
+ REGS(a6xx_ahb_registers, 0, 0),
+};
+
+static const struct a6xx_registers a6xx_vbif_reglist =
+ REGS(a6xx_vbif_registers, 0, 0);
+
+static const struct a6xx_registers a6xx_gbif_reglist =
+ REGS(a6xx_gbif_registers, 0, 0);
+
+static const u32 a6xx_gmu_gx_registers[] = {
+ /* GMU GX */
+ 0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b,
+ 0x001e, 0x001e, 0x0020, 0x0023, 0x0026, 0x0026, 0x0028, 0x002b,
+ 0x002e, 0x002e, 0x0030, 0x0033, 0x0036, 0x0036, 0x0038, 0x003b,
+ 0x003e, 0x003e, 0x0040, 0x0043, 0x0046, 0x0046, 0x0080, 0x0084,
+ 0x0100, 0x012b, 0x0140, 0x0140,
+};
+
+static const u32 a6xx_gmu_cx_registers[] = {
+ /* GMU CX */
+ 0x4c00, 0x4c07, 0x4c10, 0x4c12, 0x4d00, 0x4d00, 0x4d07, 0x4d0a,
+ 0x5000, 0x5004, 0x5007, 0x5008, 0x500b, 0x500c, 0x500f, 0x501c,
+ 0x5024, 0x502a, 0x502d, 0x5030, 0x5040, 0x5053, 0x5087, 0x5089,
+ 0x50a0, 0x50a2, 0x50a4, 0x50af, 0x50c0, 0x50c3, 0x50d0, 0x50d0,
+ 0x50e4, 0x50e4, 0x50e8, 0x50ec, 0x5100, 0x5103, 0x5140, 0x5140,
+ 0x5142, 0x5144, 0x514c, 0x514d, 0x514f, 0x5151, 0x5154, 0x5154,
+ 0x5157, 0x5158, 0x515d, 0x515d, 0x5162, 0x5162, 0x5164, 0x5165,
+ 0x5180, 0x5186, 0x5190, 0x519e, 0x51c0, 0x51c0, 0x51c5, 0x51cc,
+ 0x51e0, 0x51e2, 0x51f0, 0x51f0, 0x5200, 0x5201,
+ /* GMU AO */
+ 0x9300, 0x9316, 0x9400, 0x9400,
+ /* GPU CC */
+ 0x9800, 0x9812, 0x9840, 0x9852, 0x9c00, 0x9c04, 0x9c07, 0x9c0b,
+ 0x9c15, 0x9c1c, 0x9c1e, 0x9c2d, 0x9c3c, 0x9c3d, 0x9c3f, 0x9c40,
+ 0x9c42, 0x9c49, 0x9c58, 0x9c5a, 0x9d40, 0x9d5e, 0xa000, 0xa002,
+ 0xa400, 0xa402, 0xac00, 0xac02, 0xb000, 0xb002, 0xb400, 0xb402,
+ 0xb800, 0xb802,
+ /* GPU CC ACD */
+ 0xbc00, 0xbc16, 0xbc20, 0xbc27,
+};
+
+static const u32 a6xx_gmu_cx_rscc_registers[] = {
+ /* GPU RSCC */
+ 0x008c, 0x008c, 0x0101, 0x0102, 0x0340, 0x0342, 0x0344, 0x0347,
+ 0x034c, 0x0387, 0x03ec, 0x03ef, 0x03f4, 0x042f, 0x0494, 0x0497,
+ 0x049c, 0x04d7, 0x053c, 0x053f, 0x0544, 0x057f,
+};
+
+static const struct a6xx_registers a6xx_gmu_reglist[] = {
+ REGS(a6xx_gmu_cx_registers, 0, 0),
+ REGS(a6xx_gmu_cx_rscc_registers, 0, 0),
+ REGS(a6xx_gmu_gx_registers, 0, 0),
+};
+
+static const struct a6xx_indexed_registers {
+ const char *name;
+ u32 addr;
+ u32 data;
+ u32 count;
+} a6xx_indexed_reglist[] = {
+ { "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
+ REG_A6XX_CP_SQE_STAT_DATA, 0x33 },
+ { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
+ REG_A6XX_CP_DRAW_STATE_DATA, 0x100 },
+ { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+ REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x6000 },
+ { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+ REG_A6XX_CP_ROQ_DBG_DATA, 0x400 },
+};
+
+static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
+ "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+ REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060,
+};
+
+#define DEBUGBUS(_id, _count) { .id = _id, .name = #_id, .count = _count }
+
+static const struct a6xx_debugbus_block {
+ const char *name;
+ u32 id;
+ u32 count;
+} a6xx_debugbus_blocks[] = {
+ DEBUGBUS(A6XX_DBGBUS_CP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RBBM, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_HLSQ, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_UCHE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_DPM, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TESS, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_PC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFDP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VPC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TSE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RAS, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VSC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_COM, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_LRZ, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_A2D, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CCUFCHE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RBP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_DCS, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_DBGC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_GMU_GX, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPFCHE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_GPC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_LARC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_HLSQ_SPTP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RB_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RB_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_UCHE_WRAPPER, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CCU_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CCU_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_2, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_3, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SP_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SP_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_2, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_3, 0x100),
+};
+
+static const struct a6xx_debugbus_block a6xx_gbif_debugbus_block =
+ DEBUGBUS(A6XX_DBGBUS_VBIF, 0x100);
+
+static const struct a6xx_debugbus_block a6xx_cx_debugbus_blocks[] = {
+ DEBUGBUS(A6XX_DBGBUS_GMU_CX, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CX, 0x100),
+};
+
+#endif /* _A6XX_CRASH_DUMP_H_ */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
new file mode 100644
index 000000000..2cc83e049
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -0,0 +1,734 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+
+#include <linux/completion.h>
+#include <linux/circ_buf.h>
+#include <linux/list.h>
+
+#include "a6xx_gmu.h"
+#include "a6xx_gmu.xml.h"
+#include "a6xx_gpu.h"
+
+#define HFI_MSG_ID(val) [val] = #val
+
+static const char * const a6xx_hfi_msg_id[] = {
+ HFI_MSG_ID(HFI_H2F_MSG_INIT),
+ HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION),
+ HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
+ HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
+ HFI_MSG_ID(HFI_H2F_MSG_TEST),
+ HFI_MSG_ID(HFI_H2F_MSG_START),
+ HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START),
+ HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE),
+ HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER),
+};
+
+static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,
+ struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
+{
+ struct a6xx_hfi_queue_header *header = queue->header;
+ u32 i, hdr, index = header->read_index;
+
+ if (header->read_index == header->write_index) {
+ header->rx_request = 1;
+ return 0;
+ }
+
+ hdr = queue->data[index];
+
+ queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;
+
+ /*
+	 * If we assume that the GMU firmware is a rational actor, programmed
+	 * never to send us a larger response than we expect, then an
+	 * unexpectedly large header size can only mean memory corruption
+	 * and/or hardware failure. In that case the only reasonable course of
+	 * action is to BUG() so the failure is caught immediately.
+ */
+
+ BUG_ON(HFI_HEADER_SIZE(hdr) > dwords);
+
+ for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
+ data[i] = queue->data[index];
+ index = (index + 1) % header->size;
+ }
+
+ if (!gmu->legacy)
+ index = ALIGN(index, 4) % header->size;
+
+ header->read_index = index;
+ return HFI_HEADER_SIZE(hdr);
+}
+
+static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
+ struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
+{
+ struct a6xx_hfi_queue_header *header = queue->header;
+ u32 i, space, index = header->write_index;
+
+ spin_lock(&queue->lock);
+
+ space = CIRC_SPACE(header->write_index, header->read_index,
+ header->size);
+ if (space < dwords) {
+ header->dropped++;
+ spin_unlock(&queue->lock);
+ return -ENOSPC;
+ }
+
+ queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;
+
+ for (i = 0; i < dwords; i++) {
+ queue->data[index] = data[i];
+ index = (index + 1) % header->size;
+ }
+
+	/* Mark any unused data at the end of the write buffer with a cookie */
+ if (!gmu->legacy) {
+ for (; index % 4; index = (index + 1) % header->size)
+ queue->data[index] = 0xfafafafa;
+ }
+
+ header->write_index = index;
+ spin_unlock(&queue->lock);
+
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
+ return 0;
+}
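+
+/*
+ * Worked example: with a 64-dword queue, read_index 60 and write_index 2,
+ * CIRC_SPACE(2, 60, 64) = 57, so a message of up to 57 dwords still fits
+ * while anything larger is dropped and counted in header->dropped.
+ */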
+
+static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
+ u32 *payload, u32 payload_size)
+{
+ struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
+ u32 val;
+ int ret;
+
+ /* Wait for a response */
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
+ val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);
+
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev,
+ "Message %s id %d timed out waiting for response\n",
+ a6xx_hfi_msg_id[id], seqnum);
+ return -ETIMEDOUT;
+ }
+
+ /* Clear the interrupt */
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
+ A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);
+
+ for (;;) {
+ struct a6xx_hfi_msg_response resp;
+
+ /* Get the next packet */
+ ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
+ sizeof(resp) >> 2);
+
+		/* If the queue is empty, our response never made it */
+ if (!ret) {
+ DRM_DEV_ERROR(gmu->dev,
+ "The HFI response queue is unexpectedly empty\n");
+
+ return -ENOENT;
+ }
+
+ if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
+ struct a6xx_hfi_msg_error *error =
+ (struct a6xx_hfi_msg_error *) &resp;
+
+ DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
+ error->code);
+ continue;
+ }
+
+ if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
+ DRM_DEV_ERROR(gmu->dev,
+ "Unexpected message id %d on the response queue\n",
+ HFI_HEADER_SEQNUM(resp.ret_header));
+ continue;
+ }
+
+ if (resp.error) {
+ DRM_DEV_ERROR(gmu->dev,
+ "Message %s id %d returned error %d\n",
+ a6xx_hfi_msg_id[id], seqnum, resp.error);
+ return -EINVAL;
+ }
+
+ /* All is well, copy over the buffer */
+ if (payload && payload_size)
+ memcpy(payload, resp.payload,
+ min_t(u32, payload_size, sizeof(resp.payload)));
+
+ return 0;
+ }
+}
+
+static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
+ void *data, u32 size, u32 *payload, u32 payload_size)
+{
+ struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
+ int ret, dwords = size >> 2;
+ u32 seqnum;
+
+ seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;
+
+ /* First dword of the message is the message header - fill it in */
+ *((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
+ (dwords << 8) | id;
+
+ ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
+ a6xx_hfi_msg_id[id], seqnum);
+ return ret;
+ }
+
+ return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
+}
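+
+/*
+ * Worked example: the header dword built above packs the sequence number
+ * into bits 20 and up, HFI_MSG_CMD into bits 16-19, the message size in
+ * dwords into bits 8-15 and the message id into the low byte;
+ * HFI_HEADER_SEQNUM() and HFI_HEADER_SIZE() undo this packing on the
+ * response path.
+ */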
+
+static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
+{
+ struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };
+
+ msg.dbg_buffer_addr = (u32) gmu->debug.iova;
+ msg.dbg_buffer_size = (u32) gmu->debug.size;
+ msg.boot_state = boot_state;
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
+{
+ struct a6xx_hfi_msg_fw_version msg = { 0 };
+
+ /* Currently supporting version 1.10 */
+ msg.supported_version = (1 << 28) | (1 << 19) | (1 << 17);
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg),
+ version, sizeof(*version));
+}
+
+static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_perf_table_v1 msg = { 0 };
+ int i;
+
+ msg.num_gpu_levels = gmu->nr_gpu_freqs;
+ msg.num_gmu_levels = gmu->nr_gmu_freqs;
+
+ for (i = 0; i < gmu->nr_gpu_freqs; i++) {
+ msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
+ msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
+ }
+
+ for (i = 0; i < gmu->nr_gmu_freqs; i++) {
+ msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
+ msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
+ }
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_perf_table msg = { 0 };
+ int i;
+
+ msg.num_gpu_levels = gmu->nr_gpu_freqs;
+ msg.num_gmu_levels = gmu->nr_gmu_freqs;
+
+ for (i = 0; i < gmu->nr_gpu_freqs; i++) {
+ msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
+ msg.gx_votes[i].acd = 0xffffffff;
+ msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
+ }
+
+ for (i = 0; i < gmu->nr_gmu_freqs; i++) {
+ msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
+ msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
+ }
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x01;
+
+ msg->ddr_cmds_addrs[0] = 0x50000;
+ msg->ddr_cmds_addrs[1] = 0x5003c;
+ msg->ddr_cmds_addrs[2] = 0x5000c;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+ * These are the CX (CNOC) votes - these are used by the GMU but the
+ * votes are known and fixed for the target
+ */
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x01;
+
+ msg->cnoc_cmds_addrs[0] = 0x5007c;
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
+
+static void a619_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ msg->bw_level_num = 13;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x0;
+
+ msg->ddr_cmds_addrs[0] = 0x50000;
+ msg->ddr_cmds_addrs[1] = 0x50004;
+ msg->ddr_cmds_addrs[2] = 0x50080;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+ msg->ddr_cmds_data[1][0] = 0x6000030c;
+ msg->ddr_cmds_data[1][1] = 0x600000db;
+ msg->ddr_cmds_data[1][2] = 0x60000008;
+ msg->ddr_cmds_data[2][0] = 0x60000618;
+ msg->ddr_cmds_data[2][1] = 0x600001b6;
+ msg->ddr_cmds_data[2][2] = 0x60000008;
+ msg->ddr_cmds_data[3][0] = 0x60000925;
+ msg->ddr_cmds_data[3][1] = 0x60000291;
+ msg->ddr_cmds_data[3][2] = 0x60000008;
+ msg->ddr_cmds_data[4][0] = 0x60000dc1;
+ msg->ddr_cmds_data[4][1] = 0x600003dc;
+ msg->ddr_cmds_data[4][2] = 0x60000008;
+ msg->ddr_cmds_data[5][0] = 0x600010ad;
+ msg->ddr_cmds_data[5][1] = 0x600004ae;
+ msg->ddr_cmds_data[5][2] = 0x60000008;
+ msg->ddr_cmds_data[6][0] = 0x600014c3;
+ msg->ddr_cmds_data[6][1] = 0x600005d4;
+ msg->ddr_cmds_data[6][2] = 0x60000008;
+ msg->ddr_cmds_data[7][0] = 0x6000176a;
+ msg->ddr_cmds_data[7][1] = 0x60000693;
+ msg->ddr_cmds_data[7][2] = 0x60000008;
+ msg->ddr_cmds_data[8][0] = 0x60001f01;
+ msg->ddr_cmds_data[8][1] = 0x600008b5;
+ msg->ddr_cmds_data[8][2] = 0x60000008;
+ msg->ddr_cmds_data[9][0] = 0x60002940;
+ msg->ddr_cmds_data[9][1] = 0x60000b95;
+ msg->ddr_cmds_data[9][2] = 0x60000008;
+ msg->ddr_cmds_data[10][0] = 0x60002f68;
+ msg->ddr_cmds_data[10][1] = 0x60000d50;
+ msg->ddr_cmds_data[10][2] = 0x60000008;
+ msg->ddr_cmds_data[11][0] = 0x60003700;
+ msg->ddr_cmds_data[11][1] = 0x60000f71;
+ msg->ddr_cmds_data[11][2] = 0x60000008;
+ msg->ddr_cmds_data[12][0] = 0x60003fce;
+ msg->ddr_cmds_data[12][1] = 0x600011ea;
+ msg->ddr_cmds_data[12][2] = 0x60000008;
+
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x0;
+
+ msg->cnoc_cmds_addrs[0] = 0x50054;
+
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+}
+
+static void a640_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /*
+ * Send a single "off" entry just to get things running
+ * TODO: bus scaling
+ */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x01;
+
+ msg->ddr_cmds_addrs[0] = 0x50000;
+ msg->ddr_cmds_addrs[1] = 0x5003c;
+ msg->ddr_cmds_addrs[2] = 0x5000c;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+ * These are the CX (CNOC) votes - these are used by the GMU but the
+ * votes are known and fixed for the target
+ */
+ msg->cnoc_cmds_num = 3;
+ msg->cnoc_wait_bitmask = 0x01;
+
+ msg->cnoc_cmds_addrs[0] = 0x50034;
+ msg->cnoc_cmds_addrs[1] = 0x5007c;
+ msg->cnoc_cmds_addrs[2] = 0x5004c;
+
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[0][1] = 0x00000000;
+ msg->cnoc_cmds_data[0][2] = 0x40000000;
+
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+ msg->cnoc_cmds_data[1][1] = 0x20000001;
+ msg->cnoc_cmds_data[1][2] = 0x60000001;
+}
+
+static void a650_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /*
+ * Send a single "off" entry just to get things running
+ * TODO: bus scaling
+ */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x01;
+
+ msg->ddr_cmds_addrs[0] = 0x50000;
+ msg->ddr_cmds_addrs[1] = 0x50004;
+ msg->ddr_cmds_addrs[2] = 0x5007c;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+ * These are the CX (CNOC) votes - these are used by the GMU but the
+ * votes are known and fixed for the target
+ */
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x01;
+
+ msg->cnoc_cmds_addrs[0] = 0x500a4;
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
+
+static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /*
+ * Send a single "off" entry just to get things running
+ * TODO: bus scaling
+ */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x01;
+
+ msg->ddr_cmds_addrs[0] = 0x50004;
+ msg->ddr_cmds_addrs[1] = 0x500a0;
+ msg->ddr_cmds_addrs[2] = 0x50000;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+ * These are the CX (CNOC) votes - these are used by the GMU but the
+ * votes are known and fixed for the target
+ */
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x01;
+
+ msg->cnoc_cmds_addrs[0] = 0x50070;
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
+
+static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /*
+ * Send a single "off" entry just to get things running
+ * TODO: bus scaling
+ */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x07;
+
+ msg->ddr_cmds_addrs[0] = 0x50004;
+ msg->ddr_cmds_addrs[1] = 0x50000;
+ msg->ddr_cmds_addrs[2] = 0x50088;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+ * These are the CX (CNOC) votes - these are used by the GMU but the
+ * votes are known and fixed for the target
+ */
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x01;
+
+ msg->cnoc_cmds_addrs[0] = 0x5006c;
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
+
+static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x07;
+
+ msg->ddr_cmds_addrs[0] = 0x50000;
+ msg->ddr_cmds_addrs[1] = 0x5005c;
+ msg->ddr_cmds_addrs[2] = 0x5000c;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+	 * These are the CX (CNOC) votes. They are used by the GMU, but the
+	 * values for the sdm845 GMU are known and fixed, so we can hard-code them.
+ */
+
+ msg->cnoc_cmds_num = 3;
+ msg->cnoc_wait_bitmask = 0x05;
+
+ msg->cnoc_cmds_addrs[0] = 0x50034;
+ msg->cnoc_cmds_addrs[1] = 0x5007c;
+ msg->cnoc_cmds_addrs[2] = 0x5004c;
+
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[0][1] = 0x00000000;
+ msg->cnoc_cmds_data[0][2] = 0x40000000;
+
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+ msg->cnoc_cmds_data[1][1] = 0x20000001;
+ msg->cnoc_cmds_data[1][2] = 0x60000001;
+}
+
+static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_bw_table msg = { 0 };
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+
+ if (adreno_is_a618(adreno_gpu))
+ a618_build_bw_table(&msg);
+ else if (adreno_is_a619(adreno_gpu))
+ a619_build_bw_table(&msg);
+ else if (adreno_is_a640_family(adreno_gpu))
+ a640_build_bw_table(&msg);
+ else if (adreno_is_a650(adreno_gpu))
+ a650_build_bw_table(&msg);
+ else if (adreno_is_7c3(adreno_gpu))
+ adreno_7c3_build_bw_table(&msg);
+ else if (adreno_is_a660(adreno_gpu))
+ a660_build_bw_table(&msg);
+ else
+ a6xx_build_bw_table(&msg);
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_test msg = { 0 };
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_send_start(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_start msg = { 0 };
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_START, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_core_fw_start msg = { 0 };
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_CORE_FW_START, &msg,
+ sizeof(msg), NULL, 0);
+}
+
+int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index)
+{
+ struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };
+
+ msg.ack_type = 1; /* blocking */
+ msg.freq = index;
+ msg.bw = 0; /* TODO: bus scaling */
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
+ sizeof(msg), NULL, 0);
+}
+
+int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_prep_slumber_cmd msg = { 0 };
+
+	/* TODO: should the freq and bw fields be non-zero? */
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PREPARE_SLUMBER, &msg,
+ sizeof(msg), NULL, 0);
+}
+
+static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state)
+{
+ int ret;
+
+ ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
+ if (ret)
+ return ret;
+
+ ret = a6xx_hfi_get_fw_version(gmu, NULL);
+ if (ret)
+ return ret;
+
+ /*
+	 * We have to exchange version numbers per the boot sequence, but at this
+	 * point the kernel driver doesn't need to know the exact version of
+ * the GMU firmware
+ */
+
+ ret = a6xx_hfi_send_perf_table_v1(gmu);
+ if (ret)
+ return ret;
+
+ ret = a6xx_hfi_send_bw_table(gmu);
+ if (ret)
+ return ret;
+
+ /*
+ * Let the GMU know that there won't be any more HFI messages until next
+ * boot
+ */
+ a6xx_hfi_send_test(gmu);
+
+ return 0;
+}
+
+int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
+{
+ int ret;
+
+ if (gmu->legacy)
+ return a6xx_hfi_start_v1(gmu, boot_state);
+
+ ret = a6xx_hfi_send_perf_table(gmu);
+ if (ret)
+ return ret;
+
+ ret = a6xx_hfi_send_bw_table(gmu);
+ if (ret)
+ return ret;
+
+ ret = a6xx_hfi_send_core_fw_start(gmu);
+ if (ret)
+ return ret;
+
+ /*
+	 * The downstream driver sends this in its "a6xx_hw_init" equivalent,
+	 * but there seems to be no harm in sending it here
+ */
+ ret = a6xx_hfi_send_start(gmu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void a6xx_hfi_stop(struct a6xx_gmu *gmu)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
+ struct a6xx_hfi_queue *queue = &gmu->queues[i];
+
+ if (!queue->header)
+ continue;
+
+ if (queue->header->read_index != queue->header->write_index)
+ DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);
+
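+		/* Reset the indices so the queue starts out empty on the next boot */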
+ queue->header->read_index = 0;
+ queue->header->write_index = 0;
+
+ memset(&queue->history, 0xff, sizeof(queue->history));
+ queue->history_idx = 0;
+ }
+}
+
+static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
+ struct a6xx_hfi_queue_header *header, void *virt, u64 iova,
+ u32 id)
+{
+ spin_lock_init(&queue->lock);
+ queue->header = header;
+ queue->data = virt;
+ atomic_set(&queue->seqnum, 0);
+
+ memset(&queue->history, 0xff, sizeof(queue->history));
+ queue->history_idx = 0;
+
+ /* Set up the shared memory header */
+ header->iova = iova;
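+	/* The low byte is the queue id; the 10 in byte 1 appears to be a priority */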
+ header->type = 10 << 8 | id;
+ header->status = 1;
+ header->size = SZ_4K >> 2;
+ header->msg_size = 0;
+ header->dropped = 0;
+ header->rx_watermark = 1;
+ header->tx_watermark = 1;
+ header->rx_request = 1;
+ header->tx_request = 0;
+ header->read_index = 0;
+ header->write_index = 0;
+}
+
+void a6xx_hfi_init(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gmu_bo *hfi = &gmu->hfi;
+ struct a6xx_hfi_queue_table_header *table = hfi->virt;
+ struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
+ u64 offset;
+ int table_size;
+
+ /*
+ * The table size is the size of the table header plus all of the queue
+ * headers
+ */
+ table_size = sizeof(*table);
+ table_size += (ARRAY_SIZE(gmu->queues) *
+ sizeof(struct a6xx_hfi_queue_header));
+
+ table->version = 0;
+ table->size = table_size;
+ /* First queue header is located immediately after the table header */
+ table->qhdr0_offset = sizeof(*table) >> 2;
+ table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
+ table->num_queues = ARRAY_SIZE(gmu->queues);
+ table->active_queues = ARRAY_SIZE(gmu->queues);
+
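+	/*
+	 * The queue payloads start one page into the HFI buffer, past the
+	 * table and queue headers; each queue gets a 4K page of its own
+	 */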
+ /* Command queue */
+ offset = SZ_4K;
+ a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
+ hfi->iova + offset, 0);
+
+ /* GMU response queue */
+ offset += SZ_4K;
+ a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
+ hfi->iova + offset, gmu->legacy ? 4 : 1);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
new file mode 100644
index 000000000..528110169
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
+
+#ifndef _A6XX_HFI_H_
+#define _A6XX_HFI_H_
+
+struct a6xx_hfi_queue_table_header {
+ u32 version;
+ u32 size; /* Size of the queue table in dwords */
+ u32 qhdr0_offset; /* Offset of the first queue header */
+ u32 qhdr_size; /* Size of the queue headers */
+ u32 num_queues; /* Number of total queues */
+ u32 active_queues; /* Number of active queues */
+};
+
+struct a6xx_hfi_queue_header {
+ u32 status;
+ u32 iova;
+ u32 type;
+	u32 size; /* Queue size, in dwords */
+ u32 msg_size;
+ u32 dropped;
+ u32 rx_watermark;
+ u32 tx_watermark;
+ u32 rx_request;
+ u32 tx_request;
+ u32 read_index;
+ u32 write_index;
+};
+
+struct a6xx_hfi_queue {
+ struct a6xx_hfi_queue_header *header;
+ spinlock_t lock;
+ u32 *data;
+ atomic_t seqnum;
+
+ /*
+ * Tracking for the start index of the last N messages in the
+ * queue, for the benefit of devcore dump / crashdec (since
+ * parsing in the reverse direction to decode the last N
+ * messages is difficult to do and would rely on heuristics
+ * which are not guaranteed to be correct)
+ */
+#define HFI_HISTORY_SZ 8
+ s32 history[HFI_HISTORY_SZ];
+ u8 history_idx;
+};
+
+/* This is the outgoing queue to the GMU */
+#define HFI_COMMAND_QUEUE 0
+
+/* This is the incoming response queue from the GMU */
+#define HFI_RESPONSE_QUEUE 1
+
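+/*
+ * HFI message header layout (per the macros below):
+ *   bits  7:0  - message id
+ *   bits 15:8  - message size, in dwords
+ *   bits 31:20 - sequence number
+ */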
+#define HFI_HEADER_ID(msg) ((msg) & 0xff)
+#define HFI_HEADER_SIZE(msg) (((msg) >> 8) & 0xff)
+#define HFI_HEADER_SEQNUM(msg) (((msg) >> 20) & 0xfff)
+
+/* FIXME: Do we need this or can we use ARRAY_SIZE? */
+#define HFI_RESPONSE_PAYLOAD_SIZE 16
+
+/* HFI message types */
+
+#define HFI_MSG_CMD 0
+#define HFI_MSG_ACK 1
+#define HFI_MSG_ACK_V1 2
+
+#define HFI_F2H_MSG_ACK 126
+
+struct a6xx_hfi_msg_response {
+ u32 header;
+ u32 ret_header;
+ u32 error;
+ u32 payload[HFI_RESPONSE_PAYLOAD_SIZE];
+};
+
+#define HFI_F2H_MSG_ERROR 100
+
+struct a6xx_hfi_msg_error {
+ u32 header;
+ u32 code;
+ u32 payload[2];
+};
+
+#define HFI_H2F_MSG_INIT 0
+
+struct a6xx_hfi_msg_gmu_init_cmd {
+ u32 header;
+ u32 seg_id;
+ u32 dbg_buffer_addr;
+ u32 dbg_buffer_size;
+ u32 boot_state;
+};
+
+#define HFI_H2F_MSG_FW_VERSION 1
+
+struct a6xx_hfi_msg_fw_version {
+ u32 header;
+ u32 supported_version;
+};
+
+#define HFI_H2F_MSG_PERF_TABLE 4
+
+struct perf_level {
+ u32 vote;
+ u32 freq;
+};
+
+struct perf_gx_level {
+ u32 vote;
+ u32 acd;
+ u32 freq;
+};
+
+struct a6xx_hfi_msg_perf_table_v1 {
+ u32 header;
+ u32 num_gpu_levels;
+ u32 num_gmu_levels;
+
+ struct perf_level gx_votes[16];
+ struct perf_level cx_votes[4];
+};
+
+struct a6xx_hfi_msg_perf_table {
+ u32 header;
+ u32 num_gpu_levels;
+ u32 num_gmu_levels;
+
+ struct perf_gx_level gx_votes[16];
+ struct perf_level cx_votes[4];
+};
+
+#define HFI_H2F_MSG_BW_TABLE 3
+
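+/*
+ * Room for up to 16 DDR bandwidth levels of 8 commands each, plus two
+ * CNOC levels of 6 commands each
+ */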
+struct a6xx_hfi_msg_bw_table {
+ u32 header;
+ u32 bw_level_num;
+ u32 cnoc_cmds_num;
+ u32 ddr_cmds_num;
+ u32 cnoc_wait_bitmask;
+ u32 ddr_wait_bitmask;
+ u32 cnoc_cmds_addrs[6];
+ u32 cnoc_cmds_data[2][6];
+ u32 ddr_cmds_addrs[8];
+ u32 ddr_cmds_data[16][8];
+};
+
+#define HFI_H2F_MSG_TEST 5
+
+struct a6xx_hfi_msg_test {
+ u32 header;
+};
+
+#define HFI_H2F_MSG_START 10
+
+struct a6xx_hfi_msg_start {
+ u32 header;
+};
+
+#define HFI_H2F_MSG_CORE_FW_START 14
+
+struct a6xx_hfi_msg_core_fw_start {
+ u32 header;
+ u32 handle;
+};
+
+#define HFI_H2F_MSG_GX_BW_PERF_VOTE 30
+
+struct a6xx_hfi_gx_bw_perf_vote_cmd {
+ u32 header;
+ u32 ack_type;
+ u32 freq;
+ u32 bw;
+};
+
+#define HFI_H2F_MSG_PREPARE_SLUMBER 33
+
+struct a6xx_hfi_prep_slumber_cmd {
+ u32 header;
+ u32 bw;
+ u32 freq;
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
new file mode 100644
index 000000000..abb037ccc
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -0,0 +1,685 @@
+#ifndef ADRENO_COMMON_XML
+#define ADRENO_COMMON_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum chip {
+ A2XX = 0,
+ A3XX = 0,
+ A4XX = 0,
+ A5XX = 0,
+ A6XX = 0,
+};
+
+enum adreno_pa_su_sc_draw {
+ PC_DRAW_POINTS = 0,
+ PC_DRAW_LINES = 1,
+ PC_DRAW_TRIANGLES = 2,
+};
+
+enum adreno_compare_func {
+ FUNC_NEVER = 0,
+ FUNC_LESS = 1,
+ FUNC_EQUAL = 2,
+ FUNC_LEQUAL = 3,
+ FUNC_GREATER = 4,
+ FUNC_NOTEQUAL = 5,
+ FUNC_GEQUAL = 6,
+ FUNC_ALWAYS = 7,
+};
+
+enum adreno_stencil_op {
+ STENCIL_KEEP = 0,
+ STENCIL_ZERO = 1,
+ STENCIL_REPLACE = 2,
+ STENCIL_INCR_CLAMP = 3,
+ STENCIL_DECR_CLAMP = 4,
+ STENCIL_INVERT = 5,
+ STENCIL_INCR_WRAP = 6,
+ STENCIL_DECR_WRAP = 7,
+};
+
+enum adreno_rb_blend_factor {
+ FACTOR_ZERO = 0,
+ FACTOR_ONE = 1,
+ FACTOR_SRC_COLOR = 4,
+ FACTOR_ONE_MINUS_SRC_COLOR = 5,
+ FACTOR_SRC_ALPHA = 6,
+ FACTOR_ONE_MINUS_SRC_ALPHA = 7,
+ FACTOR_DST_COLOR = 8,
+ FACTOR_ONE_MINUS_DST_COLOR = 9,
+ FACTOR_DST_ALPHA = 10,
+ FACTOR_ONE_MINUS_DST_ALPHA = 11,
+ FACTOR_CONSTANT_COLOR = 12,
+ FACTOR_ONE_MINUS_CONSTANT_COLOR = 13,
+ FACTOR_CONSTANT_ALPHA = 14,
+ FACTOR_ONE_MINUS_CONSTANT_ALPHA = 15,
+ FACTOR_SRC_ALPHA_SATURATE = 16,
+ FACTOR_SRC1_COLOR = 20,
+ FACTOR_ONE_MINUS_SRC1_COLOR = 21,
+ FACTOR_SRC1_ALPHA = 22,
+ FACTOR_ONE_MINUS_SRC1_ALPHA = 23,
+};
+
+enum adreno_rb_surface_endian {
+ ENDIAN_NONE = 0,
+ ENDIAN_8IN16 = 1,
+ ENDIAN_8IN32 = 2,
+ ENDIAN_16IN32 = 3,
+ ENDIAN_8IN64 = 4,
+ ENDIAN_8IN128 = 5,
+};
+
+enum adreno_rb_dither_mode {
+ DITHER_DISABLE = 0,
+ DITHER_ALWAYS = 1,
+ DITHER_IF_ALPHA_OFF = 2,
+};
+
+enum adreno_rb_depth_format {
+ DEPTHX_16 = 0,
+ DEPTHX_24_8 = 1,
+ DEPTHX_32 = 2,
+};
+
+enum adreno_rb_copy_control_mode {
+ RB_COPY_RESOLVE = 1,
+ RB_COPY_CLEAR = 2,
+ RB_COPY_DEPTH_STENCIL = 5,
+};
+
+enum a3xx_rop_code {
+ ROP_CLEAR = 0,
+ ROP_NOR = 1,
+ ROP_AND_INVERTED = 2,
+ ROP_COPY_INVERTED = 3,
+ ROP_AND_REVERSE = 4,
+ ROP_INVERT = 5,
+ ROP_NAND = 7,
+ ROP_AND = 8,
+ ROP_EQUIV = 9,
+ ROP_NOOP = 10,
+ ROP_OR_INVERTED = 11,
+ ROP_OR_REVERSE = 13,
+ ROP_OR = 14,
+ ROP_SET = 15,
+};
+
+enum a3xx_render_mode {
+ RB_RENDERING_PASS = 0,
+ RB_TILING_PASS = 1,
+ RB_RESOLVE_PASS = 2,
+ RB_COMPUTE_PASS = 3,
+};
+
+enum a3xx_msaa_samples {
+ MSAA_ONE = 0,
+ MSAA_TWO = 1,
+ MSAA_FOUR = 2,
+ MSAA_EIGHT = 3,
+};
+
+enum a3xx_threadmode {
+ MULTI = 0,
+ SINGLE = 1,
+};
+
+enum a3xx_instrbuffermode {
+ CACHE = 0,
+ BUFFER = 1,
+};
+
+enum a3xx_threadsize {
+ TWO_QUADS = 0,
+ FOUR_QUADS = 1,
+};
+
+enum a3xx_color_swap {
+ WZYX = 0,
+ WXYZ = 1,
+ ZYXW = 2,
+ XYZW = 3,
+};
+
+enum a3xx_rb_blend_opcode {
+ BLEND_DST_PLUS_SRC = 0,
+ BLEND_SRC_MINUS_DST = 1,
+ BLEND_DST_MINUS_SRC = 2,
+ BLEND_MIN_DST_SRC = 3,
+ BLEND_MAX_DST_SRC = 4,
+};
+
+enum a4xx_tess_spacing {
+ EQUAL_SPACING = 0,
+ ODD_SPACING = 2,
+ EVEN_SPACING = 3,
+};
+
+enum a5xx_address_mode {
+ ADDR_32B = 0,
+ ADDR_64B = 1,
+};
+
+enum a5xx_line_mode {
+ BRESENHAM = 0,
+ RECTANGULAR = 1,
+};
+
+#define REG_AXXX_CP_RB_BASE 0x000001c0
+
+#define REG_AXXX_CP_RB_CNTL 0x000001c1
+#define AXXX_CP_RB_CNTL_BUFSZ__MASK 0x0000003f
+#define AXXX_CP_RB_CNTL_BUFSZ__SHIFT 0
+static inline uint32_t AXXX_CP_RB_CNTL_BUFSZ(uint32_t val)
+{
+ return ((val) << AXXX_CP_RB_CNTL_BUFSZ__SHIFT) & AXXX_CP_RB_CNTL_BUFSZ__MASK;
+}
+#define AXXX_CP_RB_CNTL_BLKSZ__MASK 0x00003f00
+#define AXXX_CP_RB_CNTL_BLKSZ__SHIFT 8
+static inline uint32_t AXXX_CP_RB_CNTL_BLKSZ(uint32_t val)
+{
+ return ((val) << AXXX_CP_RB_CNTL_BLKSZ__SHIFT) & AXXX_CP_RB_CNTL_BLKSZ__MASK;
+}
+#define AXXX_CP_RB_CNTL_BUF_SWAP__MASK 0x00030000
+#define AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT 16
+static inline uint32_t AXXX_CP_RB_CNTL_BUF_SWAP(uint32_t val)
+{
+ return ((val) << AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT) & AXXX_CP_RB_CNTL_BUF_SWAP__MASK;
+}
+#define AXXX_CP_RB_CNTL_POLL_EN 0x00100000
+#define AXXX_CP_RB_CNTL_NO_UPDATE 0x08000000
+#define AXXX_CP_RB_CNTL_RPTR_WR_EN 0x80000000
+
+#define REG_AXXX_CP_RB_RPTR_ADDR 0x000001c3
+#define AXXX_CP_RB_RPTR_ADDR_SWAP__MASK 0x00000003
+#define AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT 0
+static inline uint32_t AXXX_CP_RB_RPTR_ADDR_SWAP(uint32_t val)
+{
+ return ((val) << AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT) & AXXX_CP_RB_RPTR_ADDR_SWAP__MASK;
+}
+#define AXXX_CP_RB_RPTR_ADDR_ADDR__MASK 0xfffffffc
+#define AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT 2
+static inline uint32_t AXXX_CP_RB_RPTR_ADDR_ADDR(uint32_t val)
+{
+ return ((val >> 2) << AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT) & AXXX_CP_RB_RPTR_ADDR_ADDR__MASK;
+}
+
+#define REG_AXXX_CP_RB_RPTR 0x000001c4
+
+#define REG_AXXX_CP_RB_WPTR 0x000001c5
+
+#define REG_AXXX_CP_RB_WPTR_DELAY 0x000001c6
+
+#define REG_AXXX_CP_RB_RPTR_WR 0x000001c7
+
+#define REG_AXXX_CP_RB_WPTR_BASE 0x000001c8
+
+#define REG_AXXX_CP_QUEUE_THRESHOLDS 0x000001d5
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK 0x0000000f
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT 0
+static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(uint32_t val)
+{
+ return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK;
+}
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK 0x00000f00
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT 8
+static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(uint32_t val)
+{
+ return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK;
+}
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK 0x000f0000
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT 16
+static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(uint32_t val)
+{
+ return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK;
+}
+
+#define REG_AXXX_CP_MEQ_THRESHOLDS 0x000001d6
+#define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK 0x001f0000
+#define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT 16
+static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_MEQ_END(uint32_t val)
+{
+ return ((val) << AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK;
+}
+#define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK 0x1f000000
+#define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT 24
+static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_ROQ_END(uint32_t val)
+{
+ return ((val) << AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK;
+}
+
+#define REG_AXXX_CP_CSQ_AVAIL 0x000001d7
+#define AXXX_CP_CSQ_AVAIL_RING__MASK 0x0000007f
+#define AXXX_CP_CSQ_AVAIL_RING__SHIFT 0
+static inline uint32_t AXXX_CP_CSQ_AVAIL_RING(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_AVAIL_RING__SHIFT) & AXXX_CP_CSQ_AVAIL_RING__MASK;
+}
+#define AXXX_CP_CSQ_AVAIL_IB1__MASK 0x00007f00
+#define AXXX_CP_CSQ_AVAIL_IB1__SHIFT 8
+static inline uint32_t AXXX_CP_CSQ_AVAIL_IB1(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_AVAIL_IB1__SHIFT) & AXXX_CP_CSQ_AVAIL_IB1__MASK;
+}
+#define AXXX_CP_CSQ_AVAIL_IB2__MASK 0x007f0000
+#define AXXX_CP_CSQ_AVAIL_IB2__SHIFT 16
+static inline uint32_t AXXX_CP_CSQ_AVAIL_IB2(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_AVAIL_IB2__SHIFT) & AXXX_CP_CSQ_AVAIL_IB2__MASK;
+}
+
+#define REG_AXXX_CP_STQ_AVAIL 0x000001d8
+#define AXXX_CP_STQ_AVAIL_ST__MASK 0x0000007f
+#define AXXX_CP_STQ_AVAIL_ST__SHIFT 0
+static inline uint32_t AXXX_CP_STQ_AVAIL_ST(uint32_t val)
+{
+ return ((val) << AXXX_CP_STQ_AVAIL_ST__SHIFT) & AXXX_CP_STQ_AVAIL_ST__MASK;
+}
+
+#define REG_AXXX_CP_MEQ_AVAIL 0x000001d9
+#define AXXX_CP_MEQ_AVAIL_MEQ__MASK 0x0000001f
+#define AXXX_CP_MEQ_AVAIL_MEQ__SHIFT 0
+static inline uint32_t AXXX_CP_MEQ_AVAIL_MEQ(uint32_t val)
+{
+ return ((val) << AXXX_CP_MEQ_AVAIL_MEQ__SHIFT) & AXXX_CP_MEQ_AVAIL_MEQ__MASK;
+}
+
+#define REG_AXXX_SCRATCH_UMSK 0x000001dc
+#define AXXX_SCRATCH_UMSK_UMSK__MASK 0x000000ff
+#define AXXX_SCRATCH_UMSK_UMSK__SHIFT 0
+static inline uint32_t AXXX_SCRATCH_UMSK_UMSK(uint32_t val)
+{
+ return ((val) << AXXX_SCRATCH_UMSK_UMSK__SHIFT) & AXXX_SCRATCH_UMSK_UMSK__MASK;
+}
+#define AXXX_SCRATCH_UMSK_SWAP__MASK 0x00030000
+#define AXXX_SCRATCH_UMSK_SWAP__SHIFT 16
+static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val)
+{
+ return ((val) << AXXX_SCRATCH_UMSK_SWAP__SHIFT) & AXXX_SCRATCH_UMSK_SWAP__MASK;
+}
+
+#define REG_AXXX_SCRATCH_ADDR 0x000001dd
+
+#define REG_AXXX_CP_ME_RDADDR 0x000001ea
+
+#define REG_AXXX_CP_STATE_DEBUG_INDEX 0x000001ec
+
+#define REG_AXXX_CP_STATE_DEBUG_DATA 0x000001ed
+
+#define REG_AXXX_CP_INT_CNTL 0x000001f2
+#define AXXX_CP_INT_CNTL_SW_INT_MASK 0x00080000
+#define AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK 0x00800000
+#define AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK 0x01000000
+#define AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK 0x02000000
+#define AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK 0x04000000
+#define AXXX_CP_INT_CNTL_IB_ERROR_MASK 0x08000000
+#define AXXX_CP_INT_CNTL_IB2_INT_MASK 0x20000000
+#define AXXX_CP_INT_CNTL_IB1_INT_MASK 0x40000000
+#define AXXX_CP_INT_CNTL_RB_INT_MASK 0x80000000
+
+#define REG_AXXX_CP_INT_STATUS 0x000001f3
+
+#define REG_AXXX_CP_INT_ACK 0x000001f4
+
+#define REG_AXXX_CP_ME_CNTL 0x000001f6
+#define AXXX_CP_ME_CNTL_BUSY 0x20000000
+#define AXXX_CP_ME_CNTL_HALT 0x10000000
+
+#define REG_AXXX_CP_ME_STATUS 0x000001f7
+
+#define REG_AXXX_CP_ME_RAM_WADDR 0x000001f8
+
+#define REG_AXXX_CP_ME_RAM_RADDR 0x000001f9
+
+#define REG_AXXX_CP_ME_RAM_DATA 0x000001fa
+
+#define REG_AXXX_CP_DEBUG 0x000001fc
+#define AXXX_CP_DEBUG_PREDICATE_DISABLE 0x00800000
+#define AXXX_CP_DEBUG_PROG_END_PTR_ENABLE 0x01000000
+#define AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE 0x02000000
+#define AXXX_CP_DEBUG_PREFETCH_PASS_NOPS 0x04000000
+#define AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE 0x08000000
+#define AXXX_CP_DEBUG_PREFETCH_MATCH_DISABLE 0x10000000
+#define AXXX_CP_DEBUG_SIMPLE_ME_FLOW_CONTROL 0x40000000
+#define AXXX_CP_DEBUG_MIU_WRITE_PACK_DISABLE 0x80000000
+
+#define REG_AXXX_CP_CSQ_RB_STAT 0x000001fd
+#define AXXX_CP_CSQ_RB_STAT_RPTR__MASK 0x0000007f
+#define AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT 0
+static inline uint32_t AXXX_CP_CSQ_RB_STAT_RPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_RPTR__MASK;
+}
+#define AXXX_CP_CSQ_RB_STAT_WPTR__MASK 0x007f0000
+#define AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT 16
+static inline uint32_t AXXX_CP_CSQ_RB_STAT_WPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_WPTR__MASK;
+}
+
+#define REG_AXXX_CP_CSQ_IB1_STAT 0x000001fe
+#define AXXX_CP_CSQ_IB1_STAT_RPTR__MASK 0x0000007f
+#define AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT 0
+static inline uint32_t AXXX_CP_CSQ_IB1_STAT_RPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_RPTR__MASK;
+}
+#define AXXX_CP_CSQ_IB1_STAT_WPTR__MASK 0x007f0000
+#define AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT 16
+static inline uint32_t AXXX_CP_CSQ_IB1_STAT_WPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_WPTR__MASK;
+}
+
+#define REG_AXXX_CP_CSQ_IB2_STAT 0x000001ff
+#define AXXX_CP_CSQ_IB2_STAT_RPTR__MASK 0x0000007f
+#define AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT 0
+static inline uint32_t AXXX_CP_CSQ_IB2_STAT_RPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_RPTR__MASK;
+}
+#define AXXX_CP_CSQ_IB2_STAT_WPTR__MASK 0x007f0000
+#define AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT 16
+static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK;
+}
+
+#define REG_AXXX_CP_NON_PREFETCH_CNTRS 0x00000440
+
+#define REG_AXXX_CP_STQ_ST_STAT 0x00000443
+
+#define REG_AXXX_CP_ST_BASE 0x0000044d
+
+#define REG_AXXX_CP_ST_BUFSZ 0x0000044e
+
+#define REG_AXXX_CP_MEQ_STAT 0x0000044f
+
+#define REG_AXXX_CP_MIU_TAG_STAT 0x00000452
+
+#define REG_AXXX_CP_BIN_MASK_LO 0x00000454
+
+#define REG_AXXX_CP_BIN_MASK_HI 0x00000455
+
+#define REG_AXXX_CP_BIN_SELECT_LO 0x00000456
+
+#define REG_AXXX_CP_BIN_SELECT_HI 0x00000457
+
+#define REG_AXXX_CP_IB1_BASE 0x00000458
+
+#define REG_AXXX_CP_IB1_BUFSZ 0x00000459
+
+#define REG_AXXX_CP_IB2_BASE 0x0000045a
+
+#define REG_AXXX_CP_IB2_BUFSZ 0x0000045b
+
+#define REG_AXXX_CP_STAT 0x0000047f
+#define AXXX_CP_STAT_CP_BUSY__MASK 0x80000000
+#define AXXX_CP_STAT_CP_BUSY__SHIFT 31
+static inline uint32_t AXXX_CP_STAT_CP_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CP_BUSY__SHIFT) & AXXX_CP_STAT_CP_BUSY__MASK;
+}
+#define AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__MASK 0x40000000
+#define AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__SHIFT 30
+static inline uint32_t AXXX_CP_STAT_VS_EVENT_FIFO_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__MASK;
+}
+#define AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__MASK 0x20000000
+#define AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__SHIFT 29
+static inline uint32_t AXXX_CP_STAT_PS_EVENT_FIFO_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__MASK 0x10000000
+#define AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__SHIFT 28
+static inline uint32_t AXXX_CP_STAT_CF_EVENT_FIFO_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__MASK 0x08000000
+#define AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__SHIFT 27
+static inline uint32_t AXXX_CP_STAT_RB_EVENT_FIFO_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__MASK;
+}
+#define AXXX_CP_STAT_ME_BUSY__MASK 0x04000000
+#define AXXX_CP_STAT_ME_BUSY__SHIFT 26
+static inline uint32_t AXXX_CP_STAT_ME_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_ME_BUSY__SHIFT) & AXXX_CP_STAT_ME_BUSY__MASK;
+}
+#define AXXX_CP_STAT_MIU_WR_C_BUSY__MASK 0x02000000
+#define AXXX_CP_STAT_MIU_WR_C_BUSY__SHIFT 25
+static inline uint32_t AXXX_CP_STAT_MIU_WR_C_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_MIU_WR_C_BUSY__SHIFT) & AXXX_CP_STAT_MIU_WR_C_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CP_3D_BUSY__MASK 0x00800000
+#define AXXX_CP_STAT_CP_3D_BUSY__SHIFT 23
+static inline uint32_t AXXX_CP_STAT_CP_3D_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CP_3D_BUSY__SHIFT) & AXXX_CP_STAT_CP_3D_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CP_NRT_BUSY__MASK 0x00400000
+#define AXXX_CP_STAT_CP_NRT_BUSY__SHIFT 22
+static inline uint32_t AXXX_CP_STAT_CP_NRT_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CP_NRT_BUSY__SHIFT) & AXXX_CP_STAT_CP_NRT_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RBIU_SCRATCH_BUSY__MASK 0x00200000
+#define AXXX_CP_STAT_RBIU_SCRATCH_BUSY__SHIFT 21
+static inline uint32_t AXXX_CP_STAT_RBIU_SCRATCH_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_RBIU_SCRATCH_BUSY__SHIFT) & AXXX_CP_STAT_RBIU_SCRATCH_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RCIU_ME_BUSY__MASK 0x00100000
+#define AXXX_CP_STAT_RCIU_ME_BUSY__SHIFT 20
+static inline uint32_t AXXX_CP_STAT_RCIU_ME_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_RCIU_ME_BUSY__SHIFT) & AXXX_CP_STAT_RCIU_ME_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RCIU_PFP_BUSY__MASK 0x00080000
+#define AXXX_CP_STAT_RCIU_PFP_BUSY__SHIFT 19
+static inline uint32_t AXXX_CP_STAT_RCIU_PFP_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_RCIU_PFP_BUSY__SHIFT) & AXXX_CP_STAT_RCIU_PFP_BUSY__MASK;
+}
+#define AXXX_CP_STAT_MEQ_RING_BUSY__MASK 0x00040000
+#define AXXX_CP_STAT_MEQ_RING_BUSY__SHIFT 18
+static inline uint32_t AXXX_CP_STAT_MEQ_RING_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_MEQ_RING_BUSY__SHIFT) & AXXX_CP_STAT_MEQ_RING_BUSY__MASK;
+}
+#define AXXX_CP_STAT_PFP_BUSY__MASK 0x00020000
+#define AXXX_CP_STAT_PFP_BUSY__SHIFT 17
+static inline uint32_t AXXX_CP_STAT_PFP_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_PFP_BUSY__SHIFT) & AXXX_CP_STAT_PFP_BUSY__MASK;
+}
+#define AXXX_CP_STAT_ST_QUEUE_BUSY__MASK 0x00010000
+#define AXXX_CP_STAT_ST_QUEUE_BUSY__SHIFT 16
+static inline uint32_t AXXX_CP_STAT_ST_QUEUE_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_ST_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_ST_QUEUE_BUSY__MASK;
+}
+#define AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__MASK 0x00002000
+#define AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__SHIFT 13
+static inline uint32_t AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__MASK;
+}
+#define AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__MASK 0x00001000
+#define AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__SHIFT 12
+static inline uint32_t AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RING_QUEUE_BUSY__MASK 0x00000800
+#define AXXX_CP_STAT_RING_QUEUE_BUSY__SHIFT 11
+static inline uint32_t AXXX_CP_STAT_RING_QUEUE_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_RING_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_RING_QUEUE_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CSF_BUSY__MASK 0x00000400
+#define AXXX_CP_STAT_CSF_BUSY__SHIFT 10
+static inline uint32_t AXXX_CP_STAT_CSF_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CSF_BUSY__SHIFT) & AXXX_CP_STAT_CSF_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CSF_ST_BUSY__MASK 0x00000200
+#define AXXX_CP_STAT_CSF_ST_BUSY__SHIFT 9
+static inline uint32_t AXXX_CP_STAT_CSF_ST_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CSF_ST_BUSY__SHIFT) & AXXX_CP_STAT_CSF_ST_BUSY__MASK;
+}
+#define AXXX_CP_STAT_EVENT_BUSY__MASK 0x00000100
+#define AXXX_CP_STAT_EVENT_BUSY__SHIFT 8
+static inline uint32_t AXXX_CP_STAT_EVENT_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_EVENT_BUSY__SHIFT) & AXXX_CP_STAT_EVENT_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CSF_INDIRECT2_BUSY__MASK 0x00000080
+#define AXXX_CP_STAT_CSF_INDIRECT2_BUSY__SHIFT 7
+static inline uint32_t AXXX_CP_STAT_CSF_INDIRECT2_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CSF_INDIRECT2_BUSY__SHIFT) & AXXX_CP_STAT_CSF_INDIRECT2_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CSF_INDIRECTS_BUSY__MASK 0x00000040
+#define AXXX_CP_STAT_CSF_INDIRECTS_BUSY__SHIFT 6
+static inline uint32_t AXXX_CP_STAT_CSF_INDIRECTS_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CSF_INDIRECTS_BUSY__SHIFT) & AXXX_CP_STAT_CSF_INDIRECTS_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CSF_RING_BUSY__MASK 0x00000020
+#define AXXX_CP_STAT_CSF_RING_BUSY__SHIFT 5
+static inline uint32_t AXXX_CP_STAT_CSF_RING_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CSF_RING_BUSY__SHIFT) & AXXX_CP_STAT_CSF_RING_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RCIU_BUSY__MASK 0x00000010
+#define AXXX_CP_STAT_RCIU_BUSY__SHIFT 4
+static inline uint32_t AXXX_CP_STAT_RCIU_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_RCIU_BUSY__SHIFT) & AXXX_CP_STAT_RCIU_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RBIU_BUSY__MASK 0x00000008
+#define AXXX_CP_STAT_RBIU_BUSY__SHIFT 3
+static inline uint32_t AXXX_CP_STAT_RBIU_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_RBIU_BUSY__SHIFT) & AXXX_CP_STAT_RBIU_BUSY__MASK;
+}
+#define AXXX_CP_STAT_MIU_RD_RETURN_BUSY__MASK 0x00000004
+#define AXXX_CP_STAT_MIU_RD_RETURN_BUSY__SHIFT 2
+static inline uint32_t AXXX_CP_STAT_MIU_RD_RETURN_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_MIU_RD_RETURN_BUSY__SHIFT) & AXXX_CP_STAT_MIU_RD_RETURN_BUSY__MASK;
+}
+#define AXXX_CP_STAT_MIU_RD_REQ_BUSY__MASK 0x00000002
+#define AXXX_CP_STAT_MIU_RD_REQ_BUSY__SHIFT 1
+static inline uint32_t AXXX_CP_STAT_MIU_RD_REQ_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_MIU_RD_REQ_BUSY__SHIFT) & AXXX_CP_STAT_MIU_RD_REQ_BUSY__MASK;
+}
+#define AXXX_CP_STAT_MIU_WR_BUSY 0x00000001
+
+#define REG_AXXX_CP_SCRATCH_REG0 0x00000578
+
+#define REG_AXXX_CP_SCRATCH_REG1 0x00000579
+
+#define REG_AXXX_CP_SCRATCH_REG2 0x0000057a
+
+#define REG_AXXX_CP_SCRATCH_REG3 0x0000057b
+
+#define REG_AXXX_CP_SCRATCH_REG4 0x0000057c
+
+#define REG_AXXX_CP_SCRATCH_REG5 0x0000057d
+
+#define REG_AXXX_CP_SCRATCH_REG6 0x0000057e
+
+#define REG_AXXX_CP_SCRATCH_REG7 0x0000057f
+
+#define REG_AXXX_CP_ME_VS_EVENT_SRC 0x00000600
+
+#define REG_AXXX_CP_ME_VS_EVENT_ADDR 0x00000601
+
+#define REG_AXXX_CP_ME_VS_EVENT_DATA 0x00000602
+
+#define REG_AXXX_CP_ME_VS_EVENT_ADDR_SWM 0x00000603
+
+#define REG_AXXX_CP_ME_VS_EVENT_DATA_SWM 0x00000604
+
+#define REG_AXXX_CP_ME_PS_EVENT_SRC 0x00000605
+
+#define REG_AXXX_CP_ME_PS_EVENT_ADDR 0x00000606
+
+#define REG_AXXX_CP_ME_PS_EVENT_DATA 0x00000607
+
+#define REG_AXXX_CP_ME_PS_EVENT_ADDR_SWM 0x00000608
+
+#define REG_AXXX_CP_ME_PS_EVENT_DATA_SWM 0x00000609
+
+#define REG_AXXX_CP_ME_CF_EVENT_SRC 0x0000060a
+
+#define REG_AXXX_CP_ME_CF_EVENT_ADDR 0x0000060b
+
+#define REG_AXXX_CP_ME_CF_EVENT_DATA 0x0000060c
+
+#define REG_AXXX_CP_ME_NRT_ADDR 0x0000060d
+
+#define REG_AXXX_CP_ME_NRT_DATA 0x0000060e
+
+#define REG_AXXX_CP_ME_VS_FETCH_DONE_SRC 0x00000612
+
+#define REG_AXXX_CP_ME_VS_FETCH_DONE_ADDR 0x00000613
+
+#define REG_AXXX_CP_ME_VS_FETCH_DONE_DATA 0x00000614
+
+#endif /* ADRENO_COMMON_XML */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
new file mode 100644
index 000000000..ed1e0c650
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -0,0 +1,746 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013-2014 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
+ */
+
+#include "adreno_gpu.h"
+
+bool hang_debug = false;
+MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
+module_param_named(hang_debug, hang_debug, bool, 0600);
+
+bool snapshot_debugbus = false;
+MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
+module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);
+
+bool allow_vram_carveout = false;
+MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU");
+module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);
+
+static const struct adreno_info gpulist[] = {
+ {
+ .rev = ADRENO_REV(2, 0, 0, 0),
+ .revn = 200,
+ .name = "A200",
+ .fw = {
+ [ADRENO_FW_PM4] = "yamato_pm4.fw",
+ [ADRENO_FW_PFP] = "yamato_pfp.fw",
+ },
+ .gmem = SZ_256K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a2xx_gpu_init,
+	}, { /* a200 on i.mx51 has only 128KiB gmem */
+ .rev = ADRENO_REV(2, 0, 0, 1),
+ .revn = 201,
+ .name = "A200",
+ .fw = {
+ [ADRENO_FW_PM4] = "yamato_pm4.fw",
+ [ADRENO_FW_PFP] = "yamato_pfp.fw",
+ },
+ .gmem = SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a2xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(2, 2, 0, ANY_ID),
+ .revn = 220,
+ .name = "A220",
+ .fw = {
+ [ADRENO_FW_PM4] = "leia_pm4_470.fw",
+ [ADRENO_FW_PFP] = "leia_pfp_470.fw",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a2xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(3, 0, 5, ANY_ID),
+ .revn = 305,
+ .name = "A305",
+ .fw = {
+ [ADRENO_FW_PM4] = "a300_pm4.fw",
+ [ADRENO_FW_PFP] = "a300_pfp.fw",
+ },
+ .gmem = SZ_256K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a3xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(3, 0, 6, 0),
+ .revn = 307, /* because a305c is revn==306 */
+ .name = "A306",
+ .fw = {
+ [ADRENO_FW_PM4] = "a300_pm4.fw",
+ [ADRENO_FW_PFP] = "a300_pfp.fw",
+ },
+ .gmem = SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a3xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
+ .revn = 320,
+ .name = "A320",
+ .fw = {
+ [ADRENO_FW_PM4] = "a300_pm4.fw",
+ [ADRENO_FW_PFP] = "a300_pfp.fw",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a3xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(3, 3, 0, ANY_ID),
+ .revn = 330,
+ .name = "A330",
+ .fw = {
+ [ADRENO_FW_PM4] = "a330_pm4.fw",
+ [ADRENO_FW_PFP] = "a330_pfp.fw",
+ },
+ .gmem = SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a3xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(4, 0, 5, ANY_ID),
+ .revn = 405,
+ .name = "A405",
+ .fw = {
+ [ADRENO_FW_PM4] = "a420_pm4.fw",
+ [ADRENO_FW_PFP] = "a420_pfp.fw",
+ },
+ .gmem = SZ_256K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a4xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(4, 2, 0, ANY_ID),
+ .revn = 420,
+ .name = "A420",
+ .fw = {
+ [ADRENO_FW_PM4] = "a420_pm4.fw",
+ [ADRENO_FW_PFP] = "a420_pfp.fw",
+ },
+ .gmem = (SZ_1M + SZ_512K),
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a4xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(4, 3, 0, ANY_ID),
+ .revn = 430,
+ .name = "A430",
+ .fw = {
+ [ADRENO_FW_PM4] = "a420_pm4.fw",
+ [ADRENO_FW_PFP] = "a420_pfp.fw",
+ },
+ .gmem = (SZ_1M + SZ_512K),
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a4xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(5, 0, 6, ANY_ID),
+ .revn = 506,
+ .name = "A506",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = (SZ_128K + SZ_8K),
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
+ ADRENO_QUIRK_LMLOADKILL_DISABLE,
+ .init = a5xx_gpu_init,
+ .zapfw = "a506_zap.mdt",
+ }, {
+ .rev = ADRENO_REV(5, 0, 8, ANY_ID),
+ .revn = 508,
+ .name = "A508",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = (SZ_128K + SZ_8K),
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
+ .init = a5xx_gpu_init,
+ .zapfw = "a508_zap.mdt",
+ }, {
+ .rev = ADRENO_REV(5, 0, 9, ANY_ID),
+ .revn = 509,
+ .name = "A509",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = (SZ_256K + SZ_16K),
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
+ .init = a5xx_gpu_init,
+ /* Adreno 509 uses the same ZAP as 512 */
+ .zapfw = "a512_zap.mdt",
+ }, {
+ .rev = ADRENO_REV(5, 1, 0, ANY_ID),
+ .revn = 510,
+ .name = "A510",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = SZ_256K,
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .init = a5xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(5, 1, 2, ANY_ID),
+ .revn = 512,
+ .name = "A512",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = (SZ_256K + SZ_16K),
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
+ .init = a5xx_gpu_init,
+ .zapfw = "a512_zap.mdt",
+ }, {
+ .rev = ADRENO_REV(5, 3, 0, 2),
+ .revn = 530,
+ .name = "A530",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ [ADRENO_FW_GPMU] = "a530v3_gpmu.fw2",
+ },
+ .gmem = SZ_1M,
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
+ ADRENO_QUIRK_FAULT_DETECT_MASK,
+ .init = a5xx_gpu_init,
+ .zapfw = "a530_zap.mdt",
+ }, {
+ .rev = ADRENO_REV(5, 4, 0, ANY_ID),
+ .revn = 540,
+ .name = "A540",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ [ADRENO_FW_GPMU] = "a540_gpmu.fw2",
+ },
+ .gmem = SZ_1M,
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
+ .init = a5xx_gpu_init,
+ .zapfw = "a540_zap.mdt",
+ }, {
+ .rev = ADRENO_REV(6, 1, 8, ANY_ID),
+ .revn = 618,
+ .name = "A618",
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a630_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(6, 1, 9, ANY_ID),
+ .revn = 619,
+ .name = "A619",
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a619_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a615_zap.mdt",
+ .hwcg = a615_hwcg,
+ }, {
+ .rev = ADRENO_REV(6, 3, 0, ANY_ID),
+ .revn = 630,
+ .name = "A630",
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a630_gmu.bin",
+ },
+ .gmem = SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a630_zap.mdt",
+ .hwcg = a630_hwcg,
+ }, {
+ .rev = ADRENO_REV(6, 4, 0, ANY_ID),
+ .revn = 640,
+ .name = "A640",
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a640_gmu.bin",
+ },
+ .gmem = SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a640_zap.mdt",
+ .hwcg = a640_hwcg,
+ }, {
+ .rev = ADRENO_REV(6, 5, 0, ANY_ID),
+ .revn = 650,
+ .name = "A650",
+ .fw = {
+ [ADRENO_FW_SQE] = "a650_sqe.fw",
+ [ADRENO_FW_GMU] = "a650_gmu.bin",
+ },
+ .gmem = SZ_1M + SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a650_zap.mdt",
+ .hwcg = a650_hwcg,
+ .address_space_size = SZ_16G,
+ }, {
+ .rev = ADRENO_REV(6, 6, 0, ANY_ID),
+ .revn = 660,
+ .name = "A660",
+ .fw = {
+ [ADRENO_FW_SQE] = "a660_sqe.fw",
+ [ADRENO_FW_GMU] = "a660_gmu.bin",
+ },
+ .gmem = SZ_1M + SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a660_zap.mdt",
+ .hwcg = a660_hwcg,
+ .address_space_size = SZ_16G,
+ }, {
+ .rev = ADRENO_REV(6, 3, 5, ANY_ID),
+ .fw = {
+ [ADRENO_FW_SQE] = "a660_sqe.fw",
+ [ADRENO_FW_GMU] = "a660_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .hwcg = a660_hwcg,
+ .address_space_size = SZ_16G,
+ }, {
+ .rev = ADRENO_REV(6, 8, 0, ANY_ID),
+ .revn = 680,
+ .name = "A680",
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a640_gmu.bin",
+ },
+ .gmem = SZ_2M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a640_zap.mdt",
+ .hwcg = a640_hwcg,
+ },
+};
+
+MODULE_FIRMWARE("qcom/a300_pm4.fw");
+MODULE_FIRMWARE("qcom/a300_pfp.fw");
+MODULE_FIRMWARE("qcom/a330_pm4.fw");
+MODULE_FIRMWARE("qcom/a330_pfp.fw");
+MODULE_FIRMWARE("qcom/a420_pm4.fw");
+MODULE_FIRMWARE("qcom/a420_pfp.fw");
+MODULE_FIRMWARE("qcom/a530_pm4.fw");
+MODULE_FIRMWARE("qcom/a530_pfp.fw");
+MODULE_FIRMWARE("qcom/a530v3_gpmu.fw2");
+MODULE_FIRMWARE("qcom/a530_zap.mdt");
+MODULE_FIRMWARE("qcom/a530_zap.b00");
+MODULE_FIRMWARE("qcom/a530_zap.b01");
+MODULE_FIRMWARE("qcom/a530_zap.b02");
+MODULE_FIRMWARE("qcom/a619_gmu.bin");
+MODULE_FIRMWARE("qcom/a630_sqe.fw");
+MODULE_FIRMWARE("qcom/a630_gmu.bin");
+MODULE_FIRMWARE("qcom/a630_zap.mbn");
+
+static inline bool _rev_match(uint8_t entry, uint8_t id)
+{
+ return (entry == ANY_ID) || (entry == id);
+}
+
+bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2)
+{
+ return _rev_match(rev1.core, rev2.core) &&
+ _rev_match(rev1.major, rev2.major) &&
+ _rev_match(rev1.minor, rev2.minor) &&
+ _rev_match(rev1.patchid, rev2.patchid);
+}
+
+const struct adreno_info *adreno_info(struct adreno_rev rev)
+{
+ int i;
+
+ /* identify gpu: */
+ for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
+ const struct adreno_info *info = &gpulist[i];
+ if (adreno_cmp_rev(info->rev, rev))
+ return info;
+ }
+
+ return NULL;
+}
+
+struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct msm_gpu *gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ int ret;
+
+ if (pdev)
+ gpu = dev_to_gpu(&pdev->dev);
+
+ if (!gpu) {
+ dev_err_once(dev->dev, "no GPU device was found\n");
+ return NULL;
+ }
+
+ adreno_gpu = to_adreno_gpu(gpu);
+
+ /*
+ * The number one reason for HW init to fail is if the firmware isn't
+ * loaded yet. Try that first and don't bother continuing on
+ * otherwise
+ */
+
+ ret = adreno_load_fw(adreno_gpu);
+ if (ret)
+ return NULL;
+
+ /*
+ * Now that we have firmware loaded, and are ready to begin
+ * booting the gpu, go ahead and enable runpm:
+ */
+ pm_runtime_enable(&pdev->dev);
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret);
+ goto err_disable_rpm;
+ }
+
+ mutex_lock(&gpu->lock);
+ ret = msm_gpu_hw_init(gpu);
+ mutex_unlock(&gpu->lock);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
+ goto err_put_rpm;
+ }
+
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+#ifdef CONFIG_DEBUG_FS
+ if (gpu->funcs->debugfs_init) {
+ gpu->funcs->debugfs_init(gpu, dev->primary);
+ gpu->funcs->debugfs_init(gpu, dev->render);
+ }
+#endif
+
+ return gpu;
+
+err_put_rpm:
+ pm_runtime_put_sync_suspend(&pdev->dev);
+err_disable_rpm:
+ pm_runtime_disable(&pdev->dev);
+
+ return NULL;
+}
+
+static int find_chipid(struct device *dev, struct adreno_rev *rev)
+{
+ struct device_node *node = dev->of_node;
+ const char *compat;
+ int ret;
+ u32 chipid;
+
+ /* first search the compat strings for qcom,adreno-XYZ.W: */
+ ret = of_property_read_string_index(node, "compatible", 0, &compat);
+ if (ret == 0) {
+ unsigned int r, patch;
+
+ if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2 ||
+ sscanf(compat, "amd,imageon-%u.%u", &r, &patch) == 2) {
+ rev->core = r / 100;
+ r %= 100;
+ rev->major = r / 10;
+ r %= 10;
+ rev->minor = r;
+ rev->patchid = patch;
+
+ return 0;
+ }
+ }
+
+ /* and if that fails, fall back to legacy "qcom,chipid" property: */
+ ret = of_property_read_u32(node, "qcom,chipid", &chipid);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret);
+ return ret;
+ }
+
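+	/* The legacy chipid packs the revision as 0xCCMMmmpp (core.major.minor.patchid) */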
+ rev->core = (chipid >> 24) & 0xff;
+ rev->major = (chipid >> 16) & 0xff;
+ rev->minor = (chipid >> 8) & 0xff;
+ rev->patchid = (chipid & 0xff);
+
+ dev_warn(dev, "Using legacy qcom,chipid binding!\n");
+ dev_warn(dev, "Use compatible qcom,adreno-%u%u%u.%u instead.\n",
+ rev->core, rev->major, rev->minor, rev->patchid);
+
+ return 0;
+}
+
+static int adreno_bind(struct device *dev, struct device *master, void *data)
+{
+ static struct adreno_platform_config config = {};
+ const struct adreno_info *info;
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+ struct drm_device *drm = priv->dev;
+ struct msm_gpu *gpu;
+ int ret;
+
+ ret = find_chipid(dev, &config.rev);
+ if (ret)
+ return ret;
+
+ dev->platform_data = &config;
+ priv->gpu_pdev = to_platform_device(dev);
+
+ info = adreno_info(config.rev);
+
+ if (!info) {
+ dev_warn(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
+ config.rev.core, config.rev.major,
+ config.rev.minor, config.rev.patchid);
+ return -ENXIO;
+ }
+
+ DBG("Found GPU: %u.%u.%u.%u", config.rev.core, config.rev.major,
+ config.rev.minor, config.rev.patchid);
+
+ priv->is_a2xx = config.rev.core == 2;
+ priv->has_cached_coherent = config.rev.core >= 6;
+
+ gpu = info->init(drm);
+ if (IS_ERR(gpu)) {
+ dev_warn(drm->dev, "failed to load adreno gpu\n");
+ return PTR_ERR(gpu);
+ }
+
+ return 0;
+}
+
+static int adreno_system_suspend(struct device *dev);
+static void adreno_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+ struct msm_gpu *gpu = dev_to_gpu(dev);
+
+ if (pm_runtime_enabled(dev))
+ WARN_ON_ONCE(adreno_system_suspend(dev));
+ gpu->funcs->destroy(gpu);
+
+ priv->gpu_pdev = NULL;
+}
+
+static const struct component_ops a3xx_ops = {
+ .bind = adreno_bind,
+ .unbind = adreno_unbind,
+};
+
+static void adreno_device_register_headless(void)
+{
+	/* on imx5 we don't have a top-level mdp/dpu node;
+	 * this creates a dummy node for the driver for that case
+ */
+ struct platform_device_info dummy_info = {
+ .parent = NULL,
+ .name = "msm",
+ .id = -1,
+ .res = NULL,
+ .num_res = 0,
+ .data = NULL,
+ .size_data = 0,
+ .dma_mask = ~0,
+ };
+ platform_device_register_full(&dummy_info);
+}
+
+static int adreno_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &a3xx_ops);
+ if (ret)
+ return ret;
+
+ if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon"))
+ adreno_device_register_headless();
+
+ return 0;
+}
+
+static int adreno_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &a3xx_ops);
+ return 0;
+}
+
+static void adreno_shutdown(struct platform_device *pdev)
+{
+ WARN_ON_ONCE(adreno_system_suspend(&pdev->dev));
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,adreno" },
+ { .compatible = "qcom,adreno-3xx" },
+ /* for compatibility with imx5 gpu: */
+ { .compatible = "amd,imageon" },
+ /* for backwards compat w/ downstream kgsl DT files: */
+ { .compatible = "qcom,kgsl-3d0" },
+ {}
+};
+
+static int adreno_runtime_resume(struct device *dev)
+{
+ struct msm_gpu *gpu = dev_to_gpu(dev);
+
+ return gpu->funcs->pm_resume(gpu);
+}
+
+static int adreno_runtime_suspend(struct device *dev)
+{
+ struct msm_gpu *gpu = dev_to_gpu(dev);
+
+ /*
+ * We should be holding a runpm ref, which will prevent
+ * runtime suspend. In the system suspend path, we've
+ * already waited for active jobs to complete.
+ */
+ WARN_ON_ONCE(gpu->active_submits);
+
+ return gpu->funcs->pm_suspend(gpu);
+}
+
+static void suspend_scheduler(struct msm_gpu *gpu)
+{
+ int i;
+
+ /*
+ * Shut down the scheduler before we force suspend, so that
+ * suspend isn't racing with scheduler kthread feeding us
+ * more work.
+ *
+ * Note, we just want to park the thread, and let any jobs
+ * that are already on the hw queue complete normally, as
+ * opposed to the drm_sched_stop() path used for handling
+ * faulting/timed-out jobs. We can't really cancel any jobs
+ * already on the hw queue without racing with the GPU.
+ */
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
+ kthread_park(sched->thread);
+ }
+}
+
+static void resume_scheduler(struct msm_gpu *gpu)
+{
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
+ kthread_unpark(sched->thread);
+ }
+}
+
+static int adreno_system_suspend(struct device *dev)
+{
+ struct msm_gpu *gpu = dev_to_gpu(dev);
+ int remaining, ret;
+
+ if (!gpu)
+ return 0;
+
+ suspend_scheduler(gpu);
+
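+	/* Give in-flight submits up to a second to retire before forcing suspend */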
+ remaining = wait_event_timeout(gpu->retire_event,
+ gpu->active_submits == 0,
+ msecs_to_jiffies(1000));
+ if (remaining == 0) {
+ dev_err(dev, "Timeout waiting for GPU to suspend\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = pm_runtime_force_suspend(dev);
+out:
+ if (ret)
+ resume_scheduler(gpu);
+
+ return ret;
+}
+
+static int adreno_system_resume(struct device *dev)
+{
+ struct msm_gpu *gpu = dev_to_gpu(dev);
+
+ if (!gpu)
+ return 0;
+
+ resume_scheduler(gpu);
+ return pm_runtime_force_resume(dev);
+}
+
+static const struct dev_pm_ops adreno_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(adreno_system_suspend, adreno_system_resume)
+ RUNTIME_PM_OPS(adreno_runtime_suspend, adreno_runtime_resume, NULL)
+};
+
+static struct platform_driver adreno_driver = {
+ .probe = adreno_probe,
+ .remove = adreno_remove,
+ .shutdown = adreno_shutdown,
+ .driver = {
+ .name = "adreno",
+ .of_match_table = dt_match,
+ .pm = &adreno_pm_ops,
+ },
+};
+
+void __init adreno_register(void)
+{
+ platform_driver_register(&adreno_driver);
+}
+
+void __exit adreno_unregister(void)
+{
+ platform_driver_unregister(&adreno_driver);
+}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
new file mode 100644
index 000000000..dfd4eec21
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -0,0 +1,1095 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/ascii85.h>
+#include <linux/interconnect.h>
+#include <linux/qcom_scm.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/mdt_loader.h>
+#include <linux/nvmem-consumer.h>
+#include <soc/qcom/ocmem.h>
+#include "adreno_gpu.h"
+#include "a6xx_gpu.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
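+/*
+ * Allows overriding the size of the per-process private GPU address
+ * space at load time, e.g. with msm.address_space_size=... on the
+ * kernel command line (assuming the driver is built as the usual
+ * "msm" module):
+ */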
+static u64 address_space_size = 0;
+MODULE_PARM_DESC(address_space_size, "Override for the size of a process's private GPU address space");
+module_param(address_space_size, ullong, 0600);
+
+static bool zap_available = true;
+
+static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
+ u32 pasid)
+{
+ struct device *dev = &gpu->pdev->dev;
+ const struct firmware *fw;
+ const char *signed_fwname = NULL;
+ struct device_node *np, *mem_np;
+ struct resource r;
+ phys_addr_t mem_phys;
+ ssize_t mem_size;
+ void *mem_region = NULL;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_ARCH_QCOM)) {
+ zap_available = false;
+ return -EINVAL;
+ }
+
+ np = of_get_child_by_name(dev->of_node, "zap-shader");
+ if (!np) {
+ zap_available = false;
+ return -ENODEV;
+ }
+
+ mem_np = of_parse_phandle(np, "memory-region", 0);
+ of_node_put(np);
+ if (!mem_np) {
+ zap_available = false;
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(mem_np, 0, &r);
+ of_node_put(mem_np);
+ if (ret)
+ return ret;
+
+ mem_phys = r.start;
+
+ /*
+ * Check for a firmware-name property. This is the new scheme
+ * to handle firmware that may be signed with device specific
+ * keys, allowing us to have a different zap fw path for different
+ * devices.
+ *
+ * If the firmware-name property is found, we bypass the
+ * adreno_request_fw() mechanism, because we don't need to handle
+ * the /lib/firmware/qcom/... vs /lib/firmware/... case.
+ *
+ * If the firmware-name property is not found, for backwards
+ * compatibility we fall back to the fwname from the gpulist
+ * table.
+ */
+ of_property_read_string_index(np, "firmware-name", 0, &signed_fwname);
+ if (signed_fwname) {
+ fwname = signed_fwname;
+ ret = request_firmware_direct(&fw, fwname, gpu->dev->dev);
+ if (ret)
+ fw = ERR_PTR(ret);
+ } else if (fwname) {
+ /* Request the MDT file from the default location: */
+ fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
+ } else {
+ /*
+ * For new targets, we require the firmware-name property,
+ * if a zap-shader is required, rather than falling back
+ * to a firmware name specified in gpulist.
+ *
+ * Because the firmware is signed with a (potentially)
+ * device specific key, having the name come from gpulist
+ * was a bad idea, and is only provided for backwards
+ * compatibility for older targets.
+ */
+ return -ENODEV;
+ }
+
+ if (IS_ERR(fw)) {
+ DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
+ return PTR_ERR(fw);
+ }
+
+ /* Figure out how much memory we need */
+ mem_size = qcom_mdt_get_size(fw);
+ if (mem_size < 0) {
+ ret = mem_size;
+ goto out;
+ }
+
+ if (mem_size > resource_size(&r)) {
+ DRM_DEV_ERROR(dev,
+ "memory region is too small to load the MDT\n");
+ ret = -E2BIG;
+ goto out;
+ }
+
+ /* Allocate memory for the firmware image */
+ mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
+ if (!mem_region) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Load the rest of the MDT
+ *
+ * Note that we could be dealing with two different paths, since
+ * with upstream linux-firmware it would be in a qcom/ subdir..
+ * adreno_request_fw() handles this, but qcom_mdt_load() does
+ * not. But since we've already gotten through adreno_request_fw()
+ * we know which of the two cases it is:
+ */
+ if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) {
+ ret = qcom_mdt_load(dev, fw, fwname, pasid,
+ mem_region, mem_phys, mem_size, NULL);
+ } else {
+ char *newname;
+
+ newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
+
+ ret = qcom_mdt_load(dev, fw, newname, pasid,
+ mem_region, mem_phys, mem_size, NULL);
+ kfree(newname);
+ }
+ if (ret)
+ goto out;
+
+ /* Send the image to the secure world */
+ ret = qcom_scm_pas_auth_and_reset(pasid);
+
+ /*
+ * If the scm call returns -EOPNOTSUPP we assume that this target
+ * doesn't need/support the zap shader so quietly fail
+ */
+ if (ret == -EOPNOTSUPP)
+ zap_available = false;
+ else if (ret)
+ DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
+
+out:
+ if (mem_region)
+ memunmap(mem_region);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct platform_device *pdev = gpu->pdev;
+
+ /* Short cut if we determine the zap shader isn't available/needed */
+ if (!zap_available)
+ return -ENODEV;
+
+ /* We need SCM to be able to load the firmware */
+ if (!qcom_scm_is_available()) {
+ DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
+ return -EPROBE_DEFER;
+ }
+
+ return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
+}
+
+void adreno_set_llc_attributes(struct iommu_domain *iommu)
+{
+ iommu_set_pgtable_quirks(iommu, IO_PGTABLE_QUIRK_ARM_OUTER_WBWA);
+}
+
+struct msm_gem_address_space *
+adreno_iommu_create_address_space(struct msm_gpu *gpu,
+ struct platform_device *pdev)
+{
+ struct iommu_domain *iommu;
+ struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
+ u64 start, size;
+
+ iommu = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu)
+ return NULL;
+
+ mmu = msm_iommu_new(&pdev->dev, iommu);
+ if (IS_ERR(mmu)) {
+ iommu_domain_free(iommu);
+ return ERR_CAST(mmu);
+ }
+
+ /*
+ * Use the aperture start or SZ_16M, whichever is greater. This will
+ * ensure that we align with the allocated pagetable range while still
+ * allowing room in the lower 32 bits for GMEM and whatnot
+ */
+ start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
+ size = iommu->geometry.aperture_end - start + 1;
+
+ aspace = msm_gem_address_space_create(mmu, "gpu",
+ start & GENMASK_ULL(48, 0), size);
+
+ if (IS_ERR(aspace) && !IS_ERR(mmu))
+ mmu->funcs->destroy(mmu);
+
+ return aspace;
+}
+
+u64 adreno_private_address_space_size(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ if (address_space_size)
+ return address_space_size;
+
+ if (adreno_gpu->info->address_space_size)
+ return adreno_gpu->info->address_space_size;
+
+ return SZ_4G;
+}
+
+int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ uint32_t param, uint64_t *value, uint32_t *len)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ /* No pointer params yet */
+ if (*len != 0)
+ return -EINVAL;
+
+ switch (param) {
+ case MSM_PARAM_GPU_ID:
+ *value = adreno_gpu->info->revn;
+ return 0;
+ case MSM_PARAM_GMEM_SIZE:
+ *value = adreno_gpu->gmem;
+ return 0;
+ case MSM_PARAM_GMEM_BASE:
+ *value = !adreno_is_a650_family(adreno_gpu) ? 0x100000 : 0;
+ return 0;
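+	/* The chip-id packs the rev as core.major.minor.patchid, one byte
+	 * each; when there is no numeric revn, the speedbin is also packed
+	 * into bits 47..32 so userspace can identify the exact SKU:
+	 */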
+ case MSM_PARAM_CHIP_ID:
+ *value = (uint64_t)adreno_gpu->rev.patchid |
+ ((uint64_t)adreno_gpu->rev.minor << 8) |
+ ((uint64_t)adreno_gpu->rev.major << 16) |
+ ((uint64_t)adreno_gpu->rev.core << 24);
+ if (!adreno_gpu->info->revn)
+ *value |= ((uint64_t) adreno_gpu->speedbin) << 32;
+ return 0;
+ case MSM_PARAM_MAX_FREQ:
+ *value = adreno_gpu->base.fast_rate;
+ return 0;
+ case MSM_PARAM_TIMESTAMP:
+ if (adreno_gpu->funcs->get_timestamp) {
+ int ret;
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ ret = adreno_gpu->funcs->get_timestamp(gpu, value);
+ pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
+ return ret;
+ }
+ return -EINVAL;
+ case MSM_PARAM_PRIORITIES:
+ *value = gpu->nr_rings * NR_SCHED_PRIORITIES;
+ return 0;
+ case MSM_PARAM_PP_PGTABLE:
+ *value = 0;
+ return 0;
+ case MSM_PARAM_FAULTS:
+ if (ctx->aspace)
+ *value = gpu->global_faults + ctx->aspace->faults;
+ else
+ *value = gpu->global_faults;
+ return 0;
+ case MSM_PARAM_SUSPENDS:
+ *value = gpu->suspend_count;
+ return 0;
+ case MSM_PARAM_VA_START:
+ if (ctx->aspace == gpu->aspace)
+ return -EINVAL;
+ *value = ctx->aspace->va_start;
+ return 0;
+ case MSM_PARAM_VA_SIZE:
+ if (ctx->aspace == gpu->aspace)
+ return -EINVAL;
+ *value = ctx->aspace->va_size;
+ return 0;
+ default:
+ DBG("%s: invalid param: %u", gpu->name, param);
+ return -EINVAL;
+ }
+}
+
+int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ uint32_t param, uint64_t value, uint32_t len)
+{
+ switch (param) {
+ case MSM_PARAM_COMM:
+ case MSM_PARAM_CMDLINE:
+ /* kstrdup_quotable_cmdline() limits to PAGE_SIZE, so
+ * that should be a reasonable upper bound
+ */
+ if (len > PAGE_SIZE)
+ return -EINVAL;
+ break;
+ default:
+ if (len != 0)
+ return -EINVAL;
+ }
+
+ switch (param) {
+ case MSM_PARAM_COMM:
+ case MSM_PARAM_CMDLINE: {
+ char *str, **paramp;
+
+ str = kmalloc(len + 1, GFP_KERNEL);
+ if (!str)
+ return -ENOMEM;
+
+ if (copy_from_user(str, u64_to_user_ptr(value), len)) {
+ kfree(str);
+ return -EFAULT;
+ }
+
+ /* Ensure string is null terminated: */
+ str[len] = '\0';
+
+ mutex_lock(&gpu->lock);
+
+ if (param == MSM_PARAM_COMM) {
+ paramp = &ctx->comm;
+ } else {
+ paramp = &ctx->cmdline;
+ }
+
+ kfree(*paramp);
+ *paramp = str;
+
+ mutex_unlock(&gpu->lock);
+
+ return 0;
+ }
+ case MSM_PARAM_SYSPROF:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return msm_file_private_set_sysprof(ctx, gpu, value);
+ default:
+ DBG("%s: invalid param: %u", gpu->name, param);
+ return -EINVAL;
+ }
+}
+
+const struct firmware *
+adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
+{
+ struct drm_device *drm = adreno_gpu->base.dev;
+ const struct firmware *fw = NULL;
+ char *newname;
+ int ret;
+
+ newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
+ if (!newname)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Try first to load from qcom/$fwfile using a direct load (to avoid
+ * a potential timeout waiting for usermode helper)
+ */
+ if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
+ (adreno_gpu->fwloc == FW_LOCATION_NEW)) {
+
+ ret = request_firmware_direct(&fw, newname, drm->dev);
+ if (!ret) {
+ DRM_DEV_INFO(drm->dev, "loaded %s from new location\n",
+ newname);
+ adreno_gpu->fwloc = FW_LOCATION_NEW;
+ goto out;
+ } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
+ DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
+ newname, ret);
+ fw = ERR_PTR(ret);
+ goto out;
+ }
+ }
+
+ /*
+ * Then try the legacy location without qcom/ prefix
+ */
+ if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
+ (adreno_gpu->fwloc == FW_LOCATION_LEGACY)) {
+
+ ret = request_firmware_direct(&fw, fwname, drm->dev);
+ if (!ret) {
+ DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
+ newname);
+ adreno_gpu->fwloc = FW_LOCATION_LEGACY;
+ goto out;
+ } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
+ DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
+ fwname, ret);
+ fw = ERR_PTR(ret);
+ goto out;
+ }
+ }
+
+ /*
+ * Finally fall back to request_firmware() for cases where the
+ * usermode helper is needed (I think mainly android)
+ */
+ if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
+ (adreno_gpu->fwloc == FW_LOCATION_HELPER)) {
+
+ ret = request_firmware(&fw, newname, drm->dev);
+ if (!ret) {
+ DRM_DEV_INFO(drm->dev, "loaded %s with helper\n",
+ newname);
+ adreno_gpu->fwloc = FW_LOCATION_HELPER;
+ goto out;
+ } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
+ DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
+ newname, ret);
+ fw = ERR_PTR(ret);
+ goto out;
+ }
+ }
+
+ DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname);
+ fw = ERR_PTR(-ENOENT);
+out:
+ kfree(newname);
+ return fw;
+}
+
+int adreno_load_fw(struct adreno_gpu *adreno_gpu)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) {
+ const struct firmware *fw;
+
+ if (!adreno_gpu->info->fw[i])
+ continue;
+
+ /* Skip if the firmware has already been loaded */
+ if (adreno_gpu->fw[i])
+ continue;
+
+ fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->fw[i]);
+ if (IS_ERR(fw))
+ return PTR_ERR(fw);
+
+ adreno_gpu->fw[i] = fw;
+ }
+
+ return 0;
+}
+
+struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
+ const struct firmware *fw, u64 *iova)
+{
+ struct drm_gem_object *bo;
+ void *ptr;
+
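+	/* The leading dword of the image appears to be a header word
+	 * rather than ucode, hence the -4/+4 when copying below:
+	 */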
+ ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4,
+ MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
+
+ if (IS_ERR(ptr))
+ return ERR_CAST(ptr);
+
+ memcpy(ptr, &fw->data[4], fw->size - 4);
+
+ msm_gem_put_vaddr(bo);
+
+ return bo;
+}
+
+int adreno_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret, i;
+
+ VERB("%s", gpu->name);
+
+ ret = adreno_load_fw(adreno_gpu);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ if (!ring)
+ continue;
+
+ ring->cur = ring->start;
+ ring->next = ring->start;
+ ring->memptrs->rptr = 0;
+
+		/* Detect and clean up an impossible fence, i.e. if the GPU
+		 * managed to scribble something invalid, we don't want that
+		 * to confuse us into mistakenly believing that submits have
+		 * completed.
+		 */
+ if (fence_before(ring->fctx->last_fence, ring->memptrs->fence)) {
+ ring->memptrs->fence = ring->fctx->last_fence;
+ }
+ }
+
+ return 0;
+}
+
+/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
+static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
+ struct msm_ringbuffer *ring)
+{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ return gpu->funcs->get_rptr(gpu, ring);
+}
+
+struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
+{
+ return gpu->rb[0];
+}
+
+void adreno_recover(struct msm_gpu *gpu)
+{
+ struct drm_device *dev = gpu->dev;
+ int ret;
+
+ // XXX pm-runtime?? we *need* the device to be off after this
+ // so maybe continuing to call ->pm_suspend/resume() is better?
+
+ gpu->funcs->pm_suspend(gpu);
+ gpu->funcs->pm_resume(gpu);
+
+ ret = msm_gpu_hw_init(gpu);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
+ /* hmm, oh well? */
+ }
+}
+
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg)
+{
+ uint32_t wptr;
+
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
+ /*
+ * Mask wptr value that we calculate to fit in the HW range. This is
+ * to account for the possibility that the last command fit exactly into
+ * the ringbuffer and rb->next hasn't wrapped to zero yet
+ */
+ wptr = get_wptr(ring);
+
+ /* ensure writes to ringbuffer have hit system memory: */
+ mb();
+
+ gpu_write(gpu, reg, wptr);
+}
+
+bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ uint32_t wptr = get_wptr(ring);
+
+ /* wait for CP to drain ringbuffer: */
+ if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
+ return true;
+
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
+ gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);
+
+ return false;
+}
+
+int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int i, count = 0;
+
+ WARN_ON(!mutex_is_locked(&gpu->lock));
+
+ kref_init(&state->ref);
+
+ ktime_get_real_ts64(&state->time);
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ int size = 0, j;
+
+ state->ring[i].fence = gpu->rb[i]->memptrs->fence;
+ state->ring[i].iova = gpu->rb[i]->iova;
+ state->ring[i].seqno = gpu->rb[i]->fctx->last_fence;
+ state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
+ state->ring[i].wptr = get_wptr(gpu->rb[i]);
+
+ /* Copy at least 'wptr' dwords of the data */
+ size = state->ring[i].wptr;
+
+		/* After wptr, find the last non-zero dword to save space */
+ for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++)
+ if (gpu->rb[i]->start[j])
+ size = j + 1;
+
+ if (size) {
+ state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL);
+ if (state->ring[i].data) {
+ memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
+ state->ring[i].data_size = size << 2;
+ }
+ }
+ }
+
+ /* Some targets prefer to collect their own registers */
+ if (!adreno_gpu->registers)
+ return 0;
+
+ /* Count the number of registers */
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
+ count += adreno_gpu->registers[i + 1] -
+ adreno_gpu->registers[i] + 1;
+
+ state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL);
+ if (state->registers) {
+ int pos = 0;
+
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+ u32 start = adreno_gpu->registers[i];
+ u32 end = adreno_gpu->registers[i + 1];
+ u32 addr;
+
+ for (addr = start; addr <= end; addr++) {
+ state->registers[pos++] = addr;
+ state->registers[pos++] = gpu_read(gpu, addr);
+ }
+ }
+
+ state->nr_registers = count;
+ }
+
+ return 0;
+}
+
+void adreno_gpu_state_destroy(struct msm_gpu_state *state)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(state->ring); i++)
+ kvfree(state->ring[i].data);
+
+ for (i = 0; state->bos && i < state->nr_bos; i++)
+ kvfree(state->bos[i].data);
+
+ kfree(state->bos);
+ kfree(state->comm);
+ kfree(state->cmd);
+ kfree(state->registers);
+}
+
+static void adreno_gpu_state_kref_destroy(struct kref *kref)
+{
+ struct msm_gpu_state *state = container_of(kref,
+ struct msm_gpu_state, ref);
+
+ adreno_gpu_state_destroy(state);
+ kfree(state);
+}
+
+int adreno_gpu_state_put(struct msm_gpu_state *state)
+{
+ if (IS_ERR_OR_NULL(state))
+ return 1;
+
+ return kref_put(&state->ref, adreno_gpu_state_kref_destroy);
+}
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+
+static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
+{
+ void *buf;
+ size_t buf_itr = 0, buffer_size;
+ char out[ASCII85_BUFSZ];
+ long l;
+ int i;
+
+ if (!src || !len)
+ return NULL;
+
+ l = ascii85_encode_len(len);
+
+ /*
+ * Ascii85 outputs either a 5 byte string or a 1 byte string. So we
+ * account for the worst case of 5 bytes per dword plus the 1 for '\0'
+ */
+ buffer_size = (l * 5) + 1;
+
+ buf = kvmalloc(buffer_size, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ for (i = 0; i < l; i++)
+ buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
+ ascii85_encode(src[i], out));
+
+ return buf;
+}
+
+/* len is expected to be in bytes
+ *
+ * WARNING: *ptr should be allocated with kvmalloc or friends. It may be
+ * freed with kvfree() and replaced with a newly kvmalloc'd buffer on the
+ * first call, when the unencoded raw data is ascii85 encoded.
+ */
+void adreno_show_object(struct drm_printer *p, void **ptr, int len,
+ bool *encoded)
+{
+ if (!*ptr || !len)
+ return;
+
+ if (!*encoded) {
+ long datalen, i;
+ u32 *buf = *ptr;
+
+ /*
+ * Only dump the non-zero part of the buffer - rarely will
+ * any data completely fill the entire allocated size of
+ * the buffer.
+ */
+ for (datalen = 0, i = 0; i < len >> 2; i++)
+ if (buf[i])
+ datalen = ((i + 1) << 2);
+
+ /*
+ * If we reach here, then the originally captured binary buffer
+ * will be replaced with the ascii85 encoded string
+ */
+ *ptr = adreno_gpu_ascii85_encode(buf, datalen);
+
+ kvfree(buf);
+
+ *encoded = true;
+ }
+
+ if (!*ptr)
+ return;
+
+ drm_puts(p, " data: !!ascii85 |\n");
+ drm_puts(p, " ");
+
+ drm_puts(p, *ptr);
+
+ drm_puts(p, "\n");
+}
+
+void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int i;
+
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ drm_printf(p, "revision: %d (%d.%d.%d.%d)\n",
+ adreno_gpu->info->revn, adreno_gpu->rev.core,
+ adreno_gpu->rev.major, adreno_gpu->rev.minor,
+ adreno_gpu->rev.patchid);
+	/*
+	 * If this state was collected due to an iova fault, show the fault
+	 * related info. TTBR0 would not be zero in that case, so this is a
+	 * good way to distinguish:
+	 */
+ if (state->fault_info.ttbr0) {
+ const struct msm_gpu_fault_info *info = &state->fault_info;
+
+ drm_puts(p, "fault-info:\n");
+ drm_printf(p, " - ttbr0=%.16llx\n", info->ttbr0);
+ drm_printf(p, " - iova=%.16lx\n", info->iova);
+ drm_printf(p, " - dir=%s\n", info->flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ");
+ drm_printf(p, " - type=%s\n", info->type);
+ drm_printf(p, " - source=%s\n", info->block);
+ }
+
+ drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
+
+ drm_puts(p, "ringbuffer:\n");
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ drm_printf(p, " - id: %d\n", i);
+ drm_printf(p, " iova: 0x%016llx\n", state->ring[i].iova);
+ drm_printf(p, " last-fence: %u\n", state->ring[i].seqno);
+ drm_printf(p, " retired-fence: %u\n", state->ring[i].fence);
+ drm_printf(p, " rptr: %u\n", state->ring[i].rptr);
+ drm_printf(p, " wptr: %u\n", state->ring[i].wptr);
+ drm_printf(p, " size: %u\n", MSM_GPU_RINGBUFFER_SZ);
+
+ adreno_show_object(p, &state->ring[i].data,
+ state->ring[i].data_size, &state->ring[i].encoded);
+ }
+
+ if (state->bos) {
+ drm_puts(p, "bos:\n");
+
+ for (i = 0; i < state->nr_bos; i++) {
+ drm_printf(p, " - iova: 0x%016llx\n",
+ state->bos[i].iova);
+ drm_printf(p, " size: %zd\n", state->bos[i].size);
+ drm_printf(p, " name: %-32s\n", state->bos[i].name);
+
+ adreno_show_object(p, &state->bos[i].data,
+ state->bos[i].size, &state->bos[i].encoded);
+ }
+ }
+
+ if (state->nr_registers) {
+ drm_puts(p, "registers:\n");
+
+ for (i = 0; i < state->nr_registers; i++) {
+ drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
+ state->registers[i * 2] << 2,
+ state->registers[(i * 2) + 1]);
+ }
+ }
+}
+#endif
+
+/* Dump common gpu status and scratch registers on any hang, to make
+ * the hangcheck logs more useful. The scratch registers seem always
+ * safe to read when GPU has hung (unlike some other regs, depending
+ * on how the GPU hung), and they are useful to match up to cmdstream
+ * dumps when debugging hangs:
+ */
+void adreno_dump_info(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int i;
+
+ printk("revision: %d (%d.%d.%d.%d)\n",
+ adreno_gpu->info->revn, adreno_gpu->rev.core,
+ adreno_gpu->rev.major, adreno_gpu->rev.minor,
+ adreno_gpu->rev.patchid);
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ printk("rb %d: fence: %d/%d\n", i,
+ ring->memptrs->fence,
+ ring->fctx->last_fence);
+
+ printk("rptr: %d\n", get_rptr(adreno_gpu, ring));
+ printk("rb wptr: %d\n", get_wptr(ring));
+ }
+}
+
+/* would be nice to not have to duplicate the _show() stuff with printk(): */
+void adreno_dump(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int i;
+
+ if (!adreno_gpu->registers)
+ return;
+
+ /* dump these out in a form that can be parsed by demsm: */
+ printk("IO:region %s 00000000 00020000\n", gpu->name);
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+ uint32_t start = adreno_gpu->registers[i];
+ uint32_t end = adreno_gpu->registers[i+1];
+ uint32_t addr;
+
+ for (addr = start; addr <= end; addr++) {
+ uint32_t val = gpu_read(gpu, addr);
+ printk("IO:R %08x %08x\n", addr<<2, val);
+ }
+ }
+}
+
+static uint32_t ring_freewords(struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
+ uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;
+ /* Use ring->next to calculate free size */
+ uint32_t wptr = ring->next - ring->start;
+ uint32_t rptr = get_rptr(adreno_gpu, ring);
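+	/* One dword is kept in reserve so a full ring is never mistaken
+	 * for an empty one (wptr == rptr), hence the size - 1:
+	 */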
+ return (rptr + (size - 1) - wptr) % size;
+}
+
+void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
+{
+ if (spin_until(ring_freewords(ring) >= ndwords))
+ DRM_DEV_ERROR(ring->gpu->dev->dev,
+ "timeout waiting for space in ringbuffer %d\n",
+ ring->id);
+}
+
+/* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */
+static int adreno_get_legacy_pwrlevels(struct device *dev)
+{
+ struct device_node *child, *node;
+ int ret;
+
+ node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
+ if (!node) {
+ DRM_DEV_DEBUG(dev, "Could not find the GPU powerlevels\n");
+ return -ENXIO;
+ }
+
+ for_each_child_of_node(node, child) {
+ unsigned int val;
+
+ ret = of_property_read_u32(child, "qcom,gpu-freq", &val);
+ if (ret)
+ continue;
+
+ /*
+ * Skip the intentionally bogus clock value found at the bottom
+ * of most legacy frequency tables
+ */
+ if (val != 27000000)
+ dev_pm_opp_add(dev, val, 0);
+ }
+
+ of_node_put(node);
+
+ return 0;
+}
+
+static void adreno_get_pwrlevels(struct device *dev,
+ struct msm_gpu *gpu)
+{
+ unsigned long freq = ULONG_MAX;
+ struct dev_pm_opp *opp;
+ int ret;
+
+ gpu->fast_rate = 0;
+
+ /* You down with OPP? */
+ if (!of_find_property(dev->of_node, "operating-points-v2", NULL))
+ ret = adreno_get_legacy_pwrlevels(dev);
+ else {
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret)
+ DRM_DEV_ERROR(dev, "Unable to set the OPP table\n");
+ }
+
+ if (!ret) {
+ /* Find the fastest defined rate */
+ opp = dev_pm_opp_find_freq_floor(dev, &freq);
+ if (!IS_ERR(opp)) {
+ gpu->fast_rate = freq;
+ dev_pm_opp_put(opp);
+ }
+ }
+
+ if (!gpu->fast_rate) {
+ dev_warn(dev,
+ "Could not find a clock rate. Using a reasonable default\n");
+ /* Pick a suitably safe clock speed for any target */
+ gpu->fast_rate = 200000000;
+ }
+
+ DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);
+}
+
+int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
+ struct adreno_ocmem *adreno_ocmem)
+{
+ struct ocmem_buf *ocmem_hdl;
+ struct ocmem *ocmem;
+
+ ocmem = of_get_ocmem(dev);
+ if (IS_ERR(ocmem)) {
+ if (PTR_ERR(ocmem) == -ENODEV) {
+ /*
+ * Return success since either the ocmem property was
+ * not specified in device tree, or ocmem support is
+ * not compiled into the kernel.
+ */
+ return 0;
+ }
+
+ return PTR_ERR(ocmem);
+ }
+
+ ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->gmem);
+ if (IS_ERR(ocmem_hdl))
+ return PTR_ERR(ocmem_hdl);
+
+ adreno_ocmem->ocmem = ocmem;
+ adreno_ocmem->base = ocmem_hdl->addr;
+ adreno_ocmem->hdl = ocmem_hdl;
+ adreno_gpu->gmem = ocmem_hdl->len;
+
+ return 0;
+}
+
+void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *adreno_ocmem)
+{
+ if (adreno_ocmem && adreno_ocmem->base)
+ ocmem_free(adreno_ocmem->ocmem, OCMEM_GRAPHICS,
+ adreno_ocmem->hdl);
+}
+
+int adreno_read_speedbin(struct device *dev, u32 *speedbin)
+{
+ return nvmem_cell_read_variable_le_u32(dev, "speed_bin", speedbin);
+}
+
+int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ struct adreno_gpu *adreno_gpu,
+ const struct adreno_gpu_funcs *funcs, int nr_rings)
+{
+ struct device *dev = &pdev->dev;
+ struct adreno_platform_config *config = dev->platform_data;
+ struct msm_gpu_config adreno_gpu_config = { 0 };
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct adreno_rev *rev = &config->rev;
+ const char *gpu_name;
+ u32 speedbin;
+
+ adreno_gpu->funcs = funcs;
+ adreno_gpu->info = adreno_info(config->rev);
+ adreno_gpu->gmem = adreno_gpu->info->gmem;
+ adreno_gpu->revn = adreno_gpu->info->revn;
+ adreno_gpu->rev = *rev;
+
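+	/* Treat a missing or zeroed speed-bin fuse as "unknown": */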
+ if (adreno_read_speedbin(dev, &speedbin) || !speedbin)
+ speedbin = 0xffff;
+ adreno_gpu->speedbin = (uint16_t) (0xffff & speedbin);
+
+ gpu_name = adreno_gpu->info->name;
+ if (!gpu_name) {
+ gpu_name = devm_kasprintf(dev, GFP_KERNEL, "%d.%d.%d.%d",
+ rev->core, rev->major, rev->minor,
+ rev->patchid);
+ if (!gpu_name)
+ return -ENOMEM;
+ }
+
+ adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
+
+ adreno_gpu_config.nr_rings = nr_rings;
+
+ adreno_get_pwrlevels(dev, gpu);
+
+ pm_runtime_set_autosuspend_delay(dev,
+ adreno_gpu->info->inactive_period);
+ pm_runtime_use_autosuspend(dev);
+
+ return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
+ gpu_name, &adreno_gpu_config);
+}
+
+void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
+{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct msm_drm_private *priv = gpu->dev ? gpu->dev->dev_private : NULL;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
+ release_firmware(adreno_gpu->fw[i]);
+
+ if (priv && pm_runtime_enabled(&priv->gpu_pdev->dev))
+ pm_runtime_disable(&priv->gpu_pdev->dev);
+
+ msm_gpu_cleanup(&adreno_gpu->base);
+}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
new file mode 100644
index 000000000..3d78efb06
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -0,0 +1,443 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * Copyright (c) 2014,2017, 2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __ADRENO_GPU_H__
+#define __ADRENO_GPU_H__
+
+#include <linux/firmware.h>
+#include <linux/iopoll.h>
+
+#include "msm_gpu.h"
+
+#include "adreno_common.xml.h"
+#include "adreno_pm4.xml.h"
+
+extern bool snapshot_debugbus;
+extern bool allow_vram_carveout;
+
+enum {
+ ADRENO_FW_PM4 = 0,
+ ADRENO_FW_SQE = 0, /* a6xx */
+ ADRENO_FW_PFP = 1,
+ ADRENO_FW_GMU = 1, /* a6xx */
+ ADRENO_FW_GPMU = 2,
+ ADRENO_FW_MAX,
+};
+
+#define ADRENO_QUIRK_TWO_PASS_USE_WFI BIT(0)
+#define ADRENO_QUIRK_FAULT_DETECT_MASK BIT(1)
+#define ADRENO_QUIRK_LMLOADKILL_DISABLE BIT(2)
+
+struct adreno_rev {
+ uint8_t core;
+ uint8_t major;
+ uint8_t minor;
+ uint8_t patchid;
+};
+
+#define ANY_ID 0xff
+
+#define ADRENO_REV(core, major, minor, patchid) \
+ ((struct adreno_rev){ core, major, minor, patchid })
+
+struct adreno_gpu_funcs {
+ struct msm_gpu_funcs base;
+ int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
+};
+
+struct adreno_reglist {
+ u32 offset;
+ u32 value;
+};
+
+extern const struct adreno_reglist a615_hwcg[], a630_hwcg[], a640_hwcg[], a650_hwcg[], a660_hwcg[];
+
+struct adreno_info {
+ struct adreno_rev rev;
+ uint32_t revn;
+ const char *name;
+ const char *fw[ADRENO_FW_MAX];
+ uint32_t gmem;
+ u64 quirks;
+ struct msm_gpu *(*init)(struct drm_device *dev);
+ const char *zapfw;
+ u32 inactive_period;
+ const struct adreno_reglist *hwcg;
+ u64 address_space_size;
+};
+
+const struct adreno_info *adreno_info(struct adreno_rev rev);
+
+struct adreno_gpu {
+ struct msm_gpu base;
+ struct adreno_rev rev;
+ const struct adreno_info *info;
+ uint32_t gmem; /* actual gmem size */
+ uint32_t revn; /* numeric revision name */
+ uint16_t speedbin;
+ const struct adreno_gpu_funcs *funcs;
+
+ /* interesting register offsets to dump: */
+ const unsigned int *registers;
+
+ /*
+ * Are we loading fw from legacy path? Prior to addition
+ * of gpu firmware to linux-firmware, the fw files were
+ * placed in toplevel firmware directory, following qcom's
+ * android kernel. But linux-firmware preferred they be
+ * placed in a 'qcom' subdirectory.
+ *
+ * For backwards compatibility, we try first to load from
+ * the new path, using request_firmware_direct() to avoid
+ * any potential timeout waiting for usermode helper, then
+ * fall back to the old path (with direct load). And
+ * finally fall back to request_firmware() with the new
+ * path to allow the usermode helper.
+ */
+ enum {
+ FW_LOCATION_UNKNOWN = 0,
+ FW_LOCATION_NEW, /* /lib/firmware/qcom/$fwfile */
+ FW_LOCATION_LEGACY, /* /lib/firmware/$fwfile */
+ FW_LOCATION_HELPER,
+ } fwloc;
+
+ /* firmware: */
+ const struct firmware *fw[ADRENO_FW_MAX];
+
+ /*
+ * Register offsets are different between some GPUs.
+ * GPU specific offsets will be exported by GPU specific
+ * code (a3xx_gpu.c) and stored in this common location.
+ */
+ const unsigned int *reg_offsets;
+};
+#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
+
+struct adreno_ocmem {
+ struct ocmem *ocmem;
+ unsigned long base;
+ void *hdl;
+};
+
+/* platform config data (i.e. from DT, or pdata) */
+struct adreno_platform_config {
+ struct adreno_rev rev;
+};
+
+#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
+
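+/*
+ * Busy-wait for up to ADRENO_IDLE_TIMEOUT for the condition X to become
+ * true; evaluates to 0 on success or -ETIMEDOUT if the timeout expired
+ * first, so callers can write, for example:
+ *
+ *	if (spin_until(ring_freewords(ring) >= ndwords))
+ *		DRM_ERROR("timeout!\n");
+ */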
+#define spin_until(X) ({ \
+ int __ret = -ETIMEDOUT; \
+ unsigned long __t = jiffies + ADRENO_IDLE_TIMEOUT; \
+ do { \
+ if (X) { \
+ __ret = 0; \
+ break; \
+ } \
+ } while (time_before(jiffies, __t)); \
+ __ret; \
+})
+
+bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2);
+
+static inline bool adreno_is_a2xx(struct adreno_gpu *gpu)
+{
+ return (gpu->revn < 300);
+}
+
+static inline bool adreno_is_a20x(struct adreno_gpu *gpu)
+{
+ return (gpu->revn < 210);
+}
+
+static inline bool adreno_is_a225(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 225;
+}
+
+static inline bool adreno_is_a305(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 305;
+}
+
+static inline bool adreno_is_a306(struct adreno_gpu *gpu)
+{
+ /* yes, 307, because a305c is 306 */
+ return gpu->revn == 307;
+}
+
+static inline bool adreno_is_a320(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 320;
+}
+
+static inline bool adreno_is_a330(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 330;
+}
+
+static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
+{
+ return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
+}
+
+static inline int adreno_is_a405(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 405;
+}
+
+static inline int adreno_is_a420(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 420;
+}
+
+static inline int adreno_is_a430(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 430;
+}
+
+static inline int adreno_is_a506(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 506;
+}
+
+static inline int adreno_is_a508(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 508;
+}
+
+static inline int adreno_is_a509(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 509;
+}
+
+static inline int adreno_is_a510(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 510;
+}
+
+static inline int adreno_is_a512(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 512;
+}
+
+static inline int adreno_is_a530(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 530;
+}
+
+static inline int adreno_is_a540(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 540;
+}
+
+static inline int adreno_is_a618(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 618;
+}
+
+static inline int adreno_is_a619(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 619;
+}
+
+static inline int adreno_is_a630(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 630;
+}
+
+static inline int adreno_is_a640_family(struct adreno_gpu *gpu)
+{
+ return (gpu->revn == 640) || (gpu->revn == 680);
+}
+
+static inline int adreno_is_a650(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 650;
+}
+
+static inline int adreno_is_7c3(struct adreno_gpu *gpu)
+{
+ /* The order of args is important here to handle ANY_ID correctly */
+ return adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), gpu->rev);
+}
+
+static inline int adreno_is_a660(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 660;
+}
+
+/* check for a615, a616, a618, a619 or any derivatives */
+static inline int adreno_is_a615_family(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 615 || gpu->revn == 616 || gpu->revn == 618 || gpu->revn == 619;
+}
+
+static inline int adreno_is_a660_family(struct adreno_gpu *gpu)
+{
+ return adreno_is_a660(gpu) || adreno_is_7c3(gpu);
+}
+
+/* check for a650, a660, or any derivatives */
+static inline int adreno_is_a650_family(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 650 || gpu->revn == 620 || adreno_is_a660_family(gpu);
+}
+
+u64 adreno_private_address_space_size(struct msm_gpu *gpu);
+int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ uint32_t param, uint64_t *value, uint32_t *len);
+int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ uint32_t param, uint64_t value, uint32_t len);
+const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
+ const char *fwname);
+struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
+ const struct firmware *fw, u64 *iova);
+int adreno_hw_init(struct msm_gpu *gpu);
+void adreno_recover(struct msm_gpu *gpu);
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg);
+bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p);
+#endif
+void adreno_dump_info(struct msm_gpu *gpu);
+void adreno_dump(struct msm_gpu *gpu);
+void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
+struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);
+
+int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
+ struct adreno_ocmem *ocmem);
+void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *ocmem);
+
+int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
+ int nr_rings);
+void adreno_gpu_cleanup(struct adreno_gpu *gpu);
+int adreno_load_fw(struct adreno_gpu *adreno_gpu);
+
+void adreno_gpu_state_destroy(struct msm_gpu_state *state);
+
+int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
+int adreno_gpu_state_put(struct msm_gpu_state *state);
+void adreno_show_object(struct drm_printer *p, void **ptr, int len,
+ bool *encoded);
+
+/*
+ * Common helper function to initialize the default address space for arm-smmu
+ * attached targets
+ */
+struct msm_gem_address_space *
+adreno_iommu_create_address_space(struct msm_gpu *gpu,
+ struct platform_device *pdev);
+
+void adreno_set_llc_attributes(struct iommu_domain *iommu);
+
+int adreno_read_speedbin(struct device *dev, u32 *speedbin);
+
+/*
+ * For a5xx and a6xx targets load the zap shader that is used to pull the GPU
+ * out of secure mode
+ */
+int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid);
+
+/* ringbuffer helpers (the parts that are adreno specific) */
+
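+/*
+ * Legacy (pre-a5xx) packet headers: a type-0 packet writes 'cnt' dwords
+ * to consecutive registers starting at 'regindx' (register index in
+ * bits 14:0, count minus one in bits 29:16); a type-3 packet executes
+ * 'opcode' (bits 15:8) with 'cnt' payload dwords (count minus one in
+ * bits 29:16). The top two bits carry the packet type.
+ */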
+static inline void
+OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
+{
+ adreno_wait_ring(ring, cnt+1);
+ OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
+}
+
+/* no-op packet: */
+static inline void
+OUT_PKT2(struct msm_ringbuffer *ring)
+{
+ adreno_wait_ring(ring, 1);
+ OUT_RING(ring, CP_TYPE2_PKT);
+}
+
+static inline void
+OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
+{
+ adreno_wait_ring(ring, cnt+1);
+ OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
+}
+
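+/*
+ * Parity bit for type-4/type-7 packet headers: fold the value down to a
+ * single nibble by XORing all eight nibbles together, then use 0x9669
+ * as a 16-entry lookup table (bit i is set when i has an even number of
+ * bits set):
+ */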
+static inline u32 PM4_PARITY(u32 val)
+{
+ return (0x9669 >> (0xF & (val ^
+ (val >> 4) ^ (val >> 8) ^ (val >> 12) ^
+ (val >> 16) ^ ((val) >> 20) ^ (val >> 24) ^
+ (val >> 28)))) & 1;
+}
+
+/* Maximum number of values that can be executed for one opcode */
+#define TYPE4_MAX_PAYLOAD 127
+
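+/*
+ * Type-4 (register write) packet header: payload count in bits 6:0 with
+ * its parity bit at 7, register offset in bits 25:8 with its parity bit
+ * at 27:
+ */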
+#define PKT4(_reg, _cnt) \
+ (CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
+ (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))
+
+static inline void
+OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
+{
+ adreno_wait_ring(ring, cnt + 1);
+ OUT_RING(ring, PKT4(regindx, cnt));
+}
+
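+/*
+ * Type-7 (opcode) packet header: payload count in bits 14:0 with its
+ * parity bit at 15, opcode in bits 22:16 with its parity bit at 23:
+ */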
+static inline void
+OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
+{
+ adreno_wait_ring(ring, cnt + 1);
+ OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
+ ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
+}
+
+struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);
+
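+/* Current write position as a dword offset into the ring, wrapped to
+ * the ringbuffer size:
+ */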
+static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
+{
+ return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
+}
+
+/*
+ * Given a register and a count, return a value to program into
+ * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
+ * registers starting at _reg.
+ *
+ * The register base needs to be a multiple of the length. If it is not, the
+ * hardware will quietly mask off the bits for you and shift the size. For
+ * example, if you intend the protection to start at 0x07 for a length of 4
+ * (0x07-0x0A) the hardware will actually protect (0x04-0x07) which might
+ * expose registers you intended to protect!
+ */
+#define ADRENO_PROTECT_RW(_reg, _len) \
+ ((1 << 30) | (1 << 29) | \
+ ((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
+/*
+ * Same as above, but allow reads over the range. For areas of mixed use (such
+ * as performance counters) this allows us to protect a much larger range with a
+ * single register
+ */
+#define ADRENO_PROTECT_RDONLY(_reg, _len) \
+	((1 << 29) | \
+ ((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
+
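+/*
+ * Poll the GPU register at dword offset 'addr' (hence the << 2 byte
+ * conversion) until 'cond' is true, sleeping 'interval' usecs between
+ * reads and giving up after 'timeout' usecs; a thin wrapper around
+ * readl_poll_timeout():
+ */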
+#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
+ interval, timeout)
+
+#endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
new file mode 100644
index 000000000..7aecf920f
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -0,0 +1,2365 @@
+#ifndef ADRENO_PM4_XML
+#define ADRENO_PM4_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56)
+
+Copyright (C) 2013-2022 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum vgt_event_type {
+ VS_DEALLOC = 0,
+ PS_DEALLOC = 1,
+ VS_DONE_TS = 2,
+ PS_DONE_TS = 3,
+ CACHE_FLUSH_TS = 4,
+ CONTEXT_DONE = 5,
+ CACHE_FLUSH = 6,
+ VIZQUERY_START = 7,
+ HLSQ_FLUSH = 7,
+ VIZQUERY_END = 8,
+ SC_WAIT_WC = 9,
+ WRITE_PRIMITIVE_COUNTS = 9,
+ START_PRIMITIVE_CTRS = 11,
+ STOP_PRIMITIVE_CTRS = 12,
+ RST_PIX_CNT = 13,
+ RST_VTX_CNT = 14,
+ TILE_FLUSH = 15,
+ STAT_EVENT = 16,
+ CACHE_FLUSH_AND_INV_TS_EVENT = 20,
+ ZPASS_DONE = 21,
+ CACHE_FLUSH_AND_INV_EVENT = 22,
+ RB_DONE_TS = 22,
+ PERFCOUNTER_START = 23,
+ PERFCOUNTER_STOP = 24,
+ VS_FETCH_DONE = 27,
+ FACENESS_FLUSH = 28,
+ WT_DONE_TS = 8,
+ FLUSH_SO_0 = 17,
+ FLUSH_SO_1 = 18,
+ FLUSH_SO_2 = 19,
+ FLUSH_SO_3 = 20,
+ PC_CCU_INVALIDATE_DEPTH = 24,
+ PC_CCU_INVALIDATE_COLOR = 25,
+ PC_CCU_RESOLVE_TS = 26,
+ PC_CCU_FLUSH_DEPTH_TS = 28,
+ PC_CCU_FLUSH_COLOR_TS = 29,
+ BLIT = 30,
+ UNK_25 = 37,
+ LRZ_FLUSH = 38,
+ BLIT_OP_FILL_2D = 39,
+ BLIT_OP_COPY_2D = 40,
+ BLIT_OP_SCALE_2D = 42,
+ CONTEXT_DONE_2D = 43,
+ UNK_2C = 44,
+ UNK_2D = 45,
+ CACHE_INVALIDATE = 49,
+};
+
+enum pc_di_primtype {
+ DI_PT_NONE = 0,
+ DI_PT_POINTLIST_PSIZE = 1,
+ DI_PT_LINELIST = 2,
+ DI_PT_LINESTRIP = 3,
+ DI_PT_TRILIST = 4,
+ DI_PT_TRIFAN = 5,
+ DI_PT_TRISTRIP = 6,
+ DI_PT_LINELOOP = 7,
+ DI_PT_RECTLIST = 8,
+ DI_PT_POINTLIST = 9,
+ DI_PT_LINE_ADJ = 10,
+ DI_PT_LINESTRIP_ADJ = 11,
+ DI_PT_TRI_ADJ = 12,
+ DI_PT_TRISTRIP_ADJ = 13,
+ DI_PT_PATCHES0 = 31,
+ DI_PT_PATCHES1 = 32,
+ DI_PT_PATCHES2 = 33,
+ DI_PT_PATCHES3 = 34,
+ DI_PT_PATCHES4 = 35,
+ DI_PT_PATCHES5 = 36,
+ DI_PT_PATCHES6 = 37,
+ DI_PT_PATCHES7 = 38,
+ DI_PT_PATCHES8 = 39,
+ DI_PT_PATCHES9 = 40,
+ DI_PT_PATCHES10 = 41,
+ DI_PT_PATCHES11 = 42,
+ DI_PT_PATCHES12 = 43,
+ DI_PT_PATCHES13 = 44,
+ DI_PT_PATCHES14 = 45,
+ DI_PT_PATCHES15 = 46,
+ DI_PT_PATCHES16 = 47,
+ DI_PT_PATCHES17 = 48,
+ DI_PT_PATCHES18 = 49,
+ DI_PT_PATCHES19 = 50,
+ DI_PT_PATCHES20 = 51,
+ DI_PT_PATCHES21 = 52,
+ DI_PT_PATCHES22 = 53,
+ DI_PT_PATCHES23 = 54,
+ DI_PT_PATCHES24 = 55,
+ DI_PT_PATCHES25 = 56,
+ DI_PT_PATCHES26 = 57,
+ DI_PT_PATCHES27 = 58,
+ DI_PT_PATCHES28 = 59,
+ DI_PT_PATCHES29 = 60,
+ DI_PT_PATCHES30 = 61,
+ DI_PT_PATCHES31 = 62,
+};
+
+enum pc_di_src_sel {
+ DI_SRC_SEL_DMA = 0,
+ DI_SRC_SEL_IMMEDIATE = 1,
+ DI_SRC_SEL_AUTO_INDEX = 2,
+ DI_SRC_SEL_AUTO_XFB = 3,
+};
+
+enum pc_di_face_cull_sel {
+ DI_FACE_CULL_NONE = 0,
+ DI_FACE_CULL_FETCH = 1,
+ DI_FACE_BACKFACE_CULL = 2,
+ DI_FACE_FRONTFACE_CULL = 3,
+};
+
+enum pc_di_index_size {
+ INDEX_SIZE_IGN = 0,
+ INDEX_SIZE_16_BIT = 0,
+ INDEX_SIZE_32_BIT = 1,
+ INDEX_SIZE_8_BIT = 2,
+ INDEX_SIZE_INVALID = 0,
+};
+
+enum pc_di_vis_cull_mode {
+ IGNORE_VISIBILITY = 0,
+ USE_VISIBILITY = 1,
+};
+
+enum adreno_pm4_packet_type {
+ CP_TYPE0_PKT = 0,
+ CP_TYPE1_PKT = 0x40000000,
+ CP_TYPE2_PKT = 0x80000000,
+ CP_TYPE3_PKT = 0xc0000000,
+ CP_TYPE4_PKT = 0x40000000,
+ CP_TYPE7_PKT = 0x70000000,
+};
+
+enum adreno_pm4_type3_packets {
+ CP_ME_INIT = 72,
+ CP_NOP = 16,
+ CP_PREEMPT_ENABLE = 28,
+ CP_PREEMPT_TOKEN = 30,
+ CP_INDIRECT_BUFFER = 63,
+ CP_INDIRECT_BUFFER_CHAIN = 87,
+ CP_INDIRECT_BUFFER_PFD = 55,
+ CP_WAIT_FOR_IDLE = 38,
+ CP_WAIT_REG_MEM = 60,
+ CP_WAIT_REG_EQ = 82,
+ CP_WAIT_REG_GTE = 83,
+ CP_WAIT_UNTIL_READ = 92,
+ CP_WAIT_IB_PFD_COMPLETE = 93,
+ CP_REG_RMW = 33,
+ CP_SET_BIN_DATA = 47,
+ CP_SET_BIN_DATA5 = 47,
+ CP_REG_TO_MEM = 62,
+ CP_MEM_WRITE = 61,
+ CP_MEM_WRITE_CNTR = 79,
+ CP_COND_EXEC = 68,
+ CP_COND_WRITE = 69,
+ CP_COND_WRITE5 = 69,
+ CP_EVENT_WRITE = 70,
+ CP_EVENT_WRITE_SHD = 88,
+ CP_EVENT_WRITE_CFL = 89,
+ CP_EVENT_WRITE_ZPD = 91,
+ CP_RUN_OPENCL = 49,
+ CP_DRAW_INDX = 34,
+ CP_DRAW_INDX_2 = 54,
+ CP_DRAW_INDX_BIN = 52,
+ CP_DRAW_INDX_2_BIN = 53,
+ CP_VIZ_QUERY = 35,
+ CP_SET_STATE = 37,
+ CP_SET_CONSTANT = 45,
+ CP_IM_LOAD = 39,
+ CP_IM_LOAD_IMMEDIATE = 43,
+ CP_LOAD_CONSTANT_CONTEXT = 46,
+ CP_INVALIDATE_STATE = 59,
+ CP_SET_SHADER_BASES = 74,
+ CP_SET_BIN_MASK = 80,
+ CP_SET_BIN_SELECT = 81,
+ CP_CONTEXT_UPDATE = 94,
+ CP_INTERRUPT = 64,
+ CP_IM_STORE = 44,
+ CP_SET_DRAW_INIT_FLAGS = 75,
+ CP_SET_PROTECTED_MODE = 95,
+ CP_BOOTSTRAP_UCODE = 111,
+ CP_LOAD_STATE = 48,
+ CP_LOAD_STATE4 = 48,
+ CP_COND_INDIRECT_BUFFER_PFE = 58,
+ CP_COND_INDIRECT_BUFFER_PFD = 50,
+ CP_INDIRECT_BUFFER_PFE = 63,
+ CP_SET_BIN = 76,
+ CP_TEST_TWO_MEMS = 113,
+ CP_REG_WR_NO_CTXT = 120,
+ CP_RECORD_PFP_TIMESTAMP = 17,
+ CP_SET_SECURE_MODE = 102,
+ CP_WAIT_FOR_ME = 19,
+ CP_SET_DRAW_STATE = 67,
+ CP_DRAW_INDX_OFFSET = 56,
+ CP_DRAW_INDIRECT = 40,
+ CP_DRAW_INDX_INDIRECT = 41,
+ CP_DRAW_INDIRECT_MULTI = 42,
+ CP_DRAW_AUTO = 36,
+ CP_DRAW_PRED_ENABLE_GLOBAL = 25,
+ CP_DRAW_PRED_ENABLE_LOCAL = 26,
+ CP_DRAW_PRED_SET = 78,
+ CP_WIDE_REG_WRITE = 116,
+ CP_SCRATCH_TO_REG = 77,
+ CP_REG_TO_SCRATCH = 74,
+ CP_WAIT_MEM_WRITES = 18,
+ CP_COND_REG_EXEC = 71,
+ CP_MEM_TO_REG = 66,
+ CP_EXEC_CS_INDIRECT = 65,
+ CP_EXEC_CS = 51,
+ CP_PERFCOUNTER_ACTION = 80,
+ CP_SMMU_TABLE_UPDATE = 83,
+ CP_SET_MARKER = 101,
+ CP_SET_PSEUDO_REG = 86,
+ CP_CONTEXT_REG_BUNCH = 92,
+ CP_YIELD_ENABLE = 28,
+ CP_SKIP_IB2_ENABLE_GLOBAL = 29,
+ CP_SKIP_IB2_ENABLE_LOCAL = 35,
+ CP_SET_SUBDRAW_SIZE = 53,
+ CP_WHERE_AM_I = 98,
+ CP_SET_VISIBILITY_OVERRIDE = 100,
+ CP_PREEMPT_ENABLE_GLOBAL = 105,
+ CP_PREEMPT_ENABLE_LOCAL = 106,
+ CP_CONTEXT_SWITCH_YIELD = 107,
+ CP_SET_RENDER_MODE = 108,
+ CP_COMPUTE_CHECKPOINT = 110,
+ CP_MEM_TO_MEM = 115,
+ CP_BLIT = 44,
+ CP_REG_TEST = 57,
+ CP_SET_MODE = 99,
+ CP_LOAD_STATE6_GEOM = 50,
+ CP_LOAD_STATE6_FRAG = 52,
+ CP_LOAD_STATE6 = 54,
+ IN_IB_PREFETCH_END = 23,
+ IN_SUBBLK_PREFETCH = 31,
+ IN_INSTR_PREFETCH = 32,
+ IN_INSTR_MATCH = 71,
+ IN_CONST_PREFETCH = 73,
+ IN_INCR_UPDT_STATE = 85,
+ IN_INCR_UPDT_CONST = 86,
+ IN_INCR_UPDT_INSTR = 87,
+ PKT4 = 4,
+ CP_SCRATCH_WRITE = 76,
+ CP_REG_TO_MEM_OFFSET_MEM = 116,
+ CP_REG_TO_MEM_OFFSET_REG = 114,
+ CP_WAIT_MEM_GTE = 20,
+ CP_WAIT_TWO_REGS = 112,
+ CP_MEMCPY = 117,
+ CP_SET_BIN_DATA5_OFFSET = 46,
+ CP_SET_CTXSWITCH_IB = 85,
+ CP_REG_WRITE = 109,
+ CP_START_BIN = 80,
+ CP_END_BIN = 81,
+};
+
+enum adreno_state_block {
+ SB_VERT_TEX = 0,
+ SB_VERT_MIPADDR = 1,
+ SB_FRAG_TEX = 2,
+ SB_FRAG_MIPADDR = 3,
+ SB_VERT_SHADER = 4,
+ SB_GEOM_SHADER = 5,
+ SB_FRAG_SHADER = 6,
+ SB_COMPUTE_SHADER = 7,
+};
+
+enum adreno_state_type {
+ ST_SHADER = 0,
+ ST_CONSTANTS = 1,
+};
+
+enum adreno_state_src {
+ SS_DIRECT = 0,
+ SS_INVALID_ALL_IC = 2,
+ SS_INVALID_PART_IC = 3,
+ SS_INDIRECT = 4,
+ SS_INDIRECT_TCM = 5,
+ SS_INDIRECT_STM = 6,
+};
+
+enum a4xx_state_block {
+ SB4_VS_TEX = 0,
+ SB4_HS_TEX = 1,
+ SB4_DS_TEX = 2,
+ SB4_GS_TEX = 3,
+ SB4_FS_TEX = 4,
+ SB4_CS_TEX = 5,
+ SB4_VS_SHADER = 8,
+ SB4_HS_SHADER = 9,
+ SB4_DS_SHADER = 10,
+ SB4_GS_SHADER = 11,
+ SB4_FS_SHADER = 12,
+ SB4_CS_SHADER = 13,
+ SB4_SSBO = 14,
+ SB4_CS_SSBO = 15,
+};
+
+enum a4xx_state_type {
+ ST4_SHADER = 0,
+ ST4_CONSTANTS = 1,
+ ST4_UBO = 2,
+};
+
+enum a4xx_state_src {
+ SS4_DIRECT = 0,
+ SS4_INDIRECT = 2,
+};
+
+enum a6xx_state_block {
+ SB6_VS_TEX = 0,
+ SB6_HS_TEX = 1,
+ SB6_DS_TEX = 2,
+ SB6_GS_TEX = 3,
+ SB6_FS_TEX = 4,
+ SB6_CS_TEX = 5,
+ SB6_VS_SHADER = 8,
+ SB6_HS_SHADER = 9,
+ SB6_DS_SHADER = 10,
+ SB6_GS_SHADER = 11,
+ SB6_FS_SHADER = 12,
+ SB6_CS_SHADER = 13,
+ SB6_IBO = 14,
+ SB6_CS_IBO = 15,
+};
+
+enum a6xx_state_type {
+ ST6_SHADER = 0,
+ ST6_CONSTANTS = 1,
+ ST6_UBO = 2,
+ ST6_IBO = 3,
+};
+
+enum a6xx_state_src {
+ SS6_DIRECT = 0,
+ SS6_BINDLESS = 1,
+ SS6_INDIRECT = 2,
+ SS6_UBO = 3,
+};
+
+enum a4xx_index_size {
+ INDEX4_SIZE_8_BIT = 0,
+ INDEX4_SIZE_16_BIT = 1,
+ INDEX4_SIZE_32_BIT = 2,
+};
+
+enum a6xx_patch_type {
+ TESS_QUADS = 0,
+ TESS_TRIANGLES = 1,
+ TESS_ISOLINES = 2,
+};
+
+enum a6xx_draw_indirect_opcode {
+ INDIRECT_OP_NORMAL = 2,
+ INDIRECT_OP_INDEXED = 4,
+ INDIRECT_OP_INDIRECT_COUNT = 6,
+ INDIRECT_OP_INDIRECT_COUNT_INDEXED = 7,
+};
+
+enum cp_draw_pred_src {
+ PRED_SRC_MEM = 5,
+};
+
+enum cp_draw_pred_test {
+ NE_0_PASS = 0,
+ EQ_0_PASS = 1,
+};
+
+enum cp_cond_function {
+ WRITE_ALWAYS = 0,
+ WRITE_LT = 1,
+ WRITE_LE = 2,
+ WRITE_EQ = 3,
+ WRITE_NE = 4,
+ WRITE_GE = 5,
+ WRITE_GT = 6,
+};
+
+enum render_mode_cmd {
+ BYPASS = 1,
+ BINNING = 2,
+ GMEM = 3,
+ BLIT2D = 5,
+ BLIT2DSCALE = 7,
+ END2D = 8,
+};
+
+enum cp_blit_cmd {
+ BLIT_OP_FILL = 0,
+ BLIT_OP_COPY = 1,
+ BLIT_OP_SCALE = 3,
+};
+
+enum a6xx_marker {
+ RM6_BYPASS = 1,
+ RM6_BINNING = 2,
+ RM6_GMEM = 4,
+ RM6_ENDVIS = 5,
+ RM6_RESOLVE = 6,
+ RM6_YIELD = 7,
+ RM6_COMPUTE = 8,
+ RM6_BLIT2DSCALE = 12,
+ RM6_IB1LIST_START = 13,
+ RM6_IB1LIST_END = 14,
+ RM6_IFPC_ENABLE = 256,
+ RM6_IFPC_DISABLE = 257,
+};
+
+enum pseudo_reg {
+ SMMU_INFO = 0,
+ NON_SECURE_SAVE_ADDR = 1,
+ SECURE_SAVE_ADDR = 2,
+ NON_PRIV_SAVE_ADDR = 3,
+ COUNTER = 4,
+};
+
+enum compare_mode {
+ PRED_TEST = 1,
+ REG_COMPARE = 2,
+ RENDER_MODE = 3,
+};
+
+enum ctxswitch_ib {
+ RESTORE_IB = 0,
+ YIELD_RESTORE_IB = 1,
+ SAVE_IB = 2,
+ RB_SAVE_IB = 3,
+};
+
+enum reg_tracker {
+ TRACK_CNTL_REG = 1,
+ TRACK_RENDER_CNTL = 2,
+ UNK_EVENT_WRITE = 4,
+};
+
+#define REG_CP_LOAD_STATE_0 0x00000000
+#define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff
+#define CP_LOAD_STATE_0_DST_OFF__SHIFT 0
+static inline uint32_t CP_LOAD_STATE_0_DST_OFF(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE_0_DST_OFF__SHIFT) & CP_LOAD_STATE_0_DST_OFF__MASK;
+}
+#define CP_LOAD_STATE_0_STATE_SRC__MASK 0x00070000
+#define CP_LOAD_STATE_0_STATE_SRC__SHIFT 16
+static inline uint32_t CP_LOAD_STATE_0_STATE_SRC(enum adreno_state_src val)
+{
+ return ((val) << CP_LOAD_STATE_0_STATE_SRC__SHIFT) & CP_LOAD_STATE_0_STATE_SRC__MASK;
+}
+#define CP_LOAD_STATE_0_STATE_BLOCK__MASK 0x00380000
+#define CP_LOAD_STATE_0_STATE_BLOCK__SHIFT 19
+static inline uint32_t CP_LOAD_STATE_0_STATE_BLOCK(enum adreno_state_block val)
+{
+ return ((val) << CP_LOAD_STATE_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE_0_STATE_BLOCK__MASK;
+}
+#define CP_LOAD_STATE_0_NUM_UNIT__MASK 0xffc00000
+#define CP_LOAD_STATE_0_NUM_UNIT__SHIFT 22
+static inline uint32_t CP_LOAD_STATE_0_NUM_UNIT(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE_0_NUM_UNIT__MASK;
+}
+
+#define REG_CP_LOAD_STATE_1 0x00000001
+#define CP_LOAD_STATE_1_STATE_TYPE__MASK 0x00000003
+#define CP_LOAD_STATE_1_STATE_TYPE__SHIFT 0
+static inline uint32_t CP_LOAD_STATE_1_STATE_TYPE(enum adreno_state_type val)
+{
+ return ((val) << CP_LOAD_STATE_1_STATE_TYPE__SHIFT) & CP_LOAD_STATE_1_STATE_TYPE__MASK;
+}
+#define CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK 0xfffffffc
+#define CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT 2
+static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
+{
+ return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK;
+}
+
+#define REG_CP_LOAD_STATE4_0 0x00000000
+#define CP_LOAD_STATE4_0_DST_OFF__MASK 0x00003fff
+#define CP_LOAD_STATE4_0_DST_OFF__SHIFT 0
+static inline uint32_t CP_LOAD_STATE4_0_DST_OFF(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE4_0_DST_OFF__SHIFT) & CP_LOAD_STATE4_0_DST_OFF__MASK;
+}
+#define CP_LOAD_STATE4_0_STATE_SRC__MASK 0x00030000
+#define CP_LOAD_STATE4_0_STATE_SRC__SHIFT 16
+static inline uint32_t CP_LOAD_STATE4_0_STATE_SRC(enum a4xx_state_src val)
+{
+ return ((val) << CP_LOAD_STATE4_0_STATE_SRC__SHIFT) & CP_LOAD_STATE4_0_STATE_SRC__MASK;
+}
+#define CP_LOAD_STATE4_0_STATE_BLOCK__MASK 0x003c0000
+#define CP_LOAD_STATE4_0_STATE_BLOCK__SHIFT 18
+static inline uint32_t CP_LOAD_STATE4_0_STATE_BLOCK(enum a4xx_state_block val)
+{
+ return ((val) << CP_LOAD_STATE4_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE4_0_STATE_BLOCK__MASK;
+}
+#define CP_LOAD_STATE4_0_NUM_UNIT__MASK 0xffc00000
+#define CP_LOAD_STATE4_0_NUM_UNIT__SHIFT 22
+static inline uint32_t CP_LOAD_STATE4_0_NUM_UNIT(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE4_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE4_0_NUM_UNIT__MASK;
+}
+
+#define REG_CP_LOAD_STATE4_1 0x00000001
+#define CP_LOAD_STATE4_1_STATE_TYPE__MASK 0x00000003
+#define CP_LOAD_STATE4_1_STATE_TYPE__SHIFT 0
+static inline uint32_t CP_LOAD_STATE4_1_STATE_TYPE(enum a4xx_state_type val)
+{
+ return ((val) << CP_LOAD_STATE4_1_STATE_TYPE__SHIFT) & CP_LOAD_STATE4_1_STATE_TYPE__MASK;
+}
+#define CP_LOAD_STATE4_1_EXT_SRC_ADDR__MASK 0xfffffffc
+#define CP_LOAD_STATE4_1_EXT_SRC_ADDR__SHIFT 2
+static inline uint32_t CP_LOAD_STATE4_1_EXT_SRC_ADDR(uint32_t val)
+{
+ return ((val >> 2) << CP_LOAD_STATE4_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE4_1_EXT_SRC_ADDR__MASK;
+}
+
+#define REG_CP_LOAD_STATE4_2 0x00000002
+#define CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__MASK 0xffffffff
+#define CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__SHIFT 0
+static inline uint32_t CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__MASK;
+}
+
+#define REG_CP_LOAD_STATE6_0 0x00000000
+#define CP_LOAD_STATE6_0_DST_OFF__MASK 0x00003fff
+#define CP_LOAD_STATE6_0_DST_OFF__SHIFT 0
+static inline uint32_t CP_LOAD_STATE6_0_DST_OFF(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE6_0_DST_OFF__SHIFT) & CP_LOAD_STATE6_0_DST_OFF__MASK;
+}
+#define CP_LOAD_STATE6_0_STATE_TYPE__MASK 0x0000c000
+#define CP_LOAD_STATE6_0_STATE_TYPE__SHIFT 14
+static inline uint32_t CP_LOAD_STATE6_0_STATE_TYPE(enum a6xx_state_type val)
+{
+ return ((val) << CP_LOAD_STATE6_0_STATE_TYPE__SHIFT) & CP_LOAD_STATE6_0_STATE_TYPE__MASK;
+}
+#define CP_LOAD_STATE6_0_STATE_SRC__MASK 0x00030000
+#define CP_LOAD_STATE6_0_STATE_SRC__SHIFT 16
+static inline uint32_t CP_LOAD_STATE6_0_STATE_SRC(enum a6xx_state_src val)
+{
+ return ((val) << CP_LOAD_STATE6_0_STATE_SRC__SHIFT) & CP_LOAD_STATE6_0_STATE_SRC__MASK;
+}
+#define CP_LOAD_STATE6_0_STATE_BLOCK__MASK 0x003c0000
+#define CP_LOAD_STATE6_0_STATE_BLOCK__SHIFT 18
+static inline uint32_t CP_LOAD_STATE6_0_STATE_BLOCK(enum a6xx_state_block val)
+{
+ return ((val) << CP_LOAD_STATE6_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE6_0_STATE_BLOCK__MASK;
+}
+#define CP_LOAD_STATE6_0_NUM_UNIT__MASK 0xffc00000
+#define CP_LOAD_STATE6_0_NUM_UNIT__SHIFT 22
+static inline uint32_t CP_LOAD_STATE6_0_NUM_UNIT(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE6_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE6_0_NUM_UNIT__MASK;
+}
+
+#define REG_CP_LOAD_STATE6_1 0x00000001
+#define CP_LOAD_STATE6_1_EXT_SRC_ADDR__MASK 0xfffffffc
+#define CP_LOAD_STATE6_1_EXT_SRC_ADDR__SHIFT 2
+static inline uint32_t CP_LOAD_STATE6_1_EXT_SRC_ADDR(uint32_t val)
+{
+ return ((val >> 2) << CP_LOAD_STATE6_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE6_1_EXT_SRC_ADDR__MASK;
+}
+
+#define REG_CP_LOAD_STATE6_2 0x00000002
+#define CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__MASK 0xffffffff
+#define CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__SHIFT 0
+static inline uint32_t CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__MASK;
+}
+
+#define REG_CP_LOAD_STATE6_EXT_SRC_ADDR 0x00000001
+
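+/* Usage sketch (annotation, not generator output): the field builders in
+ * this file are meant to be OR'd together into packet payload dwords,
+ * typically via the driver's OUT_PKT7()/OUT_RING() ring helpers. Assuming
+ * the CP_LOAD_STATE6_FRAG opcode and the a6xx_state_* enums defined earlier
+ * in this file, and placeholder values for ring/iova/num_unit, loading
+ * fragment-shader constants from a dword-aligned GPU address could look
+ * like:
+ *
+ *   OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3);
+ *   OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
+ *                  CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
+ *                  CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
+ *                  CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_SHADER) |
+ *                  CP_LOAD_STATE6_0_NUM_UNIT(num_unit));
+ *   OUT_RING(ring, CP_LOAD_STATE6_1_EXT_SRC_ADDR(lower_32_bits(iova)));
+ *   OUT_RING(ring, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(upper_32_bits(iova)));
+ */
+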
+#define REG_CP_DRAW_INDX_0 0x00000000
+#define CP_DRAW_INDX_0_VIZ_QUERY__MASK 0xffffffff
+#define CP_DRAW_INDX_0_VIZ_QUERY__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_0_VIZ_QUERY(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_0_VIZ_QUERY__SHIFT) & CP_DRAW_INDX_0_VIZ_QUERY__MASK;
+}
+
+#define REG_CP_DRAW_INDX_1 0x00000001
+#define CP_DRAW_INDX_1_PRIM_TYPE__MASK 0x0000003f
+#define CP_DRAW_INDX_1_PRIM_TYPE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_1_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << CP_DRAW_INDX_1_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_1_PRIM_TYPE__MASK;
+}
+#define CP_DRAW_INDX_1_SOURCE_SELECT__MASK 0x000000c0
+#define CP_DRAW_INDX_1_SOURCE_SELECT__SHIFT 6
+static inline uint32_t CP_DRAW_INDX_1_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << CP_DRAW_INDX_1_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_1_SOURCE_SELECT__MASK;
+}
+#define CP_DRAW_INDX_1_VIS_CULL__MASK 0x00000600
+#define CP_DRAW_INDX_1_VIS_CULL__SHIFT 9
+static inline uint32_t CP_DRAW_INDX_1_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_INDX_1_VIS_CULL__SHIFT) & CP_DRAW_INDX_1_VIS_CULL__MASK;
+}
+#define CP_DRAW_INDX_1_INDEX_SIZE__MASK 0x00000800
+#define CP_DRAW_INDX_1_INDEX_SIZE__SHIFT 11
+static inline uint32_t CP_DRAW_INDX_1_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << CP_DRAW_INDX_1_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_1_INDEX_SIZE__MASK;
+}
+#define CP_DRAW_INDX_1_NOT_EOP 0x00001000
+#define CP_DRAW_INDX_1_SMALL_INDEX 0x00002000
+#define CP_DRAW_INDX_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define CP_DRAW_INDX_1_NUM_INSTANCES__MASK 0xff000000
+#define CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT 24
+static inline uint32_t CP_DRAW_INDX_1_NUM_INSTANCES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_1_NUM_INSTANCES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2 0x00000002
+#define CP_DRAW_INDX_2_NUM_INDICES__MASK 0xffffffff
+#define CP_DRAW_INDX_2_NUM_INDICES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_3 0x00000003
+#define CP_DRAW_INDX_3_INDX_BASE__MASK 0xffffffff
+#define CP_DRAW_INDX_3_INDX_BASE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_3_INDX_BASE(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_3_INDX_BASE__SHIFT) & CP_DRAW_INDX_3_INDX_BASE__MASK;
+}
+
+#define REG_CP_DRAW_INDX_4 0x00000004
+#define CP_DRAW_INDX_4_INDX_SIZE__MASK 0xffffffff
+#define CP_DRAW_INDX_4_INDX_SIZE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_4_INDX_SIZE(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_4_INDX_SIZE__SHIFT) & CP_DRAW_INDX_4_INDX_SIZE__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2_0 0x00000000
+#define CP_DRAW_INDX_2_0_VIZ_QUERY__MASK 0xffffffff
+#define CP_DRAW_INDX_2_0_VIZ_QUERY__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_0_VIZ_QUERY(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_0_VIZ_QUERY__SHIFT) & CP_DRAW_INDX_2_0_VIZ_QUERY__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2_1 0x00000001
+#define CP_DRAW_INDX_2_1_PRIM_TYPE__MASK 0x0000003f
+#define CP_DRAW_INDX_2_1_PRIM_TYPE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_1_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_2_1_PRIM_TYPE__MASK;
+}
+#define CP_DRAW_INDX_2_1_SOURCE_SELECT__MASK 0x000000c0
+#define CP_DRAW_INDX_2_1_SOURCE_SELECT__SHIFT 6
+static inline uint32_t CP_DRAW_INDX_2_1_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_2_1_SOURCE_SELECT__MASK;
+}
+#define CP_DRAW_INDX_2_1_VIS_CULL__MASK 0x00000600
+#define CP_DRAW_INDX_2_1_VIS_CULL__SHIFT 9
+static inline uint32_t CP_DRAW_INDX_2_1_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_VIS_CULL__SHIFT) & CP_DRAW_INDX_2_1_VIS_CULL__MASK;
+}
+#define CP_DRAW_INDX_2_1_INDEX_SIZE__MASK 0x00000800
+#define CP_DRAW_INDX_2_1_INDEX_SIZE__SHIFT 11
+static inline uint32_t CP_DRAW_INDX_2_1_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_2_1_INDEX_SIZE__MASK;
+}
+#define CP_DRAW_INDX_2_1_NOT_EOP 0x00001000
+#define CP_DRAW_INDX_2_1_SMALL_INDEX 0x00002000
+#define CP_DRAW_INDX_2_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK 0xff000000
+#define CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT 24
+static inline uint32_t CP_DRAW_INDX_2_1_NUM_INSTANCES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2_2 0x00000002
+#define CP_DRAW_INDX_2_2_NUM_INDICES__MASK 0xffffffff
+#define CP_DRAW_INDX_2_2_NUM_INDICES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_2_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_2_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_0 0x00000000
+#define CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK 0x0000003f
+#define CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK 0x000000c0
+#define CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT 6
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK 0x00000300
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT 8
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000c00
+#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 10
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__MASK 0x00003000
+#define CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__SHIFT 12
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(enum a6xx_patch_type val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__SHIFT) & CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_GS_ENABLE 0x00010000
+#define CP_DRAW_INDX_OFFSET_0_TESS_ENABLE 0x00020000
+
+#define REG_CP_DRAW_INDX_OFFSET_1 0x00000001
+#define CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002
+#define CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_2_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_3 0x00000003
+#define CP_DRAW_INDX_OFFSET_3_FIRST_INDX__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_3_FIRST_INDX__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_3_FIRST_INDX(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_3_FIRST_INDX__SHIFT) & CP_DRAW_INDX_OFFSET_3_FIRST_INDX__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_4 0x00000004
+#define CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__SHIFT) & CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_5 0x00000005
+#define CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_INDX_BASE 0x00000004
+
+#define REG_CP_DRAW_INDX_OFFSET_6 0x00000006
+#define CP_DRAW_INDX_OFFSET_6_MAX_INDICES__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_6_MAX_INDICES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_6_MAX_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_6_MAX_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_6_MAX_INDICES__MASK;
+}
+
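+/* Note (annotation, not generator output): REG_CP_DRAW_INDX_OFFSET_4/_5
+ * above use the a5xx+ layout (64-bit INDX_BASE, with MAX_INDICES in dword
+ * 6), while the re-definitions below appear to be the older 32-bit layout
+ * (INDX_BASE plus INDX_SIZE). The REG_ offsets are identical, so the
+ * repeated #defines are harmless.
+ */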
+#define REG_CP_DRAW_INDX_OFFSET_4 0x00000004
+#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_4_INDX_BASE(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_5 0x00000005
+#define CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_SIZE(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK;
+}
+
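+/* Usage sketch (annotation, not generator output): assuming the
+ * CP_DRAW_INDX_OFFSET opcode and the pc_di_* enums defined earlier in this
+ * file, a direct (non-indexed) draw needs only the first three dwords:
+ *
+ *   OUT_PKT7(ring, CP_DRAW_INDX_OFFSET, 3);
+ *   OUT_RING(ring, CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(DI_PT_TRILIST) |
+ *                  CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
+ *                  CP_DRAW_INDX_OFFSET_0_VIS_CULL(IGNORE_VISIBILITY));
+ *   OUT_RING(ring, CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES(1));
+ *   OUT_RING(ring, CP_DRAW_INDX_OFFSET_2_NUM_INDICES(vertex_count));
+ */
+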
+#define REG_A4XX_CP_DRAW_INDIRECT_0 0x00000000
+#define A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__MASK 0x0000003f
+#define A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__MASK 0x000000c0
+#define A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__MASK 0x00000300
+#define A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__SHIFT 8
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__MASK 0x00000c00
+#define A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__SHIFT 10
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE(enum a4xx_index_size val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__MASK 0x00003000
+#define A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__SHIFT 12
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE(enum a6xx_patch_type val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_GS_ENABLE 0x00010000
+#define A4XX_CP_DRAW_INDIRECT_0_TESS_ENABLE 0x00020000
+
+#define REG_A4XX_CP_DRAW_INDIRECT_1 0x00000001
+#define A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDIRECT_1_INDIRECT__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_1_INDIRECT(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_1_INDIRECT__SHIFT) & A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDIRECT_1 0x00000001
+#define A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__SHIFT) & A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDIRECT_2 0x00000002
+#define A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT) & A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDIRECT_INDIRECT 0x00000001
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_0 0x00000000
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__MASK 0x0000003f
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__MASK 0x000000c0
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__MASK 0x00000300
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__SHIFT 8
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__MASK 0x00000c00
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__SHIFT 10
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE(enum a4xx_index_size val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__MASK 0x00003000
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__SHIFT 12
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE(enum a6xx_patch_type val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_GS_ENABLE 0x00010000
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_ENABLE 0x00020000
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_1 0x00000001
+#define A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__MASK;
+}
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_2 0x00000002
+#define A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__MASK;
+}
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_3 0x00000003
+#define A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_1 0x00000001
+#define A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_2 0x00000002
+#define A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_INDX_BASE 0x00000001
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_3 0x00000003
+#define A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_4 0x00000004
+#define A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_5 0x00000005
+#define A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_INDIRECT 0x00000004
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_0 0x00000000
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__MASK 0x0000003f
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__SHIFT 0
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__MASK 0x000000c0
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__MASK 0x00000300
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__SHIFT 8
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__MASK 0x00000c00
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__SHIFT 10
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE(enum a4xx_index_size val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__MASK 0x00003000
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__SHIFT 12
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE(enum a6xx_patch_type val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_GS_ENABLE 0x00010000
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_TESS_ENABLE 0x00020000
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_1 0x00000001
+#define A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__MASK 0x0000000f
+#define A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__SHIFT 0
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(enum a6xx_draw_indirect_opcode val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__MASK 0x003fff00
+#define A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__SHIFT 8
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(uint32_t val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__MASK;
+}
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_DRAW_COUNT 0x00000002
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_INDIRECT 0x00000003
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_STRIDE 0x00000005
+
+#define REG_CP_DRAW_INDIRECT_MULTI_INDEX_INDEXED 0x00000003
+
+#define REG_CP_DRAW_INDIRECT_MULTI_MAX_INDICES_INDEXED 0x00000005
+
+#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_INDEXED 0x00000006
+
+#define REG_CP_DRAW_INDIRECT_MULTI_STRIDE_INDEXED 0x00000008
+
+#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_INDIRECT 0x00000003
+
+#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_COUNT_INDIRECT 0x00000005
+
+#define REG_CP_DRAW_INDIRECT_MULTI_STRIDE_INDIRECT 0x00000007
+
+#define REG_CP_DRAW_INDIRECT_MULTI_INDEX_INDIRECT_INDEXED 0x00000003
+
+#define REG_CP_DRAW_INDIRECT_MULTI_MAX_INDICES_INDIRECT_INDEXED 0x00000005
+
+#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_INDIRECT_INDEXED 0x00000006
+
+#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_COUNT_INDIRECT_INDEXED 0x00000008
+
+#define REG_CP_DRAW_INDIRECT_MULTI_STRIDE_INDIRECT_INDEXED 0x0000000a
+
+#define REG_CP_DRAW_PRED_ENABLE_GLOBAL_0 0x00000000
+#define CP_DRAW_PRED_ENABLE_GLOBAL_0_ENABLE 0x00000001
+
+#define REG_CP_DRAW_PRED_ENABLE_LOCAL_0 0x00000000
+#define CP_DRAW_PRED_ENABLE_LOCAL_0_ENABLE 0x00000001
+
+#define REG_CP_DRAW_PRED_SET_0 0x00000000
+#define CP_DRAW_PRED_SET_0_SRC__MASK 0x000000f0
+#define CP_DRAW_PRED_SET_0_SRC__SHIFT 4
+static inline uint32_t CP_DRAW_PRED_SET_0_SRC(enum cp_draw_pred_src val)
+{
+ return ((val) << CP_DRAW_PRED_SET_0_SRC__SHIFT) & CP_DRAW_PRED_SET_0_SRC__MASK;
+}
+#define CP_DRAW_PRED_SET_0_TEST__MASK 0x00000100
+#define CP_DRAW_PRED_SET_0_TEST__SHIFT 8
+static inline uint32_t CP_DRAW_PRED_SET_0_TEST(enum cp_draw_pred_test val)
+{
+ return ((val) << CP_DRAW_PRED_SET_0_TEST__SHIFT) & CP_DRAW_PRED_SET_0_TEST__MASK;
+}
+
+#define REG_CP_DRAW_PRED_SET_MEM_ADDR 0x00000001
+
+static inline uint32_t REG_CP_SET_DRAW_STATE_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+
+static inline uint32_t REG_CP_SET_DRAW_STATE__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+#define CP_SET_DRAW_STATE__0_COUNT__MASK 0x0000ffff
+#define CP_SET_DRAW_STATE__0_COUNT__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE__0_COUNT(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE__0_COUNT__SHIFT) & CP_SET_DRAW_STATE__0_COUNT__MASK;
+}
+#define CP_SET_DRAW_STATE__0_DIRTY 0x00010000
+#define CP_SET_DRAW_STATE__0_DISABLE 0x00020000
+#define CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS 0x00040000
+#define CP_SET_DRAW_STATE__0_LOAD_IMMED 0x00080000
+#define CP_SET_DRAW_STATE__0_BINNING 0x00100000
+#define CP_SET_DRAW_STATE__0_GMEM 0x00200000
+#define CP_SET_DRAW_STATE__0_SYSMEM 0x00400000
+#define CP_SET_DRAW_STATE__0_GROUP_ID__MASK 0x1f000000
+#define CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT 24
+static inline uint32_t CP_SET_DRAW_STATE__0_GROUP_ID(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT) & CP_SET_DRAW_STATE__0_GROUP_ID__MASK;
+}
+
+static inline uint32_t REG_CP_SET_DRAW_STATE__1(uint32_t i0) { return 0x00000001 + 0x3*i0; }
+#define CP_SET_DRAW_STATE__1_ADDR_LO__MASK 0xffffffff
+#define CP_SET_DRAW_STATE__1_ADDR_LO__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE__1_ADDR_LO(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE__1_ADDR_LO__SHIFT) & CP_SET_DRAW_STATE__1_ADDR_LO__MASK;
+}
+
+static inline uint32_t REG_CP_SET_DRAW_STATE__2(uint32_t i0) { return 0x00000002 + 0x3*i0; }
+#define CP_SET_DRAW_STATE__2_ADDR_HI__MASK 0xffffffff
+#define CP_SET_DRAW_STATE__2_ADDR_HI__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE__2_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE__2_ADDR_HI__SHIFT) & CP_SET_DRAW_STATE__2_ADDR_HI__MASK;
+}
+
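+/* Usage sketch (annotation, not generator output): CP_SET_DRAW_STATE takes a
+ * variable number of 3-dword group entries, which is why the REG_ helpers
+ * above are functions of the entry index i0. Assuming the CP_SET_DRAW_STATE
+ * opcode defined earlier in this file, emitting a single group could look
+ * like:
+ *
+ *   OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
+ *   OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(size_dwords) |
+ *                  CP_SET_DRAW_STATE__0_GROUP_ID(group_id));
+ *   OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(lower_32_bits(iova)));
+ *   OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(upper_32_bits(iova)));
+ */
+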
+#define REG_CP_SET_BIN_0 0x00000000
+
+#define REG_CP_SET_BIN_1 0x00000001
+#define CP_SET_BIN_1_X1__MASK 0x0000ffff
+#define CP_SET_BIN_1_X1__SHIFT 0
+static inline uint32_t CP_SET_BIN_1_X1(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_1_X1__SHIFT) & CP_SET_BIN_1_X1__MASK;
+}
+#define CP_SET_BIN_1_Y1__MASK 0xffff0000
+#define CP_SET_BIN_1_Y1__SHIFT 16
+static inline uint32_t CP_SET_BIN_1_Y1(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_1_Y1__SHIFT) & CP_SET_BIN_1_Y1__MASK;
+}
+
+#define REG_CP_SET_BIN_2 0x00000002
+#define CP_SET_BIN_2_X2__MASK 0x0000ffff
+#define CP_SET_BIN_2_X2__SHIFT 0
+static inline uint32_t CP_SET_BIN_2_X2(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_2_X2__SHIFT) & CP_SET_BIN_2_X2__MASK;
+}
+#define CP_SET_BIN_2_Y2__MASK 0xffff0000
+#define CP_SET_BIN_2_Y2__SHIFT 16
+static inline uint32_t CP_SET_BIN_2_Y2(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_2_Y2__SHIFT) & CP_SET_BIN_2_Y2__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA_0 0x00000000
+#define CP_SET_BIN_DATA_0_BIN_DATA_ADDR__MASK 0xffffffff
+#define CP_SET_BIN_DATA_0_BIN_DATA_ADDR__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA_0_BIN_DATA_ADDR(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA_0_BIN_DATA_ADDR__SHIFT) & CP_SET_BIN_DATA_0_BIN_DATA_ADDR__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA_1 0x00000001
+#define CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK 0xffffffff
+#define CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT) & CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_0 0x00000000
+#define CP_SET_BIN_DATA5_0_VSC_SIZE__MASK 0x003f0000
+#define CP_SET_BIN_DATA5_0_VSC_SIZE__SHIFT 16
+static inline uint32_t CP_SET_BIN_DATA5_0_VSC_SIZE(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_0_VSC_SIZE__SHIFT) & CP_SET_BIN_DATA5_0_VSC_SIZE__MASK;
+}
+#define CP_SET_BIN_DATA5_0_VSC_N__MASK 0x07c00000
+#define CP_SET_BIN_DATA5_0_VSC_N__SHIFT 22
+static inline uint32_t CP_SET_BIN_DATA5_0_VSC_N(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_0_VSC_N__SHIFT) & CP_SET_BIN_DATA5_0_VSC_N__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_1 0x00000001
+#define CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__SHIFT) & CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_2 0x00000002
+#define CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__SHIFT) & CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_3 0x00000003
+#define CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__SHIFT) & CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_4 0x00000004
+#define CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__SHIFT) & CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_5 0x00000005
+#define CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__SHIFT) & CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_6 0x00000006
+#define CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__SHIFT) & CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_OFFSET_0 0x00000000
+#define CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__MASK 0x003f0000
+#define CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__SHIFT 16
+static inline uint32_t CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__SHIFT) & CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__MASK;
+}
+#define CP_SET_BIN_DATA5_OFFSET_0_VSC_N__MASK 0x07c00000
+#define CP_SET_BIN_DATA5_OFFSET_0_VSC_N__SHIFT 22
+static inline uint32_t CP_SET_BIN_DATA5_OFFSET_0_VSC_N(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_OFFSET_0_VSC_N__SHIFT) & CP_SET_BIN_DATA5_OFFSET_0_VSC_N__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_OFFSET_1 0x00000001
+#define CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__SHIFT) & CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_OFFSET_2 0x00000002
+#define CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__SHIFT) & CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_OFFSET_3 0x00000003
+#define CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__SHIFT) & CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__MASK;
+}
+
+#define REG_CP_REG_RMW_0 0x00000000
+#define CP_REG_RMW_0_DST_REG__MASK 0x0003ffff
+#define CP_REG_RMW_0_DST_REG__SHIFT 0
+static inline uint32_t CP_REG_RMW_0_DST_REG(uint32_t val)
+{
+ return ((val) << CP_REG_RMW_0_DST_REG__SHIFT) & CP_REG_RMW_0_DST_REG__MASK;
+}
+#define CP_REG_RMW_0_ROTATE__MASK 0x1f000000
+#define CP_REG_RMW_0_ROTATE__SHIFT 24
+static inline uint32_t CP_REG_RMW_0_ROTATE(uint32_t val)
+{
+ return ((val) << CP_REG_RMW_0_ROTATE__SHIFT) & CP_REG_RMW_0_ROTATE__MASK;
+}
+#define CP_REG_RMW_0_SRC1_ADD 0x20000000
+#define CP_REG_RMW_0_SRC1_IS_REG 0x40000000
+#define CP_REG_RMW_0_SRC0_IS_REG 0x80000000
+
+#define REG_CP_REG_RMW_1 0x00000001
+#define CP_REG_RMW_1_SRC0__MASK 0xffffffff
+#define CP_REG_RMW_1_SRC0__SHIFT 0
+static inline uint32_t CP_REG_RMW_1_SRC0(uint32_t val)
+{
+ return ((val) << CP_REG_RMW_1_SRC0__SHIFT) & CP_REG_RMW_1_SRC0__MASK;
+}
+
+#define REG_CP_REG_RMW_2 0x00000002
+#define CP_REG_RMW_2_SRC1__MASK 0xffffffff
+#define CP_REG_RMW_2_SRC1__SHIFT 0
+static inline uint32_t CP_REG_RMW_2_SRC1(uint32_t val)
+{
+ return ((val) << CP_REG_RMW_2_SRC1__SHIFT) & CP_REG_RMW_2_SRC1__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_0 0x00000000
+#define CP_REG_TO_MEM_0_REG__MASK 0x0003ffff
+#define CP_REG_TO_MEM_0_REG__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_0_REG(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_0_REG__SHIFT) & CP_REG_TO_MEM_0_REG__MASK;
+}
+#define CP_REG_TO_MEM_0_CNT__MASK 0x3ffc0000
+#define CP_REG_TO_MEM_0_CNT__SHIFT 18
+static inline uint32_t CP_REG_TO_MEM_0_CNT(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_0_CNT__SHIFT) & CP_REG_TO_MEM_0_CNT__MASK;
+}
+#define CP_REG_TO_MEM_0_64B 0x40000000
+#define CP_REG_TO_MEM_0_ACCUMULATE 0x80000000
+
+#define REG_CP_REG_TO_MEM_1 0x00000001
+#define CP_REG_TO_MEM_1_DEST__MASK 0xffffffff
+#define CP_REG_TO_MEM_1_DEST__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_2 0x00000002
+#define CP_REG_TO_MEM_2_DEST_HI__MASK 0xffffffff
+#define CP_REG_TO_MEM_2_DEST_HI__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_2_DEST_HI(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_2_DEST_HI__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_OFFSET_REG_0 0x00000000
+#define CP_REG_TO_MEM_OFFSET_REG_0_REG__MASK 0x0003ffff
+#define CP_REG_TO_MEM_OFFSET_REG_0_REG__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_0_REG(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_REG_0_REG__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_0_REG__MASK;
+}
+#define CP_REG_TO_MEM_OFFSET_REG_0_CNT__MASK 0x3ffc0000
+#define CP_REG_TO_MEM_OFFSET_REG_0_CNT__SHIFT 18
+static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_0_CNT(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_REG_0_CNT__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_0_CNT__MASK;
+}
+#define CP_REG_TO_MEM_OFFSET_REG_0_64B 0x40000000
+#define CP_REG_TO_MEM_OFFSET_REG_0_ACCUMULATE 0x80000000
+
+#define REG_CP_REG_TO_MEM_OFFSET_REG_1 0x00000001
+#define CP_REG_TO_MEM_OFFSET_REG_1_DEST__MASK 0xffffffff
+#define CP_REG_TO_MEM_OFFSET_REG_1_DEST__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_1_DEST(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_REG_1_DEST__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_1_DEST__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_OFFSET_REG_2 0x00000002
+#define CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__MASK 0xffffffff
+#define CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_OFFSET_REG_3 0x00000003
+#define CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__MASK 0x0003ffff
+#define CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__MASK;
+}
+#define CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0_SCRATCH 0x00080000
+
+#define REG_CP_REG_TO_MEM_OFFSET_MEM_0 0x00000000
+#define CP_REG_TO_MEM_OFFSET_MEM_0_REG__MASK 0x0003ffff
+#define CP_REG_TO_MEM_OFFSET_MEM_0_REG__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_0_REG(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_MEM_0_REG__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_0_REG__MASK;
+}
+#define CP_REG_TO_MEM_OFFSET_MEM_0_CNT__MASK 0x3ffc0000
+#define CP_REG_TO_MEM_OFFSET_MEM_0_CNT__SHIFT 18
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_0_CNT(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_MEM_0_CNT__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_0_CNT__MASK;
+}
+#define CP_REG_TO_MEM_OFFSET_MEM_0_64B 0x40000000
+#define CP_REG_TO_MEM_OFFSET_MEM_0_ACCUMULATE 0x80000000
+
+#define REG_CP_REG_TO_MEM_OFFSET_MEM_1 0x00000001
+#define CP_REG_TO_MEM_OFFSET_MEM_1_DEST__MASK 0xffffffff
+#define CP_REG_TO_MEM_OFFSET_MEM_1_DEST__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_1_DEST(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_1_DEST__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_OFFSET_MEM_2 0x00000002
+#define CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__MASK 0xffffffff
+#define CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_OFFSET_MEM_3 0x00000003
+#define CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__MASK 0xffffffff
+#define CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_OFFSET_MEM_4 0x00000004
+#define CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__MASK 0xffffffff
+#define CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__MASK;
+}
+
+#define REG_CP_MEM_TO_REG_0 0x00000000
+#define CP_MEM_TO_REG_0_REG__MASK 0x0003ffff
+#define CP_MEM_TO_REG_0_REG__SHIFT 0
+static inline uint32_t CP_MEM_TO_REG_0_REG(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_0_REG__SHIFT) & CP_MEM_TO_REG_0_REG__MASK;
+}
+#define CP_MEM_TO_REG_0_CNT__MASK 0x3ff80000
+#define CP_MEM_TO_REG_0_CNT__SHIFT 19
+static inline uint32_t CP_MEM_TO_REG_0_CNT(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_0_CNT__SHIFT) & CP_MEM_TO_REG_0_CNT__MASK;
+}
+#define CP_MEM_TO_REG_0_SHIFT_BY_2 0x40000000
+#define CP_MEM_TO_REG_0_UNK31 0x80000000
+
+#define REG_CP_MEM_TO_REG_1 0x00000001
+#define CP_MEM_TO_REG_1_SRC__MASK 0xffffffff
+#define CP_MEM_TO_REG_1_SRC__SHIFT 0
+static inline uint32_t CP_MEM_TO_REG_1_SRC(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_1_SRC__SHIFT) & CP_MEM_TO_REG_1_SRC__MASK;
+}
+
+#define REG_CP_MEM_TO_REG_2 0x00000002
+#define CP_MEM_TO_REG_2_SRC_HI__MASK 0xffffffff
+#define CP_MEM_TO_REG_2_SRC_HI__SHIFT 0
+static inline uint32_t CP_MEM_TO_REG_2_SRC_HI(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_2_SRC_HI__SHIFT) & CP_MEM_TO_REG_2_SRC_HI__MASK;
+}
+
+#define REG_CP_MEM_TO_MEM_0 0x00000000
+#define CP_MEM_TO_MEM_0_NEG_A 0x00000001
+#define CP_MEM_TO_MEM_0_NEG_B 0x00000002
+#define CP_MEM_TO_MEM_0_NEG_C 0x00000004
+#define CP_MEM_TO_MEM_0_DOUBLE 0x20000000
+#define CP_MEM_TO_MEM_0_WAIT_FOR_MEM_WRITES 0x40000000
+#define CP_MEM_TO_MEM_0_UNK31 0x80000000
+
+#define REG_CP_MEMCPY_0 0x00000000
+#define CP_MEMCPY_0_DWORDS__MASK 0xffffffff
+#define CP_MEMCPY_0_DWORDS__SHIFT 0
+static inline uint32_t CP_MEMCPY_0_DWORDS(uint32_t val)
+{
+ return ((val) << CP_MEMCPY_0_DWORDS__SHIFT) & CP_MEMCPY_0_DWORDS__MASK;
+}
+
+#define REG_CP_MEMCPY_1 0x00000001
+#define CP_MEMCPY_1_SRC_LO__MASK 0xffffffff
+#define CP_MEMCPY_1_SRC_LO__SHIFT 0
+static inline uint32_t CP_MEMCPY_1_SRC_LO(uint32_t val)
+{
+ return ((val) << CP_MEMCPY_1_SRC_LO__SHIFT) & CP_MEMCPY_1_SRC_LO__MASK;
+}
+
+#define REG_CP_MEMCPY_2 0x00000002
+#define CP_MEMCPY_2_SRC_HI__MASK 0xffffffff
+#define CP_MEMCPY_2_SRC_HI__SHIFT 0
+static inline uint32_t CP_MEMCPY_2_SRC_HI(uint32_t val)
+{
+ return ((val) << CP_MEMCPY_2_SRC_HI__SHIFT) & CP_MEMCPY_2_SRC_HI__MASK;
+}
+
+#define REG_CP_MEMCPY_3 0x00000003
+#define CP_MEMCPY_3_DST_LO__MASK 0xffffffff
+#define CP_MEMCPY_3_DST_LO__SHIFT 0
+static inline uint32_t CP_MEMCPY_3_DST_LO(uint32_t val)
+{
+ return ((val) << CP_MEMCPY_3_DST_LO__SHIFT) & CP_MEMCPY_3_DST_LO__MASK;
+}
+
+#define REG_CP_MEMCPY_4 0x00000004
+#define CP_MEMCPY_4_DST_HI__MASK 0xffffffff
+#define CP_MEMCPY_4_DST_HI__SHIFT 0
+static inline uint32_t CP_MEMCPY_4_DST_HI(uint32_t val)
+{
+ return ((val) << CP_MEMCPY_4_DST_HI__SHIFT) & CP_MEMCPY_4_DST_HI__MASK;
+}
+
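+/* Usage sketch (annotation, not generator output): assuming the CP_MEMCPY
+ * opcode defined earlier in this file, a CP-driven copy of 'dwords' 32-bit
+ * words between two GPU addresses could look like:
+ *
+ *   OUT_PKT7(ring, CP_MEMCPY, 5);
+ *   OUT_RING(ring, CP_MEMCPY_0_DWORDS(dwords));
+ *   OUT_RING(ring, CP_MEMCPY_1_SRC_LO(lower_32_bits(src_iova)));
+ *   OUT_RING(ring, CP_MEMCPY_2_SRC_HI(upper_32_bits(src_iova)));
+ *   OUT_RING(ring, CP_MEMCPY_3_DST_LO(lower_32_bits(dst_iova)));
+ *   OUT_RING(ring, CP_MEMCPY_4_DST_HI(upper_32_bits(dst_iova)));
+ */
+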
+#define REG_CP_REG_TO_SCRATCH_0 0x00000000
+#define CP_REG_TO_SCRATCH_0_REG__MASK 0x0003ffff
+#define CP_REG_TO_SCRATCH_0_REG__SHIFT 0
+static inline uint32_t CP_REG_TO_SCRATCH_0_REG(uint32_t val)
+{
+ return ((val) << CP_REG_TO_SCRATCH_0_REG__SHIFT) & CP_REG_TO_SCRATCH_0_REG__MASK;
+}
+#define CP_REG_TO_SCRATCH_0_SCRATCH__MASK 0x00700000
+#define CP_REG_TO_SCRATCH_0_SCRATCH__SHIFT 20
+static inline uint32_t CP_REG_TO_SCRATCH_0_SCRATCH(uint32_t val)
+{
+ return ((val) << CP_REG_TO_SCRATCH_0_SCRATCH__SHIFT) & CP_REG_TO_SCRATCH_0_SCRATCH__MASK;
+}
+#define CP_REG_TO_SCRATCH_0_CNT__MASK 0x07000000
+#define CP_REG_TO_SCRATCH_0_CNT__SHIFT 24
+static inline uint32_t CP_REG_TO_SCRATCH_0_CNT(uint32_t val)
+{
+ return ((val) << CP_REG_TO_SCRATCH_0_CNT__SHIFT) & CP_REG_TO_SCRATCH_0_CNT__MASK;
+}
+
+#define REG_CP_SCRATCH_TO_REG_0 0x00000000
+#define CP_SCRATCH_TO_REG_0_REG__MASK 0x0003ffff
+#define CP_SCRATCH_TO_REG_0_REG__SHIFT 0
+static inline uint32_t CP_SCRATCH_TO_REG_0_REG(uint32_t val)
+{
+ return ((val) << CP_SCRATCH_TO_REG_0_REG__SHIFT) & CP_SCRATCH_TO_REG_0_REG__MASK;
+}
+#define CP_SCRATCH_TO_REG_0_UNK18 0x00040000
+#define CP_SCRATCH_TO_REG_0_SCRATCH__MASK 0x00700000
+#define CP_SCRATCH_TO_REG_0_SCRATCH__SHIFT 20
+static inline uint32_t CP_SCRATCH_TO_REG_0_SCRATCH(uint32_t val)
+{
+ return ((val) << CP_SCRATCH_TO_REG_0_SCRATCH__SHIFT) & CP_SCRATCH_TO_REG_0_SCRATCH__MASK;
+}
+#define CP_SCRATCH_TO_REG_0_CNT__MASK 0x07000000
+#define CP_SCRATCH_TO_REG_0_CNT__SHIFT 24
+static inline uint32_t CP_SCRATCH_TO_REG_0_CNT(uint32_t val)
+{
+ return ((val) << CP_SCRATCH_TO_REG_0_CNT__SHIFT) & CP_SCRATCH_TO_REG_0_CNT__MASK;
+}
+
+#define REG_CP_SCRATCH_WRITE_0 0x00000000
+#define CP_SCRATCH_WRITE_0_SCRATCH__MASK 0x00700000
+#define CP_SCRATCH_WRITE_0_SCRATCH__SHIFT 20
+static inline uint32_t CP_SCRATCH_WRITE_0_SCRATCH(uint32_t val)
+{
+ return ((val) << CP_SCRATCH_WRITE_0_SCRATCH__SHIFT) & CP_SCRATCH_WRITE_0_SCRATCH__MASK;
+}
+
+#define REG_CP_MEM_WRITE_0 0x00000000
+#define CP_MEM_WRITE_0_ADDR_LO__MASK 0xffffffff
+#define CP_MEM_WRITE_0_ADDR_LO__SHIFT 0
+static inline uint32_t CP_MEM_WRITE_0_ADDR_LO(uint32_t val)
+{
+ return ((val) << CP_MEM_WRITE_0_ADDR_LO__SHIFT) & CP_MEM_WRITE_0_ADDR_LO__MASK;
+}
+
+#define REG_CP_MEM_WRITE_1 0x00000001
+#define CP_MEM_WRITE_1_ADDR_HI__MASK 0xffffffff
+#define CP_MEM_WRITE_1_ADDR_HI__SHIFT 0
+static inline uint32_t CP_MEM_WRITE_1_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_MEM_WRITE_1_ADDR_HI__SHIFT) & CP_MEM_WRITE_1_ADDR_HI__MASK;
+}
+
+#define REG_CP_COND_WRITE_0 0x00000000
+#define CP_COND_WRITE_0_FUNCTION__MASK 0x00000007
+#define CP_COND_WRITE_0_FUNCTION__SHIFT 0
+static inline uint32_t CP_COND_WRITE_0_FUNCTION(enum cp_cond_function val)
+{
+ return ((val) << CP_COND_WRITE_0_FUNCTION__SHIFT) & CP_COND_WRITE_0_FUNCTION__MASK;
+}
+#define CP_COND_WRITE_0_POLL_MEMORY 0x00000010
+#define CP_COND_WRITE_0_WRITE_MEMORY 0x00000100
+
+#define REG_CP_COND_WRITE_1 0x00000001
+#define CP_COND_WRITE_1_POLL_ADDR__MASK 0xffffffff
+#define CP_COND_WRITE_1_POLL_ADDR__SHIFT 0
+static inline uint32_t CP_COND_WRITE_1_POLL_ADDR(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE_1_POLL_ADDR__SHIFT) & CP_COND_WRITE_1_POLL_ADDR__MASK;
+}
+
+#define REG_CP_COND_WRITE_2 0x00000002
+#define CP_COND_WRITE_2_REF__MASK 0xffffffff
+#define CP_COND_WRITE_2_REF__SHIFT 0
+static inline uint32_t CP_COND_WRITE_2_REF(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE_2_REF__SHIFT) & CP_COND_WRITE_2_REF__MASK;
+}
+
+#define REG_CP_COND_WRITE_3 0x00000003
+#define CP_COND_WRITE_3_MASK__MASK 0xffffffff
+#define CP_COND_WRITE_3_MASK__SHIFT 0
+static inline uint32_t CP_COND_WRITE_3_MASK(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE_3_MASK__SHIFT) & CP_COND_WRITE_3_MASK__MASK;
+}
+
+#define REG_CP_COND_WRITE_4 0x00000004
+#define CP_COND_WRITE_4_WRITE_ADDR__MASK 0xffffffff
+#define CP_COND_WRITE_4_WRITE_ADDR__SHIFT 0
+static inline uint32_t CP_COND_WRITE_4_WRITE_ADDR(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE_4_WRITE_ADDR__SHIFT) & CP_COND_WRITE_4_WRITE_ADDR__MASK;
+}
+
+#define REG_CP_COND_WRITE_5 0x00000005
+#define CP_COND_WRITE_5_WRITE_DATA__MASK 0xffffffff
+#define CP_COND_WRITE_5_WRITE_DATA__SHIFT 0
+static inline uint32_t CP_COND_WRITE_5_WRITE_DATA(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE_5_WRITE_DATA__SHIFT) & CP_COND_WRITE_5_WRITE_DATA__MASK;
+}
+
+#define REG_CP_COND_WRITE5_0 0x00000000
+#define CP_COND_WRITE5_0_FUNCTION__MASK 0x00000007
+#define CP_COND_WRITE5_0_FUNCTION__SHIFT 0
+static inline uint32_t CP_COND_WRITE5_0_FUNCTION(enum cp_cond_function val)
+{
+ return ((val) << CP_COND_WRITE5_0_FUNCTION__SHIFT) & CP_COND_WRITE5_0_FUNCTION__MASK;
+}
+#define CP_COND_WRITE5_0_SIGNED_COMPARE 0x00000008
+#define CP_COND_WRITE5_0_POLL_MEMORY 0x00000010
+#define CP_COND_WRITE5_0_POLL_SCRATCH 0x00000020
+#define CP_COND_WRITE5_0_WRITE_MEMORY 0x00000100
+
+#define REG_CP_COND_WRITE5_1 0x00000001
+#define CP_COND_WRITE5_1_POLL_ADDR_LO__MASK 0xffffffff
+#define CP_COND_WRITE5_1_POLL_ADDR_LO__SHIFT 0
+static inline uint32_t CP_COND_WRITE5_1_POLL_ADDR_LO(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE5_1_POLL_ADDR_LO__SHIFT) & CP_COND_WRITE5_1_POLL_ADDR_LO__MASK;
+}
+
+#define REG_CP_COND_WRITE5_2 0x00000002
+#define CP_COND_WRITE5_2_POLL_ADDR_HI__MASK 0xffffffff
+#define CP_COND_WRITE5_2_POLL_ADDR_HI__SHIFT 0
+static inline uint32_t CP_COND_WRITE5_2_POLL_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE5_2_POLL_ADDR_HI__SHIFT) & CP_COND_WRITE5_2_POLL_ADDR_HI__MASK;
+}
+
+#define REG_CP_COND_WRITE5_3 0x00000003
+#define CP_COND_WRITE5_3_REF__MASK 0xffffffff
+#define CP_COND_WRITE5_3_REF__SHIFT 0
+static inline uint32_t CP_COND_WRITE5_3_REF(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE5_3_REF__SHIFT) & CP_COND_WRITE5_3_REF__MASK;
+}
+
+#define REG_CP_COND_WRITE5_4 0x00000004
+#define CP_COND_WRITE5_4_MASK__MASK 0xffffffff
+#define CP_COND_WRITE5_4_MASK__SHIFT 0
+static inline uint32_t CP_COND_WRITE5_4_MASK(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE5_4_MASK__SHIFT) & CP_COND_WRITE5_4_MASK__MASK;
+}
+
+#define REG_CP_COND_WRITE5_5 0x00000005
+#define CP_COND_WRITE5_5_WRITE_ADDR_LO__MASK 0xffffffff
+#define CP_COND_WRITE5_5_WRITE_ADDR_LO__SHIFT 0
+static inline uint32_t CP_COND_WRITE5_5_WRITE_ADDR_LO(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE5_5_WRITE_ADDR_LO__SHIFT) & CP_COND_WRITE5_5_WRITE_ADDR_LO__MASK;
+}
+
+#define REG_CP_COND_WRITE5_6 0x00000006
+#define CP_COND_WRITE5_6_WRITE_ADDR_HI__MASK 0xffffffff
+#define CP_COND_WRITE5_6_WRITE_ADDR_HI__SHIFT 0
+static inline uint32_t CP_COND_WRITE5_6_WRITE_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE5_6_WRITE_ADDR_HI__SHIFT) & CP_COND_WRITE5_6_WRITE_ADDR_HI__MASK;
+}
+
+#define REG_CP_COND_WRITE5_7 0x00000007
+#define CP_COND_WRITE5_7_WRITE_DATA__MASK 0xffffffff
+#define CP_COND_WRITE5_7_WRITE_DATA__SHIFT 0
+static inline uint32_t CP_COND_WRITE5_7_WRITE_DATA(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE5_7_WRITE_DATA__SHIFT) & CP_COND_WRITE5_7_WRITE_DATA__MASK;
+}
+
+#define REG_CP_WAIT_MEM_GTE_0 0x00000000
+#define CP_WAIT_MEM_GTE_0_RESERVED__MASK 0xffffffff
+#define CP_WAIT_MEM_GTE_0_RESERVED__SHIFT 0
+static inline uint32_t CP_WAIT_MEM_GTE_0_RESERVED(uint32_t val)
+{
+ return ((val) << CP_WAIT_MEM_GTE_0_RESERVED__SHIFT) & CP_WAIT_MEM_GTE_0_RESERVED__MASK;
+}
+
+#define REG_CP_WAIT_MEM_GTE_1 0x00000001
+#define CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__MASK 0xffffffff
+#define CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__SHIFT 0
+static inline uint32_t CP_WAIT_MEM_GTE_1_POLL_ADDR_LO(uint32_t val)
+{
+ return ((val) << CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__SHIFT) & CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__MASK;
+}
+
+#define REG_CP_WAIT_MEM_GTE_2 0x00000002
+#define CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__MASK 0xffffffff
+#define CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__SHIFT 0
+static inline uint32_t CP_WAIT_MEM_GTE_2_POLL_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__SHIFT) & CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__MASK;
+}
+
+#define REG_CP_WAIT_MEM_GTE_3 0x00000003
+#define CP_WAIT_MEM_GTE_3_REF__MASK 0xffffffff
+#define CP_WAIT_MEM_GTE_3_REF__SHIFT 0
+static inline uint32_t CP_WAIT_MEM_GTE_3_REF(uint32_t val)
+{
+ return ((val) << CP_WAIT_MEM_GTE_3_REF__SHIFT) & CP_WAIT_MEM_GTE_3_REF__MASK;
+}
+
+#define REG_CP_WAIT_REG_MEM_0 0x00000000
+#define CP_WAIT_REG_MEM_0_FUNCTION__MASK 0x00000007
+#define CP_WAIT_REG_MEM_0_FUNCTION__SHIFT 0
+static inline uint32_t CP_WAIT_REG_MEM_0_FUNCTION(enum cp_cond_function val)
+{
+ return ((val) << CP_WAIT_REG_MEM_0_FUNCTION__SHIFT) & CP_WAIT_REG_MEM_0_FUNCTION__MASK;
+}
+#define CP_WAIT_REG_MEM_0_SIGNED_COMPARE 0x00000008
+#define CP_WAIT_REG_MEM_0_POLL_MEMORY 0x00000010
+#define CP_WAIT_REG_MEM_0_POLL_SCRATCH 0x00000020
+#define CP_WAIT_REG_MEM_0_WRITE_MEMORY 0x00000100
+
+#define REG_CP_WAIT_REG_MEM_1 0x00000001
+#define CP_WAIT_REG_MEM_1_POLL_ADDR_LO__MASK 0xffffffff
+#define CP_WAIT_REG_MEM_1_POLL_ADDR_LO__SHIFT 0
+static inline uint32_t CP_WAIT_REG_MEM_1_POLL_ADDR_LO(uint32_t val)
+{
+ return ((val) << CP_WAIT_REG_MEM_1_POLL_ADDR_LO__SHIFT) & CP_WAIT_REG_MEM_1_POLL_ADDR_LO__MASK;
+}
+
+#define REG_CP_WAIT_REG_MEM_2 0x00000002
+#define CP_WAIT_REG_MEM_2_POLL_ADDR_HI__MASK 0xffffffff
+#define CP_WAIT_REG_MEM_2_POLL_ADDR_HI__SHIFT 0
+static inline uint32_t CP_WAIT_REG_MEM_2_POLL_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_WAIT_REG_MEM_2_POLL_ADDR_HI__SHIFT) & CP_WAIT_REG_MEM_2_POLL_ADDR_HI__MASK;
+}
+
+#define REG_CP_WAIT_REG_MEM_3 0x00000003
+#define CP_WAIT_REG_MEM_3_REF__MASK 0xffffffff
+#define CP_WAIT_REG_MEM_3_REF__SHIFT 0
+static inline uint32_t CP_WAIT_REG_MEM_3_REF(uint32_t val)
+{
+ return ((val) << CP_WAIT_REG_MEM_3_REF__SHIFT) & CP_WAIT_REG_MEM_3_REF__MASK;
+}
+
+#define REG_CP_WAIT_REG_MEM_4 0x00000004
+#define CP_WAIT_REG_MEM_4_MASK__MASK 0xffffffff
+#define CP_WAIT_REG_MEM_4_MASK__SHIFT 0
+static inline uint32_t CP_WAIT_REG_MEM_4_MASK(uint32_t val)
+{
+ return ((val) << CP_WAIT_REG_MEM_4_MASK__SHIFT) & CP_WAIT_REG_MEM_4_MASK__MASK;
+}
+
+#define REG_CP_WAIT_REG_MEM_5 0x00000005
+#define CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__MASK 0xffffffff
+#define CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__SHIFT 0
+static inline uint32_t CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(uint32_t val)
+{
+ return ((val) << CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__SHIFT) & CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__MASK;
+}
+
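+/* Usage sketch (annotation, not generator output): assuming the
+ * CP_WAIT_REG_MEM opcode and the cp_cond_function enum defined earlier in
+ * this file, stalling the CP until a memory word reaches a reference value
+ * could look like:
+ *
+ *   OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
+ *   OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_GE) |
+ *                  CP_WAIT_REG_MEM_0_POLL_MEMORY);
+ *   OUT_RING(ring, CP_WAIT_REG_MEM_1_POLL_ADDR_LO(lower_32_bits(iova)));
+ *   OUT_RING(ring, CP_WAIT_REG_MEM_2_POLL_ADDR_HI(upper_32_bits(iova)));
+ *   OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(ref));
+ *   OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(0xffffffff));
+ *   OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));
+ */
+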
+#define REG_CP_WAIT_TWO_REGS_0 0x00000000
+#define CP_WAIT_TWO_REGS_0_REG0__MASK 0x0003ffff
+#define CP_WAIT_TWO_REGS_0_REG0__SHIFT 0
+static inline uint32_t CP_WAIT_TWO_REGS_0_REG0(uint32_t val)
+{
+ return ((val) << CP_WAIT_TWO_REGS_0_REG0__SHIFT) & CP_WAIT_TWO_REGS_0_REG0__MASK;
+}
+
+#define REG_CP_WAIT_TWO_REGS_1 0x00000001
+#define CP_WAIT_TWO_REGS_1_REG1__MASK 0x0003ffff
+#define CP_WAIT_TWO_REGS_1_REG1__SHIFT 0
+static inline uint32_t CP_WAIT_TWO_REGS_1_REG1(uint32_t val)
+{
+ return ((val) << CP_WAIT_TWO_REGS_1_REG1__SHIFT) & CP_WAIT_TWO_REGS_1_REG1__MASK;
+}
+
+#define REG_CP_WAIT_TWO_REGS_2 0x00000002
+#define CP_WAIT_TWO_REGS_2_REF__MASK 0xffffffff
+#define CP_WAIT_TWO_REGS_2_REF__SHIFT 0
+static inline uint32_t CP_WAIT_TWO_REGS_2_REF(uint32_t val)
+{
+ return ((val) << CP_WAIT_TWO_REGS_2_REF__SHIFT) & CP_WAIT_TWO_REGS_2_REF__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_0 0x00000000
+
+#define REG_CP_DISPATCH_COMPUTE_1 0x00000001
+#define CP_DISPATCH_COMPUTE_1_X__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_1_X__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_1_X(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_1_X__SHIFT) & CP_DISPATCH_COMPUTE_1_X__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_2 0x00000002
+#define CP_DISPATCH_COMPUTE_2_Y__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_2_Y__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_2_Y(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_2_Y__SHIFT) & CP_DISPATCH_COMPUTE_2_Y__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_3 0x00000003
+#define CP_DISPATCH_COMPUTE_3_Z__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_3_Z__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_3_Z(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_3_Z__SHIFT) & CP_DISPATCH_COMPUTE_3_Z__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_0 0x00000000
+#define CP_SET_RENDER_MODE_0_MODE__MASK 0x000001ff
+#define CP_SET_RENDER_MODE_0_MODE__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_0_MODE(enum render_mode_cmd val)
+{
+ return ((val) << CP_SET_RENDER_MODE_0_MODE__SHIFT) & CP_SET_RENDER_MODE_0_MODE__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_1 0x00000001
+#define CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_1_ADDR_0_LO(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT) & CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_2 0x00000002
+#define CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_2_ADDR_0_HI(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT) & CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_3 0x00000003
+#define CP_SET_RENDER_MODE_3_VSC_ENABLE 0x00000008
+#define CP_SET_RENDER_MODE_3_GMEM_ENABLE 0x00000010
+
+#define REG_CP_SET_RENDER_MODE_4 0x00000004
+
+#define REG_CP_SET_RENDER_MODE_5 0x00000005
+#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_5_ADDR_1_LEN(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT) & CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_6 0x00000006
+#define CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_6_ADDR_1_LO(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT) & CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_7 0x00000007
+#define CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_7_ADDR_1_HI(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT) & CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK;
+}
+
+#define REG_CP_COMPUTE_CHECKPOINT_0 0x00000000
+#define CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__MASK 0xffffffff
+#define CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__SHIFT 0
+static inline uint32_t CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO(uint32_t val)
+{
+ return ((val) << CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__SHIFT) & CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_COMPUTE_CHECKPOINT_1 0x00000001
+#define CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__MASK 0xffffffff
+#define CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__SHIFT 0
+static inline uint32_t CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI(uint32_t val)
+{
+ return ((val) << CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__SHIFT) & CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_COMPUTE_CHECKPOINT_2 0x00000002
+
+#define REG_CP_COMPUTE_CHECKPOINT_3 0x00000003
+#define CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__MASK 0xffffffff
+#define CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__SHIFT 0
+static inline uint32_t CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN(uint32_t val)
+{
+ return ((val) << CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__SHIFT) & CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__MASK;
+}
+
+#define REG_CP_COMPUTE_CHECKPOINT_4 0x00000004
+
+#define REG_CP_COMPUTE_CHECKPOINT_5 0x00000005
+#define CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__MASK 0xffffffff
+#define CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__SHIFT 0
+static inline uint32_t CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO(uint32_t val)
+{
+ return ((val) << CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__SHIFT) & CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__MASK;
+}
+
+#define REG_CP_COMPUTE_CHECKPOINT_6 0x00000006
+#define CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__MASK 0xffffffff
+#define CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__SHIFT 0
+static inline uint32_t CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI(uint32_t val)
+{
+ return ((val) << CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__SHIFT) & CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__MASK;
+}
+
+#define REG_CP_COMPUTE_CHECKPOINT_7 0x00000007
+
+#define REG_CP_PERFCOUNTER_ACTION_0 0x00000000
+
+#define REG_CP_PERFCOUNTER_ACTION_1 0x00000001
+#define CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__MASK 0xffffffff
+#define CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__SHIFT 0
+static inline uint32_t CP_PERFCOUNTER_ACTION_1_ADDR_0_LO(uint32_t val)
+{
+ return ((val) << CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__SHIFT) & CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_PERFCOUNTER_ACTION_2 0x00000002
+#define CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__MASK 0xffffffff
+#define CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__SHIFT 0
+static inline uint32_t CP_PERFCOUNTER_ACTION_2_ADDR_0_HI(uint32_t val)
+{
+ return ((val) << CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__SHIFT) & CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_EVENT_WRITE_0 0x00000000
+#define CP_EVENT_WRITE_0_EVENT__MASK 0x000000ff
+#define CP_EVENT_WRITE_0_EVENT__SHIFT 0
+static inline uint32_t CP_EVENT_WRITE_0_EVENT(enum vgt_event_type val)
+{
+ return ((val) << CP_EVENT_WRITE_0_EVENT__SHIFT) & CP_EVENT_WRITE_0_EVENT__MASK;
+}
+#define CP_EVENT_WRITE_0_TIMESTAMP 0x40000000
+#define CP_EVENT_WRITE_0_IRQ 0x80000000
+
+#define REG_CP_EVENT_WRITE_1 0x00000001
+#define CP_EVENT_WRITE_1_ADDR_0_LO__MASK 0xffffffff
+#define CP_EVENT_WRITE_1_ADDR_0_LO__SHIFT 0
+static inline uint32_t CP_EVENT_WRITE_1_ADDR_0_LO(uint32_t val)
+{
+ return ((val) << CP_EVENT_WRITE_1_ADDR_0_LO__SHIFT) & CP_EVENT_WRITE_1_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_EVENT_WRITE_2 0x00000002
+#define CP_EVENT_WRITE_2_ADDR_0_HI__MASK 0xffffffff
+#define CP_EVENT_WRITE_2_ADDR_0_HI__SHIFT 0
+static inline uint32_t CP_EVENT_WRITE_2_ADDR_0_HI(uint32_t val)
+{
+ return ((val) << CP_EVENT_WRITE_2_ADDR_0_HI__SHIFT) & CP_EVENT_WRITE_2_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_EVENT_WRITE_3 0x00000003
+
+#define REG_CP_BLIT_0 0x00000000
+#define CP_BLIT_0_OP__MASK 0x0000000f
+#define CP_BLIT_0_OP__SHIFT 0
+static inline uint32_t CP_BLIT_0_OP(enum cp_blit_cmd val)
+{
+ return ((val) << CP_BLIT_0_OP__SHIFT) & CP_BLIT_0_OP__MASK;
+}
+
+#define REG_CP_BLIT_1 0x00000001
+#define CP_BLIT_1_SRC_X1__MASK 0x00003fff
+#define CP_BLIT_1_SRC_X1__SHIFT 0
+static inline uint32_t CP_BLIT_1_SRC_X1(uint32_t val)
+{
+ return ((val) << CP_BLIT_1_SRC_X1__SHIFT) & CP_BLIT_1_SRC_X1__MASK;
+}
+#define CP_BLIT_1_SRC_Y1__MASK 0x3fff0000
+#define CP_BLIT_1_SRC_Y1__SHIFT 16
+static inline uint32_t CP_BLIT_1_SRC_Y1(uint32_t val)
+{
+ return ((val) << CP_BLIT_1_SRC_Y1__SHIFT) & CP_BLIT_1_SRC_Y1__MASK;
+}
+
+#define REG_CP_BLIT_2 0x00000002
+#define CP_BLIT_2_SRC_X2__MASK 0x00003fff
+#define CP_BLIT_2_SRC_X2__SHIFT 0
+static inline uint32_t CP_BLIT_2_SRC_X2(uint32_t val)
+{
+ return ((val) << CP_BLIT_2_SRC_X2__SHIFT) & CP_BLIT_2_SRC_X2__MASK;
+}
+#define CP_BLIT_2_SRC_Y2__MASK 0x3fff0000
+#define CP_BLIT_2_SRC_Y2__SHIFT 16
+static inline uint32_t CP_BLIT_2_SRC_Y2(uint32_t val)
+{
+ return ((val) << CP_BLIT_2_SRC_Y2__SHIFT) & CP_BLIT_2_SRC_Y2__MASK;
+}
+
+#define REG_CP_BLIT_3 0x00000003
+#define CP_BLIT_3_DST_X1__MASK 0x00003fff
+#define CP_BLIT_3_DST_X1__SHIFT 0
+static inline uint32_t CP_BLIT_3_DST_X1(uint32_t val)
+{
+ return ((val) << CP_BLIT_3_DST_X1__SHIFT) & CP_BLIT_3_DST_X1__MASK;
+}
+#define CP_BLIT_3_DST_Y1__MASK 0x3fff0000
+#define CP_BLIT_3_DST_Y1__SHIFT 16
+static inline uint32_t CP_BLIT_3_DST_Y1(uint32_t val)
+{
+ return ((val) << CP_BLIT_3_DST_Y1__SHIFT) & CP_BLIT_3_DST_Y1__MASK;
+}
+
+#define REG_CP_BLIT_4 0x00000004
+#define CP_BLIT_4_DST_X2__MASK 0x00003fff
+#define CP_BLIT_4_DST_X2__SHIFT 0
+static inline uint32_t CP_BLIT_4_DST_X2(uint32_t val)
+{
+ return ((val) << CP_BLIT_4_DST_X2__SHIFT) & CP_BLIT_4_DST_X2__MASK;
+}
+#define CP_BLIT_4_DST_Y2__MASK 0x3fff0000
+#define CP_BLIT_4_DST_Y2__SHIFT 16
+static inline uint32_t CP_BLIT_4_DST_Y2(uint32_t val)
+{
+ return ((val) << CP_BLIT_4_DST_Y2__SHIFT) & CP_BLIT_4_DST_Y2__MASK;
+}
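+
+/*
+ * Illustrative sketch (editorial note, not part of the generated header):
+ * the *__MASK/*__SHIFT pairs and their inline packers are meant to be OR'd
+ * together when assembling PM4 packet payload dwords. Assuming the
+ * BLIT_OP_COPY enumerator of enum cp_blit_cmd, a CP_BLIT payload copying
+ * (0,0)-(63,63) to (16,16)-(79,79) could be packed as:
+ *
+ *	uint32_t pkt[5];
+ *
+ *	pkt[REG_CP_BLIT_0] = CP_BLIT_0_OP(BLIT_OP_COPY);
+ *	pkt[REG_CP_BLIT_1] = CP_BLIT_1_SRC_X1(0) | CP_BLIT_1_SRC_Y1(0);
+ *	pkt[REG_CP_BLIT_2] = CP_BLIT_2_SRC_X2(63) | CP_BLIT_2_SRC_Y2(63);
+ *	pkt[REG_CP_BLIT_3] = CP_BLIT_3_DST_X1(16) | CP_BLIT_3_DST_Y1(16);
+ *	pkt[REG_CP_BLIT_4] = CP_BLIT_4_DST_X2(79) | CP_BLIT_4_DST_Y2(79);
+ *
+ * Note that the packers mask rather than range-check: a coordinate wider
+ * than the 14-bit field is silently truncated by the & *__MASK step.
+ */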
+
+#define REG_CP_EXEC_CS_0 0x00000000
+
+#define REG_CP_EXEC_CS_1 0x00000001
+#define CP_EXEC_CS_1_NGROUPS_X__MASK 0xffffffff
+#define CP_EXEC_CS_1_NGROUPS_X__SHIFT 0
+static inline uint32_t CP_EXEC_CS_1_NGROUPS_X(uint32_t val)
+{
+ return ((val) << CP_EXEC_CS_1_NGROUPS_X__SHIFT) & CP_EXEC_CS_1_NGROUPS_X__MASK;
+}
+
+#define REG_CP_EXEC_CS_2 0x00000002
+#define CP_EXEC_CS_2_NGROUPS_Y__MASK 0xffffffff
+#define CP_EXEC_CS_2_NGROUPS_Y__SHIFT 0
+static inline uint32_t CP_EXEC_CS_2_NGROUPS_Y(uint32_t val)
+{
+ return ((val) << CP_EXEC_CS_2_NGROUPS_Y__SHIFT) & CP_EXEC_CS_2_NGROUPS_Y__MASK;
+}
+
+#define REG_CP_EXEC_CS_3 0x00000003
+#define CP_EXEC_CS_3_NGROUPS_Z__MASK 0xffffffff
+#define CP_EXEC_CS_3_NGROUPS_Z__SHIFT 0
+static inline uint32_t CP_EXEC_CS_3_NGROUPS_Z(uint32_t val)
+{
+ return ((val) << CP_EXEC_CS_3_NGROUPS_Z__SHIFT) & CP_EXEC_CS_3_NGROUPS_Z__MASK;
+}
+
+#define REG_A4XX_CP_EXEC_CS_INDIRECT_0 0x00000000
+
+#define REG_A4XX_CP_EXEC_CS_INDIRECT_1 0x00000001
+#define A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__MASK 0xffffffff
+#define A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__SHIFT 0
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_1_ADDR(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__MASK;
+}
+
+#define REG_A4XX_CP_EXEC_CS_INDIRECT_2 0x00000002
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__MASK 0x00000ffc
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__SHIFT 2
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__MASK;
+}
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__MASK 0x003ff000
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__SHIFT 12
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__MASK;
+}
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__MASK 0xffc00000
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__MASK;
+}
+
+#define REG_A5XX_CP_EXEC_CS_INDIRECT_1 0x00000001
+#define A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__MASK 0xffffffff
+#define A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__SHIFT 0
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__MASK;
+}
+
+#define REG_A5XX_CP_EXEC_CS_INDIRECT_2 0x00000002
+#define A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__MASK 0xffffffff
+#define A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__SHIFT 0
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__MASK;
+}
+
+#define REG_A5XX_CP_EXEC_CS_INDIRECT_3 0x00000003
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__MASK 0x00000ffc
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__SHIFT 2
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__MASK;
+}
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__MASK 0x003ff000
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__SHIFT 12
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__MASK;
+}
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__MASK 0xffc00000
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__MASK;
+}
+
+#define REG_A6XX_CP_SET_MARKER_0 0x00000000
+#define A6XX_CP_SET_MARKER_0_MODE__MASK 0x000001ff
+#define A6XX_CP_SET_MARKER_0_MODE__SHIFT 0
+static inline uint32_t A6XX_CP_SET_MARKER_0_MODE(enum a6xx_marker val)
+{
+ return ((val) << A6XX_CP_SET_MARKER_0_MODE__SHIFT) & A6XX_CP_SET_MARKER_0_MODE__MASK;
+}
+#define A6XX_CP_SET_MARKER_0_MARKER__MASK 0x0000000f
+#define A6XX_CP_SET_MARKER_0_MARKER__SHIFT 0
+static inline uint32_t A6XX_CP_SET_MARKER_0_MARKER(enum a6xx_marker val)
+{
+ return ((val) << A6XX_CP_SET_MARKER_0_MARKER__SHIFT) & A6XX_CP_SET_MARKER_0_MARKER__MASK;
+}
+
+static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+
+static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+#define A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK 0x00000007
+#define A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT 0
+static inline uint32_t A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG(enum pseudo_reg val)
+{
+ return ((val) << A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT) & A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK;
+}
+
+static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG__1(uint32_t i0) { return 0x00000001 + 0x3*i0; }
+#define A6XX_CP_SET_PSEUDO_REG__1_LO__MASK 0xffffffff
+#define A6XX_CP_SET_PSEUDO_REG__1_LO__SHIFT 0
+static inline uint32_t A6XX_CP_SET_PSEUDO_REG__1_LO(uint32_t val)
+{
+ return ((val) << A6XX_CP_SET_PSEUDO_REG__1_LO__SHIFT) & A6XX_CP_SET_PSEUDO_REG__1_LO__MASK;
+}
+
+static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG__2(uint32_t i0) { return 0x00000002 + 0x3*i0; }
+#define A6XX_CP_SET_PSEUDO_REG__2_HI__MASK 0xffffffff
+#define A6XX_CP_SET_PSEUDO_REG__2_HI__SHIFT 0
+static inline uint32_t A6XX_CP_SET_PSEUDO_REG__2_HI(uint32_t val)
+{
+ return ((val) << A6XX_CP_SET_PSEUDO_REG__2_HI__SHIFT) & A6XX_CP_SET_PSEUDO_REG__2_HI__MASK;
+}
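+
+/*
+ * Editorial note: the i0-parameterized REG_A6XX_CP_SET_PSEUDO_REG_*()
+ * helpers above describe an array of {PSEUDO_REG, LO, HI} triples with a
+ * stride of 3 dwords, so entry i occupies payload offsets 3*i .. 3*i+2.
+ * For example, the address dwords of the second entry (i0 == 1) land at
+ * offsets REG_A6XX_CP_SET_PSEUDO_REG__1(1) == 4 and
+ * REG_A6XX_CP_SET_PSEUDO_REG__2(1) == 5.
+ */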
+
+#define REG_A6XX_CP_REG_TEST_0 0x00000000
+#define A6XX_CP_REG_TEST_0_REG__MASK 0x0003ffff
+#define A6XX_CP_REG_TEST_0_REG__SHIFT 0
+static inline uint32_t A6XX_CP_REG_TEST_0_REG(uint32_t val)
+{
+ return ((val) << A6XX_CP_REG_TEST_0_REG__SHIFT) & A6XX_CP_REG_TEST_0_REG__MASK;
+}
+#define A6XX_CP_REG_TEST_0_BIT__MASK 0x01f00000
+#define A6XX_CP_REG_TEST_0_BIT__SHIFT 20
+static inline uint32_t A6XX_CP_REG_TEST_0_BIT(uint32_t val)
+{
+ return ((val) << A6XX_CP_REG_TEST_0_BIT__SHIFT) & A6XX_CP_REG_TEST_0_BIT__MASK;
+}
+#define A6XX_CP_REG_TEST_0_WAIT_FOR_ME 0x02000000
+
+#define REG_CP_COND_REG_EXEC_0 0x00000000
+#define CP_COND_REG_EXEC_0_REG0__MASK 0x0003ffff
+#define CP_COND_REG_EXEC_0_REG0__SHIFT 0
+static inline uint32_t CP_COND_REG_EXEC_0_REG0(uint32_t val)
+{
+ return ((val) << CP_COND_REG_EXEC_0_REG0__SHIFT) & CP_COND_REG_EXEC_0_REG0__MASK;
+}
+#define CP_COND_REG_EXEC_0_BINNING 0x02000000
+#define CP_COND_REG_EXEC_0_GMEM 0x04000000
+#define CP_COND_REG_EXEC_0_SYSMEM 0x08000000
+#define CP_COND_REG_EXEC_0_MODE__MASK 0xf0000000
+#define CP_COND_REG_EXEC_0_MODE__SHIFT 28
+static inline uint32_t CP_COND_REG_EXEC_0_MODE(enum compare_mode val)
+{
+ return ((val) << CP_COND_REG_EXEC_0_MODE__SHIFT) & CP_COND_REG_EXEC_0_MODE__MASK;
+}
+
+#define REG_CP_COND_REG_EXEC_1 0x00000001
+#define CP_COND_REG_EXEC_1_DWORDS__MASK 0xffffffff
+#define CP_COND_REG_EXEC_1_DWORDS__SHIFT 0
+static inline uint32_t CP_COND_REG_EXEC_1_DWORDS(uint32_t val)
+{
+ return ((val) << CP_COND_REG_EXEC_1_DWORDS__SHIFT) & CP_COND_REG_EXEC_1_DWORDS__MASK;
+}
+
+#define REG_CP_COND_EXEC_0 0x00000000
+#define CP_COND_EXEC_0_ADDR0_LO__MASK 0xffffffff
+#define CP_COND_EXEC_0_ADDR0_LO__SHIFT 0
+static inline uint32_t CP_COND_EXEC_0_ADDR0_LO(uint32_t val)
+{
+ return ((val) << CP_COND_EXEC_0_ADDR0_LO__SHIFT) & CP_COND_EXEC_0_ADDR0_LO__MASK;
+}
+
+#define REG_CP_COND_EXEC_1 0x00000001
+#define CP_COND_EXEC_1_ADDR0_HI__MASK 0xffffffff
+#define CP_COND_EXEC_1_ADDR0_HI__SHIFT 0
+static inline uint32_t CP_COND_EXEC_1_ADDR0_HI(uint32_t val)
+{
+ return ((val) << CP_COND_EXEC_1_ADDR0_HI__SHIFT) & CP_COND_EXEC_1_ADDR0_HI__MASK;
+}
+
+#define REG_CP_COND_EXEC_2 0x00000002
+#define CP_COND_EXEC_2_ADDR1_LO__MASK 0xffffffff
+#define CP_COND_EXEC_2_ADDR1_LO__SHIFT 0
+static inline uint32_t CP_COND_EXEC_2_ADDR1_LO(uint32_t val)
+{
+ return ((val) << CP_COND_EXEC_2_ADDR1_LO__SHIFT) & CP_COND_EXEC_2_ADDR1_LO__MASK;
+}
+
+#define REG_CP_COND_EXEC_3 0x00000003
+#define CP_COND_EXEC_3_ADDR1_HI__MASK 0xffffffff
+#define CP_COND_EXEC_3_ADDR1_HI__SHIFT 0
+static inline uint32_t CP_COND_EXEC_3_ADDR1_HI(uint32_t val)
+{
+ return ((val) << CP_COND_EXEC_3_ADDR1_HI__SHIFT) & CP_COND_EXEC_3_ADDR1_HI__MASK;
+}
+
+#define REG_CP_COND_EXEC_4 0x00000004
+#define CP_COND_EXEC_4_REF__MASK 0xffffffff
+#define CP_COND_EXEC_4_REF__SHIFT 0
+static inline uint32_t CP_COND_EXEC_4_REF(uint32_t val)
+{
+ return ((val) << CP_COND_EXEC_4_REF__SHIFT) & CP_COND_EXEC_4_REF__MASK;
+}
+
+#define REG_CP_COND_EXEC_5 0x00000005
+#define CP_COND_EXEC_5_DWORDS__MASK 0xffffffff
+#define CP_COND_EXEC_5_DWORDS__SHIFT 0
+static inline uint32_t CP_COND_EXEC_5_DWORDS(uint32_t val)
+{
+ return ((val) << CP_COND_EXEC_5_DWORDS__SHIFT) & CP_COND_EXEC_5_DWORDS__MASK;
+}
+
+#define REG_CP_SET_CTXSWITCH_IB_0 0x00000000
+#define CP_SET_CTXSWITCH_IB_0_ADDR_LO__MASK 0xffffffff
+#define CP_SET_CTXSWITCH_IB_0_ADDR_LO__SHIFT 0
+static inline uint32_t CP_SET_CTXSWITCH_IB_0_ADDR_LO(uint32_t val)
+{
+ return ((val) << CP_SET_CTXSWITCH_IB_0_ADDR_LO__SHIFT) & CP_SET_CTXSWITCH_IB_0_ADDR_LO__MASK;
+}
+
+#define REG_CP_SET_CTXSWITCH_IB_1 0x00000001
+#define CP_SET_CTXSWITCH_IB_1_ADDR_HI__MASK 0xffffffff
+#define CP_SET_CTXSWITCH_IB_1_ADDR_HI__SHIFT 0
+static inline uint32_t CP_SET_CTXSWITCH_IB_1_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_SET_CTXSWITCH_IB_1_ADDR_HI__SHIFT) & CP_SET_CTXSWITCH_IB_1_ADDR_HI__MASK;
+}
+
+#define REG_CP_SET_CTXSWITCH_IB_2 0x00000002
+#define CP_SET_CTXSWITCH_IB_2_DWORDS__MASK 0x000fffff
+#define CP_SET_CTXSWITCH_IB_2_DWORDS__SHIFT 0
+static inline uint32_t CP_SET_CTXSWITCH_IB_2_DWORDS(uint32_t val)
+{
+ return ((val) << CP_SET_CTXSWITCH_IB_2_DWORDS__SHIFT) & CP_SET_CTXSWITCH_IB_2_DWORDS__MASK;
+}
+#define CP_SET_CTXSWITCH_IB_2_TYPE__MASK 0x00300000
+#define CP_SET_CTXSWITCH_IB_2_TYPE__SHIFT 20
+static inline uint32_t CP_SET_CTXSWITCH_IB_2_TYPE(enum ctxswitch_ib val)
+{
+ return ((val) << CP_SET_CTXSWITCH_IB_2_TYPE__SHIFT) & CP_SET_CTXSWITCH_IB_2_TYPE__MASK;
+}
+
+#define REG_CP_REG_WRITE_0 0x00000000
+#define CP_REG_WRITE_0_TRACKER__MASK 0x00000007
+#define CP_REG_WRITE_0_TRACKER__SHIFT 0
+static inline uint32_t CP_REG_WRITE_0_TRACKER(enum reg_tracker val)
+{
+ return ((val) << CP_REG_WRITE_0_TRACKER__SHIFT) & CP_REG_WRITE_0_TRACKER__MASK;
+}
+
+#define REG_CP_SMMU_TABLE_UPDATE_0 0x00000000
+#define CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__MASK 0xffffffff
+#define CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__SHIFT 0
+static inline uint32_t CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(uint32_t val)
+{
+ return ((val) << CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__SHIFT) & CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__MASK;
+}
+
+#define REG_CP_SMMU_TABLE_UPDATE_1 0x00000001
+#define CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__MASK 0x0000ffff
+#define CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__SHIFT 0
+static inline uint32_t CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(uint32_t val)
+{
+ return ((val) << CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__SHIFT) & CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__MASK;
+}
+#define CP_SMMU_TABLE_UPDATE_1_ASID__MASK 0xffff0000
+#define CP_SMMU_TABLE_UPDATE_1_ASID__SHIFT 16
+static inline uint32_t CP_SMMU_TABLE_UPDATE_1_ASID(uint32_t val)
+{
+ return ((val) << CP_SMMU_TABLE_UPDATE_1_ASID__SHIFT) & CP_SMMU_TABLE_UPDATE_1_ASID__MASK;
+}
+
+#define REG_CP_SMMU_TABLE_UPDATE_2 0x00000002
+#define CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__MASK 0xffffffff
+#define CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__SHIFT 0
+static inline uint32_t CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(uint32_t val)
+{
+ return ((val) << CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__SHIFT) & CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__MASK;
+}
+
+#define REG_CP_SMMU_TABLE_UPDATE_3 0x00000003
+#define CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__MASK 0xffffffff
+#define CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__SHIFT 0
+static inline uint32_t CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(uint32_t val)
+{
+ return ((val) << CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__SHIFT) & CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__MASK;
+}
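+
+/*
+ * Editorial note: CP_SMMU_TABLE_UPDATE lets the CP retarget the GPU SMMU
+ * (TTBR0 base, ASID, plus CONTEXTIDR/context-bank scratch values) from the
+ * ring itself, which is what the driver's per-process GPU pagetable switch
+ * relies on to change address spaces between submits without a CPU round
+ * trip.
+ */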
+
+#define REG_CP_START_BIN_BIN_COUNT 0x00000000
+
+#define REG_CP_START_BIN_PREFIX_ADDR 0x00000001
+
+#define REG_CP_START_BIN_PREFIX_DWORDS 0x00000003
+
+#define REG_CP_START_BIN_BODY_DWORDS 0x00000004
+
+#endif /* ADRENO_PM4_XML */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
new file mode 100644
index 000000000..b5b6e7031
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DPU_CORE_IRQ_H__
+#define __DPU_CORE_IRQ_H__
+
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+
+/**
+ * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
+ * @kms: MSM KMS handle
+ * @return: none
+ */
+void dpu_core_irq_preinstall(struct msm_kms *kms);
+
+/**
+ * dpu_core_irq_uninstall - uninstall core IRQ handler
+ * @kms: MSM KMS handle
+ * @return: none
+ */
+void dpu_core_irq_uninstall(struct msm_kms *kms);
+
+/**
+ * dpu_core_irq - core IRQ handler
+ * @kms: MSM KMS handle
+ * @return: interrupt handling status
+ */
+irqreturn_t dpu_core_irq(struct msm_kms *kms);
+
+/**
+ * dpu_core_irq_read - IRQ helper function for reading IRQ status
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @return: non-zero if an interrupt is pending; zero otherwise
+ */
+u32 dpu_core_irq_read(
+ struct dpu_kms *dpu_kms,
+ int irq_idx);
+
+/**
+ * dpu_core_irq_register_callback - For registering callback function on IRQ
+ * interrupt
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @irq_cb: IRQ callback function.
+ * @irq_arg: IRQ callback argument.
+ * @return: 0 on successfully registering the callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int dpu_core_irq_register_callback(
+ struct dpu_kms *dpu_kms,
+ int irq_idx,
+ void (*irq_cb)(void *arg, int irq_idx),
+ void *irq_arg);
+
+/**
+ * dpu_core_irq_unregister_callback - For unregistering callback function on IRQ
+ * interrupt
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @return: 0 on successfully unregistering the callback, otherwise failure
+ *
+ * This function supports deregistration of multiple callbacks for each interrupt.
+ */
+int dpu_core_irq_unregister_callback(
+ struct dpu_kms *dpu_kms,
+ int irq_idx);
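+
+/*
+ * Illustrative usage sketch (editorial; my_irq_cb and my_ctx are
+ * hypothetical). A caller registers a callback for a hardware interrupt
+ * index and must unregister it with the same index on teardown:
+ *
+ *	static void my_irq_cb(void *arg, int irq_idx)
+ *	{
+ *		struct my_ctx *ctx = arg;
+ *
+ *		...handle the interrupt using ctx...
+ *	}
+ *
+ *	ret = dpu_core_irq_register_callback(dpu_kms, irq_idx,
+ *					     my_irq_cb, ctx);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	dpu_core_irq_unregister_callback(dpu_kms, irq_idx);
+ */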
+
+/**
+ * dpu_debugfs_core_irq_init - register core irq debugfs
+ * @dpu_kms: pointer to kms
+ * @parent: debugfs directory root
+ */
+void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent);
+
+#endif /* __DPU_CORE_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
new file mode 100644
index 000000000..1d9d83d7b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -0,0 +1,534 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/pm_opp.h>
+#include <linux/sort.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_core_perf.h"
+
+/**
+ * enum dpu_perf_mode - performance tuning mode
+ * @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
+ * @DPU_PERF_MODE_MINIMUM: performance bounded by minimum setting
+ * @DPU_PERF_MODE_FIXED: performance bounded by fixed setting
+ * @DPU_PERF_MODE_MAX: maximum value, used for error checking
+ */
+enum dpu_perf_mode {
+ DPU_PERF_MODE_NORMAL,
+ DPU_PERF_MODE_MINIMUM,
+ DPU_PERF_MODE_FIXED,
+ DPU_PERF_MODE_MAX
+};
+
+/**
+ * _dpu_core_perf_calc_bw() - calculate the bandwidth vote for a crtc
+ * @kms: pointer to the dpu_kms
+ * @crtc: pointer to a crtc
+ * Return: aggregated bandwidth for all planes in the crtc.
+ */
+static u64 _dpu_core_perf_calc_bw(struct dpu_kms *kms,
+ struct drm_crtc *crtc)
+{
+ struct drm_plane *plane;
+ struct dpu_plane_state *pstate;
+ u64 crtc_plane_bw = 0;
+ u32 bw_factor;
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ pstate = to_dpu_plane_state(plane->state);
+ if (!pstate)
+ continue;
+
+ crtc_plane_bw += pstate->plane_fetch_bw;
+ }
+
+ bw_factor = kms->catalog->perf->bw_inefficiency_factor;
+ if (bw_factor) {
+ crtc_plane_bw *= bw_factor;
+ do_div(crtc_plane_bw, 100);
+ }
+
+ return crtc_plane_bw;
+}
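+
+/*
+ * Worked example (editorial, hypothetical numbers): two visible planes
+ * with plane_fetch_bw values of 800000000 and 400000000 sum to
+ * 1200000000; a bw_inefficiency_factor of 120 (a 1.2x derating) then
+ * yields a crtc vote of 1200000000 * 120 / 100 = 1440000000, in the same
+ * units as plane_fetch_bw.
+ */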
+
+/**
+ * _dpu_core_perf_calc_clk() - calculate the core clock rate for a crtc
+ * @kms: pointer to the dpu_kms
+ * @crtc: pointer to a crtc
+ * @state: pointer to a crtc state
+ * Return: maximum clock rate required across the mode and all planes in the crtc.
+ */
+static u64 _dpu_core_perf_calc_clk(struct dpu_kms *kms,
+ struct drm_crtc *crtc, struct drm_crtc_state *state)
+{
+ struct drm_plane *plane;
+ struct dpu_plane_state *pstate;
+ struct drm_display_mode *mode;
+ u64 crtc_clk;
+ u32 clk_factor;
+
+ mode = &state->adjusted_mode;
+
+ crtc_clk = mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode);
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ pstate = to_dpu_plane_state(plane->state);
+ if (!pstate)
+ continue;
+
+ crtc_clk = max(pstate->plane_clk, crtc_clk);
+ }
+
+ clk_factor = kms->catalog->perf->clk_inefficiency_factor;
+ if (clk_factor) {
+ crtc_clk *= clk_factor;
+ do_div(crtc_clk, 100);
+ }
+
+ return crtc_clk;
+}
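+
+/*
+ * Worked example (editorial): a 1920x1080@60 mode with vtotal 1125 gives
+ * a baseline of 1125 * 1920 * 60 = 129600000 Hz; per-plane plane_clk
+ * demands are folded in via max(), and a clk_inefficiency_factor of e.g.
+ * 105 then scales the result: 129600000 * 105 / 100 = 136080000 Hz.
+ */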
+
+static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+{
+	struct msm_drm_private *priv = crtc->dev->dev_private;
+
+	return to_dpu_kms(priv->kms);
+}
+
+static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct dpu_core_perf_params *perf)
+{
+ if (!kms || !kms->catalog || !crtc || !state || !perf) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ memset(perf, 0, sizeof(struct dpu_core_perf_params));
+
+ if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
+ perf->bw_ctl = 0;
+ perf->max_per_pipe_ib = 0;
+ perf->core_clk_rate = 0;
+ } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
+ perf->bw_ctl = kms->perf.fix_core_ab_vote;
+ perf->max_per_pipe_ib = kms->perf.fix_core_ib_vote;
+ perf->core_clk_rate = kms->perf.fix_core_clk_rate;
+ } else {
+ perf->bw_ctl = _dpu_core_perf_calc_bw(kms, crtc);
+ perf->max_per_pipe_ib = kms->catalog->perf->min_dram_ib;
+ perf->core_clk_rate = _dpu_core_perf_calc_clk(kms, crtc, state);
+ }
+
+ DRM_DEBUG_ATOMIC(
+ "crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu\n",
+ crtc->base.id, perf->core_clk_rate,
+ perf->max_per_pipe_ib, perf->bw_ctl);
+}
+
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ u32 bw, threshold;
+ u64 bw_sum_of_intfs = 0;
+ enum dpu_crtc_client_type curr_client_type;
+ struct dpu_crtc_state *dpu_cstate;
+ struct drm_crtc *tmp_crtc;
+ struct dpu_kms *kms;
+
+ if (!crtc || !state) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ kms = _dpu_crtc_get_kms(crtc);
+ if (!kms->catalog) {
+ DPU_ERROR("invalid parameters\n");
+ return 0;
+ }
+
+ /* we only need bandwidth check on real-time clients (interfaces) */
+ if (dpu_crtc_get_client_type(crtc) == NRT_CLIENT)
+ return 0;
+
+ dpu_cstate = to_dpu_crtc_state(state);
+
+ /* obtain new values */
+ _dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
+
+ bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl;
+ curr_client_type = dpu_crtc_get_client_type(crtc);
+
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if (tmp_crtc->enabled &&
+ (dpu_crtc_get_client_type(tmp_crtc) ==
+ curr_client_type) && (tmp_crtc != crtc)) {
+ struct dpu_crtc_state *tmp_cstate =
+ to_dpu_crtc_state(tmp_crtc->state);
+
+ DRM_DEBUG_ATOMIC("crtc:%d bw:%llu ctrl:%d\n",
+ tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl,
+ tmp_cstate->bw_control);
+
+ bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
+ }
+
+ /* convert bandwidth to kb */
+ bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
+ DRM_DEBUG_ATOMIC("calculated bandwidth=%uk\n", bw);
+
+ threshold = kms->catalog->perf->max_bw_high;
+
+ DRM_DEBUG_ATOMIC("final threshold bw limit = %d\n", threshold);
+
+ if (!threshold) {
+ DPU_ERROR("no bandwidth limits specified\n");
+ return -E2BIG;
+ } else if (bw > threshold) {
+ DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
+ threshold);
+ return -E2BIG;
+ }
+ }
+
+ return 0;
+}
+
+static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
+ struct drm_crtc *crtc)
+{
+ struct dpu_core_perf_params perf = { 0 };
+ enum dpu_crtc_client_type curr_client_type
+ = dpu_crtc_get_client_type(crtc);
+ struct drm_crtc *tmp_crtc;
+ struct dpu_crtc_state *dpu_cstate;
+ int i, ret = 0;
+ u64 avg_bw;
+
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if (tmp_crtc->enabled &&
+ curr_client_type ==
+ dpu_crtc_get_client_type(tmp_crtc)) {
+ dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
+
+ perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
+ dpu_cstate->new_perf.max_per_pipe_ib);
+
+ perf.bw_ctl += dpu_cstate->new_perf.bw_ctl;
+
+ DRM_DEBUG_ATOMIC("crtc=%d bw=%llu paths:%d\n",
+ tmp_crtc->base.id,
+ dpu_cstate->new_perf.bw_ctl, kms->num_paths);
+ }
+ }
+
+ if (!kms->num_paths)
+ return 0;
+
+ avg_bw = perf.bw_ctl;
+	do_div(avg_bw, (kms->num_paths * 1000)); /* Bps_to_icc */
+
+ for (i = 0; i < kms->num_paths; i++)
+ icc_set_bw(kms->path[i], avg_bw, perf.max_per_pipe_ib);
+
+ return ret;
+}
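+
+/*
+ * Editorial note: icc_set_bw() takes kBps, hence the division by
+ * num_paths * 1000 above, which also splits the aggregate vote evenly
+ * across the interconnect paths. E.g. a 2000000000 Bps aggregate over two
+ * paths becomes an avg_bw vote of 1000000 kBps on each path, with
+ * max_per_pipe_ib passed through unscaled as the peak vote.
+ */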
+
+/**
+ * dpu_core_perf_crtc_release_bw() - request zero bandwidth
+ * @crtc: pointer to a crtc
+ *
+ * Checks a state variable for the crtc; if all pending commit requests
+ * are done, meaning no more bandwidth is needed, the bandwidth request
+ * is released.
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_kms *kms;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ kms = _dpu_crtc_get_kms(crtc);
+ if (!kms->catalog) {
+ DPU_ERROR("invalid kms\n");
+ return;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ if (atomic_dec_return(&kms->bandwidth_ref) > 0)
+ return;
+
+ /* Release the bandwidth */
+ if (kms->perf.enable_bw_release) {
+ trace_dpu_cmd_release_bw(crtc->base.id);
+ DRM_DEBUG_ATOMIC("Release BW crtc=%d\n", crtc->base.id);
+ dpu_crtc->cur_perf.bw_ctl = 0;
+ _dpu_core_perf_crtc_update_bus(kms, crtc);
+ }
+}
+
+static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
+{
+ u64 clk_rate = kms->perf.perf_tune.min_core_clk;
+ struct drm_crtc *crtc;
+ struct dpu_crtc_state *dpu_cstate;
+
+ drm_for_each_crtc(crtc, kms->dev) {
+ if (crtc->enabled) {
+ dpu_cstate = to_dpu_crtc_state(crtc->state);
+ clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
+ clk_rate);
+ clk_rate = clk_round_rate(kms->perf.core_clk,
+ clk_rate);
+ }
+ }
+
+ if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED)
+ clk_rate = kms->perf.fix_core_clk_rate;
+
+ DRM_DEBUG_ATOMIC("clk:%llu\n", clk_rate);
+
+ return clk_rate;
+}
+
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+ int params_changed, bool stop_req)
+{
+ struct dpu_core_perf_params *new, *old;
+ bool update_bus = false, update_clk = false;
+ u64 clk_rate = 0;
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *dpu_cstate;
+ struct dpu_kms *kms;
+ int ret;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ kms = _dpu_crtc_get_kms(crtc);
+ if (!kms->catalog) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ dpu_cstate = to_dpu_crtc_state(crtc->state);
+
+ DRM_DEBUG_ATOMIC("crtc:%d stop_req:%d core_clk:%llu\n",
+ crtc->base.id, stop_req, kms->perf.core_clk_rate);
+
+ old = &dpu_crtc->cur_perf;
+ new = &dpu_cstate->new_perf;
+
+ if (crtc->enabled && !stop_req) {
+ /*
+ * cases for bus bandwidth update.
+ * 1. new bandwidth vote - "ab or ib vote" is higher
+ * than current vote for update request.
+ * 2. new bandwidth vote - "ab or ib vote" is lower
+ * than current vote at end of commit or stop.
+ */
+ if ((params_changed && ((new->bw_ctl > old->bw_ctl) ||
+ (new->max_per_pipe_ib > old->max_per_pipe_ib))) ||
+ (!params_changed && ((new->bw_ctl < old->bw_ctl) ||
+ (new->max_per_pipe_ib < old->max_per_pipe_ib)))) {
+ DRM_DEBUG_ATOMIC("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
+ crtc->base.id, params_changed,
+ new->bw_ctl, old->bw_ctl);
+ old->bw_ctl = new->bw_ctl;
+ old->max_per_pipe_ib = new->max_per_pipe_ib;
+ update_bus = true;
+ }
+
+ if ((params_changed &&
+ (new->core_clk_rate > old->core_clk_rate)) ||
+ (!params_changed &&
+ (new->core_clk_rate < old->core_clk_rate))) {
+ old->core_clk_rate = new->core_clk_rate;
+ update_clk = true;
+ }
+ } else {
+ DRM_DEBUG_ATOMIC("crtc=%d disable\n", crtc->base.id);
+ memset(old, 0, sizeof(*old));
+ update_bus = true;
+ update_clk = true;
+ }
+
+ trace_dpu_perf_crtc_update(crtc->base.id, new->bw_ctl,
+ new->core_clk_rate, stop_req, update_bus, update_clk);
+
+ if (update_bus) {
+ ret = _dpu_core_perf_crtc_update_bus(kms, crtc);
+ if (ret) {
+ DPU_ERROR("crtc-%d: failed to update bus bw vote\n",
+ crtc->base.id);
+ return ret;
+ }
+ }
+
+ /*
+ * Update the clock after bandwidth vote to ensure
+ * bandwidth is available before clock rate is increased.
+ */
+ if (update_clk) {
+ clk_rate = _dpu_core_perf_get_core_clk_rate(kms);
+
+ trace_dpu_core_perf_update_clk(kms->dev, stop_req, clk_rate);
+
+ clk_rate = min(clk_rate, kms->perf.max_core_clk_rate);
+ ret = dev_pm_opp_set_rate(&kms->pdev->dev, clk_rate);
+ if (ret) {
+ DPU_ERROR("failed to set core clock rate %llu\n", clk_rate);
+ return ret;
+ }
+
+ kms->perf.core_clk_rate = clk_rate;
+ DRM_DEBUG_ATOMIC("update clk rate = %lld HZ\n", clk_rate);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static ssize_t _dpu_core_perf_mode_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_core_perf *perf = file->private_data;
+ const struct dpu_perf_cfg *cfg = perf->catalog->perf;
+ u32 perf_mode = 0;
+ int ret;
+
+ ret = kstrtouint_from_user(user_buf, count, 0, &perf_mode);
+ if (ret)
+ return ret;
+
+ if (perf_mode >= DPU_PERF_MODE_MAX)
+ return -EINVAL;
+
+ if (perf_mode == DPU_PERF_MODE_FIXED) {
+ DRM_INFO("fix performance mode\n");
+ } else if (perf_mode == DPU_PERF_MODE_MINIMUM) {
+ /* run the driver with max clk and BW vote */
+ perf->perf_tune.min_core_clk = perf->max_core_clk_rate;
+ perf->perf_tune.min_bus_vote =
+ (u64) cfg->max_bw_high * 1000;
+ DRM_INFO("minimum performance mode\n");
+ } else if (perf_mode == DPU_PERF_MODE_NORMAL) {
+ /* reset the perf tune params to 0 */
+ perf->perf_tune.min_core_clk = 0;
+ perf->perf_tune.min_bus_vote = 0;
+ DRM_INFO("normal performance mode\n");
+ }
+ perf->perf_tune.mode = perf_mode;
+
+ return count;
+}
+
+static ssize_t _dpu_core_perf_mode_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct dpu_core_perf *perf = file->private_data;
+ int len;
+ char buf[128];
+
+ len = scnprintf(buf, sizeof(buf),
+ "mode %d min_mdp_clk %llu min_bus_vote %llu\n",
+ perf->perf_tune.mode,
+ perf->perf_tune.min_core_clk,
+ perf->perf_tune.min_bus_vote);
+
+ return simple_read_from_buffer(buff, count, ppos, buf, len);
+}
+
+static const struct file_operations dpu_core_perf_mode_fops = {
+ .open = simple_open,
+ .read = _dpu_core_perf_mode_read,
+ .write = _dpu_core_perf_mode_write,
+};
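+
+/*
+ * Editorial note: perf_mode is driven from userspace through debugfs,
+ * typically along the lines of (the exact path depends on the DRM minor):
+ *
+ *	echo 1 > /sys/kernel/debug/dri/0/debug/core_perf/perf_mode
+ *
+ * where 0 selects NORMAL, 1 MINIMUM and 2 FIXED, matching enum
+ * dpu_perf_mode above.
+ */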
+
+int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
+{
+ struct dpu_core_perf *perf = &dpu_kms->perf;
+ const struct dpu_mdss_cfg *catalog = perf->catalog;
+ struct dentry *entry;
+
+ entry = debugfs_create_dir("core_perf", parent);
+
+ debugfs_create_u64("max_core_clk_rate", 0600, entry,
+ &perf->max_core_clk_rate);
+ debugfs_create_u64("core_clk_rate", 0600, entry,
+ &perf->core_clk_rate);
+ debugfs_create_u32("enable_bw_release", 0600, entry,
+ (u32 *)&perf->enable_bw_release);
+ debugfs_create_u32("threshold_low", 0600, entry,
+ (u32 *)&catalog->perf->max_bw_low);
+ debugfs_create_u32("threshold_high", 0600, entry,
+ (u32 *)&catalog->perf->max_bw_high);
+ debugfs_create_u32("min_core_ib", 0600, entry,
+ (u32 *)&catalog->perf->min_core_ib);
+ debugfs_create_u32("min_llcc_ib", 0600, entry,
+ (u32 *)&catalog->perf->min_llcc_ib);
+ debugfs_create_u32("min_dram_ib", 0600, entry,
+ (u32 *)&catalog->perf->min_dram_ib);
+ debugfs_create_file("perf_mode", 0600, entry,
+ (u32 *)perf, &dpu_core_perf_mode_fops);
+ debugfs_create_u64("fix_core_clk_rate", 0600, entry,
+ &perf->fix_core_clk_rate);
+ debugfs_create_u64("fix_core_ib_vote", 0600, entry,
+ &perf->fix_core_ib_vote);
+ debugfs_create_u64("fix_core_ab_vote", 0600, entry,
+ &perf->fix_core_ab_vote);
+
+ return 0;
+}
+#endif
+
+void dpu_core_perf_destroy(struct dpu_core_perf *perf)
+{
+ if (!perf) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ perf->max_core_clk_rate = 0;
+ perf->core_clk = NULL;
+ perf->catalog = NULL;
+ perf->dev = NULL;
+}
+
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+ struct drm_device *dev,
+ const struct dpu_mdss_cfg *catalog,
+ struct clk *core_clk)
+{
+ perf->dev = dev;
+ perf->catalog = catalog;
+ perf->core_clk = core_clk;
+
+ perf->max_core_clk_rate = clk_get_rate(core_clk);
+ if (!perf->max_core_clk_rate) {
+		DPU_DEBUG("max core clk rate not available, using default\n");
+ perf->max_core_clk_rate = DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
new file mode 100644
index 000000000..29bb8ee2b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_CORE_PERF_H_
+#define _DPU_CORE_PERF_H_
+
+#include <linux/types.h>
+#include <linux/dcache.h>
+#include <linux/mutex.h>
+#include <drm/drm_crtc.h>
+
+#include "dpu_hw_catalog.h"
+
+#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000
+
+/**
+ * struct dpu_core_perf_params - definition of performance parameters
+ * @max_per_pipe_ib: maximum instantaneous bandwidth request
+ * @bw_ctl: arbitrated bandwidth request
+ * @core_clk_rate: core clock rate request
+ */
+struct dpu_core_perf_params {
+ u64 max_per_pipe_ib;
+ u64 bw_ctl;
+ u64 core_clk_rate;
+};
+
+/**
+ * struct dpu_core_perf_tune - definition of performance tuning control
+ * @mode: performance mode
+ * @min_core_clk: minimum core clock
+ * @min_bus_vote: minimum bus vote
+ */
+struct dpu_core_perf_tune {
+ u32 mode;
+ u64 min_core_clk;
+ u64 min_bus_vote;
+};
+
+/**
+ * struct dpu_core_perf - definition of core performance context
+ * @dev: Pointer to drm device
+ * @debugfs_root: top level debug folder
+ * @catalog: Pointer to catalog configuration
+ * @core_clk: Pointer to the core clock
+ * @core_clk_rate: current core clock rate
+ * @max_core_clk_rate: maximum allowable core clock rate
+ * @perf_tune: debug control for performance tuning
+ * @enable_bw_release: debug control for bandwidth release
+ * @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
+ * @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
+ * @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
+ */
+struct dpu_core_perf {
+ struct drm_device *dev;
+ struct dentry *debugfs_root;
+ const struct dpu_mdss_cfg *catalog;
+ struct clk *core_clk;
+ u64 core_clk_rate;
+ u64 max_core_clk_rate;
+ struct dpu_core_perf_tune perf_tune;
+ u32 enable_bw_release;
+ u64 fix_core_clk_rate;
+ u64 fix_core_ib_vote;
+ u64 fix_core_ab_vote;
+};
+
+/**
+ * dpu_core_perf_crtc_check - validate performance of the given crtc state
+ * @crtc: Pointer to crtc
+ * @state: Pointer to new crtc state
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
+/**
+ * dpu_core_perf_crtc_update - update performance of the given crtc
+ * @crtc: Pointer to crtc
+ * @params_changed: true if crtc parameters are modified
+ * @stop_req: true if this is a stop request
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+ int params_changed, bool stop_req);
+
+/**
+ * dpu_core_perf_crtc_release_bw - release bandwidth of the given crtc
+ * @crtc: Pointer to crtc
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc);
+
+/**
+ * dpu_core_perf_destroy - destroy the given core performance context
+ * @perf: Pointer to core performance context
+ */
+void dpu_core_perf_destroy(struct dpu_core_perf *perf);
+
+/**
+ * dpu_core_perf_init - initialize the given core performance context
+ * @perf: Pointer to core performance context
+ * @dev: Pointer to drm device
+ * @catalog: Pointer to catalog
+ * @core_clk: pointer to core clock
+ */
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+ struct drm_device *dev,
+ const struct dpu_mdss_cfg *catalog,
+ struct clk *core_clk);
+
+struct dpu_kms;
+
+/**
+ * dpu_core_perf_debugfs_init - initialize debugfs for core performance context
+ * @dpu_kms: Pointer to the dpu_kms struct
+ * @parent: Pointer to parent debugfs directory
+ */
+int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent);
+
+#endif /* _DPU_CORE_PERF_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
new file mode 100644
index 000000000..5a5821e59
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -0,0 +1,1626 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/sort.h>
+#include <linux/debugfs.h>
+#include <linux/ktime.h>
+#include <linux/bits.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_flip_work.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_vblank.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_dspp.h"
+#include "dpu_crtc.h"
+#include "dpu_plane.h"
+#include "dpu_encoder.h"
+#include "dpu_vbif.h"
+#include "dpu_core_perf.h"
+#include "dpu_trace.h"
+
+/* layer mixer index on dpu_crtc */
+#define LEFT_MIXER 0
+#define RIGHT_MIXER 1
+
+/* timeout in ms waiting for frame done */
+#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS 60
+
+#define CONVERT_S3_15(val) \
+ (((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))
+
+static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+
+ return to_dpu_kms(priv->kms);
+}
+
+static void dpu_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+ if (!crtc)
+ return;
+
+ drm_crtc_cleanup(crtc);
+ kfree(dpu_crtc);
+}
+
+static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder(encoder, dev)
+ if (encoder->crtc == crtc)
+ return encoder;
+
+ return NULL;
+}
+
+static enum dpu_crtc_crc_source dpu_crtc_parse_crc_source(const char *src_name)
+{
+ if (!src_name ||
+ !strcmp(src_name, "none"))
+ return DPU_CRTC_CRC_SOURCE_NONE;
+ if (!strcmp(src_name, "auto") ||
+ !strcmp(src_name, "lm"))
+ return DPU_CRTC_CRC_SOURCE_LAYER_MIXER;
+ if (!strcmp(src_name, "encoder"))
+ return DPU_CRTC_CRC_SOURCE_ENCODER;
+
+ return DPU_CRTC_CRC_SOURCE_INVALID;
+}
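+
+/*
+ * Editorial note: these source names come in through the standard DRM CRC
+ * debugfs ABI, i.e. writes to crtc-N/crc/control such as:
+ *
+ *	echo lm > /sys/kernel/debug/dri/0/crtc-0/crc/control
+ *
+ * "auto"/"lm" select the layer-mixer MISRs, "encoder" the interface MISRs,
+ * and "none" disables CRC generation.
+ */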
+
+static int dpu_crtc_verify_crc_source(struct drm_crtc *crtc,
+ const char *src_name, size_t *values_cnt)
+{
+ enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
+ struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);
+
+ if (source < 0) {
+ DRM_DEBUG_DRIVER("Invalid source %s for CRTC%d\n", src_name, crtc->index);
+ return -EINVAL;
+ }
+
+ if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER) {
+ *values_cnt = crtc_state->num_mixers;
+ } else if (source == DPU_CRTC_CRC_SOURCE_ENCODER) {
+ struct drm_encoder *drm_enc;
+
+ *values_cnt = 0;
+
+ drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask)
+ *values_cnt += dpu_encoder_get_crc_values_cnt(drm_enc);
+ }
+
+ return 0;
+}
+
+static void dpu_crtc_setup_lm_misr(struct dpu_crtc_state *crtc_state)
+{
+ struct dpu_crtc_mixer *m;
+ int i;
+
+ for (i = 0; i < crtc_state->num_mixers; ++i) {
+ m = &crtc_state->mixers[i];
+
+ if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
+ continue;
+
+ /* Calculate MISR over 1 frame */
+ m->hw_lm->ops.setup_misr(m->hw_lm);
+ }
+}
+
+static void dpu_crtc_setup_encoder_misr(struct drm_crtc *crtc)
+{
+ struct drm_encoder *drm_enc;
+
+ drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask)
+ dpu_encoder_setup_misr(drm_enc);
+}
+
+static int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
+{
+ enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
+ enum dpu_crtc_crc_source current_source;
+ struct dpu_crtc_state *crtc_state;
+ struct drm_device *drm_dev = crtc->dev;
+
+ bool was_enabled;
+ bool enable = false;
+ int ret = 0;
+
+ if (source < 0) {
+ DRM_DEBUG_DRIVER("Invalid CRC source %s for CRTC%d\n", src_name, crtc->index);
+ return -EINVAL;
+ }
+
+ ret = drm_modeset_lock(&crtc->mutex, NULL);
+
+ if (ret)
+ return ret;
+
+ enable = (source != DPU_CRTC_CRC_SOURCE_NONE);
+ crtc_state = to_dpu_crtc_state(crtc->state);
+
+ spin_lock_irq(&drm_dev->event_lock);
+ current_source = crtc_state->crc_source;
+ spin_unlock_irq(&drm_dev->event_lock);
+
+ was_enabled = (current_source != DPU_CRTC_CRC_SOURCE_NONE);
+
+ if (!was_enabled && enable) {
+ ret = drm_crtc_vblank_get(crtc);
+
+ if (ret)
+ goto cleanup;
+
+ } else if (was_enabled && !enable) {
+ drm_crtc_vblank_put(crtc);
+ }
+
+ spin_lock_irq(&drm_dev->event_lock);
+ crtc_state->crc_source = source;
+ spin_unlock_irq(&drm_dev->event_lock);
+
+ crtc_state->crc_frame_skip_count = 0;
+
+ if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
+ dpu_crtc_setup_lm_misr(crtc_state);
+ else if (source == DPU_CRTC_CRC_SOURCE_ENCODER)
+ dpu_crtc_setup_encoder_misr(crtc);
+ else
+ ret = -EINVAL;
+
+cleanup:
+ drm_modeset_unlock(&crtc->mutex);
+
+ return ret;
+}
+
+static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder = get_encoder_from_crtc(crtc);
+
+	if (!encoder) {
+ DRM_ERROR("no encoder found for crtc %d\n", crtc->index);
+ return 0;
+ }
+
+ return dpu_encoder_get_vsync_count(encoder);
+}
+
+static int dpu_crtc_get_lm_crc(struct drm_crtc *crtc,
+ struct dpu_crtc_state *crtc_state)
+{
+ struct dpu_crtc_mixer *m;
+ u32 crcs[CRTC_DUAL_MIXERS];
+
+ int rc = 0;
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(crcs) != ARRAY_SIZE(crtc_state->mixers));
+
+ for (i = 0; i < crtc_state->num_mixers; ++i) {
+
+ m = &crtc_state->mixers[i];
+
+ if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
+ continue;
+
+ rc = m->hw_lm->ops.collect_misr(m->hw_lm, &crcs[i]);
+
+ if (rc) {
+ if (rc != -ENODATA)
+ DRM_DEBUG_DRIVER("MISR read failed\n");
+ return rc;
+ }
+ }
+
+ return drm_crtc_add_crc_entry(crtc, true,
+ drm_crtc_accurate_vblank_count(crtc), crcs);
+}
+
+static int dpu_crtc_get_encoder_crc(struct drm_crtc *crtc)
+{
+ struct drm_encoder *drm_enc;
+ int rc, pos = 0;
+ u32 crcs[INTF_MAX];
+
+ drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask) {
+ rc = dpu_encoder_get_crc(drm_enc, crcs, pos);
+ if (rc < 0) {
+ if (rc != -ENODATA)
+ DRM_DEBUG_DRIVER("MISR read failed\n");
+
+ return rc;
+ }
+
+ pos += rc;
+ }
+
+ return drm_crtc_add_crc_entry(crtc, true,
+ drm_crtc_accurate_vblank_count(crtc), crcs);
+}
+
+static int dpu_crtc_get_crc(struct drm_crtc *crtc)
+{
+ struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);
+
+ /* Skip first 2 frames in case of "uncooked" CRCs */
+ if (crtc_state->crc_frame_skip_count < 2) {
+ crtc_state->crc_frame_skip_count++;
+ return 0;
+ }
+
+ if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
+ return dpu_crtc_get_lm_crc(crtc, crtc_state);
+ else if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_ENCODER)
+ return dpu_crtc_get_encoder_crc(crtc);
+
+ return -EINVAL;
+}
+
+static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ unsigned int pipe = crtc->index;
+ struct drm_encoder *encoder;
+ int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
+
+ encoder = get_encoder_from_crtc(crtc);
+ if (!encoder) {
+ DRM_ERROR("no encoder found for crtc %d\n", pipe);
+ return false;
+ }
+
+ vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
+
+ /*
+ * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
+ * the end of VFP. Translate the porch values relative to the line
+ * counter positions.
+ */
+
+ vactive_start = vsw + vbp + 1;
+ vactive_end = vactive_start + mode->crtc_vdisplay;
+
+ /* last scan line before VSYNC */
+ vfp_end = mode->crtc_vtotal;
+
+ if (stime)
+ *stime = ktime_get();
+
+ line = dpu_encoder_get_linecount(encoder);
+
+ if (line < vactive_start)
+ line -= vactive_start;
+ else if (line > vactive_end)
+ line = line - vfp_end - vactive_start;
+ else
+ line -= vactive_start;
+
+ *vpos = line;
+ *hpos = 0;
+
+ if (etime)
+ *etime = ktime_get();
+
+ return true;
+}
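+
+/*
+ * Worked example (editorial): for a mode with a 5-line vsync pulse and a
+ * 36-line back porch, vactive_start is 5 + 36 + 1 = 42. A raw line count
+ * of 42 then maps to vpos 0, a count of 10 (still in blanking) maps to
+ * -32, and counts beyond vactive_end map to negative values counting down
+ * toward the next frame's active region.
+ */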
+
+static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
+ struct dpu_plane_state *pstate, struct dpu_format *format)
+{
+ struct dpu_hw_mixer *lm = mixer->hw_lm;
+ uint32_t blend_op;
+ uint32_t fg_alpha, bg_alpha;
+
+ fg_alpha = pstate->base.alpha >> 8;
+ bg_alpha = 0xff - fg_alpha;
+
+ /* default to opaque blending */
+ if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE ||
+ !format->alpha_enable) {
+ blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
+ DPU_BLEND_BG_ALPHA_BG_CONST;
+ } else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
+ blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
+ DPU_BLEND_BG_ALPHA_FG_PIXEL;
+ if (fg_alpha != 0xff) {
+ bg_alpha = fg_alpha;
+ blend_op |= DPU_BLEND_BG_MOD_ALPHA |
+ DPU_BLEND_BG_INV_MOD_ALPHA;
+ } else {
+ blend_op |= DPU_BLEND_BG_INV_ALPHA;
+ }
+ } else {
+ /* coverage blending */
+ blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
+ DPU_BLEND_BG_ALPHA_FG_PIXEL;
+ if (fg_alpha != 0xff) {
+ bg_alpha = fg_alpha;
+ blend_op |= DPU_BLEND_FG_MOD_ALPHA |
+ DPU_BLEND_FG_INV_MOD_ALPHA |
+ DPU_BLEND_BG_MOD_ALPHA |
+ DPU_BLEND_BG_INV_MOD_ALPHA;
+ } else {
+ blend_op |= DPU_BLEND_BG_INV_ALPHA;
+ }
+ }
+
+ lm->ops.setup_blend_config(lm, pstate->stage,
+ fg_alpha, bg_alpha, blend_op);
+
+ DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
+ &format->base.pixel_format, format->alpha_enable, blend_op);
+}
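+
+/*
+ * Editorial note: decoded into the usual compositing algebra, the
+ * premultiplied case with fg_alpha == 0xff programs
+ * out = fg + (1 - fg.alpha) * bg, while the coverage case programs
+ * out = fg.alpha * fg + (1 - fg.alpha) * bg. The alpha >> 8 above converts
+ * the 16-bit DRM plane alpha property into the 8-bit constant alpha that
+ * the mixer consumes.
+ */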
+
+static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
+{
+ struct dpu_crtc_state *crtc_state;
+ int lm_idx, lm_horiz_position;
+
+ crtc_state = to_dpu_crtc_state(crtc->state);
+
+ lm_horiz_position = 0;
+ for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
+ const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
+ struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
+ struct dpu_hw_mixer_cfg cfg;
+
+ if (!lm_roi || !drm_rect_visible(lm_roi))
+ continue;
+
+ cfg.out_width = drm_rect_width(lm_roi);
+ cfg.out_height = drm_rect_height(lm_roi);
+ cfg.right_mixer = lm_horiz_position++;
+ cfg.flags = 0;
+ hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
+ }
+}
+
+static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
+ struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer,
+ struct dpu_hw_stage_cfg *stage_cfg)
+{
+ struct drm_plane *plane;
+ struct drm_framebuffer *fb;
+ struct drm_plane_state *state;
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
+ struct dpu_plane_state *pstate = NULL;
+ struct dpu_format *format;
+ struct dpu_hw_ctl *ctl = mixer->lm_ctl;
+
+ uint32_t stage_idx, lm_idx;
+ int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
+ bool bg_alpha_enable = false;
+ DECLARE_BITMAP(fetch_active, SSPP_MAX);
+
+ memset(fetch_active, 0, sizeof(fetch_active));
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ enum dpu_sspp sspp_idx;
+
+ state = plane->state;
+ if (!state)
+ continue;
+
+ if (!state->visible)
+ continue;
+
+ pstate = to_dpu_plane_state(state);
+ fb = state->fb;
+
+ sspp_idx = dpu_plane_pipe(plane);
+ set_bit(sspp_idx, fetch_active);
+
+ DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d\n",
+ crtc->base.id,
+ pstate->stage,
+ plane->base.id,
+ sspp_idx - SSPP_VIG0,
+ state->fb ? state->fb->base.id : -1);
+
+ format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
+
+ if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
+ bg_alpha_enable = true;
+
+ stage_idx = zpos_cnt[pstate->stage]++;
+ stage_cfg->stage[pstate->stage][stage_idx] =
+ sspp_idx;
+ stage_cfg->multirect_index[pstate->stage][stage_idx] =
+ pstate->multirect_index;
+
+ trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
+ state, pstate, stage_idx,
+ sspp_idx - SSPP_VIG0,
+ format->base.pixel_format,
+ fb ? fb->modifier : 0);
+
+ /* blend config update */
+ for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
+ _dpu_crtc_setup_blend_cfg(mixer + lm_idx,
+ pstate, format);
+
+ mixer[lm_idx].lm_ctl->ops.update_pending_flush_sspp(mixer[lm_idx].lm_ctl,
+ sspp_idx);
+
+ if (bg_alpha_enable && !format->alpha_enable)
+ mixer[lm_idx].mixer_op_mode = 0;
+ else
+ mixer[lm_idx].mixer_op_mode |=
+ 1 << pstate->stage;
+ }
+ }
+
+ if (ctl->ops.set_active_pipes)
+ ctl->ops.set_active_pipes(ctl, fetch_active);
+
+ _dpu_crtc_program_lm_output_roi(crtc);
+}
+
+/**
+ * _dpu_crtc_blend_setup - configure crtc mixers
+ * @crtc: Pointer to drm crtc structure
+ */
+static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
+ struct dpu_crtc_mixer *mixer = cstate->mixers;
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_mixer *lm;
+ struct dpu_hw_stage_cfg stage_cfg;
+ int i;
+
+ DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);
+
+ for (i = 0; i < cstate->num_mixers; i++) {
+ mixer[i].mixer_op_mode = 0;
+ if (mixer[i].lm_ctl->ops.clear_all_blendstages)
+ mixer[i].lm_ctl->ops.clear_all_blendstages(
+ mixer[i].lm_ctl);
+ }
+
+ /* initialize stage cfg */
+ memset(&stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
+
+ _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, &stage_cfg);
+
+ for (i = 0; i < cstate->num_mixers; i++) {
+ ctl = mixer[i].lm_ctl;
+ lm = mixer[i].hw_lm;
+
+ lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
+
+ /* stage config flush mask */
+ ctl->ops.update_pending_flush_mixer(ctl,
+ mixer[i].hw_lm->idx);
+
+ DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d\n",
+ mixer[i].hw_lm->idx - LM_0,
+ mixer[i].mixer_op_mode,
+ ctl->idx - CTL_0);
+
+ ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
+ &stage_cfg);
+ }
+}
+
+/**
+ * _dpu_crtc_complete_flip - signal pending page_flip events
+ * Any pending vblank events are added to the vblank_event_list
+ * so that the next vblank interrupt signals them.
+ * PAGE_FLIP events, however, are not handled through the vblank_event_list.
+ * This API signals any pending PAGE_FLIP events that were requested through
+ * DRM_IOCTL_MODE_PAGE_FLIP and cached in dpu_crtc->event.
+ * @crtc: Pointer to drm crtc structure
+ */
+static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (dpu_crtc->event) {
+ DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
+ dpu_crtc->event);
+ trace_dpu_crtc_complete_flip(DRMID(crtc));
+ drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
+ dpu_crtc->event = NULL;
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+
+ /*
+ * TODO: This function is called from dpu debugfs and as part of atomic
+ * check. When called from debugfs, the crtc->mutex must be held to
+ * read crtc->state. However reading crtc->state from atomic check isn't
+ * allowed (unless you have a good reason, a big comment, and a deep
+ * understanding of how the atomic/modeset locks work (<- and this is
+ * probably not possible)). So we'll keep the WARN_ON here for now, but
+ * really we need to figure out a better way to track our operating mode
+ */
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+ /* TODO: Returns the first INTF_MODE, could there be multiple values? */
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ return dpu_encoder_get_intf_mode(encoder);
+
+ return INTF_MODE_NONE;
+}
+
+void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+ /* keep statistics on vblank callback - with auto reset via debugfs */
+ if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
+ dpu_crtc->vblank_cb_time = ktime_get();
+ else
+ dpu_crtc->vblank_cb_count++;
+
+ dpu_crtc_get_crc(crtc);
+
+ drm_crtc_handle_vblank(crtc);
+ trace_dpu_crtc_vblank_cb(DRMID(crtc));
+}
+
+static void dpu_crtc_frame_event_work(struct kthread_work *work)
+{
+ struct dpu_crtc_frame_event *fevent = container_of(work,
+ struct dpu_crtc_frame_event, work);
+ struct drm_crtc *crtc = fevent->crtc;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ unsigned long flags;
+ bool frame_done = false;
+
+ DPU_ATRACE_BEGIN("crtc_frame_event");
+
+ DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
+ ktime_to_ns(fevent->ts));
+
+ if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
+ | DPU_ENCODER_FRAME_EVENT_ERROR
+ | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+
+ if (atomic_read(&dpu_crtc->frame_pending) < 1) {
+ /* ignore vblank when not pending */
+ } else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
+ /* release bandwidth and other resources */
+ trace_dpu_crtc_frame_event_done(DRMID(crtc),
+ fevent->event);
+ dpu_core_perf_crtc_release_bw(crtc);
+ } else {
+ trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
+ fevent->event);
+ }
+
+ if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
+ | DPU_ENCODER_FRAME_EVENT_ERROR))
+ frame_done = true;
+ }
+
+ if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
+ DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
+ crtc->base.id, ktime_to_ns(fevent->ts));
+
+ if (frame_done)
+ complete_all(&dpu_crtc->frame_done_comp);
+
+ spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
+ list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
+ spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
+ DPU_ATRACE_END("crtc_frame_event");
+}
+
+/*
+ * dpu_crtc_frame_event_cb - crtc frame event callback API. The CRTC module
+ * registers this API with the encoder for all frame event callbacks like
+ * frame_error, frame_done, idle_timeout, etc. The encoder may invoke these
+ * events from different contexts - IRQ, user thread, commit_thread, etc. Each
+ * event should be carefully reviewed and processed in the proper task
+ * context to avoid scheduling delay or to properly manage the IRQ context's
+ * bottom-half processing.
+ */
+static void dpu_crtc_frame_event_cb(void *data, u32 event)
+{
+ struct drm_crtc *crtc = (struct drm_crtc *)data;
+ struct dpu_crtc *dpu_crtc;
+ struct msm_drm_private *priv;
+ struct dpu_crtc_frame_event *fevent;
+ unsigned long flags;
+ u32 crtc_id;
+
+ /* Nothing to do on idle event */
+ if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
+ return;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ priv = crtc->dev->dev_private;
+ crtc_id = drm_crtc_index(crtc);
+
+ trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);
+
+ spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
+ fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
+ struct dpu_crtc_frame_event, list);
+ if (fevent)
+ list_del_init(&fevent->list);
+ spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
+
+ if (!fevent) {
+ DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
+ return;
+ }
+
+ fevent->event = event;
+ fevent->crtc = crtc;
+ fevent->ts = ktime_get();
+ kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
+}
+
+void dpu_crtc_complete_commit(struct drm_crtc *crtc)
+{
+ trace_dpu_crtc_complete_commit(DRMID(crtc));
+ dpu_core_perf_crtc_update(crtc, 0, false);
+ _dpu_crtc_complete_flip(crtc);
+}
+
+static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
+ struct drm_display_mode *adj_mode = &state->adjusted_mode;
+ u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
+ int i;
+
+ for (i = 0; i < cstate->num_mixers; i++) {
+ struct drm_rect *r = &cstate->lm_bounds[i];
+ r->x1 = crtc_split_width * i;
+ r->y1 = 0;
+ r->x2 = r->x1 + crtc_split_width;
+ r->y2 = adj_mode->vdisplay;
+
+ trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
+ }
+}
+
+static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
+ struct dpu_hw_pcc_cfg *cfg)
+{
+ struct drm_color_ctm *ctm;
+
+ memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));
+
+ ctm = (struct drm_color_ctm *)state->ctm->data;
+
+ if (!ctm)
+ return;
+
+ cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
+ cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
+ cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);
+
+ cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
+ cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
+ cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);
+
+ cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
+ cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
+ cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
+}
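+
+/*
+ * The drm_color_ctm entries consumed above are S31.32 sign-magnitude
+ * fixed point: bit 63 carries the sign, bits 62:32 the integer part and
+ * bits 31:0 the fraction, so 1.0 is (1ULL << 32), 0.5 is (1ULL << 31)
+ * and -1.0 is BIT_ULL(63) | (1ULL << 32). CONVERT_S3_15() reduces each
+ * entry to the narrower signed fixed-point format (S3.15, per the macro
+ * name) that the DSPP PCC block consumes.
+ */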
+
+static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *state = crtc->state;
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
+ struct dpu_crtc_mixer *mixer = cstate->mixers;
+ struct dpu_hw_pcc_cfg cfg;
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_dspp *dspp;
+ int i;
+
+
+ if (!state->color_mgmt_changed)
+ return;
+
+ for (i = 0; i < cstate->num_mixers; i++) {
+ ctl = mixer[i].lm_ctl;
+ dspp = mixer[i].hw_dspp;
+
+ if (!dspp || !dspp->ops.setup_pcc)
+ continue;
+
+ if (!state->ctm) {
+ dspp->ops.setup_pcc(dspp, NULL);
+ } else {
+ _dpu_crtc_get_pcc_coeff(state, &cfg);
+ dspp->ops.setup_pcc(dspp, &cfg);
+ }
+
+ /* stage config flush mask */
+ ctl->ops.update_pending_flush_dspp(ctl,
+ mixer[i].hw_dspp->idx);
+ }
+}
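+
+/*
+ * The CTM blob consumed above is supplied by userspace through the
+ * standard CTM crtc property. A minimal libdrm sketch pushing an identity
+ * matrix (1.0 == 1ULL << 32 in S31.32; fd, crtc_id and ctm_prop_id are
+ * assumed to have been looked up already):
+ *
+ *	struct drm_color_ctm ctm = { .matrix = {
+ *		1ULL << 32, 0, 0,
+ *		0, 1ULL << 32, 0,
+ *		0, 0, 1ULL << 32,
+ *	} };
+ *	uint32_t blob_id;
+ *	drmModeCreatePropertyBlob(fd, &ctm, sizeof(ctm), &blob_id);
+ *	drmModeObjectSetProperty(fd, crtc_id, DRM_MODE_OBJECT_CRTC,
+ *				 ctm_prop_id, blob_id);
+ */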
+
+static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
+ struct drm_encoder *encoder;
+
+ if (!crtc->state->enable) {
+ DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_begin\n",
+ crtc->base.id, crtc->state->enable);
+ return;
+ }
+
+ DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
+
+ _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
+
+ /* encoder will trigger pending mask now */
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ dpu_encoder_trigger_kickoff_pending(encoder);
+
+ /*
+ * If no mixers have been allocated in dpu_crtc_atomic_check(),
+ * it means we are trying to flush a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!cstate->num_mixers))
+ return;
+
+ _dpu_crtc_blend_setup(crtc);
+
+ _dpu_crtc_setup_cp_blocks(crtc);
+
+ /*
+ * PP_DONE irq is only used by command mode for now.
+	 * It is better to request pending before the FLUSH and START triggers
+	 * to make sure no pp_done irq is missed.
+	 * This is safe because no pp_done will happen before the SW trigger
+ * in command mode.
+ */
+}
+
+static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct drm_device *dev;
+ struct drm_plane *plane;
+ struct msm_drm_private *priv;
+ unsigned long flags;
+ struct dpu_crtc_state *cstate;
+
+ if (!crtc->state->enable) {
+ DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n",
+ crtc->base.id, crtc->state->enable);
+ return;
+ }
+
+ DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(crtc->state);
+ dev = crtc->dev;
+ priv = dev->dev_private;
+
+ if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
+ DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
+ return;
+ }
+
+ WARN_ON(dpu_crtc->event);
+ spin_lock_irqsave(&dev->event_lock, flags);
+ dpu_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ /*
+	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
+ * it means we are trying to flush a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!cstate->num_mixers))
+ return;
+
+ /* update performance setting before crtc kickoff */
+ dpu_core_perf_crtc_update(crtc, 1, false);
+
+ /*
+	 * Final plane updates: give each plane a chance to complete all
+	 * required writes/flushing before the crtc's "flush everything"
+	 * call below.
+ */
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ if (dpu_crtc->smmu_state.transition_error)
+ dpu_plane_set_error(plane, true);
+ dpu_plane_flush(plane);
+ }
+
+ /* Kickoff will be scheduled by outer layer */
+}
+
+/**
+ * dpu_crtc_destroy_state - state destroy hook
+ * @crtc: drm CRTC
+ * @state: CRTC state object to release
+ */
+static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
+
+ DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
+
+ __drm_atomic_helper_crtc_destroy_state(state);
+
+ kfree(cstate);
+}
+
+static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ int ret, rc = 0;
+
+ if (!atomic_read(&dpu_crtc->frame_pending)) {
+ DRM_DEBUG_ATOMIC("no frames pending\n");
+ return 0;
+ }
+
+ DPU_ATRACE_BEGIN("frame done completion wait");
+ ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
+ msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
+ if (!ret) {
+ DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
+ rc = -ETIMEDOUT;
+ }
+ DPU_ATRACE_END("frame done completion wait");
+
+ return rc;
+}
+
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
+
+ /*
+	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
+ * it means we are trying to start a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!cstate->num_mixers))
+ return;
+
+ DPU_ATRACE_BEGIN("crtc_commit");
+
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask) {
+ if (!dpu_encoder_is_valid_for_commit(encoder)) {
+ DRM_DEBUG_ATOMIC("invalid FB not kicking off crtc\n");
+ goto end;
+ }
+ }
+ /*
+ * Encoder will flush/start now, unless it has a tx pending. If so, it
+ * may delay and flush at an irq event (e.g. ppdone)
+ */
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask)
+ dpu_encoder_prepare_for_kickoff(encoder);
+
+ if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
+ /* acquire bandwidth and other resources */
+ DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id);
+ } else
+ DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id);
+
+ dpu_crtc->play_count++;
+
+ dpu_vbif_clear_errors(dpu_kms);
+
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ dpu_encoder_kickoff(encoder);
+
+ reinit_completion(&dpu_crtc->frame_done_comp);
+
+end:
+ DPU_ATRACE_END("crtc_commit");
+}
+
+static void dpu_crtc_reset(struct drm_crtc *crtc)
+{
+ struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
+
+ if (crtc->state)
+ dpu_crtc_destroy_state(crtc, crtc->state);
+
+ if (cstate)
+ __drm_atomic_helper_crtc_reset(crtc, &cstate->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
+}
+
+/**
+ * dpu_crtc_duplicate_state - state duplicate hook
+ * @crtc: Pointer to drm crtc structure
+ */
+static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);
+
+ cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
+ if (!cstate) {
+ DPU_ERROR("failed to allocate state\n");
+ return NULL;
+ }
+
+ /* duplicate base helper */
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
+
+ return &cstate->base;
+}
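+
+/*
+ * dpu_crtc_reset(), dpu_crtc_duplicate_state() and dpu_crtc_destroy_state()
+ * together implement the usual subclassed-state idiom: dpu_crtc_state
+ * embeds drm_crtc_state as @base, kmemdup() carries over the
+ * driver-private fields (mixers, perf, lm_bounds), and the
+ * __drm_atomic_helper_crtc_*() helpers handle the base portion, resetting
+ * transient fields such as the pending event. The DRM core only ever sees
+ * &cstate->base; the driver recovers its state with to_dpu_crtc_state(),
+ * i.e. container_of().
+ */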
+
+static void dpu_crtc_atomic_print_state(struct drm_printer *p,
+ const struct drm_crtc_state *state)
+{
+ const struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
+ int i;
+
+ for (i = 0; i < cstate->num_mixers; i++) {
+ drm_printf(p, "\tlm[%d]=%d\n", i, cstate->mixers[i].hw_lm->idx - LM_0);
+ drm_printf(p, "\tctl[%d]=%d\n", i, cstate->mixers[i].lm_ctl->idx - CTL_0);
+ if (cstate->mixers[i].hw_dspp)
+ drm_printf(p, "\tdspp[%d]=%d\n", i, cstate->mixers[i].hw_dspp->idx - DSPP_0);
+ }
+}
+
+static void dpu_crtc_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
+ struct drm_encoder *encoder;
+ unsigned long flags;
+ bool release_bandwidth = false;
+
+ DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
+
+ /* Disable/save vblank irq handling */
+ drm_crtc_vblank_off(crtc);
+
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ old_crtc_state->encoder_mask) {
+ /* in video mode, we hold an extra bandwidth reference
+ * as we cannot drop bandwidth at frame-done if any
+ * crtc is being used in video mode.
+ */
+ if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
+ release_bandwidth = true;
+ dpu_encoder_assign_crtc(encoder, NULL);
+ }
+
+ /* wait for frame_event_done completion */
+ if (_dpu_crtc_wait_for_frame_done(crtc))
+ DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
+ crtc->base.id,
+ atomic_read(&dpu_crtc->frame_pending));
+
+ trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
+ dpu_crtc->enabled = false;
+
+ if (atomic_read(&dpu_crtc->frame_pending)) {
+ trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
+ atomic_read(&dpu_crtc->frame_pending));
+ if (release_bandwidth)
+ dpu_core_perf_crtc_release_bw(crtc);
+ atomic_set(&dpu_crtc->frame_pending, 0);
+ }
+
+ dpu_core_perf_crtc_update(crtc, 0, true);
+
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
+
+ memset(cstate->mixers, 0, sizeof(cstate->mixers));
+ cstate->num_mixers = 0;
+
+ /* disable clk & bw control until clk & bw properties are set */
+ cstate->bw_control = false;
+ cstate->bw_split_vote = false;
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+
+ pm_runtime_put_sync(crtc->dev->dev);
+}
+
+static void dpu_crtc_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_encoder *encoder;
+ bool request_bandwidth = false;
+
+ pm_runtime_get_sync(crtc->dev->dev);
+
+ DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
+
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
+ /* in video mode, we hold an extra bandwidth reference
+ * as we cannot drop bandwidth at frame-done if any
+ * crtc is being used in video mode.
+ */
+ if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
+ request_bandwidth = true;
+ dpu_encoder_register_frame_event_callback(encoder,
+ dpu_crtc_frame_event_cb, (void *)crtc);
+ }
+
+ if (request_bandwidth)
+ atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);
+
+ trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
+ dpu_crtc->enabled = true;
+
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ dpu_encoder_assign_crtc(encoder, crtc);
+
+ /* Enable/restore vblank irq handling */
+ drm_crtc_vblank_on(crtc);
+}
+
+struct plane_state {
+ struct dpu_plane_state *dpu_pstate;
+ const struct drm_plane_state *drm_pstate;
+ int stage;
+ u32 pipe_id;
+};
+
+static bool dpu_crtc_needs_dirtyfb(struct drm_crtc_state *cstate)
+{
+ struct drm_crtc *crtc = cstate->crtc;
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder_mask (encoder, crtc->dev, cstate->encoder_mask) {
+ if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_CMD) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state);
+ struct plane_state *pstates;
+
+ const struct drm_plane_state *pstate;
+ struct drm_plane *plane;
+ struct drm_display_mode *mode;
+
+ int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;
+
+ struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
+ int multirect_count = 0;
+ const struct drm_plane_state *pipe_staged[SSPP_MAX];
+ int left_zpos_cnt = 0, right_zpos_cnt = 0;
+ struct drm_rect crtc_rect = { 0 };
+ bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);
+
+ pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
+ if (!pstates)
+ return -ENOMEM;
+
+ if (!crtc_state->enable || !crtc_state->active) {
+ DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
+ crtc->base.id, crtc_state->enable,
+ crtc_state->active);
+ memset(&cstate->new_perf, 0, sizeof(cstate->new_perf));
+ goto end;
+ }
+
+ mode = &crtc_state->adjusted_mode;
+ DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);
+
+ /* force a full mode set if active state changed */
+ if (crtc_state->active_changed)
+ crtc_state->mode_changed = true;
+
+ memset(pipe_staged, 0, sizeof(pipe_staged));
+
+ if (cstate->num_mixers) {
+ mixer_width = mode->hdisplay / cstate->num_mixers;
+
+ _dpu_crtc_setup_lm_bounds(crtc, crtc_state);
+ }
+
+ crtc_rect.x2 = mode->hdisplay;
+ crtc_rect.y2 = mode->vdisplay;
+
+ /* get plane state for all drm planes associated with crtc state */
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
+ struct dpu_plane_state *dpu_pstate = to_dpu_plane_state(pstate);
+ struct drm_rect dst, clip = crtc_rect;
+
+ if (IS_ERR_OR_NULL(pstate)) {
+ rc = PTR_ERR(pstate);
+ DPU_ERROR("%s: failed to get plane%d state, %d\n",
+ dpu_crtc->name, plane->base.id, rc);
+ goto end;
+ }
+ if (cnt >= DPU_STAGE_MAX * 4)
+ continue;
+
+ if (!pstate->visible)
+ continue;
+
+ pstates[cnt].dpu_pstate = dpu_pstate;
+ pstates[cnt].drm_pstate = pstate;
+ pstates[cnt].stage = pstate->normalized_zpos;
+ pstates[cnt].pipe_id = dpu_plane_pipe(plane);
+
+ dpu_pstate->needs_dirtyfb = needs_dirtyfb;
+
+ if (pipe_staged[pstates[cnt].pipe_id]) {
+ multirect_plane[multirect_count].r0 =
+ pipe_staged[pstates[cnt].pipe_id];
+ multirect_plane[multirect_count].r1 = pstate;
+ multirect_count++;
+
+ pipe_staged[pstates[cnt].pipe_id] = NULL;
+ } else {
+ pipe_staged[pstates[cnt].pipe_id] = pstate;
+ }
+
+ cnt++;
+
+ dst = drm_plane_state_dest(pstate);
+ if (!drm_rect_intersect(&clip, &dst)) {
+ DPU_ERROR("invalid vertical/horizontal destination\n");
+ DPU_ERROR("display: " DRM_RECT_FMT " plane: "
+ DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
+ DRM_RECT_ARG(&dst));
+ rc = -E2BIG;
+ goto end;
+ }
+ }
+
+ for (i = 1; i < SSPP_MAX; i++) {
+ if (pipe_staged[i])
+ dpu_plane_clear_multirect(pipe_staged[i]);
+ }
+
+ z_pos = -1;
+ for (i = 0; i < cnt; i++) {
+ /* reset counts at every new blend stage */
+ if (pstates[i].stage != z_pos) {
+ left_zpos_cnt = 0;
+ right_zpos_cnt = 0;
+ z_pos = pstates[i].stage;
+ }
+
+ /* verify z_pos setting before using it */
+ if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
+ DPU_ERROR("> %d plane stages assigned\n",
+ DPU_STAGE_MAX - DPU_STAGE_0);
+ rc = -EINVAL;
+ goto end;
+ } else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
+ if (left_zpos_cnt == 2) {
+ DPU_ERROR("> 2 planes @ stage %d on left\n",
+ z_pos);
+ rc = -EINVAL;
+ goto end;
+ }
+ left_zpos_cnt++;
+
+ } else {
+ if (right_zpos_cnt == 2) {
+ DPU_ERROR("> 2 planes @ stage %d on right\n",
+ z_pos);
+ rc = -EINVAL;
+ goto end;
+ }
+ right_zpos_cnt++;
+ }
+
+ pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
+ DRM_DEBUG_ATOMIC("%s: zpos %d\n", dpu_crtc->name, z_pos);
+ }
+
+ for (i = 0; i < multirect_count; i++) {
+ if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
+ DPU_ERROR(
+ "multirect validation failed for planes (%d - %d)\n",
+ multirect_plane[i].r0->plane->base.id,
+ multirect_plane[i].r1->plane->base.id);
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+
+ atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);
+
+ rc = dpu_core_perf_crtc_check(crtc, crtc_state);
+ if (rc) {
+ DPU_ERROR("crtc%d failed performance check %d\n",
+ crtc->base.id, rc);
+ goto end;
+ }
+
+	/* validate source split:
+	 * use pstates sorted by stage to check planes on the same stage.
+	 * We assume that all pipes are in source split, so it is valid to
+	 * compare without taking left/right mixer placement into account.
+ */
+ for (i = 1; i < cnt; i++) {
+ struct plane_state *prv_pstate, *cur_pstate;
+ struct drm_rect left_rect, right_rect;
+ int32_t left_pid, right_pid;
+ int32_t stage;
+
+ prv_pstate = &pstates[i - 1];
+ cur_pstate = &pstates[i];
+ if (prv_pstate->stage != cur_pstate->stage)
+ continue;
+
+ stage = cur_pstate->stage;
+
+ left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
+ left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);
+
+ right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
+ right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);
+
+ if (right_rect.x1 < left_rect.x1) {
+ swap(left_pid, right_pid);
+ swap(left_rect, right_rect);
+ }
+
+		/*
+		 * - planes are enumerated in pipe-priority order such that
+		 *   planes with lower drm_id must be left-most in a shared
+		 *   blend-stage when using source split.
+		 * - planes in source split must be contiguous in width
+		 * - planes in source split must have the same dest yoff and height
+ */
+ if (right_pid < left_pid) {
+ DPU_ERROR(
+ "invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
+ stage, left_pid, right_pid);
+ rc = -EINVAL;
+ goto end;
+ } else if (right_rect.x1 != drm_rect_width(&left_rect)) {
+ DPU_ERROR("non-contiguous coordinates for src split. "
+ "stage: %d left: " DRM_RECT_FMT " right: "
+ DRM_RECT_FMT "\n", stage,
+ DRM_RECT_ARG(&left_rect),
+ DRM_RECT_ARG(&right_rect));
+ rc = -EINVAL;
+ goto end;
+ } else if (left_rect.y1 != right_rect.y1 ||
+ drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
+ DPU_ERROR("source split at stage: %d. invalid "
+ "yoff/height: left: " DRM_RECT_FMT " right: "
+ DRM_RECT_FMT "\n", stage,
+ DRM_RECT_ARG(&left_rect),
+ DRM_RECT_ARG(&right_rect));
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+
+end:
+ kfree(pstates);
+ return rc;
+}
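+
+/*
+ * Worked example for the source-split checks above (illustrative
+ * numbers): on a two-mixer, 1920-wide crtc, a left plane at dest (0,0)
+ * 960x1080 and a right plane at (960,0) 960x1080 sharing a stage pass:
+ * the rects are contiguous (right.x1 == drm_rect_width(&left)) and share
+ * y1/height, assuming the left plane also has the lower drm_id. Moving
+ * the right plane to x1 = 900 or giving it a different y offset fails
+ * with -EINVAL.
+ */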
+
+int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_encoder *enc;
+
+ trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
+
+ /*
+ * Normally we would iterate through encoder_mask in crtc state to find
+ * attached encoders. In this case, we might be disabling vblank _after_
+ * encoder_mask has been cleared.
+ *
+ * Instead, we "assign" a crtc to the encoder in enable and clear it in
+ * disable (which is also after encoder_mask is cleared). So instead of
+ * using encoder mask, we'll ask the encoder to toggle itself iff it's
+ * currently assigned to our crtc.
+ *
+ * Note also that this function cannot be called while crtc is disabled
+ * since we use drm_crtc_vblank_on/off. So we don't need to worry
+ * about the assigned crtcs being inconsistent with the current state
+ * (which means no need to worry about modeset locks).
+ */
+ list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
+ trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
+ dpu_crtc);
+
+ dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_plane_state *pstate = NULL;
+ struct dpu_crtc_mixer *m;
+
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ struct drm_display_mode *mode;
+ struct drm_framebuffer *fb;
+ struct drm_plane_state *state;
+ struct dpu_crtc_state *cstate;
+
+ int i, out_width;
+
+ dpu_crtc = s->private;
+ crtc = &dpu_crtc->base;
+
+ drm_modeset_lock_all(crtc->dev);
+ cstate = to_dpu_crtc_state(crtc->state);
+
+ mode = &crtc->state->adjusted_mode;
+ out_width = mode->hdisplay / cstate->num_mixers;
+
+ seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
+ mode->hdisplay, mode->vdisplay);
+
+ seq_puts(s, "\n");
+
+ for (i = 0; i < cstate->num_mixers; ++i) {
+ m = &cstate->mixers[i];
+ seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
+ m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
+ out_width, mode->vdisplay);
+ }
+
+ seq_puts(s, "\n");
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ pstate = to_dpu_plane_state(plane->state);
+ state = plane->state;
+
+ if (!pstate || !state)
+ continue;
+
+ seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
+ pstate->stage);
+
+ if (plane->state->fb) {
+ fb = plane->state->fb;
+
+ seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
+ fb->base.id, (char *) &fb->format->format,
+ fb->width, fb->height);
+ for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
+ seq_printf(s, "cpp[%d]:%u ",
+ i, fb->format->cpp[i]);
+ seq_puts(s, "\n\t");
+
+ seq_printf(s, "modifier:%8llu ", fb->modifier);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\t");
+ for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
+ seq_printf(s, "pitches[%d]:%8u ", i,
+ fb->pitches[i]);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\t");
+ for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
+ seq_printf(s, "offsets[%d]:%8u ", i,
+ fb->offsets[i]);
+ seq_puts(s, "\n");
+ }
+
+ seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
+ state->src_x, state->src_y, state->src_w, state->src_h);
+
+		seq_printf(s, "\tdst_x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
+ state->crtc_x, state->crtc_y, state->crtc_w,
+ state->crtc_h);
+ seq_printf(s, "\tmultirect: mode: %d index: %d\n",
+ pstate->multirect_mode, pstate->multirect_index);
+
+ seq_puts(s, "\n");
+ }
+ if (dpu_crtc->vblank_cb_count) {
+ ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
+ s64 diff_ms = ktime_to_ms(diff);
+ s64 fps = diff_ms ? div_s64(
+ dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;
+
+ seq_printf(s,
+ "vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
+ fps, dpu_crtc->vblank_cb_count,
+ ktime_to_ms(diff), dpu_crtc->play_count);
+
+ /* reset time & count for next measurement */
+ dpu_crtc->vblank_cb_count = 0;
+ dpu_crtc->vblank_cb_time = ktime_set(0, 0);
+ }
+
+ drm_modeset_unlock_all(crtc->dev);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);
+
+static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
+{
+ struct drm_crtc *crtc = (struct drm_crtc *) s->private;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+ seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
+ seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
+ seq_printf(s, "core_clk_rate: %llu\n",
+ dpu_crtc->cur_perf.core_clk_rate);
+ seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
+ seq_printf(s, "max_per_pipe_ib: %llu\n",
+ dpu_crtc->cur_perf.max_per_pipe_ib);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);
+
+static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dentry *debugfs_root;
+
+ debugfs_root = debugfs_create_dir(dpu_crtc->name,
+ crtc->dev->primary->debugfs_root);
+
+ debugfs_create_file("status", 0400,
+ debugfs_root,
+ dpu_crtc, &_dpu_debugfs_status_fops);
+ debugfs_create_file("state", 0600,
+ debugfs_root,
+ &dpu_crtc->base,
+ &dpu_crtc_debugfs_state_fops);
+
+ return 0;
+}
+#else
+static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
+{
+ return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
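+
+/*
+ * With CONFIG_DEBUG_FS enabled, the nodes created above typically land
+ * under the DRM debugfs root, e.g. (paths depend on the minor number):
+ *
+ *	# cat /sys/kernel/debug/dri/0/crtc0/status
+ *	# cat /sys/kernel/debug/dri/0/crtc0/state
+ */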
+
+static int dpu_crtc_late_register(struct drm_crtc *crtc)
+{
+ return _dpu_crtc_init_debugfs(crtc);
+}
+
+static const struct drm_crtc_funcs dpu_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = dpu_crtc_destroy,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = dpu_crtc_reset,
+ .atomic_duplicate_state = dpu_crtc_duplicate_state,
+ .atomic_destroy_state = dpu_crtc_destroy_state,
+ .atomic_print_state = dpu_crtc_atomic_print_state,
+ .late_register = dpu_crtc_late_register,
+ .verify_crc_source = dpu_crtc_verify_crc_source,
+ .set_crc_source = dpu_crtc_set_crc_source,
+ .enable_vblank = msm_crtc_enable_vblank,
+ .disable_vblank = msm_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+ .get_vblank_counter = dpu_crtc_get_vblank_counter,
+};
+
+static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
+ .atomic_disable = dpu_crtc_disable,
+ .atomic_enable = dpu_crtc_enable,
+ .atomic_check = dpu_crtc_atomic_check,
+ .atomic_begin = dpu_crtc_atomic_begin,
+ .atomic_flush = dpu_crtc_atomic_flush,
+ .get_scanout_position = dpu_crtc_get_scanout_position,
+};
+
+/* initialize crtc */
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
+ struct drm_plane *cursor)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+ struct drm_crtc *crtc = NULL;
+ struct dpu_crtc *dpu_crtc = NULL;
+ int i;
+
+ dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
+ if (!dpu_crtc)
+ return ERR_PTR(-ENOMEM);
+
+ crtc = &dpu_crtc->base;
+ crtc->dev = dev;
+
+ spin_lock_init(&dpu_crtc->spin_lock);
+ atomic_set(&dpu_crtc->frame_pending, 0);
+
+ init_completion(&dpu_crtc->frame_done_comp);
+
+ INIT_LIST_HEAD(&dpu_crtc->frame_event_list);
+
+ for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
+ INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
+ list_add(&dpu_crtc->frame_events[i].list,
+ &dpu_crtc->frame_event_list);
+ kthread_init_work(&dpu_crtc->frame_events[i].work,
+ dpu_crtc_frame_event_work);
+ }
+
+ drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
+ NULL);
+
+ drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
+
+ if (dpu_kms->catalog->dspp_count)
+ drm_crtc_enable_color_mgmt(crtc, 0, true, 0);
+
+ /* save user friendly CRTC name for later */
+ snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
+
+ /* initialize event handling */
+ spin_lock_init(&dpu_crtc->event_lock);
+
+ DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name);
+ return crtc;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
new file mode 100644
index 000000000..539b68b16
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -0,0 +1,303 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef _DPU_CRTC_H_
+#define _DPU_CRTC_H_
+
+#include <linux/kthread.h>
+#include <drm/drm_crtc.h>
+#include "dpu_kms.h"
+#include "dpu_core_perf.h"
+
+#define DPU_CRTC_NAME_SIZE 12
+
+/* define the maximum number of in-flight frame events */
+#define DPU_CRTC_FRAME_EVENT_SIZE 4
+
+/**
+ * enum dpu_crtc_client_type: crtc client type
+ * @RT_CLIENT: RealTime client like video/cmd mode display
+ * voting through apps rsc
+ * @NRT_CLIENT: Non-RealTime client like WB display
+ * voting through apps rsc
+ */
+enum dpu_crtc_client_type {
+ RT_CLIENT,
+ NRT_CLIENT,
+};
+
+/**
+ * enum dpu_crtc_smmu_state: smmu state
+ * @ATTACHED: all the context banks are attached.
+ * @DETACHED: all the context banks are detached.
+ * @ATTACH_ALL_REQ: transient state of attaching context banks.
+ * @DETACH_ALL_REQ: transient state of detaching context banks.
+ */
+enum dpu_crtc_smmu_state {
+ ATTACHED = 0,
+ DETACHED,
+ ATTACH_ALL_REQ,
+ DETACH_ALL_REQ,
+};
+
+/**
+ * enum dpu_crtc_smmu_state_transition_type: state transition type
+ * @NONE: no pending state transitions
+ * @PRE_COMMIT: state transitions should be done before processing the commit
+ * @POST_COMMIT: state transitions to be done after processing the commit.
+ */
+enum dpu_crtc_smmu_state_transition_type {
+ NONE,
+ PRE_COMMIT,
+ POST_COMMIT
+};
+
+/**
+ * struct dpu_crtc_smmu_state_data: stores the smmu state and transition type
+ * @state: current state of smmu context banks
+ * @transition_type: transition request type
+ * @transition_error: whether there is error while transitioning the state
+ */
+struct dpu_crtc_smmu_state_data {
+ uint32_t state;
+ uint32_t transition_type;
+ uint32_t transition_error;
+};
+
+/**
+ * enum dpu_crtc_crc_source: CRC source
+ * @DPU_CRTC_CRC_SOURCE_NONE: no source set
+ * @DPU_CRTC_CRC_SOURCE_LAYER_MIXER: CRC in layer mixer
+ * @DPU_CRTC_CRC_SOURCE_ENCODER: CRC in encoder
+ * @DPU_CRTC_CRC_SOURCE_INVALID: Invalid source
+ */
+enum dpu_crtc_crc_source {
+ DPU_CRTC_CRC_SOURCE_NONE = 0,
+ DPU_CRTC_CRC_SOURCE_LAYER_MIXER,
+ DPU_CRTC_CRC_SOURCE_ENCODER,
+ DPU_CRTC_CRC_SOURCE_MAX,
+ DPU_CRTC_CRC_SOURCE_INVALID = -1
+};
+
+/**
+ * struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
+ * @hw_lm: LM HW Driver context
+ * @lm_ctl: CTL Path HW driver context
+ * @hw_dspp: DSPP HW driver context
+ * @mixer_op_mode: mixer blending operation mode
+ */
+struct dpu_crtc_mixer {
+ struct dpu_hw_mixer *hw_lm;
+ struct dpu_hw_ctl *lm_ctl;
+ struct dpu_hw_dspp *hw_dspp;
+ u32 mixer_op_mode;
+};
+
+/**
+ * struct dpu_crtc_frame_event: stores crtc frame event for crtc processing
+ * @work: base work structure
+ * @crtc: Pointer to crtc handling this event
+ * @list: event list
+ * @ts: timestamp at queue entry
+ * @event: event identifier
+ */
+struct dpu_crtc_frame_event {
+ struct kthread_work work;
+ struct drm_crtc *crtc;
+ struct list_head list;
+ ktime_t ts;
+ u32 event;
+};
+
+/*
+ * Maximum number of free event structures to cache
+ */
+#define DPU_CRTC_MAX_EVENT_COUNT 16
+
+/**
+ * struct dpu_crtc - virtualized CRTC data structure
+ * @base : Base drm crtc structure
+ * @name : ASCII description of this crtc
+ * @event : Pointer to last received drm vblank event. If there is a
+ * pending vblank event, this will be non-null.
+ * @vsync_count : Running count of received vsync events
+ * @vblank_cb_count : count of vblank callbacks since last reset
+ * @play_count : frame count between crtc enable and disable
+ * @vblank_cb_time : ktime at vblank count reset
+ * @enabled : whether the DPU CRTC is currently enabled. Updated in the
+ * commit-thread, not at state-swap time which is earlier, so it is
+ * safe to make decisions on during VBLANK on/off work
+ * @feature_list : list of color processing features supported on a crtc
+ * @active_list : list of color processing features that are active
+ * @dirty_list : list of color processing features that are dirty
+ * @ad_dirty: list containing ad properties that are dirty
+ * @ad_active: list containing ad properties that are active
+ * @frame_pending : Whether or not an update is pending
+ * @frame_events : static allocation of in-flight frame events
+ * @frame_event_list : available frame event list
+ * @spin_lock : spin lock for frame event, transaction status, etc...
+ * @frame_done_comp : for frame_event_done synchronization
+ * @event_lock : Spinlock around event handling code
+ * @cur_perf : current performance committed to clock/bandwidth driver
+ * @smmu_state : smmu state and transition bookkeeping
+ */
+struct dpu_crtc {
+ struct drm_crtc base;
+ char name[DPU_CRTC_NAME_SIZE];
+
+ struct drm_pending_vblank_event *event;
+ u32 vsync_count;
+
+ u32 vblank_cb_count;
+ u64 play_count;
+ ktime_t vblank_cb_time;
+ bool enabled;
+
+ struct list_head feature_list;
+ struct list_head active_list;
+ struct list_head dirty_list;
+ struct list_head ad_dirty;
+ struct list_head ad_active;
+
+ atomic_t frame_pending;
+ struct dpu_crtc_frame_event frame_events[DPU_CRTC_FRAME_EVENT_SIZE];
+ struct list_head frame_event_list;
+ spinlock_t spin_lock;
+ struct completion frame_done_comp;
+
+ /* for handling internal event thread */
+ spinlock_t event_lock;
+
+ struct dpu_core_perf_params cur_perf;
+
+ struct dpu_crtc_smmu_state_data smmu_state;
+};
+
+#define to_dpu_crtc(x) container_of(x, struct dpu_crtc, base)
+
+/**
+ * struct dpu_crtc_state - dpu container for atomic crtc state
+ * @base: Base drm crtc state structure
+ * @bw_control : true if bw/clk controlled by core bw/clk properties
+ * @bw_split_vote : true if bw controlled by llcc/dram bw properties
+ * @lm_bounds : LM boundaries based on current mode full resolution, no ROI.
+ * Origin top left of CRTC.
+ * @input_fence_timeout_ns : Cached input fence timeout, in ns
+ * @new_perf: new performance state being requested
+ * @num_mixers : Number of mixers in use
+ * @mixers : List of active mixers
+ * @num_ctls : Number of ctl paths in use
+ * @hw_ctls : List of active ctl paths
+ * @crc_source : CRC source
+ * @crc_frame_skip_count: Number of frames skipped before getting CRC
+ */
+struct dpu_crtc_state {
+ struct drm_crtc_state base;
+
+ bool bw_control;
+ bool bw_split_vote;
+ struct drm_rect lm_bounds[CRTC_DUAL_MIXERS];
+
+ uint64_t input_fence_timeout_ns;
+
+ struct dpu_core_perf_params new_perf;
+
+ /* HW Resources reserved for the crtc */
+ u32 num_mixers;
+ struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
+
+ u32 num_ctls;
+ struct dpu_hw_ctl *hw_ctls[CRTC_DUAL_MIXERS];
+
+ enum dpu_crtc_crc_source crc_source;
+ int crc_frame_skip_count;
+};
+
+#define to_dpu_crtc_state(x) \
+ container_of(x, struct dpu_crtc_state, base)
+
+/**
+ * dpu_crtc_frame_pending - return the number of pending frames
+ * @crtc: Pointer to drm crtc object
+ */
+static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
+{
+ return crtc ? atomic_read(&to_dpu_crtc(crtc)->frame_pending) : -EINVAL;
+}
+
+/**
+ * dpu_crtc_vblank - enable or disable vblanks for this crtc
+ * @crtc: Pointer to drm crtc object
+ * @en: true to enable vblanks, false to disable
+ */
+int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
+
+/**
+ * dpu_crtc_vblank_callback - called on vblank irq, issues completion events
+ * @crtc: Pointer to drm crtc object
+ */
+void dpu_crtc_vblank_callback(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
+ * @crtc: Pointer to drm crtc object
+ */
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_complete_commit - callback signalling completion of current commit
+ * @crtc: Pointer to drm crtc object
+ */
+void dpu_crtc_complete_commit(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_init - create a new crtc object
+ * @dev: dpu device
+ * @plane: base plane
+ * @cursor: cursor plane
+ * Return: new crtc object or error
+ */
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
+ struct drm_plane *cursor);
+
+/**
+ * dpu_crtc_register_custom_event - api for enabling/disabling crtc event
+ * @kms: Pointer to dpu_kms
+ * @crtc_drm: Pointer to crtc object
+ * @event: Event that the client is interested in
+ * @en: Flag to enable/disable the event
+ */
+int dpu_crtc_register_custom_event(struct dpu_kms *kms,
+ struct drm_crtc *crtc_drm, u32 event, bool en);
+
+/**
+ * dpu_crtc_get_intf_mode - get interface mode of the given crtc
+ * @crtc: Pointer to crtc
+ */
+enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_get_client_type - check the crtc type: RT, NRT, etc.
+ * @crtc: Pointer to crtc
+ */
+static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
+ struct drm_crtc *crtc)
+{
+ return crtc && crtc->state ? RT_CLIENT : NRT_CLIENT;
+}
+
+#endif /* _DPU_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
new file mode 100644
index 000000000..b0eb881f8
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -0,0 +1,2556 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/debugfs.h>
+#include <linux/kthread.h>
+#include <linux/seq_file.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_file.h>
+#include <drm/drm_probe_helper.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_intf.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_dspp.h"
+#include "dpu_hw_dsc.h"
+#include "dpu_hw_merge3d.h"
+#include "dpu_formats.h"
+#include "dpu_encoder_phys.h"
+#include "dpu_crtc.h"
+#include "dpu_trace.h"
+#include "dpu_core_irq.h"
+#include "disp/msm_disp_snapshot.h"
+
+#define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
+ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
+ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+/*
+ * Two, to anticipate panels that can do cmd/vid dynamic switching.
+ * The plan is to create all possible physical encoder types and switch
+ * between them at runtime.
+ */
+#define NUM_PHYS_ENCODER_TYPES 2
+
+#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
+ (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
+
+#define MAX_CHANNELS_PER_ENC 2
+
+#define IDLE_SHORT_TIMEOUT 1
+
+#define MAX_HDISPLAY_SPLIT 1080
+
+/* timeout in frames waiting for frame done */
+#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
+
+/**
+ * enum dpu_enc_rc_events - events for resource control state machine
+ * @DPU_ENC_RC_EVENT_KICKOFF:
+ * This event happens at NORMAL priority.
+ * Event that signals the start of the transfer. When this event is
+ * received, enable MDP/DSI core clocks. Regardless of the previous
+ * state, the resource should be in ON state at the end of this event.
+ * @DPU_ENC_RC_EVENT_FRAME_DONE:
+ * This event happens at INTERRUPT level.
+ * Event signals the end of the data transfer after the PP FRAME_DONE
+ * event. At the end of this event, a delayed work is scheduled to go to
+ * IDLE_PC state after IDLE_TIMEOUT time.
+ * @DPU_ENC_RC_EVENT_PRE_STOP:
+ * This event happens at NORMAL priority.
+ *	This event, when received during the ON state, leaves the RC state
+ * in the PRE_OFF state. It should be followed by the STOP event as
+ * part of encoder disable.
+ * If received during IDLE or OFF states, it will do nothing.
+ * @DPU_ENC_RC_EVENT_STOP:
+ * This event happens at NORMAL priority.
+ * When this event is received, disable all the MDP/DSI core clocks, and
+ * disable IRQs. It should be called from the PRE_OFF or IDLE states.
+ * IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
+ * PRE_OFF is expected when PRE_STOP was executed during the ON state.
+ * Resource state should be in OFF at the end of the event.
+ * @DPU_ENC_RC_EVENT_ENTER_IDLE:
+ * This event happens at NORMAL priority from a work item.
+ * Event signals that there were no frame updates for IDLE_TIMEOUT time.
+ * This would disable MDP/DSI core clocks and change the resource state
+ * to IDLE.
+ */
+enum dpu_enc_rc_events {
+ DPU_ENC_RC_EVENT_KICKOFF = 1,
+ DPU_ENC_RC_EVENT_FRAME_DONE,
+ DPU_ENC_RC_EVENT_PRE_STOP,
+ DPU_ENC_RC_EVENT_STOP,
+ DPU_ENC_RC_EVENT_ENTER_IDLE
+};
+
+/*
+ * enum dpu_enc_rc_states - states that the resource control maintains
+ * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
+ * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
+ * @DPU_ENC_RC_STATE_ON: Resource is in ON state
+ * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
+ */
+enum dpu_enc_rc_states {
+ DPU_ENC_RC_STATE_OFF,
+ DPU_ENC_RC_STATE_PRE_OFF,
+ DPU_ENC_RC_STATE_ON,
+ DPU_ENC_RC_STATE_IDLE
+};
+
+/**
+ * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
+ * encoders. Virtual encoder manages one "logical" display. Physical
+ * encoders manage one intf block, tied to a specific panel/sub-panel.
+ * Virtual encoder defers as much as possible to the physical encoders.
+ * Virtual encoder registers itself with the DRM Framework as the encoder.
+ * @base: drm_encoder base class for registration with DRM
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enabled: True if the encoder is active, protected by enc_lock
+ * @num_phys_encs: Actual number of physical encoders contained.
+ * @phys_encs: Container of physical encoders managed.
+ * @cur_master:		Pointer to the current master in this mode. Optimization:
+ *			only valid after enable, cleared at disable.
+ * @cur_slave:		As above but for the slave encoder.
+ * @hw_pp:		Handles to the pingpong blocks used for the display. The
+ *			number of pingpong blocks used can differ from num_phys_encs.
+ * @hw_dsc: Handle to the DSC blocks used for the display.
+ * @dsc_mask: Bitmask of used DSC blocks.
+ * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
+ * for partial update right-only cases, such as pingpong
+ * split where virtual pingpong does not generate IRQs
+ * @crtc: Pointer to the currently assigned crtc. Normally you
+ * would use crtc->state->encoder_mask to determine the
+ * link between encoder/crtc. However in this case we need
+ * to track crtc in the disable() hook which is called
+ * _after_ encoder_mask is cleared.
+ * @connector: If a mode is set, cached pointer to the active connector
+ * @debugfs_root: Debug file system root file node
+ * @enc_lock: Lock around physical encoder
+ * create/destroy/enable/disable
+ * @frame_busy_mask:		Bitmask tracking which phys_encs are still busy
+ *				processing the current command.
+ *				Bit0 = phys_encs[0], etc.
+ * @crtc_frame_event_cb: callback handler for frame event
+ * @crtc_frame_event_cb_data: callback handler private data
+ * @frame_done_timeout_ms: frame done timeout in ms
+ * @frame_done_timer: watchdog timer for frame done event
+ * @vsync_event_timer: vsync timer
+ * @disp_info: local copy of msm_display_info struct
+ * @idle_pc_supported:		indicates whether idle power collapse is supported
+ * @rc_lock: resource control mutex lock to protect
+ * virt encoder over various state changes
+ * @rc_state: resource controller state
+ * @delayed_off_work: delayed worker to schedule disabling of
+ * clks and resources after IDLE_TIMEOUT time.
+ * @vsync_event_work: worker to handle vsync event for autorefresh
+ * @topology: topology of the display
+ * @idle_timeout:	idle timeout duration in milliseconds
+ * @wide_bus_en:	whether wide-bus mode is enabled for this display
+ * @dsc:		drm_dsc_config pointer, for DSC-enabled encoders
+ */
+struct dpu_encoder_virt {
+ struct drm_encoder base;
+ spinlock_t enc_spinlock;
+
+ bool enabled;
+
+ unsigned int num_phys_encs;
+ struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
+ struct dpu_encoder_phys *cur_master;
+ struct dpu_encoder_phys *cur_slave;
+ struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
+
+ unsigned int dsc_mask;
+
+ bool intfs_swapped;
+
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+
+ struct dentry *debugfs_root;
+ struct mutex enc_lock;
+ DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
+ void (*crtc_frame_event_cb)(void *, u32 event);
+ void *crtc_frame_event_cb_data;
+
+ atomic_t frame_done_timeout_ms;
+ struct timer_list frame_done_timer;
+ struct timer_list vsync_event_timer;
+
+ struct msm_display_info disp_info;
+
+ bool idle_pc_supported;
+ struct mutex rc_lock;
+ enum dpu_enc_rc_states rc_state;
+ struct delayed_work delayed_off_work;
+ struct kthread_work vsync_event_work;
+ struct msm_display_topology topology;
+
+ u32 idle_timeout;
+
+ bool wide_bus_en;
+
+ /* DSC configuration */
+ struct drm_dsc_config *dsc;
+};
+
+#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
+
+static u32 dither_matrix[DITHER_MATRIX_SZ] = {
+ 15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
+};
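+
+/*
+ * The table above is a 4x4 ordered-dither (Bayer-style) matrix, stored
+ * row by row:
+ *
+ *	15  7 13  5
+ *	 3 11  1  9
+ *	12  4 14  6
+ *	 0  8  2 10
+ */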
+
+
+bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
+{
+ const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ return dpu_enc->wide_bus_en;
+}
+
+int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i, num_intf = 0;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->hw_intf && phys->hw_intf->ops.setup_misr
+ && phys->hw_intf->ops.collect_misr)
+ num_intf++;
+ }
+
+ return num_intf;
+}
+
+void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+
+ int i;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr)
+ continue;
+
+ phys->hw_intf->ops.setup_misr(phys->hw_intf);
+ }
+}
+
+int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos)
+{
+ struct dpu_encoder_virt *dpu_enc;
+
+ int i, rc = 0, entries_added = 0;
+
+ if (!drm_enc->crtc) {
+ DRM_ERROR("no crtc found for encoder %d\n", drm_enc->index);
+ return -EINVAL;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys->hw_intf || !phys->hw_intf->ops.collect_misr)
+ continue;
+
+ rc = phys->hw_intf->ops.collect_misr(phys->hw_intf, &crcs[pos + entries_added]);
+ if (rc)
+ return rc;
+ entries_added++;
+ }
+
+ return entries_added;
+}
+
+static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
+{
+ struct dpu_hw_dither_cfg dither_cfg = { 0 };
+
+ if (!hw_pp->ops.setup_dither)
+ return;
+
+ switch (bpc) {
+ case 6:
+ dither_cfg.c0_bitdepth = 6;
+ dither_cfg.c1_bitdepth = 6;
+ dither_cfg.c2_bitdepth = 6;
+ dither_cfg.c3_bitdepth = 6;
+ dither_cfg.temporal_en = 0;
+ break;
+ default:
+ hw_pp->ops.setup_dither(hw_pp, NULL);
+ return;
+ }
+
+ memcpy(&dither_cfg.matrix, dither_matrix,
+ sizeof(u32) * DITHER_MATRIX_SZ);
+
+ hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
+}
+
+static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode)
+{
+ switch (intf_mode) {
+ case INTF_MODE_VIDEO:
+ return "INTF_MODE_VIDEO";
+ case INTF_MODE_CMD:
+ return "INTF_MODE_CMD";
+ case INTF_MODE_WB_BLOCK:
+ return "INTF_MODE_WB_BLOCK";
+ case INTF_MODE_WB_LINE:
+ return "INTF_MODE_WB_LINE";
+ default:
+ return "INTF_MODE_UNKNOWN";
+ }
+}
+
+void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx)
+{
+ DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n",
+ DRMID(phys_enc->parent),
+ dpu_encoder_helper_get_intf_type(phys_enc->intf_mode),
+ phys_enc->intf_idx - INTF_0, phys_enc->wb_idx - WB_0,
+ phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
+
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc,
+ DPU_ENCODER_FRAME_EVENT_ERROR);
+}
+
+static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
+ u32 irq_idx, struct dpu_encoder_wait_info *info);
+
+int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+ int irq,
+ void (*func)(void *arg, int irq_idx),
+ struct dpu_encoder_wait_info *wait_info)
+{
+ u32 irq_status;
+ int ret;
+
+ if (!wait_info) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+ /* note: do master / slave checking outside */
+
+ /* return EWOULDBLOCK since we know the wait isn't necessary */
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DRM_ERROR("encoder is disabled id=%u, callback=%ps, irq=%d\n",
+ DRMID(phys_enc->parent), func,
+ irq);
+ return -EWOULDBLOCK;
+ }
+
+ if (irq < 0) {
+ DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
+ DRMID(phys_enc->parent), func);
+ return 0;
+ }
+
+ DRM_DEBUG_KMS("id=%u, callback=%ps, irq=%d, pp=%d, pending_cnt=%d\n",
+ DRMID(phys_enc->parent), func,
+ irq, phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+
+ ret = dpu_encoder_helper_wait_event_timeout(
+ DRMID(phys_enc->parent),
+ irq,
+ wait_info);
+
+ if (ret <= 0) {
+ irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq);
+ if (irq_status) {
+ unsigned long flags;
+
+ DRM_DEBUG_KMS("irq not triggered id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
+ DRMID(phys_enc->parent), func,
+ irq,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ local_irq_save(flags);
+ func(phys_enc, irq);
+ local_irq_restore(flags);
+ ret = 0;
+ } else {
+ ret = -ETIMEDOUT;
+ DRM_DEBUG_KMS("irq timeout id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
+ DRMID(phys_enc->parent), func,
+ irq,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ }
+ } else {
+ ret = 0;
+ trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
+ func, irq,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ }
+
+ return ret;
+}
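+
+/*
+ * A typical caller fills dpu_encoder_wait_info with a wait queue, an
+ * atomic pending counter and a timeout before calling the helper above.
+ * Sketch (field and symbol names are illustrative of the physical
+ * encoders in this driver, e.g. a command-mode pingpong-done wait):
+ *
+ *	struct dpu_encoder_wait_info wait = {
+ *		.wq = &phys_enc->pending_kickoff_wq,
+ *		.atomic_cnt = &phys_enc->pending_kickoff_cnt,
+ *		.timeout_ms = KICKOFF_TIMEOUT_MS,
+ *	};
+ *	ret = dpu_encoder_helper_wait_for_irq(phys_enc, irq,
+ *					      done_irq_cb, &wait);
+ */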
+
+int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL;
+ return phys ? atomic_read(&phys->vsync_cnt) : 0;
+}
+
+int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ int linecount = 0;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ phys = dpu_enc ? dpu_enc->cur_master : NULL;
+
+ if (phys && phys->ops.get_line_count)
+ linecount = phys->ops.get_line_count(phys);
+
+ return linecount;
+}
+
+static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i = 0;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ mutex_lock(&dpu_enc->enc_lock);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->ops.destroy) {
+ phys->ops.destroy(phys);
+ --dpu_enc->num_phys_encs;
+ dpu_enc->phys_encs[i] = NULL;
+ }
+ }
+
+ if (dpu_enc->num_phys_encs)
+ DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
+ dpu_enc->num_phys_encs);
+ dpu_enc->num_phys_encs = 0;
+ mutex_unlock(&dpu_enc->enc_lock);
+
+ drm_encoder_cleanup(drm_enc);
+ mutex_destroy(&dpu_enc->enc_lock);
+}
+
+void dpu_encoder_helper_split_config(
+ struct dpu_encoder_phys *phys_enc,
+ enum dpu_intf interface)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct split_pipe_cfg cfg = { 0 };
+ struct dpu_hw_mdp *hw_mdptop;
+ struct msm_display_info *disp_info;
+
+ if (!phys_enc->hw_mdptop || !phys_enc->parent) {
+ DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+ hw_mdptop = phys_enc->hw_mdptop;
+ disp_info = &dpu_enc->disp_info;
+
+ if (disp_info->intf_type != DRM_MODE_ENCODER_DSI)
+ return;
+
+	/*
+	 * Disable split modes since the encoder will be operating as the only
+	 * encoder, either for the entire use case in the case of, for example,
+	 * single DSI, or for this frame in the case of left/right-only partial
+	 * update.
+	 */
+ if (phys_enc->split_role == ENC_ROLE_SOLO) {
+ if (hw_mdptop->ops.setup_split_pipe)
+ hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+ return;
+ }
+
+ cfg.en = true;
+ cfg.mode = phys_enc->intf_mode;
+ cfg.intf = interface;
+
+ if (cfg.en && phys_enc->ops.needs_single_flush &&
+ phys_enc->ops.needs_single_flush(phys_enc))
+ cfg.split_flush_en = true;
+
+ if (phys_enc->split_role == ENC_ROLE_MASTER) {
+ DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);
+
+ if (hw_mdptop->ops.setup_split_pipe)
+ hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+ }
+}
+
+bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ int i, intf_count = 0, num_dsc = 0;
+
+ for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
+ if (dpu_enc->phys_encs[i])
+ intf_count++;
+
+	/* See dpu_encoder_get_topology(); we only support the 2:2:1 topology */
+ if (dpu_enc->dsc)
+ num_dsc = 2;
+
+ return (num_dsc > 0) && (num_dsc > intf_count);
+}
+
+static struct msm_display_topology dpu_encoder_get_topology(
+ struct dpu_encoder_virt *dpu_enc,
+ struct dpu_kms *dpu_kms,
+ struct drm_display_mode *mode)
+{
+ struct msm_display_topology topology = {0};
+ int i, intf_count = 0;
+
+ for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
+ if (dpu_enc->phys_encs[i])
+ intf_count++;
+
+	/* Datapath topology selection
+	 *
+	 * Dual display
+	 * 2 LM, 2 INTF (split display using 2 interfaces)
+	 *
+	 * Single display
+	 * 1 LM, 1 INTF
+	 * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
+	 *
+	 * Color blocks are added only to the primary interface, if available
+	 * in sufficient number.
+	 */
+ if (intf_count == 2)
+ topology.num_lm = 2;
+ else if (!dpu_kms->catalog->caps->has_3d_merge)
+ topology.num_lm = 1;
+ else
+ topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
+
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) {
+ if (dpu_kms->catalog->dspp &&
+ (dpu_kms->catalog->dspp_count >= topology.num_lm))
+ topology.num_dspp = topology.num_lm;
+ }
+
+ topology.num_enc = 0;
+ topology.num_intf = intf_count;
+
+ if (dpu_enc->dsc) {
+		/* In the case of Display Stream Compression (DSC), we would use
+		 * 2 encoders, 2 layer mixers and 1 interface;
+		 * this is power-optimal and can drive up to (and including) 4k
+		 * screens.
+		 */
+ topology.num_enc = 2;
+ topology.num_dsc = 2;
+ topology.num_intf = 1;
+ topology.num_lm = 2;
+ }
+
+ return topology;
+}
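+
+/*
+ * Worked examples of the selection above (illustrative): a single-DSI
+ * 1080p panel gets { num_lm = 1, num_intf = 1 }; a single interface
+ * wider than MAX_HDISPLAY_SPLIT on hardware with 3D merge gets
+ * { num_lm = 2, num_intf = 1 }; and enabling DSC forces
+ * { num_lm = 2, num_enc = 2, num_dsc = 2, num_intf = 1 } regardless.
+ */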
+
+static int dpu_encoder_virt_atomic_check(
+ struct drm_encoder *drm_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct drm_display_mode *adj_mode;
+ struct msm_display_topology topology;
+ struct dpu_global_state *global_state;
+ int i = 0;
+ int ret = 0;
+
+ if (!drm_enc || !crtc_state || !conn_state) {
+ DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
+ drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
+ return -EINVAL;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+ adj_mode = &crtc_state->adjusted_mode;
+ global_state = dpu_kms_get_global_state(crtc_state->state);
+ if (IS_ERR(global_state))
+ return PTR_ERR(global_state);
+
+ trace_dpu_enc_atomic_check(DRMID(drm_enc));
+
+ /* perform atomic check on the first physical encoder (master) */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->ops.atomic_check)
+ ret = phys->ops.atomic_check(phys, crtc_state,
+ conn_state);
+ if (ret) {
+ DPU_ERROR_ENC(dpu_enc,
+ "mode unsupported, phys idx %d\n", i);
+ break;
+ }
+ }
+
+ topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
+
+ /* Reserve dynamic resources now. */
+ if (!ret) {
+ /*
+		 * Release and allocate resources on every modeset.
+		 * Don't allocate when active is false.
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ dpu_rm_release(global_state, drm_enc);
+
+ if (!crtc_state->active_changed || crtc_state->enable)
+ ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
+ drm_enc, crtc_state, topology);
+ }
+ }
+
+ trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
+
+ return ret;
+}
+
+static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
+ struct msm_display_info *disp_info)
+{
+ struct dpu_vsync_source_cfg vsync_cfg = { 0 };
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_hw_mdp *hw_mdptop;
+ struct drm_encoder *drm_enc;
+ int i;
+
+ if (!dpu_enc || !disp_info) {
+ DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
+ dpu_enc != NULL, disp_info != NULL);
+ return;
+ } else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
+ DPU_ERROR("invalid num phys enc %d/%d\n",
+ dpu_enc->num_phys_encs,
+ (int) ARRAY_SIZE(dpu_enc->hw_pp));
+ return;
+ }
+
+ drm_enc = &dpu_enc->base;
+	/* these pointers are checked in virt_enable_helper */
+ priv = drm_enc->dev->dev_private;
+
+ dpu_kms = to_dpu_kms(priv->kms);
+ hw_mdptop = dpu_kms->hw_mdp;
+ if (!hw_mdptop) {
+ DPU_ERROR("invalid mdptop\n");
+ return;
+ }
+
+ if (hw_mdptop->ops.setup_vsync_source &&
+ disp_info->is_cmd_mode) {
+ for (i = 0; i < dpu_enc->num_phys_encs; i++)
+ vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
+
+ vsync_cfg.pp_count = dpu_enc->num_phys_encs;
+ if (disp_info->is_te_using_watchdog_timer)
+ vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
+ else
+ vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
+
+ hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
+ }
+}
+
+static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->ops.irq_control)
+ phys->ops.irq_control(phys, enable);
+ }
+}
+
+static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
+ bool enable)
+{
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_encoder_virt *dpu_enc;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
+
+ if (!dpu_enc->cur_master) {
+ DPU_ERROR("encoder master not set\n");
+ return;
+ }
+
+ if (enable) {
+ /* enable DPU core clks */
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ /* enable all the irq */
+ _dpu_encoder_irq_control(drm_enc, true);
+
+ } else {
+ /* disable all the irq */
+ _dpu_encoder_irq_control(drm_enc, false);
+
+ /* disable DPU core clks */
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+ }
+}
+
+static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
+ u32 sw_event)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ bool is_vid_mode = false;
+
+ if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
+ DPU_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ is_vid_mode = !dpu_enc->disp_info.is_cmd_mode;
+
+	/*
+	 * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
+	 * STOP events and return early for other events (i.e. wb display).
+	 */
+ if (!dpu_enc->idle_pc_supported &&
+ (sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
+ sw_event != DPU_ENC_RC_EVENT_STOP &&
+ sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
+ return 0;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
+ dpu_enc->rc_state, "begin");
+
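+	/*
+	 * Transitions handled by the cases below: OFF/IDLE -> ON on KICKOFF;
+	 * ON -> IDLE on ENTER_IDLE (queued from FRAME_DONE after
+	 * idle_timeout); ON (or IDLE in video mode) -> PRE_OFF on PRE_STOP;
+	 * PRE_OFF/IDLE -> OFF on STOP.
+	 */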
+ switch (sw_event) {
+ case DPU_ENC_RC_EVENT_KICKOFF:
+ /* cancel delayed off work, if any */
+ if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
+ DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
+ sw_event);
+
+ mutex_lock(&dpu_enc->rc_lock);
+
+ /* return if the resource control is already in ON state */
+ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
+ DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in ON state\n",
+ DRMID(drm_enc), sw_event);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ } else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
+ dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
+ DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in state %d\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return -EINVAL;
+ }
+
+ if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
+ _dpu_encoder_irq_control(drm_enc, true);
+ else
+ _dpu_encoder_resource_control_helper(drm_enc, true);
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "kickoff");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ case DPU_ENC_RC_EVENT_FRAME_DONE:
+		/*
+		 * The mutex is not taken as this event happens in interrupt
+		 * context, and locking is not required because the other
+		 * events like KICKOFF and STOP do a wait-for-idle before
+		 * executing the resource control.
+		 */
+ if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
+ DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
+ return -EINVAL;
+ }
+
+ /*
+ * schedule off work item only when there are no
+ * frames pending
+ */
+ if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
+ DRM_DEBUG_KMS("id:%d skip schedule work\n",
+ DRMID(drm_enc));
+ return 0;
+ }
+
+ queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
+ msecs_to_jiffies(dpu_enc->idle_timeout));
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "frame done");
+ break;
+
+ case DPU_ENC_RC_EVENT_PRE_STOP:
+ /* cancel delayed off work, if any */
+ if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
+ DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
+ sw_event);
+
+ mutex_lock(&dpu_enc->rc_lock);
+
+ if (is_vid_mode &&
+ dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
+ _dpu_encoder_irq_control(drm_enc, true);
+ }
+ /* skip if is already OFF or IDLE, resources are off already */
+ else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
+ dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
+ DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ }
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "pre stop");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ case DPU_ENC_RC_EVENT_STOP:
+ mutex_lock(&dpu_enc->rc_lock);
+
+ /* return if the resource control is already in OFF state */
+ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
+ DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
+ DRMID(drm_enc), sw_event);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ } else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
+ DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return -EINVAL;
+ }
+
+		/*
+		 * We expect to arrive here only from the IDLE or PRE_OFF
+		 * state; in the IDLE state the resources are already disabled.
+		 */
+ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
+ _dpu_encoder_resource_control_helper(drm_enc, false);
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "stop");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ case DPU_ENC_RC_EVENT_ENTER_IDLE:
+ mutex_lock(&dpu_enc->rc_lock);
+
+ if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
+ DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ }
+
+ /*
+ * if we are in ON but a frame was just kicked off,
+ * ignore the IDLE event, it's probably a stale timer event
+ */
+ if (dpu_enc->frame_busy_mask[0]) {
+ DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ }
+
+ if (is_vid_mode)
+ _dpu_encoder_irq_control(drm_enc, false);
+ else
+ _dpu_encoder_resource_control_helper(drm_enc, false);
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "idle");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ default:
+ DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
+ sw_event);
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "error");
+ break;
+ }
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "end");
+ return 0;
+}
+
+void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
+ struct drm_writeback_job *job)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->ops.prepare_wb_job)
+ phys->ops.prepare_wb_job(phys, job);
+	}
+}
+
+void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
+ struct drm_writeback_job *job)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->ops.cleanup_wb_job)
+ phys->ops.cleanup_wb_job(phys, job);
+	}
+}
+
+static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_crtc_state *cstate;
+ struct dpu_global_state *global_state;
+ struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
+ struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
+ int num_lm, num_ctl, num_pp, num_dsc;
+ unsigned int dsc_mask = 0;
+ int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ global_state = dpu_kms_get_existing_global_state(dpu_kms);
+ if (IS_ERR_OR_NULL(global_state)) {
+ DPU_ERROR("Failed to get global state");
+ return;
+ }
+
+ trace_dpu_enc_mode_set(DRMID(drm_enc));
+
+	/* Query resources that were reserved in the atomic check step. */
+ num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
+ ARRAY_SIZE(hw_pp));
+ num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
+ num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
+ dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
+ ARRAY_SIZE(hw_dspp));
+
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
+ dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
+ : NULL;
+
+ if (dpu_enc->dsc) {
+ num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_DSC,
+ hw_dsc, ARRAY_SIZE(hw_dsc));
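+		/* record the assigned DSC blocks as a bitmask relative to DSC_0 */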
+ for (i = 0; i < num_dsc; i++) {
+ dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
+ dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
+ }
+ }
+
+ dpu_enc->dsc_mask = dsc_mask;
+
+ cstate = to_dpu_crtc_state(crtc_state);
+
+ for (i = 0; i < num_lm; i++) {
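+		/* if there are fewer CTLs than LMs, reuse the last CTL */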
+ int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
+
+ cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
+ cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
+ cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
+ }
+
+ cstate->num_mixers = num_lm;
+
+ dpu_enc->connector = conn_state->connector;
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!dpu_enc->hw_pp[i]) {
+ DPU_ERROR_ENC(dpu_enc,
+ "no pp block assigned at idx: %d\n", i);
+ return;
+ }
+
+ if (!hw_ctl[i]) {
+ DPU_ERROR_ENC(dpu_enc,
+ "no ctl block assigned at idx: %d\n", i);
+ return;
+ }
+
+ phys->hw_pp = dpu_enc->hw_pp[i];
+ phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
+
+ phys->cached_mode = crtc_state->adjusted_mode;
+ if (phys->ops.atomic_mode_set)
+ phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
+ }
+}
+
+static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i;
+
+ if (!drm_enc || !drm_enc->dev) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ if (!dpu_enc || !dpu_enc->cur_master) {
+ DPU_ERROR("invalid dpu encoder/master\n");
+ return;
+ }
+
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_TMDS &&
+ dpu_enc->cur_master->hw_mdptop &&
+ dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
+ dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
+ dpu_enc->cur_master->hw_mdptop);
+
+ _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
+
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
+ !WARN_ON(dpu_enc->num_phys_encs == 0)) {
+ unsigned bpc = dpu_enc->connector->display_info.bpc;
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ if (!dpu_enc->hw_pp[i])
+ continue;
+ _dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
+ }
+ }
+}
+
+void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ mutex_lock(&dpu_enc->enc_lock);
+
+ if (!dpu_enc->enabled)
+ goto out;
+
+ if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
+ dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
+ if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
+ dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
+
+ _dpu_encoder_virt_enable_helper(drm_enc);
+
+out:
+ mutex_unlock(&dpu_enc->enc_lock);
+}
+
+static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int ret = 0;
+ struct drm_display_mode *cur_mode = NULL;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ mutex_lock(&dpu_enc->enc_lock);
+ cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
+
+ trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
+ cur_mode->vdisplay);
+
+ /* always enable slave encoder before master */
+ if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
+ dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);
+
+ if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
+ dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
+
+ ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
+ if (ret) {
+ DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
+ ret);
+ goto out;
+ }
+
+ _dpu_encoder_virt_enable_helper(drm_enc);
+
+ dpu_enc->enabled = true;
+
+out:
+ mutex_unlock(&dpu_enc->enc_lock);
+}
+
+static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i = 0;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ mutex_lock(&dpu_enc->enc_lock);
+ dpu_enc->enabled = false;
+
+ trace_dpu_enc_disable(DRMID(drm_enc));
+
+ /* wait for idle */
+ dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
+
+ dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->ops.disable)
+ phys->ops.disable(phys);
+ }
+
+	/* after phys waits for frame-done, there should be no more frames pending */
+ if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
+ DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
+ del_timer_sync(&dpu_enc->frame_done_timer);
+ }
+
+ dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
+
+ dpu_enc->connector = NULL;
+
+ DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
+
+ mutex_unlock(&dpu_enc->enc_lock);
+}
+
+static enum dpu_intf dpu_encoder_get_intf(const struct dpu_mdss_cfg *catalog,
+ enum dpu_intf_type type, u32 controller_id)
+{
+ int i = 0;
+
+ if (type == INTF_WB)
+ return INTF_MAX;
+
+ for (i = 0; i < catalog->intf_count; i++) {
+ if (catalog->intf[i].type == type
+ && catalog->intf[i].controller_id == controller_id) {
+ return catalog->intf[i].id;
+ }
+ }
+
+ return INTF_MAX;
+}
+
+static enum dpu_wb dpu_encoder_get_wb(const struct dpu_mdss_cfg *catalog,
+ enum dpu_intf_type type, u32 controller_id)
+{
+ int i = 0;
+
+ if (type != INTF_WB)
+ return WB_MAX;
+
+ for (i = 0; i < catalog->wb_count; i++) {
+ if (catalog->wb[i].id == controller_id)
+ return catalog->wb[i].id;
+ }
+
+ return WB_MAX;
+}
+
+static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phy_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ unsigned long lock_flags;
+
+ if (!drm_enc || !phy_enc)
+ return;
+
+ DPU_ATRACE_BEGIN("encoder_vblank_callback");
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ atomic_inc(&phy_enc->vsync_cnt);
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ if (dpu_enc->crtc)
+ dpu_crtc_vblank_callback(dpu_enc->crtc);
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+
+ DPU_ATRACE_END("encoder_vblank_callback");
+}
+
+static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phy_enc)
+{
+ if (!phy_enc)
+ return;
+
+ DPU_ATRACE_BEGIN("encoder_underrun_callback");
+ atomic_inc(&phy_enc->underrun_cnt);
+
+ /* trigger dump only on the first underrun */
+ if (atomic_read(&phy_enc->underrun_cnt) == 1)
+ msm_disp_snapshot_state(drm_enc->dev);
+
+ trace_dpu_enc_underrun_cb(DRMID(drm_enc),
+ atomic_read(&phy_enc->underrun_cnt));
+ DPU_ATRACE_END("encoder_underrun_callback");
+}
+
+void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned long lock_flags;
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ /* crtc should always be cleared before re-assigning */
+ WARN_ON(crtc && dpu_enc->crtc);
+ dpu_enc->crtc = crtc;
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
+ struct drm_crtc *crtc, bool enable)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned long lock_flags;
+ int i;
+
+ trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ if (dpu_enc->crtc != crtc) {
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+ return;
+ }
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->ops.control_vblank_irq)
+ phys->ops.control_vblank_irq(phys, enable);
+ }
+}
+
+void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
+ void (*frame_event_cb)(void *, u32 event),
+ void *frame_event_cb_data)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned long lock_flags;
+ bool enable;
+
+ enable = frame_event_cb ? true : false;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ dpu_enc->crtc_frame_event_cb = frame_event_cb;
+ dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+static void dpu_encoder_frame_done_callback(
+ struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *ready_phys, u32 event)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned int i;
+
+ if (event & (DPU_ENCODER_FRAME_EVENT_DONE
+ | DPU_ENCODER_FRAME_EVENT_ERROR
+ | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+
+ if (!dpu_enc->frame_busy_mask[0]) {
+			/*
+			 * suppress frame_done without a waiter,
+			 * likely autorefresh
+			 */
+ trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event,
+ dpu_encoder_helper_get_intf_type(ready_phys->intf_mode),
+ ready_phys->intf_idx, ready_phys->wb_idx);
+ return;
+ }
+
+ /* One of the physical encoders has become idle */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ if (dpu_enc->phys_encs[i] == ready_phys) {
+ trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
+ dpu_enc->frame_busy_mask[0]);
+ clear_bit(i, dpu_enc->frame_busy_mask);
+ }
+ }
+
+ if (!dpu_enc->frame_busy_mask[0]) {
+ atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
+ del_timer(&dpu_enc->frame_done_timer);
+
+ dpu_encoder_resource_control(drm_enc,
+ DPU_ENC_RC_EVENT_FRAME_DONE);
+
+ if (dpu_enc->crtc_frame_event_cb)
+ dpu_enc->crtc_frame_event_cb(
+ dpu_enc->crtc_frame_event_cb_data,
+ event);
+ }
+ } else {
+ if (dpu_enc->crtc_frame_event_cb)
+ dpu_enc->crtc_frame_event_cb(
+ dpu_enc->crtc_frame_event_cb_data, event);
+ }
+}
+
+static void dpu_encoder_off_work(struct work_struct *work)
+{
+ struct dpu_encoder_virt *dpu_enc = container_of(work,
+ struct dpu_encoder_virt, delayed_off_work.work);
+
+ dpu_encoder_resource_control(&dpu_enc->base,
+ DPU_ENC_RC_EVENT_ENTER_IDLE);
+
+ dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
+ DPU_ENCODER_FRAME_EVENT_IDLE);
+}
+
+/**
+ * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
+ * @drm_enc: Pointer to drm encoder structure
+ * @phys: Pointer to physical encoder structure
+ * @extra_flush_bits: Additional bit mask to include in flush trigger
+ */
+static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
+{
+ struct dpu_hw_ctl *ctl;
+ int pending_kickoff_cnt;
+ u32 ret = UINT_MAX;
+
+ if (!phys->hw_pp) {
+ DPU_ERROR("invalid pingpong hw\n");
+ return;
+ }
+
+ ctl = phys->hw_ctl;
+ if (!ctl->ops.trigger_flush) {
+ DPU_ERROR("missing trigger cb\n");
+ return;
+ }
+
+ pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+
+ if (extra_flush_bits && ctl->ops.update_pending_flush)
+ ctl->ops.update_pending_flush(ctl, extra_flush_bits);
+
+ ctl->ops.trigger_flush(ctl);
+
+ if (ctl->ops.get_pending_flush)
+ ret = ctl->ops.get_pending_flush(ctl);
+
+ trace_dpu_enc_trigger_flush(DRMID(drm_enc),
+ dpu_encoder_helper_get_intf_type(phys->intf_mode),
+ phys->intf_idx, phys->wb_idx,
+ pending_kickoff_cnt, ctl->idx,
+ extra_flush_bits, ret);
+}
+
+/**
+ * _dpu_encoder_trigger_start - trigger start for a physical encoder
+ * @phys: Pointer to physical encoder structure
+ */
+static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
+{
+ if (!phys) {
+ DPU_ERROR("invalid argument(s)\n");
+ return;
+ }
+
+ if (!phys->hw_pp) {
+ DPU_ERROR("invalid pingpong hw\n");
+ return;
+ }
+
+ if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
+ phys->ops.trigger_start(phys);
+}
+
+void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *ctl;
+
+ ctl = phys_enc->hw_ctl;
+ if (ctl->ops.trigger_start) {
+ ctl->ops.trigger_start(ctl);
+ trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
+ }
+}
+
+static int dpu_encoder_helper_wait_event_timeout(
+ int32_t drm_id,
+ u32 irq_idx,
+ struct dpu_encoder_wait_info *info)
+{
+ int rc = 0;
+ s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
+ s64 jiffies = msecs_to_jiffies(info->timeout_ms);
+ s64 time;
+
+ do {
+ rc = wait_event_timeout(*(info->wq),
+ atomic_read(info->atomic_cnt) == 0, jiffies);
+ time = ktime_to_ms(ktime_get());
+
+ trace_dpu_enc_wait_event_timeout(drm_id, irq_idx, rc, time,
+ expected_time,
+ atomic_read(info->atomic_cnt));
+		/*
+		 * If we timed out but the counter is still set and the
+		 * deadline has not passed, wait again.
+		 */
+ } while (atomic_read(info->atomic_cnt) && (rc == 0) &&
+ (time < expected_time));
+
+ return rc;
+}
+
+static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_hw_ctl *ctl;
+ int rc;
+ struct drm_encoder *drm_enc;
+
+ dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+ ctl = phys_enc->hw_ctl;
+ drm_enc = phys_enc->parent;
+
+ if (!ctl->ops.reset)
+ return;
+
+ DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc),
+ ctl->idx);
+
+ rc = ctl->ops.reset(ctl);
+ if (rc) {
+ DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
+ msm_disp_snapshot_state(drm_enc->dev);
+ }
+
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+/**
+ * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
+ * Iterate through the physical encoders and perform consolidated flush
+ * and/or control start triggering as needed. This is done in the virtual
+ * encoder rather than the individual physical ones in order to handle
+ * use cases that require visibility into multiple physical encoders at
+ * a time.
+ * @dpu_enc: Pointer to virtual encoder structure
+ */
+static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
+{
+ struct dpu_hw_ctl *ctl;
+ uint32_t i, pending_flush;
+ unsigned long lock_flags;
+
+ pending_flush = 0x0;
+
+ /* update pending counts and trigger kickoff ctl flush atomically */
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+
+ /* don't perform flush/start operations for slave encoders */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->enable_state == DPU_ENC_DISABLED)
+ continue;
+
+ ctl = phys->hw_ctl;
+
+ /*
+ * This is cleared in frame_done worker, which isn't invoked
+ * for async commits. So don't set this for async, since it'll
+ * roll over to the next commit.
+ */
+ if (phys->split_role != ENC_ROLE_SLAVE)
+ set_bit(i, dpu_enc->frame_busy_mask);
+
+ if (!phys->ops.needs_single_flush ||
+ !phys->ops.needs_single_flush(phys))
+ _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
+ else if (ctl->ops.get_pending_flush)
+ pending_flush |= ctl->ops.get_pending_flush(ctl);
+ }
+
+ /* for split flush, combine pending flush masks and send to master */
+ if (pending_flush && dpu_enc->cur_master) {
+ _dpu_encoder_trigger_flush(
+ &dpu_enc->base,
+ dpu_enc->cur_master,
+ pending_flush);
+ }
+
+ _dpu_encoder_trigger_start(dpu_enc->cur_master);
+
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ unsigned int i;
+ struct dpu_hw_ctl *ctl;
+ struct msm_display_info *disp_info;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ disp_info = &dpu_enc->disp_info;
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+
+ ctl = phys->hw_ctl;
+ if (ctl->ops.clear_pending_flush)
+ ctl->ops.clear_pending_flush(ctl);
+
+ /* update only for command mode primary ctl */
+ if ((phys == dpu_enc->cur_master) &&
+ disp_info->is_cmd_mode
+ && ctl->ops.trigger_pending)
+ ctl->ops.trigger_pending(ctl);
+ }
+}
+
+static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
+ struct drm_display_mode *mode)
+{
+ u64 pclk_rate;
+ u32 pclk_period;
+ u32 line_time;
+
+ /*
+ * For linetime calculation, only operate on master encoder.
+ */
+ if (!dpu_enc->cur_master)
+ return 0;
+
+ if (!dpu_enc->cur_master->ops.get_line_count) {
+ DPU_ERROR("get_line_count function not defined\n");
+ return 0;
+ }
+
+ pclk_rate = mode->clock; /* pixel clock in kHz */
+ if (pclk_rate == 0) {
+ DPU_ERROR("pclk is 0, cannot calculate line time\n");
+ return 0;
+ }
+
+ pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
+ if (pclk_period == 0) {
+ DPU_ERROR("pclk period is 0\n");
+ return 0;
+ }
+
+ /*
+ * Line time calculation based on Pixel clock and HTOTAL.
+ * Final unit is in ns.
+ */
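+	/*
+	 * Illustrative example (values not taken from any real panel): for a
+	 * 1080p60 mode with mode->clock = 148500 (kHz) and htotal = 2200,
+	 * pclk_period = DIV_ROUND_UP(1000000000, 148500) = 6735 (in ps) and
+	 * line_time = (6735 * 2200) / 1000 = 14817 ns, i.e. ~14.8 us per line.
+	 */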
+ line_time = (pclk_period * mode->htotal) / 1000;
+ if (line_time == 0) {
+ DPU_ERROR("line time calculation is 0\n");
+ return 0;
+ }
+
+ DPU_DEBUG_ENC(dpu_enc,
+ "clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
+ pclk_rate, pclk_period, line_time);
+
+ return line_time;
+}
+
+int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
+{
+ struct drm_display_mode *mode;
+ struct dpu_encoder_virt *dpu_enc;
+ u32 cur_line;
+ u32 line_time;
+ u32 vtotal, time_to_vsync;
+ ktime_t cur_time;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ if (!drm_enc->crtc || !drm_enc->crtc->state) {
+ DPU_ERROR("crtc/crtc state object is NULL\n");
+ return -EINVAL;
+ }
+ mode = &drm_enc->crtc->state->adjusted_mode;
+
+ line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
+ if (!line_time)
+ return -EINVAL;
+
+ cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);
+
+ vtotal = mode->vtotal;
+ if (cur_line >= vtotal)
+ time_to_vsync = line_time * vtotal;
+ else
+ time_to_vsync = line_time * (vtotal - cur_line);
+
+ if (time_to_vsync == 0) {
+ DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
+ vtotal);
+ return -EINVAL;
+ }
+
+ cur_time = ktime_get();
+ *wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
+
+ DPU_DEBUG_ENC(dpu_enc,
+ "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
+ cur_line, vtotal, time_to_vsync,
+ ktime_to_ms(cur_time),
+ ktime_to_ms(*wakeup_time));
+ return 0;
+}
+
+static void dpu_encoder_vsync_event_handler(struct timer_list *t)
+{
+ struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
+ vsync_event_timer);
+ struct drm_encoder *drm_enc = &dpu_enc->base;
+ struct msm_drm_private *priv;
+ struct msm_drm_thread *event_thread;
+
+ if (!drm_enc->dev || !drm_enc->crtc) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ priv = drm_enc->dev->dev_private;
+
+ if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
+ DPU_ERROR("invalid crtc index\n");
+ return;
+ }
+ event_thread = &priv->event_thread[drm_enc->crtc->index];
+ if (!event_thread) {
+ DPU_ERROR("event_thread not found for crtc:%d\n",
+ drm_enc->crtc->index);
+ return;
+ }
+
+ del_timer(&dpu_enc->vsync_event_timer);
+}
+
+static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
+{
+ struct dpu_encoder_virt *dpu_enc = container_of(work,
+ struct dpu_encoder_virt, vsync_event_work);
+ ktime_t wakeup_time;
+
+ if (dpu_encoder_vsync_time(&dpu_enc->base, &wakeup_time))
+ return;
+
+ trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
+ mod_timer(&dpu_enc->vsync_event_timer,
+ nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
+}
+
+static u32
+dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc,
+ u32 enc_ip_width)
+{
+ int ssm_delay, total_pixels, soft_slice_per_enc;
+
+ soft_slice_per_enc = enc_ip_width / dsc->slice_width;
+
+ /*
+ * minimum number of initial line pixels is a sum of:
+ * 1. sub-stream multiplexer delay (83 groups for 8bpc,
+ * 91 for 10 bpc) * 3
+ * 2. for two soft slice cases, add extra sub-stream multiplexer * 3
+ * 3. the initial xmit delay
+ * 4. total pipeline delay through the "lock step" of encoder (47)
+ * 5. 6 additional pixels as the output of the rate buffer is
+ * 48 bits wide
+ */
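+	/*
+	 * Illustrative example using the ssm_delay values from the code
+	 * below: with 8 bpc (ssm_delay = 84), initial_xmit_delay = 512,
+	 * slice_width = 540 and one soft slice per encoder,
+	 * total_pixels = 84 * 3 + 512 + 47 = 811 and
+	 * initial_lines = DIV_ROUND_UP(811, 540) = 2.
+	 */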
+ ssm_delay = ((dsc->bits_per_component < 10) ? 84 : 92);
+ total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47;
+ if (soft_slice_per_enc > 1)
+ total_pixels += (ssm_delay * 3);
+ return DIV_ROUND_UP(total_pixels, dsc->slice_width);
+}
+
+static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
+ struct dpu_hw_pingpong *hw_pp,
+ struct drm_dsc_config *dsc,
+ u32 common_mode,
+ u32 initial_lines)
+{
+ if (hw_dsc->ops.dsc_config)
+ hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines);
+
+ if (hw_dsc->ops.dsc_config_thresh)
+ hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);
+
+ if (hw_pp->ops.setup_dsc)
+ hw_pp->ops.setup_dsc(hw_pp);
+
+ if (hw_pp->ops.enable_dsc)
+ hw_pp->ops.enable_dsc(hw_pp);
+}
+
+static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
+ struct drm_dsc_config *dsc)
+{
+	/* coding only for the 2 LM, 2 enc, 2 DSC (DSC merge) config */
+ struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
+ struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+ int this_frame_slices;
+ int intf_ip_w, enc_ip_w;
+ int dsc_common_mode;
+ int pic_width;
+ u32 initial_lines;
+ int i;
+
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ hw_pp[i] = dpu_enc->hw_pp[i];
+ hw_dsc[i] = dpu_enc->hw_dsc[i];
+
+ if (!hw_pp[i] || !hw_dsc[i]) {
+ DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
+ return;
+ }
+ }
+
+ dsc_common_mode = 0;
+ pic_width = dsc->pic_width;
+
+ dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
+ if (enc_master->intf_mode == INTF_MODE_VIDEO)
+ dsc_common_mode |= DSC_MODE_VIDEO;
+
+ this_frame_slices = pic_width / dsc->slice_width;
+ intf_ip_w = this_frame_slices * dsc->slice_width;
+
+	/*
+	 * dsc merge case: when using 2 encoders for the same stream,
+	 * the number of slices needs to be the same on both encoders.
+	 */
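+	/*
+	 * E.g. (illustrative values): pic_width = 1080 with slice_width = 540
+	 * gives this_frame_slices = 2, intf_ip_w = 1080 and enc_ip_w = 540,
+	 * so each of the two encoders compresses one 540-pixel soft slice.
+	 */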
+ enc_ip_w = intf_ip_w / 2;
+ initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
+
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
+ dpu_encoder_dsc_pipe_cfg(hw_dsc[i], hw_pp[i], dsc, dsc_common_mode, initial_lines);
+}
+
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ bool needs_hw_reset = false;
+ unsigned int i;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));
+
+ /* prepare for next kickoff, may include waiting on previous kickoff */
+ DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys->ops.prepare_for_kickoff)
+ phys->ops.prepare_for_kickoff(phys);
+ if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
+ needs_hw_reset = true;
+ }
+ DPU_ATRACE_END("enc_prepare_for_kickoff");
+
+ dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
+
+ /* if any phys needs reset, reset all phys, in-order */
+ if (needs_hw_reset) {
+ trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
+ }
+ }
+
+ if (dpu_enc->dsc)
+ dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc);
+}
+
+bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ unsigned int i;
+ struct dpu_encoder_phys *phys;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) {
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) {
+ DPU_DEBUG("invalid FB not kicking off\n");
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ ktime_t wakeup_time;
+ unsigned long timeout_ms;
+ unsigned int i;
+
+ DPU_ATRACE_BEGIN("encoder_kickoff");
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ trace_dpu_enc_kickoff(DRMID(drm_enc));
+
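+	/*
+	 * frame-done timeout: DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES frame
+	 * periods at the current refresh rate, expressed in ms.
+	 */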
+ timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
+ drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
+
+ atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
+ mod_timer(&dpu_enc->frame_done_timer,
+ jiffies + msecs_to_jiffies(timeout_ms));
+
+ /* All phys encs are ready to go, trigger the kickoff */
+ _dpu_encoder_kickoff_phys(dpu_enc);
+
+ /* allow phys encs to handle any post-kickoff business */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys->ops.handle_post_kickoff)
+ phys->ops.handle_post_kickoff(phys);
+ }
+
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
+ !dpu_encoder_vsync_time(drm_enc, &wakeup_time)) {
+ trace_dpu_enc_early_kickoff(DRMID(drm_enc),
+ ktime_to_ms(wakeup_time));
+ mod_timer(&dpu_enc->vsync_event_timer,
+ nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
+ }
+
+ DPU_ATRACE_END("encoder_kickoff");
+}
+
+static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_mixer_cfg mixer;
+ int i, num_lm;
+ struct dpu_global_state *global_state;
+ struct dpu_hw_blk *hw_lm[2];
+ struct dpu_hw_mixer *hw_mixer[2];
+ struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
+
+ memset(&mixer, 0, sizeof(mixer));
+
+ /* reset all mixers for this encoder */
+ if (phys_enc->hw_ctl->ops.clear_all_blendstages)
+ phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
+
+ global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms);
+
+ num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state,
+ phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
+
+ for (i = 0; i < num_lm; i++) {
+ hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
+ if (phys_enc->hw_ctl->ops.update_pending_flush_mixer)
+ phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
+
+ /* clear all blendstages */
+ if (phys_enc->hw_ctl->ops.setup_blendstage)
+ phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
+ }
+}
+
+void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
+ struct dpu_hw_intf_cfg intf_cfg = { 0 };
+ int i;
+ struct dpu_encoder_virt *dpu_enc;
+
+ dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+
+ phys_enc->hw_ctl->ops.reset(ctl);
+
+ dpu_encoder_helper_reset_mixers(phys_enc);
+
+ /*
+ * TODO: move the once-only operation like CTL flush/trigger
+ * into dpu_encoder_virt_disable() and all operations which need
+ * to be done per phys encoder into the phys_disable() op.
+ */
+ if (phys_enc->hw_wb) {
+ /* disable the PP block */
+ if (phys_enc->hw_wb->ops.bind_pingpong_blk)
+ phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, false,
+ phys_enc->hw_pp->idx);
+
+ /* mark WB flush as pending */
+ if (phys_enc->hw_ctl->ops.update_pending_flush_wb)
+ phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
+ } else {
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk)
+ phys_enc->hw_intf->ops.bind_pingpong_blk(
+ dpu_enc->phys_encs[i]->hw_intf, false,
+ dpu_enc->phys_encs[i]->hw_pp->idx);
+
+ /* mark INTF flush as pending */
+ if (phys_enc->hw_ctl->ops.update_pending_flush_intf)
+ phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl,
+ dpu_enc->phys_encs[i]->hw_intf->idx);
+ }
+ }
+
+ /* reset the merge 3D HW block */
+ if (phys_enc->hw_pp->merge_3d) {
+ phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
+ BLEND_3D_NONE);
+ if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
+ phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl,
+ phys_enc->hw_pp->merge_3d->idx);
+ }
+
+ intf_cfg.stream_sel = 0; /* Don't care value for video mode */
+ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+
+ if (phys_enc->hw_intf)
+ intf_cfg.intf = phys_enc->hw_intf->idx;
+ if (phys_enc->hw_wb)
+ intf_cfg.wb = phys_enc->hw_wb->idx;
+
+ if (phys_enc->hw_pp->merge_3d)
+ intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
+
+ if (ctl->ops.reset_intf_cfg)
+ ctl->ops.reset_intf_cfg(ctl, &intf_cfg);
+
+ ctl->ops.trigger_flush(ctl);
+ ctl->ops.trigger_start(ctl);
+ ctl->ops.clear_pending_flush(ctl);
+}
+
+void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys->ops.prepare_commit)
+ phys->ops.prepare_commit(phys);
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_encoder_status_show(struct seq_file *s, void *data)
+{
+ struct dpu_encoder_virt *dpu_enc = s->private;
+ int i;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d ",
+ phys->intf_idx - INTF_0, phys->wb_idx - WB_0,
+ atomic_read(&phys->vsync_cnt),
+ atomic_read(&phys->underrun_cnt));
+
+ seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode));
+ }
+ mutex_unlock(&dpu_enc->enc_lock);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);
+
+static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ int i;
+
+ char name[DPU_NAME_SIZE];
+
+ if (!drm_enc->dev) {
+ DPU_ERROR("invalid encoder or kms\n");
+ return -EINVAL;
+ }
+
+ snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);
+
+ /* create overall sub-directory for the encoder */
+ dpu_enc->debugfs_root = debugfs_create_dir(name,
+ drm_enc->dev->primary->debugfs_root);
+
+ /* don't error check these */
+ debugfs_create_file("status", 0600,
+ dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++)
+ if (dpu_enc->phys_encs[i]->ops.late_register)
+ dpu_enc->phys_encs[i]->ops.late_register(
+ dpu_enc->phys_encs[i],
+ dpu_enc->debugfs_root);
+
+ return 0;
+}
+#else
+static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
+{
+ return 0;
+}
+#endif
+
+static int dpu_encoder_late_register(struct drm_encoder *encoder)
+{
+ return _dpu_encoder_init_debugfs(encoder);
+}
+
+static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
+
+ debugfs_remove_recursive(dpu_enc->debugfs_root);
+}
+
+static int dpu_encoder_virt_add_phys_encs(
+ struct msm_display_info *disp_info,
+ struct dpu_encoder_virt *dpu_enc,
+ struct dpu_enc_phys_init_params *params)
+{
+ struct dpu_encoder_phys *enc = NULL;
+
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+	/*
+	 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoders in
+	 * this function, so check the available space up-front.
+	 */
+ if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
+ ARRAY_SIZE(dpu_enc->phys_encs)) {
+ DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
+ dpu_enc->num_phys_encs);
+ return -EINVAL;
+ }
+
+ if (disp_info->intf_type == DRM_MODE_ENCODER_VIRTUAL) {
+ enc = dpu_encoder_phys_wb_init(params);
+
+ if (IS_ERR(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
+ PTR_ERR(enc));
+ return PTR_ERR(enc);
+ }
+
+ dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+ ++dpu_enc->num_phys_encs;
+ } else if (disp_info->is_cmd_mode) {
+ enc = dpu_encoder_phys_cmd_init(params);
+
+ if (IS_ERR(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
+ PTR_ERR(enc));
+ return PTR_ERR(enc);
+ }
+
+ dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+ ++dpu_enc->num_phys_encs;
+ } else {
+ enc = dpu_encoder_phys_vid_init(params);
+
+ if (IS_ERR(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
+ PTR_ERR(enc));
+ return PTR_ERR(enc);
+ }
+
+ dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+ ++dpu_enc->num_phys_encs;
+ }
+
+ if (params->split_role == ENC_ROLE_SLAVE)
+ dpu_enc->cur_slave = enc;
+ else
+ dpu_enc->cur_master = enc;
+
+ return 0;
+}
+
+static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = {
+ .handle_vblank_virt = dpu_encoder_vblank_callback,
+ .handle_underrun_virt = dpu_encoder_underrun_callback,
+ .handle_frame_done = dpu_encoder_frame_done_callback,
+};
+
+static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
+ struct dpu_kms *dpu_kms,
+ struct msm_display_info *disp_info)
+{
+ int ret = 0;
+ int i = 0;
+ enum dpu_intf_type intf_type = INTF_NONE;
+ struct dpu_enc_phys_init_params phys_params;
+
+ if (!dpu_enc) {
+ DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
+ return -EINVAL;
+ }
+
+ dpu_enc->cur_master = NULL;
+
+ memset(&phys_params, 0, sizeof(phys_params));
+ phys_params.dpu_kms = dpu_kms;
+ phys_params.parent = &dpu_enc->base;
+ phys_params.parent_ops = &dpu_encoder_parent_ops;
+ phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
+
+ switch (disp_info->intf_type) {
+ case DRM_MODE_ENCODER_DSI:
+ intf_type = INTF_DSI;
+ break;
+ case DRM_MODE_ENCODER_TMDS:
+ intf_type = INTF_DP;
+ break;
+ case DRM_MODE_ENCODER_VIRTUAL:
+ intf_type = INTF_WB;
+ break;
+ }
+
+ WARN_ON(disp_info->num_of_h_tiles < 1);
+
+	DPU_DEBUG("disp_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
+
+ if (disp_info->intf_type != DRM_MODE_ENCODER_VIRTUAL)
+ dpu_enc->idle_pc_supported =
+ dpu_kms->catalog->caps->has_idle_pc;
+
+ dpu_enc->dsc = disp_info->dsc;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
+ /*
+ * Left-most tile is at index 0, content is controller id
+ * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
+ * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
+ */
+ u32 controller_id = disp_info->h_tile_instance[i];
+
+ if (disp_info->num_of_h_tiles > 1) {
+ if (i == 0)
+ phys_params.split_role = ENC_ROLE_MASTER;
+ else
+ phys_params.split_role = ENC_ROLE_SLAVE;
+ } else {
+ phys_params.split_role = ENC_ROLE_SOLO;
+ }
+
+ DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
+ i, controller_id, phys_params.split_role);
+
+ phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog,
+ intf_type,
+ controller_id);
+
+ phys_params.wb_idx = dpu_encoder_get_wb(dpu_kms->catalog,
+ intf_type, controller_id);
+ /*
+ * The phys_params might represent either an INTF or a WB unit, but not
+ * both of them at the same time.
+ */
+ if ((phys_params.intf_idx == INTF_MAX) &&
+ (phys_params.wb_idx == WB_MAX)) {
+ DPU_ERROR_ENC(dpu_enc, "could not get intf or wb: type %d, id %d\n",
+ intf_type, controller_id);
+ ret = -EINVAL;
+ }
+
+ if ((phys_params.intf_idx != INTF_MAX) &&
+ (phys_params.wb_idx != WB_MAX)) {
+ DPU_ERROR_ENC(dpu_enc, "both intf and wb present: type %d, id %d\n",
+ intf_type, controller_id);
+ ret = -EINVAL;
+ }
+
+ if (!ret) {
+ ret = dpu_encoder_virt_add_phys_encs(disp_info,
+ dpu_enc, &phys_params);
+ if (ret)
+ DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
+ }
+ }
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+ atomic_set(&phys->vsync_cnt, 0);
+ atomic_set(&phys->underrun_cnt, 0);
+
+ if (phys->intf_idx >= INTF_0 && phys->intf_idx < INTF_MAX)
+ phys->hw_intf = dpu_rm_get_intf(&dpu_kms->rm, phys->intf_idx);
+
+ if (phys->wb_idx >= WB_0 && phys->wb_idx < WB_MAX)
+ phys->hw_wb = dpu_rm_get_wb(&dpu_kms->rm, phys->wb_idx);
+
+ if (!phys->hw_intf && !phys->hw_wb) {
+ DPU_ERROR_ENC(dpu_enc, "no intf or wb block assigned at idx: %d\n", i);
+ ret = -EINVAL;
+ }
+
+ if (phys->hw_intf && phys->hw_wb) {
+ DPU_ERROR_ENC(dpu_enc,
+ "invalid phys both intf and wb block at idx: %d\n", i);
+ ret = -EINVAL;
+ }
+ }
+
+ mutex_unlock(&dpu_enc->enc_lock);
+
+ return ret;
+}
+
+static void dpu_encoder_frame_done_timeout(struct timer_list *t)
+{
+ struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
+ frame_done_timer);
+ struct drm_encoder *drm_enc = &dpu_enc->base;
+ u32 event;
+
+ if (!drm_enc->dev) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
+ DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
+ DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
+ return;
+ } else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
+ DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
+ return;
+ }
+
+ DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");
+
+ event = DPU_ENCODER_FRAME_EVENT_ERROR;
+ trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
+ dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
+}
+
+static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
+ .atomic_mode_set = dpu_encoder_virt_atomic_mode_set,
+ .disable = dpu_encoder_virt_disable,
+ .enable = dpu_encoder_virt_enable,
+ .atomic_check = dpu_encoder_virt_atomic_check,
+};
+
+static const struct drm_encoder_funcs dpu_encoder_funcs = {
+ .destroy = dpu_encoder_destroy,
+ .late_register = dpu_encoder_late_register,
+ .early_unregister = dpu_encoder_early_unregister,
+};
+
+int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
+ struct msm_display_info *disp_info)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+ struct drm_encoder *drm_enc = NULL;
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int ret = 0;
+
+ dpu_enc = to_dpu_encoder_virt(enc);
+
+ ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
+ if (ret)
+ goto fail;
+
+ atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
+ timer_setup(&dpu_enc->frame_done_timer,
+ dpu_encoder_frame_done_timeout, 0);
+
+ if (disp_info->intf_type == DRM_MODE_ENCODER_DSI)
+ timer_setup(&dpu_enc->vsync_event_timer,
+ dpu_encoder_vsync_event_handler,
+ 0);
+ else if (disp_info->intf_type == DRM_MODE_ENCODER_TMDS)
+ dpu_enc->wide_bus_en = msm_dp_wide_bus_available(
+ priv->dp[disp_info->h_tile_instance[0]]);
+
+ INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
+ dpu_encoder_off_work);
+ dpu_enc->idle_timeout = IDLE_TIMEOUT;
+
+ kthread_init_work(&dpu_enc->vsync_event_work,
+ dpu_encoder_vsync_event_work_handler);
+
+ memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));
+
+ DPU_DEBUG_ENC(dpu_enc, "created\n");
+
+ return ret;
+
+fail:
+ DPU_ERROR("failed to create encoder\n");
+ if (drm_enc)
+ dpu_encoder_destroy(drm_enc);
+
+ return ret;
+}
+
+struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
+ int drm_enc_mode)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int rc = 0;
+
+ dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
+ if (!dpu_enc)
+ return ERR_PTR(-ENOMEM);
+
+ rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
+ drm_enc_mode, NULL);
+ if (rc) {
+ devm_kfree(dev->dev, dpu_enc);
+ return ERR_PTR(rc);
+ }
+
+ drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
+
+ spin_lock_init(&dpu_enc->enc_spinlock);
+ dpu_enc->enabled = false;
+ mutex_init(&dpu_enc->enc_lock);
+ mutex_init(&dpu_enc->rc_lock);
+
+ return &dpu_enc->base;
+}
+
+int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
+ enum msm_event_wait event)
+{
+ int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i, ret = 0;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ switch (event) {
+ case MSM_ENC_COMMIT_DONE:
+ fn_wait = phys->ops.wait_for_commit_done;
+ break;
+ case MSM_ENC_TX_COMPLETE:
+ fn_wait = phys->ops.wait_for_tx_complete;
+ break;
+ case MSM_ENC_VBLANK:
+ fn_wait = phys->ops.wait_for_vblank;
+ break;
+ default:
+ DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
+ event);
+ return -EINVAL;
+ }
+
+ if (fn_wait) {
+ DPU_ATRACE_BEGIN("wait_for_completion_event");
+ ret = fn_wait(phys);
+ DPU_ATRACE_END("wait_for_completion_event");
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+
+ if (!encoder) {
+ DPU_ERROR("invalid encoder\n");
+ return INTF_MODE_NONE;
+ }
+ dpu_enc = to_dpu_encoder_virt(encoder);
+
+ if (dpu_enc->cur_master)
+ return dpu_enc->cur_master->intf_mode;
+
+ if (dpu_enc->num_phys_encs)
+ return dpu_enc->phys_encs[0]->intf_mode;
+
+ return INTF_MODE_NONE;
+}
+
+unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
+{
+ struct drm_encoder *encoder = phys_enc->parent;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
+
+ return dpu_enc->dsc_mask;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
new file mode 100644
index 000000000..9e7236ef3
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __DPU_ENCODER_H__
+#define __DPU_ENCODER_H__
+
+#include <drm/drm_crtc.h>
+#include "dpu_hw_mdss.h"
+
+#define DPU_ENCODER_FRAME_EVENT_DONE BIT(0)
+#define DPU_ENCODER_FRAME_EVENT_ERROR BIT(1)
+#define DPU_ENCODER_FRAME_EVENT_PANEL_DEAD BIT(2)
+#define DPU_ENCODER_FRAME_EVENT_IDLE BIT(3)
+
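+/*
+ * Delayed-off idle timeout in ms (66 - 16/2 = 58); the constants appear to
+ * be derived from frame periods (~66 ms per frame at 15 Hz, ~16 ms at 60 Hz).
+ */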
+#define IDLE_TIMEOUT (66 - 16/2)
+
+/**
+ * struct msm_display_info - defines display properties
+ * @intf_type:          DRM_MODE_ENCODER_* type
+ * @num_of_h_tiles: Number of horizontal tiles in case of split interface
+ * @h_tile_instance: Controller instance used per tile. Number of elements is
+ * based on num_of_h_tiles
+ * @is_cmd_mode:        Boolean to indicate if the CMD mode is requested
+ * @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is
+ * used instead of panel TE in cmd mode panels
+ * @dsc: DSC configuration data for DSC-enabled displays
+ */
+struct msm_display_info {
+ int intf_type;
+ uint32_t num_of_h_tiles;
+ uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
+ bool is_cmd_mode;
+ bool is_te_using_watchdog_timer;
+ struct drm_dsc_config *dsc;
+};
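+
+/*
+ * Illustrative example (not taken from a real board file): a single-DSI
+ * command-mode panel using panel TE would be described roughly as
+ *
+ *	struct msm_display_info info = {
+ *		.intf_type = DRM_MODE_ENCODER_DSI,
+ *		.num_of_h_tiles = 1,
+ *		.h_tile_instance = { 0 },
+ *		.is_cmd_mode = true,
+ *	};
+ */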
+
+/**
+ * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to
+ * @encoder: encoder pointer
+ * @crtc: crtc pointer
+ */
+void dpu_encoder_assign_crtc(struct drm_encoder *encoder,
+ struct drm_crtc *crtc);
+
+/**
+ * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if
+ * the encoder is assigned to the given crtc
+ * @encoder: encoder pointer
+ * @crtc: crtc pointer
+ * @enable: true if vblank should be enabled
+ */
+void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *encoder,
+ struct drm_crtc *crtc, bool enable);
+
+/**
+ * dpu_encoder_register_frame_event_callback - provide callback to encoder that
+ * will be called when the request completes, or on other frame events.
+ * @encoder: encoder pointer
+ * @cb: callback pointer, provide NULL to deregister
+ * @data: user data provided to callback
+ */
+void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
+ void (*cb)(void *, u32), void *data);
+
+/**
+ * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
+ * path (i.e. ctl flush and start) at next appropriate time.
+ * Immediately: if no previous commit is outstanding.
+ * Delayed: Block until next trigger can be issued.
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
+ * kickoff and trigger the ctl prepare progress for command mode display.
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
+ * (i.e. ctl flush and start) immediately.
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_kickoff(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_vsync_time - get the time of the next vsync
+ * @drm_enc: encoder pointer
+ * @wakeup_time: pointer to the ktime_t in which the next vsync time is returned
+ */
+int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time);
+
+/**
+ * dpu_encoder_wait_for_event - Waits for encoder events
+ * @encoder: encoder pointer
+ * @event: event to wait for
+ * MSM_ENC_COMMIT_DONE - Wait for hardware to have flushed the current pending
+ * frames to hardware at a vblank or ctl_start
+ * Encoders will map this differently depending on the
+ * panel type.
+ * vid mode -> vsync_irq
+ * cmd mode -> ctl_start
+ * MSM_ENC_TX_COMPLETE - Wait for the hardware to transfer all the pixels to
+ * the panel. Encoders will map this differently
+ * depending on the panel type.
+ * vid mode -> vsync_irq
+ * cmd mode -> pp_done
+ * Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
+ */
+int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder,
+ enum msm_event_wait event);
+
+/**
+ * dpu_encoder_get_intf_mode - get interface mode of the given encoder
+ * @encoder: Pointer to drm encoder object
+ */
+enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_virt_runtime_resume(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_init - initialize virtual encoder object
+ * @dev: Pointer to drm device structure
+ * @drm_enc_mode: DRM_MODE_ENCODER_* type passed through to drm_encoder_init()
+ * Returns: Pointer to newly created drm encoder
+ */
+struct drm_encoder *dpu_encoder_init(
+ struct drm_device *dev,
+ int drm_enc_mode);
+
+/**
+ * dpu_encoder_setup - setup dpu_encoder for the display probed
+ * @dev: Pointer to drm device structure
+ * @enc: Pointer to the drm_encoder
+ * @disp_info: Pointer to the display info
+ */
+int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
+ struct msm_display_info *disp_info);
+
+/**
+ * dpu_encoder_prepare_commit - prepare encoder at the very beginning of an
+ * atomic commit, before any registers are written
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
+void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_set_idle_timeout - set the idle timeout for video
+ * and command mode encoders.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * @idle_timeout: idle timeout duration in milliseconds
+ */
+void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc,
+		u32 idle_timeout);
+
+/**
+ * dpu_encoder_get_linecount - get interface line count for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
+int dpu_encoder_get_linecount(struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_get_vsync_count - get vsync count for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
+int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc);
+
+bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_get_crc_values_cnt - get number of physical encoders contained
+ * in virtual encoder that can collect CRC values
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * Returns: Number of physical encoders for given drm encoder
+ */
+int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_setup_misr - enable misr calculations
+ * @drm_encoder: Pointer to previously created drm encoder structure
+ */
+void dpu_encoder_setup_misr(const struct drm_encoder *drm_encoder);
+
+/**
+ * dpu_encoder_get_crc - get the crc value from interface blocks
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * @crcs: array to write the collected CRC values into
+ * @pos: offset into @crcs at which this encoder's values are written
+ * Returns: 0 on success, error otherwise
+ */
+int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos);
+
+/**
+ * dpu_encoder_use_dsc_merge - returns true if the encoder uses DSC merge topology.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
+bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_prepare_wb_job - prepare writeback job for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * @job: Pointer to the current drm writeback job
+ */
+void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
+ struct drm_writeback_job *job);
+
+/**
+ * dpu_encoder_cleanup_wb_job - cleanup writeback job for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * @job: Pointer to the current drm writeback job
+ */
+void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
+ struct drm_writeback_job *job);
+
+/**
+ * dpu_encoder_is_valid_for_commit - check if the encoder has valid parameters for commit.
+ * @drm_enc: Pointer to drm encoder structure
+ */
+bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc);
+
+#endif /* __DPU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
new file mode 100644
index 000000000..f2af07d87
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -0,0 +1,403 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DPU_ENCODER_PHYS_H__
+#define __DPU_ENCODER_PHYS_H__
+
+#include <drm/drm_writeback.h>
+#include <linux/jiffies.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_intf.h"
+#include "dpu_hw_wb.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_top.h"
+#include "dpu_encoder.h"
+#include "dpu_crtc.h"
+
+#define DPU_ENCODER_NAME_MAX 16
+
+/* wait for at most 2 vsync for lowest refresh rate (24hz) */
+#define KICKOFF_TIMEOUT_MS 84
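+/* (2 frames / 24 Hz = 2000 / 24 ms ~= 83.3 ms, rounded up to 84 ms) */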
+#define KICKOFF_TIMEOUT_JIFFIES msecs_to_jiffies(KICKOFF_TIMEOUT_MS)
+
+/**
+ * enum dpu_enc_split_role - Role this physical encoder will play in a
+ * split-panel configuration, where one panel is master, and others slaves.
+ * Masters have extra responsibilities, like managing the VBLANK IRQ.
+ * @ENC_ROLE_SOLO: This is the one and only panel. This encoder is master.
+ * @ENC_ROLE_MASTER: This encoder is the master of a split panel config.
+ * @ENC_ROLE_SLAVE: This encoder is not the master of a split panel config.
+ */
+enum dpu_enc_split_role {
+ ENC_ROLE_SOLO,
+ ENC_ROLE_MASTER,
+ ENC_ROLE_SLAVE,
+};
+
+/**
+ * enum dpu_enc_enable_state - current enabled state of the physical encoder
+ * @DPU_ENC_DISABLING: Encoder transitioning to disable state
+ * Events bounding transition are encoder type specific
+ * @DPU_ENC_DISABLED: Encoder is disabled
+ * @DPU_ENC_ENABLING: Encoder transitioning to enabled
+ * Events bounding transition are encoder type specific
+ * @DPU_ENC_ENABLED: Encoder is enabled
+ * @DPU_ENC_ERR_NEEDS_HW_RESET: Encoder is enabled, but requires a hw_reset
+ * to recover from a previous error
+ */
+enum dpu_enc_enable_state {
+ DPU_ENC_DISABLING,
+ DPU_ENC_DISABLED,
+ DPU_ENC_ENABLING,
+ DPU_ENC_ENABLED,
+ DPU_ENC_ERR_NEEDS_HW_RESET
+};
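+
+/*
+ * Illustrative note (not part of the original sources): in the video
+ * mode encoder later in this series, .enable moves DPU_ENC_DISABLED ->
+ * DPU_ENC_ENABLING and .handle_post_kickoff moves it to DPU_ENC_ENABLED
+ * once the timing engine is turned on; the command mode encoder goes
+ * straight to DPU_ENC_ENABLED in .enable. Both return to
+ * DPU_ENC_DISABLED in .disable.
+ */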
+
+struct dpu_encoder_phys;
+
+/**
+ * struct dpu_encoder_virt_ops - Interface the containing virtual encoder
+ * provides for the physical encoders to use to callback.
+ * @handle_vblank_virt: Notify virtual encoder of vblank IRQ reception
+ * Note: This is called from IRQ handler context.
+ * @handle_underrun_virt: Notify virtual encoder of underrun IRQ reception
+ * Note: This is called from IRQ handler context.
+ * @handle_frame_done: Notify virtual encoder that this phys encoder
+ *				has completed the last requested frame.
+ */
+struct dpu_encoder_virt_ops {
+ void (*handle_vblank_virt)(struct drm_encoder *,
+ struct dpu_encoder_phys *phys);
+ void (*handle_underrun_virt)(struct drm_encoder *,
+ struct dpu_encoder_phys *phys);
+ void (*handle_frame_done)(struct drm_encoder *,
+ struct dpu_encoder_phys *phys, u32 event);
+};
+
+/**
+ * struct dpu_encoder_phys_ops - Interface the physical encoders provide to
+ * the containing virtual encoder.
+ * @late_register: DRM Call. Add Userspace interfaces, debugfs.
+ * @prepare_commit: MSM Atomic Call, start of atomic commit sequence
+ * @is_master: Whether this phys_enc is the current master
+ * encoder. Can be switched at enable time. Based
+ * on split_role and current mode (CMD/VID).
+ * @atomic_mode_set: DRM Call. Set a DRM mode.
+ * This likely caches the mode, for use at enable.
+ * @enable: DRM Call. Enable a DRM mode.
+ * @disable: DRM Call. Disable mode.
+ * @atomic_check: DRM Call. Atomic check new DRM state.
+ * @destroy: DRM Call. Destroy and release resources.
+ * @control_vblank_irq:	Register/Deregister for VBLANK IRQ
+ * @wait_for_commit_done: Wait for hardware to have flushed the
+ * current pending frames to hardware
+ * @wait_for_tx_complete: Wait for hardware to transfer the pixels
+ * to the panel
+ * @wait_for_vblank: Wait for VBLANK, for sub-driver internal use
+ * @prepare_for_kickoff: Do any work necessary prior to a kickoff
+ * For CMD encoder, may wait for previous tx done
+ * @handle_post_kickoff:	Do any work necessary post-kickoff
+ * @trigger_start: Process start event on physical encoder
+ * @needs_single_flush: Whether encoder slaves need to be flushed
+ * @irq_control: Handler to enable/disable all the encoder IRQs
+ * @prepare_idle_pc:		Let the phys encoder update its vsync_enable
+ *				state before idle power collapse
+ * @restore: Restore all the encoder configs.
+ * @get_line_count:		Obtain current vertical line count
+ * @get_frame_count:		Obtain current frame count
+ * @prepare_wb_job:		Prepare the writeback job before it is queued
+ * @cleanup_wb_job:		Clean up the writeback job once it completes
+ * @is_valid_for_commit:	Check whether the encoder is in a valid state
+ *				to take part in a commit
+ */
+struct dpu_encoder_phys_ops {
+ int (*late_register)(struct dpu_encoder_phys *encoder,
+ struct dentry *debugfs_root);
+ void (*prepare_commit)(struct dpu_encoder_phys *encoder);
+ bool (*is_master)(struct dpu_encoder_phys *encoder);
+ void (*atomic_mode_set)(struct dpu_encoder_phys *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+ void (*enable)(struct dpu_encoder_phys *encoder);
+ void (*disable)(struct dpu_encoder_phys *encoder);
+ int (*atomic_check)(struct dpu_encoder_phys *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+ void (*destroy)(struct dpu_encoder_phys *encoder);
+ int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
+ int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
+ int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
+ int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc);
+ void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc);
+ void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
+ void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
+ bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
+ void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
+ void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
+ void (*restore)(struct dpu_encoder_phys *phys);
+ int (*get_line_count)(struct dpu_encoder_phys *phys);
+ int (*get_frame_count)(struct dpu_encoder_phys *phys);
+ void (*prepare_wb_job)(struct dpu_encoder_phys *phys_enc,
+ struct drm_writeback_job *job);
+ void (*cleanup_wb_job)(struct dpu_encoder_phys *phys_enc,
+ struct drm_writeback_job *job);
+ bool (*is_valid_for_commit)(struct dpu_encoder_phys *phys_enc);
+};
+
+/**
+ * enum dpu_intr_idx - dpu encoder interrupt index
+ * @INTR_IDX_VSYNC: Vsync interrupt for video mode panel
+ * @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel
+ * @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel
+ * @INTR_IDX_CTL_START: Ctl start interrupt for cmd mode panel
+ * @INTR_IDX_RDPTR:    Readpointer done interrupt for cmd mode panel
+ * @INTR_IDX_WB_DONE:  Writeback done interrupt for virtual connector
+ * @INTR_IDX_MAX:      Number of encoder interrupt indices
+ */
+enum dpu_intr_idx {
+ INTR_IDX_VSYNC,
+ INTR_IDX_PINGPONG,
+ INTR_IDX_UNDERRUN,
+ INTR_IDX_CTL_START,
+ INTR_IDX_RDPTR,
+ INTR_IDX_WB_DONE,
+ INTR_IDX_MAX,
+};
+
+/**
+ * struct dpu_encoder_phys - physical encoder that drives a single INTF block
+ * tied to a specific panel / sub-panel. Abstract type, sub-classed by
+ * phys_vid or phys_cmd for video mode or command mode encs respectively.
+ * @parent: Pointer to the containing virtual encoder
+ * @ops: Operations exposed to the virtual encoder
+ * @parent_ops: Callbacks exposed by the parent to the phys_enc
+ * @hw_mdptop: Hardware interface to the top registers
+ * @hw_ctl: Hardware interface to the ctl registers
+ * @hw_pp: Hardware interface to the ping pong registers
+ * @hw_intf: Hardware interface to the intf registers
+ * @hw_wb: Hardware interface to the wb registers
+ * @dpu_kms: Pointer to the dpu_kms top level
+ * @cached_mode: DRM mode cached at mode_set time, acted on in enable
+ * @split_role: Role to play in a split-panel configuration
+ * @intf_mode: Interface mode
+ * @intf_idx: Interface index on dpu hardware
+ * @wb_idx: Writeback index on dpu hardware
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enable_state: Enable state tracking
+ * @vblank_refcount: Reference count of vblank request
+ * @vsync_cnt: Vsync count for the physical encoder
+ * @underrun_cnt: Underrun count for the physical encoder
+ * @pending_kickoff_cnt: Atomic counter tracking the number of kickoffs
+ * vs. the number of done/vblank irqs. Should hover
+ *				between 0-2. Incremented when a new kickoff is
+ * scheduled. Decremented in irq handler
+ * @pending_ctlstart_cnt: Atomic counter tracking the number of ctl start
+ * pending.
+ * @pending_kickoff_wq: Wait queue for blocking until kickoff completes
+ * @irq: IRQ indices
+ */
+struct dpu_encoder_phys {
+ struct drm_encoder *parent;
+ struct dpu_encoder_phys_ops ops;
+ const struct dpu_encoder_virt_ops *parent_ops;
+ struct dpu_hw_mdp *hw_mdptop;
+ struct dpu_hw_ctl *hw_ctl;
+ struct dpu_hw_pingpong *hw_pp;
+ struct dpu_hw_intf *hw_intf;
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_kms *dpu_kms;
+ struct drm_display_mode cached_mode;
+ enum dpu_enc_split_role split_role;
+ enum dpu_intf_mode intf_mode;
+ enum dpu_intf intf_idx;
+ enum dpu_wb wb_idx;
+ spinlock_t *enc_spinlock;
+ enum dpu_enc_enable_state enable_state;
+ atomic_t vblank_refcount;
+ atomic_t vsync_cnt;
+ atomic_t underrun_cnt;
+ atomic_t pending_ctlstart_cnt;
+ atomic_t pending_kickoff_cnt;
+ wait_queue_head_t pending_kickoff_wq;
+ int irq[INTR_IDX_MAX];
+};
+
+static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
+{
+ atomic_inc_return(&phys->pending_ctlstart_cnt);
+ return atomic_inc_return(&phys->pending_kickoff_cnt);
+}
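+
+/*
+ * Note (added for illustration): the counters incremented here are
+ * decremented from IRQ context - pending_ctlstart_cnt in the ctl_start
+ * handler and pending_kickoff_cnt in the pp_done/vblank handlers - and
+ * waiters block on pending_kickoff_wq until the relevant count drains.
+ */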
+
+/**
+ * struct dpu_encoder_phys_wb - sub-class of dpu_encoder_phys to handle
+ * writeback specific operations
+ * @base: Baseclass physical encoder structure
+ * @wbirq_refcount: Reference count of writeback interrupt
+ * @wb_done_timeout_cnt: number of wb done irq timeout errors
+ * @wb_cfg: writeback block config to store fb related details
+ * @wb_conn: backpointer to writeback connector
+ * @wb_job: backpointer to current writeback job
+ * @dest: dpu buffer layout for current writeback output buffer
+ */
+struct dpu_encoder_phys_wb {
+ struct dpu_encoder_phys base;
+ atomic_t wbirq_refcount;
+ int wb_done_timeout_cnt;
+ struct dpu_hw_wb_cfg wb_cfg;
+ struct drm_writeback_connector *wb_conn;
+ struct drm_writeback_job *wb_job;
+ struct dpu_hw_fmt_layout dest;
+};
+
+/**
+ * struct dpu_encoder_phys_cmd - sub-class of dpu_encoder_phys to handle command
+ * mode specific operations
+ * @base: Baseclass physical encoder structure
+ * @stream_sel: Stream selection for multi-stream interfaces
+ * @serialize_wait4pp: serialize wait4pp feature waits for pp_done interrupt
+ * after ctl_start instead of before next frame kickoff
+ * @pp_timeout_report_cnt: number of pingpong done irq timeout errors
+ * @pending_vblank_cnt: Atomic counter tracking pending wait for VBLANK
+ * @pending_vblank_wq: Wait queue for blocking until VBLANK received
+ */
+struct dpu_encoder_phys_cmd {
+ struct dpu_encoder_phys base;
+ int stream_sel;
+ bool serialize_wait4pp;
+ int pp_timeout_report_cnt;
+ atomic_t pending_vblank_cnt;
+ wait_queue_head_t pending_vblank_wq;
+};
+
+/**
+ * struct dpu_enc_phys_init_params - initialization parameters for phys encs
+ * @dpu_kms: Pointer to the dpu_kms top level
+ * @parent: Pointer to the containing virtual encoder
+ * @parent_ops: Callbacks exposed by the parent to the phys_enc
+ * @split_role: Role to play in a split-panel configuration
+ * @intf_idx: Interface index this phys_enc will control
+ * @wb_idx: Writeback index this phys_enc will control
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ */
+struct dpu_enc_phys_init_params {
+ struct dpu_kms *dpu_kms;
+ struct drm_encoder *parent;
+ const struct dpu_encoder_virt_ops *parent_ops;
+ enum dpu_enc_split_role split_role;
+ enum dpu_intf intf_idx;
+ enum dpu_wb wb_idx;
+ spinlock_t *enc_spinlock;
+};
+
+/**
+ * struct dpu_encoder_wait_info - container for passing arguments to irq wait functions
+ * @wq: wait queue structure
+ * @atomic_cnt: wait until atomic_cnt equals zero
+ * @timeout_ms: timeout value in milliseconds
+ */
+struct dpu_encoder_wait_info {
+ wait_queue_head_t *wq;
+ atomic_t *atomic_cnt;
+ s64 timeout_ms;
+};
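+
+/*
+ * A minimal usage sketch (illustrative, mirroring the callers in
+ * dpu_encoder_phys_cmd.c later in this diff):
+ *
+ *	struct dpu_encoder_wait_info wait_info = {
+ *		.wq = &phys_enc->pending_kickoff_wq,
+ *		.atomic_cnt = &phys_enc->pending_kickoff_cnt,
+ *		.timeout_ms = KICKOFF_TIMEOUT_MS,
+ *	};
+ *
+ *	ret = dpu_encoder_helper_wait_for_irq(phys_enc, irq, func, &wait_info);
+ */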
+
+/**
+ * dpu_encoder_phys_vid_init - Construct a new video mode physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+ struct dpu_enc_phys_init_params *p);
+
+/**
+ * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+ struct dpu_enc_phys_init_params *p);
+
+/**
+ * dpu_encoder_phys_wb_init - initialize writeback encoder
+ * @p: Pointer to init info structure with initialization params
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
+ struct dpu_enc_phys_init_params *p);
+
+/**
+ * dpu_encoder_helper_trigger_start - control start helper function
+ * This helper function may be optionally specified by physical
+ * encoders if they require ctl_start triggering.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc);
+
+static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_crtc_state *dpu_cstate;
+
+ if (!phys_enc || phys_enc->enable_state == DPU_ENC_DISABLING)
+ return BLEND_3D_NONE;
+
+ dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
+
+ /* Use merge_3d unless DSC MERGE topology is used */
+ if (phys_enc->split_role == ENC_ROLE_SOLO &&
+ dpu_cstate->num_mixers == CRTC_DUAL_MIXERS &&
+ !dpu_encoder_use_dsc_merge(phys_enc->parent))
+ return BLEND_3D_H_ROW_INT;
+
+ return BLEND_3D_NONE;
+}
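+
+/*
+ * Illustrative note: BLEND_3D_H_ROW_INT is returned when a solo
+ * interface is fed by two layer mixers without DSC merge, so the 3D
+ * mux has to interleave the two mixer outputs into one stream; the
+ * row-interleave interpretation is inferred from the name here, not
+ * stated by this header.
+ */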
+
+/**
+ * dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder
+ * This helper function is used by physical encoder to get DSC blocks mask
+ * used for this encoder.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc);
+
+/**
+ * dpu_encoder_helper_split_config - split display configuration helper function
+ * This helper function may be used by physical encoders to configure
+ * the split display related registers.
+ * @phys_enc: Pointer to physical encoder structure
+ * @interface: enum dpu_intf setting
+ */
+void dpu_encoder_helper_split_config(
+ struct dpu_encoder_phys *phys_enc,
+ enum dpu_intf interface);
+
+/**
+ * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has
+ * timed out, including reporting frame error event to crtc and debug dump
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: Failing interrupt index
+ */
+void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx);
+
+/**
+ * dpu_encoder_helper_wait_for_irq - utility to wait on an irq.
+ *	note: @func may be called back directly if the wait times out
+ * @phys_enc: Pointer to physical encoder structure
+ * @irq: IRQ index
+ * @func: IRQ callback to be called in case of timeout
+ * @wait_info: wait info struct
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+ int irq,
+ void (*func)(void *arg, int irq_idx),
+ struct dpu_encoder_wait_info *wait_info);
+
+/**
+ * dpu_encoder_helper_phys_cleanup - helper to cleanup dpu pipeline
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc);
+
+#endif /* __DPU_ENCODER_PHYS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
new file mode 100644
index 000000000..ae28b2b93
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -0,0 +1,803 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2018, 2020-2021 The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/delay.h>
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_trace.h"
+#include "disp/msm_disp_snapshot.h"
+
+#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_dpu_encoder_phys_cmd(x) \
+ container_of(x, struct dpu_encoder_phys_cmd, base)
+
+#define PP_TIMEOUT_MAX_TRIALS 10
+
+/*
+ * Tearcheck sync start and continue thresholds are empirically found
+ * based on common panels. In the future, we may want to allow panels to
+ * override these default values.
+ */
+#define DEFAULT_TEARCHECK_SYNC_THRESH_START 4
+#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE 4
+
+#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
+
+#define DPU_ENC_MAX_POLL_TIMEOUT_US 2000
+
+static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
+{
+ return (phys_enc->split_role != ENC_ROLE_SLAVE);
+}
+
+static void _dpu_encoder_phys_cmd_update_intf_cfg(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_intf_cfg intf_cfg = { 0 };
+
+ ctl = phys_enc->hw_ctl;
+ if (!ctl->ops.setup_intf_cfg)
+ return;
+
+ intf_cfg.intf = phys_enc->intf_idx;
+ intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
+ intf_cfg.stream_sel = cmd_enc->stream_sel;
+ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+ ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
+
+ /* setup which pp blk will connect to this intf */
+ if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) && phys_enc->hw_intf->ops.bind_pingpong_blk)
+ phys_enc->hw_intf->ops.bind_pingpong_blk(
+ phys_enc->hw_intf,
+ true,
+ phys_enc->hw_pp->idx);
+}
+
+static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ unsigned long lock_flags;
+ int new_cnt;
+ u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
+
+ if (!phys_enc->hw_pp)
+ return;
+
+ DPU_ATRACE_BEGIN("pp_done_irq");
+ /* notify all synchronous clients first, then asynchronous clients */
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(phys_enc->parent,
+ phys_enc, event);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ trace_dpu_enc_phys_cmd_pp_tx_done(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ new_cnt, event);
+
+ /* Signal any waiting atomic commit thread */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+ DPU_ATRACE_END("pp_done_irq");
+}
+
+static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc->hw_pp)
+ return;
+
+ DPU_ATRACE_BEGIN("rd_ptr_irq");
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (phys_enc->parent_ops->handle_vblank_virt)
+ phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
+ phys_enc);
+
+ atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
+ wake_up_all(&cmd_enc->pending_vblank_wq);
+ DPU_ATRACE_END("rd_ptr_irq");
+}
+
+static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+
+ DPU_ATRACE_BEGIN("ctl_start_irq");
+
+ atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
+
+ /* Signal any waiting ctl start interrupt */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+ DPU_ATRACE_END("ctl_start_irq");
+}
+
+static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+
+ if (phys_enc->parent_ops->handle_underrun_virt)
+ phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
+ phys_enc);
+}
+
+static void dpu_encoder_phys_cmd_atomic_mode_set(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start;
+
+ phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done;
+
+ phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr;
+
+ phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
+}
+
+static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
+ bool do_log = false;
+ struct drm_encoder *drm_enc;
+
+ if (!phys_enc->hw_pp)
+ return -EINVAL;
+
+ drm_enc = phys_enc->parent;
+
+ cmd_enc->pp_timeout_report_cnt++;
+ if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
+ frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD;
+ do_log = true;
+ } else if (cmd_enc->pp_timeout_report_cnt == 1) {
+ do_log = true;
+ }
+
+ trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(drm_enc),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ cmd_enc->pp_timeout_report_cnt,
+ atomic_read(&phys_enc->pending_kickoff_cnt),
+ frame_event);
+
+ /* to avoid flooding, only log first time, and "dead" time */
+ if (do_log) {
+ DRM_ERROR("id:%d pp:%d kickoff timeout %d cnt %d koff_cnt %d\n",
+ DRMID(drm_enc),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ phys_enc->hw_ctl->idx - CTL_0,
+ cmd_enc->pp_timeout_report_cnt,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
+ msm_disp_snapshot_state(drm_enc->dev);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_RDPTR]);
+ }
+
+ atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+
+ /* request a ctl reset before the next kickoff */
+ phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;
+
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ drm_enc, phys_enc, frame_event);
+
+ return -ETIMEDOUT;
+}
+
+static int _dpu_encoder_phys_cmd_wait_for_idle(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_encoder_wait_info wait_info;
+ int ret;
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc,
+ phys_enc->irq[INTR_IDX_PINGPONG],
+ dpu_encoder_phys_cmd_pp_tx_done_irq,
+ &wait_info);
+ if (ret == -ETIMEDOUT)
+ _dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
+ else if (!ret)
+ cmd_enc->pp_timeout_report_cnt = 0;
+
+ return ret;
+}
+
+static int dpu_encoder_phys_cmd_control_vblank_irq(
+ struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ int ret = 0;
+ int refcount;
+
+ if (!phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ refcount = atomic_read(&phys_enc->vblank_refcount);
+
+ /* Slave encoders don't report vblank */
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ goto end;
+
+ /* protect against negative */
+ if (!enable && refcount == 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ enable ? "true" : "false", refcount);
+
+ if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+ ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_RDPTR],
+ dpu_encoder_phys_cmd_pp_rd_ptr_irq,
+ phys_enc);
+ else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+ ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_RDPTR]);
+
+end:
+ if (ret) {
+ DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
+ DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0, ret,
+ enable ? "true" : "false", refcount);
+ }
+
+ return ret;
+}
+
+static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ enable, atomic_read(&phys_enc->vblank_refcount));
+
+ if (enable) {
+ dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_PINGPONG],
+ dpu_encoder_phys_cmd_pp_tx_done_irq,
+ phys_enc);
+ dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_UNDERRUN],
+ dpu_encoder_phys_cmd_underrun_irq,
+ phys_enc);
+ dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
+
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
+ dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_CTL_START],
+ dpu_encoder_phys_cmd_ctl_start_irq,
+ phys_enc);
+ } else {
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_CTL_START]);
+
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_UNDERRUN]);
+ dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_PINGPONG]);
+ }
+}
+
+static void dpu_encoder_phys_cmd_tearcheck_config(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_hw_tear_check tc_cfg = { 0 };
+ struct drm_display_mode *mode;
+ bool tc_enable = true;
+ u32 vsync_hz;
+ struct dpu_kms *dpu_kms;
+
+ if (!phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ mode = &phys_enc->cached_mode;
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+ if (!phys_enc->hw_pp->ops.setup_tearcheck ||
+ !phys_enc->hw_pp->ops.enable_tearcheck) {
+ DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
+ return;
+ }
+
+ dpu_kms = phys_enc->dpu_kms;
+
+ /*
+	 * TE default: dsi byte clock calculated based on 70 fps;
+	 * around 14 ms to complete a kickoff cycle if te disabled;
+	 * vclk_line based on 60 fps; write is faster than read;
+	 * init == start == rdptr;
+	 *
+	 * vsync_count is the MDP VSYNC clock frequency divided by the
+	 * product of the panel's total line count and its refresh rate.
+ */
+ vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync");
+	if (!vsync_hz) {
+ DPU_DEBUG_CMDENC(cmd_enc, "invalid - vsync_hz %u\n",
+ vsync_hz);
+ return;
+ }
+
+ tc_cfg.vsync_count = vsync_hz /
+ (mode->vtotal * drm_mode_vrefresh(mode));
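+
+	/*
+	 * For example (made-up numbers): a 19.2 MHz vsync clock with a
+	 * mode of vtotal = 2000 lines at 60 fps gives vsync_count =
+	 * 19200000 / (2000 * 60) = 160 clock ticks per line.
+	 */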
+
+ /*
+ * Set the sync_cfg_height to twice vtotal so that if we lose a
+ * TE event coming from the display TE pin we won't stall immediately
+ */
+ tc_cfg.hw_vsync_mode = 1;
+ tc_cfg.sync_cfg_height = mode->vtotal * 2;
+ tc_cfg.vsync_init_val = mode->vdisplay;
+ tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
+ tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
+ tc_cfg.start_pos = mode->vdisplay;
+ tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
+
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
+ mode->vtotal, drm_mode_vrefresh(mode));
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d enable %u start_pos %u rd_ptr_irq %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
+ tc_cfg.rd_ptr_irq);
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode,
+ tc_cfg.vsync_count, tc_cfg.vsync_init_val);
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d cfgheight %u thresh_start %u thresh_cont %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height,
+ tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);
+
+ phys_enc->hw_pp->ops.setup_tearcheck(phys_enc->hw_pp, &tc_cfg);
+ phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
+}
+
+static void _dpu_encoder_phys_cmd_pingpong_config(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != NULL);
+ return;
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
+ phys_enc->hw_pp->idx - PINGPONG_0);
+ drm_mode_debug_printmodeline(&phys_enc->cached_mode);
+
+ _dpu_encoder_phys_cmd_update_intf_cfg(phys_enc);
+ dpu_encoder_phys_cmd_tearcheck_config(phys_enc);
+}
+
+static bool dpu_encoder_phys_cmd_needs_single_flush(
+ struct dpu_encoder_phys *phys_enc)
+{
+	/*
+ * we do separate flush for each CTL and let
+ * CTL_START synchronize them
+ */
+ return false;
+}
+
+static void dpu_encoder_phys_cmd_enable_helper(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *ctl;
+
+ if (!phys_enc->hw_pp) {
+ DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
+ return;
+ }
+
+ dpu_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
+
+ _dpu_encoder_phys_cmd_pingpong_config(phys_enc);
+
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return;
+
+ ctl = phys_enc->hw_ctl;
+ ctl->ops.update_pending_flush_intf(ctl, phys_enc->intf_idx);
+}
+
+static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc->hw_pp) {
+ DPU_ERROR("invalid phys encoder\n");
+ return;
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+ if (phys_enc->enable_state == DPU_ENC_ENABLED) {
+ DPU_ERROR("already enabled\n");
+ return;
+ }
+
+ dpu_encoder_phys_cmd_enable_helper(phys_enc);
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+static void _dpu_encoder_phys_cmd_connect_te(
+ struct dpu_encoder_phys *phys_enc, bool enable)
+{
+ if (!phys_enc->hw_pp || !phys_enc->hw_pp->ops.connect_external_te)
+ return;
+
+ trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
+ phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
+}
+
+static void dpu_encoder_phys_cmd_prepare_idle_pc(
+ struct dpu_encoder_phys *phys_enc)
+{
+ _dpu_encoder_phys_cmd_connect_te(phys_enc, false);
+}
+
+static int dpu_encoder_phys_cmd_get_line_count(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_pingpong *hw_pp;
+
+ if (!phys_enc->hw_pp)
+ return -EINVAL;
+
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return -EINVAL;
+
+ hw_pp = phys_enc->hw_pp;
+ if (!hw_pp->ops.get_line_count)
+ return -EINVAL;
+
+ return hw_pp->ops.get_line_count(hw_pp);
+}
+
+static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_hw_ctl *ctl;
+
+ if (!phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ phys_enc->enable_state);
+
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DPU_ERROR_CMDENC(cmd_enc, "already disabled\n");
+ return;
+ }
+
+ if (phys_enc->hw_pp->ops.enable_tearcheck)
+ phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, false);
+
+ if (phys_enc->hw_intf->ops.bind_pingpong_blk) {
+ phys_enc->hw_intf->ops.bind_pingpong_blk(
+ phys_enc->hw_intf,
+ false,
+ phys_enc->hw_pp->idx);
+
+ ctl = phys_enc->hw_ctl;
+ ctl->ops.update_pending_flush_intf(ctl, phys_enc->intf_idx);
+ }
+
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ kfree(cmd_enc);
+}
+
+static void dpu_encoder_phys_cmd_prepare_for_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ int ret;
+
+ if (!phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ DRM_DEBUG_KMS("id:%u pp:%d pending_cnt:%d\n", DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
+
+ /*
+	 * Mark kickoff request as outstanding. If there is more than one
+	 * outstanding, we have to wait for the previous one to complete.
+ */
+ ret = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
+ if (ret) {
+ /* force pending_kickoff_cnt 0 to discard failed kickoff */
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ DRM_ERROR("failed wait_for_idle: id:%u ret:%d pp:%d\n",
+ DRMID(phys_enc->parent), ret,
+ phys_enc->hw_pp->idx - PINGPONG_0);
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
+}
+
+static bool dpu_encoder_phys_cmd_is_ongoing_pptx(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_pp_vsync_info info;
+
+ if (!phys_enc)
+ return false;
+
+ phys_enc->hw_pp->ops.get_vsync_info(phys_enc->hw_pp, &info);
+ if (info.wr_ptr_line_count > 0 &&
+ info.wr_ptr_line_count < phys_enc->cached_mode.vdisplay)
+ return true;
+
+ return false;
+}
+
+static void dpu_encoder_phys_cmd_prepare_commit(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ int trial = 0;
+
+ if (!phys_enc)
+ return;
+ if (!phys_enc->hw_pp)
+ return;
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return;
+
+ /* If autorefresh is already disabled, we have nothing to do */
+ if (!phys_enc->hw_pp->ops.get_autorefresh(phys_enc->hw_pp, NULL))
+ return;
+
+ /*
+ * If autorefresh is enabled, disable it and make sure it is safe to
+	 * proceed with current frame commit/push. The sequence followed is:
+	 * 1. Disable TE
+	 * 2. Disable autorefresh config
+	 * 3. Poll for frame transfer ongoing to be false
+	 * 4. Enable TE back
+ */
+ _dpu_encoder_phys_cmd_connect_te(phys_enc, false);
+ phys_enc->hw_pp->ops.setup_autorefresh(phys_enc->hw_pp, 0, false);
+
+ do {
+ udelay(DPU_ENC_MAX_POLL_TIMEOUT_US);
+ if ((trial * DPU_ENC_MAX_POLL_TIMEOUT_US)
+ > (KICKOFF_TIMEOUT_MS * USEC_PER_MSEC)) {
+ DPU_ERROR_CMDENC(cmd_enc,
+ "disable autorefresh failed\n");
+ break;
+ }
+
+ trial++;
+ } while (dpu_encoder_phys_cmd_is_ongoing_pptx(phys_enc));
+
+ _dpu_encoder_phys_cmd_connect_te(phys_enc, true);
+
+ DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc),
+ "disabled autorefresh\n");
+}
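+
+/*
+ * Note (illustrative): the polling loop above bounds the wait to
+ * roughly KICKOFF_TIMEOUT_MS overall - with DPU_ENC_MAX_POLL_TIMEOUT_US
+ * = 2000 and KICKOFF_TIMEOUT_MS = 84, that is about 84000 / 2000 = 42
+ * polls of 2 ms each before giving up.
+ */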
+
+static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_encoder_wait_info wait_info;
+ int ret;
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc,
+ phys_enc->irq[INTR_IDX_CTL_START],
+ dpu_encoder_phys_cmd_ctl_start_irq,
+ &wait_info);
+ if (ret == -ETIMEDOUT) {
+ DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
+ ret = -EINVAL;
+ } else if (!ret)
+ ret = 0;
+
+ return ret;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_tx_complete(
+ struct dpu_encoder_phys *phys_enc)
+{
+ int rc;
+
+ rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
+ if (rc) {
+ DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
+ DRMID(phys_enc->parent), rc,
+ phys_enc->intf_idx - INTF_0);
+ }
+
+ return rc;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_commit_done(
+ struct dpu_encoder_phys *phys_enc)
+{
+ /* only required for master controller */
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return 0;
+
+ if (phys_enc->hw_ctl->ops.is_started(phys_enc->hw_ctl))
+ return dpu_encoder_phys_cmd_wait_for_tx_complete(phys_enc);
+
+ return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
+}
+
+static int dpu_encoder_phys_cmd_wait_for_vblank(
+ struct dpu_encoder_phys *phys_enc)
+{
+ int rc = 0;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+ struct dpu_encoder_wait_info wait_info;
+
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ /* only required for master controller */
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return rc;
+
+ wait_info.wq = &cmd_enc->pending_vblank_wq;
+ wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ atomic_inc(&cmd_enc->pending_vblank_cnt);
+
+ rc = dpu_encoder_helper_wait_for_irq(phys_enc,
+ phys_enc->irq[INTR_IDX_RDPTR],
+ dpu_encoder_phys_cmd_pp_rd_ptr_irq,
+ &wait_info);
+
+ return rc;
+}
+
+static void dpu_encoder_phys_cmd_handle_post_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+	/*
+ * re-enable external TE, either for the first time after enabling
+ * or if disabled for Autorefresh
+ */
+ _dpu_encoder_phys_cmd_connect_te(phys_enc, true);
+}
+
+static void dpu_encoder_phys_cmd_trigger_start(
+ struct dpu_encoder_phys *phys_enc)
+{
+ dpu_encoder_helper_trigger_start(phys_enc);
+}
+
+static void dpu_encoder_phys_cmd_init_ops(
+ struct dpu_encoder_phys_ops *ops)
+{
+ ops->prepare_commit = dpu_encoder_phys_cmd_prepare_commit;
+ ops->is_master = dpu_encoder_phys_cmd_is_master;
+ ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set;
+ ops->enable = dpu_encoder_phys_cmd_enable;
+ ops->disable = dpu_encoder_phys_cmd_disable;
+ ops->destroy = dpu_encoder_phys_cmd_destroy;
+ ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
+ ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
+ ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
+ ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
+ ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
+ ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
+ ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
+ ops->irq_control = dpu_encoder_phys_cmd_irq_control;
+ ops->restore = dpu_encoder_phys_cmd_enable_helper;
+ ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
+ ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
+ ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
+}
+
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+ struct dpu_enc_phys_init_params *p)
+{
+ struct dpu_encoder_phys *phys_enc = NULL;
+ struct dpu_encoder_phys_cmd *cmd_enc = NULL;
+ int i, ret = 0;
+
+ DPU_DEBUG("intf %d\n", p->intf_idx - INTF_0);
+
+ cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
+ if (!cmd_enc) {
+ ret = -ENOMEM;
+ DPU_ERROR("failed to allocate\n");
+ return ERR_PTR(ret);
+ }
+ phys_enc = &cmd_enc->base;
+ phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
+ phys_enc->intf_idx = p->intf_idx;
+
+ dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
+ phys_enc->parent = p->parent;
+ phys_enc->parent_ops = p->parent_ops;
+ phys_enc->dpu_kms = p->dpu_kms;
+ phys_enc->split_role = p->split_role;
+ phys_enc->intf_mode = INTF_MODE_CMD;
+ phys_enc->enc_spinlock = p->enc_spinlock;
+ cmd_enc->stream_sel = 0;
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+ for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
+ phys_enc->irq[i] = -EINVAL;
+
+ atomic_set(&phys_enc->vblank_refcount, 0);
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
+ atomic_set(&cmd_enc->pending_vblank_cnt, 0);
+ init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+ init_waitqueue_head(&cmd_enc->pending_vblank_wq);
+
+ DPU_DEBUG_CMDENC(cmd_enc, "created\n");
+
+ return phys_enc;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
new file mode 100644
index 000000000..2c1464666
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -0,0 +1,716 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, 2020-2021 The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_merge3d.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_trace.h"
+#include "disp/msm_disp_snapshot.h"
+
+#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+ (e) && (e)->parent ? \
+ (e)->parent->base.id : -1, \
+ (e) && (e)->hw_intf ? \
+ (e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_VIDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
+ (e) && (e)->parent ? \
+ (e)->parent->base.id : -1, \
+ (e) && (e)->hw_intf ? \
+ (e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_dpu_encoder_phys_vid(x) \
+ container_of(x, struct dpu_encoder_phys_vid, base)
+
+static bool dpu_encoder_phys_vid_is_master(
+ struct dpu_encoder_phys *phys_enc)
+{
+ bool ret = false;
+
+ if (phys_enc->split_role != ENC_ROLE_SLAVE)
+ ret = true;
+
+ return ret;
+}
+
+static void drm_mode_to_intf_timing_params(
+ const struct dpu_encoder_phys *phys_enc,
+ const struct drm_display_mode *mode,
+ struct intf_timing_params *timing)
+{
+ memset(timing, 0, sizeof(*timing));
+
+ if ((mode->htotal < mode->hsync_end)
+ || (mode->hsync_start < mode->hdisplay)
+ || (mode->vtotal < mode->vsync_end)
+ || (mode->vsync_start < mode->vdisplay)
+ || (mode->hsync_end < mode->hsync_start)
+ || (mode->vsync_end < mode->vsync_start)) {
+ DPU_ERROR(
+ "invalid params - hstart:%d,hend:%d,htot:%d,hdisplay:%d\n",
+ mode->hsync_start, mode->hsync_end,
+ mode->htotal, mode->hdisplay);
+ DPU_ERROR("vstart:%d,vend:%d,vtot:%d,vdisplay:%d\n",
+ mode->vsync_start, mode->vsync_end,
+ mode->vtotal, mode->vdisplay);
+ return;
+ }
+
+ /*
+ * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
+ * Active Region Front Porch Sync Back Porch
+ * <-----------------><------------><-----><----------->
+ * <- [hv]display --->
+ * <--------- [hv]sync_start ------>
+ * <----------------- [hv]sync_end ------->
+ * <---------------------------- [hv]total ------------->
+ */
+ timing->width = mode->hdisplay; /* active width */
+ timing->height = mode->vdisplay; /* active height */
+ timing->xres = timing->width;
+ timing->yres = timing->height;
+ timing->h_back_porch = mode->htotal - mode->hsync_end;
+ timing->h_front_porch = mode->hsync_start - mode->hdisplay;
+ timing->v_back_porch = mode->vtotal - mode->vsync_end;
+ timing->v_front_porch = mode->vsync_start - mode->vdisplay;
+ timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
+ timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
+ timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
+ timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+ timing->border_clr = 0;
+ timing->underflow_clr = 0xff;
+ timing->hsync_skew = mode->hskew;
+
+ /* DSI controller cannot handle active-low sync signals. */
+ if (phys_enc->hw_intf->cap->type == INTF_DSI) {
+ timing->hsync_polarity = 0;
+ timing->vsync_polarity = 0;
+ }
+
+ /* for DP/EDP, Shift timings to align it to bottom right */
+ if (phys_enc->hw_intf->cap->type == INTF_DP) {
+ timing->h_back_porch += timing->h_front_porch;
+ timing->h_front_porch = 0;
+ timing->v_back_porch += timing->v_front_porch;
+ timing->v_front_porch = 0;
+ }
+
+ timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
+
+ /*
+	 * for DP, divide the horizontal parameters by 2 when
+ * widebus is enabled
+ */
+ if (phys_enc->hw_intf->cap->type == INTF_DP && timing->wide_bus_en) {
+ timing->width = timing->width >> 1;
+ timing->xres = timing->xres >> 1;
+ timing->h_back_porch = timing->h_back_porch >> 1;
+ timing->h_front_porch = timing->h_front_porch >> 1;
+ timing->hsync_pulse_width = timing->hsync_pulse_width >> 1;
+ }
+}
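+
+/*
+ * E.g. (hypothetical numbers): a 3840x2160 DP mode with widebus enabled
+ * ends up with width = 1920 and correspondingly halved horizontal
+ * porches and sync pulse, since the widebus interface pushes two pixels
+ * per clock.
+ */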
+
+static u32 get_horizontal_total(const struct intf_timing_params *timing)
+{
+ u32 active = timing->xres;
+ u32 inactive =
+ timing->h_back_porch + timing->h_front_porch +
+ timing->hsync_pulse_width;
+ return active + inactive;
+}
+
+static u32 get_vertical_total(const struct intf_timing_params *timing)
+{
+ u32 active = timing->yres;
+ u32 inactive =
+ timing->v_back_porch + timing->v_front_porch +
+ timing->vsync_pulse_width;
+ return active + inactive;
+}
+
+/*
+ * programmable_fetch_get_num_lines:
+ * Number of fetch lines in vertical front porch
+ * @timing: Pointer to the intf timing information for the requested mode
+ *
+ * Returns the number of fetch lines in vertical front porch at which mdp
+ * can start fetching the next frame.
+ *
+ * Number of needed prefetch lines is anything that cannot be absorbed in the
+ * start of frame time (back porch + vsync pulse width).
+ *
+ * Some panels have very large VFP, however we only need a total number of
+ * lines based on the chip worst case latencies.
+ */
+static u32 programmable_fetch_get_num_lines(
+ struct dpu_encoder_phys *phys_enc,
+ const struct intf_timing_params *timing)
+{
+ u32 worst_case_needed_lines =
+ phys_enc->hw_intf->cap->prog_fetch_lines_worst_case;
+ u32 start_of_frame_lines =
+ timing->v_back_porch + timing->vsync_pulse_width;
+ u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
+ u32 actual_vfp_lines = 0;
+
+ /* Fetch must be outside active lines, otherwise undefined. */
+ if (start_of_frame_lines >= worst_case_needed_lines) {
+ DPU_DEBUG_VIDENC(phys_enc,
+ "prog fetch is not needed, large vbp+vsw\n");
+ actual_vfp_lines = 0;
+ } else if (timing->v_front_porch < needed_vfp_lines) {
+ /* Warn fetch needed, but not enough porch in panel config */
+		pr_warn_once("low vbp+vfp may lead to perf issues in some cases\n");
+ DPU_DEBUG_VIDENC(phys_enc,
+ "less vfp than fetch req, using entire vfp\n");
+ actual_vfp_lines = timing->v_front_porch;
+ } else {
+ DPU_DEBUG_VIDENC(phys_enc, "room in vfp for needed prefetch\n");
+ actual_vfp_lines = needed_vfp_lines;
+ }
+
+ DPU_DEBUG_VIDENC(phys_enc,
+ "v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
+ timing->v_front_porch, timing->v_back_porch,
+ timing->vsync_pulse_width);
+ DPU_DEBUG_VIDENC(phys_enc,
+ "wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
+ worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
+
+ return actual_vfp_lines;
+}
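+
+/*
+ * Worked example (made-up numbers): with prog_fetch_lines_worst_case =
+ * 33 and v_back_porch + vsync_pulse_width = 10, needed_vfp_lines = 23;
+ * a panel with v_front_porch = 30 yields 23 fetch lines, while one with
+ * v_front_porch = 15 is capped at 15 and triggers the warning above.
+ */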
+
+/*
+ * programmable_fetch_config: Programs HW to prefetch lines by offsetting
+ * the start of fetch into the vertical front porch for cases where the
+ * vsync pulse width and vertical back porch time is insufficient
+ *
+ * Gets # of lines to pre-fetch, then calculate VSYNC counter value.
+ * HW layer requires VSYNC counter of first pixel of tgt VFP line.
+ *
+ * @timing: Pointer to the intf timing information for the requested mode
+ */
+static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
+ const struct intf_timing_params *timing)
+{
+ struct intf_prog_fetch f = { 0 };
+ u32 vfp_fetch_lines = 0;
+ u32 horiz_total = 0;
+ u32 vert_total = 0;
+ u32 vfp_fetch_start_vsync_counter = 0;
+ unsigned long lock_flags;
+
+ if (WARN_ON_ONCE(!phys_enc->hw_intf->ops.setup_prg_fetch))
+ return;
+
+ vfp_fetch_lines = programmable_fetch_get_num_lines(phys_enc, timing);
+ if (vfp_fetch_lines) {
+ vert_total = get_vertical_total(timing);
+ horiz_total = get_horizontal_total(timing);
+ vfp_fetch_start_vsync_counter =
+ (vert_total - vfp_fetch_lines) * horiz_total + 1;
+ f.enable = 1;
+ f.fetch_start = vfp_fetch_start_vsync_counter;
+ }
+
+ DPU_DEBUG_VIDENC(phys_enc,
+ "vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
+ vfp_fetch_lines, vfp_fetch_start_vsync_counter);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ phys_enc->hw_intf->ops.setup_prg_fetch(phys_enc->hw_intf, &f);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+}
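+
+/*
+ * Worked example (made-up numbers): for vert_total = 2000, horiz_total
+ * = 2200 and vfp_fetch_lines = 24, fetch starts at vsync counter
+ * (2000 - 24) * 2200 + 1 = 4347201, the first pixel of the target VFP
+ * line.
+ */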
+
+static void dpu_encoder_phys_vid_setup_timing_engine(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct drm_display_mode mode;
+ struct intf_timing_params timing_params = { 0 };
+ const struct dpu_format *fmt = NULL;
+ u32 fmt_fourcc = DRM_FORMAT_RGB888;
+ unsigned long lock_flags;
+ struct dpu_hw_intf_cfg intf_cfg = { 0 };
+
+ if (!phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ DPU_ERROR("invalid encoder %d\n", phys_enc != NULL);
+ return;
+ }
+
+ mode = phys_enc->cached_mode;
+ if (!phys_enc->hw_intf->ops.setup_timing_gen) {
+ DPU_ERROR("timing engine setup is not supported\n");
+ return;
+ }
+
+ DPU_DEBUG_VIDENC(phys_enc, "enabling mode:\n");
+ drm_mode_debug_printmodeline(&mode);
+
+ if (phys_enc->split_role != ENC_ROLE_SOLO) {
+ mode.hdisplay >>= 1;
+ mode.htotal >>= 1;
+ mode.hsync_start >>= 1;
+ mode.hsync_end >>= 1;
+
+ DPU_DEBUG_VIDENC(phys_enc,
+ "split_role %d, halve horizontal %d %d %d %d\n",
+ phys_enc->split_role,
+ mode.hdisplay, mode.htotal,
+ mode.hsync_start, mode.hsync_end);
+ }
+
+ drm_mode_to_intf_timing_params(phys_enc, &mode, &timing_params);
+
+ fmt = dpu_get_dpu_format(fmt_fourcc);
+ DPU_DEBUG_VIDENC(phys_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
+
+ intf_cfg.intf = phys_enc->hw_intf->idx;
+ intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
+ intf_cfg.stream_sel = 0; /* Don't care value for video mode */
+ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+ if (phys_enc->hw_pp->merge_3d)
+ intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf,
+ &timing_params, fmt);
+ phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+
+ /* setup which pp blk will connect to this intf */
+ if (phys_enc->hw_intf->ops.bind_pingpong_blk)
+ phys_enc->hw_intf->ops.bind_pingpong_blk(
+ phys_enc->hw_intf,
+ true,
+ phys_enc->hw_pp->idx);
+
+ if (phys_enc->hw_pp->merge_3d)
+ phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, intf_cfg.mode_3d);
+
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ programmable_fetch_config(phys_enc, &timing_params);
+}
+
+static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_hw_ctl *hw_ctl;
+ unsigned long lock_flags;
+ u32 flush_register = 0;
+
+ hw_ctl = phys_enc->hw_ctl;
+
+ DPU_ATRACE_BEGIN("vblank_irq");
+
+ if (phys_enc->parent_ops->handle_vblank_virt)
+ phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
+ phys_enc);
+
+ atomic_read(&phys_enc->pending_kickoff_cnt);
+
+ /*
+ * only decrement the pending flush count if we've actually flushed
+ * hardware. due to sw irq latency, vblank may have already happened
+ * so we need to double-check with hw that it accepted the flush bits
+ */
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ if (hw_ctl->ops.get_flush_register)
+ flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
+
+ if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
+ atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ /* Signal any waiting atomic commit thread */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+
+ phys_enc->parent_ops->handle_frame_done(phys_enc->parent, phys_enc,
+ DPU_ENCODER_FRAME_EVENT_DONE);
+
+ DPU_ATRACE_END("vblank_irq");
+}
+
+static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+
+ if (phys_enc->parent_ops->handle_underrun_virt)
+ phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
+ phys_enc);
+}
+
+static bool dpu_encoder_phys_vid_needs_single_flush(
+ struct dpu_encoder_phys *phys_enc)
+{
+ return phys_enc->split_role != ENC_ROLE_SOLO;
+}
+
+static void dpu_encoder_phys_vid_atomic_mode_set(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ phys_enc->irq[INTR_IDX_VSYNC] = phys_enc->hw_intf->cap->intr_vsync;
+
+ phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
+}
+
+static int dpu_encoder_phys_vid_control_vblank_irq(
+ struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ int ret = 0;
+ int refcount;
+
+ refcount = atomic_read(&phys_enc->vblank_refcount);
+
+ /* Slave encoders don't report vblank */
+ if (!dpu_encoder_phys_vid_is_master(phys_enc))
+ goto end;
+
+ /* protect against negative */
+ if (!enable && refcount == 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ DRM_DEBUG_VBL("id:%u enable=%d/%d\n", DRMID(phys_enc->parent), enable,
+ atomic_read(&phys_enc->vblank_refcount));
+
+ if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+ ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_VSYNC],
+ dpu_encoder_phys_vid_vblank_irq,
+ phys_enc);
+ else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+ ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_VSYNC]);
+
+end:
+ if (ret) {
+ DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n",
+ DRMID(phys_enc->parent),
+ phys_enc->hw_intf->idx - INTF_0, ret, enable,
+ refcount);
+ }
+ return ret;
+}
+
+static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *ctl;
+
+ ctl = phys_enc->hw_ctl;
+
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
+
+ if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
+ return;
+
+ dpu_encoder_helper_split_config(phys_enc, phys_enc->hw_intf->idx);
+
+ dpu_encoder_phys_vid_setup_timing_engine(phys_enc);
+
+ /*
+ * For single flush cases (dual-ctl or pp-split), skip setting the
+ * flush bit for the slave intf, since both intfs use same ctl
+ * and HW will only flush the master.
+ */
+ if (dpu_encoder_phys_vid_needs_single_flush(phys_enc) &&
+ !dpu_encoder_phys_vid_is_master(phys_enc))
+ goto skip_flush;
+
+ ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
+ if (ctl->ops.update_pending_flush_merge_3d && phys_enc->hw_pp->merge_3d)
+ ctl->ops.update_pending_flush_merge_3d(ctl, phys_enc->hw_pp->merge_3d->idx);
+
+skip_flush:
+ DPU_DEBUG_VIDENC(phys_enc,
+ "update pending flush ctl %d intf %d\n",
+ ctl->idx - CTL_0, phys_enc->hw_intf->idx);
+
+ atomic_set(&phys_enc->underrun_cnt, 0);
+
+ /* ctl_flush & timing engine enable will be triggered by framework */
+ if (phys_enc->enable_state == DPU_ENC_DISABLED)
+ phys_enc->enable_state = DPU_ENC_ENABLING;
+}
+
+static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
+{
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
+ kfree(phys_enc);
+}
+
+static int dpu_encoder_phys_vid_wait_for_vblank(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_wait_info wait_info;
+ int ret;
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+	if (!dpu_encoder_phys_vid_is_master(phys_enc))
+		return 0;
+
+ /* Wait for kickoff to complete */
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc,
+ phys_enc->irq[INTR_IDX_VSYNC],
+ dpu_encoder_phys_vid_vblank_irq,
+ &wait_info);
+
+	if (ret == -ETIMEDOUT)
+		dpu_encoder_helper_report_irq_timeout(phys_enc, INTR_IDX_VSYNC);
+
+ return ret;
+}
+
+static int dpu_encoder_phys_vid_wait_for_commit_done(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *hw_ctl = phys_enc->hw_ctl;
+ int ret;
+
+ if (!hw_ctl)
+ return 0;
+
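+ /*
+ * The flush register clears once the hardware has latched the pending
+ * configuration, so poll it rather than waiting on a dedicated
+ * completion interrupt.
+ */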
+ ret = wait_event_timeout(phys_enc->pending_kickoff_wq,
+ (hw_ctl->ops.get_flush_register(hw_ctl) == 0),
+ msecs_to_jiffies(50));
+ if (ret <= 0) {
+ DPU_ERROR("vblank timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void dpu_encoder_phys_vid_prepare_for_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *ctl;
+ int rc;
+ struct drm_encoder *drm_enc;
+
+ drm_enc = phys_enc->parent;
+
+ ctl = phys_enc->hw_ctl;
+ if (!ctl->ops.wait_reset_status)
+ return;
+
+ /*
+ * Hardware can initiate a CTL reset on its own; before kicking off a
+ * new frame, check for and wait until any such reset has completed.
+ */
+ rc = ctl->ops.wait_reset_status(ctl);
+ if (rc) {
+ DPU_ERROR_VIDENC(phys_enc, "ctl %d reset failure: %d\n",
+ ctl->idx, rc);
+ msm_disp_snapshot_state(drm_enc->dev);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_VSYNC]);
+ }
+}
+
+static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
+{
+ unsigned long lock_flags;
+ int ret;
+
+ if (!phys_enc->parent || !phys_enc->parent->dev) {
+ DPU_ERROR("invalid encoder/device\n");
+ return;
+ }
+
+ if (!phys_enc->hw_intf) {
+ DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
+ phys_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL);
+ return;
+ }
+
+ if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
+ return;
+
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DPU_ERROR("already disabled\n");
+ return;
+ }
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 0);
+ if (dpu_encoder_phys_vid_is_master(phys_enc))
+ dpu_encoder_phys_inc_pending(phys_enc);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ /*
+ * Wait for a vsync so we know the ENABLE=0 latched before the
+ * (connector) source of the vsyncs gets disabled. Otherwise, if we
+ * re-enable before the disable latches, we end up in a state where
+ * some of the settings for the new modeset (such as the new scanout
+ * buffer) do not latch properly.
+ */
+ if (dpu_encoder_phys_vid_is_master(phys_enc)) {
+ ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc);
+ if (ret) {
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
+ DRMID(phys_enc->parent),
+ phys_enc->hw_intf->idx - INTF_0, ret);
+ }
+ }
+
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+static void dpu_encoder_phys_vid_handle_post_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ unsigned long lock_flags;
+
+ /*
+ * Video mode must flush CTL before enabling timing engine
+ * Video encoders need to turn on their interfaces now
+ */
+ if (phys_enc->enable_state == DPU_ENC_ENABLING) {
+ trace_dpu_enc_phys_vid_post_kickoff(DRMID(phys_enc->parent),
+ phys_enc->hw_intf->idx - INTF_0);
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 1);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+ }
+}
+
+static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ int ret;
+
+ trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
+ phys_enc->hw_intf->idx - INTF_0,
+ enable,
+ atomic_read(&phys_enc->vblank_refcount));
+
+ if (enable) {
+ ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true);
+ if (WARN_ON(ret))
+ return;
+
+ dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_UNDERRUN],
+ dpu_encoder_phys_vid_underrun_irq,
+ phys_enc);
+ } else {
+ dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_UNDERRUN]);
+ }
+}
+
+static int dpu_encoder_phys_vid_get_line_count(
+ struct dpu_encoder_phys *phys_enc)
+{
+ if (!dpu_encoder_phys_vid_is_master(phys_enc))
+ return -EINVAL;
+
+ if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_line_count)
+ return -EINVAL;
+
+ return phys_enc->hw_intf->ops.get_line_count(phys_enc->hw_intf);
+}
+
+static int dpu_encoder_phys_vid_get_frame_count(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct intf_status s = {0};
+ u32 fetch_start = 0;
+ struct drm_display_mode mode = phys_enc->cached_mode;
+
+ if (!dpu_encoder_phys_vid_is_master(phys_enc))
+ return -EINVAL;
+
+ if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_status)
+ return -EINVAL;
+
+ phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf, &s);
+
+ if (s.is_prog_fetch_en && s.is_en) {
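+ /*
+ * With programmable fetch enabled, lines between fetch_start and
+ * vtotal are already prefetching the next frame, so report that
+ * frame's count instead.
+ */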
+ fetch_start = mode.vtotal - (mode.vsync_start - mode.vdisplay);
+ if ((s.line_count > fetch_start) &&
+ (s.line_count <= mode.vtotal))
+ return s.frame_count + 1;
+ }
+
+ return s.frame_count;
+}
+
+static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
+{
+ ops->is_master = dpu_encoder_phys_vid_is_master;
+ ops->atomic_mode_set = dpu_encoder_phys_vid_atomic_mode_set;
+ ops->enable = dpu_encoder_phys_vid_enable;
+ ops->disable = dpu_encoder_phys_vid_disable;
+ ops->destroy = dpu_encoder_phys_vid_destroy;
+ ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
+ ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_commit_done;
+ ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->irq_control = dpu_encoder_phys_vid_irq_control;
+ ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
+ ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
+ ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
+ ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
+ ops->get_frame_count = dpu_encoder_phys_vid_get_frame_count;
+}
+
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+ struct dpu_enc_phys_init_params *p)
+{
+ struct dpu_encoder_phys *phys_enc = NULL;
+ int i;
+
+ if (!p) {
+ DPU_ERROR("failed to create encoder due to invalid parameter\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ phys_enc = kzalloc(sizeof(*phys_enc), GFP_KERNEL);
+ if (!phys_enc) {
+ DPU_ERROR("failed to create encoder due to memory allocation error\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
+ phys_enc->intf_idx = p->intf_idx;
+
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
+
+ dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
+ phys_enc->parent = p->parent;
+ phys_enc->parent_ops = p->parent_ops;
+ phys_enc->dpu_kms = p->dpu_kms;
+ phys_enc->split_role = p->split_role;
+ phys_enc->intf_mode = INTF_MODE_VIDEO;
+ phys_enc->enc_spinlock = p->enc_spinlock;
+ for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
+ phys_enc->irq[i] = -EINVAL;
+
+ atomic_set(&phys_enc->vblank_refcount, 0);
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+
+ DPU_DEBUG_VIDENC(phys_enc, "created intf idx:%d\n", p->intf_idx);
+
+ return phys_enc;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
new file mode 100644
index 000000000..42c7e378d
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -0,0 +1,752 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+
+#include <drm/drm_framebuffer.h>
+
+#include "dpu_encoder_phys.h"
+#include "dpu_formats.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_wb.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_merge3d.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_vbif.h"
+#include "dpu_crtc.h"
+#include "disp/msm_disp_snapshot.h"
+
+#define to_dpu_encoder_phys_wb(x) \
+ container_of(x, struct dpu_encoder_phys_wb, base)
+
+/**
+ * dpu_encoder_phys_wb_is_master - report wb always as master encoder
+ * @phys_enc: Pointer to physical encoder
+ */
+static bool dpu_encoder_phys_wb_is_master(struct dpu_encoder_phys *phys_enc)
+{
+ /* there is only one physical enc for dpu_writeback */
+ return true;
+}
+
+/**
+ * dpu_encoder_phys_wb_set_ot_limit - set OT limit for writeback interface
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_set_ot_limit(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
+ struct dpu_vbif_set_ot_params ot_params;
+
+ memset(&ot_params, 0, sizeof(ot_params));
+ ot_params.xin_id = hw_wb->caps->xin_id;
+ ot_params.num = hw_wb->idx - WB_0;
+ ot_params.width = phys_enc->cached_mode.hdisplay;
+ ot_params.height = phys_enc->cached_mode.vdisplay;
+ ot_params.is_wfd = true;
+ ot_params.frame_rate = drm_mode_vrefresh(&phys_enc->cached_mode);
+ ot_params.vbif_idx = hw_wb->caps->vbif_idx;
+ ot_params.clk_ctrl = hw_wb->caps->clk_ctrl;
+ ot_params.rd = false;
+
+ dpu_vbif_set_ot_limit(phys_enc->dpu_kms, &ot_params);
+}
+
+/**
+ * dpu_encoder_phys_wb_set_qos_remap - set QoS remapper for writeback
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_set_qos_remap(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_vbif_set_qos_params qos_params;
+
+ if (!phys_enc || !phys_enc->parent || !phys_enc->parent->crtc) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+
+ if (!phys_enc->hw_wb || !phys_enc->hw_wb->caps) {
+ DPU_ERROR("invalid writeback hardware\n");
+ return;
+ }
+
+ hw_wb = phys_enc->hw_wb;
+
+ memset(&qos_params, 0, sizeof(qos_params));
+ qos_params.vbif_idx = hw_wb->caps->vbif_idx;
+ qos_params.xin_id = hw_wb->caps->xin_id;
+ qos_params.clk_ctrl = hw_wb->caps->clk_ctrl;
+ qos_params.num = hw_wb->idx - WB_0;
+ qos_params.is_rt = false;
+
+ DPU_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d is_rt:%d\n",
+ qos_params.num,
+ qos_params.vbif_idx,
+ qos_params.xin_id, qos_params.is_rt);
+
+ dpu_vbif_set_qos_remap(phys_enc->dpu_kms, &qos_params);
+}
+
+/**
+ * dpu_encoder_phys_wb_set_qos - set QoS/danger/safe LUTs for writeback
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_set_qos(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_hw_wb_qos_cfg qos_cfg;
+ const struct dpu_mdss_cfg *catalog;
+ const struct dpu_qos_lut_tbl *qos_lut_tb;
+
+ if (!phys_enc || !phys_enc->dpu_kms || !phys_enc->dpu_kms->catalog) {
+ DPU_ERROR("invalid parameter(s)\n");
+ return;
+ }
+
+ catalog = phys_enc->dpu_kms->catalog;
+
+ hw_wb = phys_enc->hw_wb;
+
+ memset(&qos_cfg, 0, sizeof(struct dpu_hw_wb_qos_cfg));
+ qos_cfg.danger_safe_en = true;
+ qos_cfg.danger_lut =
+ catalog->perf->danger_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
+
+ qos_cfg.safe_lut = catalog->perf->safe_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
+
+ qos_lut_tb = &catalog->perf->qos_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
+ qos_cfg.creq_lut = _dpu_hw_get_qos_lut(qos_lut_tb, 0);
+
+ if (hw_wb->ops.setup_qos_lut)
+ hw_wb->ops.setup_qos_lut(hw_wb, &qos_cfg);
+}
+
+/**
+ * dpu_encoder_phys_wb_setup_fb - setup output framebuffer
+ * @phys_enc: Pointer to physical encoder
+ * @fb: Pointer to output framebuffer
+ */
+static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
+ struct drm_framebuffer *fb)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_hw_wb_cfg *wb_cfg;
+ struct dpu_hw_cdp_cfg cdp_cfg;
+
+ if (!phys_enc || !phys_enc->dpu_kms || !phys_enc->dpu_kms->catalog) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ hw_wb = phys_enc->hw_wb;
+ wb_cfg = &wb_enc->wb_cfg;
+
+ wb_cfg->intf_mode = phys_enc->intf_mode;
+ wb_cfg->roi.x1 = 0;
+ wb_cfg->roi.x2 = phys_enc->cached_mode.hdisplay;
+ wb_cfg->roi.y1 = 0;
+ wb_cfg->roi.y2 = phys_enc->cached_mode.vdisplay;
+
+ if (hw_wb->ops.setup_roi)
+ hw_wb->ops.setup_roi(hw_wb, wb_cfg);
+
+ if (hw_wb->ops.setup_outformat)
+ hw_wb->ops.setup_outformat(hw_wb, wb_cfg);
+
+ if (hw_wb->ops.setup_cdp) {
+ memset(&cdp_cfg, 0, sizeof(struct dpu_hw_cdp_cfg));
+
+ cdp_cfg.enable = phys_enc->dpu_kms->catalog->perf->cdp_cfg
+ [DPU_PERF_CDP_USAGE_NRT].wr_enable;
+ cdp_cfg.ubwc_meta_enable =
+ DPU_FORMAT_IS_UBWC(wb_cfg->dest.format);
+ cdp_cfg.tile_amortize_enable =
+ DPU_FORMAT_IS_UBWC(wb_cfg->dest.format) ||
+ DPU_FORMAT_IS_TILE(wb_cfg->dest.format);
+ cdp_cfg.preload_ahead = DPU_WB_CDP_PRELOAD_AHEAD_64;
+
+ hw_wb->ops.setup_cdp(hw_wb, &cdp_cfg);
+ }
+
+ if (hw_wb->ops.setup_outaddress)
+ hw_wb->ops.setup_outaddress(hw_wb, wb_cfg);
+}
+
+/**
+ * dpu_encoder_phys_wb_setup_cdp - setup the CTL interface config, 3D merge
+ * mode and pingpong binding for the writeback path
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_setup_cdp(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_hw_ctl *ctl;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ hw_wb = phys_enc->hw_wb;
+ ctl = phys_enc->hw_ctl;
+
+ if (ctl && test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) &&
+ ctl->ops.setup_intf_cfg) {
+ struct dpu_hw_intf_cfg intf_cfg = {0};
+ struct dpu_hw_pingpong *hw_pp = phys_enc->hw_pp;
+ enum dpu_3d_blend_mode mode_3d;
+
+ mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+
+ intf_cfg.intf = DPU_NONE;
+ intf_cfg.wb = hw_wb->idx;
+
+ if (mode_3d && hw_pp && hw_pp->merge_3d)
+ intf_cfg.merge_3d = hw_pp->merge_3d->idx;
+
+ if (hw_pp && hw_pp->merge_3d && hw_pp->merge_3d->ops.setup_3d_mode)
+ hw_pp->merge_3d->ops.setup_3d_mode(hw_pp->merge_3d,
+ mode_3d);
+
+ /* setup which pp blk will connect to this wb */
+ if (hw_pp && hw_wb->ops.bind_pingpong_blk)
+ hw_wb->ops.bind_pingpong_blk(hw_wb, true,
+ hw_pp->idx);
+
+ phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+ } else if (phys_enc->hw_ctl && phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ struct dpu_hw_intf_cfg intf_cfg = {0};
+
+ intf_cfg.intf = DPU_NONE;
+ intf_cfg.wb = hw_wb->idx;
+ intf_cfg.mode_3d =
+ dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+ phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+ }
+}
+
+/**
+ * dpu_encoder_phys_wb_atomic_check - verify and fixup given atomic states
+ * @phys_enc: Pointer to physical encoder
+ * @crtc_state: Pointer to CRTC atomic state
+ * @conn_state: Pointer to connector atomic state
+ */
+static int dpu_encoder_phys_wb_atomic_check(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_framebuffer *fb;
+ const struct drm_display_mode *mode = &crtc_state->mode;
+
+ DPU_DEBUG("[atomic_check:%d, \"%s\",%d,%d]\n",
+ phys_enc->wb_idx, mode->name, mode->hdisplay, mode->vdisplay);
+
+ if (!conn_state || !conn_state->connector) {
+ DPU_ERROR("invalid connector state\n");
+ return -EINVAL;
+ } else if (conn_state->connector->status !=
+ connector_status_connected) {
+ DPU_ERROR("connector not connected %d\n",
+ conn_state->connector->status);
+ return -EINVAL;
+ }
+
+ if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ return 0;
+
+ fb = conn_state->writeback_job->fb;
+
+ DPU_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
+ fb->width, fb->height);
+
+ if (fb->width != mode->hdisplay) {
+ DPU_ERROR("invalid fb w=%d, mode w=%d\n", fb->width,
+ mode->hdisplay);
+ return -EINVAL;
+ } else if (fb->height != mode->vdisplay) {
+ DPU_ERROR("invalid fb h=%d, mode h=%d\n", fb->height,
+ mode->vdisplay);
+ return -EINVAL;
+ } else if (fb->width > phys_enc->hw_wb->caps->maxlinewidth) {
+ DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
+ fb->width, phys_enc->hw_wb->caps->maxlinewidth);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * _dpu_encoder_phys_wb_update_flush - flush hardware update
+ * @phys_enc: Pointer to physical encoder
+ */
+static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_hw_ctl *hw_ctl;
+ struct dpu_hw_pingpong *hw_pp;
+ u32 pending_flush = 0;
+
+ if (!phys_enc)
+ return;
+
+ hw_wb = phys_enc->hw_wb;
+ hw_pp = phys_enc->hw_pp;
+ hw_ctl = phys_enc->hw_ctl;
+
+ DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
+
+ if (!hw_ctl) {
+ DPU_DEBUG("[wb:%d] no ctl assigned\n", hw_wb->idx - WB_0);
+ return;
+ }
+
+ if (hw_ctl->ops.update_pending_flush_wb)
+ hw_ctl->ops.update_pending_flush_wb(hw_ctl, hw_wb->idx);
+
+ if (hw_ctl->ops.update_pending_flush_merge_3d && hw_pp && hw_pp->merge_3d)
+ hw_ctl->ops.update_pending_flush_merge_3d(hw_ctl,
+ hw_pp->merge_3d->idx);
+
+ if (hw_ctl->ops.get_pending_flush)
+ pending_flush = hw_ctl->ops.get_pending_flush(hw_ctl);
+
+ DPU_DEBUG("Pending flush mask for CTL_%d is 0x%x, WB %d\n",
+ hw_ctl->idx - CTL_0, pending_flush,
+ hw_wb->idx - WB_0);
+}
+
+/**
+ * dpu_encoder_phys_wb_setup - setup writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_setup(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
+ struct drm_display_mode mode = phys_enc->cached_mode;
+ struct drm_framebuffer *fb = NULL;
+
+ DPU_DEBUG("[mode_set:%d, \"%s\",%d,%d]\n",
+ hw_wb->idx - WB_0, mode.name,
+ mode.hdisplay, mode.vdisplay);
+
+ dpu_encoder_phys_wb_set_ot_limit(phys_enc);
+
+ dpu_encoder_phys_wb_set_qos_remap(phys_enc);
+
+ dpu_encoder_phys_wb_set_qos(phys_enc);
+
+ dpu_encoder_phys_wb_setup_fb(phys_enc, fb);
+
+ dpu_encoder_phys_wb_setup_cdp(phys_enc);
+}
+
+static void _dpu_encoder_phys_wb_frame_done_helper(void *arg)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+
+ struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
+ unsigned long lock_flags;
+ u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
+
+ DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
+
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(phys_enc->parent,
+ phys_enc, event);
+
+ if (phys_enc->parent_ops->handle_vblank_virt)
+ phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
+ phys_enc);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ if (wb_enc->wb_conn)
+ drm_writeback_signal_completion(wb_enc->wb_conn, 0);
+
+ /* Signal any waiting atomic commit thread */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+}
+
+/**
+ * dpu_encoder_phys_wb_done_irq - writeback interrupt handler
+ * @arg: Pointer to writeback encoder
+ * @irq_idx: interrupt index
+ */
+static void dpu_encoder_phys_wb_done_irq(void *arg, int irq_idx)
+{
+ _dpu_encoder_phys_wb_frame_done_helper(arg);
+}
+
+/**
+ * dpu_encoder_phys_wb_irq_ctrl - irq control of WB
+ * @phys: Pointer to physical encoder
+ * @enable: indicates enable or disable interrupts
+ */
+static void dpu_encoder_phys_wb_irq_ctrl(
+ struct dpu_encoder_phys *phys, bool enable)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys);
+
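+ /* enable the WB-done IRQ on the first reference, disable on the last */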
+ if (enable && atomic_inc_return(&wb_enc->wbirq_refcount) == 1)
+ dpu_core_irq_register_callback(phys->dpu_kms,
+ phys->irq[INTR_IDX_WB_DONE], dpu_encoder_phys_wb_done_irq, phys);
+ else if (!enable &&
+ atomic_dec_return(&wb_enc->wbirq_refcount) == 0)
+ dpu_core_irq_unregister_callback(phys->dpu_kms, phys->irq[INTR_IDX_WB_DONE]);
+}
+
+static void dpu_encoder_phys_wb_atomic_mode_set(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ phys_enc->irq[INTR_IDX_WB_DONE] = phys_enc->hw_wb->caps->intr_wb_done;
+}
+
+static void _dpu_encoder_phys_wb_handle_wbdone_timeout(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+ u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
+
+ wb_enc->wb_done_timeout_cnt++;
+
+ if (wb_enc->wb_done_timeout_cnt == 1)
+ msm_disp_snapshot_state(phys_enc->parent->dev);
+
+ atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+
+ /* request a ctl reset before the next kickoff */
+ phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;
+
+ if (wb_enc->wb_conn)
+ drm_writeback_signal_completion(wb_enc->wb_conn, 0);
+
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc, frame_event);
+}
+
+/**
+ * dpu_encoder_phys_wb_wait_for_commit_done - wait until request is committed
+ * @phys_enc: Pointer to physical encoder
+ */
+static int dpu_encoder_phys_wb_wait_for_commit_done(
+ struct dpu_encoder_phys *phys_enc)
+{
+ int ret;
+ struct dpu_encoder_wait_info wait_info;
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc,
+ phys_enc->irq[INTR_IDX_WB_DONE],
+ dpu_encoder_phys_wb_done_irq, &wait_info);
+ if (ret == -ETIMEDOUT)
+ _dpu_encoder_phys_wb_handle_wbdone_timeout(phys_enc);
+ else if (!ret)
+ wb_enc->wb_done_timeout_cnt = 0;
+
+ return ret;
+}
+
+/**
+ * dpu_encoder_phys_wb_prepare_for_kickoff - pre-kickoff processing
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_prepare_for_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+ struct drm_connector *drm_conn;
+ struct drm_connector_state *state;
+
+ DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
+
+ if (!wb_enc->wb_conn || !wb_enc->wb_job) {
+ DPU_ERROR("invalid wb_conn or wb_job\n");
+ return;
+ }
+
+ drm_conn = &wb_enc->wb_conn->base;
+ state = drm_conn->state;
+
+ /* both were validated above, so queue the job unconditionally */
+ drm_writeback_queue_job(wb_enc->wb_conn, state);
+
+ dpu_encoder_phys_wb_setup(phys_enc);
+
+ _dpu_encoder_phys_wb_update_flush(phys_enc);
+}
+
+/**
+ * dpu_encoder_phys_wb_needs_single_flush - report whether a single flush is
+ * needed; always false since there is only one writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ */
+static bool dpu_encoder_phys_wb_needs_single_flush(struct dpu_encoder_phys *phys_enc)
+{
+ DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
+ return false;
+}
+
+/**
+ * dpu_encoder_phys_wb_handle_post_kickoff - post-kickoff processing
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_handle_post_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
+}
+
+/**
+ * dpu_encoder_phys_wb_enable - enable writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_enable(struct dpu_encoder_phys *phys_enc)
+{
+ DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+/**
+ * dpu_encoder_phys_wb_disable - disable writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
+ struct dpu_hw_ctl *hw_ctl = phys_enc->hw_ctl;
+
+ DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
+
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DPU_ERROR("encoder is already disabled\n");
+ return;
+ }
+
+ /* reset h/w before final flush */
+ if (hw_ctl->ops.clear_pending_flush)
+ hw_ctl->ops.clear_pending_flush(hw_ctl);
+
+ /*
+ * Use the new CTL reset sequence, available from MDP 5.0 onwards
+ * (SM8150 and later, indicated by DPU_CTL_ACTIVE_CFG). The legacy
+ * reset sequence has not been implemented yet; any earlier target
+ * will need it once WB support is added for those targets.
+ */
+ if (hw_ctl->caps->features & BIT(DPU_CTL_ACTIVE_CFG))
+ dpu_encoder_helper_phys_cleanup(phys_enc);
+
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+/**
+ * dpu_encoder_phys_wb_destroy - destroy writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_destroy(struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc)
+ return;
+
+ DPU_DEBUG("[wb:%d]\n", phys_enc->wb_idx - WB_0);
+
+ kfree(phys_enc);
+}
+
+static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc,
+ struct drm_writeback_job *job)
+{
+ const struct msm_format *format;
+ struct msm_gem_address_space *aspace;
+ struct dpu_hw_wb_cfg *wb_cfg;
+ int ret;
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+
+ if (!job->fb)
+ return;
+
+ wb_enc->wb_job = job;
+ wb_enc->wb_conn = job->connector;
+ aspace = phys_enc->dpu_kms->base.aspace;
+
+ wb_cfg = &wb_enc->wb_cfg;
+
+ memset(wb_cfg, 0, sizeof(struct dpu_hw_wb_cfg));
+
+ ret = msm_framebuffer_prepare(job->fb, aspace, false);
+ if (ret) {
+ DPU_ERROR("prep fb failed, %d\n", ret);
+ return;
+ }
+
+ format = msm_framebuffer_format(job->fb);
+
+ wb_cfg->dest.format = dpu_get_dpu_format_ext(
+ format->pixel_format, job->fb->modifier);
+ if (!wb_cfg->dest.format) {
+ /* this error should be detected during atomic_check */
+ DPU_ERROR("failed to get format %x\n", format->pixel_format);
+ return;
+ }
+
+ ret = dpu_format_populate_layout(aspace, job->fb, &wb_cfg->dest);
+ if (ret) {
+ DPU_DEBUG("failed to populate layout %d\n", ret);
+ return;
+ }
+
+ wb_cfg->dest.width = job->fb->width;
+ wb_cfg->dest.height = job->fb->height;
+ wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
+
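+ /* swap the chroma plane addresses so the layout matches the Cb/Cr order the hardware expects */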
+ if ((wb_cfg->dest.format->fetch_planes == DPU_PLANE_PLANAR) &&
+ (wb_cfg->dest.format->element[0] == C1_B_Cb))
+ swap(wb_cfg->dest.plane_addr[1], wb_cfg->dest.plane_addr[2]);
+
+ DPU_DEBUG("[fb_offset:%8.8x,%8.8x,%8.8x,%8.8x]\n",
+ wb_cfg->dest.plane_addr[0], wb_cfg->dest.plane_addr[1],
+ wb_cfg->dest.plane_addr[2], wb_cfg->dest.plane_addr[3]);
+
+ DPU_DEBUG("[fb_stride:%8.8x,%8.8x,%8.8x,%8.8x]\n",
+ wb_cfg->dest.plane_pitch[0], wb_cfg->dest.plane_pitch[1],
+ wb_cfg->dest.plane_pitch[2], wb_cfg->dest.plane_pitch[3]);
+}
+
+static void dpu_encoder_phys_wb_cleanup_wb_job(struct dpu_encoder_phys *phys_enc,
+ struct drm_writeback_job *job)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+ struct msm_gem_address_space *aspace;
+
+ if (!job->fb)
+ return;
+
+ aspace = phys_enc->dpu_kms->base.aspace;
+
+ msm_framebuffer_cleanup(job->fb, aspace, false);
+ wb_enc->wb_job = NULL;
+ wb_enc->wb_conn = NULL;
+}
+
+static bool dpu_encoder_phys_wb_is_valid_for_commit(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+
+ /* only commit if a writeback job has been prepared */
+ return wb_enc->wb_job != NULL;
+}
+
+/**
+ * dpu_encoder_phys_wb_init_ops - initialize writeback operations
+ * @ops: Pointer to encoder operation table
+ */
+static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
+{
+ ops->is_master = dpu_encoder_phys_wb_is_master;
+ ops->atomic_mode_set = dpu_encoder_phys_wb_atomic_mode_set;
+ ops->enable = dpu_encoder_phys_wb_enable;
+ ops->disable = dpu_encoder_phys_wb_disable;
+ ops->destroy = dpu_encoder_phys_wb_destroy;
+ ops->atomic_check = dpu_encoder_phys_wb_atomic_check;
+ ops->wait_for_commit_done = dpu_encoder_phys_wb_wait_for_commit_done;
+ ops->prepare_for_kickoff = dpu_encoder_phys_wb_prepare_for_kickoff;
+ ops->handle_post_kickoff = dpu_encoder_phys_wb_handle_post_kickoff;
+ ops->needs_single_flush = dpu_encoder_phys_wb_needs_single_flush;
+ ops->trigger_start = dpu_encoder_helper_trigger_start;
+ ops->prepare_wb_job = dpu_encoder_phys_wb_prepare_wb_job;
+ ops->cleanup_wb_job = dpu_encoder_phys_wb_cleanup_wb_job;
+ ops->irq_control = dpu_encoder_phys_wb_irq_ctrl;
+ ops->is_valid_for_commit = dpu_encoder_phys_wb_is_valid_for_commit;
+}
+
+/**
+ * dpu_encoder_phys_wb_init - initialize writeback encoder
+ * @p: Pointer to init info structure with initialization params
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
+ struct dpu_enc_phys_init_params *p)
+{
+ struct dpu_encoder_phys *phys_enc = NULL;
+ struct dpu_encoder_phys_wb *wb_enc = NULL;
+ int ret = 0;
+ int i;
+
+ DPU_DEBUG("\n");
+
+ if (!p || !p->parent) {
+ DPU_ERROR("invalid params\n");
+ ret = -EINVAL;
+ goto fail_alloc;
+ }
+
+ wb_enc = kzalloc(sizeof(*wb_enc), GFP_KERNEL);
+ if (!wb_enc) {
+ DPU_ERROR("failed to allocate wb phys_enc enc\n");
+ ret = -ENOMEM;
+ goto fail_alloc;
+ }
+
+ phys_enc = &wb_enc->base;
+ phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
+ phys_enc->wb_idx = p->wb_idx;
+
+ dpu_encoder_phys_wb_init_ops(&phys_enc->ops);
+ phys_enc->parent = p->parent;
+ phys_enc->parent_ops = p->parent_ops;
+ phys_enc->dpu_kms = p->dpu_kms;
+ phys_enc->split_role = p->split_role;
+ phys_enc->intf_mode = INTF_MODE_WB_LINE;
+ phys_enc->wb_idx = p->wb_idx;
+ phys_enc->enc_spinlock = p->enc_spinlock;
+
+ atomic_set(&wb_enc->wbirq_refcount, 0);
+
+ for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
+ phys_enc->irq[i] = -EINVAL;
+
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ atomic_set(&phys_enc->vblank_refcount, 0);
+ wb_enc->wb_done_timeout_cnt = 0;
+
+ init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+
+ DPU_DEBUG("Created dpu_encoder_phys for wb %d\n",
+ phys_enc->wb_idx);
+
+ return phys_enc;
+
+fail_alloc:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
new file mode 100644
index 000000000..f436a1f34
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -0,0 +1,1043 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <uapi/drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+
+#include "msm_media_info.h"
+#include "dpu_kms.h"
+#include "dpu_formats.h"
+
+#define DPU_UBWC_META_MACRO_W_H 16
+#define DPU_UBWC_META_BLOCK_SIZE 256
+#define DPU_UBWC_PLANE_SIZE_ALIGNMENT 4096
+
+#define DPU_TILE_HEIGHT_DEFAULT 1
+#define DPU_TILE_HEIGHT_TILED 4
+#define DPU_TILE_HEIGHT_UBWC 4
+#define DPU_TILE_HEIGHT_NV12 8
+
+#define DPU_MAX_IMG_WIDTH 0x3FFF
+#define DPU_MAX_IMG_HEIGHT 0x3FFF
+
+/*
+ * DPU supported format packing, bpp, and other format information.
+ * DPU currently only supports interleaved RGB formats.
+ * UBWC support for a pixel format is indicated by the flag; such formats
+ * carry an additional metadata plane.
+ */
+
+#define INTERLEAVED_RGB_FMT(fmt, a, r, g, b, e0, e1, e2, e3, uc, alpha, \
+bp, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = DPU_CHROMA_RGB, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = uc, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define INTERLEAVED_RGB_FMT_TILED(fmt, a, r, g, b, e0, e1, e2, e3, uc, \
+alpha, bp, flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = DPU_CHROMA_RGB, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = uc, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+#define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3, \
+alpha, chroma, count, bp, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3)}, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = count, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT_TILED(fmt, a, r, g, b, e0, e1, chroma, \
+flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)\
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 1, \
+ .unpack_tight = 0, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE_TILED(fmt, a, r, g, b, e0, e1, chroma, \
+flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 1, \
+ .unpack_tight = 0, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+#define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp, \
+flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PLANAR, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 1, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+/**
+ * struct dpu_media_color_map - maps drm format to media format
+ * @format: DRM base pixel format
+ * @color: Media API color related to DRM format
+ */
+struct dpu_media_color_map {
+ uint32_t format;
+ uint32_t color;
+};
+
+static const struct dpu_format dpu_format_map[] = {
+ INTERLEAVED_RGB_FMT(ARGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGB888,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ false, 3, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGR888,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 3, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGB565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ PSEUDO_YUV_FMT(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV21,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV16,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV61,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(VYUY,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(UYVY,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(YUYV,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(YVYU,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PLANAR_YUV_FMT(YUV420,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb, C0_G_Y,
+ false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 3),
+
+ PLANAR_YUV_FMT(YVU420,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr, C0_G_Y,
+ false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 3),
+};
+
+/*
+ * UBWC formats table:
+ * This table holds the UBWC formats supported.
+ * If a compression ratio needs to be used for this or any other format,
+ * the data will be passed by user-space.
+ */
+static const struct dpu_format dpu_format_map_ubwc[] = {
+ INTERLEAVED_RGB_FMT_TILED(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ /* ARGB8888 and ABGR8888 purposely have the same color
+ * ordering. The hardware only supports ABGR8888 UBWC
+ * natively.
+ */
+ INTERLEAVED_RGB_FMT_TILED(ARGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(XRGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ PSEUDO_YUV_FMT_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV |
+ DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+};
+
+/* _dpu_get_v_h_subsample_rate - Get subsample rates for all formats we support
+ * Note: Not using the drm_format_*_subsampling helpers since we have our own
+ * format table.
+ */
+static void _dpu_get_v_h_subsample_rate(
+ enum dpu_chroma_samp_type chroma_sample,
+ uint32_t *v_sample,
+ uint32_t *h_sample)
+{
+ if (!v_sample || !h_sample)
+ return;
+
+ switch (chroma_sample) {
+ case DPU_CHROMA_H2V1:
+ *v_sample = 1;
+ *h_sample = 2;
+ break;
+ case DPU_CHROMA_H1V2:
+ *v_sample = 2;
+ *h_sample = 1;
+ break;
+ case DPU_CHROMA_420:
+ *v_sample = 2;
+ *h_sample = 2;
+ break;
+ default:
+ *v_sample = 1;
+ *h_sample = 1;
+ break;
+ }
+}
+
+static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt)
+{
+ static const struct dpu_media_color_map dpu_media_ubwc_map[] = {
+ {DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_ARGB8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_XRGB8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+ {DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+ {DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC},
+ };
+ int color_fmt = -1;
+ int i;
+
+ if (fmt->base.pixel_format == DRM_FORMAT_NV12) {
+ if (DPU_FORMAT_IS_DX(fmt)) {
+ if (fmt->unpack_tight)
+ color_fmt = COLOR_FMT_NV12_BPP10_UBWC;
+ else
+ color_fmt = COLOR_FMT_P010_UBWC;
+ } else {
+ color_fmt = COLOR_FMT_NV12_UBWC;
+ }
+ return color_fmt;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dpu_media_ubwc_map); ++i)
+ if (fmt->base.pixel_format == dpu_media_ubwc_map[i].format) {
+ color_fmt = dpu_media_ubwc_map[i].color;
+ break;
+ }
+ return color_fmt;
+}
+
+static int _dpu_format_get_plane_sizes_ubwc(
+ const struct dpu_format *fmt,
+ const uint32_t width,
+ const uint32_t height,
+ struct dpu_hw_fmt_layout *layout)
+{
+ int i;
+ int color;
+ bool meta = DPU_FORMAT_IS_UBWC(fmt);
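+ /* TILE-only formats share the UBWC macro-tile geometry but carry no metadata planes */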
+
+ memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
+ layout->format = fmt;
+ layout->width = width;
+ layout->height = height;
+ layout->num_planes = fmt->num_planes;
+
+ color = _dpu_format_get_media_color_ubwc(fmt);
+ if (color < 0) {
+ DRM_ERROR("UBWC format not supported for fmt: %4.4s\n",
+ (char *)&fmt->base.pixel_format);
+ return -EINVAL;
+ }
+
+ if (DPU_FORMAT_IS_YUV(layout->format)) {
+ uint32_t y_sclines, uv_sclines;
+ uint32_t y_meta_scanlines = 0;
+ uint32_t uv_meta_scanlines = 0;
+
+ layout->num_planes = 2;
+ layout->plane_pitch[0] = VENUS_Y_STRIDE(color, width);
+ y_sclines = VENUS_Y_SCANLINES(color, height);
+ layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
+ y_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ layout->plane_pitch[1] = VENUS_UV_STRIDE(color, width);
+ uv_sclines = VENUS_UV_SCANLINES(color, height);
+ layout->plane_size[1] = MSM_MEDIA_ALIGN(layout->plane_pitch[1] *
+ uv_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ if (!meta)
+ goto done;
+
+ layout->num_planes += 2;
+ layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, width);
+ y_meta_scanlines = VENUS_Y_META_SCANLINES(color, height);
+ layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
+ y_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, width);
+ uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, height);
+ layout->plane_size[3] = MSM_MEDIA_ALIGN(layout->plane_pitch[3] *
+ uv_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ } else {
+ uint32_t rgb_scanlines, rgb_meta_scanlines;
+
+ layout->num_planes = 1;
+
+ layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, width);
+ rgb_scanlines = VENUS_RGB_SCANLINES(color, height);
+ layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
+ rgb_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ if (!meta)
+ goto done;
+ layout->num_planes += 2;
+ layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, width);
+ rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, height);
+ layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
+ rgb_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+ }
+
+done:
+ for (i = 0; i < DPU_MAX_PLANES; i++)
+ layout->total_size += layout->plane_size[i];
+
+ return 0;
+}
+
+static int _dpu_format_get_plane_sizes_linear(
+ const struct dpu_format *fmt,
+ const uint32_t width,
+ const uint32_t height,
+ struct dpu_hw_fmt_layout *layout,
+ const uint32_t *pitches)
+{
+ int i;
+
+ memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
+ layout->format = fmt;
+ layout->width = width;
+ layout->height = height;
+ layout->num_planes = fmt->num_planes;
+
+ /* Due to memset above, only need to set planes of interest */
+ if (fmt->fetch_planes == DPU_PLANE_INTERLEAVED) {
+ layout->num_planes = 1;
+ layout->plane_size[0] = width * height * layout->format->bpp;
+ layout->plane_pitch[0] = width * layout->format->bpp;
+ } else {
+ uint32_t v_subsample, h_subsample;
+ uint32_t chroma_samp;
+ uint32_t bpp = 1;
+
+ chroma_samp = fmt->chroma_sample;
+ _dpu_get_v_h_subsample_rate(chroma_samp, &v_subsample,
+ &h_subsample);
+
+ if (width % h_subsample || height % v_subsample) {
+ DRM_ERROR("mismatch in subsample vs dimensions\n");
+ return -EINVAL;
+ }
+
+ if ((fmt->base.pixel_format == DRM_FORMAT_NV12) &&
+ (DPU_FORMAT_IS_DX(fmt)))
+ bpp = 2;
+ layout->plane_pitch[0] = width * bpp;
+ layout->plane_pitch[1] = layout->plane_pitch[0] / h_subsample;
+ layout->plane_size[0] = layout->plane_pitch[0] * height;
+ layout->plane_size[1] = layout->plane_pitch[1] *
+ (height / v_subsample);
+
+ if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
+ layout->num_planes = 2;
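+ /* Cb and Cr are interleaved in a single plane, doubling its size and pitch */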
+ layout->plane_size[1] *= 2;
+ layout->plane_pitch[1] *= 2;
+ } else {
+ /* planar */
+ layout->num_planes = 3;
+ layout->plane_size[2] = layout->plane_size[1];
+ layout->plane_pitch[2] = layout->plane_pitch[1];
+ }
+ }
+
+ /*
+ * linear format: allow user allocated pitches if they are greater than
+ * the requirement.
+ * ubwc format: pitch values are computed uniformly across
+ * all the components based on ubwc specifications.
+ */
+ for (i = 0; i < layout->num_planes && i < DPU_MAX_PLANES; ++i) {
+ if (pitches && layout->plane_pitch[i] < pitches[i])
+ layout->plane_pitch[i] = pitches[i];
+ }
+
+ for (i = 0; i < DPU_MAX_PLANES; i++)
+ layout->total_size += layout->plane_size[i];
+
+ return 0;
+}
+
+static int dpu_format_get_plane_sizes(
+ const struct dpu_format *fmt,
+ const uint32_t w,
+ const uint32_t h,
+ struct dpu_hw_fmt_layout *layout,
+ const uint32_t *pitches)
+{
+ if (!layout || !fmt) {
+ DRM_ERROR("invalid pointer\n");
+ return -EINVAL;
+ }
+
+ if ((w > DPU_MAX_IMG_WIDTH) || (h > DPU_MAX_IMG_HEIGHT)) {
+ DRM_ERROR("image dimensions outside max range\n");
+ return -ERANGE;
+ }
+
+ if (DPU_FORMAT_IS_UBWC(fmt) || DPU_FORMAT_IS_TILE(fmt))
+ return _dpu_format_get_plane_sizes_ubwc(fmt, w, h, layout);
+
+ return _dpu_format_get_plane_sizes_linear(fmt, w, h, layout, pitches);
+}
+
+static int _dpu_format_populate_addrs_ubwc(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
+{
+ uint32_t base_addr = 0;
+ bool meta;
+
+ if (!fb || !layout) {
+ DRM_ERROR("invalid pointers\n");
+ return -EINVAL;
+ }
+
+ if (aspace)
+ base_addr = msm_framebuffer_iova(fb, aspace, 0);
+ if (!base_addr) {
+ DRM_ERROR("failed to retrieve base addr\n");
+ return -EFAULT;
+ }
+
+ meta = DPU_FORMAT_IS_UBWC(layout->format);
+
+ /* Per-format logic for verifying active planes */
+ if (DPU_FORMAT_IS_YUV(layout->format)) {
+ /************************************************/
+ /* UBWC ** */
+ /* buffer ** DPU PLANE */
+ /* format ** */
+ /************************************************/
+ /* ------------------- ** -------------------- */
+ /* | Y meta | ** | Y bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | Y bitstream | ** | CbCr bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | Cbcr metadata | ** | Y meta | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | CbCr bitstream | ** | CbCr meta | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /************************************************/
+
+ /* configure Y bitstream plane */
+ layout->plane_addr[0] = base_addr + layout->plane_size[2];
+
+ /* configure CbCr bitstream plane */
+ layout->plane_addr[1] = base_addr + layout->plane_size[0]
+ + layout->plane_size[2] + layout->plane_size[3];
+
+ if (!meta)
+ return 0;
+
+ /* configure Y metadata plane */
+ layout->plane_addr[2] = base_addr;
+
+ /* configure CbCr metadata plane */
+ layout->plane_addr[3] = base_addr + layout->plane_size[0]
+ + layout->plane_size[2];
+
+ } else {
+ /************************************************/
+ /* UBWC ** */
+ /* buffer ** DPU PLANE */
+ /* format ** */
+ /************************************************/
+ /* ------------------- ** -------------------- */
+ /* | RGB meta | ** | RGB bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | RGB bitstream | ** | NONE | */
+ /* | data | ** | | */
+ /* ------------------- ** -------------------- */
+ /* ** | RGB meta | */
+ /* ** | plane | */
+ /* ** -------------------- */
+ /************************************************/
+
+ layout->plane_addr[0] = base_addr + layout->plane_size[2];
+ layout->plane_addr[1] = 0;
+
+ if (!meta)
+ return 0;
+
+ layout->plane_addr[2] = base_addr;
+ layout->plane_addr[3] = 0;
+ }
+ return 0;
+}
+
+static int _dpu_format_populate_addrs_linear(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
+{
+ unsigned int i;
+
+ /* Can now check the pitches given vs pitches expected */
+ for (i = 0; i < layout->num_planes; ++i) {
+ if (layout->plane_pitch[i] > fb->pitches[i]) {
+ DRM_ERROR("plane %u expected pitch %u, fb %u\n",
+ i, layout->plane_pitch[i], fb->pitches[i]);
+ return -EINVAL;
+ }
+ }
+
+ /* Populate addresses for simple formats here */
+ for (i = 0; i < layout->num_planes; ++i) {
+ if (aspace)
+ layout->plane_addr[i] =
+ msm_framebuffer_iova(fb, aspace, i);
+ if (!layout->plane_addr[i]) {
+ DRM_ERROR("failed to retrieve base addr\n");
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+int dpu_format_populate_layout(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
+{
+ uint32_t plane_addr[DPU_MAX_PLANES];
+ int i, ret;
+
+ if (!fb || !layout) {
+ DRM_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ if ((fb->width > DPU_MAX_IMG_WIDTH) ||
+ (fb->height > DPU_MAX_IMG_HEIGHT)) {
+ DRM_ERROR("image dimensions outside max range\n");
+ return -ERANGE;
+ }
+
+ layout->format = to_dpu_format(msm_framebuffer_format(fb));
+
+ /* Populate the plane sizes etc via get_format */
+ ret = dpu_format_get_plane_sizes(layout->format, fb->width, fb->height,
+ layout, fb->pitches);
+ if (ret)
+ return ret;
+
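+ /* snapshot the current plane addresses so changes can be detected below */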
+ for (i = 0; i < DPU_MAX_PLANES; ++i)
+ plane_addr[i] = layout->plane_addr[i];
+
+ /* Populate the addresses given the fb */
+ if (DPU_FORMAT_IS_UBWC(layout->format) ||
+ DPU_FORMAT_IS_TILE(layout->format))
+ ret = _dpu_format_populate_addrs_ubwc(aspace, fb, layout);
+ else
+ ret = _dpu_format_populate_addrs_linear(aspace, fb, layout);
+
+ /* report -EAGAIN if nothing changed, so callers can skip reprogramming */
+ if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr)))
+ ret = -EAGAIN;
+
+ return ret;
+}
+
+int dpu_format_check_modified_format(
+ const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos)
+{
+ const struct drm_format_info *info;
+ const struct dpu_format *fmt;
+ struct dpu_hw_fmt_layout layout;
+ uint32_t bos_total_size = 0;
+ int ret, i;
+
+ if (!msm_fmt || !cmd || !bos) {
+ DRM_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ fmt = to_dpu_format(msm_fmt);
+ info = drm_format_info(fmt->base.pixel_format);
+ if (!info)
+ return -EINVAL;
+
+ ret = dpu_format_get_plane_sizes(fmt, cmd->width, cmd->height,
+ &layout, cmd->pitches);
+ if (ret)
+ return ret;
+
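+ /* sum each buffer object once; planes sharing the first bo are counted a single time */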
+ for (i = 0; i < info->num_planes; i++) {
+ if (!bos[i]) {
+ DRM_ERROR("invalid handle for plane %d\n", i);
+ return -EINVAL;
+ }
+ if ((i == 0) || (bos[i] != bos[0]))
+ bos_total_size += bos[i]->size;
+ }
+
+ if (bos_total_size < layout.total_size) {
+ DRM_ERROR("buffers total size too small %u expected %u\n",
+ bos_total_size, layout.total_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
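+
+/*
+ * Worked example for the size check above (illustrative numbers): with a
+ * two-plane format where both planes reference the same 1 MiB gem object,
+ * bos[1] == bos[0], so the object is counted once and bos_total_size is
+ * 1 MiB; with two distinct 1 MiB objects it is 2 MiB.  Either sum must
+ * cover layout.total_size from dpu_format_get_plane_sizes().
+ */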
+
+const struct dpu_format *dpu_get_dpu_format_ext(
+ const uint32_t format,
+ const uint64_t modifier)
+{
+ uint32_t i = 0;
+ const struct dpu_format *fmt = NULL;
+ const struct dpu_format *map = NULL;
+ ssize_t map_size = 0;
+
+ /*
+	 * Currently exactly zero or one modifier is supported.
+ * All planes use the same modifier.
+ */
+ DRM_DEBUG_ATOMIC("plane format modifier 0x%llX\n", modifier);
+
+ switch (modifier) {
+ case 0:
+ map = dpu_format_map;
+ map_size = ARRAY_SIZE(dpu_format_map);
+ break;
+ case DRM_FORMAT_MOD_QCOM_COMPRESSED:
+ map = dpu_format_map_ubwc;
+ map_size = ARRAY_SIZE(dpu_format_map_ubwc);
+ DRM_DEBUG_ATOMIC("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
+ (char *)&format);
+ break;
+ default:
+ DPU_ERROR("unsupported format modifier %llX\n", modifier);
+ return NULL;
+ }
+
+ for (i = 0; i < map_size; i++) {
+ if (format == map[i].base.pixel_format) {
+ fmt = &map[i];
+ break;
+ }
+ }
+
+ if (fmt == NULL)
+ DPU_ERROR("unsupported fmt: %4.4s modifier 0x%llX\n",
+ (char *)&format, modifier);
+ else
+ DRM_DEBUG_ATOMIC("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n",
+ (char *)&format, modifier,
+ DPU_FORMAT_IS_UBWC(fmt),
+ DPU_FORMAT_IS_YUV(fmt));
+
+ return fmt;
+}
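+
+/*
+ * Usage sketch (illustrative): looking up the UBWC-compressed variant of
+ * NV12.  A NULL return means the fourcc/modifier pair is absent from the
+ * lookup tables above.
+ *
+ *	const struct dpu_format *fmt =
+ *		dpu_get_dpu_format_ext(DRM_FORMAT_NV12,
+ *				       DRM_FORMAT_MOD_QCOM_COMPRESSED);
+ *	if (!fmt)
+ *		return -EINVAL;
+ */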
+
+const struct msm_format *dpu_get_msm_format(
+ struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t modifiers)
+{
+ const struct dpu_format *fmt = dpu_get_dpu_format_ext(format,
+ modifiers);
+ if (fmt)
+ return &fmt->base;
+ return NULL;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
new file mode 100644
index 000000000..84b8b3289
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_FORMATS_H
+#define _DPU_FORMATS_H
+
+#include <drm/drm_fourcc.h>
+#include "msm_gem.h"
+#include "dpu_hw_mdss.h"
+
+/**
+ * dpu_get_dpu_format_ext() - Returns dpu format structure pointer.
+ * @format: DRM FourCC Code
+ * @modifier: format modifier from userspace; all planes use the same modifier
+ */
+const struct dpu_format *dpu_get_dpu_format_ext(
+ const uint32_t format,
+ const uint64_t modifier);
+
+#define dpu_get_dpu_format(f) dpu_get_dpu_format_ext(f, 0)
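+
+/*
+ * Example (illustrative): the common "no modifier" lookup, equivalent to
+ * dpu_get_dpu_format_ext(DRM_FORMAT_XRGB8888, 0):
+ *
+ *	const struct dpu_format *fmt = dpu_get_dpu_format(DRM_FORMAT_XRGB8888);
+ */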
+
+/**
+ * dpu_find_format - validate if the pixel format is supported
+ * @format: DRM FourCC code
+ * @supported_formats: formats supported by the dpu HW
+ * @num_formats: total number of supported formats
+ *
+ * Return: true if the format is in the supported list, false otherwise
+ */
+static inline bool dpu_find_format(u32 format, const u32 *supported_formats,
+ size_t num_formats)
+{
+	size_t i;
+
+ for (i = 0; i < num_formats; i++) {
+ /* check for valid formats supported */
+ if (format == supported_formats[i])
+ return true;
+ }
+
+ return false;
+}
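+
+/*
+ * Usage sketch (illustrative; "sblk" stands for any catalog entry that
+ * carries a format_list/num_formats pair):
+ *
+ *	if (!dpu_find_format(DRM_FORMAT_NV12, sblk->format_list,
+ *			     sblk->num_formats))
+ *		return -EINVAL;
+ */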
+
+/**
+ * dpu_get_msm_format - get a dpu_format by its msm_format base;
+ * callback registered with the msm_kms layer
+ * @kms: kms driver
+ * @format: DRM FourCC Code
+ * @modifiers: data layout modifier
+ */
+const struct msm_format *dpu_get_msm_format(
+ struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t modifiers);
+
+/**
+ * dpu_format_check_modified_format - validate format and buffers for
+ * dpu non-standard, i.e. modified format
+ * @kms: kms driver
+ * @msm_fmt: pointer to the msm_format base of a dpu_format
+ * @cmd: fb_cmd2 structure user request
+ * @bos: gem buffer object list
+ *
+ * Return: error code on failure, 0 on success
+ */
+int dpu_format_check_modified_format(
+ const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos);
+
+/**
+ * dpu_format_populate_layout - populate the given format layout based on
+ * mmu, fb, and format found in the fb
+ * @aspace: address space pointer
+ * @fb: framebuffer pointer
+ * @fmtl: format layout structure to populate
+ *
+ * Return: error code on failure, -EAGAIN if success but the addresses
+ * are the same as before or 0 if new addresses were populated
+ */
+int dpu_format_populate_layout(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *fmtl);
+
+#endif /* _DPU_FORMATS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
new file mode 100644
index 000000000..b2f330e99
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -0,0 +1,1959 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_kms.h"
+
+#define VIG_BASE_MASK \
+ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
+ BIT(DPU_SSPP_CDP) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define VIG_MASK \
+ (VIG_BASE_MASK | \
+ BIT(DPU_SSPP_CSC_10BIT))
+
+#define VIG_MSM8998_MASK \
+ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3))
+
+#define VIG_SDM845_MASK \
+ (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3))
+
+#define VIG_SC7180_MASK \
+ (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
+
+#define VIG_SM8250_MASK \
+ (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE))
+
+#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL))
+
+#define DMA_MSM8998_MASK \
+ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
+ BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define VIG_SC7280_MASK \
+ (VIG_SC7180_MASK | BIT(DPU_SSPP_INLINE_ROTATION))
+
+#define DMA_SDM845_MASK \
+ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
+ BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define DMA_CURSOR_SDM845_MASK \
+ (DMA_SDM845_MASK | BIT(DPU_SSPP_CURSOR))
+
+#define DMA_CURSOR_MSM8998_MASK \
+ (DMA_MSM8998_MASK | BIT(DPU_SSPP_CURSOR))
+
+#define MIXER_MSM8998_MASK \
+ (BIT(DPU_MIXER_SOURCESPLIT))
+
+#define MIXER_SDM845_MASK \
+ (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
+
+#define MIXER_SC7180_MASK \
+ (BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
+
+#define PINGPONG_SDM845_MASK BIT(DPU_PINGPONG_DITHER)
+
+#define PINGPONG_SDM845_SPLIT_MASK \
+ (PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
+
+#define CTL_SC7280_MASK \
+ (BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_FETCH_ACTIVE) | BIT(DPU_CTL_VM_CFG))
+
+#define MERGE_3D_SM8150_MASK (0)
+
+#define DSPP_MSM8998_MASK BIT(DPU_DSPP_PCC) | BIT(DPU_DSPP_GC)
+
+#define DSPP_SC7180_MASK BIT(DPU_DSPP_PCC)
+
+#define INTF_SDM845_MASK (0)
+
+#define INTF_SC7180_MASK \
+ (BIT(DPU_INTF_INPUT_CTRL) | \
+ BIT(DPU_INTF_TE) | \
+ BIT(DPU_INTF_STATUS_SUPPORTED) | \
+ BIT(DPU_DATA_HCTL_EN))
+
+#define INTF_SC7280_MASK (INTF_SC7180_MASK)
+
+#define IRQ_SDM845_MASK (BIT(MDP_SSPP_TOP0_INTR) | \
+ BIT(MDP_SSPP_TOP0_INTR2) | \
+ BIT(MDP_SSPP_TOP0_HIST_INTR) | \
+ BIT(MDP_INTF0_INTR) | \
+ BIT(MDP_INTF1_INTR) | \
+ BIT(MDP_INTF2_INTR) | \
+ BIT(MDP_INTF3_INTR) | \
+ BIT(MDP_INTF4_INTR) | \
+ BIT(MDP_AD4_0_INTR) | \
+ BIT(MDP_AD4_1_INTR))
+
+#define IRQ_SC7180_MASK (BIT(MDP_SSPP_TOP0_INTR) | \
+ BIT(MDP_SSPP_TOP0_INTR2) | \
+ BIT(MDP_SSPP_TOP0_HIST_INTR) | \
+ BIT(MDP_INTF0_INTR) | \
+ BIT(MDP_INTF1_INTR))
+
+#define IRQ_SC7280_MASK (BIT(MDP_SSPP_TOP0_INTR) | \
+ BIT(MDP_SSPP_TOP0_INTR2) | \
+ BIT(MDP_SSPP_TOP0_HIST_INTR) | \
+ BIT(MDP_INTF0_7xxx_INTR) | \
+ BIT(MDP_INTF1_7xxx_INTR) | \
+ BIT(MDP_INTF5_7xxx_INTR))
+
+#define IRQ_SM8250_MASK (BIT(MDP_SSPP_TOP0_INTR) | \
+ BIT(MDP_SSPP_TOP0_INTR2) | \
+ BIT(MDP_SSPP_TOP0_HIST_INTR) | \
+ BIT(MDP_INTF0_INTR) | \
+ BIT(MDP_INTF1_INTR) | \
+ BIT(MDP_INTF2_INTR) | \
+ BIT(MDP_INTF3_INTR) | \
+ BIT(MDP_INTF4_INTR))
+
+#define IRQ_SC8180X_MASK (BIT(MDP_SSPP_TOP0_INTR) | \
+ BIT(MDP_SSPP_TOP0_INTR2) | \
+ BIT(MDP_SSPP_TOP0_HIST_INTR) | \
+ BIT(MDP_INTF0_INTR) | \
+ BIT(MDP_INTF1_INTR) | \
+ BIT(MDP_INTF2_INTR) | \
+ BIT(MDP_INTF3_INTR) | \
+ BIT(MDP_INTF4_INTR) | \
+ BIT(MDP_INTF5_INTR) | \
+ BIT(MDP_AD4_0_INTR) | \
+ BIT(MDP_AD4_1_INTR))
+
+#define WB_SM8250_MASK (BIT(DPU_WB_LINE_MODE) | \
+ BIT(DPU_WB_UBWC) | \
+ BIT(DPU_WB_YUV_CONFIG) | \
+ BIT(DPU_WB_PIPE_ALPHA) | \
+ BIT(DPU_WB_XY_ROI_OFFSET) | \
+ BIT(DPU_WB_QOS) | \
+ BIT(DPU_WB_QOS_8LVL) | \
+ BIT(DPU_WB_CDP) | \
+ BIT(DPU_WB_INPUT_CTRL))
+
+#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
+#define DEFAULT_DPU_LINE_WIDTH 2048
+#define DEFAULT_DPU_OUTPUT_LINE_WIDTH 2560
+
+#define MAX_HORZ_DECIMATION 4
+#define MAX_VERT_DECIMATION 4
+
+#define MAX_UPSCALE_RATIO 20
+#define MAX_DOWNSCALE_RATIO 4
+#define SSPP_UNITY_SCALE 1
+
+#define STRCAT(X, Y) (X Y)
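+
+/*
+ * STRCAT relies on adjacent string-literal concatenation, so
+ * STRCAT("sspp_src_", "0") becomes "sspp_src_" "0", i.e. "sspp_src_0".
+ * It therefore only works with string-literal arguments.
+ */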
+
+static const uint32_t plane_formats[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_BGRX4444,
+};
+
+static const uint32_t plane_formats_yuv[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_BGRX4444,
+
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YVU420,
+};
+
+static const u32 rotation_v2_formats[] = {
+ DRM_FORMAT_NV12,
+ /* TODO add formats after validation */
+};
+
+static const uint32_t wb2_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_XRGB4444,
+	DRM_FORMAT_BGR888,
+	DRM_FORMAT_BGRA8888,
+	DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_BGRX4444,
+ DRM_FORMAT_XBGR4444,
+};
+
+/*************************************************************
+ * DPU sub blocks config
+ *************************************************************/
+/* DPU top level caps */
+static const struct dpu_caps msm8998_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0x7,
+ .qseed_type = DPU_SSPP_SCALER_QSEED3,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V1,
+ .ubwc_version = DPU_HW_UBWC_VER_10,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_caps qcm2290_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0x4,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .max_linewidth = 2160,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_caps sdm845_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .qseed_type = DPU_SSPP_SCALER_QSEED3,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
+ .ubwc_version = DPU_HW_UBWC_VER_20,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_caps sc7180_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0x9,
+ .qseed_type = DPU_SSPP_SCALER_QSEED4,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
+ .ubwc_version = DPU_HW_UBWC_VER_20,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_caps sm8150_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .qseed_type = DPU_SSPP_SCALER_QSEED3,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
+ .ubwc_version = DPU_HW_UBWC_VER_30,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 4096,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_caps sc8180x_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .qseed_type = DPU_SSPP_SCALER_QSEED3,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
+ .ubwc_version = DPU_HW_UBWC_VER_30,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 4096,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_caps sm8250_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .qseed_type = DPU_SSPP_SCALER_QSEED4,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
+ .ubwc_version = DPU_HW_UBWC_VER_40,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 4096,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_caps sc7280_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0x7,
+ .qseed_type = DPU_SSPP_SCALER_QSEED4,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
+ .ubwc_version = DPU_HW_UBWC_VER_30,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .max_linewidth = 2400,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg msm8998_mdp[] = {
+ {
+ .name = "top_0", .id = MDP_TOP,
+ .base = 0x0, .len = 0x458,
+ .features = 0,
+ .highest_bank_bit = 0x2,
+ .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+ .reg_off = 0x2AC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG1] = {
+ .reg_off = 0x2B4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG2] = {
+ .reg_off = 0x2BC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG3] = {
+ .reg_off = 0x2C4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+ .reg_off = 0x2AC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+ .reg_off = 0x2B4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_DMA2] = {
+ .reg_off = 0x2C4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_DMA3] = {
+ .reg_off = 0x2C4, .bit_off = 12},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .reg_off = 0x3A8, .bit_off = 15},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .reg_off = 0x3B0, .bit_off = 15},
+ },
+};
+
+static const struct dpu_mdp_cfg sdm845_mdp[] = {
+ {
+ .name = "top_0", .id = MDP_TOP,
+ .base = 0x0, .len = 0x45C,
+ .features = BIT(DPU_MDP_AUDIO_SELECT),
+ .highest_bank_bit = 0x2,
+ .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+ .reg_off = 0x2AC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG1] = {
+ .reg_off = 0x2B4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG2] = {
+ .reg_off = 0x2BC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG3] = {
+ .reg_off = 0x2C4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+ .reg_off = 0x2AC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+ .reg_off = 0x2B4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .reg_off = 0x2BC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .reg_off = 0x2C4, .bit_off = 8},
+ },
+};
+
+static const struct dpu_mdp_cfg sc7180_mdp[] = {
+ {
+ .name = "top_0", .id = MDP_TOP,
+ .base = 0x0, .len = 0x494,
+ .features = 0,
+ .highest_bank_bit = 0x3,
+ .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+ .reg_off = 0x2AC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+ .reg_off = 0x2AC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .reg_off = 0x2B4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .reg_off = 0x2C4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_WB2] = {
+ .reg_off = 0x3B8, .bit_off = 24},
+ },
+};
+
+static const struct dpu_mdp_cfg sc8180x_mdp[] = {
+ {
+ .name = "top_0", .id = MDP_TOP,
+ .base = 0x0, .len = 0x45C,
+ .features = 0,
+ .highest_bank_bit = 0x3,
+ .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+ .reg_off = 0x2AC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG1] = {
+ .reg_off = 0x2B4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG2] = {
+ .reg_off = 0x2BC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG3] = {
+ .reg_off = 0x2C4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+ .reg_off = 0x2AC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+ .reg_off = 0x2B4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .reg_off = 0x2BC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .reg_off = 0x2C4, .bit_off = 8},
+ },
+};
+
+static const struct dpu_mdp_cfg sm8250_mdp[] = {
+ {
+ .name = "top_0", .id = MDP_TOP,
+ .base = 0x0, .len = 0x494,
+ .features = 0,
+ .highest_bank_bit = 0x3, /* TODO: 2 for LP_DDR4 */
+ .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+ .reg_off = 0x2AC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG1] = {
+ .reg_off = 0x2B4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG2] = {
+ .reg_off = 0x2BC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG3] = {
+ .reg_off = 0x2C4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+ .reg_off = 0x2AC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+ .reg_off = 0x2B4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .reg_off = 0x2BC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .reg_off = 0x2C4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = {
+ .reg_off = 0x2BC, .bit_off = 20},
+ .clk_ctrls[DPU_CLK_CTRL_WB2] = {
+ .reg_off = 0x3B8, .bit_off = 24},
+ },
+};
+
+static const struct dpu_mdp_cfg sc7280_mdp[] = {
+ {
+ .name = "top_0", .id = MDP_TOP,
+ .base = 0x0, .len = 0x2014,
+ .highest_bank_bit = 0x1,
+ .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+ .reg_off = 0x2AC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+ .reg_off = 0x2AC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .reg_off = 0x2B4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .reg_off = 0x2C4, .bit_off = 8},
+ },
+};
+
+static const struct dpu_mdp_cfg qcm2290_mdp[] = {
+ {
+ .name = "top_0", .id = MDP_TOP,
+ .base = 0x0, .len = 0x494,
+ .features = 0,
+ .highest_bank_bit = 0x2,
+ .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+ .reg_off = 0x2AC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+ .reg_off = 0x2AC, .bit_off = 8},
+ },
+};
+
+/*************************************************************
+ * CTL sub blocks config
+ *************************************************************/
+static const struct dpu_ctl_cfg msm8998_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x94,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ },
+ {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x94,
+ .features = 0,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ },
+ {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x94,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ },
+ {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x94,
+ .features = 0,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ },
+ {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0x94,
+ .features = 0,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ },
+};
+
+static const struct dpu_ctl_cfg sdm845_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0xE4,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ },
+ {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0xE4,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ },
+ {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0xE4,
+ .features = 0,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ },
+ {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0xE4,
+ .features = 0,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ },
+ {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0xE4,
+ .features = 0,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ },
+};
+
+static const struct dpu_ctl_cfg sc7180_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ },
+ {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ },
+ {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ },
+};
+
+static const struct dpu_ctl_cfg sm8150_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ },
+ {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ },
+ {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ },
+ {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ },
+ {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ },
+ {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a00, .len = 0x1e0,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_ctl_cfg sc7280_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x1E8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ },
+ {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x1E8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ },
+ {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x1E8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ },
+ {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x1E8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ },
+};
+
+static const struct dpu_ctl_cfg qcm2290_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x1dc,
+ .features = BIT(DPU_CTL_ACTIVE_CFG),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ },
+};
+
+/*************************************************************
+ * SSPP sub blocks config
+ *************************************************************/
+
+/* SSPP common configuration */
+#define _VIG_SBLK_ROT(num, sdma_pri, qseed_ver, rot_cfg) \
+	{ \
+	.maxdwnscale = MAX_DOWNSCALE_RATIO, \
+	.maxupscale = MAX_UPSCALE_RATIO, \
+	.smart_dma_priority = sdma_pri, \
+	.src_blk = {.name = STRCAT("sspp_src_", num), \
+		.id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+	.scaler_blk = {.name = STRCAT("sspp_scaler", num), \
+		.id = qseed_ver, \
+		.base = 0xa00, .len = 0xa0,}, \
+	.csc_blk = {.name = STRCAT("sspp_csc", num), \
+		.id = DPU_SSPP_CSC_10BIT, \
+		.base = 0x1a00, .len = 0x100,}, \
+	.format_list = plane_formats_yuv, \
+	.num_formats = ARRAY_SIZE(plane_formats_yuv), \
+	.virt_format_list = plane_formats, \
+	.virt_num_formats = ARRAY_SIZE(plane_formats), \
+	.rotation_cfg = rot_cfg, \
+	}
+
+/* The plain VIG sub-block is the rotation variant with no rotation config */
+#define _VIG_SBLK(num, sdma_pri, qseed_ver) \
+	_VIG_SBLK_ROT(num, sdma_pri, qseed_ver, NULL)
+
+#define _DMA_SBLK(num, sdma_pri) \
+ { \
+ .maxdwnscale = SSPP_UNITY_SCALE, \
+ .maxupscale = SSPP_UNITY_SCALE, \
+ .smart_dma_priority = sdma_pri, \
+ .src_blk = {.name = STRCAT("sspp_src_", num), \
+ .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+ .format_list = plane_formats, \
+ .num_formats = ARRAY_SIZE(plane_formats), \
+ .virt_format_list = plane_formats, \
+ .virt_num_formats = ARRAY_SIZE(plane_formats), \
+ }
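+
+/*
+ * For reference, _DMA_SBLK("8", 1) (used for sdm845_dma_sblk_0 below)
+ * expands to a unity-scale-only sub-block with smart-DMA priority 1, a
+ * source block named "sspp_src_8" at offset 0x00 (len 0x150), and the
+ * RGB-only plane_formats list for both the real and virtual pipes.
+ */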
+
+static const struct dpu_sspp_sub_blks msm8998_vig_sblk_0 =
+ _VIG_SBLK("0", 0, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks msm8998_vig_sblk_1 =
+ _VIG_SBLK("1", 0, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks msm8998_vig_sblk_2 =
+ _VIG_SBLK("2", 0, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks msm8998_vig_sblk_3 =
+ _VIG_SBLK("3", 0, DPU_SSPP_SCALER_QSEED3);
+
+static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = {
+ .rot_maxheight = 1088,
+ .rot_num_formats = ARRAY_SIZE(rotation_v2_formats),
+ .rot_format_list = rotation_v2_formats,
+};
+
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 =
+ _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 =
+ _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 =
+ _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 =
+ _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3);
+
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK("8", 1);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK("10", 3);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4);
+
+#define SSPP_BLK(_name, _id, _base, _features, \
+ _sblk, _xinid, _type, _clkctrl) \
+ { \
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x1c8, \
+ .features = _features, \
+ .sblk = &_sblk, \
+ .xin_id = _xinid, \
+ .type = _type, \
+ .clk_ctrl = _clkctrl \
+ }
+
+static const struct dpu_sspp_cfg msm8998_sspp[] = {
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_MSM8998_MASK,
+ msm8998_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+ SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_MSM8998_MASK,
+ msm8998_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
+ SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_MSM8998_MASK,
+ msm8998_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
+ SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_MSM8998_MASK,
+ msm8998_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
+ SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_MSM8998_MASK,
+ sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+ SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_MSM8998_MASK,
+ sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
+ SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_MSM8998_MASK,
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
+ SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_MSM8998_MASK,
+ sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
+};
+
+static const struct dpu_sspp_cfg sdm845_sspp[] = {
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SDM845_MASK,
+ sdm845_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+ SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SDM845_MASK,
+ sdm845_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
+ SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SDM845_MASK,
+ sdm845_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
+ SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SDM845_MASK,
+ sdm845_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
+ SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+ SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
+ SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+ SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+};
+
+static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 =
+ _VIG_SBLK("0", 4, DPU_SSPP_SCALER_QSEED4);
+
+static const struct dpu_sspp_sub_blks sc7280_vig_sblk_0 =
+ _VIG_SBLK_ROT("0", 4, DPU_SSPP_SCALER_QSEED4, &dpu_rot_sc7280_cfg_v2);
+
+static const struct dpu_sspp_cfg sc7180_sspp[] = {
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
+ sc7180_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+ SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+ SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+ SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+};
+
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 =
+ _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED4);
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 =
+ _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED4);
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 =
+ _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED4);
+static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 =
+ _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED4);
+
+static const struct dpu_sspp_cfg sm8250_sspp[] = {
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
+ sm8250_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+ SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SC7180_MASK,
+ sm8250_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
+ SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SC7180_MASK,
+ sm8250_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
+ SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SC7180_MASK,
+ sm8250_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
+ SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+ SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
+ SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+ SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+};
+
+static const struct dpu_sspp_cfg sc7280_sspp[] = {
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7280_MASK,
+ sc7280_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+ SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+ SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+ SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+};
+
+#define _VIG_SBLK_NOSCALE(num, sdma_pri) \
+ { \
+ .maxdwnscale = SSPP_UNITY_SCALE, \
+ .maxupscale = SSPP_UNITY_SCALE, \
+ .smart_dma_priority = sdma_pri, \
+ .src_blk = {.name = STRCAT("sspp_src_", num), \
+ .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+ .format_list = plane_formats_yuv, \
+ .num_formats = ARRAY_SIZE(plane_formats_yuv), \
+ .virt_format_list = plane_formats, \
+ .virt_num_formats = ARRAY_SIZE(plane_formats), \
+ }
+
+static const struct dpu_sspp_sub_blks qcm2290_vig_sblk_0 = _VIG_SBLK_NOSCALE("0", 2);
+static const struct dpu_sspp_sub_blks qcm2290_dma_sblk_0 = _DMA_SBLK("8", 1);
+
+static const struct dpu_sspp_cfg qcm2290_sspp[] = {
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_QCM2290_MASK,
+ qcm2290_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+ SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
+ qcm2290_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+};
+
+/*************************************************************
+ * MIXER sub blocks config
+ *************************************************************/
+
+#define LM_BLK(_name, _id, _base, _fmask, _sblk, _pp, _lmpair, _dspp) \
+ { \
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x320, \
+ .features = _fmask, \
+ .sblk = _sblk, \
+ .pingpong = _pp, \
+ .lm_pair_mask = (1 << _lmpair), \
+ .dspp = _dspp \
+ }
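+
+/*
+ * Note on the pairing argument: LM_BLK stores the peer mixer as a one-hot
+ * mask, .lm_pair_mask = 1 << _lmpair, so e.g. passing LM_2 sets only bit
+ * LM_2 (assuming the dpu_lm enum values, starting at LM_0 == 1 in
+ * dpu_hw_mdss.h, are used as bit positions).
+ */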
+
+/* MSM8998 */
+
+static const struct dpu_lm_sub_blks msm8998_lm_sblk = {
+ .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxblendstages = 7, /* excluding base layer */
+ .blendstage_base = { /* offsets relative to mixer base */
+ 0x20, 0x50, 0x80, 0xb0, 0x230,
+ 0x260, 0x290
+ },
+};
+
+static const struct dpu_lm_cfg msm8998_lm[] = {
+ LM_BLK("lm_0", LM_0, 0x44000, MIXER_MSM8998_MASK,
+ &msm8998_lm_sblk, PINGPONG_0, LM_2, DSPP_0),
+ LM_BLK("lm_1", LM_1, 0x45000, MIXER_MSM8998_MASK,
+ &msm8998_lm_sblk, PINGPONG_1, LM_5, DSPP_1),
+ LM_BLK("lm_2", LM_2, 0x46000, MIXER_MSM8998_MASK,
+ &msm8998_lm_sblk, PINGPONG_2, LM_0, 0),
+ LM_BLK("lm_3", LM_3, 0x47000, MIXER_MSM8998_MASK,
+ &msm8998_lm_sblk, PINGPONG_MAX, 0, 0),
+ LM_BLK("lm_4", LM_4, 0x48000, MIXER_MSM8998_MASK,
+ &msm8998_lm_sblk, PINGPONG_MAX, 0, 0),
+ LM_BLK("lm_5", LM_5, 0x49000, MIXER_MSM8998_MASK,
+ &msm8998_lm_sblk, PINGPONG_3, LM_1, 0),
+};
+
+/* SDM845 */
+
+static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
+ .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxblendstages = 11, /* excluding base layer */
+ .blendstage_base = { /* offsets relative to mixer base */
+ 0x20, 0x38, 0x50, 0x68, 0x80, 0x98,
+ 0xb0, 0xc8, 0xe0, 0xf8, 0x110
+ },
+};
+
+static const struct dpu_lm_cfg sdm845_lm[] = {
+ LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_0, LM_1, 0),
+ LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_1, LM_0, 0),
+ LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_2, LM_5, 0),
+ LM_BLK("lm_3", LM_3, 0x0, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_MAX, 0, 0),
+ LM_BLK("lm_4", LM_4, 0x0, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_MAX, 0, 0),
+ LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_3, LM_2, 0),
+};
+
+/* SC7180 */
+
+static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
+ .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxblendstages = 7, /* excluding base layer */
+ .blendstage_base = { /* offsets relative to mixer base */
+ 0x20, 0x38, 0x50, 0x68, 0x80, 0x98, 0xb0
+ },
+};
+
+static const struct dpu_lm_cfg sc7180_lm[] = {
+ LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK,
+ &sc7180_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
+ LM_BLK("lm_1", LM_1, 0x45000, MIXER_SC7180_MASK,
+ &sc7180_lm_sblk, PINGPONG_1, LM_0, 0),
+};
+
+/* SM8150 */
+
+static const struct dpu_lm_cfg sm8150_lm[] = {
+ LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
+ LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_1, LM_0, DSPP_1),
+ LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_2, LM_3, 0),
+ LM_BLK("lm_3", LM_3, 0x47000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_3, LM_2, 0),
+ LM_BLK("lm_4", LM_4, 0x48000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_4, LM_5, 0),
+ LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK,
+ &sdm845_lm_sblk, PINGPONG_5, LM_4, 0),
+};
+
+static const struct dpu_lm_cfg sc7280_lm[] = {
+ LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK,
+ &sc7180_lm_sblk, PINGPONG_0, 0, DSPP_0),
+ LM_BLK("lm_2", LM_2, 0x46000, MIXER_SC7180_MASK,
+ &sc7180_lm_sblk, PINGPONG_2, LM_3, 0),
+ LM_BLK("lm_3", LM_3, 0x47000, MIXER_SC7180_MASK,
+ &sc7180_lm_sblk, PINGPONG_3, LM_2, 0),
+};
+
+/* QCM2290 */
+
+static const struct dpu_lm_sub_blks qcm2290_lm_sblk = {
+ .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxblendstages = 4, /* excluding base layer */
+ .blendstage_base = { /* offsets relative to mixer base */
+ 0x20, 0x38, 0x50, 0x68
+ },
+};
+
+static const struct dpu_lm_cfg qcm2290_lm[] = {
+ LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK,
+ &qcm2290_lm_sblk, PINGPONG_0, 0, DSPP_0),
+};
+
+/*************************************************************
+ * DSPP sub blocks config
+ *************************************************************/
+static const struct dpu_dspp_sub_blks msm8998_dspp_sblk = {
+ .pcc = {.id = DPU_DSPP_PCC, .base = 0x1700,
+ .len = 0x90, .version = 0x10007},
+ .gc = { .id = DPU_DSPP_GC, .base = 0x17c0,
+ .len = 0x90, .version = 0x10007},
+};
+
+static const struct dpu_dspp_sub_blks sc7180_dspp_sblk = {
+ .pcc = {.id = DPU_DSPP_PCC, .base = 0x1700,
+ .len = 0x90, .version = 0x10000},
+};
+
+static const struct dpu_dspp_sub_blks sm8150_dspp_sblk = {
+ .pcc = {.id = DPU_DSPP_PCC, .base = 0x1700,
+ .len = 0x90, .version = 0x40000},
+};
+
+#define DSPP_BLK(_name, _id, _base, _mask, _sblk) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x1800, \
+ .features = _mask, \
+ .sblk = _sblk \
+ }
+
+static const struct dpu_dspp_cfg msm8998_dspp[] = {
+ DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_MSM8998_MASK,
+ &msm8998_dspp_sblk),
+ DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_MSM8998_MASK,
+ &msm8998_dspp_sblk),
+};
+
+static const struct dpu_dspp_cfg sc7180_dspp[] = {
+ DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+ &sc7180_dspp_sblk),
+};
+
+static const struct dpu_dspp_cfg sm8150_dspp[] = {
+ DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+ &sm8150_dspp_sblk),
+ DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK,
+ &sm8150_dspp_sblk),
+ DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK,
+ &sm8150_dspp_sblk),
+ DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK,
+ &sm8150_dspp_sblk),
+};
+
+static const struct dpu_dspp_cfg qcm2290_dspp[] = {
+ DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK,
+ &sm8150_dspp_sblk),
+};
+
+/*************************************************************
+ * PINGPONG sub blocks config
+ *************************************************************/
+static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
+ .te2 = {.id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0,
+ .version = 0x1},
+ .dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ .len = 0x20, .version = 0x10000},
+};
+
+static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
+ .dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ .len = 0x20, .version = 0x10000},
+};
+
+static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = {
+ .dither = {.id = DPU_PINGPONG_DITHER, .base = 0xe0,
+ .len = 0x20, .version = 0x20000},
+};
+
+#define PP_BLK_TE(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0xd4, \
+ .features = PINGPONG_SDM845_SPLIT_MASK, \
+ .merge_3d = _merge_3d, \
+ .sblk = &_sblk, \
+ .intr_done = _done, \
+ .intr_rdptr = _rdptr, \
+ }
+#define PP_BLK(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0xd4, \
+ .features = PINGPONG_SDM845_MASK, \
+ .merge_3d = _merge_3d, \
+ .sblk = &_sblk, \
+ .intr_done = _done, \
+ .intr_rdptr = _rdptr, \
+ }
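+
+/*
+ * The two macros differ only in the features mask: PP_BLK_TE uses
+ * PINGPONG_SDM845_SPLIT_MASK, which adds BIT(DPU_PINGPONG_TE2) on top of
+ * the dither-only PINGPONG_SDM845_MASK, so TE2-capable ping-pongs are
+ * declared with PP_BLK_TE and the rest with PP_BLK.
+ */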
+
+static const struct dpu_pingpong_cfg sdm845_pp[] = {
+ PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk_te,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
+ PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk_te,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
+ PP_BLK("pingpong_2", PINGPONG_2, 0x71000, 0, sdm845_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
+ PP_BLK("pingpong_3", PINGPONG_3, 0x71800, 0, sdm845_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
+};
+
+static struct dpu_pingpong_cfg sc7180_pp[] = {
+ PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk_te, -1, -1),
+ PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk_te, -1, -1),
+};
+
+static const struct dpu_pingpong_cfg sm8150_pp[] = {
+ PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
+ PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk_te,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
+ PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
+ PP_BLK("pingpong_3", PINGPONG_3, 0x71800, MERGE_3D_1, sdm845_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
+ PP_BLK("pingpong_4", PINGPONG_4, 0x72000, MERGE_3D_2, sdm845_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ -1),
+ PP_BLK("pingpong_5", PINGPONG_5, 0x72800, MERGE_3D_2, sdm845_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ -1),
+};
+
+static const struct dpu_pingpong_cfg sc7280_pp[] = {
+ PP_BLK("pingpong_0", PINGPONG_0, 0x69000, 0, sc7280_pp_sblk, -1, -1),
+ PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1),
+ PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1),
+ PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1),
+};
+
+static struct dpu_pingpong_cfg qcm2290_pp[] = {
+ PP_BLK("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
+};
+
+/*************************************************************
+ * MERGE_3D sub blocks config
+ *************************************************************/
+#define MERGE_3D_BLK(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x8, \
+ .features = MERGE_3D_SM8150_MASK, \
+ .sblk = NULL \
+ }
+
+static const struct dpu_merge_3d_cfg sm8150_merge_3d[] = {
+ MERGE_3D_BLK("merge_3d_0", MERGE_3D_0, 0x83000),
+ MERGE_3D_BLK("merge_3d_1", MERGE_3D_1, 0x83100),
+ MERGE_3D_BLK("merge_3d_2", MERGE_3D_2, 0x83200),
+};
+
+/*************************************************************
+ * DSC sub blocks config
+ *************************************************************/
+#define DSC_BLK(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x140, \
+ .features = 0, \
+ }
+
+static struct dpu_dsc_cfg sdm845_dsc[] = {
+ DSC_BLK("dsc_0", DSC_0, 0x80000),
+ DSC_BLK("dsc_1", DSC_1, 0x80400),
+ DSC_BLK("dsc_2", DSC_2, 0x80800),
+ DSC_BLK("dsc_3", DSC_3, 0x80c00),
+};
+
+/*************************************************************
+ * INTF sub blocks config
+ *************************************************************/
+#define INTF_BLK(_name, _id, _base, _type, _ctrl_id, _progfetch, _features, _reg, _underrun_bit, _vsync_bit) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x280, \
+ .features = _features, \
+ .type = _type, \
+ .controller_id = _ctrl_id, \
+ .prog_fetch_lines_worst_case = _progfetch, \
+ .intr_underrun = DPU_IRQ_IDX(_reg, _underrun_bit), \
+ .intr_vsync = DPU_IRQ_IDX(_reg, _vsync_bit), \
+ }
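+
+/*
+ * Reading one entry (see msm8998_intf below): INTF_BLK("intf_1", INTF_1,
+ * 0x6A800, INTF_DSI, 0, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 26, 27)
+ * is DSI controller 0 at 0x6A800 with a worst-case prog-fetch of 25 lines
+ * and underrun/vsync interrupts at bits 26/27 of MDP_SSPP_TOP0_INTR,
+ * linearized through DPU_IRQ_IDX().
+ */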
+
+static const struct dpu_intf_cfg msm8998_intf[] = {
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
+ INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
+ INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_HDMI, 0, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
+};
+
+static const struct dpu_intf_cfg sdm845_intf[] = {
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
+ INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
+ INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
+};
+
+static const struct dpu_intf_cfg sc7180_intf[] = {
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
+};
+
+static const struct dpu_intf_cfg sm8150_intf[] = {
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
+ INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
+ INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
+};
+
+static const struct dpu_intf_cfg sc7280_intf[] = {
+ INTF_BLK("intf_0", INTF_0, 0x34000, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+ INTF_BLK("intf_1", INTF_1, 0x35000, INTF_DSI, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
+ INTF_BLK("intf_5", INTF_5, 0x39000, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 22, 23),
+};
+
+static const struct dpu_intf_cfg sc8180x_intf[] = {
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
+ INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
+ /* INTF_3 is for MST, wired to INTF_DP 0 and 1, use dummy index until this is supported */
+ INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 999, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
+ INTF_BLK("intf_4", INTF_4, 0x6C000, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 20, 21),
+ INTF_BLK("intf_5", INTF_5, 0x6C800, INTF_DP, MSM_DP_CONTROLLER_2, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 22, 23),
+};
+
+static const struct dpu_intf_cfg qcm2290_intf[] = {
+ INTF_BLK("intf_0", INTF_0, 0x00000, INTF_NONE, 0, 0, 0, 0, 0, 0),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
+};
+
+/*************************************************************
+ * Writeback blocks config
+ *************************************************************/
+#define WB_BLK(_name, _id, _base, _features, _clk_ctrl, \
+ __xin_id, vbif_id, _reg, _max_linewidth, _wb_done_bit) \
+ { \
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x2c8, \
+ .features = _features, \
+ .format_list = wb2_formats, \
+ .num_formats = ARRAY_SIZE(wb2_formats), \
+ .clk_ctrl = _clk_ctrl, \
+ .xin_id = __xin_id, \
+ .vbif_idx = vbif_id, \
+ .maxlinewidth = _max_linewidth, \
+ .intr_wb_done = DPU_IRQ_IDX(_reg, _wb_done_bit) \
+ }
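+
+/*
+ * Reading the single sm8250 entry below: writeback block "wb_2" at
+ * 0x65000 on the real-time VBIF, xin id 6, clocked via DPU_CLK_CTRL_WB2,
+ * limited to 4096-pixel-wide lines, with its write-done interrupt at
+ * bit 4 of MDP_SSPP_TOP0_INTR.
+ */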
+
+static const struct dpu_wb_cfg sm8250_wb[] = {
+ WB_BLK("wb_2", WB_2, 0x65000, WB_SM8250_MASK, DPU_CLK_CTRL_WB2, 6,
+ VBIF_RT, MDP_SSPP_TOP0_INTR, 4096, 4),
+};
+
+/*************************************************************
+ * VBIF sub blocks config
+ *************************************************************/
+/* VBIF QOS remap */
+static const u32 msm8998_rt_pri_lvl[] = {1, 2, 2, 2};
+static const u32 msm8998_nrt_pri_lvl[] = {1, 1, 1, 1};
+static const u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
+static const u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
+
+static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = {
+ {
+ .pps = 1088 * 1920 * 30,
+ .ot_limit = 2,
+ },
+ {
+ .pps = 1088 * 1920 * 60,
+ .ot_limit = 6,
+ },
+ {
+ .pps = 3840 * 2160 * 30,
+ .ot_limit = 16,
+ },
+};
+
+static const struct dpu_vbif_cfg msm8998_vbif[] = {
+ {
+ .name = "vbif_rt", .id = VBIF_RT,
+ .base = 0, .len = 0x1040,
+ .default_ot_rd_limit = 32,
+ .default_ot_wr_limit = 32,
+ .features = BIT(DPU_VBIF_QOS_REMAP) | BIT(DPU_VBIF_QOS_OTLIM),
+ .xin_halt_timeout = 0x4000,
+ .qos_rp_remap_size = 0x20,
+ .dynamic_ot_rd_tbl = {
+ .count = ARRAY_SIZE(msm8998_ot_rdwr_cfg),
+ .cfg = msm8998_ot_rdwr_cfg,
+ },
+ .dynamic_ot_wr_tbl = {
+ .count = ARRAY_SIZE(msm8998_ot_rdwr_cfg),
+ .cfg = msm8998_ot_rdwr_cfg,
+ },
+ .qos_rt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(msm8998_rt_pri_lvl),
+ .priority_lvl = msm8998_rt_pri_lvl,
+ },
+ .qos_nrt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(msm8998_nrt_pri_lvl),
+ .priority_lvl = msm8998_nrt_pri_lvl,
+ },
+ .memtype_count = 14,
+ .memtype = {2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2},
+ },
+};
+
+static const struct dpu_vbif_cfg sdm845_vbif[] = {
+ {
+ .name = "vbif_rt", .id = VBIF_RT,
+ .base = 0, .len = 0x1040,
+ .features = BIT(DPU_VBIF_QOS_REMAP),
+ .xin_halt_timeout = 0x4000,
+ .qos_rp_remap_size = 0x40,
+ .qos_rt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sdm845_rt_pri_lvl),
+ .priority_lvl = sdm845_rt_pri_lvl,
+ },
+ .qos_nrt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl),
+ .priority_lvl = sdm845_nrt_pri_lvl,
+ },
+ .memtype_count = 14,
+ .memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3},
+ },
+};
+
+static const struct dpu_reg_dma_cfg sdm845_regdma = {
+ .base = 0x0, .version = 0x1, .trigger_sel_off = 0x119c
+};
+
+static const struct dpu_reg_dma_cfg sm8150_regdma = {
+ .base = 0x0, .version = 0x00010001, .trigger_sel_off = 0x119c
+};
+
+static const struct dpu_reg_dma_cfg sm8250_regdma = {
+ .base = 0x0,
+ .version = 0x00010002,
+ .trigger_sel_off = 0x119c,
+ .xin_id = 7,
+ .clk_ctrl = DPU_CLK_CTRL_REG_DMA,
+};
+
+/*************************************************************
+ * PERF data config
+ *************************************************************/
+
+/* SSPP QOS LUTs */
+static const struct dpu_qos_lut_entry msm8998_qos_linear[] = {
+ {.fl = 4, .lut = 0x1b},
+ {.fl = 5, .lut = 0x5b},
+ {.fl = 6, .lut = 0x15b},
+ {.fl = 7, .lut = 0x55b},
+ {.fl = 8, .lut = 0x155b},
+ {.fl = 9, .lut = 0x555b},
+ {.fl = 10, .lut = 0x1555b},
+ {.fl = 11, .lut = 0x5555b},
+ {.fl = 12, .lut = 0x15555b},
+ {.fl = 13, .lut = 0x55555b},
+ {.fl = 14, .lut = 0},
+ {.fl = 1, .lut = 0x1b},
+ {.fl = 0, .lut = 0}
+};
+
+static const struct dpu_qos_lut_entry sdm845_qos_linear[] = {
+ {.fl = 4, .lut = 0x357},
+ {.fl = 5, .lut = 0x3357},
+ {.fl = 6, .lut = 0x23357},
+ {.fl = 7, .lut = 0x223357},
+ {.fl = 8, .lut = 0x2223357},
+ {.fl = 9, .lut = 0x22223357},
+ {.fl = 10, .lut = 0x222223357},
+ {.fl = 11, .lut = 0x2222223357},
+ {.fl = 12, .lut = 0x22222223357},
+ {.fl = 13, .lut = 0x222222223357},
+ {.fl = 14, .lut = 0x1222222223357},
+ {.fl = 0, .lut = 0x11222222223357}
+};
+
+static const struct dpu_qos_lut_entry msm8998_qos_macrotile[] = {
+ {.fl = 10, .lut = 0x1aaff},
+ {.fl = 11, .lut = 0x5aaff},
+ {.fl = 12, .lut = 0x15aaff},
+ {.fl = 13, .lut = 0x55aaff},
+ {.fl = 1, .lut = 0x1aaff},
+ {.fl = 0, .lut = 0},
+};
+
+static const struct dpu_qos_lut_entry sc7180_qos_linear[] = {
+ {.fl = 0, .lut = 0x0011222222335777},
+};
+
+static const struct dpu_qos_lut_entry sm8150_qos_linear[] = {
+ {.fl = 0, .lut = 0x0011222222223357 },
+};
+
+static const struct dpu_qos_lut_entry sc8180x_qos_linear[] = {
+ {.fl = 4, .lut = 0x0000000000000357 },
+};
+
+static const struct dpu_qos_lut_entry qcm2290_qos_linear[] = {
+ {.fl = 0, .lut = 0x0011222222335777},
+};
+
+static const struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
+ {.fl = 10, .lut = 0x344556677},
+ {.fl = 11, .lut = 0x3344556677},
+ {.fl = 12, .lut = 0x23344556677},
+ {.fl = 13, .lut = 0x223344556677},
+ {.fl = 14, .lut = 0x1223344556677},
+ {.fl = 0, .lut = 0x112233344556677},
+};
+
+static const struct dpu_qos_lut_entry sc7180_qos_macrotile[] = {
+ {.fl = 0, .lut = 0x0011223344556677},
+};
+
+static const struct dpu_qos_lut_entry sc8180x_qos_macrotile[] = {
+ {.fl = 10, .lut = 0x0000000344556677},
+};
+
+static const struct dpu_qos_lut_entry msm8998_qos_nrt[] = {
+ {.fl = 0, .lut = 0x0},
+};
+
+static const struct dpu_qos_lut_entry sdm845_qos_nrt[] = {
+ {.fl = 0, .lut = 0x0},
+};
+
+static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
+ {.fl = 0, .lut = 0x0},
+};
+
+static const struct dpu_perf_cfg msm8998_perf_data = {
+ .max_bw_low = 6700000,
+ .max_bw_high = 6700000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .undersized_prefill_lines = 2,
+ .xtra_prefill_lines = 2,
+ .dest_scale_prefill_lines = 3,
+ .macrotile_prefill_lines = 4,
+ .yuv_nv12_prefill_lines = 8,
+ .linear_prefill_lines = 1,
+ .downscaling_prefill_lines = 1,
+ .amortizable_threshold = 25,
+ .min_prefill_lines = 25,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfffc, 0xff00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(msm8998_qos_linear),
+ .entries = msm8998_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_macrotile),
+ .entries = msm8998_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_nrt),
+ .entries = msm8998_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 200,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_perf_cfg sdm845_perf_data = {
+ .max_bw_low = 6800000,
+ .max_bw_high = 6800000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .undersized_prefill_lines = 2,
+ .xtra_prefill_lines = 2,
+ .dest_scale_prefill_lines = 3,
+ .macrotile_prefill_lines = 4,
+ .yuv_nv12_prefill_lines = 8,
+ .linear_prefill_lines = 1,
+ .downscaling_prefill_lines = 1,
+ .amortizable_threshold = 25,
+ .min_prefill_lines = 24,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff0, 0xf000, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sdm845_qos_linear),
+ .entries = sdm845_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sdm845_qos_macrotile),
+ .entries = sdm845_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sdm845_qos_nrt),
+ .entries = sdm845_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_perf_cfg sc7180_perf_data = {
+ .max_bw_low = 6800000,
+ .max_bw_high = 6800000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 1600000,
+ .min_prefill_lines = 24,
+ .danger_lut_tbl = {0xff, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff0, 0xff00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_perf_cfg sm8150_perf_data = {
+ .max_bw_low = 12800000,
+ .max_bw_high = 12800000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 24,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff8, 0xf000, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sm8150_qos_linear),
+ .entries = sm8150_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_perf_cfg sc8180x_perf_data = {
+ .max_bw_low = 9600000,
+ .max_bw_high = 9600000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc8180x_qos_linear),
+ .entries = sc8180x_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc8180x_qos_macrotile),
+ .entries = sc8180x_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_perf_cfg sm8250_perf_data = {
+ .max_bw_low = 13700000,
+ .max_bw_high = 16600000,
+ .min_core_ib = 4800000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 35,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff0, 0xff00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_perf_cfg sc7280_perf_data = {
+ .max_bw_low = 4700000,
+ .max_bw_high = 8800000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 1600000,
+ .min_prefill_lines = 24,
+ .danger_lut_tbl = {0xffff, 0xffff, 0x0},
+ .safe_lut_tbl = {0xff00, 0xff00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_perf_cfg qcm2290_perf_data = {
+ .max_bw_low = 2700000,
+ .max_bw_high = 2700000,
+ .min_core_ib = 1300000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 1600000,
+ .min_prefill_lines = 24,
+ .danger_lut_tbl = {0xff, 0x0, 0x0},
+ .safe_lut_tbl = {0xfff0, 0x0, 0x0},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(qcm2290_qos_linear),
+ .entries = qcm2290_qos_linear
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
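+
+/*
+ * Note on the perf tables above: danger_lut_tbl, safe_lut_tbl and
+ * qos_lut_tbl are indexed by enum dpu_qos_lut_usage (linear, macrotile,
+ * NRT), and the inefficiency factors are applied in hundredths by the
+ * core perf code, so e.g. clk_inefficiency_factor = 105 pads the
+ * computed clock demand by 5%.
+ */
+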
+/*************************************************************
+ * Hardware catalog
+ *************************************************************/
+
+static const struct dpu_mdss_cfg msm8998_dpu_cfg = {
+ .caps = &msm8998_dpu_caps,
+ .mdp_count = ARRAY_SIZE(msm8998_mdp),
+ .mdp = msm8998_mdp,
+ .ctl_count = ARRAY_SIZE(msm8998_ctl),
+ .ctl = msm8998_ctl,
+ .sspp_count = ARRAY_SIZE(msm8998_sspp),
+ .sspp = msm8998_sspp,
+ .mixer_count = ARRAY_SIZE(msm8998_lm),
+ .mixer = msm8998_lm,
+ .dspp_count = ARRAY_SIZE(msm8998_dspp),
+ .dspp = msm8998_dspp,
+ .pingpong_count = ARRAY_SIZE(sdm845_pp),
+ .pingpong = sdm845_pp,
+ .intf_count = ARRAY_SIZE(msm8998_intf),
+ .intf = msm8998_intf,
+ .vbif_count = ARRAY_SIZE(msm8998_vbif),
+ .vbif = msm8998_vbif,
+ .reg_dma_count = 0,
+ .perf = &msm8998_perf_data,
+ .mdss_irqs = IRQ_SM8250_MASK,
+};
+
+static const struct dpu_mdss_cfg sdm845_dpu_cfg = {
+ .caps = &sdm845_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sdm845_mdp),
+ .mdp = sdm845_mdp,
+ .ctl_count = ARRAY_SIZE(sdm845_ctl),
+ .ctl = sdm845_ctl,
+ .sspp_count = ARRAY_SIZE(sdm845_sspp),
+ .sspp = sdm845_sspp,
+ .mixer_count = ARRAY_SIZE(sdm845_lm),
+ .mixer = sdm845_lm,
+ .pingpong_count = ARRAY_SIZE(sdm845_pp),
+ .pingpong = sdm845_pp,
+ .dsc_count = ARRAY_SIZE(sdm845_dsc),
+ .dsc = sdm845_dsc,
+ .intf_count = ARRAY_SIZE(sdm845_intf),
+ .intf = sdm845_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = &sdm845_regdma,
+ .perf = &sdm845_perf_data,
+ .mdss_irqs = IRQ_SDM845_MASK,
+};
+
+static const struct dpu_mdss_cfg sc7180_dpu_cfg = {
+ .caps = &sc7180_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sc7180_mdp),
+ .mdp = sc7180_mdp,
+ .ctl_count = ARRAY_SIZE(sc7180_ctl),
+ .ctl = sc7180_ctl,
+ .sspp_count = ARRAY_SIZE(sc7180_sspp),
+ .sspp = sc7180_sspp,
+ .mixer_count = ARRAY_SIZE(sc7180_lm),
+ .mixer = sc7180_lm,
+ .dspp_count = ARRAY_SIZE(sc7180_dspp),
+ .dspp = sc7180_dspp,
+ .pingpong_count = ARRAY_SIZE(sc7180_pp),
+ .pingpong = sc7180_pp,
+ .intf_count = ARRAY_SIZE(sc7180_intf),
+ .intf = sc7180_intf,
+ .wb_count = ARRAY_SIZE(sm8250_wb),
+ .wb = sm8250_wb,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = &sdm845_regdma,
+ .perf = &sc7180_perf_data,
+ .mdss_irqs = IRQ_SC7180_MASK,
+};
+
+static const struct dpu_mdss_cfg sm8150_dpu_cfg = {
+ .caps = &sm8150_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sdm845_mdp),
+ .mdp = sdm845_mdp,
+ .ctl_count = ARRAY_SIZE(sm8150_ctl),
+ .ctl = sm8150_ctl,
+ .sspp_count = ARRAY_SIZE(sdm845_sspp),
+ .sspp = sdm845_sspp,
+ .mixer_count = ARRAY_SIZE(sm8150_lm),
+ .mixer = sm8150_lm,
+ .dspp_count = ARRAY_SIZE(sm8150_dspp),
+ .dspp = sm8150_dspp,
+ .pingpong_count = ARRAY_SIZE(sm8150_pp),
+ .pingpong = sm8150_pp,
+ .merge_3d_count = ARRAY_SIZE(sm8150_merge_3d),
+ .merge_3d = sm8150_merge_3d,
+ .intf_count = ARRAY_SIZE(sm8150_intf),
+ .intf = sm8150_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = &sm8150_regdma,
+ .perf = &sm8150_perf_data,
+ .mdss_irqs = IRQ_SDM845_MASK,
+};
+
+static const struct dpu_mdss_cfg sc8180x_dpu_cfg = {
+ .caps = &sc8180x_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sc8180x_mdp),
+ .mdp = sc8180x_mdp,
+ .ctl_count = ARRAY_SIZE(sm8150_ctl),
+ .ctl = sm8150_ctl,
+ .sspp_count = ARRAY_SIZE(sdm845_sspp),
+ .sspp = sdm845_sspp,
+ .mixer_count = ARRAY_SIZE(sm8150_lm),
+ .mixer = sm8150_lm,
+ .pingpong_count = ARRAY_SIZE(sm8150_pp),
+ .pingpong = sm8150_pp,
+ .merge_3d_count = ARRAY_SIZE(sm8150_merge_3d),
+ .merge_3d = sm8150_merge_3d,
+ .intf_count = ARRAY_SIZE(sc8180x_intf),
+ .intf = sc8180x_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = &sm8150_regdma,
+ .perf = &sc8180x_perf_data,
+ .mdss_irqs = IRQ_SC8180X_MASK,
+};
+
+static const struct dpu_mdss_cfg sm8250_dpu_cfg = {
+ .caps = &sm8250_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sm8250_mdp),
+ .mdp = sm8250_mdp,
+ .ctl_count = ARRAY_SIZE(sm8150_ctl),
+ .ctl = sm8150_ctl,
+ .sspp_count = ARRAY_SIZE(sm8250_sspp),
+ .sspp = sm8250_sspp,
+ .mixer_count = ARRAY_SIZE(sm8150_lm),
+ .mixer = sm8150_lm,
+ .dspp_count = ARRAY_SIZE(sm8150_dspp),
+ .dspp = sm8150_dspp,
+ .pingpong_count = ARRAY_SIZE(sm8150_pp),
+ .pingpong = sm8150_pp,
+ .merge_3d_count = ARRAY_SIZE(sm8150_merge_3d),
+ .merge_3d = sm8150_merge_3d,
+ .intf_count = ARRAY_SIZE(sm8150_intf),
+ .intf = sm8150_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .wb_count = ARRAY_SIZE(sm8250_wb),
+ .wb = sm8250_wb,
+ .reg_dma_count = 1,
+ .dma_cfg = &sm8250_regdma,
+ .perf = &sm8250_perf_data,
+ .mdss_irqs = IRQ_SM8250_MASK,
+};
+
+static const struct dpu_mdss_cfg sc7280_dpu_cfg = {
+ .caps = &sc7280_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sc7280_mdp),
+ .mdp = sc7280_mdp,
+ .ctl_count = ARRAY_SIZE(sc7280_ctl),
+ .ctl = sc7280_ctl,
+ .sspp_count = ARRAY_SIZE(sc7280_sspp),
+ .sspp = sc7280_sspp,
+ .dspp_count = ARRAY_SIZE(sc7180_dspp),
+ .dspp = sc7180_dspp,
+ .mixer_count = ARRAY_SIZE(sc7280_lm),
+ .mixer = sc7280_lm,
+ .pingpong_count = ARRAY_SIZE(sc7280_pp),
+ .pingpong = sc7280_pp,
+ .intf_count = ARRAY_SIZE(sc7280_intf),
+ .intf = sc7280_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sc7280_perf_data,
+ .mdss_irqs = IRQ_SC7280_MASK,
+};
+
+static const struct dpu_mdss_cfg qcm2290_dpu_cfg = {
+ .caps = &qcm2290_dpu_caps,
+ .mdp_count = ARRAY_SIZE(qcm2290_mdp),
+ .mdp = qcm2290_mdp,
+ .ctl_count = ARRAY_SIZE(qcm2290_ctl),
+ .ctl = qcm2290_ctl,
+ .sspp_count = ARRAY_SIZE(qcm2290_sspp),
+ .sspp = qcm2290_sspp,
+ .mixer_count = ARRAY_SIZE(qcm2290_lm),
+ .mixer = qcm2290_lm,
+ .dspp_count = ARRAY_SIZE(qcm2290_dspp),
+ .dspp = qcm2290_dspp,
+ .pingpong_count = ARRAY_SIZE(qcm2290_pp),
+ .pingpong = qcm2290_pp,
+ .intf_count = ARRAY_SIZE(qcm2290_intf),
+ .intf = qcm2290_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &qcm2290_perf_data,
+ .mdss_irqs = IRQ_SC7180_MASK,
+};
+
+static const struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
+ { .hw_rev = DPU_HW_VER_300, .dpu_cfg = &msm8998_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_301, .dpu_cfg = &msm8998_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_400, .dpu_cfg = &sdm845_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_401, .dpu_cfg = &sdm845_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_500, .dpu_cfg = &sm8150_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_501, .dpu_cfg = &sm8150_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_510, .dpu_cfg = &sc8180x_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_600, .dpu_cfg = &sm8250_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_620, .dpu_cfg = &sc7180_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_650, .dpu_cfg = &qcm2290_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_720, .dpu_cfg = &sc7280_dpu_cfg},
+};
+
+const struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
+ if (cfg_handler[i].hw_rev == hw_rev)
+ return cfg_handler[i].dpu_cfg;
+ }
+
+ DPU_ERROR("unsupported chipset id:%X\n", hw_rev);
+
+ return ERR_PTR(-ENODEV);
+}
+
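+/*
+ * Illustrative usage sketch (not part of the driver): a caller such as
+ * dpu_kms resolves the catalog once from the hardware revision it read
+ * (here "core_rev") and bails out on unsupported chipsets:
+ *
+ *	const struct dpu_mdss_cfg *cfg = dpu_hw_catalog_init(core_rev);
+ *
+ *	if (IS_ERR(cfg))
+ *		return PTR_ERR(cfg);
+ */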
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
new file mode 100644
index 000000000..77c46ce5a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -0,0 +1,895 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_CATALOG_H
+#define _DPU_HW_CATALOG_H
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/bitmap.h>
+#include <linux/err.h>
+
+/**
+ * Maximum hardware block count: e.g. at most 12 SSPP pipes or
+ * 5 CTL paths. In all cases the current design allows at most
+ * 12 instances of any hardware block type.
+ */
+#define MAX_BLOCKS 12
+
+#define DPU_HW_VER(MAJOR, MINOR, STEP) (((MAJOR & 0xF) << 28) |\
+ ((MINOR & 0xFFF) << 16) |\
+ (STEP & 0xFFFF))
+
+#define DPU_HW_MAJOR(rev) ((rev) >> 28)
+#define DPU_HW_MINOR(rev) (((rev) >> 16) & 0xFFF)
+#define DPU_HW_STEP(rev) ((rev) & 0xFFFF)
+#define DPU_HW_MAJOR_MINOR(rev) ((rev) >> 16)
+
+#define IS_DPU_MAJOR_MINOR_SAME(rev1, rev2) \
+ (DPU_HW_MAJOR_MINOR((rev1)) == DPU_HW_MAJOR_MINOR((rev2)))
+
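+/*
+ * Example (illustrative): DPU_HW_VER(4, 0, 1) packs to 0x40000001, so
+ * DPU_HW_MAJOR() returns 4, DPU_HW_MINOR() 0 and DPU_HW_STEP() 1, and
+ * IS_DPU_MAJOR_MINOR_SAME(DPU_HW_VER(4, 0, 0), DPU_HW_VER(4, 0, 1))
+ * is true because only the step field differs.
+ */
+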
+#define DPU_HW_VER_170 DPU_HW_VER(1, 7, 0) /* 8996 v1.0 */
+#define DPU_HW_VER_171 DPU_HW_VER(1, 7, 1) /* 8996 v2.0 */
+#define DPU_HW_VER_172 DPU_HW_VER(1, 7, 2) /* 8996 v3.0 */
+#define DPU_HW_VER_300 DPU_HW_VER(3, 0, 0) /* 8998 v1.0 */
+#define DPU_HW_VER_301 DPU_HW_VER(3, 0, 1) /* 8998 v1.1 */
+#define DPU_HW_VER_400 DPU_HW_VER(4, 0, 0) /* sdm845 v1.0 */
+#define DPU_HW_VER_401 DPU_HW_VER(4, 0, 1) /* sdm845 v2.0 */
+#define DPU_HW_VER_410 DPU_HW_VER(4, 1, 0) /* sdm670 v1.0 */
+#define DPU_HW_VER_500 DPU_HW_VER(5, 0, 0) /* sm8150 v1.0 */
+#define DPU_HW_VER_501 DPU_HW_VER(5, 0, 1) /* sm8150 v2.0 */
+#define DPU_HW_VER_510 DPU_HW_VER(5, 1, 1) /* sc8180 */
+#define DPU_HW_VER_600 DPU_HW_VER(6, 0, 0) /* sm8250 */
+#define DPU_HW_VER_620 DPU_HW_VER(6, 2, 0) /* sc7180 v1.0 */
+#define DPU_HW_VER_650 DPU_HW_VER(6, 5, 0) /* qcm2290|sm4125 */
+#define DPU_HW_VER_720 DPU_HW_VER(7, 2, 0) /* sc7280 */
+
+#define IS_MSM8996_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_170)
+#define IS_MSM8998_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_300)
+#define IS_SDM845_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_400)
+#define IS_SDM670_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_410)
+#define IS_SDM855_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_500)
+#define IS_SC7180_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_620)
+#define IS_SC7280_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_720)
+
+#define DPU_HW_BLK_NAME_LEN 16
+
+#define MAX_IMG_WIDTH 0x3fff
+#define MAX_IMG_HEIGHT 0x3fff
+
+#define CRTC_DUAL_MIXERS 2
+
+#define MAX_XIN_COUNT 16
+
+/**
+ * Supported UBWC feature versions
+ */
+enum {
+ DPU_HW_UBWC_VER_10 = 0x100,
+ DPU_HW_UBWC_VER_20 = 0x200,
+ DPU_HW_UBWC_VER_30 = 0x300,
+ DPU_HW_UBWC_VER_40 = 0x400,
+};
+
+/**
+ * MDP TOP BLOCK features
+ * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
+ * @DPU_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
+ * @DPU_MDP_BWC, MDSS HW supports Bandwidth compression.
+ * @DPU_MDP_UBWC_1_0, This chipset supports the initial revision of
+ * Universal Bandwidth Compression
+ * @DPU_MDP_UBWC_1_5, Universal Bandwidth Compression version 1.5
+ * @DPU_MDP_AUDIO_SELECT, HDMI/DP audio core select handled in MDP TOP
+ * @DPU_MDP_MAX Maximum value
+ */
+enum {
+ DPU_MDP_PANIC_PER_PIPE = 0x1,
+ DPU_MDP_10BIT_SUPPORT,
+ DPU_MDP_BWC,
+ DPU_MDP_UBWC_1_0,
+ DPU_MDP_UBWC_1_5,
+ DPU_MDP_AUDIO_SELECT,
+ DPU_MDP_MAX
+};
+
+/**
+ * SSPP sub-blocks/features
+ * @DPU_SSPP_SRC Src and fetch part of the pipes,
+ * @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
+ * @DPU_SSPP_SCALER_QSEED3, QSEED3 algorithm support
+ * @DPU_SSPP_SCALER_QSEED3LITE, QSEED3 Lite algorithm support
+ * @DPU_SSPP_SCALER_QSEED4, QSEED4 algorithm support
+ * @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes
+ * @DPU_SSPP_CSC, Support of color space conversion
+ * @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion
+ * @DPU_SSPP_CURSOR, SSPP can be used as a cursor layer
+ * @DPU_SSPP_QOS, SSPP support QoS control, danger/safe/creq
+ * @DPU_SSPP_QOS_8LVL, SSPP support 8-level QoS control
+ * @DPU_SSPP_EXCL_RECT, SSPP supports exclusion rect
+ * @DPU_SSPP_SMART_DMA_V1, SmartDMA 1.0 support
+ * @DPU_SSPP_SMART_DMA_V2, SmartDMA 2.0 support
+ * @DPU_SSPP_TS_PREFILL Supports prefill with traffic shaper
+ * @DPU_SSPP_TS_PREFILL_REC1 Supports prefill with traffic shaper multirec
+ * @DPU_SSPP_CDP Supports client driven prefetch
+ * @DPU_SSPP_INLINE_ROTATION Supports inline rotation
+ * @DPU_SSPP_MAX maximum value
+ */
+enum {
+ DPU_SSPP_SRC = 0x1,
+ DPU_SSPP_SCALER_QSEED2,
+ DPU_SSPP_SCALER_QSEED3,
+ DPU_SSPP_SCALER_QSEED3LITE,
+ DPU_SSPP_SCALER_QSEED4,
+ DPU_SSPP_SCALER_RGB,
+ DPU_SSPP_CSC,
+ DPU_SSPP_CSC_10BIT,
+ DPU_SSPP_CURSOR,
+ DPU_SSPP_QOS,
+ DPU_SSPP_QOS_8LVL,
+ DPU_SSPP_EXCL_RECT,
+ DPU_SSPP_SMART_DMA_V1,
+ DPU_SSPP_SMART_DMA_V2,
+ DPU_SSPP_TS_PREFILL,
+ DPU_SSPP_TS_PREFILL_REC1,
+ DPU_SSPP_CDP,
+ DPU_SSPP_INLINE_ROTATION,
+ DPU_SSPP_MAX
+};
+
+/*
+ * MIXER sub-blocks/features
+ * @DPU_MIXER_LAYER Layer mixer layer blend configuration,
+ * @DPU_MIXER_SOURCESPLIT Layer mixer supports source-split configuration
+ * @DPU_MIXER_GC Gamma correction block
+ * @DPU_DIM_LAYER Layer mixer supports dim layer
+ * @DPU_MIXER_COMBINED_ALPHA Layer mixer has combined alpha register
+ * @DPU_MIXER_MAX maximum value
+ */
+enum {
+ DPU_MIXER_LAYER = 0x1,
+ DPU_MIXER_SOURCESPLIT,
+ DPU_MIXER_GC,
+ DPU_DIM_LAYER,
+ DPU_MIXER_COMBINED_ALPHA,
+ DPU_MIXER_MAX
+};
+
+/**
+ * DSPP sub-blocks
+ * @DPU_DSPP_PCC Panel color correction block
+ * @DPU_DSPP_GC Gamma correction block
+ */
+enum {
+ DPU_DSPP_PCC = 0x1,
+ DPU_DSPP_GC,
+ DPU_DSPP_MAX
+};
+
+/**
+ * PINGPONG sub-blocks
+ * @DPU_PINGPONG_TE Tear check block
+ * @DPU_PINGPONG_TE2 Additional tear check block for split pipes
+ * @DPU_PINGPONG_SPLIT PP block supports split fifo
+ * @DPU_PINGPONG_SLAVE PP block is a suitable slave for split fifo
+ * @DPU_PINGPONG_DITHER Dither block
+ * @DPU_PINGPONG_MAX
+ */
+enum {
+ DPU_PINGPONG_TE = 0x1,
+ DPU_PINGPONG_TE2,
+ DPU_PINGPONG_SPLIT,
+ DPU_PINGPONG_SLAVE,
+ DPU_PINGPONG_DITHER,
+ DPU_PINGPONG_MAX
+};
+
+/**
+ * CTL sub-blocks
+ * @DPU_CTL_SPLIT_DISPLAY: CTL supports video mode split display
+ * @DPU_CTL_ACTIVE_CFG: CTL uses the ACTIVE-style (v1) configuration,
+ * with dedicated INTF/WB/MERGE_3D active and flush registers
+ * @DPU_CTL_FETCH_ACTIVE: Active CTL for fetch HW (SSPPs)
+ * @DPU_CTL_VM_CFG: CTL config to support multiple VMs
+ * @DPU_CTL_MAX
+ */
+enum {
+ DPU_CTL_SPLIT_DISPLAY = 0x1,
+ DPU_CTL_ACTIVE_CFG,
+ DPU_CTL_FETCH_ACTIVE,
+ DPU_CTL_VM_CFG,
+ DPU_CTL_MAX
+};
+
+/**
+ * INTF sub-blocks
+ * @DPU_INTF_INPUT_CTRL Supports the setting of pp block from which
+ * pixel data arrives to this INTF
+ * @DPU_INTF_TE INTF block has TE configuration support
+ * @DPU_DATA_HCTL_EN Allows data to be transferred at a rate
+ * different from the video timing
+ * @DPU_INTF_STATUS_SUPPORTED INTF block has INTF_STATUS register
+ * @DPU_INTF_MAX
+ */
+enum {
+ DPU_INTF_INPUT_CTRL = 0x1,
+ DPU_INTF_TE,
+ DPU_DATA_HCTL_EN,
+ DPU_INTF_STATUS_SUPPORTED,
+ DPU_INTF_MAX
+};
+
+/**
+ * WB sub-blocks and features
+ * @DPU_WB_LINE_MODE Writeback module supports line/linear mode
+ * @DPU_WB_BLOCK_MODE Writeback module supports block mode read
+ * @DPU_WB_UBWC, Writeback Universal Bandwidth Compression
+ * @DPU_WB_YUV_CONFIG Writeback supports output of YUV colorspace
+ * @DPU_WB_PIPE_ALPHA Writeback supports pipe alpha
+ * @DPU_WB_XY_ROI_OFFSET Writeback supports x/y-offset of out ROI in
+ * the destination image
+ * @DPU_WB_QOS, Writeback supports QoS control, danger/safe/creq
+ * @DPU_WB_QOS_8LVL, Writeback supports 8-level QoS control
+ * @DPU_WB_CDP Writeback supports client driven prefetch
+ * @DPU_WB_INPUT_CTRL Writeback supports selecting the pingpong block
+ * from which its input pixel data arrives
+ * @DPU_WB_CROP CWB supports cropping
+ * @DPU_WB_MAX maximum value
+ */
+enum {
+ DPU_WB_LINE_MODE = 0x1,
+ DPU_WB_BLOCK_MODE,
+ DPU_WB_UBWC,
+ DPU_WB_YUV_CONFIG,
+ DPU_WB_PIPE_ALPHA,
+ DPU_WB_XY_ROI_OFFSET,
+ DPU_WB_QOS,
+ DPU_WB_QOS_8LVL,
+ DPU_WB_CDP,
+ DPU_WB_INPUT_CTRL,
+ DPU_WB_CROP,
+ DPU_WB_MAX
+};
+
+/**
+ * VBIF sub-blocks and features
+ * @DPU_VBIF_QOS_OTLIM VBIF supports OT Limit
+ * @DPU_VBIF_QOS_REMAP VBIF supports QoS priority remap
+ * @DPU_VBIF_MAX maximum value
+ */
+enum {
+ DPU_VBIF_QOS_OTLIM = 0x1,
+ DPU_VBIF_QOS_REMAP,
+ DPU_VBIF_MAX
+};
+
+/**
+ * MACRO DPU_HW_BLK_INFO - information of HW blocks inside DPU
+ * @name: string name for debug purposes
+ * @id: enum identifying this block
+ * @base: register base offset to mdss
+ * @len: length of hardware block
+ * @features bit mask identifying sub-blocks/features
+ */
+#define DPU_HW_BLK_INFO \
+ char name[DPU_HW_BLK_NAME_LEN]; \
+ u32 id; \
+ u32 base; \
+ u32 len; \
+ unsigned long features
+
+/**
+ * MACRO DPU_HW_SUBBLK_INFO - information of HW sub-block inside DPU
+ * @name: string name for debug purposes
+ * @id: enum identifying this sub-block
+ * @base: offset of this sub-block relative to the block
+ * offset
+ * @len register block length of this sub-block
+ */
+#define DPU_HW_SUBBLK_INFO \
+ char name[DPU_HW_BLK_NAME_LEN]; \
+ u32 id; \
+ u32 base; \
+ u32 len
+
+/**
+ * struct dpu_src_blk: SSPP part of the source pipes
+ * @info: HW register and features supported by this sub-blk
+ */
+struct dpu_src_blk {
+ DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_scaler_blk: Scaler information
+ * @info: HW register and features supported by this sub-blk
+ * @version: qseed block revision
+ */
+struct dpu_scaler_blk {
+ DPU_HW_SUBBLK_INFO;
+ u32 version;
+};
+
+struct dpu_csc_blk {
+ DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_pp_blk : Pixel processing sub-blk information
+ * @info: HW register and features supported by this sub-blk
+ * @version: HW Algorithm version
+ */
+struct dpu_pp_blk {
+ DPU_HW_SUBBLK_INFO;
+ u32 version;
+};
+
+/**
+ * enum dpu_qos_lut_usage - define QoS LUT use cases
+ */
+enum dpu_qos_lut_usage {
+ DPU_QOS_LUT_USAGE_LINEAR,
+ DPU_QOS_LUT_USAGE_MACROTILE,
+ DPU_QOS_LUT_USAGE_NRT,
+ DPU_QOS_LUT_USAGE_MAX,
+};
+
+/**
+ * struct dpu_qos_lut_entry - define QoS LUT table entry
+ * @fl: fill level, or zero on last entry to indicate default lut
+ * @lut: lut to use if equal to or less than fill level
+ */
+struct dpu_qos_lut_entry {
+ u32 fl;
+ u64 lut;
+};
+
+/**
+ * struct dpu_qos_lut_tbl - define QoS LUT table
+ * @nentry: number of entry in this table
+ * @entries: Pointer to table entries
+ */
+struct dpu_qos_lut_tbl {
+ u32 nentry;
+ const struct dpu_qos_lut_entry *entries;
+};
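+
+/*
+ * Illustrative consumer sketch: resolving a lut means scanning the table
+ * for the first entry whose fill level covers the request; an entry with
+ * fl == 0 terminates the table and acts as the default. A minimal lookup,
+ * assuming "total_fl" is the requested fill level, could read:
+ *
+ *	static u64 example_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
+ *				       u32 total_fl)
+ *	{
+ *		int i;
+ *
+ *		for (i = 0; i < tbl->nentry; i++)
+ *			if (!tbl->entries[i].fl || tbl->entries[i].fl >= total_fl)
+ *				return tbl->entries[i].lut;
+ *
+ *		return 0;
+ *	}
+ */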
+
+/**
+ * struct dpu_rotation_cfg - define inline rotation config
+ * @rot_maxheight: max pre-rotated height allowed for rotation
+ * @rot_num_formats: number of elements in @rot_format_list
+ * @rot_format_list: list of supported rotator formats
+ */
+struct dpu_rotation_cfg {
+ u32 rot_maxheight;
+ size_t rot_num_formats;
+ const u32 *rot_format_list;
+};
+
+/**
+ * struct dpu_caps - define DPU capabilities
+ * @max_mixer_width max layer mixer line width support.
+ * @max_mixer_blendstages max layer mixer blend stages or
+ * supported z order
+ * @qseed_type qseed2 or qseed3 support.
+ * @smart_dma_rev Supported version of SmartDMA feature.
+ * @ubwc_version UBWC feature version (0x0 for not supported)
+ * @has_src_split source split feature status
+ * @has_dim_layer dim layer feature status
+ * @has_idle_pc indicate if idle power collapse feature is supported
+ * @has_3d_merge indicate if 3D merge is supported
+ * @max_linewidth max linewidth for sspp
+ * @pixel_ram_size size of latency hiding and de-tiling buffer in bytes
+ * @max_hdeci_exp max horizontal decimation supported (max is 2^value)
+ * @max_vdeci_exp max vertical decimation supported (max is 2^value)
+ */
+struct dpu_caps {
+ u32 max_mixer_width;
+ u32 max_mixer_blendstages;
+ u32 qseed_type;
+ u32 smart_dma_rev;
+ u32 ubwc_version;
+ bool has_src_split;
+ bool has_dim_layer;
+ bool has_idle_pc;
+ bool has_3d_merge;
+ /* SSPP limits */
+ u32 max_linewidth;
+ u32 pixel_ram_size;
+ u32 max_hdeci_exp;
+ u32 max_vdeci_exp;
+};
+
+/**
+ * struct dpu_sspp_sub_blks : SSPP sub-blocks
+ * @creq_vblank: creq priority during vertical blanking
+ * @danger_vblank: danger priority during vertical blanking
+ * @maxdwnscale: max downscale ratio supported (without DECIMATION)
+ * @maxupscale: max upscale ratio supported
+ * @smart_dma_priority: hw priority of rect1 of multirect pipe
+ * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
+ * @qseed_ver: qseed version
+ * @src_blk: source fetch sub-block information
+ * @scaler_blk: scaler sub-block information
+ * @csc_blk: color space conversion sub-block information
+ * @hsic_blk: HSIC sub-block information
+ * @memcolor_blk: memory color sub-block information
+ * @pcc_blk: panel color correction sub-block information
+ * @igc_blk: inverse gamma correction sub-block information
+ * @format_list: Pointer to list of supported formats
+ * @num_formats: Number of supported formats
+ * @virt_format_list: Pointer to list of supported formats for virtual planes
+ * @virt_num_formats: Number of supported formats for virtual planes
+ * @rotation_cfg: inline rotation configuration
+ */
+struct dpu_sspp_sub_blks {
+ u32 creq_vblank;
+ u32 danger_vblank;
+ u32 maxdwnscale;
+ u32 maxupscale;
+ u32 smart_dma_priority;
+ u32 max_per_pipe_bw;
+ u32 qseed_ver;
+ struct dpu_src_blk src_blk;
+ struct dpu_scaler_blk scaler_blk;
+ struct dpu_pp_blk csc_blk;
+ struct dpu_pp_blk hsic_blk;
+ struct dpu_pp_blk memcolor_blk;
+ struct dpu_pp_blk pcc_blk;
+ struct dpu_pp_blk igc_blk;
+
+ const u32 *format_list;
+ u32 num_formats;
+ const u32 *virt_format_list;
+ u32 virt_num_formats;
+ const struct dpu_rotation_cfg *rotation_cfg;
+};
+
+/**
+ * struct dpu_lm_sub_blks: information of mixer block
+ * @maxwidth: Max pixel width supported by this mixer
+ * @maxblendstages: Max number of blend-stages supported
+ * @blendstage_base: Blend-stage register base offset
+ * @gc: gamma correction block
+ */
+struct dpu_lm_sub_blks {
+ u32 maxwidth;
+ u32 maxblendstages;
+ u32 blendstage_base[MAX_BLOCKS];
+ struct dpu_pp_blk gc;
+};
+
+/**
+ * struct dpu_dspp_sub_blks: Information of DSPP block
+ * @gc : gamma correction block
+ * @pcc: pixel color correction block
+ */
+struct dpu_dspp_sub_blks {
+ struct dpu_pp_blk gc;
+ struct dpu_pp_blk pcc;
+};
+
+struct dpu_pingpong_sub_blks {
+ struct dpu_pp_blk te;
+ struct dpu_pp_blk te2;
+ struct dpu_pp_blk dither;
+};
+
+/**
+ * dpu_clk_ctrl_type - Defines top level clock control signals
+ */
+enum dpu_clk_ctrl_type {
+ DPU_CLK_CTRL_NONE,
+ DPU_CLK_CTRL_VIG0,
+ DPU_CLK_CTRL_VIG1,
+ DPU_CLK_CTRL_VIG2,
+ DPU_CLK_CTRL_VIG3,
+ DPU_CLK_CTRL_VIG4,
+ DPU_CLK_CTRL_RGB0,
+ DPU_CLK_CTRL_RGB1,
+ DPU_CLK_CTRL_RGB2,
+ DPU_CLK_CTRL_RGB3,
+ DPU_CLK_CTRL_DMA0,
+ DPU_CLK_CTRL_DMA1,
+ DPU_CLK_CTRL_DMA2,
+ DPU_CLK_CTRL_DMA3,
+ DPU_CLK_CTRL_CURSOR0,
+ DPU_CLK_CTRL_CURSOR1,
+ DPU_CLK_CTRL_INLINE_ROT0_SSPP,
+ DPU_CLK_CTRL_REG_DMA,
+ DPU_CLK_CTRL_WB2,
+ DPU_CLK_CTRL_MAX,
+};
+
+/* struct dpu_clk_ctrl_reg : Clock control register
+ * @reg_off: register offset
+ * @bit_off: bit offset
+ */
+struct dpu_clk_ctrl_reg {
+ u32 reg_off;
+ u32 bit_off;
+};
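+
+/*
+ * Illustrative sketch (assuming "mdp" maps the MDP TOP block and
+ * "mdp_cfg" is its catalog entry): forcing a pipe clock on means setting
+ * the control bit at the catalog-provided offset:
+ *
+ *	const struct dpu_clk_ctrl_reg *r = &mdp_cfg->clk_ctrls[DPU_CLK_CTRL_VIG0];
+ *	u32 val = DPU_REG_READ(&mdp->hw, r->reg_off);
+ *
+ *	DPU_REG_WRITE(&mdp->hw, r->reg_off, val | BIT(r->bit_off));
+ */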
+
+/* struct dpu_mdp_cfg : MDP TOP-BLK instance info
+ * @id: index identifying this block
+ * @base: register base offset to mdss
+ * @features bit mask identifying sub-blocks/features
+ * @highest_bank_bit: UBWC parameter
+ * @ubwc_swizzle: ubwc default swizzle setting
+ * @clk_ctrls clock control register definition
+ */
+struct dpu_mdp_cfg {
+ DPU_HW_BLK_INFO;
+ u32 highest_bank_bit;
+ u32 ubwc_swizzle;
+ struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX];
+};
+
+/* struct dpu_ctl_cfg : MDP CTL instance info
+ * @id: index identifying this block
+ * @base: register base offset to mdss
+ * @features bit mask identifying sub-blocks/features
+ * @intr_start: interrupt index for CTL_START
+ */
+struct dpu_ctl_cfg {
+ DPU_HW_BLK_INFO;
+ s32 intr_start;
+};
+
+/**
+ * struct dpu_sspp_cfg - information of source pipes
+ * @id: index identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @sblk: SSPP sub-blocks information
+ * @xin_id: bus client identifier
+ * @clk_ctrl clock control identifier
+ * @type sspp type identifier
+ */
+struct dpu_sspp_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_sspp_sub_blks *sblk;
+ u32 xin_id;
+ enum dpu_clk_ctrl_type clk_ctrl;
+ u32 type;
+};
+
+/**
+ * struct dpu_lm_cfg - information of layer mixer blocks
+ * @id: index identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @sblk: LM Sub-blocks information
+ * @pingpong: ID of connected PingPong, PINGPONG_MAX if unsupported
+ * @dspp: ID of connected DSPP, DSPP_MAX if unsupported
+ * @lm_pair_mask: Bitmask of LMs that can be controlled by same CTL
+ */
+struct dpu_lm_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_lm_sub_blks *sblk;
+ u32 pingpong;
+ u32 dspp;
+ unsigned long lm_pair_mask;
+};
+
+/**
+ * struct dpu_dspp_cfg - information of DSPP blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * supported by this block
+ * @sblk sub-blocks information
+ */
+struct dpu_dspp_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_dspp_sub_blks *sblk;
+};
+
+/**
+ * struct dpu_pingpong_cfg - information of PING-PONG blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @merge_3d: index of the merge_3d block attached to this pingpong
+ * @intr_done: index for PINGPONG done interrupt
+ * @intr_rdptr: index for PINGPONG readpointer done interrupt
+ * @sblk sub-blocks information
+ */
+struct dpu_pingpong_cfg {
+ DPU_HW_BLK_INFO;
+ u32 merge_3d;
+ s32 intr_done;
+ s32 intr_rdptr;
+ const struct dpu_pingpong_sub_blks *sblk;
+};
+
+/**
+ * struct dpu_merge_3d_cfg - information of MERGE_3D blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * supported by this block
+ * @sblk sub-blocks information
+ */
+struct dpu_merge_3d_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_merge_3d_sub_blks *sblk;
+};
+
+/**
+ * struct dpu_dsc_cfg - information of DSC blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ */
+struct dpu_dsc_cfg {
+ DPU_HW_BLK_INFO;
+};
+
+/**
+ * struct dpu_intf_cfg - information of timing engine blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @type: Interface type (DSI, DP, HDMI)
+ * @controller_id: Controller instance ID when there are multiple
+ * instances of this interface type
+ * @prog_fetch_lines_worst_case Worst case latency num lines needed to prefetch
+ * @intr_underrun: index for INTF underrun interrupt
+ * @intr_vsync: index for INTF VSYNC interrupt
+ */
+struct dpu_intf_cfg {
+ DPU_HW_BLK_INFO;
+ u32 type; /* interface type */
+ u32 controller_id;
+ u32 prog_fetch_lines_worst_case;
+ s32 intr_underrun;
+ s32 intr_vsync;
+};
+
+/**
+ * struct dpu_wb_cfg - information of writeback blocks
+ * @DPU_HW_BLK_INFO: refer to the description above for DPU_HW_BLK_INFO
+ * @vbif_idx: vbif client index
+ * @maxlinewidth: max line width supported by writeback block
+ * @xin_id: bus client identifier
+ * @intr_wb_done: interrupt index for WB_DONE
+ * @format_list: list of formats supported by this writeback block
+ * @num_formats: number of formats supported by this writeback block
+ * @clk_ctrl: clock control identifier
+ */
+struct dpu_wb_cfg {
+ DPU_HW_BLK_INFO;
+ u8 vbif_idx;
+ u32 maxlinewidth;
+ u32 xin_id;
+ s32 intr_wb_done;
+ const u32 *format_list;
+ u32 num_formats;
+ enum dpu_clk_ctrl_type clk_ctrl;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_cfg - dynamic OT setting
+ * @pps pixels per second
+ * @ot_limit OT limit to use up to the specified pixels per second
+ */
+struct dpu_vbif_dynamic_ot_cfg {
+ u64 pps;
+ u32 ot_limit;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_tbl - dynamic OT setting table
+ * @count length of cfg
+ * @cfg pointer to array of configuration settings with
+ * ascending requirements
+ */
+struct dpu_vbif_dynamic_ot_tbl {
+ u32 count;
+ const struct dpu_vbif_dynamic_ot_cfg *cfg;
+};
+
+/**
+ * struct dpu_vbif_qos_tbl - QoS priority table
+ * @npriority_lvl num of priority level
+ * @priority_lvl pointer to array of priority level in ascending order
+ */
+struct dpu_vbif_qos_tbl {
+ u32 npriority_lvl;
+ const u32 *priority_lvl;
+};
+
+/**
+ * struct dpu_vbif_cfg - information of VBIF blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @default_ot_rd_limit default OT read limit
+ * @default_ot_wr_limit default OT write limit
+ * @xin_halt_timeout maximum time (in usec) for xin to halt
+ * @qos_rp_remap_size size of VBIF_XINL_QOS_RP_REMAP register space
+ * @dynamic_ot_rd_tbl dynamic OT read configuration table
+ * @dynamic_ot_wr_tbl dynamic OT write configuration table
+ * @qos_rt_tbl real-time QoS priority table
+ * @qos_nrt_tbl non-real-time QoS priority table
+ * @memtype_count number of defined memtypes
+ * @memtype array of xin memtype definitions
+ */
+struct dpu_vbif_cfg {
+ DPU_HW_BLK_INFO;
+ u32 default_ot_rd_limit;
+ u32 default_ot_wr_limit;
+ u32 xin_halt_timeout;
+ u32 qos_rp_remap_size;
+ struct dpu_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
+ struct dpu_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
+ struct dpu_vbif_qos_tbl qos_rt_tbl;
+ struct dpu_vbif_qos_tbl qos_nrt_tbl;
+ u32 memtype_count;
+ u32 memtype[MAX_XIN_COUNT];
+};
+
+/**
+ * struct dpu_reg_dma_cfg - information of lut dma blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @version version of lutdma hw block
+ * @trigger_sel_off offset to trigger select registers of lutdma
+ * @xin_id bus client identifier
+ * @clk_ctrl clock control identifier
+ */
+struct dpu_reg_dma_cfg {
+ DPU_HW_BLK_INFO;
+ u32 version;
+ u32 trigger_sel_off;
+ u32 xin_id;
+ enum dpu_clk_ctrl_type clk_ctrl;
+};
+
+/**
+ * Define CDP use cases
+ * @DPU_PERF_CDP_USAGE_RT: real-time use cases
+ * @DPU_PERF_CDP_USAGE_NRT: non real-time use cases such as WFD
+ */
+enum {
+ DPU_PERF_CDP_USAGE_RT,
+ DPU_PERF_CDP_USAGE_NRT,
+ DPU_PERF_CDP_USAGE_MAX
+};
+
+/**
+ * struct dpu_perf_cdp_cfg - define CDP use case configuration
+ * @rd_enable: true if read pipe CDP is enabled
+ * @wr_enable: true if write pipe CDP is enabled
+ */
+struct dpu_perf_cdp_cfg {
+ bool rd_enable;
+ bool wr_enable;
+};
+
+/**
+ * struct dpu_perf_cfg - performance control settings
+ * @max_bw_low low threshold of maximum bandwidth (kbps)
+ * @max_bw_high high threshold of maximum bandwidth (kbps)
+ * @min_core_ib minimum mnoc ib vote in kbps
+ * @min_llcc_ib minimum llcc ib vote in kbps
+ * @min_dram_ib minimum dram ib vote in kbps
+ * @undersized_prefill_lines undersized prefill in lines
+ * @xtra_prefill_lines extra prefill latency in lines
+ * @dest_scale_prefill_lines destination scaler latency in lines
+ * @macrotile_prefill_lines macrotile latency in lines
+ * @yuv_nv12_prefill_lines yuv_nv12 latency in lines
+ * @linear_prefill_lines linear latency in lines
+ * @downscaling_prefill_lines downscaling latency in lines
+ * @amortizable_threshold minimum y position for traffic shaping prefill
+ * @min_prefill_lines minimum pipeline latency in lines
+ * @clk_inefficiency_factor DPU src clock inefficiency factor
+ * @bw_inefficiency_factor DPU axi bus bw inefficiency factor
+ * @safe_lut_tbl: LUT tables for safe signals
+ * @danger_lut_tbl: LUT tables for danger signals
+ * @qos_lut_tbl: LUT tables for QoS signals
+ * @cdp_cfg cdp use case configurations
+ */
+struct dpu_perf_cfg {
+ u32 max_bw_low;
+ u32 max_bw_high;
+ u32 min_core_ib;
+ u32 min_llcc_ib;
+ u32 min_dram_ib;
+ u32 undersized_prefill_lines;
+ u32 xtra_prefill_lines;
+ u32 dest_scale_prefill_lines;
+ u32 macrotile_prefill_lines;
+ u32 yuv_nv12_prefill_lines;
+ u32 linear_prefill_lines;
+ u32 downscaling_prefill_lines;
+ u32 amortizable_threshold;
+ u32 min_prefill_lines;
+ u32 clk_inefficiency_factor;
+ u32 bw_inefficiency_factor;
+ u32 safe_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ u32 danger_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ struct dpu_qos_lut_tbl qos_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ struct dpu_perf_cdp_cfg cdp_cfg[DPU_PERF_CDP_USAGE_MAX];
+};
+
+/**
+ * struct dpu_mdss_cfg - information of MDSS HW
+ * This is the main catalog data structure representing
+ * this HW version. Contains number of instances,
+ * register offsets, capabilities of all the MDSS HW sub-blocks.
+ *
+ * @dma_formats Supported formats for dma pipe
+ * @cursor_formats Supported formats for cursor pipe
+ * @vig_formats Supported formats for vig pipe
+ * @mdss_irqs: Bitmap with the irqs supported by the target
+ */
+struct dpu_mdss_cfg {
+ const struct dpu_caps *caps;
+
+ u32 mdp_count;
+ const struct dpu_mdp_cfg *mdp;
+
+ u32 ctl_count;
+ const struct dpu_ctl_cfg *ctl;
+
+ u32 sspp_count;
+ const struct dpu_sspp_cfg *sspp;
+
+ u32 mixer_count;
+ const struct dpu_lm_cfg *mixer;
+
+ u32 pingpong_count;
+ const struct dpu_pingpong_cfg *pingpong;
+
+ u32 merge_3d_count;
+ const struct dpu_merge_3d_cfg *merge_3d;
+
+ u32 dsc_count;
+ struct dpu_dsc_cfg *dsc;
+
+ u32 intf_count;
+ const struct dpu_intf_cfg *intf;
+
+ u32 vbif_count;
+ const struct dpu_vbif_cfg *vbif;
+
+ u32 wb_count;
+ const struct dpu_wb_cfg *wb;
+
+ u32 reg_dma_count;
+ const struct dpu_reg_dma_cfg *dma_cfg;
+
+ u32 ad_count;
+
+ u32 dspp_count;
+ const struct dpu_dspp_cfg *dspp;
+
+ /* Add additional block data structures here */
+
+ const struct dpu_perf_cfg *perf;
+ const struct dpu_format_extended *dma_formats;
+ const struct dpu_format_extended *cursor_formats;
+ const struct dpu_format_extended *vig_formats;
+
+ unsigned long mdss_irqs;
+};
+
+struct dpu_mdss_hw_cfg_handler {
+ u32 hw_rev;
+ const struct dpu_mdss_cfg *dpu_cfg;
+};
+
+/**
+ * dpu_hw_catalog_init - dpu hardware catalog init API retrieves
+ * hardcoded target specific catalog information in config structure
+ * @hw_rev: caller needs to provide the hardware revision.
+ *
+ * Return: dpu config structure
+ */
+const struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev);
+
+#endif /* _DPU_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
new file mode 100644
index 000000000..696c32d30
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -0,0 +1,713 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include "dpu_hwio.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+
+#define CTL_LAYER(lm) \
+ (((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
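+/* LM_5 is special-cased to 0x024: the linear formula would yield 0x014,
+ * which collides with the CTL_TOP register defined below.
+ */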
+#define CTL_LAYER_EXT(lm) \
+ (0x40 + (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT2(lm) \
+ (0x70 + (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT3(lm) \
+ (0xA0 + (((lm) - LM_0) * 0x004))
+#define CTL_TOP 0x014
+#define CTL_FLUSH 0x018
+#define CTL_START 0x01C
+#define CTL_PREPARE 0x0d0
+#define CTL_SW_RESET 0x030
+#define CTL_LAYER_EXTN_OFFSET 0x40
+#define CTL_MERGE_3D_ACTIVE 0x0E4
+#define CTL_WB_ACTIVE 0x0EC
+#define CTL_INTF_ACTIVE 0x0F4
+#define CTL_MERGE_3D_FLUSH 0x100
+#define CTL_DSC_ACTIVE 0x0E8
+#define CTL_DSC_FLUSH 0x104
+#define CTL_WB_FLUSH 0x108
+#define CTL_INTF_FLUSH 0x110
+#define CTL_INTF_MASTER 0x134
+#define CTL_FETCH_PIPE_ACTIVE 0x0FC
+
+#define CTL_MIXER_BORDER_OUT BIT(24)
+#define CTL_FLUSH_MASK_CTL BIT(17)
+
+#define DPU_REG_RESET_TIMEOUT_US 2000
+#define MERGE_3D_IDX 23
+#define DSC_IDX 22
+#define INTF_IDX 31
+#define WB_IDX 16
+#define CTL_INVALID_BIT 0xffff
+#define CTL_DEFAULT_GROUP_ID 0xf
+
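+/* fetch_tbl is indexed by enum dpu_sspp: VIG0..VIG3 map to bits 16..19
+ * and DMA0..DMA3 to bits 0..3 of CTL_FETCH_PIPE_ACTIVE; RGB and cursor
+ * pipes carry no fetch-active bit and stay at CTL_INVALID_BIT.
+ */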
+static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
+ CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
+ 1, 2, 3, CTL_INVALID_BIT, CTL_INVALID_BIT};
+
+static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->ctl_count; i++) {
+ if (ctl == m->ctl[i].id) {
+ b->blk_addr = addr + m->ctl[i].base;
+ b->log_mask = DPU_DBG_MASK_CTL;
+ return &m->ctl[i];
+ }
+ }
+ return ERR_PTR(-EINVAL);
+}
+
+static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
+ enum dpu_lm lm)
+{
+ int i;
+ int stages = -EINVAL;
+
+ for (i = 0; i < count; i++) {
+ if (lm == mixer[i].id) {
+ stages = mixer[i].sblk->maxblendstages;
+ break;
+ }
+ }
+
+ return stages;
+}
+
+static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ return DPU_REG_READ(c, CTL_FLUSH);
+}
+
+static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
+{
+ trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
+ DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
+}
+
+static inline bool dpu_hw_ctl_is_started(struct dpu_hw_ctl *ctx)
+{
+ return !!(DPU_REG_READ(&ctx->hw, CTL_START) & BIT(0));
+}
+
+static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
+{
+ trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
+ DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
+}
+
+static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
+{
+ trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
+ ctx->pending_flush_mask = 0x0;
+}
+
+static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
+ u32 flushbits)
+{
+ trace_dpu_hw_ctl_update_pending_flush(flushbits,
+ ctx->pending_flush_mask);
+ ctx->pending_flush_mask |= flushbits;
+}
+
+static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
+{
+ return ctx->pending_flush_mask;
+}
+
+static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
+{
+ if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
+ DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
+ ctx->pending_merge_3d_flush_mask);
+ if (ctx->pending_flush_mask & BIT(INTF_IDX))
+ DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
+ ctx->pending_intf_flush_mask);
+ if (ctx->pending_flush_mask & BIT(WB_IDX))
+ DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
+ ctx->pending_wb_flush_mask);
+
+ DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
+
+static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
+{
+ trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
+ DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
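+
+/*
+ * Illustrative flush sequence (assuming "ctl" came from dpu_hw_ctl_init()):
+ * flush bits are first accumulated in the cached mask, then committed to
+ * hardware in one write:
+ *
+ *	ctl->ops.update_pending_flush_mixer(ctl, LM_0);
+ *	ctl->ops.update_pending_flush_intf(ctl, INTF_1);
+ *	ctl->ops.trigger_flush(ctl);
+ *	ctl->ops.trigger_start(ctl);
+ */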
+
+static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
+ enum dpu_sspp sspp)
+{
+ switch (sspp) {
+ case SSPP_VIG0:
+ ctx->pending_flush_mask |= BIT(0);
+ break;
+ case SSPP_VIG1:
+ ctx->pending_flush_mask |= BIT(1);
+ break;
+ case SSPP_VIG2:
+ ctx->pending_flush_mask |= BIT(2);
+ break;
+ case SSPP_VIG3:
+ ctx->pending_flush_mask |= BIT(18);
+ break;
+ case SSPP_RGB0:
+ ctx->pending_flush_mask |= BIT(3);
+ break;
+ case SSPP_RGB1:
+ ctx->pending_flush_mask |= BIT(4);
+ break;
+ case SSPP_RGB2:
+ ctx->pending_flush_mask |= BIT(5);
+ break;
+ case SSPP_RGB3:
+ ctx->pending_flush_mask |= BIT(19);
+ break;
+ case SSPP_DMA0:
+ ctx->pending_flush_mask |= BIT(11);
+ break;
+ case SSPP_DMA1:
+ ctx->pending_flush_mask |= BIT(12);
+ break;
+ case SSPP_DMA2:
+ ctx->pending_flush_mask |= BIT(24);
+ break;
+ case SSPP_DMA3:
+ ctx->pending_flush_mask |= BIT(25);
+ break;
+ case SSPP_CURSOR0:
+ ctx->pending_flush_mask |= BIT(22);
+ break;
+ case SSPP_CURSOR1:
+ ctx->pending_flush_mask |= BIT(23);
+ break;
+ default:
+ break;
+ }
+}
+
+static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm)
+{
+ switch (lm) {
+ case LM_0:
+ ctx->pending_flush_mask |= BIT(6);
+ break;
+ case LM_1:
+ ctx->pending_flush_mask |= BIT(7);
+ break;
+ case LM_2:
+ ctx->pending_flush_mask |= BIT(8);
+ break;
+ case LM_3:
+ ctx->pending_flush_mask |= BIT(9);
+ break;
+ case LM_4:
+ ctx->pending_flush_mask |= BIT(10);
+ break;
+ case LM_5:
+ ctx->pending_flush_mask |= BIT(20);
+ break;
+ default:
+ break;
+ }
+
+ ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL;
+}
+
+static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
+ enum dpu_intf intf)
+{
+ switch (intf) {
+ case INTF_0:
+ ctx->pending_flush_mask |= BIT(31);
+ break;
+ case INTF_1:
+ ctx->pending_flush_mask |= BIT(30);
+ break;
+ case INTF_2:
+ ctx->pending_flush_mask |= BIT(29);
+ break;
+ case INTF_3:
+ ctx->pending_flush_mask |= BIT(28);
+ break;
+ default:
+ break;
+ }
+}
+
+static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
+ enum dpu_wb wb)
+{
+ switch (wb) {
+ case WB_0:
+ case WB_1:
+ case WB_2:
+ ctx->pending_flush_mask |= BIT(WB_IDX);
+ break;
+ default:
+ break;
+ }
+}
+
+static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
+ enum dpu_wb wb)
+{
+ ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
+ ctx->pending_flush_mask |= BIT(WB_IDX);
+}
+
+static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
+ enum dpu_intf intf)
+{
+ ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
+ ctx->pending_flush_mask |= BIT(INTF_IDX);
+}
+
+static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
+ enum dpu_merge_3d merge_3d)
+{
+ ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
+ ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
+}
+
+static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
+ enum dpu_dspp dspp)
+{
+ switch (dspp) {
+ case DSPP_0:
+ ctx->pending_flush_mask |= BIT(13);
+ break;
+ case DSPP_1:
+ ctx->pending_flush_mask |= BIT(14);
+ break;
+ case DSPP_2:
+ ctx->pending_flush_mask |= BIT(15);
+ break;
+ case DSPP_3:
+ ctx->pending_flush_mask |= BIT(21);
+ break;
+ default:
+ break;
+ }
+}
+
+static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ ktime_t timeout;
+ u32 status;
+
+ timeout = ktime_add_us(ktime_get(), timeout_us);
+
+ /*
+ * mdp takes around 30us to finish resetting its ctl path;
+ * sleep 20-50us between polls so that the reset is normally
+ * observed complete on the first re-read
+ */
+ do {
+ status = DPU_REG_READ(c, CTL_SW_RESET);
+ status &= 0x1;
+ if (status)
+ usleep_range(20, 50);
+ } while (status && ktime_compare_safe(ktime_get(), timeout) < 0);
+
+ return status;
+}
+
+static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
+ DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
+ if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 status;
+
+ status = DPU_REG_READ(c, CTL_SW_RESET);
+ status &= 0x01;
+ if (!status)
+ return 0;
+
+ pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
+ if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
+ pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int i;
+
+ for (i = 0; i < ctx->mixer_count; i++) {
+ enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;
+
+ DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
+ }
+
+ DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
+}
+
+static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
+ u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
+ int i, j;
+ int stages;
+ int pipes_per_stage;
+
+ stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
+ if (stages < 0)
+ return;
+
+ if (test_bit(DPU_MIXER_SOURCESPLIT,
+ &ctx->mixer_hw_caps->features))
+ pipes_per_stage = PIPES_PER_STAGE;
+ else
+ pipes_per_stage = 1;
+
+ mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */
+
+ if (!stage_cfg)
+ goto exit;
+
+ for (i = 0; i <= stages; i++) {
+ /* overflow to ext register if 'i + 1 > 7' */
+ mix = (i + 1) & 0x7;
+ ext = i >= 7;
+
+ for (j = 0 ; j < pipes_per_stage; j++) {
+ enum dpu_sspp_multirect_index rect_index =
+ stage_cfg->multirect_index[i][j];
+
+ switch (stage_cfg->stage[i][j]) {
+ case SSPP_VIG0:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
+ } else {
+ mixercfg |= mix << 0;
+ mixercfg_ext |= ext << 0;
+ }
+ break;
+ case SSPP_VIG1:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
+ } else {
+ mixercfg |= mix << 3;
+ mixercfg_ext |= ext << 2;
+ }
+ break;
+ case SSPP_VIG2:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
+ } else {
+ mixercfg |= mix << 6;
+ mixercfg_ext |= ext << 4;
+ }
+ break;
+ case SSPP_VIG3:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
+ } else {
+ mixercfg |= mix << 26;
+ mixercfg_ext |= ext << 6;
+ }
+ break;
+ case SSPP_RGB0:
+ mixercfg |= mix << 9;
+ mixercfg_ext |= ext << 8;
+ break;
+ case SSPP_RGB1:
+ mixercfg |= mix << 12;
+ mixercfg_ext |= ext << 10;
+ break;
+ case SSPP_RGB2:
+ mixercfg |= mix << 15;
+ mixercfg_ext |= ext << 12;
+ break;
+ case SSPP_RGB3:
+ mixercfg |= mix << 29;
+ mixercfg_ext |= ext << 14;
+ break;
+ case SSPP_DMA0:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
+ } else {
+ mixercfg |= mix << 18;
+ mixercfg_ext |= ext << 16;
+ }
+ break;
+ case SSPP_DMA1:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
+ } else {
+ mixercfg |= mix << 21;
+ mixercfg_ext |= ext << 18;
+ }
+ break;
+ case SSPP_DMA2:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
+ } else {
+ mix |= (i + 1) & 0xF;
+ mixercfg_ext2 |= mix << 0;
+ }
+ break;
+ case SSPP_DMA3:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
+ } else {
+ mix |= (i + 1) & 0xF;
+ mixercfg_ext2 |= mix << 4;
+ }
+ break;
+ case SSPP_CURSOR0:
+ mixercfg_ext |= ((i + 1) & 0xF) << 20;
+ break;
+ case SSPP_CURSOR1:
+ mixercfg_ext |= ((i + 1) & 0xF) << 26;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+exit:
+ DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
+}
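+
+/*
+ * Encoding note (derived from the cases above): legacy pipes use 3-bit
+ * stage fields in CTL_LAYER with an overflow bit in CTL_LAYER_EXT once
+ * the stage index exceeds 7, while cursor pipes, multirect RECT_1
+ * entries and the DMA2/DMA3 pipes use 4-bit stage fields in
+ * CTL_LAYER_EXT/EXT2/EXT3.
+ */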
+
+static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 intf_active = 0;
+ u32 wb_active = 0;
+ u32 mode_sel = 0;
+
+ /* CTL_TOP[31:28] carries the group_id used to collate CTL paths
+ * per VM. Explicitly disable it until VM support is added in SW,
+ * since the power-on-reset value is not the disabled state.
+ */
+ if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features)))
+ mode_sel = CTL_DEFAULT_GROUP_ID << 28;
+
+ if (cfg->dsc)
+ DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH, cfg->dsc);
+
+ if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
+ mode_sel |= BIT(17);
+
+ intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
+ wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
+
+ if (cfg->intf)
+ intf_active |= BIT(cfg->intf - INTF_0);
+
+ if (cfg->wb)
+ wb_active |= BIT(cfg->wb - WB_0);
+
+ DPU_REG_WRITE(c, CTL_TOP, mode_sel);
+ DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
+ DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
+
+ if (cfg->merge_3d)
+ DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
+ BIT(cfg->merge_3d - MERGE_3D_0));
+ if (cfg->dsc) {
+ DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, BIT(DSC_IDX));
+ DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
+ }
+}
+
+static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 intf_cfg = 0;
+
+ intf_cfg |= (cfg->intf & 0xF) << 4;
+
+ if (cfg->mode_3d) {
+ intf_cfg |= BIT(19);
+ intf_cfg |= (cfg->mode_3d - 0x1) << 20;
+ }
+
+ if (cfg->wb)
+ intf_cfg |= (cfg->wb & 0x3) + 2;
+
+ switch (cfg->intf_mode_sel) {
+ case DPU_CTL_MODE_SEL_VID:
+ intf_cfg &= ~BIT(17);
+ intf_cfg &= ~(0x3 << 15);
+ break;
+ case DPU_CTL_MODE_SEL_CMD:
+ intf_cfg |= BIT(17);
+ intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
+ break;
+ default:
+ pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
+ return;
+ }
+
+ DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
+}
+
+static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 intf_active = 0;
+ u32 wb_active = 0;
+ u32 merge3d_active = 0;
+
+ /*
+ * This API resets each portion of the CTL path namely,
+ * clearing the sspps staged on the lm, merge_3d block,
+ * interfaces, writeback etc. to ensure clean teardown of the pipeline.
+ * This will be used for writeback to begin with to have a
+ * proper teardown of the writeback session but upon further
+ * validation, this can be extended to all interfaces.
+ */
+ if (cfg->merge_3d) {
+ merge3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
+ merge3d_active &= ~BIT(cfg->merge_3d - MERGE_3D_0);
+ DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
+ merge3d_active);
+ }
+
+ dpu_hw_ctl_clear_all_blendstages(ctx);
+
+ if (cfg->intf) {
+ intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
+ intf_active &= ~BIT(cfg->intf - INTF_0);
+ DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
+ }
+
+ if (cfg->wb) {
+ wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
+ wb_active &= ~BIT(cfg->wb - WB_0);
+ DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
+ }
+}
+
+static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
+ unsigned long *fetch_active)
+{
+ int i;
+ u32 val = 0;
+
+ if (fetch_active) {
+ for (i = 0; i < SSPP_MAX; i++) {
+ if (test_bit(i, fetch_active) &&
+ fetch_tbl[i] != CTL_INVALID_BIT)
+ val |= BIT(fetch_tbl[i]);
+ }
+ }
+
+ DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
+}
+
+static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
+ unsigned long cap)
+{
+ if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
+ ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
+ ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
+ ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
+ ops->update_pending_flush_intf =
+ dpu_hw_ctl_update_pending_flush_intf_v1;
+ ops->update_pending_flush_merge_3d =
+ dpu_hw_ctl_update_pending_flush_merge_3d_v1;
+ ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
+ } else {
+ ops->trigger_flush = dpu_hw_ctl_trigger_flush;
+ ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
+ ops->update_pending_flush_intf =
+ dpu_hw_ctl_update_pending_flush_intf;
+ ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
+ }
+ ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
+ ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
+ ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
+ ops->get_flush_register = dpu_hw_ctl_get_flush_register;
+ ops->trigger_start = dpu_hw_ctl_trigger_start;
+ ops->is_started = dpu_hw_ctl_is_started;
+ ops->trigger_pending = dpu_hw_ctl_trigger_pending;
+ ops->reset = dpu_hw_ctl_reset_control;
+ ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
+ ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
+ ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
+ ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
+ ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
+ ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;
+ if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
+ ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
+}
+
+struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_ctl *c;
+ const struct dpu_ctl_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _ctl_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ pr_err("failed to create dpu_hw_ctl %d\n", idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->caps = cfg;
+ _setup_ctl_ops(&c->ops, c->caps->features);
+ c->idx = idx;
+ c->mixer_count = m->mixer_count;
+ c->mixer_hw_caps = m->mixer;
+
+ return c;
+}
+
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
+{
+ kfree(ctx);
+}
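+
+/*
+ * Illustrative lifetime sketch (assuming "mmio" maps the MDSS register
+ * space and "catalog" came from dpu_hw_catalog_init()):
+ *
+ *	struct dpu_hw_ctl *ctl = dpu_hw_ctl_init(CTL_0, mmio, catalog);
+ *
+ *	if (IS_ERR(ctl))
+ *		return PTR_ERR(ctl);
+ *	...
+ *	dpu_hw_ctl_destroy(ctl);
+ */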
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
new file mode 100644
index 000000000..96c012ec8
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DPU_HW_CTL_H
+#define _DPU_HW_CTL_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_sspp.h"
+
+/**
+ * dpu_ctl_mode_sel: Interface mode selection
+ * DPU_CTL_MODE_SEL_VID: Video mode interface
+ * DPU_CTL_MODE_SEL_CMD: Command mode interface
+ */
+enum dpu_ctl_mode_sel {
+ DPU_CTL_MODE_SEL_VID = 0,
+ DPU_CTL_MODE_SEL_CMD
+};
+
+struct dpu_hw_ctl;
+/**
+ * struct dpu_hw_stage_cfg - blending stage cfg
+ * @stage : SSPP_ID at each stage
+ * @multirect_index: index of the rectangle of SSPP.
+ */
+struct dpu_hw_stage_cfg {
+ enum dpu_sspp stage[DPU_STAGE_MAX][PIPES_PER_STAGE];
+ enum dpu_sspp_multirect_index multirect_index
+ [DPU_STAGE_MAX][PIPES_PER_STAGE];
+};
+
+/**
+ * struct dpu_hw_intf_cfg : Describes how the DPU writes data to output interface
+ * @intf : Interface id
+ * @wb : Writeback block id
+ * @mode_3d: 3d mux configuration
+ * @merge_3d: 3d merge block used
+ * @intf_mode_sel: Interface mode, cmd / vid
+ * @stream_sel: Stream selection for multi-stream interfaces
+ * @dsc: DSC BIT masks used
+ */
+struct dpu_hw_intf_cfg {
+ enum dpu_intf intf;
+ enum dpu_wb wb;
+ enum dpu_3d_blend_mode mode_3d;
+ enum dpu_merge_3d merge_3d;
+ enum dpu_ctl_mode_sel intf_mode_sel;
+ int stream_sel;
+ unsigned int dsc;
+};
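+
+/*
+ * Illustrative example: a command-mode panel on INTF_1 could be
+ * described to setup_intf_cfg() as
+ *
+ *	struct dpu_hw_intf_cfg cfg = {
+ *		.intf = INTF_1,
+ *		.intf_mode_sel = DPU_CTL_MODE_SEL_CMD,
+ *	};
+ */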
+
+/**
+ * struct dpu_hw_ctl_ops - Interface to the CTL hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_ctl_ops {
+ /**
+ * kickoff hw operation for Sw controlled interfaces
+ * DSI cmd mode and WB interface are SW controlled
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_start)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * check if the ctl is started
+ * @ctx : ctl path ctx pointer
+ * @Return: true if started, false if stopped
+ */
+ bool (*is_started)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * signal that kickoff preparation is in progress, for the sw
+ * controlled interfaces (DSI cmd mode and the WB interface
+ * are SW controlled)
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_pending)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Clear the value of the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
+ void (*clear_pending_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Query the value of the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
+ u32 (*get_pending_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @flushbits : module flushmask
+ */
+ void (*update_pending_flush)(struct dpu_hw_ctl *ctx,
+ u32 flushbits);
+
+ /**
+ * OR in the given flushbits to the cached pending_(wb_)flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : writeback block index
+ */
+ void (*update_pending_flush_wb)(struct dpu_hw_ctl *ctx,
+ enum dpu_wb blk);
+
+ /**
+ * OR in the given flushbits to the cached pending_(intf_)flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : interface block index
+ */
+ void (*update_pending_flush_intf)(struct dpu_hw_ctl *ctx,
+ enum dpu_intf blk);
+
+ /**
+ * OR in the given flushbits to the cached pending_(merge_3d_)flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : interface block index
+ */
+ void (*update_pending_flush_merge_3d)(struct dpu_hw_ctl *ctx,
+ enum dpu_merge_3d blk);
+
+ /**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : SSPP block index
+ */
+ void (*update_pending_flush_sspp)(struct dpu_hw_ctl *ctx,
+ enum dpu_sspp blk);
+
+ /**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : LM block index
+ */
+ void (*update_pending_flush_mixer)(struct dpu_hw_ctl *ctx,
+ enum dpu_lm blk);
+
+ /**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : DSPP block index
+ */
+ void (*update_pending_flush_dspp)(struct dpu_hw_ctl *ctx,
+ enum dpu_dspp blk);
+
+	/**
+ * Write the value of the pending_flush_mask to hardware
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Read the value of the flush register
+ * @ctx : ctl path ctx pointer
+ * @Return: value of the ctl flush register.
+ */
+ u32 (*get_flush_register)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Setup ctl_path interface config
+	 * @ctx : ctl path ctx pointer
+ * @cfg : interface config structure pointer
+ */
+ void (*setup_intf_cfg)(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg);
+
+ /**
+ * reset ctl_path interface config
+ * @ctx : ctl path ctx pointer
+ * @cfg : interface config structure pointer
+ */
+ void (*reset_intf_cfg)(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg);
+
+ int (*reset)(struct dpu_hw_ctl *c);
+
+	/**
+	 * wait_reset_status - checks ctl reset status
+	 * @ctx : ctl path ctx pointer
+	 *
+	 * This function checks the ctl reset status bit.
+	 * If the reset bit is set, it keeps polling the status until the hw
+	 * reset is complete.
+	 * Returns: 0 on success or -error if reset incomplete within interval
+ */
+ int (*wait_reset_status)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Set all blend stages to disabled
+ * @ctx : ctl path ctx pointer
+ */
+ void (*clear_all_blendstages)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Configure layer mixer to pipe configuration
+ * @ctx : ctl path ctx pointer
+ * @lm : layer mixer enumeration
+ * @cfg : blend stage configuration
+ */
+ void (*setup_blendstage)(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg);
+
+ void (*set_active_pipes)(struct dpu_hw_ctl *ctx,
+ unsigned long *fetch_active);
+};
+
+/**
+ * struct dpu_hw_ctl : CTL PATH driver object
+ * @base: hardware block base structure
+ * @hw: block register map object
+ * @idx: control path index
+ * @caps: control path capabilities
+ * @mixer_count: number of mixers
+ * @mixer_hw_caps: mixer hardware capabilities
+ * @pending_flush_mask: storage for pending ctl_flush managed via ops
+ * @pending_intf_flush_mask: pending INTF flush
+ * @pending_wb_flush_mask: pending WB flush
+ * @ops: operation list
+ */
+struct dpu_hw_ctl {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* ctl path */
+ int idx;
+ const struct dpu_ctl_cfg *caps;
+ int mixer_count;
+ const struct dpu_lm_cfg *mixer_hw_caps;
+ u32 pending_flush_mask;
+ u32 pending_intf_flush_mask;
+ u32 pending_wb_flush_mask;
+ u32 pending_merge_3d_flush_mask;
+
+ /* ops */
+ struct dpu_hw_ctl_ops ops;
+};
+
+/**
+ * to_dpu_hw_ctl - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_ctl, base);
+}
+
+/**
+ * dpu_hw_ctl_init(): Initializes the ctl_path hw driver object.
+ * Should be called before accessing any ctl path registers.
+ * @idx: ctl_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_ctl_destroy(): Destroys ctl driver context
+ * Should be called to free the context.
+ * @ctx: Pointer to ctl driver context returned by dpu_hw_ctl_init
+ */
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx);
+
+#endif /*_DPU_HW_CTL_H */
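
The update_pending_flush_*() ops above only modify the cached mask; nothing reaches hardware until trigger_flush() writes the mask out. A minimal commit-sequence sketch, assuming ctl came from dpu_hw_ctl_init() and that SSPP_VIG0 and LM_0 are valid block ids on the target SoC:

/* sketch: accumulate flush bits in software, then push them in one write */
static void example_ctl_commit(struct dpu_hw_ctl *ctl)
{
	ctl->ops.clear_pending_flush(ctl);
	ctl->ops.update_pending_flush_sspp(ctl, SSPP_VIG0);	/* hypothetical pipe */
	ctl->ops.update_pending_flush_mixer(ctl, LM_0);		/* hypothetical mixer */

	/* one register write pushes the whole accumulated mask */
	ctl->ops.trigger_flush(ctl);

	/* SW-controlled interfaces (DSI cmd mode, WB) also need a start */
	ctl->ops.trigger_start(ctl);
}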
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
new file mode 100644
index 000000000..c8f145558
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2022, Linaro Limited
+ */
+
+#include "dpu_kms.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_dsc.h"
+
+#define DSC_COMMON_MODE 0x000
+#define DSC_ENC 0x004
+#define DSC_PICTURE 0x008
+#define DSC_SLICE 0x00C
+#define DSC_CHUNK_SIZE 0x010
+#define DSC_DELAY 0x014
+#define DSC_SCALE_INITIAL 0x018
+#define DSC_SCALE_DEC_INTERVAL 0x01C
+#define DSC_SCALE_INC_INTERVAL 0x020
+#define DSC_FIRST_LINE_BPG_OFFSET 0x024
+#define DSC_BPG_OFFSET 0x028
+#define DSC_DSC_OFFSET 0x02C
+#define DSC_FLATNESS 0x030
+#define DSC_RC_MODEL_SIZE 0x034
+#define DSC_RC 0x038
+#define DSC_RC_BUF_THRESH 0x03C
+#define DSC_RANGE_MIN_QP 0x074
+#define DSC_RANGE_MAX_QP 0x0B0
+#define DSC_RANGE_BPG_OFFSET 0x0EC
+
+static void dpu_hw_dsc_disable(struct dpu_hw_dsc *dsc)
+{
+ struct dpu_hw_blk_reg_map *c = &dsc->hw;
+
+ DPU_REG_WRITE(c, DSC_COMMON_MODE, 0);
+}
+
+static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
+ struct drm_dsc_config *dsc,
+ u32 mode,
+ u32 initial_lines)
+{
+ struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
+ u32 data;
+ u32 slice_last_group_size;
+ u32 det_thresh_flatness;
+ bool is_cmd_mode = !(mode & DSC_MODE_VIDEO);
+
+ DPU_REG_WRITE(c, DSC_COMMON_MODE, mode);
+
+ if (is_cmd_mode)
+ initial_lines += 1;
+
+ slice_last_group_size = (dsc->slice_width + 2) % 3;
+
+ data = (initial_lines << 20);
+ data |= (slice_last_group_size << 18);
+	/* bpp is in 6.4 fixed-point format; the 4 LSBs hold the fractional part */
+ data |= (dsc->bits_per_pixel << 8);
+ data |= (dsc->block_pred_enable << 7);
+ data |= (dsc->line_buf_depth << 3);
+ data |= (dsc->simple_422 << 2);
+ data |= (dsc->convert_rgb << 1);
+ data |= dsc->bits_per_component;
+
+ DPU_REG_WRITE(c, DSC_ENC, data);
+
+ data = dsc->pic_width << 16;
+ data |= dsc->pic_height;
+ DPU_REG_WRITE(c, DSC_PICTURE, data);
+
+ data = dsc->slice_width << 16;
+ data |= dsc->slice_height;
+ DPU_REG_WRITE(c, DSC_SLICE, data);
+
+ data = dsc->slice_chunk_size << 16;
+ DPU_REG_WRITE(c, DSC_CHUNK_SIZE, data);
+
+ data = dsc->initial_dec_delay << 16;
+ data |= dsc->initial_xmit_delay;
+ DPU_REG_WRITE(c, DSC_DELAY, data);
+
+ data = dsc->initial_scale_value;
+ DPU_REG_WRITE(c, DSC_SCALE_INITIAL, data);
+
+ data = dsc->scale_decrement_interval;
+ DPU_REG_WRITE(c, DSC_SCALE_DEC_INTERVAL, data);
+
+ data = dsc->scale_increment_interval;
+ DPU_REG_WRITE(c, DSC_SCALE_INC_INTERVAL, data);
+
+ data = dsc->first_line_bpg_offset;
+ DPU_REG_WRITE(c, DSC_FIRST_LINE_BPG_OFFSET, data);
+
+ data = dsc->nfl_bpg_offset << 16;
+ data |= dsc->slice_bpg_offset;
+ DPU_REG_WRITE(c, DSC_BPG_OFFSET, data);
+
+ data = dsc->initial_offset << 16;
+ data |= dsc->final_offset;
+ DPU_REG_WRITE(c, DSC_DSC_OFFSET, data);
+
+ det_thresh_flatness = 7 + 2 * (dsc->bits_per_component - 8);
+ data = det_thresh_flatness << 10;
+ data |= dsc->flatness_max_qp << 5;
+ data |= dsc->flatness_min_qp;
+ DPU_REG_WRITE(c, DSC_FLATNESS, data);
+
+ data = dsc->rc_model_size;
+ DPU_REG_WRITE(c, DSC_RC_MODEL_SIZE, data);
+
+ data = dsc->rc_tgt_offset_low << 18;
+ data |= dsc->rc_tgt_offset_high << 14;
+ data |= dsc->rc_quant_incr_limit1 << 9;
+ data |= dsc->rc_quant_incr_limit0 << 4;
+ data |= dsc->rc_edge_factor;
+ DPU_REG_WRITE(c, DSC_RC, data);
+}
+
+static void dpu_hw_dsc_config_thresh(struct dpu_hw_dsc *hw_dsc,
+ struct drm_dsc_config *dsc)
+{
+ struct drm_dsc_rc_range_parameters *rc = dsc->rc_range_params;
+ struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
+ u32 off;
+ int i;
+
+ off = DSC_RC_BUF_THRESH;
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1 ; i++) {
+ DPU_REG_WRITE(c, off, dsc->rc_buf_thresh[i]);
+ off += 4;
+ }
+
+ off = DSC_RANGE_MIN_QP;
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ DPU_REG_WRITE(c, off, rc[i].range_min_qp);
+ off += 4;
+ }
+
+ off = DSC_RANGE_MAX_QP;
+	for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ DPU_REG_WRITE(c, off, rc[i].range_max_qp);
+ off += 4;
+ }
+
+ off = DSC_RANGE_BPG_OFFSET;
+	for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ DPU_REG_WRITE(c, off, rc[i].range_bpg_offset);
+ off += 4;
+ }
+}
+
+static const struct dpu_dsc_cfg *_dsc_offset(enum dpu_dsc dsc,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->dsc_count; i++) {
+ if (dsc == m->dsc[i].id) {
+ b->blk_addr = addr + m->dsc[i].base;
+ b->log_mask = DPU_DBG_MASK_DSC;
+ return &m->dsc[i];
+ }
+ }
+
+ return NULL;
+}
+
+static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops,
+ unsigned long cap)
+{
+ ops->dsc_disable = dpu_hw_dsc_disable;
+ ops->dsc_config = dpu_hw_dsc_config;
+ ops->dsc_config_thresh = dpu_hw_dsc_config_thresh;
+}
+
+struct dpu_hw_dsc *dpu_hw_dsc_init(enum dpu_dsc idx, void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_dsc *c;
+	const struct dpu_dsc_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _dsc_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->idx = idx;
+ c->caps = cfg;
+ _setup_dsc_ops(&c->ops, c->caps->features);
+
+ return c;
+}
+
+void dpu_hw_dsc_destroy(struct dpu_hw_dsc *dsc)
+{
+ kfree(dsc);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
new file mode 100644
index 000000000..c0b77fe1a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2020-2022, Linaro Limited */
+
+#ifndef _DPU_HW_DSC_H
+#define _DPU_HW_DSC_H
+
+#include <drm/display/drm_dsc.h>
+
+#define DSC_MODE_SPLIT_PANEL BIT(0)
+#define DSC_MODE_MULTIPLEX BIT(1)
+#define DSC_MODE_VIDEO BIT(2)
+
+struct dpu_hw_dsc;
+
+/**
+ * struct dpu_hw_dsc_ops - interface to the dsc hardware driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_dsc_ops {
+ /**
+ * dsc_disable - disable dsc
+ * @hw_dsc: Pointer to dsc context
+ */
+ void (*dsc_disable)(struct dpu_hw_dsc *hw_dsc);
+
+ /**
+ * dsc_config - configures dsc encoder
+ * @hw_dsc: Pointer to dsc context
+ * @dsc: panel dsc parameters
+ * @mode: dsc topology mode to be set
+ * @initial_lines: amount of initial lines to be used
+ */
+ void (*dsc_config)(struct dpu_hw_dsc *hw_dsc,
+ struct drm_dsc_config *dsc,
+ u32 mode,
+ u32 initial_lines);
+
+ /**
+ * dsc_config_thresh - programs panel thresholds
+ * @hw_dsc: Pointer to dsc context
+ * @dsc: panel dsc parameters
+ */
+ void (*dsc_config_thresh)(struct dpu_hw_dsc *hw_dsc,
+ struct drm_dsc_config *dsc);
+};
+
+struct dpu_hw_dsc {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* dsc */
+ enum dpu_dsc idx;
+ const struct dpu_dsc_cfg *caps;
+
+ /* ops */
+ struct dpu_hw_dsc_ops ops;
+};
+
+/**
+ * dpu_hw_dsc_init - initializes the dsc block for the passed dsc idx.
+ * @idx: DSC index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ * Returns: Error code or allocated dpu_hw_dsc context
+ */
+struct dpu_hw_dsc *dpu_hw_dsc_init(enum dpu_dsc idx, void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_dsc_destroy - destroys dsc driver context
+ * @dsc: Pointer to dsc driver context returned by dpu_hw_dsc_init
+ */
+void dpu_hw_dsc_destroy(struct dpu_hw_dsc *dsc);
+
+static inline struct dpu_hw_dsc *to_dpu_hw_dsc(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_dsc, base);
+}
+
+#endif /* _DPU_HW_DSC_H */
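
A minimal bring-up sketch for one DSC block, assuming DSC_0 exists in the catalog and cfg holds already-validated panel DSC parameters; the initial_lines value of 0 is a placeholder, as the real value depends on the panel and topology:

/* sketch: init a DSC block and program it for video mode */
static struct dpu_hw_dsc *example_dsc_setup(void __iomem *mdp,
					    const struct dpu_mdss_cfg *cat,
					    struct drm_dsc_config *cfg)
{
	struct dpu_hw_dsc *hw_dsc = dpu_hw_dsc_init(DSC_0, mdp, cat);

	if (IS_ERR(hw_dsc))
		return hw_dsc;

	/* program the encoder, then the rate-control thresholds */
	hw_dsc->ops.dsc_config(hw_dsc, cfg, DSC_MODE_VIDEO, 0);
	hw_dsc->ops.dsc_config_thresh(hw_dsc, cfg);

	return hw_dsc;
}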
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
new file mode 100644
index 000000000..8ab5ace34
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_dspp.h"
+#include "dpu_kms.h"
+
+
+/* DSPP_PCC */
+#define PCC_EN BIT(0)
+#define PCC_DIS 0
+#define PCC_RED_R_OFF 0x10
+#define PCC_RED_G_OFF 0x1C
+#define PCC_RED_B_OFF 0x28
+#define PCC_GREEN_R_OFF 0x14
+#define PCC_GREEN_G_OFF 0x20
+#define PCC_GREEN_B_OFF 0x2C
+#define PCC_BLUE_R_OFF 0x18
+#define PCC_BLUE_G_OFF 0x24
+#define PCC_BLUE_B_OFF 0x30
+
+static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
+ struct dpu_hw_pcc_cfg *cfg)
+{
+	u32 base;
+
+ if (!ctx) {
+ DRM_ERROR("invalid ctx %pK\n", ctx);
+ return;
+ }
+
+ base = ctx->cap->sblk->pcc.base;
+
+ if (!base) {
+ DRM_ERROR("invalid ctx %pK pcc base 0x%x\n", ctx, base);
+ return;
+ }
+
+ if (!cfg) {
+ DRM_DEBUG_DRIVER("disable pcc feature\n");
+ DPU_REG_WRITE(&ctx->hw, base, PCC_DIS);
+ return;
+ }
+
+ DPU_REG_WRITE(&ctx->hw, base + PCC_RED_R_OFF, cfg->r.r);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_RED_G_OFF, cfg->r.g);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_RED_B_OFF, cfg->r.b);
+
+ DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_R_OFF, cfg->g.r);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_G_OFF, cfg->g.g);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_B_OFF, cfg->g.b);
+
+ DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_R_OFF, cfg->b.r);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_G_OFF, cfg->b.g);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_B_OFF, cfg->b.b);
+
+ DPU_REG_WRITE(&ctx->hw, base, PCC_EN);
+}
+
+static void _setup_dspp_ops(struct dpu_hw_dspp *c,
+ unsigned long features)
+{
+ if (test_bit(DPU_DSPP_PCC, &features))
+ c->ops.setup_pcc = dpu_setup_dspp_pcc;
+}
+
+static const struct dpu_dspp_cfg *_dspp_offset(enum dpu_dspp dspp,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ if (!m || !addr || !b)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < m->dspp_count; i++) {
+ if (dspp == m->dspp[i].id) {
+ b->blk_addr = addr + m->dspp[i].base;
+ b->log_mask = DPU_DBG_MASK_DSPP;
+ return &m->dspp[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+struct dpu_hw_dspp *dpu_hw_dspp_init(enum dpu_dspp idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_dspp *c;
+ const struct dpu_dspp_cfg *cfg;
+
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _dspp_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ c->idx = idx;
+ c->cap = cfg;
+ _setup_dspp_ops(c, c->cap->features);
+
+ return c;
+}
+
+void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp)
+{
+ kfree(dspp);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
new file mode 100644
index 000000000..05ecfdfac
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_DSPP_H
+#define _DPU_HW_DSPP_H
+
+struct dpu_hw_dspp;
+
+/**
+ * struct dpu_hw_pcc_coeff - PCC coefficient structure for each color
+ * component.
+ * @r: red coefficient.
+ * @g: green coefficient.
+ * @b: blue coefficient.
+ */
+
+struct dpu_hw_pcc_coeff {
+ __u32 r;
+ __u32 g;
+ __u32 b;
+};
+
+/**
+ * struct dpu_hw_pcc_cfg - pcc feature structure
+ * @r: red coefficients.
+ * @g: green coefficients.
+ * @b: blue coefficients.
+ */
+struct dpu_hw_pcc_cfg {
+ struct dpu_hw_pcc_coeff r;
+ struct dpu_hw_pcc_coeff g;
+ struct dpu_hw_pcc_coeff b;
+};
+
+/**
+ * struct dpu_hw_dspp_ops - interface to the dspp hardware driver functions
+ * Caller must call the init function to get the dspp context for each dspp
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_dspp_ops {
+ /**
+ * setup_pcc - setup dspp pcc
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_pcc)(struct dpu_hw_dspp *ctx, struct dpu_hw_pcc_cfg *cfg);
+
+};
+
+/**
+ * struct dpu_hw_dspp - dspp description
+ * @base: Hardware block base structure
+ * @hw: Block hardware details
+ * @idx: DSPP index
+ * @cap: Pointer to layer_cfg
+ * @ops: Pointer to operations possible for this DSPP
+ */
+struct dpu_hw_dspp {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* dspp */
+ int idx;
+ const struct dpu_dspp_cfg *cap;
+
+ /* Ops */
+ struct dpu_hw_dspp_ops ops;
+};
+
+/**
+ * to_dpu_hw_dspp - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_dspp *to_dpu_hw_dspp(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_dspp, base);
+}
+
+/**
+ * dpu_hw_dspp_init - initializes the dspp hw driver object.
+ * Should be called once before accessing any dspp.
+ * @idx: DSPP index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ * @Return: pointer to structure or ERR_PTR
+ */
+struct dpu_hw_dspp *dpu_hw_dspp_init(enum dpu_dspp idx,
+ void __iomem *addr, const struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_dspp_destroy(): Destroys DSPP driver context
+ * @dspp: Pointer to DSPP driver context
+ */
+void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp);
+
+#endif /*_DPU_HW_DSPP_H */
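
A minimal sketch of driving the PCC op above. The 0x800 diagonal value is an assumed unity gain; the actual fixed-point format of the coefficients is hardware specific. Passing NULL instead of a config disables the block:

/* sketch: program a diagonal-only (pass-through-style) PCC matrix */
static void example_dspp_pcc(struct dpu_hw_dspp *dspp)
{
	struct dpu_hw_pcc_cfg cfg = {
		.r = { .r = 0x800 },	/* assumed unity gain, hw specific */
		.g = { .g = 0x800 },
		.b = { .b = 0x800 },
	};

	/* setup_pcc is only populated when DPU_DSPP_PCC is in features */
	if (dspp->ops.setup_pcc)
		dspp->ops.setup_pcc(dspp, &cfg);
}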
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
new file mode 100644
index 000000000..75e1b89c9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -0,0 +1,574 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+
+#include "dpu_core_irq.h"
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_trace.h"
+
+/*
+ * Register offsets in MDSS register file for the interrupt registers
+ * w.r.t. the MDP base
+ */
+#define MDP_SSPP_TOP0_OFF 0x0
+#define MDP_INTF_0_OFF 0x6A000
+#define MDP_INTF_1_OFF 0x6A800
+#define MDP_INTF_2_OFF 0x6B000
+#define MDP_INTF_3_OFF 0x6B800
+#define MDP_INTF_4_OFF 0x6C000
+#define MDP_INTF_5_OFF 0x6C800
+#define INTF_INTR_EN 0x1c0
+#define INTF_INTR_STATUS 0x1c4
+#define INTF_INTR_CLEAR 0x1c8
+#define MDP_AD4_0_OFF 0x7C000
+#define MDP_AD4_1_OFF 0x7D000
+#define MDP_AD4_INTR_EN_OFF 0x41c
+#define MDP_AD4_INTR_CLEAR_OFF 0x424
+#define MDP_AD4_INTR_STATUS_OFF 0x420
+#define MDP_INTF_0_OFF_REV_7xxx 0x34000
+#define MDP_INTF_1_OFF_REV_7xxx 0x35000
+#define MDP_INTF_2_OFF_REV_7xxx 0x36000
+#define MDP_INTF_3_OFF_REV_7xxx 0x37000
+#define MDP_INTF_4_OFF_REV_7xxx 0x38000
+#define MDP_INTF_5_OFF_REV_7xxx 0x39000
+
+/**
+ * struct dpu_intr_reg - a single interrupt register set (clear/enable/status)
+ * @clr_off: offset to CLEAR reg
+ * @en_off: offset to ENABLE reg
+ * @status_off: offset to STATUS reg
+ */
+struct dpu_intr_reg {
+ u32 clr_off;
+ u32 en_off;
+ u32 status_off;
+};
+
+/*
+ * dpu_intr_set - list of DPU interrupt register sets, indexed by dpu_hw_intr_reg
+ *
+ * When making changes be sure to sync with dpu_hw_intr_reg
+ */
+static const struct dpu_intr_reg dpu_intr_set[] = {
+ [MDP_SSPP_TOP0_INTR] = {
+ MDP_SSPP_TOP0_OFF+INTR_CLEAR,
+ MDP_SSPP_TOP0_OFF+INTR_EN,
+ MDP_SSPP_TOP0_OFF+INTR_STATUS
+ },
+ [MDP_SSPP_TOP0_INTR2] = {
+ MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
+ MDP_SSPP_TOP0_OFF+INTR2_EN,
+ MDP_SSPP_TOP0_OFF+INTR2_STATUS
+ },
+ [MDP_SSPP_TOP0_HIST_INTR] = {
+ MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
+ MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
+ MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
+ },
+ [MDP_INTF0_INTR] = {
+ MDP_INTF_0_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_0_OFF+INTF_INTR_EN,
+ MDP_INTF_0_OFF+INTF_INTR_STATUS
+ },
+ [MDP_INTF1_INTR] = {
+ MDP_INTF_1_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_1_OFF+INTF_INTR_EN,
+ MDP_INTF_1_OFF+INTF_INTR_STATUS
+ },
+ [MDP_INTF2_INTR] = {
+ MDP_INTF_2_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_2_OFF+INTF_INTR_EN,
+ MDP_INTF_2_OFF+INTF_INTR_STATUS
+ },
+ [MDP_INTF3_INTR] = {
+ MDP_INTF_3_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_3_OFF+INTF_INTR_EN,
+ MDP_INTF_3_OFF+INTF_INTR_STATUS
+ },
+ [MDP_INTF4_INTR] = {
+ MDP_INTF_4_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_4_OFF+INTF_INTR_EN,
+ MDP_INTF_4_OFF+INTF_INTR_STATUS
+ },
+ [MDP_INTF5_INTR] = {
+ MDP_INTF_5_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_5_OFF+INTF_INTR_EN,
+ MDP_INTF_5_OFF+INTF_INTR_STATUS
+ },
+ [MDP_AD4_0_INTR] = {
+ MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
+ MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
+ MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
+ },
+ [MDP_AD4_1_INTR] = {
+ MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
+ MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
+ MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
+ },
+ [MDP_INTF0_7xxx_INTR] = {
+ MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_CLEAR,
+ MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_EN,
+ MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_STATUS
+ },
+ [MDP_INTF1_7xxx_INTR] = {
+ MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_CLEAR,
+ MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_EN,
+ MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS
+ },
+ [MDP_INTF2_7xxx_INTR] = {
+ MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_CLEAR,
+ MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_EN,
+ MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_STATUS
+ },
+ [MDP_INTF3_7xxx_INTR] = {
+ MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_CLEAR,
+ MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_EN,
+ MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_STATUS
+ },
+ [MDP_INTF4_7xxx_INTR] = {
+ MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_CLEAR,
+ MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_EN,
+ MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_STATUS
+ },
+ [MDP_INTF5_7xxx_INTR] = {
+ MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR,
+ MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN,
+ MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS
+ },
+};
+
+#define DPU_IRQ_REG(irq_idx)	((irq_idx) / 32)
+#define DPU_IRQ_MASK(irq_idx)	(BIT((irq_idx) % 32))
+
+/**
+ * dpu_core_irq_callback_handler - dispatch core interrupts
+ * @dpu_kms: Pointer to DPU's KMS structure
+ * @irq_idx: interrupt index
+ */
+static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
+{
+ VERB("irq_idx=%d\n", irq_idx);
+
+	if (!dpu_kms->hw_intr->irq_tbl[irq_idx].cb) {
+		DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
+		return;
+	}
+
+ atomic_inc(&dpu_kms->hw_intr->irq_tbl[irq_idx].count);
+
+ /*
+ * Perform registered function callback
+ */
+ dpu_kms->hw_intr->irq_tbl[irq_idx].cb(dpu_kms->hw_intr->irq_tbl[irq_idx].arg, irq_idx);
+}
+
+irqreturn_t dpu_core_irq(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ struct dpu_hw_intr *intr = dpu_kms->hw_intr;
+ int reg_idx;
+ int irq_idx;
+ u32 irq_status;
+ u32 enable_mask;
+ int bit;
+ unsigned long irq_flags;
+
+ if (!intr)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
+ if (!test_bit(reg_idx, &intr->irq_mask))
+ continue;
+
+ /* Read interrupt status */
+ irq_status = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].status_off);
+
+ /* Read enable mask */
+ enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].en_off);
+
+ /* and clear the interrupt */
+ if (irq_status)
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+ irq_status);
+
+ /* Finally update IRQ status based on enable mask */
+ irq_status &= enable_mask;
+
+ if (!irq_status)
+ continue;
+
+ /*
+ * Search through matching intr status.
+ */
+ while ((bit = ffs(irq_status)) != 0) {
+ irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);
+
+ dpu_core_irq_callback_handler(dpu_kms, irq_idx);
+
+			/*
+			 * When the callback finishes, clear the irq_status
+			 * bit with the matching mask. Once irq_status is
+			 * fully cleared, the search can stop.
+			 */
+ irq_status &= ~BIT(bit - 1);
+ }
+ }
+
+ /* ensure register writes go through */
+ wmb();
+
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+ return IRQ_HANDLED;
+}
+
+static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
+{
+ int reg_idx;
+ const struct dpu_intr_reg *reg;
+ const char *dbgstr = NULL;
+ uint32_t cache_irq_mask;
+
+ if (!intr)
+ return -EINVAL;
+
+ if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+	/*
+	 * The cache_irq_mask and hardware RMW operations need to be done
+	 * under irq_lock, and it's the caller's responsibility to ensure
+	 * the lock is held.
+	 */
+ assert_spin_locked(&intr->irq_lock);
+
+ reg_idx = DPU_IRQ_REG(irq_idx);
+ reg = &dpu_intr_set[reg_idx];
+
+ cache_irq_mask = intr->cache_irq_mask[reg_idx];
+ if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
+ dbgstr = "DPU IRQ already set:";
+ } else {
+ dbgstr = "DPU IRQ enabled:";
+
+ cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
+ /* Cleaning any pending interrupt */
+ DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
+ /* Enabling interrupts with the new mask */
+ DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+
+ /* ensure register write goes through */
+ wmb();
+
+ intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+ }
+
+ pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
+ DPU_IRQ_MASK(irq_idx), cache_irq_mask);
+
+ return 0;
+}
+
+static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
+{
+ int reg_idx;
+ const struct dpu_intr_reg *reg;
+ const char *dbgstr = NULL;
+ uint32_t cache_irq_mask;
+
+ if (!intr)
+ return -EINVAL;
+
+ if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+	/*
+	 * The cache_irq_mask and hardware RMW operations need to be done
+	 * under irq_lock, and it's the caller's responsibility to ensure
+	 * the lock is held.
+	 */
+ assert_spin_locked(&intr->irq_lock);
+
+ reg_idx = DPU_IRQ_REG(irq_idx);
+ reg = &dpu_intr_set[reg_idx];
+
+ cache_irq_mask = intr->cache_irq_mask[reg_idx];
+ if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
+ dbgstr = "DPU IRQ is already cleared:";
+ } else {
+ dbgstr = "DPU IRQ mask disable:";
+
+ cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
+ /* Disable interrupts based on the new mask */
+ DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+ /* Cleaning any pending interrupt */
+ DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
+
+ /* ensure register write goes through */
+ wmb();
+
+ intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+ }
+
+ pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
+ DPU_IRQ_MASK(irq_idx), cache_irq_mask);
+
+ return 0;
+}
+
+static void dpu_clear_irqs(struct dpu_kms *dpu_kms)
+{
+ struct dpu_hw_intr *intr = dpu_kms->hw_intr;
+ int i;
+
+ if (!intr)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+ if (test_bit(i, &intr->irq_mask))
+ DPU_REG_WRITE(&intr->hw,
+ dpu_intr_set[i].clr_off, 0xffffffff);
+ }
+
+ /* ensure register writes go through */
+ wmb();
+}
+
+static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
+{
+ struct dpu_hw_intr *intr = dpu_kms->hw_intr;
+ int i;
+
+ if (!intr)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+ if (test_bit(i, &intr->irq_mask))
+ DPU_REG_WRITE(&intr->hw,
+ dpu_intr_set[i].en_off, 0x00000000);
+ }
+
+ /* ensure register writes go through */
+ wmb();
+}
+
+u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx)
+{
+ struct dpu_hw_intr *intr = dpu_kms->hw_intr;
+ int reg_idx;
+ unsigned long irq_flags;
+ u32 intr_status;
+
+ if (!intr)
+ return 0;
+
+	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
+		DPU_ERROR("[%pS] invalid irq_idx=%d\n",
+				__builtin_return_address(0), irq_idx);
+		return 0;
+	}
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+
+ reg_idx = DPU_IRQ_REG(irq_idx);
+ intr_status = DPU_REG_READ(&intr->hw,
+ dpu_intr_set[reg_idx].status_off) &
+ DPU_IRQ_MASK(irq_idx);
+ if (intr_status)
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+ intr_status);
+
+ /* ensure register writes go through */
+ wmb();
+
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+ return intr_status;
+}
+
+static void __intr_offset(const struct dpu_mdss_cfg *m,
+ void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
+{
+ hw->blk_addr = addr + m->mdp[0].base;
+}
+
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_intr *intr;
+ int nirq = MDP_INTR_MAX * 32;
+
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
+ intr = kzalloc(struct_size(intr, irq_tbl, nirq), GFP_KERNEL);
+ if (!intr)
+ return ERR_PTR(-ENOMEM);
+
+ __intr_offset(m, addr, &intr->hw);
+
+ intr->total_irqs = nirq;
+
+ intr->irq_mask = m->mdss_irqs;
+
+ spin_lock_init(&intr->irq_lock);
+
+ return intr;
+}
+
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
+{
+ kfree(intr);
+}
+
+int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
+ void (*irq_cb)(void *arg, int irq_idx),
+ void *irq_arg)
+{
+ unsigned long irq_flags;
+ int ret;
+
+ if (!irq_cb) {
+ DPU_ERROR("invalid ird_idx:%d irq_cb:%ps\n", irq_idx, irq_cb);
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+ spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ if (unlikely(WARN_ON(dpu_kms->hw_intr->irq_tbl[irq_idx].cb))) {
+ spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ return -EBUSY;
+ }
+
+ trace_dpu_core_irq_register_callback(irq_idx, irq_cb);
+ dpu_kms->hw_intr->irq_tbl[irq_idx].arg = irq_arg;
+ dpu_kms->hw_intr->irq_tbl[irq_idx].cb = irq_cb;
+
+ ret = dpu_hw_intr_enable_irq_locked(
+ dpu_kms->hw_intr,
+ irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+ irq_idx);
+ spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ trace_dpu_irq_register_success(irq_idx);
+
+ return 0;
+}
+
+int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
+{
+ unsigned long irq_flags;
+ int ret;
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+ spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
+ trace_dpu_core_irq_unregister_callback(irq_idx);
+
+ ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to disable IRQ for irq_idx:%d: %d\n",
+ irq_idx, ret);
+
+ dpu_kms->hw_intr->irq_tbl[irq_idx].cb = NULL;
+ dpu_kms->hw_intr->irq_tbl[irq_idx].arg = NULL;
+
+ spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ trace_dpu_irq_unregister_success(irq_idx);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
+{
+ struct dpu_kms *dpu_kms = s->private;
+ unsigned long irq_flags;
+ int i, irq_count;
+ void *cb;
+
+ for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
+ spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
+ irq_count = atomic_read(&dpu_kms->hw_intr->irq_tbl[i].count);
+ cb = dpu_kms->hw_intr->irq_tbl[i].cb;
+ spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ if (irq_count || cb)
+ seq_printf(s, "idx:%d irq:%d cb:%ps\n", i, irq_count, cb);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);
+
+void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent)
+{
+ debugfs_create_file("core_irq", 0600, parent, dpu_kms,
+ &dpu_debugfs_core_irq_fops);
+}
+#endif
+
+void dpu_core_irq_preinstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ int i;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ dpu_clear_irqs(dpu_kms);
+ dpu_disable_all_irqs(dpu_kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
+ atomic_set(&dpu_kms->hw_intr->irq_tbl[i].count, 0);
+}
+
+void dpu_core_irq_uninstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ int i;
+
+ if (!dpu_kms->hw_intr)
+ return;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
+ if (dpu_kms->hw_intr->irq_tbl[i].cb)
+ DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
+
+ dpu_clear_irqs(dpu_kms);
+ dpu_disable_all_irqs(dpu_kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+}
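
The dispatch loop above flattens a (register set, bit) pair into a single irq_idx, 32 interrupts per register set. A standalone round-trip sketch of that arithmetic, using MDP_INTF1_INTR (index 4 in the enum declared in dpu_hw_interrupts.h below) as an example:

/* sketch: irq_idx <-> (reg_idx, bit) round trip, compilable in userspace */
#include <assert.h>

int main(void)
{
	int reg_idx = 4, bit = 17;		/* e.g. MDP_INTF1_INTR, bit 17 */
	int irq_idx = reg_idx * 32 + bit;	/* DPU_IRQ_IDX(reg_idx, bit) */

	assert(irq_idx == 145);
	assert(irq_idx / 32 == reg_idx);	/* DPU_IRQ_REG(irq_idx) */
	assert(irq_idx % 32 == bit);		/* bit feeding DPU_IRQ_MASK() */
	return 0;
}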
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
new file mode 100644
index 000000000..464439554
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_INTERRUPTS_H
+#define _DPU_HW_INTERRUPTS_H
+
+#include <linux/types.h>
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+
+/* When making changes be sure to sync with dpu_intr_set */
+enum dpu_hw_intr_reg {
+ MDP_SSPP_TOP0_INTR,
+ MDP_SSPP_TOP0_INTR2,
+ MDP_SSPP_TOP0_HIST_INTR,
+ MDP_INTF0_INTR,
+ MDP_INTF1_INTR,
+ MDP_INTF2_INTR,
+ MDP_INTF3_INTR,
+ MDP_INTF4_INTR,
+ MDP_INTF5_INTR,
+ MDP_AD4_0_INTR,
+ MDP_AD4_1_INTR,
+ MDP_INTF0_7xxx_INTR,
+ MDP_INTF1_7xxx_INTR,
+ MDP_INTF2_7xxx_INTR,
+ MDP_INTF3_7xxx_INTR,
+ MDP_INTF4_7xxx_INTR,
+ MDP_INTF5_7xxx_INTR,
+ MDP_INTR_MAX,
+};
+
+#define DPU_IRQ_IDX(reg_idx, offset)	((reg_idx) * 32 + (offset))
+
+/**
+ * struct dpu_hw_intr: hw interrupts handling data structure
+ * @hw: virtual address mapping
+ * @cache_irq_mask: array of IRQ enable masks reg storage created during init
+ * @save_irq_status: array of IRQ status reg storage created during init
+ * @total_irqs: total number of irq_idx mapped in the hw_interrupts
+ * @irq_lock: spinlock for accessing IRQ resources
+ * @irq_mask: bitmask of register sets (from enum dpu_hw_intr_reg) in use
+ * @irq_tbl: per-IRQ table of callback, argument and trigger count
+ */
+struct dpu_hw_intr {
+ struct dpu_hw_blk_reg_map hw;
+ u32 cache_irq_mask[MDP_INTR_MAX];
+ u32 *save_irq_status;
+ u32 total_irqs;
+ spinlock_t irq_lock;
+ unsigned long irq_mask;
+
+ struct {
+ void (*cb)(void *arg, int irq_idx);
+ void *arg;
+ atomic_t count;
+ } irq_tbl[];
+};
+
+/**
+ * dpu_hw_intr_init(): Initializes the interrupts hw object
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_intr_destroy(): Cleanup interrupts hw object
+ * @intr: pointer to interrupts hw object
+ */
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr);
+#endif
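
A minimal sketch of the callback life cycle against the register/unregister API implemented in dpu_hw_interrupts.c, assuming a valid dpu_kms and using bit 0 of MDP_INTF1_INTR as a hypothetical interrupt source:

/* sketch: attach a handler to one interrupt, then detach it */
static void example_irq_cb(void *arg, int irq_idx)
{
	/* arg is the pointer passed at registration time */
}

static int example_irq_use(struct dpu_kms *dpu_kms, void *priv)
{
	int irq_idx = DPU_IRQ_IDX(MDP_INTF1_INTR, 0);	/* hypothetical */
	int ret;

	ret = dpu_core_irq_register_callback(dpu_kms, irq_idx,
					     example_irq_cb, priv);
	if (ret)
		return ret;

	/* ... the interrupt is live until unregistered ... */

	return dpu_core_irq_unregister_callback(dpu_kms, irq_idx);
}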
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
new file mode 100644
index 000000000..384558d2f
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_intf.h"
+#include "dpu_kms.h"
+
+#define INTF_TIMING_ENGINE_EN 0x000
+#define INTF_CONFIG 0x004
+#define INTF_HSYNC_CTL 0x008
+#define INTF_VSYNC_PERIOD_F0 0x00C
+#define INTF_VSYNC_PERIOD_F1 0x010
+#define INTF_VSYNC_PULSE_WIDTH_F0 0x014
+#define INTF_VSYNC_PULSE_WIDTH_F1 0x018
+#define INTF_DISPLAY_V_START_F0 0x01C
+#define INTF_DISPLAY_V_START_F1 0x020
+#define INTF_DISPLAY_V_END_F0 0x024
+#define INTF_DISPLAY_V_END_F1 0x028
+#define INTF_ACTIVE_V_START_F0 0x02C
+#define INTF_ACTIVE_V_START_F1 0x030
+#define INTF_ACTIVE_V_END_F0 0x034
+#define INTF_ACTIVE_V_END_F1 0x038
+#define INTF_DISPLAY_HCTL 0x03C
+#define INTF_ACTIVE_HCTL 0x040
+#define INTF_BORDER_COLOR 0x044
+#define INTF_UNDERFLOW_COLOR 0x048
+#define INTF_HSYNC_SKEW 0x04C
+#define INTF_POLARITY_CTL 0x050
+#define INTF_TEST_CTL 0x054
+#define INTF_TP_COLOR0 0x058
+#define INTF_TP_COLOR1 0x05C
+#define INTF_CONFIG2 0x060
+#define INTF_DISPLAY_DATA_HCTL 0x064
+#define INTF_ACTIVE_DATA_HCTL 0x068
+#define INTF_FRAME_LINE_COUNT_EN 0x0A8
+#define INTF_FRAME_COUNT 0x0AC
+#define INTF_LINE_COUNT 0x0B0
+
+#define INTF_DEFLICKER_CONFIG 0x0F0
+#define INTF_DEFLICKER_STRNG_COEFF 0x0F4
+#define INTF_DEFLICKER_WEAK_COEFF 0x0F8
+
+#define INTF_DSI_CMD_MODE_TRIGGER_EN 0x084
+#define INTF_PANEL_FORMAT 0x090
+#define INTF_TPG_ENABLE 0x100
+#define INTF_TPG_MAIN_CONTROL 0x104
+#define INTF_TPG_VIDEO_CONFIG 0x108
+#define INTF_TPG_COMPONENT_LIMITS 0x10C
+#define INTF_TPG_RECTANGLE 0x110
+#define INTF_TPG_INITIAL_VALUE 0x114
+#define INTF_TPG_BLK_WHITE_PATTERN_FRAMES 0x118
+#define INTF_TPG_RGB_MAPPING 0x11C
+#define INTF_PROG_FETCH_START 0x170
+#define INTF_PROG_ROT_START 0x174
+#define INTF_MUX 0x25C
+#define INTF_STATUS 0x26C
+
+#define INTF_CFG_ACTIVE_H_EN BIT(29)
+#define INTF_CFG_ACTIVE_V_EN BIT(30)
+
+#define INTF_CFG2_DATABUS_WIDEN BIT(0)
+#define INTF_CFG2_DATA_HCTL_EN BIT(4)
+
+#define INTF_MISR_CTRL 0x180
+#define INTF_MISR_SIGNATURE 0x184
+
+static const struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->intf_count; i++) {
+ if ((intf == m->intf[i].id) &&
+ (m->intf[i].type != INTF_NONE)) {
+ b->blk_addr = addr + m->intf[i].base;
+ b->log_mask = DPU_DBG_MASK_INTF;
+ return &m->intf[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
+ const struct intf_timing_params *p,
+ const struct dpu_format *fmt)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 hsync_period, vsync_period;
+ u32 display_v_start, display_v_end;
+ u32 hsync_start_x, hsync_end_x;
+ u32 hsync_data_start_x, hsync_data_end_x;
+ u32 active_h_start, active_h_end;
+ u32 active_v_start, active_v_end;
+ u32 active_hctl, display_hctl, hsync_ctl;
+ u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
+ u32 panel_format;
+ u32 intf_cfg, intf_cfg2 = 0;
+ u32 display_data_hctl = 0, active_data_hctl = 0;
+ u32 data_width;
+ bool dp_intf = false;
+
+ /* read interface_cfg */
+ intf_cfg = DPU_REG_READ(c, INTF_CONFIG);
+
+ if (ctx->cap->type == INTF_DP)
+ dp_intf = true;
+
+ hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
+ p->h_front_porch;
+ vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height +
+ p->v_front_porch;
+
+ display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
+ hsync_period) + p->hsync_skew;
+ display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
+ p->hsync_skew - 1;
+
+ hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
+ hsync_end_x = hsync_period - p->h_front_porch - 1;
+
+ if (p->width != p->xres) { /* border fill added */
+ active_h_start = hsync_start_x;
+ active_h_end = active_h_start + p->xres - 1;
+ } else {
+ active_h_start = 0;
+ active_h_end = 0;
+ }
+
+ if (p->height != p->yres) { /* border fill added */
+ active_v_start = display_v_start;
+ active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+ } else {
+ active_v_start = 0;
+ active_v_end = 0;
+ }
+
+ if (active_h_end) {
+ active_hctl = (active_h_end << 16) | active_h_start;
+ intf_cfg |= INTF_CFG_ACTIVE_H_EN;
+ } else {
+ active_hctl = 0;
+ }
+
+ if (active_v_end)
+ intf_cfg |= INTF_CFG_ACTIVE_V_EN;
+
+ hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
+ display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+ /*
+ * DATA_HCTL_EN controls data timing which can be different from
+ * video timing. It is recommended to enable it for all cases, except
+ * if compression is enabled in 1 pixel per clock mode
+ */
+ if (p->wide_bus_en)
+ intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN | INTF_CFG2_DATA_HCTL_EN;
+
+ data_width = p->width;
+
+ hsync_data_start_x = hsync_start_x;
+ hsync_data_end_x = hsync_start_x + data_width - 1;
+
+ display_data_hctl = (hsync_data_end_x << 16) | hsync_data_start_x;
+
+ if (dp_intf) {
+ /* DP timing adjustment */
+ display_v_start += p->hsync_pulse_width + p->h_back_porch;
+ display_v_end -= p->h_front_porch;
+
+ active_h_start = hsync_start_x;
+ active_h_end = active_h_start + p->xres - 1;
+ active_v_start = display_v_start;
+ active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+
+ active_hctl = (active_h_end << 16) | active_h_start;
+ display_hctl = active_hctl;
+
+ intf_cfg |= INTF_CFG_ACTIVE_H_EN | INTF_CFG_ACTIVE_V_EN;
+ }
+
+ den_polarity = 0;
+ if (ctx->cap->type == INTF_HDMI) {
+ hsync_polarity = p->yres >= 720 ? 0 : 1;
+ vsync_polarity = p->yres >= 720 ? 0 : 1;
+ } else if (ctx->cap->type == INTF_DP) {
+ hsync_polarity = p->hsync_polarity;
+ vsync_polarity = p->vsync_polarity;
+ } else {
+ hsync_polarity = 0;
+ vsync_polarity = 0;
+ }
+ polarity_ctl = (den_polarity << 2) | /* DEN Polarity */
+ (vsync_polarity << 1) | /* VSYNC Polarity */
+ (hsync_polarity << 0); /* HSYNC Polarity */
+
+ if (!DPU_FORMAT_IS_YUV(fmt))
+ panel_format = (fmt->bits[C0_G_Y] |
+ (fmt->bits[C1_B_Cb] << 2) |
+ (fmt->bits[C2_R_Cr] << 4) |
+ (0x21 << 8));
+ else
+ /* Interface treats all the pixel data in RGB888 format */
+ panel_format = (COLOR_8BIT |
+ (COLOR_8BIT << 2) |
+ (COLOR_8BIT << 4) |
+ (0x21 << 8));
+
+ DPU_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
+ DPU_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period);
+ DPU_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
+ p->vsync_pulse_width * hsync_period);
+ DPU_REG_WRITE(c, INTF_DISPLAY_HCTL, display_hctl);
+ DPU_REG_WRITE(c, INTF_DISPLAY_V_START_F0, display_v_start);
+ DPU_REG_WRITE(c, INTF_DISPLAY_V_END_F0, display_v_end);
+ DPU_REG_WRITE(c, INTF_ACTIVE_HCTL, active_hctl);
+ DPU_REG_WRITE(c, INTF_ACTIVE_V_START_F0, active_v_start);
+ DPU_REG_WRITE(c, INTF_ACTIVE_V_END_F0, active_v_end);
+ DPU_REG_WRITE(c, INTF_BORDER_COLOR, p->border_clr);
+ DPU_REG_WRITE(c, INTF_UNDERFLOW_COLOR, p->underflow_clr);
+ DPU_REG_WRITE(c, INTF_HSYNC_SKEW, p->hsync_skew);
+ DPU_REG_WRITE(c, INTF_POLARITY_CTL, polarity_ctl);
+ DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
+ DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
+ DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
+ if (ctx->cap->features & BIT(DPU_DATA_HCTL_EN)) {
+ DPU_REG_WRITE(c, INTF_CONFIG2, intf_cfg2);
+ DPU_REG_WRITE(c, INTF_DISPLAY_DATA_HCTL, display_data_hctl);
+ DPU_REG_WRITE(c, INTF_ACTIVE_DATA_HCTL, active_data_hctl);
+ }
+}
+
+static void dpu_hw_intf_enable_timing_engine(
+ struct dpu_hw_intf *intf,
+ u8 enable)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ /* Note: Display interface select is handled in top block hw layer */
+ DPU_REG_WRITE(c, INTF_TIMING_ENGINE_EN, enable != 0);
+}
+
+static void dpu_hw_intf_setup_prg_fetch(
+ struct dpu_hw_intf *intf,
+ const struct intf_prog_fetch *fetch)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ int fetch_enable;
+
+ /*
+ * Fetch should always be outside the active lines. If the fetching
+	 * is programmed within the active region, hardware behavior is unknown.
+ */
+
+ fetch_enable = DPU_REG_READ(c, INTF_CONFIG);
+ if (fetch->enable) {
+ fetch_enable |= BIT(31);
+ DPU_REG_WRITE(c, INTF_PROG_FETCH_START,
+ fetch->fetch_start);
+ } else {
+ fetch_enable &= ~BIT(31);
+ }
+
+ DPU_REG_WRITE(c, INTF_CONFIG, fetch_enable);
+}
+
+static void dpu_hw_intf_bind_pingpong_blk(
+ struct dpu_hw_intf *intf,
+ bool enable,
+ const enum dpu_pingpong pp)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ u32 mux_cfg;
+
+ mux_cfg = DPU_REG_READ(c, INTF_MUX);
+ mux_cfg &= ~0xf;
+
+ if (enable)
+ mux_cfg |= (pp - PINGPONG_0) & 0x7;
+ else
+ mux_cfg |= 0xf;
+
+ DPU_REG_WRITE(c, INTF_MUX, mux_cfg);
+}
+
+static void dpu_hw_intf_get_status(
+ struct dpu_hw_intf *intf,
+ struct intf_status *s)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ unsigned long cap = intf->cap->features;
+
+ if (cap & BIT(DPU_INTF_STATUS_SUPPORTED))
+ s->is_en = DPU_REG_READ(c, INTF_STATUS) & BIT(0);
+ else
+ s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN);
+
+ s->is_prog_fetch_en = !!(DPU_REG_READ(c, INTF_CONFIG) & BIT(31));
+ if (s->is_en) {
+ s->frame_count = DPU_REG_READ(c, INTF_FRAME_COUNT);
+ s->line_count = DPU_REG_READ(c, INTF_LINE_COUNT);
+ } else {
+ s->line_count = 0;
+ s->frame_count = 0;
+ }
+}
+
+static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!intf)
+ return 0;
+
+ c = &intf->hw;
+
+ return DPU_REG_READ(c, INTF_LINE_COUNT);
+}
+
+static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf)
+{
+ dpu_hw_setup_misr(&intf->hw, INTF_MISR_CTRL, 0x1);
+}
+
+static int dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf, u32 *misr_value)
+{
+ return dpu_hw_collect_misr(&intf->hw, INTF_MISR_CTRL, INTF_MISR_SIGNATURE, misr_value);
+}
+
+static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
+ unsigned long cap)
+{
+ ops->setup_timing_gen = dpu_hw_intf_setup_timing_engine;
+ ops->setup_prg_fetch = dpu_hw_intf_setup_prg_fetch;
+ ops->get_status = dpu_hw_intf_get_status;
+ ops->enable_timing = dpu_hw_intf_enable_timing_engine;
+ ops->get_line_count = dpu_hw_intf_get_line_count;
+ if (cap & BIT(DPU_INTF_INPUT_CTRL))
+ ops->bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk;
+ ops->setup_misr = dpu_hw_intf_setup_misr;
+ ops->collect_misr = dpu_hw_intf_collect_misr;
+}
+
+struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_intf *c;
+ const struct dpu_intf_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _intf_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ pr_err("failed to create dpu_hw_intf %d\n", idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+	/* Assign ops */
+ c->idx = idx;
+ c->cap = cfg;
+ c->mdss = m;
+ _setup_intf_ops(&c->ops, c->cap->features);
+
+ return c;
+}
+
+void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
+{
+ kfree(intf);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
new file mode 100644
index 000000000..e75339b96
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_INTF_H
+#define _DPU_HW_INTF_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+struct dpu_hw_intf;
+
+/* intf timing settings */
+struct intf_timing_params {
+ u32 width; /* active width */
+ u32 height; /* active height */
+ u32 xres; /* Display panel width */
+ u32 yres; /* Display panel height */
+
+ u32 h_back_porch;
+ u32 h_front_porch;
+ u32 v_back_porch;
+ u32 v_front_porch;
+ u32 hsync_pulse_width;
+ u32 vsync_pulse_width;
+ u32 hsync_polarity;
+ u32 vsync_polarity;
+ u32 border_clr;
+ u32 underflow_clr;
+ u32 hsync_skew;
+
+ bool wide_bus_en;
+};
+
+struct intf_prog_fetch {
+ u8 enable;
+ /* vsync counter for the front porch pixel line */
+ u32 fetch_start;
+};
+
+struct intf_status {
+ u8 is_en; /* interface timing engine is enabled or not */
+ u8 is_prog_fetch_en; /* interface prog fetch counter is enabled or not */
+ u32 frame_count; /* frame count since timing engine enabled */
+ u32 line_count; /* current line count including blanking */
+};
+
+/**
+ * struct dpu_hw_intf_ops : Interface to the interface Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @setup_timing_gen: programs the timing engine
+ * @setup_prg_fetch: enables/disables the programmable fetch logic
+ * @enable_timing: enable/disable timing engine
+ * @get_status: returns if timing engine is enabled or not
+ * @get_line_count: reads current vertical line counter
+ * @bind_pingpong_blk: enable/disable the connection with pingpong which will
+ * feed pixels to this interface
+ * @setup_misr: enable/disable MISR
+ * @collect_misr: read MISR signature
+ */
+struct dpu_hw_intf_ops {
+ void (*setup_timing_gen)(struct dpu_hw_intf *intf,
+ const struct intf_timing_params *p,
+ const struct dpu_format *fmt);
+
+ void (*setup_prg_fetch)(struct dpu_hw_intf *intf,
+ const struct intf_prog_fetch *fetch);
+
+ void (*enable_timing)(struct dpu_hw_intf *intf,
+ u8 enable);
+
+ void (*get_status)(struct dpu_hw_intf *intf,
+ struct intf_status *status);
+
+ u32 (*get_line_count)(struct dpu_hw_intf *intf);
+
+ void (*bind_pingpong_blk)(struct dpu_hw_intf *intf,
+ bool enable,
+ const enum dpu_pingpong pp);
+ void (*setup_misr)(struct dpu_hw_intf *intf);
+ int (*collect_misr)(struct dpu_hw_intf *intf, u32 *misr_value);
+};
+
+struct dpu_hw_intf {
+ struct dpu_hw_blk_reg_map hw;
+
+ /* intf */
+ enum dpu_intf idx;
+ const struct dpu_intf_cfg *cap;
+ const struct dpu_mdss_cfg *mdss;
+
+ /* ops */
+ struct dpu_hw_intf_ops ops;
+};
+
+/**
+ * dpu_hw_intf_init(): Initializes the intf driver for the passed
+ * interface idx.
+ * @idx: interface index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_intf_destroy(): Destroys INTF driver context
+ * @intf: Pointer to INTF driver context
+ */
+void dpu_hw_intf_destroy(struct dpu_hw_intf *intf);
+
+#endif /*_DPU_HW_INTF_H */
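
A minimal sketch of filling struct intf_timing_params from a struct drm_display_mode with standard DRM porch arithmetic; border fill (width != xres) is left unexercised here, and the remaining fields (polarities, colors, skew, wide bus) keep their zero defaults:

/* sketch: derive the basic timing fields from a DRM display mode */
static void example_fill_timing(struct intf_timing_params *p,
				const struct drm_display_mode *mode)
{
	p->width  = mode->hdisplay;
	p->height = mode->vdisplay;
	p->xres   = mode->hdisplay;	/* equal to width: no border fill */
	p->yres   = mode->vdisplay;

	p->h_front_porch = mode->hsync_start - mode->hdisplay;
	p->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
	p->h_back_porch = mode->htotal - mode->hsync_end;

	p->v_front_porch = mode->vsync_start - mode->vdisplay;
	p->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
	p->v_back_porch = mode->vtotal - mode->vsync_end;
}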
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
new file mode 100644
index 000000000..cc04fb979
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include "dpu_kms.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_mdss.h"
+
+#define LM_OP_MODE 0x00
+#define LM_OUT_SIZE 0x04
+#define LM_BORDER_COLOR_0 0x08
+#define LM_BORDER_COLOR_1 0x010
+
+/* These registers are offsets from the mixer base + stage base */
+#define LM_BLEND0_OP 0x00
+#define LM_BLEND0_CONST_ALPHA 0x04
+#define LM_FG_COLOR_FILL_COLOR_0 0x08
+#define LM_FG_COLOR_FILL_COLOR_1 0x0C
+#define LM_FG_COLOR_FILL_SIZE 0x10
+#define LM_FG_COLOR_FILL_XY 0x14
+
+#define LM_BLEND0_FG_ALPHA 0x04
+#define LM_BLEND0_BG_ALPHA 0x08
+
+#define LM_MISR_CTRL 0x310
+#define LM_MISR_SIGNATURE 0x314
+
+
+static const struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->mixer_count; i++) {
+ if (mixer == m->mixer[i].id) {
+ b->blk_addr = addr + m->mixer[i].base;
+ b->log_mask = DPU_DBG_MASK_LM;
+ return &m->mixer[i];
+ }
+ }
+
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * _stage_offset(): returns the relative offset of the blend registers
+ * for the stage to be setup
+ * @ctx: mixer ctx contains the mixer to be programmed
+ * @stage: stage index to setup
+ */
+static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage)
+{
+	const struct dpu_lm_sub_blks *sblk = ctx->cap->sblk;
+
+	if (stage != DPU_STAGE_BASE && stage <= sblk->maxblendstages)
+ return sblk->blendstage_base[stage - DPU_STAGE_0];
+
+ return -EINVAL;
+}
+
+static void dpu_hw_lm_setup_out(struct dpu_hw_mixer *ctx,
+ struct dpu_hw_mixer_cfg *mixer)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 outsize;
+ u32 op_mode;
+
+ op_mode = DPU_REG_READ(c, LM_OP_MODE);
+
+ outsize = mixer->out_height << 16 | mixer->out_width;
+ DPU_REG_WRITE(c, LM_OUT_SIZE, outsize);
+
+ /* SPLIT_LEFT_RIGHT */
+ if (mixer->right_mixer)
+ op_mode |= BIT(31);
+ else
+ op_mode &= ~BIT(31);
+ DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
+ struct dpu_mdss_color *color,
+ u8 border_en)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ if (border_en) {
+ DPU_REG_WRITE(c, LM_BORDER_COLOR_0,
+ (color->color_0 & 0xFFF) |
+ ((color->color_1 & 0xFFF) << 0x10));
+ DPU_REG_WRITE(c, LM_BORDER_COLOR_1,
+ (color->color_2 & 0xFFF) |
+ ((color->color_3 & 0xFFF) << 0x10));
+ }
+}
+
+static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx)
+{
+ dpu_hw_setup_misr(&ctx->hw, LM_MISR_CTRL, 0x0);
+}
+
+static int dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx, u32 *misr_value)
+{
+ return dpu_hw_collect_misr(&ctx->hw, LM_MISR_CTRL, LM_MISR_SIGNATURE, misr_value);
+}
+
+static void dpu_hw_lm_setup_blend_config_combined_alpha(struct dpu_hw_mixer *ctx,
+ u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int stage_off;
+ u32 const_alpha;
+
+ if (stage == DPU_STAGE_BASE)
+ return;
+
+ stage_off = _stage_offset(ctx, stage);
+ if (WARN_ON(stage_off < 0))
+ return;
+
+ const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16);
+ DPU_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void dpu_hw_lm_setup_blend_config(struct dpu_hw_mixer *ctx,
+ u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int stage_off;
+
+ if (stage == DPU_STAGE_BASE)
+ return;
+
+ stage_off = _stage_offset(ctx, stage);
+ if (WARN_ON(stage_off < 0))
+ return;
+
+ DPU_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off, fg_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off, bg_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
+ uint32_t mixer_op_mode)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int op_mode;
+
+ /* read the existing op_mode configuration */
+ op_mode = DPU_REG_READ(c, LM_OP_MODE);
+
+ op_mode = (op_mode & (BIT(31) | BIT(30))) | mixer_op_mode;
+
+ DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void _setup_mixer_ops(const struct dpu_mdss_cfg *m,
+ struct dpu_hw_lm_ops *ops,
+ unsigned long features)
+{
+ ops->setup_mixer_out = dpu_hw_lm_setup_out;
+ if (test_bit(DPU_MIXER_COMBINED_ALPHA, &features))
+ ops->setup_blend_config = dpu_hw_lm_setup_blend_config_combined_alpha;
+ else
+ ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
+ ops->setup_alpha_out = dpu_hw_lm_setup_color3;
+ ops->setup_border_color = dpu_hw_lm_setup_border_color;
+ ops->setup_misr = dpu_hw_lm_setup_misr;
+ ops->collect_misr = dpu_hw_lm_collect_misr;
+}
+
+struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_mixer *c;
+ const struct dpu_lm_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _lm_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ c->idx = idx;
+ c->cap = cfg;
+ _setup_mixer_ops(m, &c->ops, c->cap->features);
+
+ return c;
+}
+
+void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
+{
+ kfree(lm);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
new file mode 100644
index 000000000..0a050eb24
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_LM_H
+#define _DPU_HW_LM_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+struct dpu_hw_mixer;
+
+struct dpu_hw_mixer_cfg {
+ u32 out_width;
+ u32 out_height;
+ bool right_mixer;
+ int flags;
+};
+
+struct dpu_hw_color3_cfg {
+ u8 keep_fg[DPU_STAGE_MAX];
+};
+
+/**
+ * struct dpu_hw_lm_ops : Interface to the mixer HW driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_lm_ops {
+ /*
+ * Sets up mixer output width and height
+ * and border color if enabled
+ */
+ void (*setup_mixer_out)(struct dpu_hw_mixer *ctx,
+ struct dpu_hw_mixer_cfg *cfg);
+
+ /*
+ * Alpha blending configuration
+ * for the specified stage
+ */
+ void (*setup_blend_config)(struct dpu_hw_mixer *ctx, uint32_t stage,
+ uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op);
+
+ /*
+ * Alpha color component selection from either fg or bg
+ */
+ void (*setup_alpha_out)(struct dpu_hw_mixer *ctx, uint32_t mixer_op);
+
+ /**
+ * setup_border_color : enable/disable border color
+ */
+ void (*setup_border_color)(struct dpu_hw_mixer *ctx,
+ struct dpu_mdss_color *color,
+ u8 border_en);
+
+ /**
+ * setup_misr: Enable/disable MISR
+ */
+ void (*setup_misr)(struct dpu_hw_mixer *ctx);
+
+ /**
+ * collect_misr: Read MISR signature
+ */
+ int (*collect_misr)(struct dpu_hw_mixer *ctx, u32 *misr_value);
+};
+
+struct dpu_hw_mixer {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* lm */
+ enum dpu_lm idx;
+ const struct dpu_lm_cfg *cap;
+ const struct dpu_mdp_cfg *mdp;
+ const struct dpu_ctl_cfg *ctl;
+
+ /* ops */
+ struct dpu_hw_lm_ops ops;
+
+ /* store mixer info specific to display */
+ struct dpu_hw_mixer_cfg cfg;
+};
+
+/**
+ * to_dpu_hw_mixer - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_mixer, base);
+}
+
+/**
+ * dpu_hw_lm_init(): Initializes the mixer hw driver object.
+ * Should be called once per mixer before it is accessed.
+ * @idx: mixer index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_lm_destroy(): Destroys layer mixer driver context
+ * @lm: Pointer to LM driver context
+ */
+void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm);
+
+#endif /*_DPU_HW_LM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
new file mode 100644
index 000000000..d3b0ed0a9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -0,0 +1,459 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_MDSS_H
+#define _DPU_HW_MDSS_H
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "msm_drv.h"
+
+#define DPU_DBG_NAME "dpu"
+
+#define DPU_NONE 0
+
+#ifndef DPU_CSC_MATRIX_COEFF_SIZE
+#define DPU_CSC_MATRIX_COEFF_SIZE 9
+#endif
+
+#ifndef DPU_CSC_CLAMP_SIZE
+#define DPU_CSC_CLAMP_SIZE 6
+#endif
+
+#ifndef DPU_CSC_BIAS_SIZE
+#define DPU_CSC_BIAS_SIZE 3
+#endif
+
+#ifndef DPU_MAX_PLANES
+#define DPU_MAX_PLANES 4
+#endif
+
+#define PIPES_PER_STAGE 2
+#ifndef DPU_MAX_DE_CURVES
+#define DPU_MAX_DE_CURVES 3
+#endif
+
+enum dpu_format_flags {
+ DPU_FORMAT_FLAG_YUV_BIT,
+ DPU_FORMAT_FLAG_DX_BIT,
+ DPU_FORMAT_FLAG_COMPRESSED_BIT,
+ DPU_FORMAT_FLAG_BIT_MAX,
+};
+
+#define DPU_FORMAT_FLAG_YUV BIT(DPU_FORMAT_FLAG_YUV_BIT)
+#define DPU_FORMAT_FLAG_DX BIT(DPU_FORMAT_FLAG_DX_BIT)
+#define DPU_FORMAT_FLAG_COMPRESSED BIT(DPU_FORMAT_FLAG_COMPRESSED_BIT)
+#define DPU_FORMAT_IS_YUV(X) \
+ (test_bit(DPU_FORMAT_FLAG_YUV_BIT, (X)->flag))
+#define DPU_FORMAT_IS_DX(X) \
+ (test_bit(DPU_FORMAT_FLAG_DX_BIT, (X)->flag))
+#define DPU_FORMAT_IS_LINEAR(X) ((X)->fetch_mode == DPU_FETCH_LINEAR)
+#define DPU_FORMAT_IS_TILE(X) \
+ (((X)->fetch_mode == DPU_FETCH_UBWC) && \
+ !test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+#define DPU_FORMAT_IS_UBWC(X) \
+ (((X)->fetch_mode == DPU_FETCH_UBWC) && \
+ test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+
+#define DPU_BLEND_FG_ALPHA_FG_CONST (0 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_CONST (1 << 0)
+#define DPU_BLEND_FG_ALPHA_FG_PIXEL (2 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_PIXEL (3 << 0)
+#define DPU_BLEND_FG_INV_ALPHA (1 << 2)
+#define DPU_BLEND_FG_MOD_ALPHA (1 << 3)
+#define DPU_BLEND_FG_INV_MOD_ALPHA (1 << 4)
+#define DPU_BLEND_FG_TRANSP_EN (1 << 5)
+#define DPU_BLEND_BG_ALPHA_FG_CONST (0 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_CONST (1 << 8)
+#define DPU_BLEND_BG_ALPHA_FG_PIXEL (2 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_PIXEL (3 << 8)
+#define DPU_BLEND_BG_INV_ALPHA (1 << 10)
+#define DPU_BLEND_BG_MOD_ALPHA (1 << 11)
+#define DPU_BLEND_BG_INV_MOD_ALPHA (1 << 12)
+#define DPU_BLEND_BG_TRANSP_EN (1 << 13)
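+
+/*
+ * Illustrative example (not taken from this file): a premultiplied-alpha
+ * layer would typically be programmed as
+ *   DPU_BLEND_FG_ALPHA_FG_CONST | DPU_BLEND_BG_ALPHA_FG_PIXEL |
+ *   DPU_BLEND_BG_INV_ALPHA
+ * i.e. fg scaled by its constant alpha, bg scaled by (1 - fg pixel alpha).
+ */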
+
+#define DPU_VSYNC0_SOURCE_GPIO 0
+#define DPU_VSYNC1_SOURCE_GPIO 1
+#define DPU_VSYNC2_SOURCE_GPIO 2
+#define DPU_VSYNC_SOURCE_INTF_0 3
+#define DPU_VSYNC_SOURCE_INTF_1 4
+#define DPU_VSYNC_SOURCE_INTF_2 5
+#define DPU_VSYNC_SOURCE_INTF_3 6
+#define DPU_VSYNC_SOURCE_WD_TIMER_4 11
+#define DPU_VSYNC_SOURCE_WD_TIMER_3 12
+#define DPU_VSYNC_SOURCE_WD_TIMER_2 13
+#define DPU_VSYNC_SOURCE_WD_TIMER_1 14
+#define DPU_VSYNC_SOURCE_WD_TIMER_0 15
+
+enum dpu_hw_blk_type {
+ DPU_HW_BLK_TOP = 0,
+ DPU_HW_BLK_SSPP,
+ DPU_HW_BLK_LM,
+ DPU_HW_BLK_CTL,
+ DPU_HW_BLK_PINGPONG,
+ DPU_HW_BLK_INTF,
+ DPU_HW_BLK_WB,
+ DPU_HW_BLK_DSPP,
+ DPU_HW_BLK_MERGE_3D,
+ DPU_HW_BLK_DSC,
+ DPU_HW_BLK_MAX,
+};
+
+enum dpu_mdp {
+ MDP_TOP = 0x1,
+ MDP_MAX,
+};
+
+enum dpu_sspp {
+ SSPP_NONE,
+ SSPP_VIG0,
+ SSPP_VIG1,
+ SSPP_VIG2,
+ SSPP_VIG3,
+ SSPP_RGB0,
+ SSPP_RGB1,
+ SSPP_RGB2,
+ SSPP_RGB3,
+ SSPP_DMA0,
+ SSPP_DMA1,
+ SSPP_DMA2,
+ SSPP_DMA3,
+ SSPP_CURSOR0,
+ SSPP_CURSOR1,
+ SSPP_MAX
+};
+
+enum dpu_sspp_type {
+ SSPP_TYPE_VIG,
+ SSPP_TYPE_RGB,
+ SSPP_TYPE_DMA,
+ SSPP_TYPE_CURSOR,
+ SSPP_TYPE_MAX
+};
+
+enum dpu_lm {
+ LM_0 = 1,
+ LM_1,
+ LM_2,
+ LM_3,
+ LM_4,
+ LM_5,
+ LM_6,
+ LM_MAX
+};
+
+enum dpu_stage {
+ DPU_STAGE_BASE = 0,
+ DPU_STAGE_0,
+ DPU_STAGE_1,
+ DPU_STAGE_2,
+ DPU_STAGE_3,
+ DPU_STAGE_4,
+ DPU_STAGE_5,
+ DPU_STAGE_6,
+ DPU_STAGE_7,
+ DPU_STAGE_8,
+ DPU_STAGE_9,
+ DPU_STAGE_10,
+ DPU_STAGE_MAX
+};
+enum dpu_dspp {
+ DSPP_0 = 1,
+ DSPP_1,
+ DSPP_2,
+ DSPP_3,
+ DSPP_MAX
+};
+
+enum dpu_ctl {
+ CTL_0 = 1,
+ CTL_1,
+ CTL_2,
+ CTL_3,
+ CTL_4,
+ CTL_5,
+ CTL_MAX
+};
+
+enum dpu_dsc {
+ DSC_NONE = 0,
+ DSC_0,
+ DSC_1,
+ DSC_2,
+ DSC_3,
+ DSC_4,
+ DSC_5,
+ DSC_MAX
+};
+
+enum dpu_pingpong {
+ PINGPONG_0 = 1,
+ PINGPONG_1,
+ PINGPONG_2,
+ PINGPONG_3,
+ PINGPONG_4,
+ PINGPONG_5,
+ PINGPONG_S0,
+ PINGPONG_MAX
+};
+
+enum dpu_merge_3d {
+ MERGE_3D_0 = 1,
+ MERGE_3D_1,
+ MERGE_3D_2,
+ MERGE_3D_MAX
+};
+
+enum dpu_intf {
+ INTF_0 = 1,
+ INTF_1,
+ INTF_2,
+ INTF_3,
+ INTF_4,
+ INTF_5,
+ INTF_6,
+ INTF_MAX
+};
+
+/*
+ * Historically these values correspond to the values written to the
+ * DISP_INTF_SEL register, which had to be programmed manually. On newer
+ * MDP generations this register is a NOP, but we keep the values for
+ * historical reasons.
+ */
+enum dpu_intf_type {
+ INTF_NONE = 0x0,
+ INTF_DSI = 0x1,
+ INTF_HDMI = 0x3,
+ INTF_LCDC = 0x5,
+ /* old eDP found on 8x74 and 8x84 */
+ INTF_EDP = 0x9,
+ /* both DP and eDP, handled by the new DP driver */
+ INTF_DP = 0xa,
+
+ /* virtual interfaces */
+ INTF_WB = 0x100,
+};
+
+enum dpu_intf_mode {
+ INTF_MODE_NONE = 0,
+ INTF_MODE_CMD,
+ INTF_MODE_VIDEO,
+ INTF_MODE_WB_BLOCK,
+ INTF_MODE_WB_LINE,
+ INTF_MODE_MAX
+};
+
+enum dpu_wb {
+ WB_0 = 1,
+ WB_1,
+ WB_2,
+ WB_3,
+ WB_MAX
+};
+
+enum dpu_cwb {
+ CWB_0 = 0x1,
+ CWB_1,
+ CWB_2,
+ CWB_3,
+ CWB_MAX
+};
+
+enum dpu_wd_timer {
+ WD_TIMER_0 = 0x1,
+ WD_TIMER_1,
+ WD_TIMER_2,
+ WD_TIMER_3,
+ WD_TIMER_4,
+ WD_TIMER_5,
+ WD_TIMER_MAX
+};
+
+enum dpu_vbif {
+ VBIF_RT,
+ VBIF_NRT,
+ VBIF_MAX,
+};
+
+/**
+ * DPU HW component order color map
+ */
+enum {
+ C0_G_Y = 0,
+ C1_B_Cb = 1,
+ C2_R_Cr = 2,
+ C3_ALPHA = 3
+};
+
+/**
+ * enum dpu_plane_type - defines how the color components are packed
+ * @DPU_PLANE_INTERLEAVED : Color components in single plane
+ * @DPU_PLANE_PLANAR : Color component in separate planes
+ * @DPU_PLANE_PSEUDO_PLANAR : Chroma components interleaved in separate plane
+ */
+enum dpu_plane_type {
+ DPU_PLANE_INTERLEAVED,
+ DPU_PLANE_PLANAR,
+ DPU_PLANE_PSEUDO_PLANAR,
+};
+
+/**
+ * enum dpu_chroma_samp_type - chroma sub-sampling type
+ * @DPU_CHROMA_RGB : No chroma subsampling
+ * @DPU_CHROMA_H2V1 : Chroma pixels are horizontally subsampled
+ * @DPU_CHROMA_H1V2 : Chroma pixels are vertically subsampled
+ * @DPU_CHROMA_420 : 420 subsampling
+ */
+enum dpu_chroma_samp_type {
+ DPU_CHROMA_RGB,
+ DPU_CHROMA_H2V1,
+ DPU_CHROMA_H1V2,
+ DPU_CHROMA_420
+};
+
+/**
+ * enum dpu_fetch_type - defines how DPU HW fetches data
+ * @DPU_FETCH_LINEAR : fetch is line by line
+ * @DPU_FETCH_TILE : fetches data in Z order from a tile
+ * @DPU_FETCH_UBWC : fetch and decompress data
+ */
+enum dpu_fetch_type {
+ DPU_FETCH_LINEAR,
+ DPU_FETCH_TILE,
+ DPU_FETCH_UBWC
+};
+
+/**
+ * Value of enum chosen to fit the number of bits
+ * expected by the HW programming.
+ */
+enum {
+ COLOR_ALPHA_1BIT = 0,
+ COLOR_ALPHA_4BIT = 1,
+ COLOR_4BIT = 0,
+ COLOR_5BIT = 1, /* No 5-bit Alpha */
+ COLOR_6BIT = 2, /* 6-Bit Alpha also = 2 */
+ COLOR_8BIT = 3, /* 8-Bit Alpha also = 3 */
+};
+
+/**
+ * enum dpu_3d_blend_mode
+ * Describes how the 3d data is blended
+ * @BLEND_3D_NONE : 3d blending not enabled
+ * @BLEND_3D_FRAME_INT : Frame interleaving
+ * @BLEND_3D_H_ROW_INT : Horizontal row interleaving
+ * @BLEND_3D_V_ROW_INT : Vertical row interleaving
+ * @BLEND_3D_COL_INT : Column interleaving
+ * @BLEND_3D_MAX : Sentinel marking the number of modes
+ */
+enum dpu_3d_blend_mode {
+ BLEND_3D_NONE = 0,
+ BLEND_3D_FRAME_INT,
+ BLEND_3D_H_ROW_INT,
+ BLEND_3D_V_ROW_INT,
+ BLEND_3D_COL_INT,
+ BLEND_3D_MAX
+};
+
+/**
+ * struct dpu_format - defines the format configuration which
+ * allows DPU HW to correctly fetch and decode the format
+ * @base: base msm_format structure containing fourcc code
+ * @fetch_planes: how the color components are packed in pixel format
+ * @element: element color ordering
+ * @bits: element bit widths
+ * @chroma_sample: chroma sub-sampling type
+ * @unpack_align_msb: unpack aligned, 0 to LSB, 1 to MSB
+ * @unpack_tight: 0 for loose, 1 for tight
+ * @unpack_count: 0 = 1 component, 1 = 2 component
+ * @bpp: bytes per pixel
+ * @alpha_enable: whether the format has an alpha channel
+ * @num_planes: number of planes (including meta data planes)
+ * @fetch_mode: linear, tiled, or ubwc hw fetch behavior
+ * @flag: usage bit flags
+ * @tile_width: format tile width
+ * @tile_height: format tile height
+ */
+struct dpu_format {
+ struct msm_format base;
+ enum dpu_plane_type fetch_planes;
+ u8 element[DPU_MAX_PLANES];
+ u8 bits[DPU_MAX_PLANES];
+ enum dpu_chroma_samp_type chroma_sample;
+ u8 unpack_align_msb;
+ u8 unpack_tight;
+ u8 unpack_count;
+ u8 bpp;
+ u8 alpha_enable;
+ u8 num_planes;
+ enum dpu_fetch_type fetch_mode;
+ DECLARE_BITMAP(flag, DPU_FORMAT_FLAG_BIT_MAX);
+ u16 tile_width;
+ u16 tile_height;
+};
+#define to_dpu_format(x) container_of(x, struct dpu_format, base)
+
+/**
+ * struct dpu_hw_fmt_layout - format information of the source pixel data
+ * @format: pixel format parameters
+ * @num_planes: number of planes (including meta data planes)
+ * @width: image width
+ * @height: image height
+ * @total_size: total size in bytes
+ * @plane_addr: address of each plane
+ * @plane_size: length of each plane
+ * @plane_pitch: pitch of each plane
+ */
+struct dpu_hw_fmt_layout {
+ const struct dpu_format *format;
+ uint32_t num_planes;
+ uint32_t width;
+ uint32_t height;
+ uint32_t total_size;
+ uint32_t plane_addr[DPU_MAX_PLANES];
+ uint32_t plane_size[DPU_MAX_PLANES];
+ uint32_t plane_pitch[DPU_MAX_PLANES];
+};
+
+struct dpu_csc_cfg {
+ /* matrix coefficients in S15.16 format */
+ uint32_t csc_mv[DPU_CSC_MATRIX_COEFF_SIZE];
+ uint32_t csc_pre_bv[DPU_CSC_BIAS_SIZE];
+ uint32_t csc_post_bv[DPU_CSC_BIAS_SIZE];
+ uint32_t csc_pre_lv[DPU_CSC_CLAMP_SIZE];
+ uint32_t csc_post_lv[DPU_CSC_CLAMP_SIZE];
+};
+
+/**
+ * struct dpu_mdss_color - mdss color description
+ * color 0 : green
+ * color 1 : blue
+ * color 2 : red
+ * color 3 : alpha
+ */
+struct dpu_mdss_color {
+ u32 color_0;
+ u32 color_1;
+ u32 color_2;
+ u32 color_3;
+};
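+
+/*
+ * For example, the layer mixer border color packs these 12 bits per
+ * component: color_0/color_1 into LM_BORDER_COLOR_0 and color_2/color_3
+ * into LM_BORDER_COLOR_1 (see dpu_hw_lm.c).
+ */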
+
+/*
+ * Define bit masks for h/w logging.
+ */
+#define DPU_DBG_MASK_NONE (1 << 0)
+#define DPU_DBG_MASK_INTF (1 << 1)
+#define DPU_DBG_MASK_LM (1 << 2)
+#define DPU_DBG_MASK_CTL (1 << 3)
+#define DPU_DBG_MASK_PINGPONG (1 << 4)
+#define DPU_DBG_MASK_SSPP (1 << 5)
+#define DPU_DBG_MASK_WB (1 << 6)
+#define DPU_DBG_MASK_TOP (1 << 7)
+#define DPU_DBG_MASK_VBIF (1 << 8)
+#define DPU_DBG_MASK_ROT (1 << 9)
+#define DPU_DBG_MASK_DSPP (1 << 10)
+#define DPU_DBG_MASK_DSC (1 << 11)
+
+#endif /* _DPU_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
new file mode 100644
index 000000000..def0a87fd
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/iopoll.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_merge3d.h"
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+
+#define MERGE_3D_MUX 0x000
+#define MERGE_3D_MODE 0x004
+
+static const struct dpu_merge_3d_cfg *_merge_3d_offset(enum dpu_merge_3d idx,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->merge_3d_count; i++) {
+ if (idx == m->merge_3d[i].id) {
+ b->blk_addr = addr + m->merge_3d[i].base;
+ b->log_mask = DPU_DBG_MASK_PINGPONG;
+ return &m->merge_3d[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void dpu_hw_merge_3d_setup_3d_mode(struct dpu_hw_merge_3d *merge_3d,
+ enum dpu_3d_blend_mode mode_3d)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 data;
+
+ c = &merge_3d->hw;
+ if (mode_3d == BLEND_3D_NONE) {
+ DPU_REG_WRITE(c, MERGE_3D_MODE, 0);
+ DPU_REG_WRITE(c, MERGE_3D_MUX, 0);
+ } else {
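+		/*
+		 * bit 0 enables merging; bits [2:1] select the mode, biased
+		 * by one since BLEND_3D_NONE occupies the first enum slot
+		 */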
+ data = BIT(0) | ((mode_3d - 1) << 1);
+ DPU_REG_WRITE(c, MERGE_3D_MODE, data);
+ }
+}
+
+static void _setup_merge_3d_ops(struct dpu_hw_merge_3d *c,
+ unsigned long features)
+{
+ c->ops.setup_3d_mode = dpu_hw_merge_3d_setup_3d_mode;
+}
+
+struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(enum dpu_merge_3d idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_merge_3d *c;
+ const struct dpu_merge_3d_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _merge_3d_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->idx = idx;
+ c->caps = cfg;
+ _setup_merge_3d_ops(c, c->caps->features);
+
+ return c;
+}
+
+void dpu_hw_merge_3d_destroy(struct dpu_hw_merge_3d *hw)
+{
+ kfree(hw);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
new file mode 100644
index 000000000..81fd1d5f7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_MERGE3D_H
+#define _DPU_HW_MERGE3D_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+struct dpu_hw_merge_3d;
+
+/**
+ * struct dpu_hw_merge_3d_ops : Interface to the merge_3d HW driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @setup_3d_mode : enable 3D merge
+ */
+struct dpu_hw_merge_3d_ops {
+ void (*setup_3d_mode)(struct dpu_hw_merge_3d *merge_3d,
+ enum dpu_3d_blend_mode mode_3d);
+};
+
+struct dpu_hw_merge_3d {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* merge_3d */
+ enum dpu_merge_3d idx;
+ const struct dpu_merge_3d_cfg *caps;
+
+ /* ops */
+ struct dpu_hw_merge_3d_ops ops;
+};
+
+/**
+ * to_dpu_hw_merge_3d - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_merge_3d *to_dpu_hw_merge_3d(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_merge_3d, base);
+}
+
+/**
+ * dpu_hw_merge_3d_init - initializes the merge_3d driver for the passed
+ * merge_3d idx.
+ * @idx: merge_3d index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ * Returns: Error code or allocated dpu_hw_merge_3d context
+ */
+struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(enum dpu_merge_3d idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_merge_3d_destroy - destroys merge_3d driver context
+ * should be called to free the context
+ * @pp: Pointer to PP driver context returned by dpu_hw_merge_3d_init
+ */
+void dpu_hw_merge_3d_destroy(struct dpu_hw_merge_3d *pp);
+
+#endif /*_DPU_HW_MERGE3D_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
new file mode 100644
index 000000000..0fcad9760
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/iopoll.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+
+#define PP_TEAR_CHECK_EN 0x000
+#define PP_SYNC_CONFIG_VSYNC 0x004
+#define PP_SYNC_CONFIG_HEIGHT 0x008
+#define PP_SYNC_WRCOUNT 0x00C
+#define PP_VSYNC_INIT_VAL 0x010
+#define PP_INT_COUNT_VAL 0x014
+#define PP_SYNC_THRESH 0x018
+#define PP_START_POS 0x01C
+#define PP_RD_PTR_IRQ 0x020
+#define PP_WR_PTR_IRQ 0x024
+#define PP_OUT_LINE_COUNT 0x028
+#define PP_LINE_COUNT 0x02C
+#define PP_AUTOREFRESH_CONFIG 0x030
+
+#define PP_FBC_MODE 0x034
+#define PP_FBC_BUDGET_CTL 0x038
+#define PP_FBC_LOSSY_MODE 0x03C
+#define PP_DSC_MODE 0x0a0
+#define PP_DCE_DATA_IN_SWAP 0x0ac
+#define PP_DCE_DATA_OUT_SWAP 0x0c8
+
+#define PP_DITHER_EN 0x000
+#define PP_DITHER_BITDEPTH 0x004
+#define PP_DITHER_MATRIX 0x008
+
+#define DITHER_DEPTH_MAP_INDEX 9
+
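+/*
+ * Maps a component bit depth (the array index) to the 2-bit field value
+ * programmed into PP_DITHER_BITDEPTH: depths up to 6 encode as 0,
+ * 7 as 1 and 8 as 2.
+ */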
+static u32 dither_depth_map[DITHER_DEPTH_MAP_INDEX] = {
+ 0, 0, 0, 0, 0, 0, 0, 1, 2
+};
+
+static const struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->pingpong_count; i++) {
+ if (pp == m->pingpong[i].id) {
+ b->blk_addr = addr + m->pingpong[i].base;
+ b->log_mask = DPU_DBG_MASK_PINGPONG;
+ return &m->pingpong[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void dpu_hw_pp_setup_dither(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_dither_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 i, base, data = 0;
+
+ c = &pp->hw;
+ base = pp->caps->sblk->dither.base;
+ if (!cfg) {
+ DPU_REG_WRITE(c, base + PP_DITHER_EN, 0);
+ return;
+ }
+
+ data = dither_depth_map[cfg->c0_bitdepth] & REG_MASK(2);
+ data |= (dither_depth_map[cfg->c1_bitdepth] & REG_MASK(2)) << 2;
+ data |= (dither_depth_map[cfg->c2_bitdepth] & REG_MASK(2)) << 4;
+ data |= (dither_depth_map[cfg->c3_bitdepth] & REG_MASK(2)) << 6;
+ data |= (cfg->temporal_en) ? (1 << 8) : 0;
+
+ DPU_REG_WRITE(c, base + PP_DITHER_BITDEPTH, data);
+
+ for (i = 0; i < DITHER_MATRIX_SZ - 3; i += 4) {
+ data = (cfg->matrix[i] & REG_MASK(4)) |
+ ((cfg->matrix[i + 1] & REG_MASK(4)) << 4) |
+ ((cfg->matrix[i + 2] & REG_MASK(4)) << 8) |
+ ((cfg->matrix[i + 3] & REG_MASK(4)) << 12);
+ DPU_REG_WRITE(c, base + PP_DITHER_MATRIX + i, data);
+ }
+ DPU_REG_WRITE(c, base + PP_DITHER_EN, 1);
+}
+
+static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_tear_check *te)
+{
+ struct dpu_hw_blk_reg_map *c;
+ int cfg;
+
+ if (!pp || !te)
+ return -EINVAL;
+ c = &pp->hw;
+
+	cfg = BIT(19); /* VSYNC_COUNTER_EN */
+ if (te->hw_vsync_mode)
+ cfg |= BIT(20);
+
+ cfg |= te->vsync_count;
+
+ DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+ DPU_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
+ DPU_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val);
+ DPU_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq);
+ DPU_REG_WRITE(c, PP_START_POS, te->start_pos);
+ DPU_REG_WRITE(c, PP_SYNC_THRESH,
+ ((te->sync_threshold_continue << 16) |
+ te->sync_threshold_start));
+ DPU_REG_WRITE(c, PP_SYNC_WRCOUNT,
+ (te->start_pos + te->sync_threshold_start + 1));
+
+ return 0;
+}
+
+static void dpu_hw_pp_setup_autorefresh_config(struct dpu_hw_pingpong *pp,
+ u32 frame_count, bool enable)
+{
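+	/* BIT(31) is the autorefresh enable; the low bits hold the frame count */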
+ DPU_REG_WRITE(&pp->hw, PP_AUTOREFRESH_CONFIG,
+ enable ? (BIT(31) | frame_count) : 0);
+}
+
+/*
+ * dpu_hw_pp_get_autorefresh_config - Get autorefresh config from HW
+ * @pp: DPU pingpong structure
+ * @frame_count: Used to return the current frame count from hw
+ *
+ * Returns: True if autorefresh enabled, false if disabled.
+ */
+static bool dpu_hw_pp_get_autorefresh_config(struct dpu_hw_pingpong *pp,
+ u32 *frame_count)
+{
+	u32 val = DPU_REG_READ(&pp->hw, PP_AUTOREFRESH_CONFIG);
+
+	if (frame_count != NULL)
+		*frame_count = val & 0xffff;
+	return !!(val & BIT(31));
+}
+
+static int dpu_hw_pp_poll_timeout_wr_ptr(struct dpu_hw_pingpong *pp,
+ u32 timeout_us)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 val;
+ int rc;
+
+ if (!pp)
+ return -EINVAL;
+
+ c = &pp->hw;
+ rc = readl_poll_timeout(c->blk_addr + PP_LINE_COUNT,
+ val, (val & 0xffff) >= 1, 10, timeout_us);
+
+ return rc;
+}
+
+static int dpu_hw_pp_enable_te(struct dpu_hw_pingpong *pp, bool enable)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!pp)
+ return -EINVAL;
+ c = &pp->hw;
+
+ DPU_REG_WRITE(c, PP_TEAR_CHECK_EN, enable);
+ return 0;
+}
+
+static int dpu_hw_pp_connect_external_te(struct dpu_hw_pingpong *pp,
+ bool enable_external_te)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 cfg;
+	int orig;
+
+	if (!pp)
+		return -EINVAL;
+
+	c = &pp->hw;
+ cfg = DPU_REG_READ(c, PP_SYNC_CONFIG_VSYNC);
+ orig = (bool)(cfg & BIT(20));
+ if (enable_external_te)
+ cfg |= BIT(20);
+ else
+ cfg &= ~BIT(20);
+ DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+ trace_dpu_pp_connect_ext_te(pp->idx - PINGPONG_0, cfg);
+
+ return orig;
+}
+
+static int dpu_hw_pp_get_vsync_info(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_pp_vsync_info *info)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 val;
+
+ if (!pp || !info)
+ return -EINVAL;
+ c = &pp->hw;
+
+ val = DPU_REG_READ(c, PP_VSYNC_INIT_VAL);
+ info->rd_ptr_init_val = val & 0xffff;
+
+ val = DPU_REG_READ(c, PP_INT_COUNT_VAL);
+ info->rd_ptr_frame_count = (val & 0xffff0000) >> 16;
+ info->rd_ptr_line_count = val & 0xffff;
+
+ val = DPU_REG_READ(c, PP_LINE_COUNT);
+ info->wr_ptr_line_count = val & 0xffff;
+
+ return 0;
+}
+
+static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 height, init;
+	u32 line = 0xFFFF;
+
+	if (!pp)
+		return 0;
+
+	c = &pp->hw;
+
+ init = DPU_REG_READ(c, PP_VSYNC_INIT_VAL) & 0xFFFF;
+ height = DPU_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
+
+ if (height < init)
+ return line;
+
+ line = DPU_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
+
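+	/* normalize against the programmed init value; the 16-bit counter wraps */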
+ if (line < init)
+ line += (0xFFFF - init);
+ else
+ line -= init;
+
+ return line;
+}
+
+static int dpu_hw_pp_dsc_enable(struct dpu_hw_pingpong *pp)
+{
+ struct dpu_hw_blk_reg_map *c = &pp->hw;
+
+ DPU_REG_WRITE(c, PP_DSC_MODE, 1);
+ return 0;
+}
+
+static void dpu_hw_pp_dsc_disable(struct dpu_hw_pingpong *pp)
+{
+ struct dpu_hw_blk_reg_map *c = &pp->hw;
+
+ DPU_REG_WRITE(c, PP_DSC_MODE, 0);
+}
+
+static int dpu_hw_pp_setup_dsc(struct dpu_hw_pingpong *pp)
+{
+ struct dpu_hw_blk_reg_map *pp_c = &pp->hw;
+ int data;
+
+ data = DPU_REG_READ(pp_c, PP_DCE_DATA_OUT_SWAP);
+ data |= BIT(18); /* endian flip */
+ DPU_REG_WRITE(pp_c, PP_DCE_DATA_OUT_SWAP, data);
+ return 0;
+}
+
+static void _setup_pingpong_ops(struct dpu_hw_pingpong *c,
+ unsigned long features)
+{
+ c->ops.setup_tearcheck = dpu_hw_pp_setup_te_config;
+ c->ops.enable_tearcheck = dpu_hw_pp_enable_te;
+ c->ops.connect_external_te = dpu_hw_pp_connect_external_te;
+ c->ops.get_vsync_info = dpu_hw_pp_get_vsync_info;
+ c->ops.setup_autorefresh = dpu_hw_pp_setup_autorefresh_config;
+ c->ops.get_autorefresh = dpu_hw_pp_get_autorefresh_config;
+ c->ops.poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
+ c->ops.get_line_count = dpu_hw_pp_get_line_count;
+ c->ops.setup_dsc = dpu_hw_pp_setup_dsc;
+ c->ops.enable_dsc = dpu_hw_pp_dsc_enable;
+ c->ops.disable_dsc = dpu_hw_pp_dsc_disable;
+
+ if (test_bit(DPU_PINGPONG_DITHER, &features))
+ c->ops.setup_dither = dpu_hw_pp_setup_dither;
+}
+
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_pingpong *c;
+ const struct dpu_pingpong_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _pingpong_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->idx = idx;
+ c->caps = cfg;
+ _setup_pingpong_ops(c, c->caps->features);
+
+ return c;
+}
+
+void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
+{
+ kfree(pp);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
new file mode 100644
index 000000000..c00223441
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_PINGPONG_H
+#define _DPU_HW_PINGPONG_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+#define DITHER_MATRIX_SZ 16
+
+struct dpu_hw_pingpong;
+
+struct dpu_hw_tear_check {
+ /*
+	 * This is the ratio of the MDP vsync clock frequency (Hz) to the
+	 * panel refresh rate, divided by the number of lines
+ */
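+	/*
+	 * Worked example (assumed numbers, for illustration only): with a
+	 * 19.2 MHz vsync counter clock, a 60 Hz panel and 2400 total lines,
+	 * vsync_count = 19200000 / (60 * 2400) ~= 133.
+	 */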
+ u32 vsync_count;
+ u32 sync_cfg_height;
+ u32 vsync_init_val;
+ u32 sync_threshold_start;
+ u32 sync_threshold_continue;
+ u32 start_pos;
+ u32 rd_ptr_irq;
+ u8 hw_vsync_mode;
+};
+
+struct dpu_hw_pp_vsync_info {
+ u32 rd_ptr_init_val; /* value of rd pointer at vsync edge */
+ u32 rd_ptr_frame_count; /* num frames sent since enabling interface */
+ u32 rd_ptr_line_count; /* current line on panel (rd ptr) */
+ u32 wr_ptr_line_count; /* current line within pp fifo (wr ptr) */
+};
+
+/**
+ * struct dpu_hw_dither_cfg - dither feature structure
+ * @flags: for customizing operations
+ * @temporal_en: temporal dither enable
+ * @c0_bitdepth: c0 component bit depth
+ * @c1_bitdepth: c1 component bit depth
+ * @c2_bitdepth: c2 component bit depth
+ * @c3_bitdepth: c3 component bit depth
+ * @matrix: dither strength matrix
+ */
+struct dpu_hw_dither_cfg {
+ u64 flags;
+ u32 temporal_en;
+ u32 c0_bitdepth;
+ u32 c1_bitdepth;
+ u32 c2_bitdepth;
+ u32 c3_bitdepth;
+ u32 matrix[DITHER_MATRIX_SZ];
+};
+
+/**
+ * struct dpu_hw_pingpong_ops : Interface to the pingpong HW driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @setup_tearcheck : program tear check values
+ * @enable_tearcheck : enables tear check
+ * @get_vsync_info : retrieves timing info of the panel
+ * @setup_autorefresh : configure and enable the autorefresh config
+ * @get_autorefresh : retrieve autorefresh config from hardware
+ * @setup_dither : function to program the dither hw block
+ * @get_line_count: obtain current vertical line counter
+ */
+struct dpu_hw_pingpong_ops {
+ /**
+	 * enables vsync generation, sets up the initial value of the
+	 * read pointer and programs the tear check configuration
+ */
+ int (*setup_tearcheck)(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_tear_check *cfg);
+
+ /**
+ * enables tear check block
+ */
+ int (*enable_tearcheck)(struct dpu_hw_pingpong *pp,
+ bool enable);
+
+ /**
+ * read, modify, write to either set or clear listening to external TE
+ * @Return: 1 if TE was originally connected, 0 if not, or -ERROR
+ */
+ int (*connect_external_te)(struct dpu_hw_pingpong *pp,
+ bool enable_external_te);
+
+ /**
+ * provides the programmed and current
+ * line_count
+ */
+ int (*get_vsync_info)(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_pp_vsync_info *info);
+
+ /**
+ * configure and enable the autorefresh config
+ */
+ void (*setup_autorefresh)(struct dpu_hw_pingpong *pp,
+ u32 frame_count, bool enable);
+
+ /**
+ * retrieve autorefresh config from hardware
+ */
+ bool (*get_autorefresh)(struct dpu_hw_pingpong *pp,
+ u32 *frame_count);
+
+ /**
+ * poll until write pointer transmission starts
+ * @Return: 0 on success, -ETIMEDOUT on timeout
+ */
+ int (*poll_timeout_wr_ptr)(struct dpu_hw_pingpong *pp, u32 timeout_us);
+
+ /**
+ * Obtain current vertical line counter
+ */
+ u32 (*get_line_count)(struct dpu_hw_pingpong *pp);
+
+ /**
+	 * Setup dither matrix for pingpong block
+ */
+ void (*setup_dither)(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_dither_cfg *cfg);
+ /**
+ * Enable DSC
+ */
+ int (*enable_dsc)(struct dpu_hw_pingpong *pp);
+
+ /**
+ * Disable DSC
+ */
+ void (*disable_dsc)(struct dpu_hw_pingpong *pp);
+
+ /**
+ * Setup DSC
+ */
+ int (*setup_dsc)(struct dpu_hw_pingpong *pp);
+};
+
+struct dpu_hw_merge_3d;
+
+struct dpu_hw_pingpong {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* pingpong */
+ enum dpu_pingpong idx;
+ const struct dpu_pingpong_cfg *caps;
+ struct dpu_hw_merge_3d *merge_3d;
+
+ /* ops */
+ struct dpu_hw_pingpong_ops ops;
+};
+
+/**
+ * to_dpu_hw_pingpong - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_pingpong, base);
+}
+
+/**
+ * dpu_hw_pingpong_init - initializes the pingpong driver for the passed
+ * pingpong idx.
+ * @idx: Pingpong index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ * Returns: Error code or allocated dpu_hw_pingpong context
+ */
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_pingpong_destroy - destroys pingpong driver context
+ * should be called to free the context
+ * @pp: Pointer to PP driver context returned by dpu_hw_pingpong_init
+ */
+void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp);
+
+#endif /*_DPU_HW_PINGPONG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
new file mode 100644
index 000000000..691c471b0
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -0,0 +1,815 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_kms.h"
+
+#include <drm/drm_file.h>
+
+#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
+
+/* DPU_SSPP_SRC */
+#define SSPP_SRC_SIZE 0x00
+#define SSPP_SRC_XY 0x08
+#define SSPP_OUT_SIZE 0x0c
+#define SSPP_OUT_XY 0x10
+#define SSPP_SRC0_ADDR 0x14
+#define SSPP_SRC1_ADDR 0x18
+#define SSPP_SRC2_ADDR 0x1C
+#define SSPP_SRC3_ADDR 0x20
+#define SSPP_SRC_YSTRIDE0 0x24
+#define SSPP_SRC_YSTRIDE1 0x28
+#define SSPP_SRC_FORMAT 0x30
+#define SSPP_SRC_UNPACK_PATTERN 0x34
+#define SSPP_SRC_OP_MODE 0x38
+
+/* SSPP_MULTIRECT*/
+#define SSPP_SRC_SIZE_REC1 0x16C
+#define SSPP_SRC_XY_REC1 0x168
+#define SSPP_OUT_SIZE_REC1 0x160
+#define SSPP_OUT_XY_REC1 0x164
+#define SSPP_SRC_FORMAT_REC1 0x174
+#define SSPP_SRC_UNPACK_PATTERN_REC1 0x178
+#define SSPP_SRC_OP_MODE_REC1 0x17C
+#define SSPP_MULTIRECT_OPMODE 0x170
+#define SSPP_SRC_CONSTANT_COLOR_REC1 0x180
+#define SSPP_EXCL_REC_SIZE_REC1 0x184
+#define SSPP_EXCL_REC_XY_REC1 0x188
+
+#define MDSS_MDP_OP_DEINTERLACE BIT(22)
+#define MDSS_MDP_OP_DEINTERLACE_ODD BIT(23)
+#define MDSS_MDP_OP_IGC_ROM_1 BIT(18)
+#define MDSS_MDP_OP_IGC_ROM_0 BIT(17)
+#define MDSS_MDP_OP_IGC_EN BIT(16)
+#define MDSS_MDP_OP_FLIP_UD BIT(14)
+#define MDSS_MDP_OP_FLIP_LR BIT(13)
+#define MDSS_MDP_OP_BWC_EN BIT(0)
+#define MDSS_MDP_OP_PE_OVERRIDE BIT(31)
+#define MDSS_MDP_OP_BWC_LOSSLESS (0 << 1)
+#define MDSS_MDP_OP_BWC_Q_HIGH (1 << 1)
+#define MDSS_MDP_OP_BWC_Q_MED (2 << 1)
+
+#define SSPP_SRC_CONSTANT_COLOR 0x3c
+#define SSPP_EXCL_REC_CTL 0x40
+#define SSPP_UBWC_STATIC_CTRL 0x44
+#define SSPP_FETCH_CONFIG 0x048
+#define SSPP_DANGER_LUT 0x60
+#define SSPP_SAFE_LUT 0x64
+#define SSPP_CREQ_LUT 0x68
+#define SSPP_QOS_CTRL 0x6C
+#define SSPP_DECIMATION_CONFIG 0xB4
+#define SSPP_SRC_ADDR_SW_STATUS 0x70
+#define SSPP_CREQ_LUT_0 0x74
+#define SSPP_CREQ_LUT_1 0x78
+#define SSPP_SW_PIX_EXT_C0_LR 0x100
+#define SSPP_SW_PIX_EXT_C0_TB 0x104
+#define SSPP_SW_PIX_EXT_C0_REQ_PIXELS 0x108
+#define SSPP_SW_PIX_EXT_C1C2_LR 0x110
+#define SSPP_SW_PIX_EXT_C1C2_TB 0x114
+#define SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS 0x118
+#define SSPP_SW_PIX_EXT_C3_LR 0x120
+#define SSPP_SW_PIX_EXT_C3_TB 0x124
+#define SSPP_SW_PIX_EXT_C3_REQ_PIXELS 0x128
+#define SSPP_TRAFFIC_SHAPER 0x130
+#define SSPP_CDP_CNTL 0x134
+#define SSPP_UBWC_ERROR_STATUS 0x138
+#define SSPP_CDP_CNTL_REC1 0x13c
+#define SSPP_TRAFFIC_SHAPER_PREFILL 0x150
+#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL 0x154
+#define SSPP_TRAFFIC_SHAPER_REC1 0x158
+#define SSPP_EXCL_REC_SIZE 0x1B4
+#define SSPP_EXCL_REC_XY 0x1B8
+#define SSPP_VIG_OP_MODE 0x0
+#define SSPP_VIG_CSC_10_OP_MODE 0x0
+#define SSPP_TRAFFIC_SHAPER_BPC_MAX 0xFF
+
+/* SSPP_QOS_CTRL */
+#define SSPP_QOS_CTRL_VBLANK_EN BIT(16)
+#define SSPP_QOS_CTRL_DANGER_SAFE_EN BIT(0)
+#define SSPP_QOS_CTRL_DANGER_VBLANK_MASK 0x3
+#define SSPP_QOS_CTRL_DANGER_VBLANK_OFF 4
+#define SSPP_QOS_CTRL_CREQ_VBLANK_MASK 0x3
+#define SSPP_QOS_CTRL_CREQ_VBLANK_OFF 20
+
+/* DPU_SSPP_SCALER_QSEED2 */
+#define SCALE_CONFIG 0x04
+#define COMP0_3_PHASE_STEP_X 0x10
+#define COMP0_3_PHASE_STEP_Y 0x14
+#define COMP1_2_PHASE_STEP_X 0x18
+#define COMP1_2_PHASE_STEP_Y 0x1c
+#define COMP0_3_INIT_PHASE_X 0x20
+#define COMP0_3_INIT_PHASE_Y 0x24
+#define COMP1_2_INIT_PHASE_X 0x28
+#define COMP1_2_INIT_PHASE_Y 0x2C
+#define VIG_0_QSEED2_SHARP 0x30
+
+/*
+ * Definitions for ViG op modes
+ */
+#define VIG_OP_CSC_DST_DATAFMT BIT(19)
+#define VIG_OP_CSC_SRC_DATAFMT BIT(18)
+#define VIG_OP_CSC_EN BIT(17)
+#define VIG_OP_MEM_PROT_CONT BIT(15)
+#define VIG_OP_MEM_PROT_VAL BIT(14)
+#define VIG_OP_MEM_PROT_SAT BIT(13)
+#define VIG_OP_MEM_PROT_HUE BIT(12)
+#define VIG_OP_HIST BIT(8)
+#define VIG_OP_SKY_COL BIT(7)
+#define VIG_OP_FOIL BIT(6)
+#define VIG_OP_SKIN_COL BIT(5)
+#define VIG_OP_PA_EN BIT(4)
+#define VIG_OP_PA_SAT_ZERO_EXP BIT(2)
+#define VIG_OP_MEM_PROT_BLEND BIT(1)
+
+/*
+ * Definitions for CSC 10 op modes
+ */
+#define VIG_CSC_10_SRC_DATAFMT BIT(1)
+#define VIG_CSC_10_EN BIT(0)
+#define CSC_10BIT_OFFSET 4
+
+/* traffic shaper clock in Hz */
+#define TS_CLK 19200000
+
+
+static int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
+ int s_id,
+ u32 *idx)
+{
+ int rc = 0;
+ const struct dpu_sspp_sub_blks *sblk;
+
+ if (!ctx || !ctx->cap || !ctx->cap->sblk)
+ return -EINVAL;
+
+ sblk = ctx->cap->sblk;
+
+ switch (s_id) {
+ case DPU_SSPP_SRC:
+ *idx = sblk->src_blk.base;
+ break;
+ case DPU_SSPP_SCALER_QSEED2:
+ case DPU_SSPP_SCALER_QSEED3:
+ case DPU_SSPP_SCALER_RGB:
+ *idx = sblk->scaler_blk.base;
+ break;
+ case DPU_SSPP_CSC:
+ case DPU_SSPP_CSC_10BIT:
+ *idx = sblk->csc_blk.base;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static void dpu_hw_sspp_setup_multirect(struct dpu_hw_pipe *ctx,
+ enum dpu_sspp_multirect_index index,
+ enum dpu_sspp_multirect_mode mode)
+{
+ u32 mode_mask;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (index == DPU_SSPP_RECT_SOLO) {
+ /**
+ * if rect index is RECT_SOLO, we cannot expect a
+ * virtual plane sharing the same SSPP id. So we go
+ * and disable multirect
+ */
+ mode_mask = 0;
+ } else {
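+		/*
+		 * the rect index doubles as its enable bit (RECT_0 = BIT(0),
+		 * RECT_1 = BIT(1)); BIT(2) picks time-multiplex over parallel
+		 */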
+ mode_mask = DPU_REG_READ(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx);
+ mode_mask |= index;
+ if (mode == DPU_SSPP_MULTIRECT_TIME_MX)
+ mode_mask |= BIT(2);
+ else
+ mode_mask &= ~BIT(2);
+ }
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx, mode_mask);
+}
+
+static void _sspp_setup_opmode(struct dpu_hw_pipe *ctx,
+ u32 mask, u8 en)
+{
+ u32 idx;
+ u32 opmode;
+
+ if (!test_bit(DPU_SSPP_SCALER_QSEED2, &ctx->cap->features) ||
+ _sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED2, &idx) ||
+ !test_bit(DPU_SSPP_CSC, &ctx->cap->features))
+ return;
+
+ opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_OP_MODE + idx);
+
+ if (en)
+ opmode |= mask;
+ else
+ opmode &= ~mask;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_VIG_OP_MODE + idx, opmode);
+}
+
+static void _sspp_setup_csc10_opmode(struct dpu_hw_pipe *ctx,
+ u32 mask, u8 en)
+{
+ u32 idx;
+ u32 opmode;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC_10BIT, &idx))
+ return;
+
+ opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx);
+ if (en)
+ opmode |= mask;
+ else
+ opmode &= ~mask;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx, opmode);
+}
+
+/*
+ * Setup source pixel format and flip configuration
+ */
+static void dpu_hw_sspp_setup_format(struct dpu_hw_pipe *ctx,
+ const struct dpu_format *fmt, u32 flags,
+ enum dpu_sspp_multirect_index rect_mode)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 chroma_samp, unpack, src_format;
+ u32 opmode = 0;
+ u32 fast_clear = 0;
+ u32 op_mode_off, unpack_pat_off, format_off;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !fmt)
+ return;
+
+ if (rect_mode == DPU_SSPP_RECT_SOLO || rect_mode == DPU_SSPP_RECT_0) {
+ op_mode_off = SSPP_SRC_OP_MODE;
+ unpack_pat_off = SSPP_SRC_UNPACK_PATTERN;
+ format_off = SSPP_SRC_FORMAT;
+ } else {
+ op_mode_off = SSPP_SRC_OP_MODE_REC1;
+ unpack_pat_off = SSPP_SRC_UNPACK_PATTERN_REC1;
+ format_off = SSPP_SRC_FORMAT_REC1;
+ }
+
+ c = &ctx->hw;
+ opmode = DPU_REG_READ(c, op_mode_off + idx);
+ opmode &= ~(MDSS_MDP_OP_FLIP_LR | MDSS_MDP_OP_FLIP_UD |
+ MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE);
+
+ if (flags & DPU_SSPP_FLIP_LR)
+ opmode |= MDSS_MDP_OP_FLIP_LR;
+ if (flags & DPU_SSPP_FLIP_UD)
+ opmode |= MDSS_MDP_OP_FLIP_UD;
+
+ chroma_samp = fmt->chroma_sample;
+ if (flags & DPU_SSPP_SOURCE_ROTATED_90) {
+ if (chroma_samp == DPU_CHROMA_H2V1)
+ chroma_samp = DPU_CHROMA_H1V2;
+ else if (chroma_samp == DPU_CHROMA_H1V2)
+ chroma_samp = DPU_CHROMA_H2V1;
+ }
+
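+	/*
+	 * SRC_FORMAT packs the chroma sampling at bit 23, the plane
+	 * arrangement at bit 19 and the per-component bit widths in [7:0]
+	 */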
+ src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) |
+ (fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) |
+ (fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0);
+
+ if (flags & DPU_SSPP_ROT_90)
+ src_format |= BIT(11); /* ROT90 */
+
+ if (fmt->alpha_enable && fmt->fetch_planes == DPU_PLANE_INTERLEAVED)
+ src_format |= BIT(8); /* SRCC3_EN */
+
+ if (flags & DPU_SSPP_SOLID_FILL)
+ src_format |= BIT(22);
+
+ unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
+ (fmt->element[1] << 8) | (fmt->element[0] << 0);
+ src_format |= ((fmt->unpack_count - 1) << 12) |
+ (fmt->unpack_tight << 17) |
+ (fmt->unpack_align_msb << 18) |
+ ((fmt->bpp - 1) << 9);
+
+ if (fmt->fetch_mode != DPU_FETCH_LINEAR) {
+ if (DPU_FORMAT_IS_UBWC(fmt))
+ opmode |= MDSS_MDP_OP_BWC_EN;
+ src_format |= (fmt->fetch_mode & 3) << 30; /*FRAME_FORMAT */
+ DPU_REG_WRITE(c, SSPP_FETCH_CONFIG,
+ DPU_FETCH_CONFIG_RESET_VALUE |
+ ctx->mdp->highest_bank_bit << 18);
+ switch (ctx->catalog->caps->ubwc_version) {
+ case DPU_HW_UBWC_VER_10:
+ /* TODO: UBWC v1 case */
+ break;
+ case DPU_HW_UBWC_VER_20:
+ fast_clear = fmt->alpha_enable ? BIT(31) : 0;
+ DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+ fast_clear | (ctx->mdp->ubwc_swizzle) |
+ (ctx->mdp->highest_bank_bit << 4));
+ break;
+ case DPU_HW_UBWC_VER_30:
+ DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+ BIT(30) | (ctx->mdp->ubwc_swizzle) |
+ (ctx->mdp->highest_bank_bit << 4));
+ break;
+ case DPU_HW_UBWC_VER_40:
+ DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+ DPU_FORMAT_IS_YUV(fmt) ? 0 : BIT(30));
+ break;
+ }
+ }
+
+ opmode |= MDSS_MDP_OP_PE_OVERRIDE;
+
+ /* if this is YUV pixel format, enable CSC */
+ if (DPU_FORMAT_IS_YUV(fmt))
+ src_format |= BIT(15);
+
+ if (DPU_FORMAT_IS_DX(fmt))
+ src_format |= BIT(14);
+
+ /* update scaler opmode, if appropriate */
+ if (test_bit(DPU_SSPP_CSC, &ctx->cap->features))
+ _sspp_setup_opmode(ctx, VIG_OP_CSC_EN | VIG_OP_CSC_SRC_DATAFMT,
+ DPU_FORMAT_IS_YUV(fmt));
+ else if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features))
+ _sspp_setup_csc10_opmode(ctx,
+ VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT,
+ DPU_FORMAT_IS_YUV(fmt));
+
+ DPU_REG_WRITE(c, format_off + idx, src_format);
+ DPU_REG_WRITE(c, unpack_pat_off + idx, unpack);
+ DPU_REG_WRITE(c, op_mode_off + idx, opmode);
+
+ /* clear previous UBWC error */
+ DPU_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS + idx, BIT(31));
+}
+
+static void dpu_hw_sspp_setup_pe_config(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pixel_ext *pe_ext)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u8 color;
+ u32 lr_pe[4], tb_pe[4], tot_req_pixels[4];
+ const u32 bytemask = 0xff;
+ const u32 shortmask = 0xffff;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !pe_ext)
+ return;
+
+ c = &ctx->hw;
+
+	/* program SW pixel extension override for all pipes */
+ for (color = 0; color < DPU_MAX_PLANES; color++) {
+ /* color 2 has the same set of registers as color 1 */
+ if (color == 2)
+ continue;
+
+ lr_pe[color] = ((pe_ext->right_ftch[color] & bytemask) << 24)|
+ ((pe_ext->right_rpt[color] & bytemask) << 16)|
+ ((pe_ext->left_ftch[color] & bytemask) << 8)|
+ (pe_ext->left_rpt[color] & bytemask);
+
+ tb_pe[color] = ((pe_ext->btm_ftch[color] & bytemask) << 24)|
+ ((pe_ext->btm_rpt[color] & bytemask) << 16)|
+ ((pe_ext->top_ftch[color] & bytemask) << 8)|
+ (pe_ext->top_rpt[color] & bytemask);
+
+ tot_req_pixels[color] = (((pe_ext->roi_h[color] +
+ pe_ext->num_ext_pxls_top[color] +
+ pe_ext->num_ext_pxls_btm[color]) & shortmask) << 16) |
+ ((pe_ext->roi_w[color] +
+ pe_ext->num_ext_pxls_left[color] +
+ pe_ext->num_ext_pxls_right[color]) & shortmask);
+ }
+
+ /* color 0 */
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR + idx, lr_pe[0]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB + idx, tb_pe[0]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS + idx,
+ tot_req_pixels[0]);
+
+ /* color 1 and color 2 */
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR + idx, lr_pe[1]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB + idx, tb_pe[1]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS + idx,
+ tot_req_pixels[1]);
+
+ /* color 3 */
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR + idx, lr_pe[3]);
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB + idx, tb_pe[3]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS + idx,
+ tot_req_pixels[3]);
+}
+
+static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *sspp,
+ void *scaler_cfg)
+{
+ u32 idx;
+ struct dpu_hw_scaler3_cfg *scaler3_cfg = scaler_cfg;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx) || !sspp
+ || !scaler3_cfg)
+ return;
+
+ dpu_hw_setup_scaler3(&ctx->hw, scaler3_cfg, idx,
+ ctx->cap->sblk->scaler_blk.version,
+ sspp->layout.format);
+}
+
+static u32 _dpu_hw_sspp_get_scaler3_ver(struct dpu_hw_pipe *ctx)
+{
+ u32 idx;
+
+ if (!ctx || _sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx))
+ return 0;
+
+ return dpu_hw_get_scaler3_ver(&ctx->hw, idx);
+}
+
+/*
+ * dpu_hw_sspp_setup_rects() - program src/dst rectangles and plane strides
+ */
+static void dpu_hw_sspp_setup_rects(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index rect_index)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 src_size, src_xy, dst_size, dst_xy, ystride0, ystride1;
+ u32 src_size_off, src_xy_off, out_size_off, out_xy_off;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !cfg)
+ return;
+
+ c = &ctx->hw;
+
+ if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0) {
+ src_size_off = SSPP_SRC_SIZE;
+ src_xy_off = SSPP_SRC_XY;
+ out_size_off = SSPP_OUT_SIZE;
+ out_xy_off = SSPP_OUT_XY;
+ } else {
+ src_size_off = SSPP_SRC_SIZE_REC1;
+ src_xy_off = SSPP_SRC_XY_REC1;
+ out_size_off = SSPP_OUT_SIZE_REC1;
+ out_xy_off = SSPP_OUT_XY_REC1;
+ }
+
+
+ /* src and dest rect programming */
+ src_xy = (cfg->src_rect.y1 << 16) | cfg->src_rect.x1;
+ src_size = (drm_rect_height(&cfg->src_rect) << 16) |
+ drm_rect_width(&cfg->src_rect);
+ dst_xy = (cfg->dst_rect.y1 << 16) | cfg->dst_rect.x1;
+ dst_size = (drm_rect_height(&cfg->dst_rect) << 16) |
+ drm_rect_width(&cfg->dst_rect);
+
+ if (rect_index == DPU_SSPP_RECT_SOLO) {
+ ystride0 = (cfg->layout.plane_pitch[0]) |
+ (cfg->layout.plane_pitch[1] << 16);
+ ystride1 = (cfg->layout.plane_pitch[2]) |
+ (cfg->layout.plane_pitch[3] << 16);
+ } else {
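+		/*
+		 * each YSTRIDE register carries two 16-bit pitches, so in
+		 * multirect rect0 owns the low halves and rect1 the high halves
+		 */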
+ ystride0 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE0 + idx);
+ ystride1 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE1 + idx);
+
+ if (rect_index == DPU_SSPP_RECT_0) {
+ ystride0 = (ystride0 & 0xFFFF0000) |
+ (cfg->layout.plane_pitch[0] & 0x0000FFFF);
+ ystride1 = (ystride1 & 0xFFFF0000)|
+ (cfg->layout.plane_pitch[2] & 0x0000FFFF);
+ } else {
+ ystride0 = (ystride0 & 0x0000FFFF) |
+ ((cfg->layout.plane_pitch[0] << 16) &
+ 0xFFFF0000);
+ ystride1 = (ystride1 & 0x0000FFFF) |
+ ((cfg->layout.plane_pitch[2] << 16) &
+ 0xFFFF0000);
+ }
+ }
+
+ /* rectangle register programming */
+ DPU_REG_WRITE(c, src_size_off + idx, src_size);
+ DPU_REG_WRITE(c, src_xy_off + idx, src_xy);
+ DPU_REG_WRITE(c, out_size_off + idx, dst_size);
+ DPU_REG_WRITE(c, out_xy_off + idx, dst_xy);
+
+ DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE0 + idx, ystride0);
+ DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE1 + idx, ystride1);
+}
+
+static void dpu_hw_sspp_setup_sourceaddress(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index rect_mode)
+{
+ int i;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
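+	/* in multirect, rect0 fetches via SRC0/SRC2 and rect1 via SRC1/SRC3 */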
+ if (rect_mode == DPU_SSPP_RECT_SOLO) {
+ for (i = 0; i < ARRAY_SIZE(cfg->layout.plane_addr); i++)
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx + i * 0x4,
+ cfg->layout.plane_addr[i]);
+ } else if (rect_mode == DPU_SSPP_RECT_0) {
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx,
+ cfg->layout.plane_addr[0]);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC2_ADDR + idx,
+ cfg->layout.plane_addr[2]);
+ } else {
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC1_ADDR + idx,
+ cfg->layout.plane_addr[0]);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC3_ADDR + idx,
+ cfg->layout.plane_addr[2]);
+ }
+}
+
+static void dpu_hw_sspp_setup_csc(struct dpu_hw_pipe *ctx,
+ const struct dpu_csc_cfg *data)
+{
+ u32 idx;
+ bool csc10 = false;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC, &idx) || !data)
+ return;
+
+ if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features)) {
+ idx += CSC_10BIT_OFFSET;
+ csc10 = true;
+ }
+
+ dpu_hw_csc_setup(&ctx->hw, idx, data, csc10);
+}
+
+static void dpu_hw_sspp_setup_solidfill(struct dpu_hw_pipe *ctx, u32 color,
+		enum dpu_sspp_multirect_index rect_index)
+{
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0)
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR + idx, color);
+ else
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR_REC1 + idx,
+ color);
+}
+
+static void dpu_hw_sspp_setup_danger_safe_lut(struct dpu_hw_pipe *ctx,
+ u32 danger_lut,
+ u32 safe_lut)
+{
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, danger_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, safe_lut);
+}
+
+static void dpu_hw_sspp_setup_creq_lut(struct dpu_hw_pipe *ctx,
+ u64 creq_lut)
+{
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
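+	/* 8-level QoS hardware splits the 64-bit CREQ LUT across two registers */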
+ if (ctx->cap && test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features)) {
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, creq_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_1 + idx,
+ creq_lut >> 32);
+ } else {
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, creq_lut);
+ }
+}
+
+static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg)
+{
+ u32 idx;
+ u32 qos_ctrl = 0;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (cfg->vblank_en) {
+ qos_ctrl |= ((cfg->creq_vblank &
+ SSPP_QOS_CTRL_CREQ_VBLANK_MASK) <<
+ SSPP_QOS_CTRL_CREQ_VBLANK_OFF);
+ qos_ctrl |= ((cfg->danger_vblank &
+ SSPP_QOS_CTRL_DANGER_VBLANK_MASK) <<
+ SSPP_QOS_CTRL_DANGER_VBLANK_OFF);
+ qos_ctrl |= SSPP_QOS_CTRL_VBLANK_EN;
+ }
+
+ if (cfg->danger_safe_en)
+ qos_ctrl |= SSPP_QOS_CTRL_DANGER_SAFE_EN;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_QOS_CTRL + idx, qos_ctrl);
+}
+
+static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_cdp_cfg *cfg,
+ enum dpu_sspp_multirect_index index)
+{
+ u32 idx;
+ u32 cdp_cntl = 0;
+ u32 cdp_cntl_offset = 0;
+
+ if (!ctx || !cfg)
+ return;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (index == DPU_SSPP_RECT_SOLO || index == DPU_SSPP_RECT_0)
+ cdp_cntl_offset = SSPP_CDP_CNTL;
+ else
+ cdp_cntl_offset = SSPP_CDP_CNTL_REC1;
+
+ if (cfg->enable)
+ cdp_cntl |= BIT(0);
+ if (cfg->ubwc_meta_enable)
+ cdp_cntl |= BIT(1);
+ if (cfg->tile_amortize_enable)
+ cdp_cntl |= BIT(2);
+ if (cfg->preload_ahead == DPU_SSPP_CDP_PRELOAD_AHEAD_64)
+ cdp_cntl |= BIT(3);
+
+ DPU_REG_WRITE(&ctx->hw, cdp_cntl_offset, cdp_cntl);
+}
+
+static void _setup_layer_ops(struct dpu_hw_pipe *c,
+ unsigned long features)
+{
+ if (test_bit(DPU_SSPP_SRC, &features)) {
+ c->ops.setup_format = dpu_hw_sspp_setup_format;
+ c->ops.setup_rects = dpu_hw_sspp_setup_rects;
+ c->ops.setup_sourceaddress = dpu_hw_sspp_setup_sourceaddress;
+ c->ops.setup_solidfill = dpu_hw_sspp_setup_solidfill;
+ c->ops.setup_pe = dpu_hw_sspp_setup_pe_config;
+ }
+
+ if (test_bit(DPU_SSPP_QOS, &features)) {
+ c->ops.setup_danger_safe_lut =
+ dpu_hw_sspp_setup_danger_safe_lut;
+ c->ops.setup_creq_lut = dpu_hw_sspp_setup_creq_lut;
+ c->ops.setup_qos_ctrl = dpu_hw_sspp_setup_qos_ctrl;
+ }
+
+ if (test_bit(DPU_SSPP_CSC, &features) ||
+ test_bit(DPU_SSPP_CSC_10BIT, &features))
+ c->ops.setup_csc = dpu_hw_sspp_setup_csc;
+
+ if (test_bit(DPU_SSPP_SMART_DMA_V1, &c->cap->features) ||
+ test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features))
+ c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
+
+ if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) ||
+ test_bit(DPU_SSPP_SCALER_QSEED3LITE, &features) ||
+ test_bit(DPU_SSPP_SCALER_QSEED4, &features)) {
+ c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
+ c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
+ }
+
+ if (test_bit(DPU_SSPP_CDP, &features))
+ c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
+}
+
+#ifdef CONFIG_DEBUG_FS
+int _dpu_hw_sspp_init_debugfs(struct dpu_hw_pipe *hw_pipe, struct dpu_kms *kms, struct dentry *entry)
+{
+ const struct dpu_sspp_cfg *cfg = hw_pipe->cap;
+ const struct dpu_sspp_sub_blks *sblk = cfg->sblk;
+ struct dentry *debugfs_root;
+ char sspp_name[32];
+
+ snprintf(sspp_name, sizeof(sspp_name), "%d", hw_pipe->idx);
+
+ /* create overall sub-directory for the pipe */
+ debugfs_root =
+ debugfs_create_dir(sspp_name, entry);
+
+ /* don't error check these */
+ debugfs_create_xul("features", 0600,
+ debugfs_root, (unsigned long *)&hw_pipe->cap->features);
+
+ /* add register dump support */
+ dpu_debugfs_create_regset32("src_blk", 0400,
+ debugfs_root,
+ sblk->src_blk.base + cfg->base,
+ sblk->src_blk.len,
+ kms);
+
+ if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED3LITE) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED4))
+ dpu_debugfs_create_regset32("scaler_blk", 0400,
+ debugfs_root,
+ sblk->scaler_blk.base + cfg->base,
+ sblk->scaler_blk.len,
+ kms);
+
+ if (cfg->features & BIT(DPU_SSPP_CSC) ||
+ cfg->features & BIT(DPU_SSPP_CSC_10BIT))
+ dpu_debugfs_create_regset32("csc_blk", 0400,
+ debugfs_root,
+ sblk->csc_blk.base + cfg->base,
+ sblk->csc_blk.len,
+ kms);
+
+ debugfs_create_u32("xin_id",
+ 0400,
+ debugfs_root,
+ (u32 *) &cfg->xin_id);
+ debugfs_create_u32("clk_ctrl",
+ 0400,
+ debugfs_root,
+ (u32 *) &cfg->clk_ctrl);
+ debugfs_create_x32("creq_vblank",
+ 0600,
+ debugfs_root,
+ (u32 *) &sblk->creq_vblank);
+ debugfs_create_x32("danger_vblank",
+ 0600,
+ debugfs_root,
+ (u32 *) &sblk->danger_vblank);
+
+ return 0;
+}
+#endif
+
+
+static const struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *catalog,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ if ((sspp < SSPP_MAX) && catalog && addr && b) {
+ for (i = 0; i < catalog->sspp_count; i++) {
+ if (sspp == catalog->sspp[i].id) {
+ b->blk_addr = addr + catalog->sspp[i].base;
+ b->log_mask = DPU_DBG_MASK_SSPP;
+ return &catalog->sspp[i];
+ }
+ }
+ }
+
+	return ERR_PTR(-EINVAL);
+}
+
+struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
+ void __iomem *addr, const struct dpu_mdss_cfg *catalog)
+{
+ struct dpu_hw_pipe *hw_pipe;
+ const struct dpu_sspp_cfg *cfg;
+
+ if (!addr || !catalog)
+ return ERR_PTR(-EINVAL);
+
+ hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
+ if (!hw_pipe)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _sspp_offset(idx, addr, catalog, &hw_pipe->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(hw_pipe);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ hw_pipe->catalog = catalog;
+ hw_pipe->mdp = &catalog->mdp[0];
+ hw_pipe->idx = idx;
+ hw_pipe->cap = cfg;
+ _setup_layer_ops(hw_pipe, hw_pipe->cap->features);
+
+ return hw_pipe;
+}
+
+void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx)
+{
+ kfree(ctx);
+}
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
new file mode 100644
index 000000000..0c95b7e64
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -0,0 +1,395 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_SSPP_H
+#define _DPU_HW_SSPP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_formats.h"
+
+struct dpu_hw_pipe;
+
+/**
+ * Flags
+ */
+#define DPU_SSPP_FLIP_LR BIT(0)
+#define DPU_SSPP_FLIP_UD BIT(1)
+#define DPU_SSPP_SOURCE_ROTATED_90 BIT(2)
+#define DPU_SSPP_ROT_90 BIT(3)
+#define DPU_SSPP_SOLID_FILL BIT(4)
+
+/**
+ * Define all scaler feature bits in catalog
+ */
+#define DPU_SSPP_SCALER (BIT(DPU_SSPP_SCALER_RGB) | \
+ BIT(DPU_SSPP_SCALER_QSEED2) | \
+ BIT(DPU_SSPP_SCALER_QSEED3) | \
+ BIT(DPU_SSPP_SCALER_QSEED3LITE) | \
+ BIT(DPU_SSPP_SCALER_QSEED4))
+
+/*
+ * Define all CSC feature bits in catalog
+ */
+#define DPU_SSPP_CSC_ANY (BIT(DPU_SSPP_CSC) | \
+ BIT(DPU_SSPP_CSC_10BIT))
+
+/**
+ * Component indices
+ */
+enum {
+ DPU_SSPP_COMP_0,
+ DPU_SSPP_COMP_1_2,
+ DPU_SSPP_COMP_2,
+ DPU_SSPP_COMP_3,
+
+ DPU_SSPP_COMP_MAX
+};
+
+/**
+ * DPU_SSPP_RECT_SOLO - multirect disabled
+ * DPU_SSPP_RECT_0 - rect0 of a multirect pipe
+ * DPU_SSPP_RECT_1 - rect1 of a multirect pipe
+ *
+ * Note: HW supports multirect with either RECT0 or
+ * RECT1. Considering no benefit of such configs over
+ * SOLO mode and to keep the plane management simple,
+ * we don't support single-rect multirect configs.
+ */
+enum dpu_sspp_multirect_index {
+ DPU_SSPP_RECT_SOLO = 0,
+ DPU_SSPP_RECT_0,
+ DPU_SSPP_RECT_1,
+};
+
+enum dpu_sspp_multirect_mode {
+ DPU_SSPP_MULTIRECT_NONE = 0,
+ DPU_SSPP_MULTIRECT_PARALLEL,
+ DPU_SSPP_MULTIRECT_TIME_MX,
+};
+
+enum {
+ DPU_FRAME_LINEAR,
+ DPU_FRAME_TILE_A4X,
+ DPU_FRAME_TILE_A5X,
+};
+
+enum dpu_hw_filter {
+ DPU_SCALE_FILTER_NEAREST = 0,
+ DPU_SCALE_FILTER_BIL,
+ DPU_SCALE_FILTER_PCMN,
+ DPU_SCALE_FILTER_CA,
+ DPU_SCALE_FILTER_MAX
+};
+
+enum dpu_hw_filter_alpa {
+ DPU_SCALE_ALPHA_PIXEL_REP,
+ DPU_SCALE_ALPHA_BIL
+};
+
+enum dpu_hw_filter_yuv {
+ DPU_SCALE_2D_4X4,
+ DPU_SCALE_2D_CIR,
+ DPU_SCALE_1D_SEP,
+ DPU_SCALE_BIL
+};
+
+struct dpu_hw_sharp_cfg {
+ u32 strength;
+ u32 edge_thr;
+ u32 smooth_thr;
+ u32 noise_thr;
+};
+
+struct dpu_hw_pixel_ext {
+ /* scaling factors are enabled for this input layer */
+ uint8_t enable_pxl_ext;
+
+ int init_phase_x[DPU_MAX_PLANES];
+ int phase_step_x[DPU_MAX_PLANES];
+ int init_phase_y[DPU_MAX_PLANES];
+ int phase_step_y[DPU_MAX_PLANES];
+
+ /*
+	 * Number of pixels of extension in the left, right, top and bottom
+	 * directions for all color components. This pixel value for each
+	 * color component should be the sum of fetch + repeat pixels.
+ */
+ int num_ext_pxls_left[DPU_MAX_PLANES];
+ int num_ext_pxls_right[DPU_MAX_PLANES];
+ int num_ext_pxls_top[DPU_MAX_PLANES];
+ int num_ext_pxls_btm[DPU_MAX_PLANES];
+
+ /*
+	 * Number of pixels that need to be overfetched in the left, right,
+	 * top and bottom directions from the source image for scaling.
+ */
+ int left_ftch[DPU_MAX_PLANES];
+ int right_ftch[DPU_MAX_PLANES];
+ int top_ftch[DPU_MAX_PLANES];
+ int btm_ftch[DPU_MAX_PLANES];
+
+ /*
+	 * Number of pixels that need to be repeated in the left, right,
+	 * top and bottom directions for scaling.
+ */
+ int left_rpt[DPU_MAX_PLANES];
+ int right_rpt[DPU_MAX_PLANES];
+ int top_rpt[DPU_MAX_PLANES];
+ int btm_rpt[DPU_MAX_PLANES];
+
+ uint32_t roi_w[DPU_MAX_PLANES];
+ uint32_t roi_h[DPU_MAX_PLANES];
+
+ /*
+ * Filter type to be used for scaling in horizontal and vertical
+ * directions
+ */
+ enum dpu_hw_filter horz_filter[DPU_MAX_PLANES];
+ enum dpu_hw_filter vert_filter[DPU_MAX_PLANES];
+
+};
+
+/**
+ * struct dpu_hw_pipe_cfg : Pipe description
+ * @layout: format layout information for programming buffer to hardware
+ * @src_rect: src ROI, caller takes into account the different operations
+ * such as decimation, flip etc to program this field
+ * @dst_rect: destination ROI
+ * @index: index of the rectangle of SSPP
+ * @mode: parallel or time multiplex multirect mode
+ */
+struct dpu_hw_pipe_cfg {
+ struct dpu_hw_fmt_layout layout;
+ struct drm_rect src_rect;
+ struct drm_rect dst_rect;
+ enum dpu_sspp_multirect_index index;
+ enum dpu_sspp_multirect_mode mode;
+};
+
+/**
+ * struct dpu_hw_pipe_qos_cfg : Source pipe QoS configuration
+ * @creq_vblank: creq value generated to vbif during vertical blanking
+ * @danger_vblank: danger value generated during vertical blanking
+ * @vblank_en: enable creq_vblank and danger_vblank during vblank
+ * @danger_safe_en: enable danger safe generation
+ */
+struct dpu_hw_pipe_qos_cfg {
+ u32 creq_vblank;
+ u32 danger_vblank;
+ bool vblank_en;
+ bool danger_safe_en;
+};
+
+/*
+ * CDP preload ahead address sizes
+ */
+enum {
+ DPU_SSPP_CDP_PRELOAD_AHEAD_32,
+ DPU_SSPP_CDP_PRELOAD_AHEAD_64
+};
+
+/**
+ * struct dpu_hw_pipe_ts_cfg - traffic shaper configuration
+ * @size: size to prefill in bytes, or zero to disable
+ * @time: time to prefill in usec, or zero to disable
+ */
+struct dpu_hw_pipe_ts_cfg {
+ u64 size;
+ u64 time;
+};
+
+/**
+ * struct dpu_hw_sspp_ops - interface to the SSPP Hw driver functions
+ * Caller must call the init function to get the pipe context for each pipe.
+ * Assumption is these functions will be called after clocks are enabled.
+ */
+struct dpu_hw_sspp_ops {
+ /**
+ * setup_format - setup pixel format, cropping rectangle and flip
+ * @ctx: Pointer to pipe context
+ * @fmt: Pointer to DPU format description
+ * @flags: Extra flags for format config
+ * @index: rectangle index in multirect
+ */
+ void (*setup_format)(struct dpu_hw_pipe *ctx,
+ const struct dpu_format *fmt, u32 flags,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+ * setup_rects - setup pipe ROI rectangles
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe config structure
+ * @index: rectangle index in multirect
+ */
+ void (*setup_rects)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+ * setup_pe - setup pipe pixel extension
+ * @ctx: Pointer to pipe context
+ * @pe_ext: Pointer to pixel ext settings
+ */
+ void (*setup_pe)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pixel_ext *pe_ext);
+
+ /**
+ * setup_sourceaddress - setup pipe source addresses
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe config structure
+ * @index: rectangle index in multirect
+ */
+ void (*setup_sourceaddress)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+ * setup_csc - setup color space conversion
+ * @ctx: Pointer to pipe context
+ * @data: Pointer to config structure
+ */
+ void (*setup_csc)(struct dpu_hw_pipe *ctx, const struct dpu_csc_cfg *data);
+
+ /**
+ * setup_solidfill - enable/disable colorfill
+ * @ctx: Pointer to pipe context
+ * @color: Fill color value
+ * @index: rectangle index in multirect
+ */
+ void (*setup_solidfill)(struct dpu_hw_pipe *ctx, u32 color,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+ * setup_multirect - setup multirect configuration
+ * @ctx: Pointer to pipe context
+ * @index: rectangle index in multirect
+ * @mode: parallel fetch / time multiplex multirect mode
+ */
+ void (*setup_multirect)(struct dpu_hw_pipe *ctx,
+ enum dpu_sspp_multirect_index index,
+ enum dpu_sspp_multirect_mode mode);
+
+ /**
+ * setup_sharpening - setup sharpening
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to config structure
+ */
+ void (*setup_sharpening)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_sharp_cfg *cfg);
+
+ /**
+ * setup_danger_safe_lut - setup danger safe LUTs
+ * @ctx: Pointer to pipe context
+ * @danger_lut: LUT for generating danger level based on fill level
+ * @safe_lut: LUT for generating safe level based on fill level
+ */
+ void (*setup_danger_safe_lut)(struct dpu_hw_pipe *ctx,
+ u32 danger_lut,
+ u32 safe_lut);
+
+ /**
+ * setup_creq_lut - setup CREQ LUT
+ * @ctx: Pointer to pipe context
+ * @creq_lut: LUT for generating creq level based on fill level
+ */
+ void (*setup_creq_lut)(struct dpu_hw_pipe *ctx,
+ u64 creq_lut);
+
+ /**
+ * setup_qos_ctrl - setup QoS control
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe QoS configuration
+ */
+ void (*setup_qos_ctrl)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg);
+
+ /**
+ * setup_histogram - setup histograms
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to histogram configuration
+ */
+ void (*setup_histogram)(struct dpu_hw_pipe *ctx,
+ void *cfg);
+
+ /**
+ * setup_scaler - setup scaler
+ * @ctx: Pointer to pipe context
+ * @pipe_cfg: Pointer to pipe configuration
+ * @scaler_cfg: Pointer to scaler configuration
+ */
+ void (*setup_scaler)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *pipe_cfg,
+ void *scaler_cfg);
+
+ /**
+ * get_scaler_ver - get scaler h/w version
+ * @ctx: Pointer to pipe context
+ */
+ u32 (*get_scaler_ver)(struct dpu_hw_pipe *ctx);
+
+ /**
+ * setup_cdp - setup client driven prefetch
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to cdp configuration
+ * @index: rectangle index in multirect
+ */
+ void (*setup_cdp)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_cdp_cfg *cfg,
+ enum dpu_sspp_multirect_index index);
+};
+
+/**
+ * struct dpu_hw_pipe - pipe description
+ * @base: hardware block base structure
+ * @hw: block hardware details
+ * @catalog: back pointer to catalog
+ * @mdp: pointer to associated mdp portion of the catalog
+ * @idx: pipe index
+ * @cap: pointer to pipe capability (dpu_sspp_cfg) from the catalog
+ * @ops: pointer to operations possible for this pipe
+ */
+struct dpu_hw_pipe {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+ const struct dpu_mdss_cfg *catalog;
+ const struct dpu_mdp_cfg *mdp;
+
+ /* Pipe */
+ enum dpu_sspp idx;
+ const struct dpu_sspp_cfg *cap;
+
+ /* Ops */
+ struct dpu_hw_sspp_ops ops;
+};
+
+struct dpu_kms;
+/**
+ * dpu_hw_sspp_init - initializes the sspp hw driver object.
+ * Should be called once per pipe, before accessing it.
+ * @idx: Pipe index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @catalog : Pointer to mdss catalog data
+ */
+struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
+ void __iomem *addr, const struct dpu_mdss_cfg *catalog);
+
+/**
+ * dpu_hw_sspp_destroy - destroys the SSPP driver context;
+ * should be called during hw pipe cleanup.
+ * @ctx: Pointer to SSPP driver context returned by dpu_hw_sspp_init
+ */
+void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx);
+
+void dpu_debugfs_sspp_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
+int _dpu_hw_sspp_init_debugfs(struct dpu_hw_pipe *hw_pipe, struct dpu_kms *kms, struct dentry *entry);
+
+#endif /* _DPU_HW_SSPP_H */
+
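Reviewer aside, not part of the patch: a minimal caller sketch for the SSPP
interface above. The dpu_hw_* names come from this header; the helper itself
is hypothetical, and it assumes ops are populated per catalog features by
_setup_layer_ops() (hence the NULL checks) and that clocks are already on.

    /* hypothetical caller: bring up one pipe and program its rectangles */
    static int example_sspp_setup(void __iomem *mdp_base,
                                  const struct dpu_mdss_cfg *catalog,
                                  struct dpu_hw_pipe_cfg *cfg)
    {
            struct dpu_hw_pipe *pipe;

            pipe = dpu_hw_sspp_init(SSPP_VIG0, mdp_base, catalog);
            if (IS_ERR_OR_NULL(pipe))
                    return pipe ? PTR_ERR(pipe) : -EINVAL;

            if (pipe->ops.setup_rects)
                    pipe->ops.setup_rects(pipe, cfg, DPU_SSPP_RECT_SOLO);
            if (pipe->ops.setup_sourceaddress)
                    pipe->ops.setup_sourceaddress(pipe, cfg,
                                                  DPU_SSPP_RECT_SOLO);

            dpu_hw_sspp_destroy(pipe);
            return 0;
    }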
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
new file mode 100644
index 000000000..c3110a25a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_top.h"
+#include "dpu_kms.h"
+
+#define SSPP_SPARE 0x28
+
+#define FLD_SPLIT_DISPLAY_CMD BIT(1)
+#define FLD_SMART_PANEL_FREE_RUN BIT(2)
+#define FLD_INTF_1_SW_TRG_MUX BIT(4)
+#define FLD_INTF_2_SW_TRG_MUX BIT(8)
+#define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
+
+#define DANGER_STATUS 0x360
+#define SAFE_STATUS 0x364
+
+#define TE_LINE_INTERVAL 0x3F4
+
+#define TRAFFIC_SHAPER_EN BIT(31)
+#define TRAFFIC_SHAPER_RD_CLIENT(num) (0x030 + (num * 4))
+#define TRAFFIC_SHAPER_WR_CLIENT(num) (0x060 + (num * 4))
+#define TRAFFIC_SHAPER_FIXPOINT_FACTOR 4
+
+#define MDP_WD_TIMER_0_CTL 0x380
+#define MDP_WD_TIMER_0_CTL2 0x384
+#define MDP_WD_TIMER_0_LOAD_VALUE 0x388
+#define MDP_WD_TIMER_1_CTL 0x390
+#define MDP_WD_TIMER_1_CTL2 0x394
+#define MDP_WD_TIMER_1_LOAD_VALUE 0x398
+#define MDP_WD_TIMER_2_CTL 0x420
+#define MDP_WD_TIMER_2_CTL2 0x424
+#define MDP_WD_TIMER_2_LOAD_VALUE 0x428
+#define MDP_WD_TIMER_3_CTL 0x430
+#define MDP_WD_TIMER_3_CTL2 0x434
+#define MDP_WD_TIMER_3_LOAD_VALUE 0x438
+#define MDP_WD_TIMER_4_CTL 0x440
+#define MDP_WD_TIMER_4_CTL2 0x444
+#define MDP_WD_TIMER_4_LOAD_VALUE 0x448
+
+#define MDP_TICK_COUNT 16
+#define XO_CLK_RATE 19200
+#define MS_TICKS_IN_SEC 1000
+
+#define CALCULATE_WD_LOAD_VALUE(fps) \
+ ((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE)/(MDP_TICK_COUNT * fps)))
+
+#define DCE_SEL 0x450
+
+static void dpu_hw_setup_split_pipe(struct dpu_hw_mdp *mdp,
+ struct split_pipe_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 upper_pipe = 0;
+ u32 lower_pipe = 0;
+
+ if (!mdp || !cfg)
+ return;
+
+ c = &mdp->hw;
+
+ if (cfg->en) {
+ if (cfg->mode == INTF_MODE_CMD) {
+ lower_pipe = FLD_SPLIT_DISPLAY_CMD;
+ /* interface controlling sw trigger */
+ if (cfg->intf == INTF_2)
+ lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
+ else
+ lower_pipe |= FLD_INTF_2_SW_TRG_MUX;
+ upper_pipe = lower_pipe;
+ } else {
+ if (cfg->intf == INTF_2) {
+ lower_pipe = FLD_INTF_1_SW_TRG_MUX;
+ upper_pipe = FLD_INTF_2_SW_TRG_MUX;
+ } else {
+ lower_pipe = FLD_INTF_2_SW_TRG_MUX;
+ upper_pipe = FLD_INTF_1_SW_TRG_MUX;
+ }
+ }
+ }
+
+ DPU_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
+ DPU_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
+ DPU_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
+ DPU_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
+}
+
+static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp,
+ enum dpu_clk_ctrl_type clk_ctrl, bool enable)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_off, bit_off;
+ u32 reg_val, new_val;
+ bool clk_forced_on;
+
+ if (!mdp)
+ return false;
+
+ c = &mdp->hw;
+
+ if (clk_ctrl <= DPU_CLK_CTRL_NONE || clk_ctrl >= DPU_CLK_CTRL_MAX)
+ return false;
+
+ reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off;
+ bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off;
+
+ reg_val = DPU_REG_READ(c, reg_off);
+
+ if (enable)
+ new_val = reg_val | BIT(bit_off);
+ else
+ new_val = reg_val & ~BIT(bit_off);
+
+ DPU_REG_WRITE(c, reg_off, new_val);
+
+ clk_forced_on = !(reg_val & BIT(bit_off));
+
+ return clk_forced_on;
+}
+
+
+static void dpu_hw_get_danger_status(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 value;
+
+ if (!mdp || !status)
+ return;
+
+ c = &mdp->hw;
+
+ value = DPU_REG_READ(c, DANGER_STATUS);
+ status->mdp = (value >> 0) & 0x3;
+ status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
+ status->sspp[SSPP_VIG1] = (value >> 6) & 0x3;
+ status->sspp[SSPP_VIG2] = (value >> 8) & 0x3;
+ status->sspp[SSPP_VIG3] = (value >> 10) & 0x3;
+ status->sspp[SSPP_RGB0] = (value >> 12) & 0x3;
+ status->sspp[SSPP_RGB1] = (value >> 14) & 0x3;
+ status->sspp[SSPP_RGB2] = (value >> 16) & 0x3;
+ status->sspp[SSPP_RGB3] = (value >> 18) & 0x3;
+ status->sspp[SSPP_DMA0] = (value >> 20) & 0x3;
+ status->sspp[SSPP_DMA1] = (value >> 22) & 0x3;
+ status->sspp[SSPP_DMA2] = (value >> 28) & 0x3;
+ status->sspp[SSPP_DMA3] = (value >> 30) & 0x3;
+ status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x3;
+ status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3;
+}
+
+static void dpu_hw_setup_vsync_source(struct dpu_hw_mdp *mdp,
+ struct dpu_vsync_source_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg, wd_load_value, wd_ctl, wd_ctl2, i;
+ static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
+
+ if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
+ return;
+
+ c = &mdp->hw;
+ reg = DPU_REG_READ(c, MDP_VSYNC_SEL);
+ for (i = 0; i < cfg->pp_count; i++) {
+ int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
+
+ if (pp_idx >= ARRAY_SIZE(pp_offset))
+ continue;
+
+ reg &= ~(0xf << pp_offset[pp_idx]);
+ reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
+ }
+ DPU_REG_WRITE(c, MDP_VSYNC_SEL, reg);
+
+ if (cfg->vsync_source >= DPU_VSYNC_SOURCE_WD_TIMER_4 &&
+ cfg->vsync_source <= DPU_VSYNC_SOURCE_WD_TIMER_0) {
+ switch (cfg->vsync_source) {
+ case DPU_VSYNC_SOURCE_WD_TIMER_4:
+ wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_4_CTL;
+ wd_ctl2 = MDP_WD_TIMER_4_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_3:
+ wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_3_CTL;
+ wd_ctl2 = MDP_WD_TIMER_3_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_2:
+ wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_2_CTL;
+ wd_ctl2 = MDP_WD_TIMER_2_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_1:
+ wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_1_CTL;
+ wd_ctl2 = MDP_WD_TIMER_1_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_0:
+ default:
+ wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_0_CTL;
+ wd_ctl2 = MDP_WD_TIMER_0_CTL2;
+ break;
+ }
+
+ DPU_REG_WRITE(c, wd_load_value,
+ CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
+
+ DPU_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */
+ reg = DPU_REG_READ(c, wd_ctl2);
+ reg |= BIT(8); /* enable heartbeat timer */
+ reg |= BIT(0); /* enable WD timer */
+ DPU_REG_WRITE(c, wd_ctl2, reg);
+
+ /* make sure that timers are enabled/disabled for vsync state */
+ wmb();
+ }
+}
+
+static void dpu_hw_get_safe_status(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 value;
+
+ if (!mdp || !status)
+ return;
+
+ c = &mdp->hw;
+
+ value = DPU_REG_READ(c, SAFE_STATUS);
+ status->mdp = (value >> 0) & 0x1;
+ status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
+ status->sspp[SSPP_VIG1] = (value >> 6) & 0x1;
+ status->sspp[SSPP_VIG2] = (value >> 8) & 0x1;
+ status->sspp[SSPP_VIG3] = (value >> 10) & 0x1;
+ status->sspp[SSPP_RGB0] = (value >> 12) & 0x1;
+ status->sspp[SSPP_RGB1] = (value >> 14) & 0x1;
+ status->sspp[SSPP_RGB2] = (value >> 16) & 0x1;
+ status->sspp[SSPP_RGB3] = (value >> 18) & 0x1;
+ status->sspp[SSPP_DMA0] = (value >> 20) & 0x1;
+ status->sspp[SSPP_DMA1] = (value >> 22) & 0x1;
+ status->sspp[SSPP_DMA2] = (value >> 28) & 0x1;
+ status->sspp[SSPP_DMA3] = (value >> 30) & 0x1;
+ status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x1;
+ status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
+}
+
+static void dpu_hw_intf_audio_select(struct dpu_hw_mdp *mdp)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!mdp)
+ return;
+
+ c = &mdp->hw;
+
+ DPU_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
+}
+
+static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
+ unsigned long cap)
+{
+ ops->setup_split_pipe = dpu_hw_setup_split_pipe;
+ ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
+ ops->get_danger_status = dpu_hw_get_danger_status;
+ ops->setup_vsync_source = dpu_hw_setup_vsync_source;
+ ops->get_safe_status = dpu_hw_get_safe_status;
+
+ if (cap & BIT(DPU_MDP_AUDIO_SELECT))
+ ops->intf_audio_select = dpu_hw_intf_audio_select;
+}
+
+static const struct dpu_mdp_cfg *_top_offset(enum dpu_mdp mdp,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ if (!m || !addr || !b)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < m->mdp_count; i++) {
+ if (mdp == m->mdp[i].id) {
+ b->blk_addr = addr + m->mdp[i].base;
+ b->log_mask = DPU_DBG_MASK_TOP;
+ return &m->mdp[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_mdp *mdp;
+ const struct dpu_mdp_cfg *cfg;
+
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
+ mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
+ if (!mdp)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _top_offset(idx, m, addr, &mdp->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(mdp);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Assign ops
+ */
+ mdp->idx = idx;
+ mdp->caps = cfg;
+ _setup_mdp_ops(&mdp->ops, mdp->caps->features);
+
+ return mdp;
+}
+
+void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
+{
+ kfree(mdp);
+}
+
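Annotation, not in the patch: a worked check of the CALCULATE_WD_LOAD_VALUE()
arithmetic above. The watchdog vsync timer counts the 19.2 MHz XO divided by
MDP_TICK_COUNT (16), i.e. 1.2 MHz, so the load value is simply ticks per
frame; the constants below mirror the macros in this file.

    #include <assert.h>

    /* mirrors CALCULATE_WD_LOAD_VALUE() with XO_CLK_RATE/MDP_TICK_COUNT */
    static unsigned int wd_load_value(unsigned int fps)
    {
            return (1000u * 19200u) / (16u * fps);
    }

    int main(void)
    {
            assert(wd_load_value(60) == 20000);  /* 20000 / 1.2 MHz = 16.67 ms */
            assert(wd_load_value(120) == 10000); /* one 120 Hz frame */
            return 0;
    }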
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
new file mode 100644
index 000000000..a1a9e44be
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_TOP_H
+#define _DPU_HW_TOP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+struct dpu_hw_mdp;
+
+/**
+ * struct traffic_shaper_cfg: traffic shaper configuration
+ * @en : enable/disable traffic shaper
+ * @rd_client : true if read client; false if write client
+ * @client_id : client identifier
+ * @bpc_denom : denominator of byte per clk
+ * @bpc_numer : numerator of byte per clk
+ */
+struct traffic_shaper_cfg {
+ bool en;
+ bool rd_client;
+ u32 client_id;
+ u32 bpc_denom;
+ u64 bpc_numer;
+};
+
+/**
+ * struct split_pipe_cfg - pipe configuration for dual display panels
+ * @en : Enable/disable dual pipe configuration
+ * @mode : Panel interface mode
+ * @intf : Interface id for main control path
+ * @split_flush_en: Allows both the paths to be flushed when master path is
+ * flushed
+ */
+struct split_pipe_cfg {
+ bool en;
+ enum dpu_intf_mode mode;
+ enum dpu_intf intf;
+ bool split_flush_en;
+};
+
+/**
+ * struct dpu_danger_safe_status: danger and safe status signals
+ * @mdp: top level status
+ * @sspp: source pipe status
+ */
+struct dpu_danger_safe_status {
+ u8 mdp;
+ u8 sspp[SSPP_MAX];
+};
+
+/**
+ * struct dpu_vsync_source_cfg - configure vsync source and configure the
+ * watchdog timers if required.
+ * @pp_count: number of ping pongs active
+ * @frame_rate: Display frame rate
+ * @ppnumber: ping pong index array
+ * @vsync_source: vsync source selection
+ */
+struct dpu_vsync_source_cfg {
+ u32 pp_count;
+ u32 frame_rate;
+ u32 ppnumber[PINGPONG_MAX];
+ u32 vsync_source;
+};
+
+/**
+ * struct dpu_hw_mdp_ops - interface to the MDP TOP Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled.
+ * @setup_split_pipe : Programs the pipe control registers
+ * @setup_traffic_shaper : Programs traffic shaper control
+ */
+struct dpu_hw_mdp_ops {
+ /**
+ * setup_split_pipe() : Registers are not double buffered, this
+ * function should be called before timing control enable
+ * @mdp : mdp top context driver
+ * @cfg : upper and lower part of pipe configuration
+ */
+ void (*setup_split_pipe)(struct dpu_hw_mdp *mdp,
+ struct split_pipe_cfg *cfg);
+
+ /**
+ * setup_traffic_shaper() : Setup traffic shaper control
+ * @mdp : mdp top context driver
+ * @cfg : traffic shaper configuration
+ */
+ void (*setup_traffic_shaper)(struct dpu_hw_mdp *mdp,
+ struct traffic_shaper_cfg *cfg);
+
+ /**
+ * setup_clk_force_ctrl - set clock force control
+ * @mdp: mdp top context driver
+ * @clk_ctrl: clock to be controlled
+ * @enable: force on enable
+ * @return: if the clock is forced-on by this function
+ */
+ bool (*setup_clk_force_ctrl)(struct dpu_hw_mdp *mdp,
+ enum dpu_clk_ctrl_type clk_ctrl, bool enable);
+
+ /**
+ * get_danger_status - get danger status
+ * @mdp: mdp top context driver
+ * @status: Pointer to danger safe status
+ */
+ void (*get_danger_status)(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status);
+
+ /**
+ * setup_vsync_source - setup vsync source configuration details
+ * @mdp: mdp top context driver
+ * @cfg: vsync source selection configuration
+ */
+ void (*setup_vsync_source)(struct dpu_hw_mdp *mdp,
+ struct dpu_vsync_source_cfg *cfg);
+
+ /**
+ * get_safe_status - get safe status
+ * @mdp: mdp top context driver
+ * @status: Pointer to danger safe status
+ */
+ void (*get_safe_status)(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status);
+
+ /**
+ * intf_audio_select - select the external interface for audio
+ * @mdp: mdp top context driver
+ */
+ void (*intf_audio_select)(struct dpu_hw_mdp *mdp);
+};
+
+struct dpu_hw_mdp {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* top */
+ enum dpu_mdp idx;
+ const struct dpu_mdp_cfg *caps;
+
+ /* ops */
+ struct dpu_hw_mdp_ops ops;
+};
+
+/**
+ * dpu_hw_mdptop_init - initializes the top driver for the passed idx
+ * @idx: MDP TOP index for which the driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ */
+struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp);
+
+#endif /* _DPU_HW_TOP_H */
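Side note for review, not part of the patch: a hypothetical caller sketch for
the TOP block. Everything except the helper name comes from this header;
setup_split_pipe is populated unconditionally by _setup_mdp_ops(), and per
its kerneldoc it must run before the timing engines are enabled.

    static int example_enable_split_display(void __iomem *mdp_base,
                                            const struct dpu_mdss_cfg *m)
    {
            struct dpu_hw_mdp *top;
            struct split_pipe_cfg cfg = {
                    .en = true,
                    .mode = INTF_MODE_CMD,
                    .intf = INTF_1,         /* main control path */
                    .split_flush_en = true, /* flush both paths together */
            };

            top = dpu_hw_mdptop_init(MDP_TOP, mdp_base, m);
            if (IS_ERR(top))
                    return PTR_ERR(top);

            /* not double buffered: program before timing control enable */
            top->ops.setup_split_pipe(top, &cfg);

            dpu_hw_mdp_destroy(top);
            return 0;
    }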
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
new file mode 100644
index 000000000..1b7439ae6
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+/* using a file-static variable for debugfs access */
+static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE;
+
+/* DPU_SCALER_QSEED3 */
+#define QSEED3_HW_VERSION 0x00
+#define QSEED3_OP_MODE 0x04
+#define QSEED3_RGB2Y_COEFF 0x08
+#define QSEED3_PHASE_INIT 0x0C
+#define QSEED3_PHASE_STEP_Y_H 0x10
+#define QSEED3_PHASE_STEP_Y_V 0x14
+#define QSEED3_PHASE_STEP_UV_H 0x18
+#define QSEED3_PHASE_STEP_UV_V 0x1C
+#define QSEED3_PRELOAD 0x20
+#define QSEED3_DE_SHARPEN 0x24
+#define QSEED3_DE_SHARPEN_CTL 0x28
+#define QSEED3_DE_SHAPE_CTL 0x2C
+#define QSEED3_DE_THRESHOLD 0x30
+#define QSEED3_DE_ADJUST_DATA_0 0x34
+#define QSEED3_DE_ADJUST_DATA_1 0x38
+#define QSEED3_DE_ADJUST_DATA_2 0x3C
+#define QSEED3_SRC_SIZE_Y_RGB_A 0x40
+#define QSEED3_SRC_SIZE_UV 0x44
+#define QSEED3_DST_SIZE 0x48
+#define QSEED3_COEF_LUT_CTRL 0x4C
+#define QSEED3_COEF_LUT_SWAP_BIT 0
+#define QSEED3_COEF_LUT_DIR_BIT 1
+#define QSEED3_COEF_LUT_Y_CIR_BIT 2
+#define QSEED3_COEF_LUT_UV_CIR_BIT 3
+#define QSEED3_COEF_LUT_Y_SEP_BIT 4
+#define QSEED3_COEF_LUT_UV_SEP_BIT 5
+#define QSEED3_BUFFER_CTRL 0x50
+#define QSEED3_CLK_CTRL0 0x54
+#define QSEED3_CLK_CTRL1 0x58
+#define QSEED3_CLK_STATUS 0x5C
+#define QSEED3_PHASE_INIT_Y_H 0x90
+#define QSEED3_PHASE_INIT_Y_V 0x94
+#define QSEED3_PHASE_INIT_UV_H 0x98
+#define QSEED3_PHASE_INIT_UV_V 0x9C
+#define QSEED3_COEF_LUT 0x100
+#define QSEED3_FILTERS 5
+#define QSEED3_LUT_REGIONS 4
+#define QSEED3_CIRCULAR_LUTS 9
+#define QSEED3_SEPARABLE_LUTS 10
+#define QSEED3_LUT_SIZE 60
+#define QSEED3_ENABLE 2
+#define QSEED3_DIR_LUT_SIZE (200 * sizeof(u32))
+#define QSEED3_CIR_LUT_SIZE \
+ (QSEED3_LUT_SIZE * QSEED3_CIRCULAR_LUTS * sizeof(u32))
+#define QSEED3_SEP_LUT_SIZE \
+ (QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))
+
+/* DPU_SCALER_QSEED3LITE */
+#define QSEED3LITE_COEF_LUT_Y_SEP_BIT 4
+#define QSEED3LITE_COEF_LUT_UV_SEP_BIT 5
+#define QSEED3LITE_COEF_LUT_CTRL 0x4C
+#define QSEED3LITE_COEF_LUT_SWAP_BIT 0
+#define QSEED3LITE_DIR_FILTER_WEIGHT 0x60
+#define QSEED3LITE_FILTERS 2
+#define QSEED3LITE_SEPARABLE_LUTS 10
+#define QSEED3LITE_LUT_SIZE 33
+#define QSEED3LITE_SEP_LUT_SIZE \
+ (QSEED3LITE_LUT_SIZE * QSEED3LITE_SEPARABLE_LUTS * sizeof(u32))
+
+
+void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
+ u32 reg_off,
+ u32 val,
+ const char *name)
+{
+ /* don't need to mutex protect this */
+ if (c->log_mask & dpu_hw_util_log_mask)
+ DPU_DEBUG_DRIVER("[%s:0x%X] <= 0x%X\n",
+ name, reg_off, val);
+ writel_relaxed(val, c->blk_addr + reg_off);
+}
+
+int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off)
+{
+ return readl_relaxed(c->blk_addr + reg_off);
+}
+
+u32 *dpu_hw_util_get_log_mask_ptr(void)
+{
+ return &dpu_hw_util_log_mask;
+}
+
+static void _dpu_hw_setup_scaler3_lut(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset)
+{
+ int i, j, filter;
+ int config_lut = 0x0;
+ unsigned long lut_flags;
+ u32 lut_addr, lut_offset, lut_len;
+ u32 *lut[QSEED3_FILTERS] = {NULL, NULL, NULL, NULL, NULL};
+ static const uint32_t off_tbl[QSEED3_FILTERS][QSEED3_LUT_REGIONS][2] = {
+ {{18, 0x000}, {12, 0x120}, {12, 0x1E0}, {8, 0x2A0} },
+ {{6, 0x320}, {3, 0x3E0}, {3, 0x440}, {3, 0x4A0} },
+ {{6, 0x500}, {3, 0x5c0}, {3, 0x620}, {3, 0x680} },
+ {{6, 0x380}, {3, 0x410}, {3, 0x470}, {3, 0x4d0} },
+ {{6, 0x560}, {3, 0x5f0}, {3, 0x650}, {3, 0x6b0} },
+ };
+
+ lut_flags = (unsigned long) scaler3_cfg->lut_flag;
+ if (test_bit(QSEED3_COEF_LUT_DIR_BIT, &lut_flags) &&
+ (scaler3_cfg->dir_len == QSEED3_DIR_LUT_SIZE)) {
+ lut[0] = scaler3_cfg->dir_lut;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_Y_CIR_BIT, &lut_flags) &&
+ (scaler3_cfg->y_rgb_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+ (scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+ lut[1] = scaler3_cfg->cir_lut +
+ scaler3_cfg->y_rgb_cir_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_UV_CIR_BIT, &lut_flags) &&
+ (scaler3_cfg->uv_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+ (scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+ lut[2] = scaler3_cfg->cir_lut +
+ scaler3_cfg->uv_cir_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->y_rgb_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+ lut[3] = scaler3_cfg->sep_lut +
+ scaler3_cfg->y_rgb_sep_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->uv_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+ lut[4] = scaler3_cfg->sep_lut +
+ scaler3_cfg->uv_sep_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+
+ if (config_lut) {
+ for (filter = 0; filter < QSEED3_FILTERS; filter++) {
+ if (!lut[filter])
+ continue;
+ lut_offset = 0;
+ for (i = 0; i < QSEED3_LUT_REGIONS; i++) {
+ lut_addr = QSEED3_COEF_LUT + offset
+ + off_tbl[filter][i][1];
+ lut_len = off_tbl[filter][i][0] << 2;
+ for (j = 0; j < lut_len; j++) {
+ DPU_REG_WRITE(c,
+ lut_addr,
+ (lut[filter])[lut_offset++]);
+ lut_addr += 4;
+ }
+ }
+ }
+ }
+
+ if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
+ DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0));
+
+}
+
+static void _dpu_hw_setup_scaler3lite_lut(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset)
+{
+ int j, filter;
+ int config_lut = 0x0;
+ unsigned long lut_flags;
+ u32 lut_addr, lut_offset;
+ u32 *lut[QSEED3LITE_FILTERS] = {NULL, NULL};
+ static const uint32_t off_tbl[QSEED3_FILTERS] = { 0x000, 0x200 };
+
+ DPU_REG_WRITE(c, QSEED3LITE_DIR_FILTER_WEIGHT + offset, scaler3_cfg->dir_weight);
+
+ if (!scaler3_cfg->sep_lut)
+ return;
+
+ lut_flags = (unsigned long) scaler3_cfg->lut_flag;
+ if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->y_rgb_sep_lut_idx < QSEED3LITE_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3LITE_SEP_LUT_SIZE)) {
+ lut[0] = scaler3_cfg->sep_lut +
+ scaler3_cfg->y_rgb_sep_lut_idx * QSEED3LITE_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->uv_sep_lut_idx < QSEED3LITE_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3LITE_SEP_LUT_SIZE)) {
+ lut[1] = scaler3_cfg->sep_lut +
+ scaler3_cfg->uv_sep_lut_idx * QSEED3LITE_LUT_SIZE;
+ config_lut = 1;
+ }
+
+ if (config_lut) {
+ for (filter = 0; filter < QSEED3LITE_FILTERS; filter++) {
+ if (!lut[filter])
+ continue;
+ lut_offset = 0;
+ lut_addr = QSEED3_COEF_LUT + offset + off_tbl[filter];
+ for (j = 0; j < QSEED3LITE_LUT_SIZE; j++) {
+ DPU_REG_WRITE(c,
+ lut_addr,
+ (lut[filter])[lut_offset++]);
+ lut_addr += 4;
+ }
+ }
+ }
+
+ if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
+ DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0));
+
+}
+
+static void _dpu_hw_setup_scaler3_de(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_de_cfg *de_cfg, u32 offset)
+{
+ u32 sharp_lvl, sharp_ctl, shape_ctl, de_thr;
+ u32 adjust_a, adjust_b, adjust_c;
+
+ if (!de_cfg->enable)
+ return;
+
+ sharp_lvl = (de_cfg->sharpen_level1 & 0x1FF) |
+ ((de_cfg->sharpen_level2 & 0x1FF) << 16);
+
+ sharp_ctl = ((de_cfg->limit & 0xF) << 9) |
+ ((de_cfg->prec_shift & 0x7) << 13) |
+ ((de_cfg->clip & 0x7) << 16);
+
+ shape_ctl = (de_cfg->thr_quiet & 0xFF) |
+ ((de_cfg->thr_dieout & 0x3FF) << 16);
+
+ de_thr = (de_cfg->thr_low & 0x3FF) |
+ ((de_cfg->thr_high & 0x3FF) << 16);
+
+ adjust_a = (de_cfg->adjust_a[0] & 0x3FF) |
+ ((de_cfg->adjust_a[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_a[2] & 0x3FF) << 20);
+
+ adjust_b = (de_cfg->adjust_b[0] & 0x3FF) |
+ ((de_cfg->adjust_b[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_b[2] & 0x3FF) << 20);
+
+ adjust_c = (de_cfg->adjust_c[0] & 0x3FF) |
+ ((de_cfg->adjust_c[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_c[2] & 0x3FF) << 20);
+
+ DPU_REG_WRITE(c, QSEED3_DE_SHARPEN + offset, sharp_lvl);
+ DPU_REG_WRITE(c, QSEED3_DE_SHARPEN_CTL + offset, sharp_ctl);
+ DPU_REG_WRITE(c, QSEED3_DE_SHAPE_CTL + offset, shape_ctl);
+ DPU_REG_WRITE(c, QSEED3_DE_THRESHOLD + offset, de_thr);
+ DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_0 + offset, adjust_a);
+ DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_1 + offset, adjust_b);
+ DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_2 + offset, adjust_c);
+
+}
+
+void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg,
+ u32 scaler_offset, u32 scaler_version,
+ const struct dpu_format *format)
+{
+ u32 op_mode = 0;
+ u32 phase_init, preload, src_y_rgb, src_uv, dst;
+
+ if (!scaler3_cfg->enable)
+ goto end;
+
+ op_mode |= BIT(0);
+ op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16;
+
+ if (format && DPU_FORMAT_IS_YUV(format)) {
+ op_mode |= BIT(12);
+ op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24;
+ }
+
+ op_mode |= (scaler3_cfg->blend_cfg & 1) << 31;
+ op_mode |= (scaler3_cfg->dir_en) ? BIT(4) : 0;
+
+ preload =
+ ((scaler3_cfg->preload_x[0] & 0x7F) << 0) |
+ ((scaler3_cfg->preload_y[0] & 0x7F) << 8) |
+ ((scaler3_cfg->preload_x[1] & 0x7F) << 16) |
+ ((scaler3_cfg->preload_y[1] & 0x7F) << 24);
+
+ src_y_rgb = (scaler3_cfg->src_width[0] & 0x1FFFF) |
+ ((scaler3_cfg->src_height[0] & 0x1FFFF) << 16);
+
+ src_uv = (scaler3_cfg->src_width[1] & 0x1FFFF) |
+ ((scaler3_cfg->src_height[1] & 0x1FFFF) << 16);
+
+ dst = (scaler3_cfg->dst_width & 0x1FFFF) |
+ ((scaler3_cfg->dst_height & 0x1FFFF) << 16);
+
+ if (scaler3_cfg->de.enable) {
+ _dpu_hw_setup_scaler3_de(c, &scaler3_cfg->de, scaler_offset);
+ op_mode |= BIT(8);
+ }
+
+ if (scaler3_cfg->lut_flag) {
+ if (scaler_version < 0x2004)
+ _dpu_hw_setup_scaler3_lut(c, scaler3_cfg, scaler_offset);
+ else
+ _dpu_hw_setup_scaler3lite_lut(c, scaler3_cfg, scaler_offset);
+ }
+
+ if (scaler_version == 0x1002) {
+ phase_init =
+ ((scaler3_cfg->init_phase_x[0] & 0x3F) << 0) |
+ ((scaler3_cfg->init_phase_y[0] & 0x3F) << 8) |
+ ((scaler3_cfg->init_phase_x[1] & 0x3F) << 16) |
+ ((scaler3_cfg->init_phase_y[1] & 0x3F) << 24);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT + scaler_offset, phase_init);
+ } else {
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_H + scaler_offset,
+ scaler3_cfg->init_phase_x[0] & 0x1FFFFF);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_V + scaler_offset,
+ scaler3_cfg->init_phase_y[0] & 0x1FFFFF);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_H + scaler_offset,
+ scaler3_cfg->init_phase_x[1] & 0x1FFFFF);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_V + scaler_offset,
+ scaler3_cfg->init_phase_y[1] & 0x1FFFFF);
+ }
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_H + scaler_offset,
+ scaler3_cfg->phase_step_x[0] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_V + scaler_offset,
+ scaler3_cfg->phase_step_y[0] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_H + scaler_offset,
+ scaler3_cfg->phase_step_x[1] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_V + scaler_offset,
+ scaler3_cfg->phase_step_y[1] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PRELOAD + scaler_offset, preload);
+
+ DPU_REG_WRITE(c, QSEED3_SRC_SIZE_Y_RGB_A + scaler_offset, src_y_rgb);
+
+ DPU_REG_WRITE(c, QSEED3_SRC_SIZE_UV + scaler_offset, src_uv);
+
+ DPU_REG_WRITE(c, QSEED3_DST_SIZE + scaler_offset, dst);
+
+end:
+ if (format && !DPU_FORMAT_IS_DX(format))
+ op_mode |= BIT(14);
+
+ if (format && format->alpha_enable) {
+ op_mode |= BIT(10);
+ if (scaler_version == 0x1002)
+ op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x1) << 30;
+ else
+ op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x3) << 29;
+ }
+
+ DPU_REG_WRITE(c, QSEED3_OP_MODE + scaler_offset, op_mode);
+}
+
+u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
+ u32 scaler_offset)
+{
+ return DPU_REG_READ(c, QSEED3_HW_VERSION + scaler_offset);
+}
+
+void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
+ u32 csc_reg_off,
+ const struct dpu_csc_cfg *data, bool csc10)
+{
+ static const u32 matrix_shift = 7;
+ u32 clamp_shift = csc10 ? 16 : 8;
+ u32 val;
+
+ /* matrix coeff - convert S15.16 to S4.9 */
+ val = ((data->csc_mv[0] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[1] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off, val);
+ val = ((data->csc_mv[2] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[3] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off + 0x4, val);
+ val = ((data->csc_mv[4] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[5] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off + 0x8, val);
+ val = ((data->csc_mv[6] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[7] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off + 0xc, val);
+ val = (data->csc_mv[8] >> matrix_shift) & 0x1FFF;
+ DPU_REG_WRITE(c, csc_reg_off + 0x10, val);
+
+ /* Pre clamp */
+ val = (data->csc_pre_lv[0] << clamp_shift) | data->csc_pre_lv[1];
+ DPU_REG_WRITE(c, csc_reg_off + 0x14, val);
+ val = (data->csc_pre_lv[2] << clamp_shift) | data->csc_pre_lv[3];
+ DPU_REG_WRITE(c, csc_reg_off + 0x18, val);
+ val = (data->csc_pre_lv[4] << clamp_shift) | data->csc_pre_lv[5];
+ DPU_REG_WRITE(c, csc_reg_off + 0x1c, val);
+
+ /* Post clamp */
+ val = (data->csc_post_lv[0] << clamp_shift) | data->csc_post_lv[1];
+ DPU_REG_WRITE(c, csc_reg_off + 0x20, val);
+ val = (data->csc_post_lv[2] << clamp_shift) | data->csc_post_lv[3];
+ DPU_REG_WRITE(c, csc_reg_off + 0x24, val);
+ val = (data->csc_post_lv[4] << clamp_shift) | data->csc_post_lv[5];
+ DPU_REG_WRITE(c, csc_reg_off + 0x28, val);
+
+ /* Pre-Bias */
+ DPU_REG_WRITE(c, csc_reg_off + 0x2c, data->csc_pre_bv[0]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x30, data->csc_pre_bv[1]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x34, data->csc_pre_bv[2]);
+
+ /* Post-Bias */
+ DPU_REG_WRITE(c, csc_reg_off + 0x38, data->csc_post_bv[0]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
+}
+
+/**
+ * _dpu_hw_get_qos_lut - get LUT mapping based on fill level
+ * @tbl: Pointer to LUT table
+ * @total_fl: fill level
+ * Return: LUT setting corresponding to the fill level
+ */
+u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
+ u32 total_fl)
+{
+ int i;
+
+ if (!tbl || !tbl->nentry || !tbl->entries)
+ return 0;
+
+ for (i = 0; i < tbl->nentry; i++)
+ if (total_fl <= tbl->entries[i].fl)
+ return tbl->entries[i].lut;
+
+ /* if last fl is zero, use as default */
+ if (!tbl->entries[i-1].fl)
+ return tbl->entries[i-1].lut;
+
+ return 0;
+}
+
+/*
+ * note: Aside from encoders, input_sel should be set to 0x0 by default
+ */
+void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c,
+ u32 misr_ctrl_offset, u8 input_sel)
+{
+ u32 config = 0;
+
+ DPU_REG_WRITE(c, misr_ctrl_offset, MISR_CTRL_STATUS_CLEAR);
+
+ /* Clear old MISR value (in case it's read before a new value is calculated) */
+ wmb();
+
+ config = MISR_FRAME_COUNT | MISR_CTRL_ENABLE | MISR_CTRL_FREE_RUN_MASK |
+ ((input_sel & 0xF) << 24);
+ DPU_REG_WRITE(c, misr_ctrl_offset, config);
+}
+
+int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c,
+ u32 misr_ctrl_offset,
+ u32 misr_signature_offset,
+ u32 *misr_value)
+{
+ u32 ctrl = 0;
+
+ if (!misr_value)
+ return -EINVAL;
+
+ ctrl = DPU_REG_READ(c, misr_ctrl_offset);
+
+ if (!(ctrl & MISR_CTRL_ENABLE))
+ return -ENODATA;
+
+ if (!(ctrl & MISR_CTRL_STATUS))
+ return -EINVAL;
+
+ *misr_value = DPU_REG_READ(c, misr_signature_offset);
+
+ return 0;
+}
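Annotation, not in the patch: a worked example of the S15.16 to S4.9
coefficient conversion in dpu_hw_csc_setup() above. Shifting right by
matrix_shift (7) moves the binary point from .16 to .9, and the 0x1FFF mask
keeps a 13-bit two's-complement value (1 sign + 3 integer + 9 fraction bits).

    #include <assert.h>
    #include <stdint.h>

    /* mirrors the coefficient shift/mask in dpu_hw_csc_setup() */
    static uint32_t s15_16_to_s4_9(uint32_t coeff)
    {
            return (coeff >> 7) & 0x1FFF;
    }

    int main(void)
    {
            /* 1.0 in S15.16 is 0x00010000; 1.0 in S4.9 is 1 << 9 */
            assert(s15_16_to_s4_9(0x00010000) == 0x0200);
            /* -0.5 is 0xFFFF8000; the low 13 bits stay valid two's
             * complement: 0x1F00 == -256, and -256/512 == -0.5 */
            assert(s15_16_to_s4_9(0xFFFF8000) == 0x1F00);
            return 0;
    }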
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
new file mode 100644
index 000000000..4ae2a4343
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -0,0 +1,360 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_UTIL_H
+#define _DPU_HW_UTIL_H
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_catalog.h"
+
+#define REG_MASK(n) ((BIT(n)) - 1)
+#define MISR_FRAME_COUNT 0x1
+#define MISR_CTRL_ENABLE BIT(8)
+#define MISR_CTRL_STATUS BIT(9)
+#define MISR_CTRL_STATUS_CLEAR BIT(10)
+#define MISR_CTRL_FREE_RUN_MASK BIT(31)
+
+/*
+ * This is the common struct maintained by each sub block
+ * for mapping the register offsets in this block to the
+ * absoulute IO address
+ * @blk_addr: hw block register mapped address
+ * @log_mask: log mask for this block
+ */
+struct dpu_hw_blk_reg_map {
+ void __iomem *blk_addr;
+ u32 log_mask;
+};
+
+/**
+ * struct dpu_hw_blk - opaque hardware block object
+ */
+struct dpu_hw_blk {
+ /* opaque */
+};
+
+/**
+ * struct dpu_hw_scaler3_de_cfg : QSEEDv3 detail enhancer configuration
+ * @enable: detail enhancer enable/disable
+ * @sharpen_level1: sharpening strength for noise
+ * @sharpen_level2: sharpening strength for signal
+ * @clip: clip shift
+ * @limit: limit value
+ * @thr_quiet: quiet threshold
+ * @thr_dieout: dieout threshold
+ * @thr_low: low threshold
+ * @thr_high: high threshold
+ * @prec_shift: precision shift
+ * @adjust_a: A-coefficients for mapping curve
+ * @adjust_b: B-coefficients for mapping curve
+ * @adjust_c: C-coefficients for mapping curve
+ */
+struct dpu_hw_scaler3_de_cfg {
+ u32 enable;
+ int16_t sharpen_level1;
+ int16_t sharpen_level2;
+ uint16_t clip;
+ uint16_t limit;
+ uint16_t thr_quiet;
+ uint16_t thr_dieout;
+ uint16_t thr_low;
+ uint16_t thr_high;
+ uint16_t prec_shift;
+ int16_t adjust_a[DPU_MAX_DE_CURVES];
+ int16_t adjust_b[DPU_MAX_DE_CURVES];
+ int16_t adjust_c[DPU_MAX_DE_CURVES];
+};
+
+
+/**
+ * struct dpu_hw_scaler3_cfg : QSEEDv3 configuration
+ * @enable: scaler enable
+ * @dir_en: direction detection block enable
+ * @init_phase_x: horizontal initial phase
+ * @phase_step_x: horizontal phase step
+ * @init_phase_y: vertical initial phase
+ * @phase_step_y: vertical phase step
+ * @preload_x: horizontal preload value
+ * @preload_y: vertical preload value
+ * @src_width: source width
+ * @src_height: source height
+ * @dst_width: destination width
+ * @dst_height: destination height
+ * @y_rgb_filter_cfg: y/rgb plane filter configuration
+ * @uv_filter_cfg: uv plane filter configuration
+ * @alpha_filter_cfg: alpha filter configuration
+ * @blend_cfg: blend coefficients configuration
+ * @lut_flag: scaler LUT update flags
+ * 0x1 swap LUT bank
+ * 0x2 update 2D filter LUT
+ * 0x4 update y circular filter LUT
+ * 0x8 update uv circular filter LUT
+ * 0x10 update y separable filter LUT
+ * 0x20 update uv separable filter LUT
+ * @dir_lut_idx: 2D filter LUT index
+ * @y_rgb_cir_lut_idx: y circular filter LUT index
+ * @uv_cir_lut_idx: uv circular filter LUT index
+ * @y_rgb_sep_lut_idx: y separable filter LUT index
+ * @uv_sep_lut_idx: uv separable filter LUT index
+ * @dir_lut: pointer to 2D LUT
+ * @cir_lut: pointer to circular filter LUT
+ * @sep_lut: pointer to separable filter LUT
+ * @de: detail enhancer configuration
+ * @dir_weight: directional weight
+ */
+struct dpu_hw_scaler3_cfg {
+ u32 enable;
+ u32 dir_en;
+ int32_t init_phase_x[DPU_MAX_PLANES];
+ int32_t phase_step_x[DPU_MAX_PLANES];
+ int32_t init_phase_y[DPU_MAX_PLANES];
+ int32_t phase_step_y[DPU_MAX_PLANES];
+
+ u32 preload_x[DPU_MAX_PLANES];
+ u32 preload_y[DPU_MAX_PLANES];
+ u32 src_width[DPU_MAX_PLANES];
+ u32 src_height[DPU_MAX_PLANES];
+
+ u32 dst_width;
+ u32 dst_height;
+
+ u32 y_rgb_filter_cfg;
+ u32 uv_filter_cfg;
+ u32 alpha_filter_cfg;
+ u32 blend_cfg;
+
+ u32 lut_flag;
+ u32 dir_lut_idx;
+
+ u32 y_rgb_cir_lut_idx;
+ u32 uv_cir_lut_idx;
+ u32 y_rgb_sep_lut_idx;
+ u32 uv_sep_lut_idx;
+ u32 *dir_lut;
+ size_t dir_len;
+ u32 *cir_lut;
+ size_t cir_len;
+ u32 *sep_lut;
+ size_t sep_len;
+
+ /*
+ * Detail enhancer settings
+ */
+ struct dpu_hw_scaler3_de_cfg de;
+
+ u32 dir_weight;
+};
+
+/**
+ * struct dpu_drm_pix_ext_v1 - version 1 of pixel ext structure
+ * @num_ext_pxls_lr: Number of total horizontal pixels
+ * @num_ext_pxls_tb: Number of total vertical lines
+ * @left_ftch: Number of extra pixels to overfetch from left
+ * @right_ftch: Number of extra pixels to overfetch from right
+ * @top_ftch: Number of extra lines to overfetch from top
+ * @btm_ftch: Number of extra lines to overfetch from bottom
+ * @left_rpt: Number of extra pixels to repeat from left
+ * @right_rpt: Number of extra pixels to repeat from right
+ * @top_rpt: Number of extra lines to repeat from top
+ * @btm_rpt: Number of extra lines to repeat from bottom
+ */
+struct dpu_drm_pix_ext_v1 {
+ /*
+ * Number of pixels of extension in the left, right, top and
+ * bottom directions for all color components.
+ */
+ int32_t num_ext_pxls_lr[DPU_MAX_PLANES];
+ int32_t num_ext_pxls_tb[DPU_MAX_PLANES];
+
+ /*
+ * Number of pixels that need to be overfetched from the source
+ * image in the left, right, top and bottom directions for scaling.
+ */
+ int32_t left_ftch[DPU_MAX_PLANES];
+ int32_t right_ftch[DPU_MAX_PLANES];
+ int32_t top_ftch[DPU_MAX_PLANES];
+ int32_t btm_ftch[DPU_MAX_PLANES];
+ /*
+ * Number of pixels that need to be repeated in the left, right,
+ * top and bottom directions for scaling.
+ */
+ int32_t left_rpt[DPU_MAX_PLANES];
+ int32_t right_rpt[DPU_MAX_PLANES];
+ int32_t top_rpt[DPU_MAX_PLANES];
+ int32_t btm_rpt[DPU_MAX_PLANES];
+
+};
+
+/**
+ * struct dpu_drm_de_v1 - version 1 of detail enhancer structure
+ * @enable: Enables/disables detail enhancer
+ * @sharpen_level1: Sharpening strength for noise
+ * @sharpen_level2: Sharpening strength for context
+ * @clip: Clip coefficient
+ * @limit: Detail enhancer limit factor
+ * @thr_quiet: Quiet zone threshold
+ * @thr_dieout: Die-out zone threshold
+ * @thr_low: Linear zone left threshold
+ * @thr_high: Linear zone right threshold
+ * @prec_shift: Detail enhancer precision
+ * @adjust_a: Mapping curves A coefficients
+ * @adjust_b: Mapping curves B coefficients
+ * @adjust_c: Mapping curves C coefficients
+ */
+struct dpu_drm_de_v1 {
+ uint32_t enable;
+ int16_t sharpen_level1;
+ int16_t sharpen_level2;
+ uint16_t clip;
+ uint16_t limit;
+ uint16_t thr_quiet;
+ uint16_t thr_dieout;
+ uint16_t thr_low;
+ uint16_t thr_high;
+ uint16_t prec_shift;
+ int16_t adjust_a[DPU_MAX_DE_CURVES];
+ int16_t adjust_b[DPU_MAX_DE_CURVES];
+ int16_t adjust_c[DPU_MAX_DE_CURVES];
+};
+
+/**
+ * struct dpu_drm_scaler_v2 - version 2 of struct dpu_drm_scaler
+ * @enable: Scaler enable
+ * @dir_en: Direction detection block enable
+ * @pe: Pixel extension settings
+ * @horz_decimate: Horizontal decimation factor
+ * @vert_decimate: Vertical decimation factor
+ * @init_phase_x: Initial scaler phase values for x
+ * @phase_step_x: Phase step values for x
+ * @init_phase_y: Initial scaler phase values for y
+ * @phase_step_y: Phase step values for y
+ * @preload_x: Horizontal preload value
+ * @preload_y: Vertical preload value
+ * @src_width: Source width
+ * @src_height: Source height
+ * @dst_width: Destination width
+ * @dst_height: Destination height
+ * @y_rgb_filter_cfg: Y/RGB plane filter configuration
+ * @uv_filter_cfg: UV plane filter configuration
+ * @alpha_filter_cfg: Alpha filter configuration
+ * @blend_cfg: Selection of blend coefficients
+ * @lut_flag: LUT configuration flags
+ * @dir_lut_idx: 2d 4x4 LUT index
+ * @y_rgb_cir_lut_idx: Y/RGB circular LUT index
+ * @uv_cir_lut_idx: UV circular LUT index
+ * @y_rgb_sep_lut_idx: Y/RGB separable LUT index
+ * @uv_sep_lut_idx: UV separable LUT index
+ * @de: Detail enhancer settings
+ */
+struct dpu_drm_scaler_v2 {
+ /*
+ * General definitions
+ */
+ uint32_t enable;
+ uint32_t dir_en;
+
+ /*
+ * Pix ext settings
+ */
+ struct dpu_drm_pix_ext_v1 pe;
+
+ /*
+ * Decimation settings
+ */
+ uint32_t horz_decimate;
+ uint32_t vert_decimate;
+
+ /*
+ * Phase settings
+ */
+ int32_t init_phase_x[DPU_MAX_PLANES];
+ int32_t phase_step_x[DPU_MAX_PLANES];
+ int32_t init_phase_y[DPU_MAX_PLANES];
+ int32_t phase_step_y[DPU_MAX_PLANES];
+
+ uint32_t preload_x[DPU_MAX_PLANES];
+ uint32_t preload_y[DPU_MAX_PLANES];
+ uint32_t src_width[DPU_MAX_PLANES];
+ uint32_t src_height[DPU_MAX_PLANES];
+
+ uint32_t dst_width;
+ uint32_t dst_height;
+
+ uint32_t y_rgb_filter_cfg;
+ uint32_t uv_filter_cfg;
+ uint32_t alpha_filter_cfg;
+ uint32_t blend_cfg;
+
+ uint32_t lut_flag;
+ uint32_t dir_lut_idx;
+
+ /* for Y(RGB) and UV planes*/
+ uint32_t y_rgb_cir_lut_idx;
+ uint32_t uv_cir_lut_idx;
+ uint32_t y_rgb_sep_lut_idx;
+ uint32_t uv_sep_lut_idx;
+
+ /*
+ * Detail enhancer settings
+ */
+ struct dpu_drm_de_v1 de;
+};
+
+/**
+ * struct dpu_hw_cdp_cfg : CDP configuration
+ * @enable: true to enable CDP
+ * @ubwc_meta_enable: true to enable ubwc metadata preload
+ * @tile_amortize_enable: true to enable amortization control for tile format
+ * @preload_ahead: number of requests to preload ahead
+ * DPU_*_CDP_PRELOAD_AHEAD_32,
+ * DPU_*_CDP_PRELOAD_AHEAD_64
+ */
+struct dpu_hw_cdp_cfg {
+ bool enable;
+ bool ubwc_meta_enable;
+ bool tile_amortize_enable;
+ u32 preload_ahead;
+};
+
+u32 *dpu_hw_util_get_log_mask_ptr(void);
+
+void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
+ u32 reg_off,
+ u32 val,
+ const char *name);
+int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off);
+
+#define DPU_REG_WRITE(c, off, val) dpu_reg_write(c, off, val, #off)
+#define DPU_REG_READ(c, off) dpu_reg_read(c, off)
+
+void *dpu_hw_util_get_dir(void);
+
+void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg,
+ u32 scaler_offset, u32 scaler_version,
+ const struct dpu_format *format);
+
+u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
+ u32 scaler_offset);
+
+void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
+ u32 csc_reg_off,
+ const struct dpu_csc_cfg *data, bool csc10);
+
+u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
+ u32 total_fl);
+
+void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c,
+ u32 misr_ctrl_offset, u8 input_sel);
+
+int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c,
+ u32 misr_ctrl_offset,
+ u32 misr_signature_offset,
+ u32 *misr_value);
+
+#endif /* _DPU_HW_UTIL_H */
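Aside for reviewers, not part of the patch: a hypothetical table making the
_dpu_hw_get_qos_lut() walk concrete. The entry layout follows
dpu_qos_lut_entry/dpu_qos_lut_tbl from the catalog; the first entry whose fl
is >= the requested fill level wins, and a terminating fl == 0 entry acts as
the catch-all default.

    /* hypothetical table; not a real catalog entry */
    static const struct dpu_qos_lut_entry example_entries[] = {
            { .fl = 4,  .lut = 0x1122 }, /* shallow fill levels */
            { .fl = 10, .lut = 0x3344 }, /* mid fill levels */
            { .fl = 0,  .lut = 0x5566 }, /* default for anything larger */
    };

    static const struct dpu_qos_lut_tbl example_tbl = {
            .nentry  = ARRAY_SIZE(example_entries),
            .entries = example_entries,
    };

    /*
     * _dpu_hw_get_qos_lut(&example_tbl, 3)  returns 0x1122
     * _dpu_hw_get_qos_lut(&example_tbl, 10) returns 0x3344
     * _dpu_hw_get_qos_lut(&example_tbl, 64) returns 0x5566 (fl == 0 default)
     */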
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
new file mode 100644
index 000000000..16c56e240
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_vbif.h"
+
+#define VBIF_VERSION 0x0000
+#define VBIF_CLK_FORCE_CTRL0 0x0008
+#define VBIF_CLK_FORCE_CTRL1 0x000C
+#define VBIF_QOS_REMAP_00 0x0020
+#define VBIF_QOS_REMAP_01 0x0024
+#define VBIF_QOS_REMAP_10 0x0028
+#define VBIF_QOS_REMAP_11 0x002C
+#define VBIF_WRITE_GATHER_EN 0x00AC
+#define VBIF_IN_RD_LIM_CONF0 0x00B0
+#define VBIF_IN_RD_LIM_CONF1 0x00B4
+#define VBIF_IN_RD_LIM_CONF2 0x00B8
+#define VBIF_IN_WR_LIM_CONF0 0x00C0
+#define VBIF_IN_WR_LIM_CONF1 0x00C4
+#define VBIF_IN_WR_LIM_CONF2 0x00C8
+#define VBIF_OUT_RD_LIM_CONF0 0x00D0
+#define VBIF_OUT_WR_LIM_CONF0 0x00D4
+#define VBIF_OUT_AXI_AMEMTYPE_CONF0 0x0160
+#define VBIF_OUT_AXI_AMEMTYPE_CONF1 0x0164
+#define VBIF_XIN_PND_ERR 0x0190
+#define VBIF_XIN_SRC_ERR 0x0194
+#define VBIF_XIN_CLR_ERR 0x019C
+#define VBIF_XIN_HALT_CTRL0 0x0200
+#define VBIF_XIN_HALT_CTRL1 0x0204
+#define VBIF_XINL_QOS_RP_REMAP_000 0x0550
+#define VBIF_XINL_QOS_LVL_REMAP_000(vbif) (VBIF_XINL_QOS_RP_REMAP_000 + (vbif)->cap->qos_rp_remap_size)
+
+static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
+ u32 *pnd_errors, u32 *src_errors)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 pnd, src;
+
+ if (!vbif)
+ return;
+ c = &vbif->hw;
+ pnd = DPU_REG_READ(c, VBIF_XIN_PND_ERR);
+ src = DPU_REG_READ(c, VBIF_XIN_SRC_ERR);
+
+ if (pnd_errors)
+ *pnd_errors = pnd;
+ if (src_errors)
+ *src_errors = src;
+
+ DPU_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src);
+}
+
+static void dpu_hw_set_mem_type(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 value)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_off;
+ u32 bit_off;
+ u32 reg_val;
+
+ /*
+ * Assume 4 bits per bit field, 8 fields per 32-bit register so
+ * 16 bit fields maximum across two registers
+ */
+ if (!vbif || xin_id >= MAX_XIN_COUNT || xin_id >= 16)
+ return;
+
+ c = &vbif->hw;
+
+ if (xin_id >= 8) {
+ xin_id -= 8;
+ reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
+ } else {
+ reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
+ }
+ bit_off = (xin_id & 0x7) * 4;
+ reg_val = DPU_REG_READ(c, reg_off);
+ reg_val &= ~(0x7 << bit_off);
+ reg_val |= (value & 0x7) << bit_off;
+ DPU_REG_WRITE(c, reg_off, reg_val);
+}
+
+static void dpu_hw_set_limit_conf(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd, u32 limit)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+ u32 reg_off;
+ u32 bit_off;
+
+ if (rd)
+ reg_off = VBIF_IN_RD_LIM_CONF0;
+ else
+ reg_off = VBIF_IN_WR_LIM_CONF0;
+
+ reg_off += (xin_id / 4) * 4;
+ bit_off = (xin_id % 4) * 8;
+ reg_val = DPU_REG_READ(c, reg_off);
+ reg_val &= ~(0xFF << bit_off);
+ reg_val |= (limit) << bit_off;
+ DPU_REG_WRITE(c, reg_off, reg_val);
+}
+
+static u32 dpu_hw_get_limit_conf(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+ u32 reg_off;
+ u32 bit_off;
+ u32 limit;
+
+ if (rd)
+ reg_off = VBIF_IN_RD_LIM_CONF0;
+ else
+ reg_off = VBIF_IN_WR_LIM_CONF0;
+
+ reg_off += (xin_id / 4) * 4;
+ bit_off = (xin_id % 4) * 8;
+ reg_val = DPU_REG_READ(c, reg_off);
+ limit = (reg_val >> bit_off) & 0xFF;
+
+ return limit;
+}
+
+static void dpu_hw_set_halt_ctrl(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool enable)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+
+ reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL0);
+
+ if (enable)
+ reg_val |= BIT(xin_id);
+ else
+ reg_val &= ~BIT(xin_id);
+
+ DPU_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
+}
+
+static bool dpu_hw_get_halt_ctrl(struct dpu_hw_vbif *vbif,
+ u32 xin_id)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+
+ reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL1);
+
+ return (reg_val & BIT(xin_id)) ? true : false;
+}
+
+static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 level, u32 remap_level)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_lvl, reg_val, reg_val_lvl, mask, reg_high, reg_shift;
+
+ if (!vbif)
+ return;
+
+ c = &vbif->hw;
+
+ reg_lvl = VBIF_XINL_QOS_LVL_REMAP_000(vbif);
+ reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
+ reg_shift = (xin_id & 0x7) * 4;
+
+ reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
+ reg_val_lvl = DPU_REG_READ(c, reg_lvl + reg_high);
+
+ mask = 0x7 << reg_shift;
+
+ reg_val &= ~mask;
+ reg_val |= (remap_level << reg_shift) & mask;
+
+ reg_val_lvl &= ~mask;
+ reg_val_lvl |= (remap_level << reg_shift) & mask;
+
+ DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
+ DPU_REG_WRITE(c, reg_lvl + reg_high, reg_val_lvl);
+}
+
+static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_val;
+
+ if (!vbif || xin_id >= MAX_XIN_COUNT)
+ return;
+
+ c = &vbif->hw;
+
+ reg_val = DPU_REG_READ(c, VBIF_WRITE_GATHER_EN);
+ reg_val |= BIT(xin_id);
+ DPU_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val);
+}
+
+static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops,
+ unsigned long cap)
+{
+ ops->set_limit_conf = dpu_hw_set_limit_conf;
+ ops->get_limit_conf = dpu_hw_get_limit_conf;
+ ops->set_halt_ctrl = dpu_hw_set_halt_ctrl;
+ ops->get_halt_ctrl = dpu_hw_get_halt_ctrl;
+ if (test_bit(DPU_VBIF_QOS_REMAP, &cap))
+ ops->set_qos_remap = dpu_hw_set_qos_remap;
+ ops->set_mem_type = dpu_hw_set_mem_type;
+ ops->clear_errors = dpu_hw_clear_errors;
+ ops->set_write_gather_en = dpu_hw_set_write_gather_en;
+}
+
+static const struct dpu_vbif_cfg *_top_offset(enum dpu_vbif vbif,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->vbif_count; i++) {
+ if (vbif == m->vbif[i].id) {
+ b->blk_addr = addr + m->vbif[i].base;
+ b->log_mask = DPU_DBG_MASK_VBIF;
+ return &m->vbif[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_vbif *c;
+ const struct dpu_vbif_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _top_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Assign ops
+ */
+ c->idx = idx;
+ c->cap = cfg;
+ _setup_vbif_ops(&c->ops, c->cap->features);
+
+ /* no need to register sub-range in dpu dbg, dump entire vbif io base */
+
+ return c;
+}
+
+void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif)
+{
+ kfree(vbif);
+}
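Annotation, not in the patch: the field arithmetic in
dpu_hw_set_limit_conf()/dpu_hw_get_limit_conf() above packs four 8-bit client
fields into each 32-bit limit register. A hypothetical helper mirroring it:

    static void example_limit_field(unsigned int xin_id,
                                    unsigned int *reg_off,
                                    unsigned int *bit_off)
    {
            *reg_off = 0x00B0 + (xin_id / 4) * 4; /* from VBIF_IN_RD_LIM_CONF0 */
            *bit_off = (xin_id % 4) * 8;          /* four 8-bit fields per reg */
    }

    /* e.g. xin_id 6 -> reg_off 0x00B4 (CONF1), bit_off 16, bits [23:16] */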
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
new file mode 100644
index 000000000..6417aa28d
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_VBIF_H
+#define _DPU_HW_VBIF_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+struct dpu_hw_vbif;
+
+/**
+ * struct dpu_hw_vbif_ops : Interface to the VBIF hardware driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_vbif_ops {
+ /**
+ * set_limit_conf - set transaction limit config
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @rd: true for read limit; false for write limit
+ * @limit: outstanding transaction limit
+ */
+ void (*set_limit_conf)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd, u32 limit);
+
+ /**
+ * get_limit_conf - get transaction limit config
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @rd: true for read limit; false for write limit
+ * @return: outstanding transaction limit
+ */
+ u32 (*get_limit_conf)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd);
+
+ /**
+ * set_halt_ctrl - set halt control
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @enable: halt control enable
+ */
+ void (*set_halt_ctrl)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool enable);
+
+ /**
+ * get_halt_ctrl - get halt control
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @return: halt control enable
+ */
+ bool (*get_halt_ctrl)(struct dpu_hw_vbif *vbif,
+ u32 xin_id);
+
+ /**
+ * set_qos_remap - set QoS priority remap
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @level: priority level
+ * @remap_level: remapped level
+ */
+ void (*set_qos_remap)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 level, u32 remap_level);
+
+ /**
+ * set_mem_type - set memory type
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @value: memory type value
+ */
+ void (*set_mem_type)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 value);
+
+ /**
+ * clear_errors - clear any vbif errors
+ * This function clears any detected pending/source errors
+ * on the VBIF interface, and optionally returns the detected
+ * error mask(s).
+ * @vbif: vbif context driver
+ * @pnd_errors: pointer to pending error reporting variable
+ * @src_errors: pointer to source error reporting variable
+ */
+ void (*clear_errors)(struct dpu_hw_vbif *vbif,
+ u32 *pnd_errors, u32 *src_errors);
+
+ /**
+ * set_write_gather_en - set write_gather enable
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ */
+ void (*set_write_gather_en)(struct dpu_hw_vbif *vbif, u32 xin_id);
+};
+
+struct dpu_hw_vbif {
+ /* base */
+ struct dpu_hw_blk_reg_map hw;
+
+ /* vbif */
+ enum dpu_vbif idx;
+ const struct dpu_vbif_cfg *cap;
+
+ /* ops */
+ struct dpu_hw_vbif_ops ops;
+};
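+
+/*
+ * Example (illustrative): _setup_vbif_ops() is handed the block's feature
+ * mask, so a caller should NULL-check an op before invoking it through
+ * the handle, e.g.:
+ *
+ *	if (vbif->ops.set_qos_remap)
+ *		vbif->ops.set_qos_remap(vbif, xin_id, level, remap_level);
+ */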
+
+/**
+ * dpu_hw_vbif_init - initializes the vbif driver for the passed interface idx
+ * @idx: Interface index for which driver object is required
+ * @addr: Mapped register io address of MDSS
+ * @m: Pointer to mdss catalog data
+ */
+struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif);
+
+#endif /*_DPU_HW_VBIF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
new file mode 100644
index 000000000..a3e413d27
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved
+ */
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_wb.h"
+#include "dpu_formats.h"
+#include "dpu_kms.h"
+
+#define WB_DST_FORMAT 0x000
+#define WB_DST_OP_MODE 0x004
+#define WB_DST_PACK_PATTERN 0x008
+#define WB_DST0_ADDR 0x00C
+#define WB_DST1_ADDR 0x010
+#define WB_DST2_ADDR 0x014
+#define WB_DST3_ADDR 0x018
+#define WB_DST_YSTRIDE0 0x01C
+#define WB_DST_YSTRIDE1 0x020
+#define WB_DST_DITHER_BITDEPTH 0x024
+#define WB_DST_MATRIX_ROW0 0x030
+#define WB_DST_MATRIX_ROW1 0x034
+#define WB_DST_MATRIX_ROW2 0x038
+#define WB_DST_MATRIX_ROW3 0x03C
+#define WB_DST_WRITE_CONFIG 0x048
+#define WB_ROTATION_DNSCALER 0x050
+#define WB_ROTATOR_PIPE_DOWNSCALER 0x054
+#define WB_N16_INIT_PHASE_X_C03 0x060
+#define WB_N16_INIT_PHASE_X_C12 0x064
+#define WB_N16_INIT_PHASE_Y_C03 0x068
+#define WB_N16_INIT_PHASE_Y_C12 0x06C
+#define WB_OUT_SIZE 0x074
+#define WB_ALPHA_X_VALUE 0x078
+#define WB_DANGER_LUT 0x084
+#define WB_SAFE_LUT 0x088
+#define WB_QOS_CTRL 0x090
+#define WB_CREQ_LUT_0 0x098
+#define WB_CREQ_LUT_1 0x09C
+#define WB_UBWC_STATIC_CTRL 0x144
+#define WB_MUX 0x150
+#define WB_CROP_CTRL 0x154
+#define WB_CROP_OFFSET 0x158
+#define WB_CSC_BASE 0x260
+#define WB_DST_ADDR_SW_STATUS 0x2B0
+#define WB_CDP_CNTL 0x2B4
+#define WB_OUT_IMAGE_SIZE 0x2C0
+#define WB_OUT_XY 0x2C4
+
+/* WB_QOS_CTRL */
+#define WB_QOS_CTRL_DANGER_SAFE_EN BIT(0)
+
+static const struct dpu_wb_cfg *_wb_offset(enum dpu_wb wb,
+ const struct dpu_mdss_cfg *m, void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->wb_count; i++) {
+ if (wb == m->wb[i].id) {
+ b->blk_addr = addr + m->wb[i].base;
+ b->log_mask = DPU_DBG_MASK_WB;
+ return &m->wb[i];
+ }
+ }
+ return ERR_PTR(-EINVAL);
+}
+
+static void dpu_hw_wb_setup_outaddress(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_cfg *data)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ DPU_REG_WRITE(c, WB_DST0_ADDR, data->dest.plane_addr[0]);
+ DPU_REG_WRITE(c, WB_DST1_ADDR, data->dest.plane_addr[1]);
+ DPU_REG_WRITE(c, WB_DST2_ADDR, data->dest.plane_addr[2]);
+ DPU_REG_WRITE(c, WB_DST3_ADDR, data->dest.plane_addr[3]);
+}
+
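+/*
+ * The WB_DST_FORMAT word assembled below packs, as derived from the
+ * shifts in dpu_hw_wb_setup_format(): per-component bit widths in
+ * bits 7:0, DSTC3_EN at bit 8, bpp - 1 from bit 9, unpack_count - 1
+ * from bit 12, DST_ALPHA_X at bit 14, unpack_tight at bit 17,
+ * unpack_align_msb at bit 18, fetch_planes at bit 19 and
+ * chroma_sample at bit 23.
+ */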
+static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_cfg *data)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ const struct dpu_format *fmt = data->dest.format;
+ u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
+ u32 write_config = 0;
+ u32 opmode = 0;
+ u32 dst_addr_sw = 0;
+
+ chroma_samp = fmt->chroma_sample;
+
+ dst_format = (chroma_samp << 23) |
+ (fmt->fetch_planes << 19) |
+ (fmt->bits[C3_ALPHA] << 6) |
+ (fmt->bits[C2_R_Cr] << 4) |
+ (fmt->bits[C1_B_Cb] << 2) |
+ (fmt->bits[C0_G_Y] << 0);
+
+ if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
+ dst_format |= BIT(8); /* DSTC3_EN */
+ if (!fmt->alpha_enable ||
+ !(ctx->caps->features & BIT(DPU_WB_PIPE_ALPHA)))
+ dst_format |= BIT(14); /* DST_ALPHA_X */
+ }
+
+ pattern = (fmt->element[3] << 24) |
+ (fmt->element[2] << 16) |
+ (fmt->element[1] << 8) |
+ (fmt->element[0] << 0);
+
+ dst_format |= (fmt->unpack_align_msb << 18) |
+ (fmt->unpack_tight << 17) |
+ ((fmt->unpack_count - 1) << 12) |
+ ((fmt->bpp - 1) << 9);
+
+ ystride0 = data->dest.plane_pitch[0] |
+ (data->dest.plane_pitch[1] << 16);
+ ystride1 = data->dest.plane_pitch[2] |
+ (data->dest.plane_pitch[3] << 16);
+
+ if (drm_rect_height(&data->roi) && drm_rect_width(&data->roi))
+ outsize = (drm_rect_height(&data->roi) << 16) | drm_rect_width(&data->roi);
+ else
+ outsize = (data->dest.height << 16) | data->dest.width;
+
+ DPU_REG_WRITE(c, WB_ALPHA_X_VALUE, 0xFF);
+ DPU_REG_WRITE(c, WB_DST_FORMAT, dst_format);
+ DPU_REG_WRITE(c, WB_DST_OP_MODE, opmode);
+ DPU_REG_WRITE(c, WB_DST_PACK_PATTERN, pattern);
+ DPU_REG_WRITE(c, WB_DST_YSTRIDE0, ystride0);
+ DPU_REG_WRITE(c, WB_DST_YSTRIDE1, ystride1);
+ DPU_REG_WRITE(c, WB_OUT_SIZE, outsize);
+ DPU_REG_WRITE(c, WB_DST_WRITE_CONFIG, write_config);
+ DPU_REG_WRITE(c, WB_DST_ADDR_SW_STATUS, dst_addr_sw);
+}
+
+static void dpu_hw_wb_roi(struct dpu_hw_wb *ctx, struct dpu_hw_wb_cfg *wb)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 image_size, out_size, out_xy;
+
+ image_size = (wb->dest.height << 16) | wb->dest.width;
+ out_xy = 0;
+ out_size = (drm_rect_height(&wb->roi) << 16) | drm_rect_width(&wb->roi);
+
+ DPU_REG_WRITE(c, WB_OUT_IMAGE_SIZE, image_size);
+ DPU_REG_WRITE(c, WB_OUT_XY, out_xy);
+ DPU_REG_WRITE(c, WB_OUT_SIZE, out_size);
+}
+
+static void dpu_hw_wb_setup_qos_lut(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_qos_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 qos_ctrl = 0;
+
+ if (!ctx || !cfg)
+ return;
+
+ DPU_REG_WRITE(c, WB_DANGER_LUT, cfg->danger_lut);
+ DPU_REG_WRITE(c, WB_SAFE_LUT, cfg->safe_lut);
+
+ /*
+ * for chipsets not using DPU_WB_QOS_8LVL but still using DPU
+ * driver such as msm8998, the reset value of WB_CREQ_LUT is
+ * sufficient for writeback to work. SW doesn't need to explicitly
+ * program a value.
+ */
+ if (ctx->caps && test_bit(DPU_WB_QOS_8LVL, &ctx->caps->features)) {
+ DPU_REG_WRITE(c, WB_CREQ_LUT_0, cfg->creq_lut);
+ DPU_REG_WRITE(c, WB_CREQ_LUT_1, cfg->creq_lut >> 32);
+ }
+
+ if (cfg->danger_safe_en)
+ qos_ctrl |= WB_QOS_CTRL_DANGER_SAFE_EN;
+
+ DPU_REG_WRITE(c, WB_QOS_CTRL, qos_ctrl);
+}
+
+static void dpu_hw_wb_setup_cdp(struct dpu_hw_wb *ctx,
+ struct dpu_hw_cdp_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 cdp_cntl = 0;
+
+ if (!ctx || !cfg)
+ return;
+
+ c = &ctx->hw;
+
+ if (cfg->enable)
+ cdp_cntl |= BIT(0);
+ if (cfg->ubwc_meta_enable)
+ cdp_cntl |= BIT(1);
+ if (cfg->preload_ahead == DPU_WB_CDP_PRELOAD_AHEAD_64)
+ cdp_cntl |= BIT(3);
+
+ DPU_REG_WRITE(c, WB_CDP_CNTL, cdp_cntl);
+}
+
+static void dpu_hw_wb_bind_pingpong_blk(
+ struct dpu_hw_wb *ctx,
+ bool enable, const enum dpu_pingpong pp)
+{
+ struct dpu_hw_blk_reg_map *c;
+ int mux_cfg;
+
+ if (!ctx)
+ return;
+
+ c = &ctx->hw;
+
+ mux_cfg = DPU_REG_READ(c, WB_MUX);
+ mux_cfg &= ~0xf;
+
+ if (enable)
+ mux_cfg |= (pp - PINGPONG_0) & 0x7;
+ else
+ mux_cfg |= 0xf;
+
+ DPU_REG_WRITE(c, WB_MUX, mux_cfg);
+}
+
+static void _setup_wb_ops(struct dpu_hw_wb_ops *ops,
+ unsigned long features)
+{
+ ops->setup_outaddress = dpu_hw_wb_setup_outaddress;
+ ops->setup_outformat = dpu_hw_wb_setup_format;
+
+ if (test_bit(DPU_WB_XY_ROI_OFFSET, &features))
+ ops->setup_roi = dpu_hw_wb_roi;
+
+ if (test_bit(DPU_WB_QOS, &features))
+ ops->setup_qos_lut = dpu_hw_wb_setup_qos_lut;
+
+ if (test_bit(DPU_WB_CDP, &features))
+ ops->setup_cdp = dpu_hw_wb_setup_cdp;
+
+ if (test_bit(DPU_WB_INPUT_CTRL, &features))
+ ops->bind_pingpong_blk = dpu_hw_wb_bind_pingpong_blk;
+}
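+
+/*
+ * Example (illustrative): only setup_outaddress and setup_outformat are
+ * installed unconditionally above; the remaining ops depend on the
+ * feature bits, so a caller NULL-checks them before use, e.g.:
+ *
+ *	if (hw_wb->ops.setup_roi)
+ *		hw_wb->ops.setup_roi(hw_wb, &wb_cfg);
+ */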
+
+struct dpu_hw_wb *dpu_hw_wb_init(enum dpu_wb idx,
+ void __iomem *addr, const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_wb *c;
+ const struct dpu_wb_cfg *cfg;
+
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _wb_offset(idx, m, addr, &c->hw);
+ if (IS_ERR(cfg)) {
+ WARN(1, "Unable to find wb idx=%d\n", idx);
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ c->mdp = &m->mdp[0];
+ c->idx = idx;
+ c->caps = cfg;
+ _setup_wb_ops(&c->ops, c->caps->features);
+
+ return c;
+}
+
+void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb)
+{
+ kfree(hw_wb);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
new file mode 100644
index 000000000..3ff5a4854
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved
+ */
+
+#ifndef _DPU_HW_WB_H
+#define _DPU_HW_WB_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_pingpong.h"
+
+struct dpu_hw_wb;
+
+struct dpu_hw_wb_cfg {
+ struct dpu_hw_fmt_layout dest;
+ enum dpu_intf_mode intf_mode;
+ struct drm_rect roi;
+ struct drm_rect crop;
+};
+
+/*
+ * CDP preload ahead address size
+ */
+enum {
+ DPU_WB_CDP_PRELOAD_AHEAD_32,
+ DPU_WB_CDP_PRELOAD_AHEAD_64
+};
+
+/**
+ * struct dpu_hw_wb_qos_cfg : Writeback pipe QoS configuration
+ * @danger_lut: LUT for generating danger level based on fill level
+ * @safe_lut: LUT for generating safe level based on fill level
+ * @creq_lut: LUT for generating creq level based on fill level
+ * @danger_safe_en: enable danger safe generation
+ */
+struct dpu_hw_wb_qos_cfg {
+ u32 danger_lut;
+ u32 safe_lut;
+ u64 creq_lut;
+ bool danger_safe_en;
+};
+
+/**
+ * struct dpu_hw_wb_ops : Interface to the wb hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @setup_outaddress: setup output address from the writeback job
+ * @setup_outformat: setup output format of writeback block from writeback job
+ * @setup_qos_lut: setup qos LUT for writeback block based on input
+ * @setup_cdp: setup chroma down prefetch block for writeback block
+ * @bind_pingpong_blk: enable/disable the connection with ping-pong block
+ */
+struct dpu_hw_wb_ops {
+ void (*setup_outaddress)(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_cfg *wb);
+
+ void (*setup_outformat)(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_cfg *wb);
+
+ void (*setup_roi)(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_cfg *wb);
+
+ void (*setup_qos_lut)(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_qos_cfg *cfg);
+
+ void (*setup_cdp)(struct dpu_hw_wb *ctx,
+ struct dpu_hw_cdp_cfg *cfg);
+
+ void (*bind_pingpong_blk)(struct dpu_hw_wb *ctx,
+ bool enable, const enum dpu_pingpong pp);
+};
+
+/**
+ * struct dpu_hw_wb : WB driver object
+ * @hw: block hardware details
+ * @mdp: pointer to associated mdp portion of the catalog
+ * @idx: hardware index number within type
+ * @wb_hw_caps: hardware capabilities
+ * @ops: function pointers
+ * @hw_mdp: MDP top level hardware block
+ */
+struct dpu_hw_wb {
+ struct dpu_hw_blk_reg_map hw;
+ const struct dpu_mdp_cfg *mdp;
+
+ /* wb path */
+ int idx;
+ const struct dpu_wb_cfg *caps;
+
+ /* ops */
+ struct dpu_hw_wb_ops ops;
+
+ struct dpu_hw_mdp *hw_mdp;
+};
+
+/**
+ * dpu_hw_wb_init(): Initializes and returns the writeback hw driver object.
+ * @idx: wb_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_wb *dpu_hw_wb_init(enum dpu_wb idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_wb_destroy(): Destroy writeback hw driver object.
+ * @hw_wb: Pointer to writeback hw driver object
+ */
+void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb);
+
+#endif /*_DPU_HW_WB_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
new file mode 100644
index 000000000..93081e82c
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HWIO_H
+#define _DPU_HWIO_H
+
+#include "dpu_hw_util.h"
+
+/*
+ * MDP TOP block register offsets and bit field definitions
+ */
+#define DISP_INTF_SEL 0x004
+#define INTR_EN 0x010
+#define INTR_STATUS 0x014
+#define INTR_CLEAR 0x018
+#define INTR2_EN 0x008
+#define INTR2_STATUS 0x00c
+#define INTR2_CLEAR 0x02c
+#define HIST_INTR_EN 0x01c
+#define HIST_INTR_STATUS 0x020
+#define HIST_INTR_CLEAR 0x024
+#define SPLIT_DISPLAY_EN 0x2F4
+#define SPLIT_DISPLAY_UPPER_PIPE_CTRL 0x2F8
+#define DSPP_IGC_COLOR0_RAM_LUTN 0x300
+#define DSPP_IGC_COLOR1_RAM_LUTN 0x304
+#define DSPP_IGC_COLOR2_RAM_LUTN 0x308
+#define HW_EVENTS_CTL 0x37C
+#define CLK_CTRL3 0x3A8
+#define CLK_STATUS3 0x3AC
+#define CLK_CTRL4 0x3B0
+#define CLK_STATUS4 0x3B4
+#define CLK_CTRL5 0x3B8
+#define CLK_STATUS5 0x3BC
+#define CLK_CTRL7 0x3D0
+#define CLK_STATUS7 0x3D4
+#define SPLIT_DISPLAY_LOWER_PIPE_CTRL 0x3F0
+#define SPLIT_DISPLAY_TE_LINE_INTERVAL 0x3F4
+#define INTF_SW_RESET_MASK 0x3FC
+#define HDMI_DP_CORE_SELECT 0x408
+#define MDP_OUT_CTL_0 0x410
+#define MDP_VSYNC_SEL 0x414
+#define DCE_SEL 0x450
+
+#endif /*_DPU_HWIO_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
new file mode 100644
index 000000000..b7901b666
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -0,0 +1,1337 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+#include <linux/of_irq.h>
+#include <linux/pm_opp.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_file.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_writeback.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+#include "disp/msm_disp_snapshot.h"
+
+#include "dpu_core_irq.h"
+#include "dpu_crtc.h"
+#include "dpu_encoder.h"
+#include "dpu_formats.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_kms.h"
+#include "dpu_plane.h"
+#include "dpu_vbif.h"
+#include "dpu_writeback.h"
+
+#define CREATE_TRACE_POINTS
+#include "dpu_trace.h"
+
+/*
+ * To enable overall DRM driver logging
+ * # echo 0x2 > /sys/module/drm/parameters/debug
+ *
+ * To enable DRM driver h/w logging
+ * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
+ *
+ * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
+ */
+#define DPU_DEBUGFS_DIR "msm_dpu"
+#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
+
+static int dpu_kms_hw_init(struct msm_kms *kms);
+static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_danger_signal_status(struct seq_file *s,
+ bool danger_status)
+{
+ struct dpu_kms *kms = (struct dpu_kms *)s->private;
+ struct dpu_danger_safe_status status;
+ int i;
+
+ if (!kms->hw_mdp) {
+ DPU_ERROR("invalid arg(s)\n");
+ return 0;
+ }
+
+ memset(&status, 0, sizeof(struct dpu_danger_safe_status));
+
+ pm_runtime_get_sync(&kms->pdev->dev);
+ if (danger_status) {
+ seq_puts(s, "\nDanger signal status:\n");
+ if (kms->hw_mdp->ops.get_danger_status)
+ kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
+ &status);
+ } else {
+ seq_puts(s, "\nSafe signal status:\n");
+ if (kms->hw_mdp->ops.get_safe_status)
+ kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
+ &status);
+ }
+ pm_runtime_put_sync(&kms->pdev->dev);
+
+ seq_printf(s, "MDP : 0x%x\n", status.mdp);
+
+ for (i = SSPP_VIG0; i < SSPP_MAX; i++)
+ seq_printf(s, "SSPP%d : 0x%x \n", i - SSPP_VIG0,
+ status.sspp[i]);
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
+{
+ return _dpu_danger_signal_status(s, true);
+}
+DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_danger_stats);
+
+static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
+{
+ return _dpu_danger_signal_status(s, false);
+}
+DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats);
+
+static ssize_t _dpu_plane_danger_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct dpu_kms *kms = file->private_data;
+ int len;
+ char buf[40];
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
+
+ return simple_read_from_buffer(buff, count, ppos, buf, len);
+}
+
+static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
+{
+ struct drm_plane *plane;
+
+ drm_for_each_plane(plane, kms->dev) {
+ if (plane->fb && plane->state) {
+ dpu_plane_danger_signal_ctrl(plane, enable);
+ DPU_DEBUG("plane:%d img:%dx%d ",
+ plane->base.id, plane->fb->width,
+ plane->fb->height);
+ DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->crtc_x, plane->state->crtc_y,
+ plane->state->crtc_w, plane->state->crtc_h);
+ } else {
+ DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
+ }
+ }
+}
+
+static ssize_t _dpu_plane_danger_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_kms *kms = file->private_data;
+ unsigned int disable_panic;
+ int ret;
+
+ ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
+ if (ret)
+ return ret;
+
+ if (disable_panic) {
+ /* Disable panic signal for all active pipes */
+ DPU_DEBUG("Disabling danger:\n");
+ _dpu_plane_set_danger_state(kms, false);
+ kms->has_danger_ctrl = false;
+ } else {
+ /* Enable panic signal for all active pipes */
+ DPU_DEBUG("Enabling danger:\n");
+ kms->has_danger_ctrl = true;
+ _dpu_plane_set_danger_state(kms, true);
+ }
+
+ return count;
+}
+
+static const struct file_operations dpu_plane_danger_enable = {
+ .open = simple_open,
+ .read = _dpu_plane_danger_read,
+ .write = _dpu_plane_danger_write,
+};
+
+static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent)
+{
+ struct dentry *entry = debugfs_create_dir("danger", parent);
+
+ debugfs_create_file("danger_status", 0600, entry,
+ dpu_kms, &dpu_debugfs_danger_stats_fops);
+ debugfs_create_file("safe_status", 0600, entry,
+ dpu_kms, &dpu_debugfs_safe_stats_fops);
+ debugfs_create_file("disable_danger", 0600, entry,
+ dpu_kms, &dpu_plane_danger_enable);
+
+}
+
+/*
+ * Companion structure for dpu_debugfs_create_regset32.
+ */
+struct dpu_debugfs_regset32 {
+ uint32_t offset;
+ uint32_t blk_len;
+ struct dpu_kms *dpu_kms;
+};
+
+static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
+{
+ struct dpu_debugfs_regset32 *regset = s->private;
+ struct dpu_kms *dpu_kms = regset->dpu_kms;
+ void __iomem *base;
+ uint32_t i, addr;
+
+ if (!dpu_kms->mmio)
+ return 0;
+
+ base = dpu_kms->mmio + regset->offset;
+
+ /* insert padding spaces, if needed */
+ if (regset->offset & 0xF) {
+ seq_printf(s, "[%x]", regset->offset & ~0xF);
+ for (i = 0; i < (regset->offset & 0xF); i += 4)
+ seq_puts(s, " ");
+ }
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ /* main register output */
+ for (i = 0; i < regset->blk_len; i += 4) {
+ addr = regset->offset + i;
+ if ((addr & 0xF) == 0x0)
+ seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
+ seq_printf(s, " %08x", readl_relaxed(base + i));
+ }
+ seq_puts(s, "\n");
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+}
+
+static int dpu_debugfs_open_regset32(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, _dpu_debugfs_show_regset32, inode->i_private);
+}
+
+static const struct file_operations dpu_fops_regset32 = {
+ .open = dpu_debugfs_open_regset32,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void dpu_debugfs_create_regset32(const char *name, umode_t mode,
+ void *parent,
+ uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
+{
+ struct dpu_debugfs_regset32 *regset;
+
+ if (WARN_ON(!name || !dpu_kms || !length))
+ return;
+
+ regset = devm_kzalloc(&dpu_kms->pdev->dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return;
+
+ /* make sure offset is a multiple of 4 */
+ regset->offset = round_down(offset, 4);
+ regset->blk_len = length;
+ regset->dpu_kms = dpu_kms;
+
+ debugfs_create_file(name, mode, parent, regset, &dpu_fops_regset32);
+}
+
+static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ void *p = dpu_hw_util_get_log_mask_ptr();
+ struct dentry *entry;
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ int i;
+
+ if (!p)
+ return -EINVAL;
+
+ /* Only create a set of debugfs for the primary node, ignore render nodes */
+ if (minor->type != DRM_MINOR_PRIMARY)
+ return 0;
+
+ dev = dpu_kms->dev;
+ priv = dev->dev_private;
+
+ entry = debugfs_create_dir("debug", minor->debugfs_root);
+
+ debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);
+
+ dpu_debugfs_danger_init(dpu_kms, entry);
+ dpu_debugfs_vbif_init(dpu_kms, entry);
+ dpu_debugfs_core_irq_init(dpu_kms, entry);
+ dpu_debugfs_sspp_init(dpu_kms, entry);
+
+ for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
+ if (priv->dp[i])
+ msm_dp_debugfs_init(priv->dp[i], minor);
+ }
+
+ return dpu_core_perf_debugfs_init(dpu_kms, entry);
+}
+#endif
+
+/* Global/shared object state funcs */
+
+/*
+ * This is a helper that returns the private state currently in operation.
+ * Note that this would return the "old_state" if called in the atomic check
+ * path, and the "new_state" after the atomic swap has been done.
+ */
+struct dpu_global_state *
+dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms)
+{
+ return to_dpu_global_state(dpu_kms->global_state.state);
+}
+
+/*
+ * This acquires the modeset lock set aside for global state, creates
+ * a new duplicated private object state.
+ */
+struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+ struct drm_private_state *priv_state;
+ int ret;
+
+ ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ priv_state = drm_atomic_get_private_obj_state(s,
+ &dpu_kms->global_state);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_dpu_global_state(priv_state);
+}
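+
+/*
+ * Example (illustrative): given a struct drm_atomic_state *s, atomic
+ * check code uses the helper above rather than touching the private
+ * object directly:
+ *
+ *	global_state = dpu_kms_get_global_state(s);
+ *	if (IS_ERR(global_state))
+ *		return PTR_ERR(global_state);
+ */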
+
+static struct drm_private_state *
+dpu_kms_global_duplicate_state(struct drm_private_obj *obj)
+{
+ struct dpu_global_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ return &state->base;
+}
+
+static void dpu_kms_global_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct dpu_global_state *dpu_state = to_dpu_global_state(state);
+
+ kfree(dpu_state);
+}
+
+static const struct drm_private_state_funcs dpu_kms_global_state_funcs = {
+ .atomic_duplicate_state = dpu_kms_global_duplicate_state,
+ .atomic_destroy_state = dpu_kms_global_destroy_state,
+};
+
+static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
+{
+ struct dpu_global_state *state;
+
+ drm_modeset_lock_init(&dpu_kms->global_state_lock);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state,
+ &state->base,
+ &dpu_kms_global_state_funcs);
+ return 0;
+}
+
+static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
+{
+ struct icc_path *path0;
+ struct icc_path *path1;
+ struct drm_device *dev = dpu_kms->dev;
+ struct device *dpu_dev = dev->dev;
+
+ path0 = msm_icc_get(dpu_dev, "mdp0-mem");
+ path1 = msm_icc_get(dpu_dev, "mdp1-mem");
+
+ if (IS_ERR_OR_NULL(path0))
+ return PTR_ERR_OR_ZERO(path0);
+
+ dpu_kms->path[0] = path0;
+ dpu_kms->num_paths = 1;
+
+ if (!IS_ERR_OR_NULL(path1)) {
+ dpu_kms->path[1] = path1;
+ dpu_kms->num_paths++;
+ }
+ return 0;
+}
+
+static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ return dpu_crtc_vblank(crtc, true);
+}
+
+static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ dpu_crtc_vblank(crtc, false);
+}
+
+static void dpu_kms_enable_commit(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+}
+
+static void dpu_kms_disable_commit(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+}
+
+static ktime_t dpu_kms_vsync_time(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
+ ktime_t vsync_time;
+
+ if (dpu_encoder_vsync_time(encoder, &vsync_time) == 0)
+ return vsync_time;
+ }
+
+ return ktime_get();
+}
+
+static void dpu_kms_prepare_commit(struct msm_kms *kms,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_encoder *encoder;
+ int i;
+
+ if (!kms)
+ return;
+
+ /* Call prepare_commit for all affected encoders */
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc_state->encoder_mask) {
+ dpu_encoder_prepare_commit(encoder);
+ }
+ }
+}
+
+static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ struct drm_crtc *crtc;
+
+ for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) {
+ if (!crtc->state->active)
+ continue;
+
+ trace_dpu_kms_commit(DRMID(crtc));
+ dpu_crtc_commit_kickoff(crtc);
+ }
+}
+
+static void dpu_kms_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ struct drm_crtc *crtc;
+
+ DPU_ATRACE_BEGIN("kms_complete_commit");
+
+ for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
+ dpu_crtc_complete_commit(crtc);
+
+ DPU_ATRACE_END("kms_complete_commit");
+}
+
+static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
+ struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev;
+ int ret;
+
+ if (!kms || !crtc || !crtc->state) {
+ DPU_ERROR("invalid params\n");
+ return;
+ }
+
+ dev = crtc->dev;
+
+ if (!crtc->state->enable) {
+ DPU_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
+ return;
+ }
+
+ if (!crtc->state->active) {
+ DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
+ return;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+ /*
+ * Wait for post-flush if necessary to delay before
+ * plane_cleanup. For example, wait for vsync in case of video
+ * mode panels. This may be a no-op for command mode panels.
+ */
+ trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
+ ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
+ if (ret && ret != -EWOULDBLOCK) {
+ DPU_ERROR("wait for commit done returned %d\n", ret);
+ break;
+ }
+ }
+}
+
+static void dpu_kms_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ struct drm_crtc *crtc;
+
+ for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
+ dpu_kms_wait_for_commit_done(kms, crtc);
+}
+
+static int _dpu_kms_initialize_dsi(struct drm_device *dev,
+ struct msm_drm_private *priv,
+ struct dpu_kms *dpu_kms)
+{
+ struct drm_encoder *encoder = NULL;
+ struct msm_display_info info;
+ int i, rc = 0;
+
+ if (!(priv->dsi[0] || priv->dsi[1]))
+ return rc;
+
+ /*
+ * We support the following configurations:
+ * - Single DSI host (dsi0 or dsi1)
+ * - Two independent DSI hosts
+ * - Bonded DSI0 and DSI1 hosts
+ *
+ * TODO: Support swapping DSI0 and DSI1 in the bonded setup.
+ */
+ for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+ int other = (i + 1) % 2;
+
+ if (!priv->dsi[i])
+ continue;
+
+ if (msm_dsi_is_bonded_dsi(priv->dsi[i]) &&
+ !msm_dsi_is_master_dsi(priv->dsi[i]))
+ continue;
+
+ encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
+ if (IS_ERR(encoder)) {
+ DPU_ERROR("encoder init failed for dsi display\n");
+ return PTR_ERR(encoder);
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.intf_type = encoder->encoder_type;
+
+ rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
+ if (rc) {
+ DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
+ i, rc);
+ break;
+ }
+
+ info.h_tile_instance[info.num_of_h_tiles++] = i;
+ info.is_cmd_mode = msm_dsi_is_cmd_mode(priv->dsi[i]);
+
+ info.dsc = msm_dsi_get_dsc_config(priv->dsi[i]);
+
+ if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && priv->dsi[other]) {
+ rc = msm_dsi_modeset_init(priv->dsi[other], dev, encoder);
+ if (rc) {
+ DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
+ other, rc);
+ break;
+ }
+
+ info.h_tile_instance[info.num_of_h_tiles++] = other;
+ }
+
+ rc = dpu_encoder_setup(dev, encoder, &info);
+ if (rc)
+ DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+ encoder->base.id, rc);
+ }
+
+ return rc;
+}
+
+static int _dpu_kms_initialize_displayport(struct drm_device *dev,
+ struct msm_drm_private *priv,
+ struct dpu_kms *dpu_kms)
+{
+ struct drm_encoder *encoder = NULL;
+ struct msm_display_info info;
+ int rc;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
+ if (!priv->dp[i])
+ continue;
+
+ encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS);
+ if (IS_ERR(encoder)) {
+ DPU_ERROR("encoder init failed for dsi display\n");
+ return PTR_ERR(encoder);
+ }
+
+ memset(&info, 0, sizeof(info));
+ rc = msm_dp_modeset_init(priv->dp[i], dev, encoder);
+ if (rc) {
+ DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
+ drm_encoder_cleanup(encoder);
+ return rc;
+ }
+
+ info.num_of_h_tiles = 1;
+ info.h_tile_instance[0] = i;
+ info.intf_type = encoder->encoder_type;
+ rc = dpu_encoder_setup(dev, encoder, &info);
+ if (rc) {
+ DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+ encoder->base.id, rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int _dpu_kms_initialize_writeback(struct drm_device *dev,
+ struct msm_drm_private *priv, struct dpu_kms *dpu_kms,
+ const u32 *wb_formats, int n_formats)
+{
+ struct drm_encoder *encoder = NULL;
+ struct msm_display_info info;
+ int rc;
+
+ encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_VIRTUAL);
+ if (IS_ERR(encoder)) {
+ DPU_ERROR("encoder init failed for dsi display\n");
+ return PTR_ERR(encoder);
+ }
+
+ memset(&info, 0, sizeof(info));
+
+ rc = dpu_writeback_init(dev, encoder, wb_formats,
+ n_formats);
+ if (rc) {
+ DPU_ERROR("dpu_writeback_init, rc = %d\n", rc);
+ drm_encoder_cleanup(encoder);
+ return rc;
+ }
+
+ info.num_of_h_tiles = 1;
+ /* use only WB idx 2 instance for DPU */
+ info.h_tile_instance[0] = WB_2;
+ info.intf_type = encoder->encoder_type;
+
+ rc = dpu_encoder_setup(dev, encoder, &info);
+ if (rc) {
+ DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+ encoder->base.id, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * _dpu_kms_setup_displays - create encoders, bridges and connectors
+ * for underlying displays
+ * @dev: Pointer to drm device structure
+ * @priv: Pointer to private drm device data
+ * @dpu_kms: Pointer to dpu kms structure
+ * Returns: Zero on success
+ */
+static int _dpu_kms_setup_displays(struct drm_device *dev,
+ struct msm_drm_private *priv,
+ struct dpu_kms *dpu_kms)
+{
+ int rc = 0;
+ int i;
+
+ rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
+ if (rc) {
+ DPU_ERROR("initialize_dsi failed, rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = _dpu_kms_initialize_displayport(dev, priv, dpu_kms);
+ if (rc) {
+ DPU_ERROR("initialize_DP failed, rc = %d\n", rc);
+ return rc;
+ }
+
+ /* Since WB isn't a separate driver, check the catalog before initializing */
+ if (dpu_kms->catalog->wb_count) {
+ for (i = 0; i < dpu_kms->catalog->wb_count; i++) {
+ if (dpu_kms->catalog->wb[i].id == WB_2) {
+ rc = _dpu_kms_initialize_writeback(dev, priv, dpu_kms,
+ dpu_kms->catalog->wb[i].format_list,
+ dpu_kms->catalog->wb[i].num_formats);
+ if (rc) {
+ DPU_ERROR("initialize_WB failed, rc = %d\n", rc);
+ return rc;
+ }
+ }
+ }
+ }
+
+ return rc;
+}
+
+#define MAX_PLANES 20
+static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
+{
+ struct drm_device *dev;
+ struct drm_plane *primary_planes[MAX_PLANES], *plane;
+ struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ unsigned int num_encoders;
+
+ struct msm_drm_private *priv;
+ const struct dpu_mdss_cfg *catalog;
+
+ int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
+ int max_crtc_count;
+ dev = dpu_kms->dev;
+ priv = dev->dev_private;
+ catalog = dpu_kms->catalog;
+
+ /*
+ * Create encoder and query display drivers to create
+ * bridges and connectors
+ */
+ ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
+ if (ret)
+ return ret;
+
+ num_encoders = 0;
+ drm_for_each_encoder(encoder, dev)
+ num_encoders++;
+
+ max_crtc_count = min(catalog->mixer_count, num_encoders);
+
+ /* Create the planes, keeping track of one primary/cursor per crtc */
+ for (i = 0; i < catalog->sspp_count; i++) {
+ enum drm_plane_type type;
+
+ if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
+ && cursor_planes_idx < max_crtc_count)
+ type = DRM_PLANE_TYPE_CURSOR;
+ else if (primary_planes_idx < max_crtc_count)
+ type = DRM_PLANE_TYPE_PRIMARY;
+ else
+ type = DRM_PLANE_TYPE_OVERLAY;
+
+ DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
+ type, catalog->sspp[i].features,
+ catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));
+
+ plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
+ (1UL << max_crtc_count) - 1);
+ if (IS_ERR(plane)) {
+ DPU_ERROR("dpu_plane_init failed\n");
+ ret = PTR_ERR(plane);
+ return ret;
+ }
+
+ if (type == DRM_PLANE_TYPE_CURSOR)
+ cursor_planes[cursor_planes_idx++] = plane;
+ else if (type == DRM_PLANE_TYPE_PRIMARY)
+ primary_planes[primary_planes_idx++] = plane;
+ }
+
+ max_crtc_count = min(max_crtc_count, primary_planes_idx);
+
+ /* Create one CRTC per encoder */
+ for (i = 0; i < max_crtc_count; i++) {
+ crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
+ if (IS_ERR(crtc)) {
+ ret = PTR_ERR(crtc);
+ return ret;
+ }
+ priv->crtcs[priv->num_crtcs++] = crtc;
+ }
+
+ /* All CRTCs are compatible with all encoders */
+ drm_for_each_encoder(encoder, dev)
+ encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
+
+ return 0;
+}
+
+static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
+{
+ int i;
+
+ if (dpu_kms->hw_intr)
+ dpu_hw_intr_destroy(dpu_kms->hw_intr);
+ dpu_kms->hw_intr = NULL;
+
+ /* safe to call these more than once during shutdown */
+ _dpu_kms_mmu_destroy(dpu_kms);
+
+ if (dpu_kms->catalog) {
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ if (dpu_kms->hw_vbif[i]) {
+ dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]);
+ dpu_kms->hw_vbif[i] = NULL;
+ }
+ }
+ }
+
+ if (dpu_kms->rm_init)
+ dpu_rm_destroy(&dpu_kms->rm);
+ dpu_kms->rm_init = false;
+
+ dpu_kms->catalog = NULL;
+
+ if (dpu_kms->vbif[VBIF_NRT])
+ devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
+ dpu_kms->vbif[VBIF_NRT] = NULL;
+
+ if (dpu_kms->vbif[VBIF_RT])
+ devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
+ dpu_kms->vbif[VBIF_RT] = NULL;
+
+ if (dpu_kms->hw_mdp)
+ dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
+ dpu_kms->hw_mdp = NULL;
+
+ if (dpu_kms->mmio)
+ devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
+ dpu_kms->mmio = NULL;
+}
+
+static void dpu_kms_destroy(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms;
+
+ if (!kms) {
+ DPU_ERROR("invalid kms\n");
+ return;
+ }
+
+ dpu_kms = to_dpu_kms(kms);
+
+ _dpu_kms_hw_destroy(dpu_kms);
+
+ msm_kms_destroy(&dpu_kms->base);
+
+ if (dpu_kms->rpm_enabled)
+ pm_runtime_disable(&dpu_kms->pdev->dev);
+}
+
+static int dpu_irq_postinstall(struct msm_kms *kms)
+{
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ int i;
+
+ if (!dpu_kms || !dpu_kms->dev)
+ return -EINVAL;
+
+ priv = dpu_kms->dev->dev_private;
+ if (!priv)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(priv->dp); i++)
+ msm_dp_irq_postinstall(priv->dp[i]);
+
+ return 0;
+}
+
+static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_kms *kms)
+{
+ int i;
+ struct dpu_kms *dpu_kms;
+ const struct dpu_mdss_cfg *cat;
+
+ dpu_kms = to_dpu_kms(kms);
+
+ cat = dpu_kms->catalog;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ /* dump CTL sub-blocks HW regs info */
+ for (i = 0; i < cat->ctl_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->ctl[i].len,
+ dpu_kms->mmio + cat->ctl[i].base, "ctl_%d", i);
+
+ /* dump DSPP sub-blocks HW regs info */
+ for (i = 0; i < cat->dspp_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len,
+ dpu_kms->mmio + cat->dspp[i].base, "dspp_%d", i);
+
+ /* dump INTF sub-blocks HW regs info */
+ for (i = 0; i < cat->intf_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->intf[i].len,
+ dpu_kms->mmio + cat->intf[i].base, "intf_%d", i);
+
+ /* dump PP sub-blocks HW regs info */
+ for (i = 0; i < cat->pingpong_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].len,
+ dpu_kms->mmio + cat->pingpong[i].base, "pingpong_%d", i);
+
+ /* dump SSPP sub-blocks HW regs info */
+ for (i = 0; i < cat->sspp_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len,
+ dpu_kms->mmio + cat->sspp[i].base, "sspp_%d", i);
+
+ /* dump LM sub-blocks HW regs info */
+ for (i = 0; i < cat->mixer_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->mixer[i].len,
+ dpu_kms->mmio + cat->mixer[i].base, "lm_%d", i);
+
+ /* dump WB sub-blocks HW regs info */
+ for (i = 0; i < cat->wb_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->wb[i].len,
+ dpu_kms->mmio + cat->wb[i].base, "wb_%d", i);
+
+ msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len,
+ dpu_kms->mmio + cat->mdp[0].base, "top");
+
+ /* dump DSC sub-blocks HW regs info */
+ for (i = 0; i < cat->dsc_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len,
+ dpu_kms->mmio + cat->dsc[i].base, "dsc_%d", i);
+
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+}
+
+static const struct msm_kms_funcs kms_funcs = {
+ .hw_init = dpu_kms_hw_init,
+ .irq_preinstall = dpu_core_irq_preinstall,
+ .irq_postinstall = dpu_irq_postinstall,
+ .irq_uninstall = dpu_core_irq_uninstall,
+ .irq = dpu_core_irq,
+ .enable_commit = dpu_kms_enable_commit,
+ .disable_commit = dpu_kms_disable_commit,
+ .vsync_time = dpu_kms_vsync_time,
+ .prepare_commit = dpu_kms_prepare_commit,
+ .flush_commit = dpu_kms_flush_commit,
+ .wait_flush = dpu_kms_wait_flush,
+ .complete_commit = dpu_kms_complete_commit,
+ .enable_vblank = dpu_kms_enable_vblank,
+ .disable_vblank = dpu_kms_disable_vblank,
+ .check_modified_format = dpu_format_check_modified_format,
+ .get_format = dpu_get_msm_format,
+ .destroy = dpu_kms_destroy,
+ .snapshot = dpu_kms_mdp_snapshot,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = dpu_kms_debugfs_init,
+#endif
+};
+
+static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
+{
+ struct msm_mmu *mmu;
+
+ if (!dpu_kms->base.aspace)
+ return;
+
+ mmu = dpu_kms->base.aspace->mmu;
+
+ mmu->funcs->detach(mmu);
+ msm_gem_address_space_put(dpu_kms->base.aspace);
+
+ dpu_kms->base.aspace = NULL;
+}
+
+static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
+{
+ struct msm_gem_address_space *aspace;
+
+ aspace = msm_kms_init_aspace(dpu_kms->dev);
+ if (IS_ERR(aspace))
+ return PTR_ERR(aspace);
+
+ dpu_kms->base.aspace = aspace;
+
+ return 0;
+}
+
+u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
+{
+ struct clk *clk;
+
+ clk = msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, clock_name);
+ if (!clk)
+ return -EINVAL;
+
+ return clk_get_rate(clk);
+}
+
+static int dpu_kms_hw_init(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms;
+ struct drm_device *dev;
+ int i, rc = -EINVAL;
+
+ if (!kms) {
+ DPU_ERROR("invalid kms\n");
+ return rc;
+ }
+
+ dpu_kms = to_dpu_kms(kms);
+ dev = dpu_kms->dev;
+
+ rc = dpu_kms_global_obj_init(dpu_kms);
+ if (rc)
+ return rc;
+
+ atomic_set(&dpu_kms->bandwidth_ref, 0);
+
+ dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp");
+ if (IS_ERR(dpu_kms->mmio)) {
+ rc = PTR_ERR(dpu_kms->mmio);
+ DPU_ERROR("mdp register memory map failed: %d\n", rc);
+ dpu_kms->mmio = NULL;
+ goto error;
+ }
+ DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+
+ dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif");
+ if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
+ rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
+ DPU_ERROR("vbif register memory map failed: %d\n", rc);
+ dpu_kms->vbif[VBIF_RT] = NULL;
+ goto error;
+ }
+ dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(dpu_kms->pdev, "vbif_nrt");
+ if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
+ dpu_kms->vbif[VBIF_NRT] = NULL;
+ DPU_DEBUG("VBIF NRT is not defined");
+ }
+
+ dpu_kms->reg_dma = msm_ioremap_quiet(dpu_kms->pdev, "regdma");
+ if (IS_ERR(dpu_kms->reg_dma)) {
+ dpu_kms->reg_dma = NULL;
+ DPU_DEBUG("REG_DMA is not defined");
+ }
+
+ dpu_kms_parse_data_bus_icc_path(dpu_kms);
+
+ rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev);
+ if (rc < 0)
+ goto error;
+
+ dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
+
+ pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);
+
+ dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev);
+ if (IS_ERR_OR_NULL(dpu_kms->catalog)) {
+ rc = PTR_ERR(dpu_kms->catalog);
+ if (!dpu_kms->catalog)
+ rc = -EINVAL;
+ DPU_ERROR("catalog init failed: %d\n", rc);
+ dpu_kms->catalog = NULL;
+ goto power_error;
+ }
+
+ /*
+ * Now we need to read the HW catalog and initialize resources such as
+ * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
+ */
+ rc = _dpu_kms_mmu_init(dpu_kms);
+ if (rc) {
+ DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
+ goto power_error;
+ }
+
+ rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio);
+ if (rc) {
+ DPU_ERROR("rm init failed: %d\n", rc);
+ goto power_error;
+ }
+
+ dpu_kms->rm_init = true;
+
+ dpu_kms->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, dpu_kms->mmio,
+ dpu_kms->catalog);
+ if (IS_ERR(dpu_kms->hw_mdp)) {
+ rc = PTR_ERR(dpu_kms->hw_mdp);
+ DPU_ERROR("failed to get hw_mdp: %d\n", rc);
+ dpu_kms->hw_mdp = NULL;
+ goto power_error;
+ }
+
+ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+ u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
+
+ dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx,
+ dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
+ if (IS_ERR(dpu_kms->hw_vbif[vbif_idx])) {
+ rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
+ DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
+ dpu_kms->hw_vbif[vbif_idx] = NULL;
+ goto power_error;
+ }
+ }
+
+ rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
+ msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, "core"));
+ if (rc) {
+ DPU_ERROR("failed to init perf %d\n", rc);
+ goto perf_err;
+ }
+
+ dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
+ if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
+ rc = PTR_ERR(dpu_kms->hw_intr);
+ DPU_ERROR("hw_intr init failed: %d\n", rc);
+ dpu_kms->hw_intr = NULL;
+ goto hw_intr_init_err;
+ }
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ /*
+ * max crtc width is equal to the max mixer width * 2 and max height
+ * is 4K
+ */
+ dev->mode_config.max_width =
+ dpu_kms->catalog->caps->max_mixer_width * 2;
+ dev->mode_config.max_height = 4096;
+
+ dev->max_vblank_count = 0xffffffff;
+ /* Disable vblank irqs aggressively for power-saving */
+ dev->vblank_disable_immediate = true;
+
+ /*
+ * _dpu_kms_drm_obj_init should create the DRM related objects
+ * i.e. CRTCs, planes, encoders, connectors and so forth
+ */
+ rc = _dpu_kms_drm_obj_init(dpu_kms);
+ if (rc) {
+ DPU_ERROR("modeset init failed: %d\n", rc);
+ goto drm_obj_init_err;
+ }
+
+ dpu_vbif_init_memtypes(dpu_kms);
+
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+
+drm_obj_init_err:
+ dpu_core_perf_destroy(&dpu_kms->perf);
+hw_intr_init_err:
+perf_err:
+power_error:
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+error:
+ _dpu_kms_hw_destroy(dpu_kms);
+
+ return rc;
+}
+
+static int dpu_kms_init(struct drm_device *ddev)
+{
+ struct msm_drm_private *priv = ddev->dev_private;
+ struct device *dev = ddev->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dpu_kms *dpu_kms;
+ int irq;
+ struct dev_pm_opp *opp;
+ int ret = 0;
+ unsigned long max_freq = ULONG_MAX;
+
+ dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
+ if (!dpu_kms)
+ return -ENOMEM;
+
+ ret = devm_pm_opp_set_clkname(dev, "core");
+ if (ret)
+ return ret;
+ /* OPP table is optional */
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret && ret != -ENODEV) {
+ dev_err(dev, "invalid OPP table in device tree\n");
+ return ret;
+ }
+
+ ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_kms->clocks);
+ if (ret < 0) {
+ DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
+ return ret;
+ }
+ dpu_kms->num_clocks = ret;
+
+ opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
+ if (!IS_ERR(opp))
+ dev_pm_opp_put(opp);
+
+ dev_pm_opp_set_rate(dev, max_freq);
+
+ ret = msm_kms_init(&dpu_kms->base, &kms_funcs);
+ if (ret) {
+ DPU_ERROR("failed to init kms, ret=%d\n", ret);
+ return ret;
+ }
+ dpu_kms->dev = ddev;
+ dpu_kms->pdev = pdev;
+
+ pm_runtime_enable(&pdev->dev);
+ dpu_kms->rpm_enabled = true;
+
+ priv->kms = &dpu_kms->base;
+
+ irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
+ if (!irq) {
+ DPU_ERROR("failed to get irq\n");
+ return -EINVAL;
+ }
+ dpu_kms->base.irq = irq;
+
+ return 0;
+}
+
+static int dpu_dev_probe(struct platform_device *pdev)
+{
+ return msm_drv_probe(&pdev->dev, dpu_kms_init);
+}
+
+static int dpu_dev_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &msm_drm_ops);
+
+ return 0;
+}
+
+static int __maybe_unused dpu_runtime_suspend(struct device *dev)
+{
+ int i;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+
+ /* Drop the performance state vote */
+ dev_pm_opp_set_rate(dev, 0);
+ clk_bulk_disable_unprepare(dpu_kms->num_clocks, dpu_kms->clocks);
+
+ for (i = 0; i < dpu_kms->num_paths; i++)
+ icc_set_bw(dpu_kms->path[i], 0, 0);
+
+ return 0;
+}
+
+static int __maybe_unused dpu_runtime_resume(struct device *dev)
+{
+ int rc;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+ struct drm_encoder *encoder;
+ struct drm_device *ddev;
+
+ ddev = dpu_kms->dev;
+
+ rc = clk_bulk_prepare_enable(dpu_kms->num_clocks, dpu_kms->clocks);
+ if (rc) {
+ DPU_ERROR("clock enable failed rc:%d\n", rc);
+ return rc;
+ }
+
+ dpu_vbif_init_memtypes(dpu_kms);
+
+ drm_for_each_encoder(encoder, ddev)
+ dpu_encoder_virt_runtime_resume(encoder);
+
+ return rc;
+}
+
+static const struct dev_pm_ops dpu_pm_ops = {
+ SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ .prepare = msm_pm_prepare,
+ .complete = msm_pm_complete,
+};
+
+static const struct of_device_id dpu_dt_match[] = {
+ { .compatible = "qcom,msm8998-dpu", },
+ { .compatible = "qcom,qcm2290-dpu", },
+ { .compatible = "qcom,sdm845-dpu", },
+ { .compatible = "qcom,sc7180-dpu", },
+ { .compatible = "qcom,sc7280-dpu", },
+ { .compatible = "qcom,sc8180x-dpu", },
+ { .compatible = "qcom,sm8150-dpu", },
+ { .compatible = "qcom,sm8250-dpu", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dpu_dt_match);
+
+static struct platform_driver dpu_driver = {
+ .probe = dpu_dev_probe,
+ .remove = dpu_dev_remove,
+ .shutdown = msm_drv_shutdown,
+ .driver = {
+ .name = "msm_dpu",
+ .of_match_table = dpu_dt_match,
+ .pm = &dpu_pm_ops,
+ },
+};
+
+void __init msm_dpu_register(void)
+{
+ platform_driver_register(&dpu_driver);
+}
+
+void __exit msm_dpu_unregister(void)
+{
+ platform_driver_unregister(&dpu_driver);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
new file mode 100644
index 000000000..ed80ed678
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __DPU_KMS_H__
+#define __DPU_KMS_H__
+
+#include <linux/interconnect.h>
+
+#include <drm/drm_drv.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_top.h"
+#include "dpu_rm.h"
+#include "dpu_core_perf.h"
+
+#define DRMID(x) ((x) ? (x)->base.id : -1)
+
+/**
+ * DPU_DEBUG - macro for kms/plane/crtc/encoder/connector logs
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG(fmt, ...) \
+ do { \
+ if (drm_debug_enabled(DRM_UT_KMS)) \
+ DRM_DEBUG(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+/**
+ * DPU_DEBUG_DRIVER - macro for hardware driver logging
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG_DRIVER(fmt, ...) \
+ do { \
+ if (drm_debug_enabled(DRM_UT_DRIVER)) \
+ DRM_ERROR(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__)
+
+/**
+ * ktime_compare_safe - compare two ktime structures
+ * This macro is similar to the standard ktime_compare() function, but
+ * attempts to also handle ktime overflows.
+ * @A: First ktime value
+ * @B: Second ktime value
+ * Returns: -1 if A < B, 0 if A == B, 1 if A > B
+ */
+#define ktime_compare_safe(A, B) \
+ ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
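+
+/*
+ * For example, if A has wrapped past KTIME_MAX while B has not, a direct
+ * ktime_compare(A, B) would report A < B; the subtraction above wraps
+ * back to a small positive delta, so ktime_compare_safe() reports A > B
+ * as intended.
+ */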
+
+#define DPU_NAME_SIZE 12
+
+struct dpu_kms {
+ struct msm_kms base;
+ struct drm_device *dev;
+ int core_rev;
+ const struct dpu_mdss_cfg *catalog;
+
+ /* io/register spaces: */
+ void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
+
+ struct regulator *vdd;
+ struct regulator *mmagic;
+ struct regulator *venus;
+
+ struct dpu_hw_intr *hw_intr;
+
+ struct dpu_core_perf perf;
+
+ /*
+ * Global private object state, Do not access directly, use
+ * dpu_kms_global_get_state()
+ */
+ struct drm_modeset_lock global_state_lock;
+ struct drm_private_obj global_state;
+
+ struct dpu_rm rm;
+ bool rm_init;
+
+ struct dpu_hw_vbif *hw_vbif[VBIF_MAX];
+ struct dpu_hw_mdp *hw_mdp;
+
+ bool has_danger_ctrl;
+
+ struct platform_device *pdev;
+ bool rpm_enabled;
+
+ struct clk_bulk_data *clocks;
+ size_t num_clocks;
+
+ /* reference count bandwidth requests, so we know when we can
+ * release bandwidth. Each atomic update increments, and frame-
+ * done event decrements. Additionally, for video mode, the
+ * reference is incremented when crtc is enabled, and decremented
+ * when disabled.
+ */
+ atomic_t bandwidth_ref;
+ struct icc_path *path[2];
+ u32 num_paths;
+};
+
+struct vsync_info {
+ u32 frame_count;
+ u32 line_count;
+};
+
+#define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
+
+#define to_dpu_global_state(x) container_of(x, struct dpu_global_state, base)
+
+/* Global private object state for tracking resources that are shared across
+ * multiple kms objects (planes/crtcs/etc).
+ */
+struct dpu_global_state {
+ struct drm_private_state base;
+
+ uint32_t pingpong_to_enc_id[PINGPONG_MAX - PINGPONG_0];
+ uint32_t mixer_to_enc_id[LM_MAX - LM_0];
+ uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
+ uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0];
+ uint32_t dsc_to_enc_id[DSC_MAX - DSC_0];
+};
+
+struct dpu_global_state
+ *dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms);
+struct dpu_global_state
+ *__must_check dpu_kms_get_global_state(struct drm_atomic_state *s);
+
+/**
+ * Debugfs functions - extra helper functions for debugfs support
+ *
+ * Main debugfs documentation is located at,
+ *
+ * Documentation/filesystems/debugfs.rst
+ *
+ * @dpu_debugfs_create_regset32: Create 32-bit register dump file
+ */
+
+/**
+ * dpu_debugfs_create_regset32 - Create register read back file for debugfs
+ *
+ * This function is almost identical to the standard debugfs_create_regset32()
+ * function, with the main difference being that a list of register
+ * names/offsets do not need to be provided. The 'read' function simply outputs
+ * sequential register values over a specified range.
+ *
+ * @name: File name within debugfs
+ * @mode: File mode within debugfs
+ * @parent: Parent directory entry within debugfs, can be NULL
+ * @offset: sub-block offset
+ * @length: sub-block length, in bytes
+ * @dpu_kms: pointer to dpu kms structure
+ */
+void dpu_debugfs_create_regset32(const char *name, umode_t mode,
+ void *parent,
+ uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms);
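+
+/*
+ * Example (illustrative): dump the MDP TOP sub-block using offsets from
+ * the hardware catalog:
+ *
+ *	dpu_debugfs_create_regset32("top", 0400, parent,
+ *				    cat->mdp[0].base, cat->mdp[0].len,
+ *				    dpu_kms);
+ */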
+
+/**
+ * dpu_debugfs_get_root - Return root directory entry for KMS's debugfs
+ *
+ * The return value should be passed as the 'parent' argument to subsequent
+ * debugfs create calls.
+ *
+ * @dpu_kms: Pointer to DPU's KMS structure
+ *
+ * Return: dentry pointer for DPU's debugfs location
+ */
+void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms);
+
+/*
+ * DPU info management definitions
+ * These definitions allow for building up a 'dpu_info' structure
+ * containing one or more "key=value\n" entries.
+ */
+#define DPU_KMS_INFO_MAX_SIZE 4096
+
+/*
+ * Vblank enable/disable functions
+ */
+int dpu_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void dpu_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+/**
+ * dpu_kms_get_clk_rate() - get the clock rate
+ * @dpu_kms: pointer to dpu_kms structure
+ * @clock_name: clock name to get the rate
+ *
+ * Return: current clock rate, or (u64)-EINVAL if @clock_name is not found
+ */
+u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name);
+
+#endif /* __DPU_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
new file mode 100644
index 000000000..62d48c0f9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -0,0 +1,1539 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_formats.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_vbif.h"
+#include "dpu_plane.h"
+
+#define DPU_DEBUG_PLANE(pl, fmt, ...) DRM_DEBUG_ATOMIC("plane%d " fmt,\
+ (pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_PLANE(pl, fmt, ...) DPU_ERROR("plane%d " fmt,\
+ (pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
+#define PHASE_STEP_SHIFT 21
+#define PHASE_STEP_UNIT_SCALE ((int) (1 << PHASE_STEP_SHIFT))
+#define PHASE_RESIDUAL 15
+
+#define SHARP_STRENGTH_DEFAULT 32
+#define SHARP_EDGE_THR_DEFAULT 112
+#define SHARP_SMOOTH_THR_DEFAULT 8
+#define SHARP_NOISE_THR_DEFAULT 2
+
+#define DPU_NAME_SIZE 12
+
+#define DPU_PLANE_COLOR_FILL_FLAG BIT(31)
+#define DPU_ZPOS_MAX 255
+
+/* multirect rect index */
+enum {
+ R0,
+ R1,
+ R_MAX
+};
+
+/*
+ * Default Preload Values
+ */
+#define DPU_QSEED3_DEFAULT_PRELOAD_H 0x4
+#define DPU_QSEED3_DEFAULT_PRELOAD_V 0x3
+#define DPU_QSEED4_DEFAULT_PRELOAD_V 0x2
+#define DPU_QSEED4_DEFAULT_PRELOAD_H 0x4
+
+#define DEFAULT_REFRESH_RATE 60
+
+static const uint32_t qcom_compressed_supported_formats[] = {
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_BGR565,
+
+ DRM_FORMAT_NV12,
+};
+
+/**
+ * enum dpu_plane_qos - Different qos configurations for each pipe
+ *
+ * @DPU_PLANE_QOS_VBLANK_CTRL: Setup VBLANK qos for the pipe.
+ * @DPU_PLANE_QOS_VBLANK_AMORTIZE: Enables amortization within pipe.
+ * This configuration is mutually exclusive with VBLANK_CTRL.
+ * @DPU_PLANE_QOS_PANIC_CTRL: Setup panic for the pipe.
+ */
+enum dpu_plane_qos {
+ DPU_PLANE_QOS_VBLANK_CTRL = BIT(0),
+ DPU_PLANE_QOS_VBLANK_AMORTIZE = BIT(1),
+ DPU_PLANE_QOS_PANIC_CTRL = BIT(2),
+};
+
+/*
+ * struct dpu_plane - local dpu plane structure
+ * @base: base drm plane structure
+ * @lock: plane state lock
+ * @pipe: SSPP pipe id
+ * @pipe_hw: pointer to the pipe hardware driver
+ * @color_fill: color fill value
+ * @is_error: plane is in an error state
+ * @is_rt_pipe: pipe serves a real-time display path
+ * @catalog: Points to dpu catalog structure
+ */
+struct dpu_plane {
+ struct drm_plane base;
+
+ struct mutex lock;
+
+ enum dpu_sspp pipe;
+
+ struct dpu_hw_pipe *pipe_hw;
+ uint32_t color_fill;
+ bool is_error;
+ bool is_rt_pipe;
+ const struct dpu_mdss_cfg *catalog;
+};
+
+static const uint64_t supported_format_modifiers[] = {
+ DRM_FORMAT_MOD_QCOM_COMPRESSED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+#define to_dpu_plane(x) container_of(x, struct dpu_plane, base)
+
+static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
+{
+ struct msm_drm_private *priv = plane->dev->dev_private;
+
+ return to_dpu_kms(priv->kms);
+}
+
+/**
+ * _dpu_plane_calc_bw - calculate bandwidth required for a plane
+ * @plane: Pointer to drm plane.
+ * @fb: Pointer to framebuffer associated with the given plane
+ * @pipe_cfg: Pointer to pipe configuration
+ * Result: Updates calculated bandwidth in the plane state.
+ * BW equation: src_w * v_total * fps * bpp * max(src_h / dst_h, 1)
+ * Prefill BW equation: plane BW * hw_latency_lines / vertical blanking lines
+ */
+static void _dpu_plane_calc_bw(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_pipe_cfg *pipe_cfg)
+{
+ struct dpu_plane_state *pstate;
+ struct drm_display_mode *mode;
+ const struct dpu_format *fmt = NULL;
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+ int src_width, src_height, dst_height, fps;
+ u64 plane_pixel_rate, plane_bit_rate;
+ u64 plane_prefill_bw;
+ u64 plane_bw;
+ u32 hw_latency_lines;
+ u64 scale_factor;
+ int vbp, vpw, vfp;
+
+ pstate = to_dpu_plane_state(plane->state);
+ mode = &plane->state->crtc->mode;
+
+ fmt = dpu_get_dpu_format_ext(fb->format->format, fb->modifier);
+
+ src_width = drm_rect_width(&pipe_cfg->src_rect);
+ src_height = drm_rect_height(&pipe_cfg->src_rect);
+ dst_height = drm_rect_height(&pipe_cfg->dst_rect);
+ fps = drm_mode_vrefresh(mode);
+ vbp = mode->vtotal - mode->vsync_end;
+ vpw = mode->vsync_end - mode->vsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ hw_latency_lines = dpu_kms->catalog->perf->min_prefill_lines;
+ scale_factor = src_height > dst_height ?
+ mult_frac(src_height, 1, dst_height) : 1;
+
+ plane_pixel_rate = src_width * mode->vtotal * fps;
+ plane_bit_rate = plane_pixel_rate * fmt->bpp;
+
+ plane_bw = plane_bit_rate * scale_factor;
+
+ plane_prefill_bw = plane_bw * hw_latency_lines;
+
+ if ((vbp+vpw) > hw_latency_lines)
+ do_div(plane_prefill_bw, (vbp+vpw));
+ else if ((vbp+vpw+vfp) < hw_latency_lines)
+ do_div(plane_prefill_bw, (vbp+vpw+vfp));
+ else
+ do_div(plane_prefill_bw, hw_latency_lines);
+
+ pstate->plane_fetch_bw = max(plane_bw, plane_prefill_bw);
+}
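+
+/*
+ * Worked example (illustrative numbers only, not taken from any catalog):
+ * a 1080-wide ARGB8888 layer (fmt->bpp = 4) on a mode with vtotal = 2000
+ * at 60 fps, no downscaling (scale_factor = 1):
+ *
+ *   plane_pixel_rate = 1080 * 2000 * 60     = 129,600,000 pixels/s
+ *   plane_bw         = 129,600,000 * 4 * 1  = 518,400,000 bytes/s
+ *
+ * If min_prefill_lines were 24 and vbp + vpw = 30 (> 24), then
+ * plane_prefill_bw = 518,400,000 * 24 / 30 = 414,720,000 bytes/s and
+ * plane_fetch_bw = max(plane_bw, plane_prefill_bw) = 518,400,000 bytes/s.
+ */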
+
+/**
+ * _dpu_plane_calc_clk - calculate clock required for a plane
+ * @plane: Pointer to drm plane.
+ * @pipe_cfg: Pointer to pipe configuration
+ * Result: Updates calculated clock in the plane state.
+ * Clock equation: dst_w * v_total * fps * max(src_h / dst_h, 1)
+ */
+static void _dpu_plane_calc_clk(struct drm_plane *plane, struct dpu_hw_pipe_cfg *pipe_cfg)
+{
+ struct dpu_plane_state *pstate;
+ struct drm_display_mode *mode;
+ int dst_width, src_height, dst_height, fps;
+
+ pstate = to_dpu_plane_state(plane->state);
+ mode = &plane->state->crtc->mode;
+
+ src_height = drm_rect_height(&pipe_cfg->src_rect);
+ dst_width = drm_rect_width(&pipe_cfg->dst_rect);
+ dst_height = drm_rect_height(&pipe_cfg->dst_rect);
+ fps = drm_mode_vrefresh(mode);
+
+ pstate->plane_clk =
+ dst_width * mode->vtotal * fps;
+
+ if (src_height > dst_height) {
+ pstate->plane_clk *= src_height;
+ do_div(pstate->plane_clk, dst_height);
+ }
+}
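+
+/*
+ * Worked example (illustrative numbers): dst_w = 1080, vtotal = 2000 and
+ * fps = 60 give plane_clk = 1080 * 2000 * 60 = 129.6 MHz; a 2:1 vertical
+ * downscale (src_h = 2160, dst_h = 1080) doubles this to 259.2 MHz.
+ */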
+
+/**
+ * _dpu_plane_calc_fill_level - calculate fill level of the given source format
+ * @plane: Pointer to drm plane
+ * @fmt: Pointer to source buffer format
+ * @src_width: width of source buffer
+ * Return: fill level corresponding to the source buffer/format or 0 if error
+ */
+static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
+ const struct dpu_format *fmt, u32 src_width)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+ u32 fixed_buff_size;
+ u32 total_fl;
+
+ if (!fmt || !plane->state || !src_width || !fmt->bpp) {
+ DPU_ERROR("invalid arguments\n");
+ return 0;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pstate = to_dpu_plane_state(plane->state);
+ fixed_buff_size = pdpu->catalog->caps->pixel_ram_size;
+
+ /* FIXME: in multirect case account for the src_width of all the planes */
+
+ if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
+ if (fmt->chroma_sample == DPU_CHROMA_420) {
+ /* NV12 */
+ total_fl = (fixed_buff_size / 2) /
+ ((src_width + 32) * fmt->bpp);
+ } else {
+ /* non NV12 */
+ total_fl = (fixed_buff_size / 2) * 2 /
+ ((src_width + 32) * fmt->bpp);
+ }
+ } else {
+ if (pstate->multirect_mode == DPU_SSPP_MULTIRECT_PARALLEL) {
+ total_fl = (fixed_buff_size / 2) * 2 /
+ ((src_width + 32) * fmt->bpp);
+ } else {
+ total_fl = (fixed_buff_size) * 2 /
+ ((src_width + 32) * fmt->bpp);
+ }
+ }
+
+ DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s w:%u fl:%u\n",
+ pdpu->pipe - SSPP_VIG0,
+ (char *)&fmt->base.pixel_format,
+ src_width, total_fl);
+
+ return total_fl;
+}
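+
+/*
+ * Worked example (illustrative, assuming a hypothetical pixel_ram_size of
+ * 50 KiB = 51200 bytes): a solo linear 4-byte format with src_width = 992
+ * yields total_fl = 51200 * 2 / ((992 + 32) * 4) = 25.
+ */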
+
+/**
+ * _dpu_plane_set_qos_lut - set QoS LUT of the given plane
+ * @plane: Pointer to drm plane
+ * @fb: Pointer to framebuffer associated with the given plane
+ * @pipe_cfg: Pointer to pipe configuration
+ */
+static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
+ struct drm_framebuffer *fb, struct dpu_hw_pipe_cfg *pipe_cfg)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ const struct dpu_format *fmt = NULL;
+ u64 qos_lut;
+ u32 total_fl = 0, lut_usage;
+
+ if (!pdpu->is_rt_pipe) {
+ lut_usage = DPU_QOS_LUT_USAGE_NRT;
+ } else {
+ fmt = dpu_get_dpu_format_ext(
+ fb->format->format,
+ fb->modifier);
+ total_fl = _dpu_plane_calc_fill_level(plane, fmt,
+ drm_rect_width(&pipe_cfg->src_rect));
+
+ if (fmt && DPU_FORMAT_IS_LINEAR(fmt))
+ lut_usage = DPU_QOS_LUT_USAGE_LINEAR;
+ else
+ lut_usage = DPU_QOS_LUT_USAGE_MACROTILE;
+ }
+
+ qos_lut = _dpu_hw_get_qos_lut(
+ &pdpu->catalog->perf->qos_lut_tbl[lut_usage], total_fl);
+
+ trace_dpu_perf_set_qos_luts(pdpu->pipe - SSPP_VIG0,
+ (fmt) ? fmt->base.pixel_format : 0,
+ pdpu->is_rt_pipe, total_fl, qos_lut, lut_usage);
+
+ DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%llx\n",
+ pdpu->pipe - SSPP_VIG0,
+ fmt ? (char *)&fmt->base.pixel_format : NULL,
+ pdpu->is_rt_pipe, total_fl, qos_lut);
+
+ pdpu->pipe_hw->ops.setup_creq_lut(pdpu->pipe_hw, qos_lut);
+}
+
+/**
+ * _dpu_plane_set_danger_lut - set danger/safe LUT of the given plane
+ * @plane: Pointer to drm plane
+ * @fb: Pointer to framebuffer associated with the given plane
+ */
+static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ const struct dpu_format *fmt = NULL;
+ u32 danger_lut, safe_lut;
+
+ if (!pdpu->is_rt_pipe) {
+ danger_lut = pdpu->catalog->perf->danger_lut_tbl
+ [DPU_QOS_LUT_USAGE_NRT];
+ safe_lut = pdpu->catalog->perf->safe_lut_tbl
+ [DPU_QOS_LUT_USAGE_NRT];
+ } else {
+ fmt = dpu_get_dpu_format_ext(
+ fb->format->format,
+ fb->modifier);
+
+ if (fmt && DPU_FORMAT_IS_LINEAR(fmt)) {
+ danger_lut = pdpu->catalog->perf->danger_lut_tbl
+ [DPU_QOS_LUT_USAGE_LINEAR];
+ safe_lut = pdpu->catalog->perf->safe_lut_tbl
+ [DPU_QOS_LUT_USAGE_LINEAR];
+ } else {
+ danger_lut = pdpu->catalog->perf->danger_lut_tbl
+ [DPU_QOS_LUT_USAGE_MACROTILE];
+ safe_lut = pdpu->catalog->perf->safe_lut_tbl
+ [DPU_QOS_LUT_USAGE_MACROTILE];
+ }
+ }
+
+ trace_dpu_perf_set_danger_luts(pdpu->pipe - SSPP_VIG0,
+ (fmt) ? fmt->base.pixel_format : 0,
+ (fmt) ? fmt->fetch_mode : 0,
+ danger_lut,
+ safe_lut);
+
+ DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
+ pdpu->pipe - SSPP_VIG0,
+ fmt ? (char *)&fmt->base.pixel_format : NULL,
+ fmt ? fmt->fetch_mode : -1,
+ danger_lut,
+ safe_lut);
+
+ pdpu->pipe_hw->ops.setup_danger_safe_lut(pdpu->pipe_hw,
+ danger_lut, safe_lut);
+}
+
+/**
+ * _dpu_plane_set_qos_ctrl - set QoS control of the given plane
+ * @plane: Pointer to drm plane
+ * @enable: true to enable QoS control
+ * @flags: QoS control mode (enum dpu_plane_qos)
+ */
+static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
+ bool enable, u32 flags)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_hw_pipe_qos_cfg pipe_qos_cfg;
+
+ memset(&pipe_qos_cfg, 0, sizeof(pipe_qos_cfg));
+
+ if (flags & DPU_PLANE_QOS_VBLANK_CTRL) {
+ pipe_qos_cfg.creq_vblank = pdpu->pipe_hw->cap->sblk->creq_vblank;
+ pipe_qos_cfg.danger_vblank =
+ pdpu->pipe_hw->cap->sblk->danger_vblank;
+ pipe_qos_cfg.vblank_en = enable;
+ }
+
+ if (flags & DPU_PLANE_QOS_VBLANK_AMORTIZE) {
+ /* this feature overrules previous VBLANK_CTRL */
+ pipe_qos_cfg.vblank_en = false;
+ pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
+ }
+
+ if (flags & DPU_PLANE_QOS_PANIC_CTRL)
+ pipe_qos_cfg.danger_safe_en = enable;
+
+ if (!pdpu->is_rt_pipe) {
+ pipe_qos_cfg.vblank_en = false;
+ pipe_qos_cfg.danger_safe_en = false;
+ }
+
+ DPU_DEBUG_PLANE(pdpu, "pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n",
+ pdpu->pipe - SSPP_VIG0,
+ pipe_qos_cfg.danger_safe_en,
+ pipe_qos_cfg.vblank_en,
+ pipe_qos_cfg.creq_vblank,
+ pipe_qos_cfg.danger_vblank,
+ pdpu->is_rt_pipe);
+
+ pdpu->pipe_hw->ops.setup_qos_ctrl(pdpu->pipe_hw,
+ &pipe_qos_cfg);
+}
+
+/**
+ * _dpu_plane_set_ot_limit - set OT limit for the given plane
+ * @plane: Pointer to drm plane
+ * @crtc: Pointer to drm crtc
+ * @pipe_cfg: Pointer to pipe configuration
+ */
+static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct dpu_hw_pipe_cfg *pipe_cfg)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_vbif_set_ot_params ot_params;
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+
+ memset(&ot_params, 0, sizeof(ot_params));
+ ot_params.xin_id = pdpu->pipe_hw->cap->xin_id;
+ ot_params.num = pdpu->pipe_hw->idx - SSPP_NONE;
+ ot_params.width = drm_rect_width(&pipe_cfg->src_rect);
+ ot_params.height = drm_rect_height(&pipe_cfg->src_rect);
+ ot_params.is_wfd = !pdpu->is_rt_pipe;
+ ot_params.frame_rate = drm_mode_vrefresh(&crtc->mode);
+ ot_params.vbif_idx = VBIF_RT;
+ ot_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
+ ot_params.rd = true;
+
+ dpu_vbif_set_ot_limit(dpu_kms, &ot_params);
+}
+
+/**
+ * _dpu_plane_set_qos_remap - set vbif QoS for the given plane
+ * @plane: Pointer to drm plane
+ */
+static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_vbif_set_qos_params qos_params;
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+
+ memset(&qos_params, 0, sizeof(qos_params));
+ qos_params.vbif_idx = VBIF_RT;
+ qos_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
+ qos_params.xin_id = pdpu->pipe_hw->cap->xin_id;
+ qos_params.num = pdpu->pipe_hw->idx - SSPP_VIG0;
+ qos_params.is_rt = pdpu->is_rt_pipe;
+
+ DPU_DEBUG_PLANE(pdpu, "pipe:%d vbif:%d xin:%d rt:%d, clk_ctrl:%d\n",
+ qos_params.num,
+ qos_params.vbif_idx,
+ qos_params.xin_id, qos_params.is_rt,
+ qos_params.clk_ctrl);
+
+ dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
+}
+
+static void _dpu_plane_set_scanout(struct drm_plane *plane,
+ struct dpu_plane_state *pstate,
+ struct dpu_hw_pipe_cfg *pipe_cfg,
+ struct drm_framebuffer *fb)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
+ struct msm_gem_address_space *aspace = kms->base.aspace;
+ int ret;
+
+ ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout);
+ if (ret == -EAGAIN)
+ DPU_DEBUG_PLANE(pdpu, "not updating same src addrs\n");
+ else if (ret)
+ DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+ else if (pdpu->pipe_hw->ops.setup_sourceaddress) {
+ trace_dpu_plane_set_scanout(pdpu->pipe_hw->idx,
+ &pipe_cfg->layout,
+ pstate->multirect_index);
+ pdpu->pipe_hw->ops.setup_sourceaddress(pdpu->pipe_hw, pipe_cfg,
+ pstate->multirect_index);
+ }
+}
+
+static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
+ struct dpu_plane_state *pstate,
+ uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
+ struct dpu_hw_scaler3_cfg *scale_cfg,
+ const struct dpu_format *fmt,
+ uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
+{
+ uint32_t i;
+ bool inline_rotation = pstate->rotation & DRM_MODE_ROTATE_90;
+
+ /*
+ * For inline rotation cases, scaler config is post-rotation,
+ * so swap the dimensions here. However, pixel extension will
+ * need pre-rotation settings.
+ */
+ if (inline_rotation)
+ swap(src_w, src_h);
+
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_0] =
+ mult_frac((1 << PHASE_STEP_SHIFT), src_w, dst_w);
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_0] =
+ mult_frac((1 << PHASE_STEP_SHIFT), src_h, dst_h);
+
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2] =
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_0] / chroma_subsmpl_v;
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2] =
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_0] / chroma_subsmpl_h;
+
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_2] =
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2];
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_2] =
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2];
+
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_3] =
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_0];
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_3] =
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_0];
+
+ for (i = 0; i < DPU_MAX_PLANES; i++) {
+ scale_cfg->src_width[i] = src_w;
+ scale_cfg->src_height[i] = src_h;
+ if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
+ scale_cfg->src_width[i] /= chroma_subsmpl_h;
+ scale_cfg->src_height[i] /= chroma_subsmpl_v;
+ }
+
+ if (pdpu->pipe_hw->cap->features &
+ BIT(DPU_SSPP_SCALER_QSEED4)) {
+ scale_cfg->preload_x[i] = DPU_QSEED4_DEFAULT_PRELOAD_H;
+ scale_cfg->preload_y[i] = DPU_QSEED4_DEFAULT_PRELOAD_V;
+ } else {
+ scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
+ scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
+ }
+ }
+ if (!(DPU_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
+ && (src_w == dst_w))
+ return;
+
+ scale_cfg->dst_width = dst_w;
+ scale_cfg->dst_height = dst_h;
+ scale_cfg->y_rgb_filter_cfg = DPU_SCALE_BIL;
+ scale_cfg->uv_filter_cfg = DPU_SCALE_BIL;
+ scale_cfg->alpha_filter_cfg = DPU_SCALE_ALPHA_BIL;
+ scale_cfg->lut_flag = 0;
+ scale_cfg->blend_cfg = 1;
+ scale_cfg->enable = 1;
+}
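+
+/*
+ * Phase-step sketch (illustrative): the scaler walks the source in fixed
+ * point with 21 fractional bits, phase_step = (src << 21) / dst. Scaling
+ * src_w = 1920 down to dst_w = 960 gives 2 << 21 = 0x400000, i.e. the
+ * scaler advances 2.0 source pixels per destination pixel.
+ */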
+
+static void _dpu_plane_setup_pixel_ext(struct dpu_hw_scaler3_cfg *scale_cfg,
+ struct dpu_hw_pixel_ext *pixel_ext,
+ uint32_t src_w, uint32_t src_h,
+ uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
+{
+ int i;
+
+ for (i = 0; i < DPU_MAX_PLANES; i++) {
+ if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
+ src_w /= chroma_subsmpl_h;
+ src_h /= chroma_subsmpl_v;
+ }
+
+ pixel_ext->num_ext_pxls_top[i] = src_h;
+ pixel_ext->num_ext_pxls_left[i] = src_w;
+ }
+}
+
+static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xfff0, 0xff80, 0xff80,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+ { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
+};
+
+static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xffc0, 0xfe00, 0xfe00,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+ { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
+};
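+
+/*
+ * Reading aid (not driver code): the matrix entries above are S15.16 fixed
+ * point, so 0x00012A00 = 0x12A00 / 65536 = 1.1640625, the BT.601
+ * limited-range luma gain (255 / 219 ~= 1.164), and 0xFFFF9B80 is the
+ * negative Cb coefficient -0x6480 / 65536 ~= -0.3926.
+ */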
+
+static const struct dpu_csc_cfg *_dpu_plane_get_csc(struct dpu_plane *pdpu, const struct dpu_format *fmt)
+{
+ const struct dpu_csc_cfg *csc_ptr;
+
+ if (!pdpu) {
+ DPU_ERROR("invalid plane\n");
+ return NULL;
+ }
+
+ if (!DPU_FORMAT_IS_YUV(fmt))
+ return NULL;
+
+ if (BIT(DPU_SSPP_CSC_10BIT) & pdpu->pipe_hw->cap->features)
+ csc_ptr = &dpu_csc10_YUV2RGB_601L;
+ else
+ csc_ptr = &dpu_csc_YUV2RGB_601L;
+
+ DPU_DEBUG_PLANE(pdpu, "using 0x%X 0x%X 0x%X...\n",
+ csc_ptr->csc_mv[0],
+ csc_ptr->csc_mv[1],
+ csc_ptr->csc_mv[2]);
+
+ return csc_ptr;
+}
+
+static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
+ struct dpu_plane_state *pstate,
+ const struct dpu_format *fmt, bool color_fill,
+ struct dpu_hw_pipe_cfg *pipe_cfg)
+{
+ const struct drm_format_info *info = drm_format_info(fmt->base.pixel_format);
+ struct dpu_hw_scaler3_cfg scaler3_cfg;
+ struct dpu_hw_pixel_ext pixel_ext;
+ u32 src_width = drm_rect_width(&pipe_cfg->src_rect);
+ u32 src_height = drm_rect_height(&pipe_cfg->src_rect);
+ u32 dst_width = drm_rect_width(&pipe_cfg->dst_rect);
+ u32 dst_height = drm_rect_height(&pipe_cfg->dst_rect);
+
+ memset(&scaler3_cfg, 0, sizeof(scaler3_cfg));
+ memset(&pixel_ext, 0, sizeof(pixel_ext));
+
+ /* don't chroma subsample if decimating */
+ /* update scaler. calculate default config for QSEED3 */
+ _dpu_plane_setup_scaler3(pdpu, pstate,
+ src_width,
+ src_height,
+ dst_width,
+ dst_height,
+ &scaler3_cfg, fmt,
+ info->hsub, info->vsub);
+
+	/* configure pixel extension based on scaler config */
+ _dpu_plane_setup_pixel_ext(&scaler3_cfg, &pixel_ext,
+ src_width, src_height, info->hsub, info->vsub);
+
+ if (pdpu->pipe_hw->ops.setup_pe)
+ pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
+ &pixel_ext);
+
+	/*
+	 * When programmed in multirect mode, the scaler block is
+	 * bypassed. However, alpha and bitwidth still need to be
+	 * updated, and ONLY for RECT0.
+	 */
+ if (pdpu->pipe_hw->ops.setup_scaler &&
+ pstate->multirect_index != DPU_SSPP_RECT_1)
+ pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
+ pipe_cfg,
+ &scaler3_cfg);
+}
+
+/**
+ * _dpu_plane_color_fill - enables color fill on plane
+ * @pdpu: Pointer to DPU plane object
+ * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha: 8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
+ uint32_t color, uint32_t alpha)
+{
+ const struct dpu_format *fmt;
+ const struct drm_plane *plane = &pdpu->base;
+ struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state);
+ struct dpu_hw_pipe_cfg pipe_cfg;
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ /*
+ * select fill format to match user property expectation,
+ * h/w only supports RGB variants
+ */
+ fmt = dpu_get_dpu_format(DRM_FORMAT_ABGR8888);
+
+ /* update sspp */
+ if (fmt && pdpu->pipe_hw->ops.setup_solidfill) {
+ pdpu->pipe_hw->ops.setup_solidfill(pdpu->pipe_hw,
+ (color & 0xFFFFFF) | ((alpha & 0xFF) << 24),
+ pstate->multirect_index);
+
+ /* override scaler/decimation if solid fill */
+ pipe_cfg.dst_rect = pstate->base.dst;
+
+ pipe_cfg.src_rect.x1 = 0;
+ pipe_cfg.src_rect.y1 = 0;
+ pipe_cfg.src_rect.x2 =
+ drm_rect_width(&pipe_cfg.dst_rect);
+ pipe_cfg.src_rect.y2 =
+ drm_rect_height(&pipe_cfg.dst_rect);
+
+ if (pdpu->pipe_hw->ops.setup_format)
+ pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw,
+ fmt, DPU_SSPP_SOLID_FILL,
+ pstate->multirect_index);
+
+ if (pdpu->pipe_hw->ops.setup_rects)
+ pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
+ &pipe_cfg,
+ pstate->multirect_index);
+
+ _dpu_plane_setup_scaler(pdpu, pstate, fmt, true, &pipe_cfg);
+ }
+
+ return 0;
+}
+
+void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state)
+{
+ struct dpu_plane_state *pstate = to_dpu_plane_state(drm_state);
+
+ pstate->multirect_index = DPU_SSPP_RECT_SOLO;
+ pstate->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+}
+
+int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane)
+{
+ struct dpu_plane_state *pstate[R_MAX];
+ const struct drm_plane_state *drm_state[R_MAX];
+ struct drm_rect src[R_MAX], dst[R_MAX];
+ struct dpu_plane *dpu_plane[R_MAX];
+ const struct dpu_format *fmt[R_MAX];
+ int i, buffer_lines;
+ unsigned int max_tile_height = 1;
+ bool parallel_fetch_qualified = true;
+ bool has_tiled_rect = false;
+
+ for (i = 0; i < R_MAX; i++) {
+ const struct msm_format *msm_fmt;
+
+ drm_state[i] = i ? plane->r1 : plane->r0;
+ msm_fmt = msm_framebuffer_format(drm_state[i]->fb);
+ fmt[i] = to_dpu_format(msm_fmt);
+
+ if (DPU_FORMAT_IS_UBWC(fmt[i])) {
+ has_tiled_rect = true;
+ if (fmt[i]->tile_height > max_tile_height)
+ max_tile_height = fmt[i]->tile_height;
+ }
+ }
+
+ for (i = 0; i < R_MAX; i++) {
+ int width_threshold;
+
+ pstate[i] = to_dpu_plane_state(drm_state[i]);
+ dpu_plane[i] = to_dpu_plane(drm_state[i]->plane);
+
+ if (pstate[i] == NULL) {
+ DPU_ERROR("DPU plane state of plane id %d is NULL\n",
+ drm_state[i]->plane->base.id);
+ return -EINVAL;
+ }
+
+ src[i].x1 = drm_state[i]->src_x >> 16;
+ src[i].y1 = drm_state[i]->src_y >> 16;
+ src[i].x2 = src[i].x1 + (drm_state[i]->src_w >> 16);
+ src[i].y2 = src[i].y1 + (drm_state[i]->src_h >> 16);
+
+ dst[i] = drm_plane_state_dest(drm_state[i]);
+
+ if (drm_rect_calc_hscale(&src[i], &dst[i], 1, 1) != 1 ||
+ drm_rect_calc_vscale(&src[i], &dst[i], 1, 1) != 1) {
+ DPU_ERROR_PLANE(dpu_plane[i],
+ "scaling is not supported in multirect mode\n");
+ return -EINVAL;
+ }
+
+ if (DPU_FORMAT_IS_YUV(fmt[i])) {
+ DPU_ERROR_PLANE(dpu_plane[i],
+ "Unsupported format for multirect mode\n");
+ return -EINVAL;
+ }
+
+		/*
+		 * SSPP PD_MEM is split in half, one for each RECT.
+		 * Tiled formats need 5 lines of buffering while fetching,
+		 * whereas linear formats need only 2 lines.
+		 * So we cannot support more than half of the supported SSPP
+		 * width for tiled formats.
+		 */
+ width_threshold = dpu_plane[i]->catalog->caps->max_linewidth;
+ if (has_tiled_rect)
+ width_threshold /= 2;
+
+ if (parallel_fetch_qualified &&
+ drm_rect_width(&src[i]) > width_threshold)
+ parallel_fetch_qualified = false;
+ }
+
+	/* Validate RECTs and set the mode */
+
+ /* Prefer PARALLEL FETCH Mode over TIME_MX Mode */
+ if (parallel_fetch_qualified) {
+ pstate[R0]->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+ pstate[R1]->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+
+ goto done;
+ }
+
+ /* TIME_MX Mode */
+ buffer_lines = 2 * max_tile_height;
+
+ if (dst[R1].y1 >= dst[R0].y2 + buffer_lines ||
+ dst[R0].y1 >= dst[R1].y2 + buffer_lines) {
+ pstate[R0]->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+ pstate[R1]->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+ } else {
+ DPU_ERROR(
+ "No multirect mode possible for the planes (%d - %d)\n",
+ drm_state[R0]->plane->base.id,
+ drm_state[R1]->plane->base.id);
+ return -EINVAL;
+ }
+
+done:
+ pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
+ pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
+
+ DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n",
+ pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
+ DPU_DEBUG_PLANE(dpu_plane[R1], "R1: %d - %d\n",
+ pstate[R1]->multirect_mode, pstate[R1]->multirect_index);
+ return 0;
+}
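+
+/*
+ * Illustrative decision (made-up numbers): with max_linewidth = 2560 and at
+ * least one UBWC rect, width_threshold = 1280; two 1080-wide rects still
+ * qualify for PARALLEL fetch, while a 1440-wide rect forces TIME_MX mode,
+ * which additionally requires the two destinations to be at least
+ * 2 * max_tile_height lines apart vertically.
+ */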
+
+static int dpu_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_framebuffer *fb = new_state->fb;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
+ struct dpu_hw_fmt_layout layout;
+ struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
+ int ret;
+
+ if (!new_state->fb)
+ return 0;
+
+ DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
+
+ /* cache aspace */
+ pstate->aspace = kms->base.aspace;
+
+ /*
+ * TODO: Need to sort out the msm_framebuffer_prepare() call below so
+ * we can use msm_atomic_prepare_fb() instead of doing the
+ * implicit fence and fb prepare by hand here.
+ */
+ drm_gem_plane_helper_prepare_fb(plane, new_state);
+
+ if (pstate->aspace) {
+ ret = msm_framebuffer_prepare(new_state->fb,
+ pstate->aspace, pstate->needs_dirtyfb);
+ if (ret) {
+ DPU_ERROR("failed to prepare framebuffer\n");
+ return ret;
+ }
+ }
+
+ /* validate framebuffer layout before commit */
+ ret = dpu_format_populate_layout(pstate->aspace,
+ new_state->fb, &layout);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void dpu_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *old_pstate;
+
+ if (!old_state || !old_state->fb)
+ return;
+
+ old_pstate = to_dpu_plane_state(old_state);
+
+ DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", old_state->fb->base.id);
+
+ msm_framebuffer_cleanup(old_state->fb, old_pstate->aspace,
+ old_pstate->needs_dirtyfb);
+}
+
+static bool dpu_plane_validate_src(struct drm_rect *src,
+ struct drm_rect *fb_rect,
+ uint32_t min_src_size)
+{
+ /* Ensure fb size is supported */
+ if (drm_rect_width(fb_rect) > MAX_IMG_WIDTH ||
+ drm_rect_height(fb_rect) > MAX_IMG_HEIGHT)
+ return false;
+
+ /* Ensure src rect is above the minimum size */
+ if (drm_rect_width(src) < min_src_size ||
+ drm_rect_height(src) < min_src_size)
+ return false;
+
+	/* Ensure src is fully contained in fb: intersecting fb with src must yield src */
+ return drm_rect_intersect(fb_rect, src) &&
+ drm_rect_equals(fb_rect, src);
+}
+
+static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
+ const struct dpu_sspp_sub_blks *sblk,
+ struct drm_rect src, const struct dpu_format *fmt)
+{
+ size_t num_formats;
+ const u32 *supported_formats;
+
+ if (!sblk->rotation_cfg) {
+ DPU_ERROR("invalid rotation cfg\n");
+ return -EINVAL;
+ }
+
+	/* with 90 degree rotation the source width becomes the fetched height */
+	if (drm_rect_width(&src) > sblk->rotation_cfg->rot_maxheight) {
+		DPU_DEBUG_PLANE(pdpu, "invalid height for inline rot:%d max:%d\n",
+			drm_rect_width(&src), sblk->rotation_cfg->rot_maxheight);
+ return -EINVAL;
+ }
+
+ supported_formats = sblk->rotation_cfg->rot_format_list;
+ num_formats = sblk->rotation_cfg->rot_num_formats;
+
+ if (!DPU_FORMAT_IS_UBWC(fmt) ||
+ !dpu_find_format(fmt->base.pixel_format, supported_formats, num_formats))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dpu_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ int ret = 0, min_scale;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
+ const struct drm_crtc_state *crtc_state = NULL;
+ const struct dpu_format *fmt;
+ struct drm_rect src, dst, fb_rect = { 0 };
+ uint32_t min_src_size, max_linewidth;
+ unsigned int rotation;
+ uint32_t supported_rotations;
+ const struct dpu_sspp_cfg *pipe_hw_caps = pdpu->pipe_hw->cap;
+ const struct dpu_sspp_sub_blks *sblk = pdpu->pipe_hw->cap->sblk;
+
+ if (new_plane_state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_plane_state->crtc);
+
+ min_scale = FRAC_16_16(1, sblk->maxupscale);
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+ min_scale,
+ sblk->maxdwnscale << 16,
+ true, true);
+ if (ret) {
+ DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
+ return ret;
+ }
+ if (!new_plane_state->visible)
+ return 0;
+
+ src.x1 = new_plane_state->src_x >> 16;
+ src.y1 = new_plane_state->src_y >> 16;
+ src.x2 = src.x1 + (new_plane_state->src_w >> 16);
+ src.y2 = src.y1 + (new_plane_state->src_h >> 16);
+
+ dst = drm_plane_state_dest(new_plane_state);
+
+ fb_rect.x2 = new_plane_state->fb->width;
+ fb_rect.y2 = new_plane_state->fb->height;
+
+ max_linewidth = pdpu->catalog->caps->max_linewidth;
+
+ fmt = to_dpu_format(msm_framebuffer_format(new_plane_state->fb));
+
+ min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
+
+ if (DPU_FORMAT_IS_YUV(fmt) &&
+ (!(pipe_hw_caps->features & DPU_SSPP_SCALER) ||
+ !(pipe_hw_caps->features & DPU_SSPP_CSC_ANY))) {
+ DPU_DEBUG_PLANE(pdpu,
+ "plane doesn't have scaler/csc for yuv\n");
+ return -EINVAL;
+
+ /* check src bounds */
+ } else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) {
+ DPU_DEBUG_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&src));
+ return -E2BIG;
+
+	/* yuv source must have even position and size */
+ } else if (DPU_FORMAT_IS_YUV(fmt) &&
+ (src.x1 & 0x1 || src.y1 & 0x1 ||
+ drm_rect_width(&src) & 0x1 ||
+ drm_rect_height(&src) & 0x1)) {
+ DPU_DEBUG_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&src));
+ return -EINVAL;
+
+ /* min dst support */
+ } else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) {
+ DPU_DEBUG_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&dst));
+ return -EINVAL;
+
+	/* check source width against the line-width limit */
+ } else if (drm_rect_width(&src) > max_linewidth) {
+ DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
+ DRM_RECT_ARG(&src), max_linewidth);
+ return -E2BIG;
+ }
+
+ supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0;
+
+ if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION))
+ supported_rotations |= DRM_MODE_ROTATE_90;
+
+ rotation = drm_rotation_simplify(new_plane_state->rotation,
+ supported_rotations);
+
+ if ((pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION)) &&
+ (rotation & DRM_MODE_ROTATE_90)) {
+ ret = dpu_plane_check_inline_rotation(pdpu, sblk, src, fmt);
+ if (ret)
+ return ret;
+ }
+
+ pstate->rotation = rotation;
+ pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state);
+
+ return 0;
+}
+
+void dpu_plane_flush(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+
+ if (!plane || !plane->state) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pstate = to_dpu_plane_state(plane->state);
+
+ /*
+ * These updates have to be done immediately before the plane flush
+ * timing, and may not be moved to the atomic_update/mode_set functions.
+ */
+ if (pdpu->is_error)
+ /* force white frame with 100% alpha pipe output on error */
+ _dpu_plane_color_fill(pdpu, 0xFFFFFF, 0xFF);
+ else if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG)
+ /* force 100% alpha */
+ _dpu_plane_color_fill(pdpu, pdpu->color_fill, 0xFF);
+ else if (pdpu->pipe_hw && pdpu->pipe_hw->ops.setup_csc) {
+ const struct dpu_format *fmt = to_dpu_format(msm_framebuffer_format(plane->state->fb));
+ const struct dpu_csc_cfg *csc_ptr = _dpu_plane_get_csc(pdpu, fmt);
+
+ if (csc_ptr)
+ pdpu->pipe_hw->ops.setup_csc(pdpu->pipe_hw, csc_ptr);
+ }
+
+ /* flag h/w flush complete */
+ if (plane->state)
+ pstate->pending = false;
+}
+
+/**
+ * dpu_plane_set_error: enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ * @error: error value to set
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error)
+{
+ struct dpu_plane *pdpu;
+
+ if (!plane)
+ return;
+
+ pdpu = to_dpu_plane(plane);
+ pdpu->is_error = error;
+}
+
+static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
+{
+ uint32_t src_flags;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ struct dpu_plane_state *pstate = to_dpu_plane_state(state);
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ bool is_rt_pipe;
+ const struct dpu_format *fmt =
+ to_dpu_format(msm_framebuffer_format(fb));
+ struct dpu_hw_pipe_cfg pipe_cfg;
+
+ memset(&pipe_cfg, 0, sizeof(struct dpu_hw_pipe_cfg));
+
+ _dpu_plane_set_scanout(plane, pstate, &pipe_cfg, fb);
+
+ pstate->pending = true;
+
+ is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT);
+ pstate->needs_qos_remap |= (is_rt_pipe != pdpu->is_rt_pipe);
+ pdpu->is_rt_pipe = is_rt_pipe;
+
+ _dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
+
+ DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT
+ ", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src),
+ crtc->base.id, DRM_RECT_ARG(&state->dst),
+ (char *)&fmt->base.pixel_format, DPU_FORMAT_IS_UBWC(fmt));
+
+ pipe_cfg.src_rect = state->src;
+
+ /* state->src is 16.16, src_rect is not */
+ pipe_cfg.src_rect.x1 >>= 16;
+ pipe_cfg.src_rect.x2 >>= 16;
+ pipe_cfg.src_rect.y1 >>= 16;
+ pipe_cfg.src_rect.y2 >>= 16;
+
+ pipe_cfg.dst_rect = state->dst;
+
+ /* override for color fill */
+ if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) {
+ /* skip remaining processing on color fill */
+ return;
+ }
+
+ if (pdpu->pipe_hw->ops.setup_rects) {
+ pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
+ &pipe_cfg,
+ pstate->multirect_index);
+ }
+
+ _dpu_plane_setup_scaler(pdpu, pstate, fmt, false, &pipe_cfg);
+
+ if (pdpu->pipe_hw->ops.setup_multirect)
+ pdpu->pipe_hw->ops.setup_multirect(
+ pdpu->pipe_hw,
+ pstate->multirect_index,
+ pstate->multirect_mode);
+
+ if (pdpu->pipe_hw->ops.setup_format) {
+ unsigned int rotation = pstate->rotation;
+
+ src_flags = 0x0;
+
+ if (rotation & DRM_MODE_REFLECT_X)
+ src_flags |= DPU_SSPP_FLIP_LR;
+
+ if (rotation & DRM_MODE_REFLECT_Y)
+ src_flags |= DPU_SSPP_FLIP_UD;
+
+ if (rotation & DRM_MODE_ROTATE_90)
+ src_flags |= DPU_SSPP_ROT_90;
+
+ /* update format */
+ pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw, fmt, src_flags,
+ pstate->multirect_index);
+
+ if (pdpu->pipe_hw->ops.setup_cdp) {
+ struct dpu_hw_cdp_cfg cdp_cfg;
+
+ memset(&cdp_cfg, 0, sizeof(struct dpu_hw_cdp_cfg));
+
+ cdp_cfg.enable = pdpu->catalog->perf->cdp_cfg
+ [DPU_PERF_CDP_USAGE_RT].rd_enable;
+ cdp_cfg.ubwc_meta_enable =
+ DPU_FORMAT_IS_UBWC(fmt);
+ cdp_cfg.tile_amortize_enable =
+ DPU_FORMAT_IS_UBWC(fmt) ||
+ DPU_FORMAT_IS_TILE(fmt);
+ cdp_cfg.preload_ahead = DPU_SSPP_CDP_PRELOAD_AHEAD_64;
+
+ pdpu->pipe_hw->ops.setup_cdp(pdpu->pipe_hw, &cdp_cfg, pstate->multirect_index);
+ }
+ }
+
+ _dpu_plane_set_qos_lut(plane, fb, &pipe_cfg);
+ _dpu_plane_set_danger_lut(plane, fb);
+
+ if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+ _dpu_plane_set_qos_ctrl(plane, true, DPU_PLANE_QOS_PANIC_CTRL);
+ _dpu_plane_set_ot_limit(plane, crtc, &pipe_cfg);
+ }
+
+ if (pstate->needs_qos_remap) {
+ pstate->needs_qos_remap = false;
+ _dpu_plane_set_qos_remap(plane);
+ }
+
+ _dpu_plane_calc_bw(plane, fb, &pipe_cfg);
+
+ _dpu_plane_calc_clk(plane, &pipe_cfg);
+}
+
+static void _dpu_plane_atomic_disable(struct drm_plane *plane)
+{
+ struct drm_plane_state *state = plane->state;
+ struct dpu_plane_state *pstate = to_dpu_plane_state(state);
+
+ trace_dpu_plane_disable(DRMID(plane), false,
+ pstate->multirect_mode);
+
+ pstate->pending = true;
+}
+
+static void dpu_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+
+ pdpu->is_error = false;
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ if (!new_state->visible) {
+ _dpu_plane_atomic_disable(plane);
+ } else {
+ dpu_plane_sspp_atomic_update(plane);
+ }
+}
+
+static void dpu_plane_destroy(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu = plane ? to_dpu_plane(plane) : NULL;
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ if (pdpu) {
+ _dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
+
+ mutex_destroy(&pdpu->lock);
+
+ /* this will destroy the states as well */
+ drm_plane_cleanup(plane);
+
+ dpu_hw_sspp_destroy(pdpu->pipe_hw);
+
+ kfree(pdpu);
+ }
+}
+
+static void dpu_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ __drm_atomic_helper_plane_destroy_state(state);
+ kfree(to_dpu_plane_state(state));
+}
+
+static struct drm_plane_state *
+dpu_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+ struct dpu_plane_state *old_state;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return NULL;
+ } else if (!plane->state) {
+ DPU_ERROR("invalid plane state\n");
+ return NULL;
+ }
+
+ old_state = to_dpu_plane_state(plane->state);
+ pdpu = to_dpu_plane(plane);
+ pstate = kmemdup(old_state, sizeof(*old_state), GFP_KERNEL);
+ if (!pstate) {
+ DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
+ return NULL;
+ }
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ pstate->pending = false;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &pstate->base);
+
+ return &pstate->base;
+}
+
+static const char * const multirect_mode_name[] = {
+ [DPU_SSPP_MULTIRECT_NONE] = "none",
+ [DPU_SSPP_MULTIRECT_PARALLEL] = "parallel",
+ [DPU_SSPP_MULTIRECT_TIME_MX] = "time_mx",
+};
+
+static const char * const multirect_index_name[] = {
+ [DPU_SSPP_RECT_SOLO] = "solo",
+ [DPU_SSPP_RECT_0] = "rect_0",
+ [DPU_SSPP_RECT_1] = "rect_1",
+};
+
+static const char *dpu_get_multirect_mode(enum dpu_sspp_multirect_mode mode)
+{
+ if (WARN_ON(mode >= ARRAY_SIZE(multirect_mode_name)))
+ return "unknown";
+
+ return multirect_mode_name[mode];
+}
+
+static const char *dpu_get_multirect_index(enum dpu_sspp_multirect_index index)
+{
+ if (WARN_ON(index >= ARRAY_SIZE(multirect_index_name)))
+ return "unknown";
+
+ return multirect_index_name[index];
+}
+
+static void dpu_plane_atomic_print_state(struct drm_printer *p,
+ const struct drm_plane_state *state)
+{
+ const struct dpu_plane_state *pstate = to_dpu_plane_state(state);
+ const struct dpu_plane *pdpu = to_dpu_plane(state->plane);
+
+ drm_printf(p, "\tstage=%d\n", pstate->stage);
+ drm_printf(p, "\tsspp=%s\n", pdpu->pipe_hw->cap->name);
+ drm_printf(p, "\tmultirect_mode=%s\n", dpu_get_multirect_mode(pstate->multirect_mode));
+ drm_printf(p, "\tmultirect_index=%s\n", dpu_get_multirect_index(pstate->multirect_index));
+}
+
+static void dpu_plane_reset(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ /* remove previous state, if present */
+ if (plane->state) {
+ dpu_plane_destroy_state(plane, plane->state);
+ plane->state = NULL;
+ }
+
+ pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
+ if (!pstate) {
+ DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
+ return;
+ }
+
+ __drm_atomic_helper_plane_reset(plane, &pstate->base);
+}
+
+#ifdef CONFIG_DEBUG_FS
+void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+
+ if (!pdpu->is_rt_pipe)
+ return;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+}
+
+/* SSPPs live only inside dpu_plane private data. Enumerate them here. */
+void dpu_debugfs_sspp_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
+{
+ struct drm_plane *plane;
+ struct dentry *entry = debugfs_create_dir("sspp", debugfs_root);
+
+ if (IS_ERR(entry))
+ return;
+
+ drm_for_each_plane(plane, dpu_kms->dev) {
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+
+ _dpu_hw_sspp_init_debugfs(pdpu->pipe_hw, dpu_kms, entry);
+ }
+}
+#endif
+
+static bool dpu_plane_format_mod_supported(struct drm_plane *plane,
+ uint32_t format, uint64_t modifier)
+{
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return true;
+
+ if (modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED)
+ return dpu_find_format(format, qcom_compressed_supported_formats,
+ ARRAY_SIZE(qcom_compressed_supported_formats));
+
+ return false;
+}
+
+static const struct drm_plane_funcs dpu_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = dpu_plane_destroy,
+ .reset = dpu_plane_reset,
+ .atomic_duplicate_state = dpu_plane_duplicate_state,
+ .atomic_destroy_state = dpu_plane_destroy_state,
+ .atomic_print_state = dpu_plane_atomic_print_state,
+ .format_mod_supported = dpu_plane_format_mod_supported,
+};
+
+static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
+ .prepare_fb = dpu_plane_prepare_fb,
+ .cleanup_fb = dpu_plane_cleanup_fb,
+ .atomic_check = dpu_plane_atomic_check,
+ .atomic_update = dpu_plane_atomic_update,
+};
+
+enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane)
+{
+ return plane ? to_dpu_plane(plane)->pipe : SSPP_NONE;
+}
+
+/* initialize plane */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+ uint32_t pipe, enum drm_plane_type type,
+ unsigned long possible_crtcs)
+{
+ struct drm_plane *plane = NULL;
+ const uint32_t *format_list;
+ struct dpu_plane *pdpu;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *kms = to_dpu_kms(priv->kms);
+ uint32_t num_formats;
+ uint32_t supported_rotations;
+ int ret = -EINVAL;
+
+ /* create and zero local structure */
+ pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL);
+ if (!pdpu) {
+ DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe);
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+ }
+
+ /* cache local stuff for later */
+ plane = &pdpu->base;
+ pdpu->pipe = pipe;
+
+ /* initialize underlying h/w driver */
+ pdpu->pipe_hw = dpu_hw_sspp_init(pipe, kms->mmio, kms->catalog);
+ if (IS_ERR(pdpu->pipe_hw)) {
+ DPU_ERROR("[%u]SSPP init failed\n", pipe);
+ ret = PTR_ERR(pdpu->pipe_hw);
+ goto clean_plane;
+ } else if (!pdpu->pipe_hw->cap || !pdpu->pipe_hw->cap->sblk) {
+ DPU_ERROR("[%u]SSPP init returned invalid cfg\n", pipe);
+ goto clean_sspp;
+ }
+
+ format_list = pdpu->pipe_hw->cap->sblk->format_list;
+ num_formats = pdpu->pipe_hw->cap->sblk->num_formats;
+
+ ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
+ format_list, num_formats,
+ supported_format_modifiers, type, NULL);
+ if (ret)
+ goto clean_sspp;
+
+ pdpu->catalog = kms->catalog;
+
+ ret = drm_plane_create_zpos_property(plane, 0, 0, DPU_ZPOS_MAX);
+ if (ret)
+ DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
+
+ drm_plane_create_alpha_property(plane);
+ drm_plane_create_blend_mode_property(plane,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+
+ supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
+
+ if (pdpu->pipe_hw->cap->features & BIT(DPU_SSPP_INLINE_ROTATION))
+ supported_rotations |= DRM_MODE_ROTATE_MASK;
+
+ drm_plane_create_rotation_property(plane,
+ DRM_MODE_ROTATE_0, supported_rotations);
+
+ drm_plane_enable_fb_damage_clips(plane);
+
+ /* success! finalize initialization */
+ drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
+
+ mutex_init(&pdpu->lock);
+
+ DPU_DEBUG("%s created for pipe:%u id:%u\n", plane->name,
+ pipe, plane->base.id);
+ return plane;
+
+clean_sspp:
+ if (pdpu && pdpu->pipe_hw)
+ dpu_hw_sspp_destroy(pdpu->pipe_hw);
+clean_plane:
+ kfree(pdpu);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
new file mode 100644
index 000000000..b7b1b0519
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef _DPU_PLANE_H_
+#define _DPU_PLANE_H_
+
+#include <drm/drm_crtc.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_sspp.h"
+
+/**
+ * struct dpu_plane_state: Define dpu extension of drm plane state object
+ * @base: base drm plane state object
+ * @aspace: pointer to address space for input/output buffers
+ * @stage: assigned by crtc blender
+ * @needs_qos_remap: qos remap settings need to be updated
+ * @multirect_index: index of the rectangle of SSPP
+ * @multirect_mode: parallel or time multiplex multirect mode
+ * @pending: whether the current update is still pending
+ * @plane_fetch_bw: calculated BW per plane
+ * @plane_clk: calculated clk per plane
+ * @needs_dirtyfb: whether attached CRTC needs pixel data explicitly flushed
+ * @rotation: simplified drm rotation hint
+ */
+struct dpu_plane_state {
+ struct drm_plane_state base;
+ struct msm_gem_address_space *aspace;
+ enum dpu_stage stage;
+ bool needs_qos_remap;
+ uint32_t multirect_index;
+ uint32_t multirect_mode;
+ bool pending;
+
+ u64 plane_fetch_bw;
+ u64 plane_clk;
+
+ bool needs_dirtyfb;
+ unsigned int rotation;
+};
+
+/**
+ * struct dpu_multirect_plane_states: Defines multirect pair of drm plane states
+ * @r0: drm plane configured on rect 0
+ * @r1: drm plane configured on rect 1
+ */
+struct dpu_multirect_plane_states {
+ const struct drm_plane_state *r0;
+ const struct drm_plane_state *r1;
+};
+
+#define to_dpu_plane_state(x) \
+ container_of(x, struct dpu_plane_state, base)
+
+/**
+ * dpu_plane_pipe - return sspp identifier for the given plane
+ * @plane: Pointer to DRM plane object
+ * Returns: sspp identifier of the given plane
+ */
+enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane);
+
+/**
+ * dpu_plane_flush - final plane operations before commit flush
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_flush(struct drm_plane *plane);
+
+/**
+ * dpu_plane_set_error: enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error);
+
+/**
+ * dpu_plane_init - create new dpu plane for the given pipe
+ * @dev: Pointer to DRM device
+ * @pipe: dpu hardware pipe identifier
+ * @type: Plane type - PRIMARY/OVERLAY/CURSOR
+ * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
+ *
+ * Returns: pointer to the newly created plane on success, ERR_PTR otherwise
+ */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+ uint32_t pipe, enum drm_plane_type type,
+ unsigned long possible_crtcs);
+
+/**
+ * dpu_plane_validate_multirect_v2 - validate the multirect planes
+ *				      against hw limitations
+ * @plane: drm plane states of the multirect pair
+ */
+int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane);
+
+/**
+ * dpu_plane_clear_multirect - clear multirect bits for the given pipe
+ * @drm_state: Pointer to DRM plane state
+ */
+void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state);
+
+/**
+ * dpu_plane_color_fill - enables color fill on plane
+ * @plane: Pointer to DRM plane object
+ * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha: 8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+int dpu_plane_color_fill(struct drm_plane *plane,
+ uint32_t color, uint32_t alpha);
+
+#ifdef CONFIG_DEBUG_FS
+void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable);
+#else
+static inline void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) {}
+#endif
+
+#endif /* _DPU_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
new file mode 100644
index 000000000..58abf5fe9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -0,0 +1,674 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
+#include "dpu_kms.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_hw_intf.h"
+#include "dpu_hw_wb.h"
+#include "dpu_hw_dspp.h"
+#include "dpu_hw_merge3d.h"
+#include "dpu_hw_dsc.h"
+#include "dpu_encoder.h"
+#include "dpu_trace.h"
+
+
+static inline bool reserved_by_other(uint32_t *res_map, int idx,
+ uint32_t enc_id)
+{
+ return res_map[idx] && res_map[idx] != enc_id;
+}
+
+/**
+ * struct dpu_rm_requirements - Reservation requirements parameter bundle
+ * @topology: selected topology for the display
+ */
+struct dpu_rm_requirements {
+ struct msm_display_topology topology;
+};
+
+int dpu_rm_destroy(struct dpu_rm *rm)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) {
+ struct dpu_hw_dspp *hw;
+
+ if (rm->dspp_blks[i]) {
+ hw = to_dpu_hw_dspp(rm->dspp_blks[i]);
+ dpu_hw_dspp_destroy(hw);
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
+ struct dpu_hw_pingpong *hw;
+
+ if (rm->pingpong_blks[i]) {
+ hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]);
+ dpu_hw_pingpong_destroy(hw);
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(rm->merge_3d_blks); i++) {
+ struct dpu_hw_merge_3d *hw;
+
+ if (rm->merge_3d_blks[i]) {
+ hw = to_dpu_hw_merge_3d(rm->merge_3d_blks[i]);
+ dpu_hw_merge_3d_destroy(hw);
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) {
+ struct dpu_hw_mixer *hw;
+
+ if (rm->mixer_blks[i]) {
+ hw = to_dpu_hw_mixer(rm->mixer_blks[i]);
+ dpu_hw_lm_destroy(hw);
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) {
+ struct dpu_hw_ctl *hw;
+
+ if (rm->ctl_blks[i]) {
+ hw = to_dpu_hw_ctl(rm->ctl_blks[i]);
+ dpu_hw_ctl_destroy(hw);
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(rm->hw_intf); i++)
+ dpu_hw_intf_destroy(rm->hw_intf[i]);
+
+ for (i = 0; i < ARRAY_SIZE(rm->dsc_blks); i++) {
+ struct dpu_hw_dsc *hw;
+
+ if (rm->dsc_blks[i]) {
+ hw = to_dpu_hw_dsc(rm->dsc_blks[i]);
+ dpu_hw_dsc_destroy(hw);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rm->hw_wb); i++)
+ dpu_hw_wb_destroy(rm->hw_wb[i]);
+
+ return 0;
+}
+
+int dpu_rm_init(struct dpu_rm *rm,
+ const struct dpu_mdss_cfg *cat,
+ void __iomem *mmio)
+{
+ int rc, i;
+
+ if (!rm || !cat || !mmio) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ /* Clear, setup lists */
+ memset(rm, 0, sizeof(*rm));
+
+ /* Interrogate HW catalog and create tracking items for hw blocks */
+ for (i = 0; i < cat->mixer_count; i++) {
+ struct dpu_hw_mixer *hw;
+ const struct dpu_lm_cfg *lm = &cat->mixer[i];
+
+ if (lm->pingpong == PINGPONG_MAX) {
+ DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
+ continue;
+ }
+
+ if (lm->id < LM_0 || lm->id >= LM_MAX) {
+ DPU_ERROR("skip mixer %d with invalid id\n", lm->id);
+ continue;
+ }
+ hw = dpu_hw_lm_init(lm->id, mmio, cat);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed lm object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->mixer_blks[lm->id - LM_0] = &hw->base;
+ }
+
+ for (i = 0; i < cat->merge_3d_count; i++) {
+ struct dpu_hw_merge_3d *hw;
+ const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i];
+
+ if (merge_3d->id < MERGE_3D_0 || merge_3d->id >= MERGE_3D_MAX) {
+ DPU_ERROR("skip merge_3d %d with invalid id\n", merge_3d->id);
+ continue;
+ }
+ hw = dpu_hw_merge_3d_init(merge_3d->id, mmio, cat);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed merge_3d object creation: err %d\n",
+ rc);
+ goto fail;
+ }
+ rm->merge_3d_blks[merge_3d->id - MERGE_3D_0] = &hw->base;
+ }
+
+ for (i = 0; i < cat->pingpong_count; i++) {
+ struct dpu_hw_pingpong *hw;
+ const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];
+
+ if (pp->id < PINGPONG_0 || pp->id >= PINGPONG_MAX) {
+ DPU_ERROR("skip pingpong %d with invalid id\n", pp->id);
+ continue;
+ }
+ hw = dpu_hw_pingpong_init(pp->id, mmio, cat);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed pingpong object creation: err %d\n",
+ rc);
+ goto fail;
+ }
+ if (pp->merge_3d && pp->merge_3d < MERGE_3D_MAX)
+ hw->merge_3d = to_dpu_hw_merge_3d(rm->merge_3d_blks[pp->merge_3d - MERGE_3D_0]);
+ rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
+ }
+
+ for (i = 0; i < cat->intf_count; i++) {
+ struct dpu_hw_intf *hw;
+ const struct dpu_intf_cfg *intf = &cat->intf[i];
+
+ if (intf->type == INTF_NONE) {
+ DPU_DEBUG("skip intf %d with type none\n", i);
+ continue;
+ }
+ if (intf->id < INTF_0 || intf->id >= INTF_MAX) {
+ DPU_ERROR("skip intf %d with invalid id\n", intf->id);
+ continue;
+ }
+ hw = dpu_hw_intf_init(intf->id, mmio, cat);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed intf object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->hw_intf[intf->id - INTF_0] = hw;
+ }
+
+ for (i = 0; i < cat->wb_count; i++) {
+ struct dpu_hw_wb *hw;
+ const struct dpu_wb_cfg *wb = &cat->wb[i];
+
+ if (wb->id < WB_0 || wb->id >= WB_MAX) {
+ DPU_ERROR("skip intf %d with invalid id\n", wb->id);
+ continue;
+ }
+
+ hw = dpu_hw_wb_init(wb->id, mmio, cat);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed wb object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->hw_wb[wb->id - WB_0] = hw;
+ }
+
+ for (i = 0; i < cat->ctl_count; i++) {
+ struct dpu_hw_ctl *hw;
+ const struct dpu_ctl_cfg *ctl = &cat->ctl[i];
+
+ if (ctl->id < CTL_0 || ctl->id >= CTL_MAX) {
+ DPU_ERROR("skip ctl %d with invalid id\n", ctl->id);
+ continue;
+ }
+ hw = dpu_hw_ctl_init(ctl->id, mmio, cat);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed ctl object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
+ }
+
+ for (i = 0; i < cat->dspp_count; i++) {
+ struct dpu_hw_dspp *hw;
+ const struct dpu_dspp_cfg *dspp = &cat->dspp[i];
+
+ if (dspp->id < DSPP_0 || dspp->id >= DSPP_MAX) {
+ DPU_ERROR("skip dspp %d with invalid id\n", dspp->id);
+ continue;
+ }
+ hw = dpu_hw_dspp_init(dspp->id, mmio, cat);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed dspp object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
+ }
+
+ for (i = 0; i < cat->dsc_count; i++) {
+ struct dpu_hw_dsc *hw;
+ const struct dpu_dsc_cfg *dsc = &cat->dsc[i];
+
+ hw = dpu_hw_dsc_init(dsc->id, mmio, cat);
+ if (IS_ERR_OR_NULL(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed dsc object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->dsc_blks[dsc->id - DSC_0] = &hw->base;
+ }
+
+ return 0;
+
+fail:
+ dpu_rm_destroy(rm);
+
+ return rc ? rc : -EFAULT;
+}
+
+static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
+{
+ return top->num_intf > 1;
+}
+
+/**
+ * _dpu_rm_check_lm_peer - check if a mixer is a peer of the primary
+ * @rm: dpu resource manager handle
+ * @primary_idx: index of primary mixer in rm->mixer_blks[]
+ * @peer_idx: index of other mixer in rm->mixer_blks[]
+ * Return: true if rm->mixer_blks[peer_idx] is a peer of
+ * rm->mixer_blks[primary_idx]
+ */
+static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
+ int peer_idx)
+{
+ const struct dpu_lm_cfg *prim_lm_cfg;
+ const struct dpu_lm_cfg *peer_cfg;
+
+ prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;
+ peer_cfg = to_dpu_hw_mixer(rm->mixer_blks[peer_idx])->cap;
+
+ if (!test_bit(peer_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
+ DPU_DEBUG("lm %d not peer of lm %d\n", peer_cfg->id,
+				prim_lm_cfg->id);
+ return false;
+ }
+ return true;
+}
+
+/**
+ * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
+ * proposed use case requirements, incl. hardwired dependent blocks like
+ * pingpong
+ * @rm: dpu resource manager handle
+ * @global_state: resources shared across multiple kms objects
+ * @enc_id: encoder id requesting for allocation
+ * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
+ *	whether the lm, and all other hardwired blocks connected to the lm
+ *	(pp), are available and appropriate
+ * @pp_idx: output parameter, index of pingpong block attached to the layer
+ * mixer in rm->pingpong_blks[].
+ * @dspp_idx: output parameter, index of dspp block attached to the layer
+ * mixer in rm->dspp_blks[].
+ * @reqs: input parameter, rm requirements for HW blocks needed in the
+ * datapath.
+ * Return: true if lm matches all requirements, false otherwise
+ */
+static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
+ struct dpu_rm_requirements *reqs)
+{
+ const struct dpu_lm_cfg *lm_cfg;
+ int idx;
+
+ /* Already reserved? */
+ if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
+ DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
+ return false;
+ }
+
+ lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
+ idx = lm_cfg->pingpong - PINGPONG_0;
+ if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) {
+ DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
+ return false;
+ }
+
+ if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
+ DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
+ lm_cfg->pingpong);
+ return false;
+ }
+ *pp_idx = idx;
+
+ if (!reqs->topology.num_dspp)
+ return true;
+
+ idx = lm_cfg->dspp - DSPP_0;
+ if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) {
+ DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->dspp);
+ return false;
+ }
+
+ if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) {
+ DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
+ lm_cfg->dspp);
+ return false;
+ }
+ *dspp_idx = idx;
+
+ return true;
+}
+
+static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ uint32_t enc_id,
+ struct dpu_rm_requirements *reqs)
+{
+ int lm_idx[MAX_BLOCKS];
+ int pp_idx[MAX_BLOCKS];
+ int dspp_idx[MAX_BLOCKS] = {0};
+ int i, j, lm_count = 0;
+
+ if (!reqs->topology.num_lm) {
+ DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
+ return -EINVAL;
+ }
+
+ /* Find a primary mixer */
+ for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
+ lm_count < reqs->topology.num_lm; i++) {
+ if (!rm->mixer_blks[i])
+ continue;
+
+ lm_count = 0;
+ lm_idx[lm_count] = i;
+
+ if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
+ enc_id, i, &pp_idx[lm_count],
+ &dspp_idx[lm_count], reqs)) {
+ continue;
+ }
+
+ ++lm_count;
+
+ /* Valid primary mixer found, find matching peers */
+ for (j = i + 1; j < ARRAY_SIZE(rm->mixer_blks) &&
+ lm_count < reqs->topology.num_lm; j++) {
+ if (!rm->mixer_blks[j])
+ continue;
+
+ if (!_dpu_rm_check_lm_peer(rm, i, j)) {
+ DPU_DEBUG("lm %d not peer of lm %d\n", LM_0 + j,
+ LM_0 + i);
+ continue;
+ }
+
+ if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
+ global_state, enc_id, j,
+ &pp_idx[lm_count], &dspp_idx[lm_count],
+ reqs)) {
+ continue;
+ }
+
+ lm_idx[lm_count] = j;
+ ++lm_count;
+ }
+ }
+
+ if (lm_count != reqs->topology.num_lm) {
+ DPU_DEBUG("unable to find appropriate mixers\n");
+ return -ENAVAIL;
+ }
+
+ for (i = 0; i < lm_count; i++) {
+ global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
+ global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
+ global_state->dspp_to_enc_id[dspp_idx[i]] =
+ reqs->topology.num_dspp ? enc_id : 0;
+
+ trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
+ pp_idx[i] + PINGPONG_0);
+ }
+
+ return 0;
+}
+
+static int _dpu_rm_reserve_ctls(
+ struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ uint32_t enc_id,
+ const struct msm_display_topology *top)
+{
+ int ctl_idx[MAX_BLOCKS];
+ int i = 0, j, num_ctls;
+ bool needs_split_display;
+
+	/* each hw_intf needs its own hw_ctl to program its control path */
+ num_ctls = top->num_intf;
+
+ needs_split_display = _dpu_rm_needs_split_display(top);
+
+ for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
+ const struct dpu_hw_ctl *ctl;
+ unsigned long features;
+ bool has_split_display;
+
+ if (!rm->ctl_blks[j])
+ continue;
+ if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
+ continue;
+
+ ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
+ features = ctl->caps->features;
+ has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
+
+ DPU_DEBUG("ctl %d caps 0x%lX\n", j + CTL_0, features);
+
+ if (needs_split_display != has_split_display)
+ continue;
+
+ ctl_idx[i] = j;
+ DPU_DEBUG("ctl %d match\n", j + CTL_0);
+
+ if (++i == num_ctls)
+ break;
+
+ }
+
+ if (i != num_ctls)
+ return -ENAVAIL;
+
+ for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
+ global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
+ trace_dpu_rm_reserve_ctls(i + CTL_0, enc_id);
+ }
+
+ return 0;
+}
+
+static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_encoder *enc,
+ const struct msm_display_topology *top)
+{
+ int num_dsc = top->num_dsc;
+ int i;
+
+	/*
+	 * Check whether the required DSC blocks are already allocated; DSC
+	 * blocks are only handed out contiguously from index 0.
+	 */
+ for (i = 0; i < num_dsc; i++) {
+ if (global_state->dsc_to_enc_id[i]) {
+ DPU_ERROR("DSC %d is already allocated\n", i);
+ return -EIO;
+ }
+ }
+
+ for (i = 0; i < num_dsc; i++)
+ global_state->dsc_to_enc_id[i] = enc->base.id;
+
+ return 0;
+}
+
+static int _dpu_rm_make_reservation(
+ struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_encoder *enc,
+ struct dpu_rm_requirements *reqs)
+{
+ int ret;
+
+ ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
+ if (ret) {
+ DPU_ERROR("unable to find appropriate mixers\n");
+ return ret;
+ }
+
+ ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
+ &reqs->topology);
+ if (ret) {
+ DPU_ERROR("unable to find appropriate CTL\n");
+ return ret;
+ }
+
+ ret = _dpu_rm_reserve_dsc(rm, global_state, enc, &reqs->topology);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int _dpu_rm_populate_requirements(
+ struct drm_encoder *enc,
+ struct dpu_rm_requirements *reqs,
+ struct msm_display_topology req_topology)
+{
+ reqs->topology = req_topology;
+
+ DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n",
+ reqs->topology.num_lm, reqs->topology.num_enc,
+ reqs->topology.num_intf);
+
+ return 0;
+}
+
+static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
+ uint32_t enc_id)
+{
+ int i;
+
+ for (i = 0; i < cnt; i++) {
+ if (res_mapping[i] == enc_id)
+ res_mapping[i] = 0;
+ }
+}
+
+void dpu_rm_release(struct dpu_global_state *global_state,
+ struct drm_encoder *enc)
+{
+ _dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
+ ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
+ _dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
+ ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
+ _dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
+ ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
+ _dpu_rm_clear_mapping(global_state->dsc_to_enc_id,
+ ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
+ _dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
+ ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
+}
+
+int dpu_rm_reserve(
+ struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct msm_display_topology topology)
+{
+ struct dpu_rm_requirements reqs;
+ int ret;
+
+ /* Check if this is just a page-flip */
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ return 0;
+
+ if (IS_ERR(global_state)) {
+		DPU_ERROR("failed to get global state\n");
+ return PTR_ERR(global_state);
+ }
+
+ DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
+ enc->base.id, crtc_state->crtc->base.id);
+
+ ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
+ if (ret) {
+ DPU_ERROR("failed to populate hw requirements\n");
+ return ret;
+ }
+
+ ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
+ if (ret)
+ DPU_ERROR("failed to reserve hw resources: %d\n", ret);
+
+ return ret;
+}
+
+int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
+ struct dpu_global_state *global_state, uint32_t enc_id,
+ enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
+{
+ struct dpu_hw_blk **hw_blks;
+ uint32_t *hw_to_enc_id;
+ int i, num_blks, max_blks;
+
+ switch (type) {
+ case DPU_HW_BLK_PINGPONG:
+ hw_blks = rm->pingpong_blks;
+ hw_to_enc_id = global_state->pingpong_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->pingpong_blks);
+ break;
+ case DPU_HW_BLK_LM:
+ hw_blks = rm->mixer_blks;
+ hw_to_enc_id = global_state->mixer_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->mixer_blks);
+ break;
+ case DPU_HW_BLK_CTL:
+ hw_blks = rm->ctl_blks;
+ hw_to_enc_id = global_state->ctl_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->ctl_blks);
+ break;
+ case DPU_HW_BLK_DSPP:
+ hw_blks = rm->dspp_blks;
+ hw_to_enc_id = global_state->dspp_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->dspp_blks);
+ break;
+ case DPU_HW_BLK_DSC:
+ hw_blks = rm->dsc_blks;
+ hw_to_enc_id = global_state->dsc_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->dsc_blks);
+ break;
+ default:
+ DPU_ERROR("blk type %d not managed by rm\n", type);
+ return 0;
+ }
+
+ num_blks = 0;
+ for (i = 0; i < max_blks; i++) {
+ if (hw_to_enc_id[i] != enc_id)
+ continue;
+
+ if (num_blks == blks_size) {
+ DPU_ERROR("More than %d resources assigned to enc %d\n",
+ blks_size, enc_id);
+ break;
+ }
+ if (!hw_blks[i]) {
+ DPU_ERROR("Allocated resource %d unavailable to assign to enc %d\n",
+ type, enc_id);
+ break;
+ }
+ blks[num_blks++] = hw_blks[i];
+ }
+
+ return num_blks;
+}
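+
+/*
+ * Illustrative caller sketch (not part of this file): an encoder reserves
+ * blocks during a modeset and queries them back afterwards, e.g.
+ *
+ *	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
+ *	int num_pp;
+ *
+ *	ret = dpu_rm_reserve(rm, global_state, drm_enc, crtc_state, topology);
+ *	if (ret)
+ *		return ret;
+ *	num_pp = dpu_rm_get_assigned_resources(rm, global_state,
+ *			drm_enc->base.id, DPU_HW_BLK_PINGPONG,
+ *			hw_pp, ARRAY_SIZE(hw_pp));
+ *
+ * The MAX_CHANNELS_PER_ENC bound is assumed here purely for illustration.
+ */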
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
new file mode 100644
index 000000000..59de72b38
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DPU_RM_H__
+#define __DPU_RM_H__
+
+#include <linux/list.h>
+
+#include "msm_kms.h"
+#include "dpu_hw_top.h"
+
+struct dpu_global_state;
+
+/**
+ * struct dpu_rm - DPU dynamic hardware resource manager
+ * @pingpong_blks: array of pingpong hardware resources
+ * @mixer_blks: array of layer mixer hardware resources
+ * @ctl_blks: array of ctl hardware resources
+ * @hw_intf: array of intf hardware resources
+ * @hw_wb: array of wb hardware resources
+ * @dspp_blks: array of dspp hardware resources
+ * @merge_3d_blks: array of merge_3d hardware resources
+ * @dsc_blks: array of dsc hardware resources
+ */
+struct dpu_rm {
+ struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0];
+ struct dpu_hw_blk *mixer_blks[LM_MAX - LM_0];
+ struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0];
+ struct dpu_hw_intf *hw_intf[INTF_MAX - INTF_0];
+ struct dpu_hw_wb *hw_wb[WB_MAX - WB_0];
+ struct dpu_hw_blk *dspp_blks[DSPP_MAX - DSPP_0];
+ struct dpu_hw_blk *merge_3d_blks[MERGE_3D_MAX - MERGE_3D_0];
+ struct dpu_hw_blk *dsc_blks[DSC_MAX - DSC_0];
+};
+
+/**
+ * dpu_rm_init - Read hardware catalog and create reservation tracking objects
+ * for all HW blocks.
+ * @rm: DPU Resource Manager handle
+ * @cat: Pointer to hardware catalog
+ * @mmio: mapped register io address of MDP
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_init(struct dpu_rm *rm,
+ const struct dpu_mdss_cfg *cat,
+ void __iomem *mmio);
+
+/**
+ * dpu_rm_destroy - Free all memory allocated by dpu_rm_init
+ * @rm: DPU Resource Manager handle
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_destroy(struct dpu_rm *rm);
+
+/**
+ * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
+ *	the connections in use and the user requirements, specified through
+ *	related topology control properties, and reserve hardware blocks for
+ *	that display chain.
+ *	HW blocks can then be accessed through dpu_rm_get_* functions.
+ *	HW reservations should be released via dpu_rm_release.
+ * @rm: DPU Resource Manager handle
+ * @global_state: resources shared across multiple kms objects
+ * @drm_enc: DRM Encoder handle
+ * @crtc_state: Proposed Atomic DRM CRTC State handle
+ * @topology: topology info for the display
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_reserve(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_encoder *drm_enc,
+ struct drm_crtc_state *crtc_state,
+ struct msm_display_topology topology);
+
+/**
+ * dpu_rm_release - Given the encoder for the display chain, release any
+ *	HW blocks previously reserved for that use case.
+ * @global_state: resources shared across multiple kms objects
+ * @enc: DRM Encoder handle
+ */
+void dpu_rm_release(struct dpu_global_state *global_state,
+ struct drm_encoder *enc);
+
+/**
+ * dpu_rm_get_assigned_resources - Get hw resources of the given type that
+ *	are assigned to this encoder.
+ * @rm: DPU Resource Manager handle
+ * @global_state: resources shared across multiple kms objects
+ * @enc_id: encoder id requesting the resources
+ * @type: resource type to return data for
+ * @blks: pointer to the array to be filled by hw resources
+ * @blks_size: size of the @blks array
+ * @Return: number of blocks of the given type assigned to @enc_id
+ */
+int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
+ struct dpu_global_state *global_state, uint32_t enc_id,
+ enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size);
+
+/**
+ * dpu_rm_get_intf - Return a struct dpu_hw_intf instance given its index.
+ * @rm: DPU Resource Manager handle
+ * @intf_idx: INTF's index
+ */
+static inline struct dpu_hw_intf *dpu_rm_get_intf(struct dpu_rm *rm, enum dpu_intf intf_idx)
+{
+ return rm->hw_intf[intf_idx - INTF_0];
+}
+
+/**
+ * dpu_rm_get_wb - Return a struct dpu_hw_wb instance given its index.
+ * @rm: DPU Resource Manager handle
+ * @wb_idx: WB index
+ */
+static inline struct dpu_hw_wb *dpu_rm_get_wb(struct dpu_rm *rm, enum dpu_wb wb_idx)
+{
+ return rm->hw_wb[wb_idx - WB_0];
+}
+
+#endif /* __DPU_RM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
new file mode 100644
index 000000000..76169f406
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -0,0 +1,978 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#if !defined(_DPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _DPU_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drm_rect.h>
+#include "dpu_crtc.h"
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_plane.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dpu
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE dpu_trace
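+
+/*
+ * All events below are grouped under the "dpu" trace system; assuming
+ * tracefs is mounted at /sys/kernel/tracing, they can be enabled at
+ * runtime with, e.g.:
+ *	echo 1 > /sys/kernel/tracing/events/dpu/enable
+ */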
+
+TRACE_EVENT(dpu_perf_set_qos_luts,
+ TP_PROTO(u32 pnum, u32 fmt, bool rt, u32 fl,
+ u32 lut, u32 lut_usage),
+ TP_ARGS(pnum, fmt, rt, fl, lut, lut_usage),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, fmt)
+ __field(bool, rt)
+ __field(u32, fl)
+ __field(u64, lut)
+ __field(u32, lut_usage)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->fmt = fmt;
+ __entry->rt = rt;
+ __entry->fl = fl;
+ __entry->lut = lut;
+ __entry->lut_usage = lut_usage;
+ ),
+ TP_printk("pnum=%d fmt=%x rt=%d fl=%d lut=0x%llx lut_usage=%d",
+ __entry->pnum, __entry->fmt,
+ __entry->rt, __entry->fl,
+ __entry->lut, __entry->lut_usage)
+);
+
+TRACE_EVENT(dpu_perf_set_danger_luts,
+ TP_PROTO(u32 pnum, u32 fmt, u32 mode, u32 danger_lut,
+ u32 safe_lut),
+ TP_ARGS(pnum, fmt, mode, danger_lut, safe_lut),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, fmt)
+ __field(u32, mode)
+ __field(u32, danger_lut)
+ __field(u32, safe_lut)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->fmt = fmt;
+ __entry->mode = mode;
+ __entry->danger_lut = danger_lut;
+ __entry->safe_lut = safe_lut;
+ ),
+ TP_printk("pnum=%d fmt=%x mode=%d luts[0x%x, 0x%x]",
+ __entry->pnum, __entry->fmt,
+ __entry->mode, __entry->danger_lut,
+ __entry->safe_lut)
+);
+
+TRACE_EVENT(dpu_perf_set_ot,
+ TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim, u32 vbif_idx),
+ TP_ARGS(pnum, xin_id, rd_lim, vbif_idx),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, xin_id)
+ __field(u32, rd_lim)
+ __field(u32, vbif_idx)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->xin_id = xin_id;
+ __entry->rd_lim = rd_lim;
+ __entry->vbif_idx = vbif_idx;
+ ),
+ TP_printk("pnum:%d xin_id:%d ot:%d vbif:%d",
+ __entry->pnum, __entry->xin_id, __entry->rd_lim,
+ __entry->vbif_idx)
+)
+
+TRACE_EVENT(dpu_cmd_release_bw,
+ TP_PROTO(u32 crtc_id),
+ TP_ARGS(crtc_id),
+ TP_STRUCT__entry(
+ __field(u32, crtc_id)
+ ),
+ TP_fast_assign(
+ __entry->crtc_id = crtc_id;
+ ),
+ TP_printk("crtc:%d", __entry->crtc_id)
+);
+
+TRACE_EVENT(tracing_mark_write,
+ TP_PROTO(int pid, const char *name, bool trace_begin),
+ TP_ARGS(pid, name, trace_begin),
+ TP_STRUCT__entry(
+ __field(int, pid)
+ __string(trace_name, name)
+ __field(bool, trace_begin)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __assign_str(trace_name, name);
+ __entry->trace_begin = trace_begin;
+ ),
+ TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+ __entry->pid, __get_str(trace_name))
+)
+
+TRACE_EVENT(dpu_trace_counter,
+ TP_PROTO(int pid, char *name, int value),
+ TP_ARGS(pid, name, value),
+ TP_STRUCT__entry(
+ __field(int, pid)
+ __string(counter_name, name)
+ __field(int, value)
+ ),
+ TP_fast_assign(
+ __entry->pid = current->tgid;
+ __assign_str(counter_name, name);
+ __entry->value = value;
+ ),
+ TP_printk("%d|%s|%d", __entry->pid,
+ __get_str(counter_name), __entry->value)
+)
+
+TRACE_EVENT(dpu_perf_crtc_update,
+ TP_PROTO(u32 crtc, u64 bw_ctl, u32 core_clk_rate,
+ bool stop_req, bool update_bus, bool update_clk),
+ TP_ARGS(crtc, bw_ctl, core_clk_rate, stop_req, update_bus, update_clk),
+ TP_STRUCT__entry(
+ __field(u32, crtc)
+ __field(u64, bw_ctl)
+ __field(u32, core_clk_rate)
+ __field(bool, stop_req)
+ __field(u32, update_bus)
+ __field(u32, update_clk)
+ ),
+ TP_fast_assign(
+ __entry->crtc = crtc;
+ __entry->bw_ctl = bw_ctl;
+ __entry->core_clk_rate = core_clk_rate;
+ __entry->stop_req = stop_req;
+ __entry->update_bus = update_bus;
+ __entry->update_clk = update_clk;
+ ),
+ TP_printk(
+ "crtc=%d bw_ctl=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d",
+ __entry->crtc,
+ __entry->bw_ctl,
+ __entry->core_clk_rate,
+ __entry->stop_req,
+ __entry->update_bus,
+ __entry->update_clk)
+);
+
+DECLARE_EVENT_CLASS(dpu_irq_template,
+ TP_PROTO(int irq_idx),
+ TP_ARGS(irq_idx),
+ TP_STRUCT__entry(
+ __field( int, irq_idx )
+ ),
+ TP_fast_assign(
+ __entry->irq_idx = irq_idx;
+ ),
+ TP_printk("irq=%d", __entry->irq_idx)
+);
+DEFINE_EVENT(dpu_irq_template, dpu_irq_register_success,
+ TP_PROTO(int irq_idx),
+ TP_ARGS(irq_idx)
+);
+DEFINE_EVENT(dpu_irq_template, dpu_irq_unregister_success,
+ TP_PROTO(int irq_idx),
+ TP_ARGS(irq_idx)
+);
+
+TRACE_EVENT(dpu_enc_irq_wait_success,
+ TP_PROTO(uint32_t drm_id, void *func,
+ int irq_idx, enum dpu_pingpong pp_idx, int atomic_cnt),
+ TP_ARGS(drm_id, func, irq_idx, pp_idx, atomic_cnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( void *, func )
+ __field( int, irq_idx )
+ __field( enum dpu_pingpong, pp_idx )
+ __field( int, atomic_cnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->func = func;
+ __entry->irq_idx = irq_idx;
+ __entry->pp_idx = pp_idx;
+ __entry->atomic_cnt = atomic_cnt;
+ ),
+ TP_printk("id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d",
+ __entry->drm_id, __entry->func,
+ __entry->irq_idx, __entry->pp_idx, __entry->atomic_cnt)
+);
+
+DECLARE_EVENT_CLASS(dpu_drm_obj_template,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ ),
+ TP_printk("id=%u", __entry->drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_atomic_check,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_mode_set,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_disable,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_kickoff,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff_reset,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_flip,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_vblank_cb,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_commit,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_commit,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_wait_for_commit_done,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_runtime_resume,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+
+TRACE_EVENT(dpu_enc_enable,
+ TP_PROTO(uint32_t drm_id, int hdisplay, int vdisplay),
+ TP_ARGS(drm_id, hdisplay, vdisplay),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, hdisplay )
+ __field( int, vdisplay )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->hdisplay = hdisplay;
+ __entry->vdisplay = vdisplay;
+ ),
+ TP_printk("id=%u, mode=%dx%d",
+ __entry->drm_id, __entry->hdisplay, __entry->vdisplay)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_keyval_template,
+ TP_PROTO(uint32_t drm_id, int val),
+ TP_ARGS(drm_id, val),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, val )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->val = val;
+ ),
+ TP_printk("id=%u, val=%d", __entry->drm_id, __entry->val)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_underrun_cb,
+ TP_PROTO(uint32_t drm_id, int count),
+ TP_ARGS(drm_id, count)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_trigger_start,
+ TP_PROTO(uint32_t drm_id, int ctl_idx),
+ TP_ARGS(drm_id, ctl_idx)
+);
+
+TRACE_EVENT(dpu_enc_atomic_check_flags,
+ TP_PROTO(uint32_t drm_id, unsigned int flags),
+ TP_ARGS(drm_id, flags),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( unsigned int, flags )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->flags = flags;
+ ),
+ TP_printk("id=%u, flags=%u",
+ __entry->drm_id, __entry->flags)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_id_enable_template,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( bool, enable )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->enable = enable;
+ ),
+ TP_printk("id=%u, enable=%s",
+ __entry->drm_id, __entry->enable ? "true" : "false")
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_rc_helper,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_vblank_cb,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_frame_event_cb,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_phys_cmd_connect_te,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+
+TRACE_EVENT(dpu_enc_rc,
+ TP_PROTO(uint32_t drm_id, u32 sw_event, bool idle_pc_supported,
+ int rc_state, const char *stage),
+ TP_ARGS(drm_id, sw_event, idle_pc_supported, rc_state, stage),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, sw_event )
+ __field( bool, idle_pc_supported )
+ __field( int, rc_state )
+ __string( stage_str, stage )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->sw_event = sw_event;
+ __entry->idle_pc_supported = idle_pc_supported;
+ __entry->rc_state = rc_state;
+ __assign_str(stage_str, stage);
+ ),
+ TP_printk("%s: id:%u, sw_event:%d, idle_pc_supported:%s, rc_state:%d",
+ __get_str(stage_str), __entry->drm_id, __entry->sw_event,
+ __entry->idle_pc_supported ? "true" : "false",
+ __entry->rc_state)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb_not_busy,
+ TP_PROTO(uint32_t drm_id, u32 event, char *intf_mode, enum dpu_intf intf_idx,
+ enum dpu_wb wb_idx),
+ TP_ARGS(drm_id, event, intf_mode, intf_idx, wb_idx),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, event )
+ __string( intf_mode_str, intf_mode )
+ __field( enum dpu_intf, intf_idx )
+ __field( enum dpu_wb, wb_idx )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->event = event;
+ __assign_str(intf_mode_str, intf_mode);
+ __entry->intf_idx = intf_idx;
+ __entry->wb_idx = wb_idx;
+ ),
+ TP_printk("id=%u, event=%u, intf_mode=%s intf=%d wb=%d", __entry->drm_id,
+ __entry->event, __get_str(intf_mode_str),
+ __entry->intf_idx, __entry->wb_idx)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb,
+ TP_PROTO(uint32_t drm_id, unsigned int idx,
+ unsigned long frame_busy_mask),
+ TP_ARGS(drm_id, idx, frame_busy_mask),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( unsigned int, idx )
+ __field( unsigned long, frame_busy_mask )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->idx = idx;
+ __entry->frame_busy_mask = frame_busy_mask;
+ ),
+ TP_printk("id=%u, idx=%u, frame_busy_mask=%lx", __entry->drm_id,
+ __entry->idx, __entry->frame_busy_mask)
+);
+
+TRACE_EVENT(dpu_enc_trigger_flush,
+ TP_PROTO(uint32_t drm_id, char *intf_mode, enum dpu_intf intf_idx, enum dpu_wb wb_idx,
+ int pending_kickoff_cnt, int ctl_idx, u32 extra_flush_bits,
+ u32 pending_flush_ret),
+ TP_ARGS(drm_id, intf_mode, intf_idx, wb_idx, pending_kickoff_cnt, ctl_idx,
+ extra_flush_bits, pending_flush_ret),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __string( intf_mode_str, intf_mode )
+ __field( enum dpu_intf, intf_idx )
+ __field( enum dpu_wb, wb_idx )
+ __field( int, pending_kickoff_cnt )
+ __field( int, ctl_idx )
+ __field( u32, extra_flush_bits )
+ __field( u32, pending_flush_ret )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __assign_str(intf_mode_str, intf_mode);
+ __entry->intf_idx = intf_idx;
+ __entry->wb_idx = wb_idx;
+ __entry->pending_kickoff_cnt = pending_kickoff_cnt;
+ __entry->ctl_idx = ctl_idx;
+ __entry->extra_flush_bits = extra_flush_bits;
+ __entry->pending_flush_ret = pending_flush_ret;
+ ),
+ TP_printk("id=%u, intf_mode=%s, intf_idx=%d, wb_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d "
+ "extra_flush_bits=0x%x pending_flush_ret=0x%x",
+ __entry->drm_id, __get_str(intf_mode_str), __entry->intf_idx, __entry->wb_idx,
+ __entry->pending_kickoff_cnt, __entry->ctl_idx,
+ __entry->extra_flush_bits, __entry->pending_flush_ret)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_ktime_template,
+ TP_PROTO(uint32_t drm_id, ktime_t time),
+ TP_ARGS(drm_id, time),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( ktime_t, time )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->time = time;
+ ),
+ TP_printk("id=%u, time=%lld", __entry->drm_id,
+ ktime_to_ms(__entry->time))
+);
+DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_vsync_event_work,
+ TP_PROTO(uint32_t drm_id, ktime_t time),
+ TP_ARGS(drm_id, time)
+);
+DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_early_kickoff,
+ TP_PROTO(uint32_t drm_id, ktime_t time),
+ TP_ARGS(drm_id, time)
+);
+
+DECLARE_EVENT_CLASS(dpu_id_event_template,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, event )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->event = event;
+ ),
+ TP_printk("id=%u, event=%u", __entry->drm_id, __entry->event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_enc_frame_done_timeout,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_cb,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_done,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_more_pending,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+
+TRACE_EVENT(dpu_enc_wait_event_timeout,
+ TP_PROTO(uint32_t drm_id, int irq_idx, int rc, s64 time,
+ s64 expected_time, int atomic_cnt),
+ TP_ARGS(drm_id, irq_idx, rc, time, expected_time, atomic_cnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, irq_idx )
+ __field( int, rc )
+ __field( s64, time )
+ __field( s64, expected_time )
+ __field( int, atomic_cnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->irq_idx = irq_idx;
+ __entry->rc = rc;
+ __entry->time = time;
+ __entry->expected_time = expected_time;
+ __entry->atomic_cnt = atomic_cnt;
+ ),
+ TP_printk("id=%u, irq_idx=%d, rc=%d, time=%lld, expected=%lld cnt=%d",
+ __entry->drm_id, __entry->irq_idx, __entry->rc, __entry->time,
+ __entry->expected_time, __entry->atomic_cnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_irq_ctrl,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, bool enable,
+ int refcnt),
+ TP_ARGS(drm_id, pp, enable, refcnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( bool, enable )
+ __field( int, refcnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->enable = enable;
+ __entry->refcnt = refcnt;
+ ),
+ TP_printk("id=%u, pp=%d, enable=%s, refcnt=%d", __entry->drm_id,
+ __entry->pp, __entry->enable ? "true" : "false",
+ __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_pp_tx_done,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int new_count,
+ u32 event),
+ TP_ARGS(drm_id, pp, new_count, event),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( int, new_count )
+ __field( u32, event )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->new_count = new_count;
+ __entry->event = event;
+ ),
+ TP_printk("id=%u, pp=%d, new_count=%d, event=%u", __entry->drm_id,
+ __entry->pp, __entry->new_count, __entry->event)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_pdone_timeout,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int timeout_count,
+ int kickoff_count, u32 event),
+ TP_ARGS(drm_id, pp, timeout_count, kickoff_count, event),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( int, timeout_count )
+ __field( int, kickoff_count )
+ __field( u32, event )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->timeout_count = timeout_count;
+ __entry->kickoff_count = kickoff_count;
+ __entry->event = event;
+ ),
+ TP_printk("id=%u, pp=%d, timeout_count=%d, kickoff_count=%d, event=%u",
+ __entry->drm_id, __entry->pp, __entry->timeout_count,
+ __entry->kickoff_count, __entry->event)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_post_kickoff,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx),
+ TP_ARGS(drm_id, intf_idx),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intf, intf_idx )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intf_idx = intf_idx;
+ ),
+ TP_printk("id=%u, intf_idx=%d", __entry->drm_id, __entry->intf_idx)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_irq_ctrl,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx, bool enable,
+ int refcnt),
+ TP_ARGS(drm_id, intf_idx, enable, refcnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intf, intf_idx )
+ __field( bool, enable )
+ __field( int, refcnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intf_idx = intf_idx;
+ __entry->enable = enable;
+ __entry->refcnt = refcnt;
+ ),
+ TP_printk("id=%u, intf_idx=%d enable=%s refcnt=%d", __entry->drm_id,
+ __entry->intf_idx, __entry->enable ? "true" : "false",
+		  __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_crtc_setup_mixer,
+ TP_PROTO(uint32_t crtc_id, uint32_t plane_id,
+ struct drm_plane_state *state, struct dpu_plane_state *pstate,
+ uint32_t stage_idx, enum dpu_sspp sspp, uint32_t pixel_format,
+ uint64_t modifier),
+ TP_ARGS(crtc_id, plane_id, state, pstate, stage_idx, sspp,
+ pixel_format, modifier),
+ TP_STRUCT__entry(
+ __field( uint32_t, crtc_id )
+ __field( uint32_t, plane_id )
+ __field( uint32_t, fb_id )
+ __field_struct( struct drm_rect, src_rect )
+ __field_struct( struct drm_rect, dst_rect )
+ __field( uint32_t, stage_idx )
+ __field( enum dpu_stage, stage )
+ __field( enum dpu_sspp, sspp )
+ __field( uint32_t, multirect_idx )
+ __field( uint32_t, multirect_mode )
+ __field( uint32_t, pixel_format )
+ __field( uint64_t, modifier )
+ ),
+ TP_fast_assign(
+ __entry->crtc_id = crtc_id;
+ __entry->plane_id = plane_id;
+ __entry->fb_id = state ? state->fb->base.id : 0;
+ __entry->src_rect = drm_plane_state_src(state);
+ __entry->dst_rect = drm_plane_state_dest(state);
+ __entry->stage_idx = stage_idx;
+ __entry->stage = pstate->stage;
+ __entry->sspp = sspp;
+ __entry->multirect_idx = pstate->multirect_index;
+ __entry->multirect_mode = pstate->multirect_mode;
+ __entry->pixel_format = pixel_format;
+ __entry->modifier = modifier;
+ ),
+ TP_printk("crtc_id:%u plane_id:%u fb_id:%u src:" DRM_RECT_FP_FMT
+ " dst:" DRM_RECT_FMT " stage_idx:%u stage:%d, sspp:%d "
+ "multirect_index:%d multirect_mode:%u pix_format:%u "
+ "modifier:%llu",
+ __entry->crtc_id, __entry->plane_id, __entry->fb_id,
+ DRM_RECT_FP_ARG(&__entry->src_rect),
+ DRM_RECT_ARG(&__entry->dst_rect),
+ __entry->stage_idx, __entry->stage, __entry->sspp,
+ __entry->multirect_idx, __entry->multirect_mode,
+ __entry->pixel_format, __entry->modifier)
+);
+
+TRACE_EVENT(dpu_crtc_setup_lm_bounds,
+ TP_PROTO(uint32_t drm_id, int mixer, struct drm_rect *bounds),
+ TP_ARGS(drm_id, mixer, bounds),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, mixer )
+ __field_struct( struct drm_rect, bounds )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->mixer = mixer;
+ __entry->bounds = *bounds;
+ ),
+ TP_printk("id:%u mixer:%d bounds:" DRM_RECT_FMT, __entry->drm_id,
+ __entry->mixer, DRM_RECT_ARG(&__entry->bounds))
+);
+
+TRACE_EVENT(dpu_crtc_vblank_enable,
+ TP_PROTO(uint32_t drm_id, uint32_t enc_id, bool enable,
+ struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enc_id, enable, crtc),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( uint32_t, enc_id )
+ __field( bool, enable )
+ __field( bool, enabled )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->enc_id = enc_id;
+ __entry->enable = enable;
+ __entry->enabled = crtc->enabled;
+ ),
+ TP_printk("id:%u encoder:%u enable:%s state{enabled:%s}",
+ __entry->drm_id, __entry->enc_id,
+ __entry->enable ? "true" : "false",
+ __entry->enabled ? "true" : "false")
+);
+
+DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( bool, enable )
+ __field( bool, enabled )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->enable = enable;
+ __entry->enabled = crtc->enabled;
+ ),
+ TP_printk("id:%u enable:%s state{enabled:%s}",
+ __entry->drm_id, __entry->enable ? "true" : "false",
+ __entry->enabled ? "true" : "false")
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_enable,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_disable,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_vblank,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+
+TRACE_EVENT(dpu_crtc_disable_frame_pending,
+ TP_PROTO(uint32_t drm_id, int frame_pending),
+ TP_ARGS(drm_id, frame_pending),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, frame_pending )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->frame_pending = frame_pending;
+ ),
+ TP_printk("id:%u frame_pending:%d", __entry->drm_id,
+ __entry->frame_pending)
+);
+
+TRACE_EVENT(dpu_plane_set_scanout,
+ TP_PROTO(enum dpu_sspp index, struct dpu_hw_fmt_layout *layout,
+ enum dpu_sspp_multirect_index multirect_index),
+ TP_ARGS(index, layout, multirect_index),
+ TP_STRUCT__entry(
+ __field( enum dpu_sspp, index )
+ __field_struct( struct dpu_hw_fmt_layout, layout )
+ __field( enum dpu_sspp_multirect_index, multirect_index)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->layout = *layout;
+ __entry->multirect_index = multirect_index;
+ ),
+ TP_printk("index:%d layout:{%ux%u @ [%u/%u, %u/%u, %u/%u, %u/%u]} "
+ "multirect_index:%d", __entry->index, __entry->layout.width,
+ __entry->layout.height, __entry->layout.plane_addr[0],
+ __entry->layout.plane_size[0],
+ __entry->layout.plane_addr[1],
+ __entry->layout.plane_size[1],
+ __entry->layout.plane_addr[2],
+ __entry->layout.plane_size[2],
+ __entry->layout.plane_addr[3],
+ __entry->layout.plane_size[3], __entry->multirect_index)
+);
+
+TRACE_EVENT(dpu_plane_disable,
+ TP_PROTO(uint32_t drm_id, bool is_virtual, uint32_t multirect_mode),
+ TP_ARGS(drm_id, is_virtual, multirect_mode),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( bool, is_virtual )
+ __field( uint32_t, multirect_mode )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->is_virtual = is_virtual;
+ __entry->multirect_mode = multirect_mode;
+ ),
+ TP_printk("id:%u is_virtual:%s multirect_mode:%u", __entry->drm_id,
+ __entry->is_virtual ? "true" : "false",
+ __entry->multirect_mode)
+);
+
+DECLARE_EVENT_CLASS(dpu_rm_iter_template,
+ TP_PROTO(uint32_t id, uint32_t enc_id),
+ TP_ARGS(id, enc_id),
+ TP_STRUCT__entry(
+ __field( uint32_t, id )
+ __field( uint32_t, enc_id )
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->enc_id = enc_id;
+ ),
+ TP_printk("id:%d enc_id:%u", __entry->id, __entry->enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf,
+ TP_PROTO(uint32_t id, uint32_t enc_id),
+ TP_ARGS(id, enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_ctls,
+ TP_PROTO(uint32_t id, uint32_t enc_id),
+ TP_ARGS(id, enc_id)
+);
+
+TRACE_EVENT(dpu_rm_reserve_lms,
+ TP_PROTO(uint32_t id, uint32_t enc_id, uint32_t pp_id),
+ TP_ARGS(id, enc_id, pp_id),
+ TP_STRUCT__entry(
+ __field( uint32_t, id )
+ __field( uint32_t, enc_id )
+ __field( uint32_t, pp_id )
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->enc_id = enc_id;
+ __entry->pp_id = pp_id;
+ ),
+ TP_printk("id:%d enc_id:%u pp_id:%u", __entry->id,
+ __entry->enc_id, __entry->pp_id)
+);
+
+TRACE_EVENT(dpu_vbif_wait_xin_halt_fail,
+ TP_PROTO(enum dpu_vbif index, u32 xin_id),
+ TP_ARGS(index, xin_id),
+ TP_STRUCT__entry(
+ __field( enum dpu_vbif, index )
+ __field( u32, xin_id )
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->xin_id = xin_id;
+ ),
+ TP_printk("index:%d xin_id:%u", __entry->index, __entry->xin_id)
+);
+
+TRACE_EVENT(dpu_pp_connect_ext_te,
+ TP_PROTO(enum dpu_pingpong pp, u32 cfg),
+ TP_ARGS(pp, cfg),
+ TP_STRUCT__entry(
+ __field( enum dpu_pingpong, pp )
+ __field( u32, cfg )
+ ),
+ TP_fast_assign(
+ __entry->pp = pp;
+ __entry->cfg = cfg;
+ ),
+ TP_printk("pp:%d cfg:%u", __entry->pp, __entry->cfg)
+);
+
+TRACE_EVENT(dpu_core_irq_register_callback,
+ TP_PROTO(int irq_idx, void *callback),
+ TP_ARGS(irq_idx, callback),
+ TP_STRUCT__entry(
+ __field( int, irq_idx )
+ __field( void *, callback)
+ ),
+ TP_fast_assign(
+ __entry->irq_idx = irq_idx;
+ __entry->callback = callback;
+ ),
+ TP_printk("irq_idx:%d callback:%ps", __entry->irq_idx,
+ __entry->callback)
+);
+
+TRACE_EVENT(dpu_core_irq_unregister_callback,
+ TP_PROTO(int irq_idx),
+ TP_ARGS(irq_idx),
+ TP_STRUCT__entry(
+ __field( int, irq_idx )
+ ),
+ TP_fast_assign(
+ __entry->irq_idx = irq_idx;
+ ),
+ TP_printk("irq_idx:%d", __entry->irq_idx)
+);
+
+TRACE_EVENT(dpu_core_perf_update_clk,
+ TP_PROTO(struct drm_device *dev, bool stop_req, u64 clk_rate),
+ TP_ARGS(dev, stop_req, clk_rate),
+ TP_STRUCT__entry(
+ __string( dev_name, dev->unique )
+ __field( bool, stop_req )
+ __field( u64, clk_rate )
+ ),
+ TP_fast_assign(
+ __assign_str(dev_name, dev->unique);
+ __entry->stop_req = stop_req;
+ __entry->clk_rate = clk_rate;
+ ),
+ TP_printk("dev:%s stop_req:%s clk_rate:%llu", __get_str(dev_name),
+ __entry->stop_req ? "true" : "false", __entry->clk_rate)
+);
+
+TRACE_EVENT(dpu_hw_ctl_update_pending_flush,
+ TP_PROTO(u32 new_bits, u32 pending_mask),
+ TP_ARGS(new_bits, pending_mask),
+ TP_STRUCT__entry(
+ __field( u32, new_bits )
+ __field( u32, pending_mask )
+ ),
+ TP_fast_assign(
+ __entry->new_bits = new_bits;
+ __entry->pending_mask = pending_mask;
+ ),
+ TP_printk("new=%x existing=%x", __entry->new_bits,
+ __entry->pending_mask)
+);
+
+DECLARE_EVENT_CLASS(dpu_hw_ctl_pending_flush_template,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush),
+ TP_STRUCT__entry(
+ __field( u32, pending_mask )
+ __field( u32, ctl_flush )
+ ),
+ TP_fast_assign(
+ __entry->pending_mask = pending_mask;
+ __entry->ctl_flush = ctl_flush;
+ ),
+ TP_printk("pending_mask=%x CTL_FLUSH=%x", __entry->pending_mask,
+ __entry->ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_clear_pending_flush,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template,
+ dpu_hw_ctl_trigger_pending_flush,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_trigger_prepare,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_trigger_start,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+
+#define DPU_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
+#define DPU_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
+#define DPU_ATRACE_FUNC() DPU_ATRACE_BEGIN(__func__)
+
+#define DPU_ATRACE_INT(name, value) \
+ trace_dpu_trace_counter(current->tgid, name, value)
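+
+/*
+ * Usage sketch: BEGIN/END must be paired with the same name so the "B"/"E"
+ * markers emitted by tracing_mark_write() nest correctly, e.g.:
+ *
+ *	DPU_ATRACE_BEGIN("kickoff");
+ *	...
+ *	DPU_ATRACE_END("kickoff");
+ */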
+
+#endif /* _DPU_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
new file mode 100644
index 000000000..1305e250b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+
+#include "dpu_vbif.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_trace.h"
+
+static struct dpu_hw_vbif *dpu_get_vbif(struct dpu_kms *dpu_kms, enum dpu_vbif vbif_idx)
+{
+ if (vbif_idx < ARRAY_SIZE(dpu_kms->hw_vbif))
+ return dpu_kms->hw_vbif[vbif_idx];
+
+ return NULL;
+}
+
+static const char *dpu_vbif_name(enum dpu_vbif idx)
+{
+ switch (idx) {
+ case VBIF_RT:
+ return "VBIF_RT";
+ case VBIF_NRT:
+ return "VBIF_NRT";
+ default:
+ return "??";
+ }
+}
+
+/**
+ * _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
+ * @vbif: Pointer to hardware vbif driver
+ * @xin_id: Client interface identifier
+ * @return: 0 if success; error code otherwise
+ */
+static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
+{
+ ktime_t timeout;
+ bool status;
+ int rc;
+
+ if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
+ DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
+ return -EINVAL;
+ }
+
+ timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
+ for (;;) {
+ status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+ if (status)
+ break;
+ if (ktime_compare_safe(ktime_get(), timeout) > 0) {
+ status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+ break;
+ }
+ usleep_range(501, 1000);
+ }
+
+ if (!status) {
+ rc = -ETIMEDOUT;
+ DPU_ERROR("%s client %d not halting. TIMEDOUT.\n",
+ dpu_vbif_name(vbif->idx), xin_id);
+ } else {
+ rc = 0;
+ DRM_DEBUG_ATOMIC("%s client %d is halted\n",
+ dpu_vbif_name(vbif->idx), xin_id);
+ }
+
+ return rc;
+}
+
+/**
+ * _dpu_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
+ * @vbif: Pointer to hardware vbif driver
+ * @ot_lim: Pointer to OT limit to be modified
+ * @params: Pointer to usecase parameters
+ */
+static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
+ u32 *ot_lim, struct dpu_vbif_set_ot_params *params)
+{
+ u64 pps;
+ const struct dpu_vbif_dynamic_ot_tbl *tbl;
+ u32 i;
+
+ if (!vbif || !(vbif->cap->features & BIT(DPU_VBIF_QOS_OTLIM)))
+ return;
+
+ /* Dynamic OT setting done only for WFD */
+ if (!params->is_wfd)
+ return;
+
+ pps = params->frame_rate;
+ pps *= params->width;
+ pps *= params->height;
+
+ tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
+ &vbif->cap->dynamic_ot_wr_tbl;
+
+ for (i = 0; i < tbl->count; i++) {
+ if (pps <= tbl->cfg[i].pps) {
+ *ot_lim = tbl->cfg[i].ot_limit;
+ break;
+ }
+ }
+
+ DRM_DEBUG_ATOMIC("%s xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
+ dpu_vbif_name(vbif->idx), params->xin_id,
+ params->width, params->height, params->frame_rate,
+ pps, *ot_lim);
+}
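+
+/*
+ * Worked example (illustrative numbers): a 1920x1080 WFD client at 60 fps
+ * gives pps = 60 * 1920 * 1080 = 124416000; the first dynamic OT table
+ * entry whose cfg[i].pps is >= that value supplies the OT limit.
+ */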
+
+/**
+ * _dpu_vbif_get_ot_limit - get OT based on usecase & configuration parameters
+ * @vbif: Pointer to hardware vbif driver
+ * @params: Pointer to usecase parameters
+ * @return: OT limit
+ */
+static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
+ struct dpu_vbif_set_ot_params *params)
+{
+ u32 ot_lim = 0;
+ u32 val;
+
+ if (!vbif || !vbif->cap) {
+ DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
+		return 0;
+ }
+
+ if (vbif->cap->default_ot_wr_limit && !params->rd)
+ ot_lim = vbif->cap->default_ot_wr_limit;
+ else if (vbif->cap->default_ot_rd_limit && params->rd)
+ ot_lim = vbif->cap->default_ot_rd_limit;
+
+ /*
+ * If default ot is not set from dt/catalog,
+ * then do not configure it.
+ */
+ if (ot_lim == 0)
+ goto exit;
+
+ /* Modify the limits if the target and the use case requires it */
+ _dpu_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);
+
+	if (vbif->ops.get_limit_conf) {
+ val = vbif->ops.get_limit_conf(vbif,
+ params->xin_id, params->rd);
+ if (val == ot_lim)
+ ot_lim = 0;
+ }
+
+exit:
+ DRM_DEBUG_ATOMIC("%s xin:%d ot_lim:%d\n",
+ dpu_vbif_name(vbif->idx), params->xin_id, ot_lim);
+ return ot_lim;
+}
+
+/**
+ * dpu_vbif_set_ot_limit - set OT based on usecase & configuration parameters
+ * @dpu_kms: DPU handler
+ * @params: Pointer to usecase parameters
+ *
+ * Note this function would block waiting for bus halt.
+ */
+void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_ot_params *params)
+{
+ struct dpu_hw_vbif *vbif;
+ struct dpu_hw_mdp *mdp;
+ bool forced_on = false;
+ u32 ot_lim;
+ int ret;
+
+ mdp = dpu_kms->hw_mdp;
+
+ vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
+ if (!vbif || !mdp) {
+ DRM_DEBUG_ATOMIC("invalid arguments vbif %d mdp %d\n",
+ vbif != NULL, mdp != NULL);
+ return;
+ }
+
+ if (!mdp->ops.setup_clk_force_ctrl ||
+ !vbif->ops.set_limit_conf ||
+ !vbif->ops.set_halt_ctrl)
+ return;
+
+ /* set write_gather_en for all write clients */
+ if (vbif->ops.set_write_gather_en && !params->rd)
+ vbif->ops.set_write_gather_en(vbif, params->xin_id);
+
+ ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;
+
+ if (ot_lim == 0)
+ return;
+
+ trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
+ params->vbif_idx);
+
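+	/*
+	 * Program the new limit with the client halted: force the client
+	 * clock on, write the limit, halt the xin and wait for it to drain,
+	 * then unhalt and drop the forced clock.
+	 */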
+ forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+ vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
+
+ vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
+
+ ret = _dpu_vbif_wait_for_xin_halt(vbif, params->xin_id);
+ if (ret)
+ trace_dpu_vbif_wait_xin_halt_fail(vbif->idx, params->xin_id);
+
+ vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
+
+ if (forced_on)
+ mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+}
+
+void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_qos_params *params)
+{
+ struct dpu_hw_vbif *vbif;
+ struct dpu_hw_mdp *mdp;
+ bool forced_on = false;
+ const struct dpu_vbif_qos_tbl *qos_tbl;
+ int i;
+
+ if (!params || !dpu_kms->hw_mdp) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+ mdp = dpu_kms->hw_mdp;
+
+ vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
+
+ if (!vbif || !vbif->cap) {
+ DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
+ return;
+ }
+
+ if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
+ DRM_DEBUG_ATOMIC("qos remap not supported\n");
+ return;
+ }
+
+ qos_tbl = params->is_rt ? &vbif->cap->qos_rt_tbl :
+ &vbif->cap->qos_nrt_tbl;
+
+ if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
+ DRM_DEBUG_ATOMIC("qos tbl not defined\n");
+ return;
+ }
+
+ forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+ for (i = 0; i < qos_tbl->npriority_lvl; i++) {
+ DRM_DEBUG_ATOMIC("%s xin:%d lvl:%d/%d\n",
+ dpu_vbif_name(params->vbif_idx), params->xin_id, i,
+ qos_tbl->priority_lvl[i]);
+ vbif->ops.set_qos_remap(vbif, params->xin_id, i,
+ qos_tbl->priority_lvl[i]);
+ }
+
+ if (forced_on)
+ mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+}
+
+void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
+{
+ struct dpu_hw_vbif *vbif;
+ u32 i, pnd, src;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ vbif = dpu_kms->hw_vbif[i];
+ if (vbif && vbif->ops.clear_errors) {
+ vbif->ops.clear_errors(vbif, &pnd, &src);
+ if (pnd || src) {
+ DRM_DEBUG_KMS("%s: pnd 0x%X, src 0x%X\n",
+ dpu_vbif_name(vbif->idx), pnd, src);
+ }
+ }
+ }
+}
+
+void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
+{
+ struct dpu_hw_vbif *vbif;
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ vbif = dpu_kms->hw_vbif[i];
+ if (vbif && vbif->cap && vbif->ops.set_mem_type) {
+ for (j = 0; j < vbif->cap->memtype_count; j++)
+ vbif->ops.set_mem_type(
+ vbif, j, vbif->cap->memtype[j]);
+ }
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+
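+/*
+ * Builds a "vbif/<id>/" subtree under the caller-supplied debugfs root, one
+ * directory per VBIF in the catalog, exposing its features, halt timeout
+ * and static OT limits, plus a pair of entries per dynamic OT table row.
+ */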
+void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
+{
+ char vbif_name[32];
+ struct dentry *entry, *debugfs_vbif;
+ int i, j;
+
+ entry = debugfs_create_dir("vbif", debugfs_root);
+
+ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+ const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
+
+ snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
+
+ debugfs_vbif = debugfs_create_dir(vbif_name, entry);
+
+ debugfs_create_u32("features", 0600, debugfs_vbif,
+ (u32 *)&vbif->features);
+
+ debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif,
+ (u32 *)&vbif->xin_halt_timeout);
+
+ debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif,
+ (u32 *)&vbif->default_ot_rd_limit);
+
+ debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif,
+ (u32 *)&vbif->default_ot_wr_limit);
+
+ for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
+ const struct dpu_vbif_dynamic_ot_cfg *cfg =
+ &vbif->dynamic_ot_rd_tbl.cfg[j];
+
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_rd_%d_pps", j);
+ debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
+ (u64 *)&cfg->pps);
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_rd_%d_ot_limit", j);
+ debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
+ (u32 *)&cfg->ot_limit);
+ }
+
+ for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
+ const struct dpu_vbif_dynamic_ot_cfg *cfg =
+ &vbif->dynamic_ot_wr_tbl.cfg[j];
+
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_wr_%d_pps", j);
+ debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
+ (u64 *)&cfg->pps);
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_wr_%d_ot_limit", j);
+ debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
+ (u32 *)&cfg->ot_limit);
+ }
+ }
+}
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
new file mode 100644
index 000000000..ab490177d
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DPU_VBIF_H__
+#define __DPU_VBIF_H__
+
+#include "dpu_kms.h"
+
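+/**
+ * struct dpu_vbif_set_ot_params - OT limit configuration parameters
+ * @xin_id: client interface identifier
+ * @num: pipe identifier (debug only)
+ * @width: source framebuffer width
+ * @height: source framebuffer height
+ * @frame_rate: frames per second
+ * @rd: true if read client, false if write client
+ * @is_wfd: true if pipe is used in a WFD use case
+ * @vbif_idx: vbif identifier
+ * @clk_ctrl: clock control identifier of the xin
+ */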
+struct dpu_vbif_set_ot_params {
+ u32 xin_id;
+ u32 num;
+ u32 width;
+ u32 height;
+ u32 frame_rate;
+ bool rd;
+ bool is_wfd;
+ u32 vbif_idx;
+ u32 clk_ctrl;
+};
+
+struct dpu_vbif_set_memtype_params {
+ u32 xin_id;
+ u32 vbif_idx;
+ u32 clk_ctrl;
+ bool is_cacheable;
+};
+
+/**
+ * struct dpu_vbif_set_qos_params - QoS remapper parameter
+ * @vbif_idx: vbif identifier
+ * @xin_id: client interface identifier
+ * @clk_ctrl: clock control identifier of the xin
+ * @num: pipe identifier (debug only)
+ * @is_rt: true if pipe is used in real-time use case
+ */
+struct dpu_vbif_set_qos_params {
+ u32 vbif_idx;
+ u32 xin_id;
+ u32 clk_ctrl;
+ u32 num;
+ bool is_rt;
+};
+
+/**
+ * dpu_vbif_set_ot_limit - set OT limit for vbif client
+ * @dpu_kms: DPU handler
+ * @params: Pointer to OT configuration parameters
+ */
+void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_ot_params *params);
+
+/**
+ * dpu_vbif_set_qos_remap - set QoS priority level remap
+ * @dpu_kms: DPU handler
+ * @params: Pointer to QoS configuration parameters
+ */
+void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_qos_params *params);
+
+/**
+ * dpu_vbif_clear_errors - clear any vbif errors
+ * @dpu_kms: DPU handler
+ */
+void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_vbif_init_memtypes - initialize xin memory types for vbif
+ * @dpu_kms: DPU handler
+ */
+void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms);
+
+void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
+
+#endif /* __DPU_VBIF_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
new file mode 100644
index 000000000..2a5a68366
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <drm/drm_edid.h>
+
+#include "dpu_writeback.h"
+
+static int dpu_wb_conn_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+
+ /*
+	 * Ideally we should limit the modes only to the maxlinewidth, but on
+	 * some chipsets that would allow even 4k modes to be added, which
+	 * would then fail the per-SSPP bandwidth checks. So, until dual-SSPP
+	 * and source-split support are added, limit the modes based on
+	 * max_mixer_width; 4K modes can be supported once those land.
+ */
+ return drm_add_modes_noedid(connector, dpu_kms->catalog->caps->max_mixer_width,
+ dev->mode_config.max_height);
+}
+
+static const struct drm_connector_funcs dpu_wb_conn_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int dpu_wb_conn_prepare_job(struct drm_writeback_connector *connector,
+ struct drm_writeback_job *job)
+{
+ struct dpu_wb_connector *dpu_wb_conn = to_dpu_wb_conn(connector);
+
+ if (!job->fb)
+ return 0;
+
+ dpu_encoder_prepare_wb_job(dpu_wb_conn->wb_enc, job);
+
+ return 0;
+}
+
+static void dpu_wb_conn_cleanup_job(struct drm_writeback_connector *connector,
+ struct drm_writeback_job *job)
+{
+ struct dpu_wb_connector *dpu_wb_conn = to_dpu_wb_conn(connector);
+
+ if (!job->fb)
+ return;
+
+ dpu_encoder_cleanup_wb_job(dpu_wb_conn->wb_enc, job);
+}
+
+static const struct drm_connector_helper_funcs dpu_wb_conn_helper_funcs = {
+ .get_modes = dpu_wb_conn_get_modes,
+ .prepare_writeback_job = dpu_wb_conn_prepare_job,
+ .cleanup_writeback_job = dpu_wb_conn_cleanup_job,
+};
+
+int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
+ const u32 *format_list, u32 num_formats)
+{
+ struct dpu_wb_connector *dpu_wb_conn;
+ int rc = 0;
+
+ dpu_wb_conn = devm_kzalloc(dev->dev, sizeof(*dpu_wb_conn), GFP_KERNEL);
+ if (!dpu_wb_conn)
+ return -ENOMEM;
+
+ drm_connector_helper_add(&dpu_wb_conn->base.base, &dpu_wb_conn_helper_funcs);
+
+	/*
+	 * DPU initializes and sets up the encoder completely for the
+	 * writeback case, hence it should use the new API
+	 * drm_writeback_connector_init_with_encoder() to initialize the
+	 * writeback connector.
+ */
+ rc = drm_writeback_connector_init_with_encoder(dev, &dpu_wb_conn->base, enc,
+ &dpu_wb_conn_funcs, format_list, num_formats);
+
+ if (!rc)
+ dpu_wb_conn->wb_enc = enc;
+
+ return rc;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h
new file mode 100644
index 000000000..5a75ea916
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DPU_WRITEBACK_H
+#define _DPU_WRITEBACK_H
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_file.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_writeback.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_encoder_phys.h"
+
+struct dpu_wb_connector {
+ struct drm_writeback_connector base;
+ struct drm_encoder *wb_enc;
+};
+
+static inline struct dpu_wb_connector *to_dpu_wb_conn(struct drm_writeback_connector *conn)
+{
+ return container_of(conn, struct dpu_wb_connector, base);
+}
+
+int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
+ const u32 *format_list, u32 num_formats);
+
+#endif /* _DPU_WRITEBACK_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
new file mode 100644
index 000000000..9fc9dbde8
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
@@ -0,0 +1,1155 @@
+#ifndef __MEDIA_INFO_H__
+#define __MEDIA_INFO_H__
+
+#ifndef MSM_MEDIA_ALIGN
+#define MSM_MEDIA_ALIGN(__sz, __align) (((__align) & ((__align) - 1)) ?\
+ ((((__sz) + (__align) - 1) / (__align)) * (__align)) :\
+ (((__sz) + (__align) - 1) & (~((__align) - 1))))
+#endif
+
+#ifndef MSM_MEDIA_ROUNDUP
+#define MSM_MEDIA_ROUNDUP(__sz, __r) (((__sz) + ((__r) - 1)) / (__r))
+#endif
+
+#ifndef MSM_MEDIA_MAX
+#define MSM_MEDIA_MAX(__a, __b) ((__a) > (__b)?(__a):(__b))
+#endif
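+
+/*
+ * Examples: MSM_MEDIA_ALIGN(1920, 128) == 1920 and
+ * MSM_MEDIA_ALIGN(1921, 128) == 2048 (power-of-two fast path);
+ * MSM_MEDIA_ROUNDUP(1080, 32) == 34 (ceiling division).
+ */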
+
+enum color_fmts {
+ /* Venus NV12:
+ * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+ * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * U V U V U V U V U V U V . . . . ^
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Extradata: Arbitrary (software-imposed) padding
+	 * Total size = align(Y_Stride * Y_Scanlines
+ * + UV_Stride * UV_Scanlines
+ * + max(Extradata, Y_Stride * 8), 4096)
+ */
+ COLOR_FMT_NV12,
+
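+	/*
+	 * Worked example (illustrative, following the NV12 rules above) for
+	 * a 1920x1080 frame: Y_Stride = UV_Stride = align(1920, 128) = 1920,
+	 * Y_Scanlines = align(1080, 32) = 1088 and UV_Scanlines =
+	 * align(540, 16) = 544, so with Extradata < Y_Stride * 8 the total
+	 * is align(1920*1088 + 1920*544 + 1920*8, 4096) = 3149824 bytes.
+	 */
+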
+ /* Venus NV21:
+ * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+ * by an interleaved V/U plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * V U V U V U V U V U V U . . . . ^
+ * V U V U V U V U V U V U . . . . |
+ * V U V U V U V U V U V U . . . . |
+ * V U V U V U V U V U V U . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Padding & Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Extradata: Arbitrary (software-imposed) padding
+ * Total size = align(Y_Stride * Y_Scanlines
+ *          + UV_Stride * UV_Scanlines
+ *          + max(Extradata, Y_Stride * 8), 4096)
+ */
+ COLOR_FMT_NV21,
+ /* Venus NV12_MVTB:
+ * Two YUV 4:2:0 images/views stacked one after the other
+ * in a top-bottom layout. Each view is laid out exactly like
+ * NV12: a plane of 8 bit Y samples followed by an interleaved
+ * U/V plane containing 8 bit 2x2 subsampled colour difference
+ * samples.
+ *
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | |
+ * . . . . . . . . . . . . . . . . | View_1
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V |
+ * U V U V U V U V U V U V . . . . ^ |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . UV_Scanlines |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V V
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | |
+ * . . . . . . . . . . . . . . . . | View_2
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V |
+ * U V U V U V U V U V U V . . . . ^ |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . UV_Scanlines |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * View_1 begin at: 0 (zero)
+ * View_2 begin at: Y_Stride * Y_Scanlines + UV_Stride * UV_Scanlines
+ * Extradata: Arbitrary (software-imposed) padding
+ * Total size = align((2*(Y_Stride * Y_Scanlines)
+ * + 2*(UV_Stride * UV_Scanlines) + Extradata), 4096)
+ */
+ COLOR_FMT_NV12_MVTB,
+ /*
+ * The buffer can be of 2 types:
+ * (1) Venus NV12 UBWC Progressive
+ * (2) Venus NV12 UBWC Interlaced
+ *
+ * (1) Venus NV12 UBWC Progressive Buffer Format:
+ * Compressed Macro-tile format for NV12.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 8 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 8 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ * Y_Stride = align(Width, 128)
+ * UV_Stride = align(Width, 128)
+ * Y_Scanlines = align(Height, 32)
+ * UV_Scanlines = align((Height+1)/2, 32)
+ * Y_UBWC_Plane_size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align( Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size
+ * + max(Extradata, Y_Stride * 48), 4096)
+ *
+ *
+ * (2) Venus NV12 UBWC Interlaced Buffer Format:
+ * Compressed Macro-tile format for NV12 interlaced.
+ * Contains 8 planes in the following order -
+ * (A) Y_Meta_Top_Field_Plane
+ * (B) Y_UBWC_Top_Field_Plane
+ * (C) UV_Meta_Top_Field_Plane
+ * (D) UV_UBWC_Top_Field_Plane
+ * (E) Y_Meta_Bottom_Field_Plane
+ * (F) Y_UBWC_Bottom_Field_Plane
+ * (G) UV_Meta_Bottom_Field_Plane
+ * (H) UV_UBWC_Bottom_Field_Plane
+ * Y_Meta_Top_Field_Plane consists of meta information to decode
+ * compressed tile data for Y_UBWC_Top_Field_Plane.
+ * Y_UBWC_Top_Field_Plane consists of Y data in compressed macro-tile
+ * format for top field of an interlaced frame.
+ * UBWC decoder block will use the Y_Meta_Top_Field_Plane data together
+ * with Y_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+ * 8 bit Y samples for top field of an interlaced frame.
+ *
+ * UV_Meta_Top_Field_Plane consists of meta information to decode
+ * compressed tile data in UV_UBWC_Top_Field_Plane.
+ * UV_UBWC_Top_Field_Plane consists of UV data in compressed macro-tile
+ * format for top field of an interlaced frame.
+ * UBWC decoder block will use UV_Meta_Top_Field_Plane data together
+ * with UV_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+ * 8 bit subsampled color difference samples for top field of an
+ * interlaced frame.
+ *
+ * Each tile in Y_UBWC_Top_Field_Plane/UV_UBWC_Top_Field_Plane is
+ * independently decodable and randomly accessible. There is no
+ * dependency between tiles.
+ *
+ * Y_Meta_Bottom_Field_Plane consists of meta information to decode
+ * compressed tile data for Y_UBWC_Bottom_Field_Plane.
+ * Y_UBWC_Bottom_Field_Plane consists of Y data in compressed macro-tile
+ * format for bottom field of an interlaced frame.
+ * UBWC decoder block will use the Y_Meta_Bottom_Field_Plane data
+ * together with Y_UBWC_Bottom_Field_Plane data to produce loss-less
+ * uncompressed 8 bit Y samples for bottom field of an interlaced frame.
+ *
+ * UV_Meta_Bottom_Field_Plane consists of meta information to decode
+ * compressed tile data in UV_UBWC_Bottom_Field_Plane.
+ * UV_UBWC_Bottom_Field_Plane consists of UV data in compressed
+ * macro-tile format for bottom field of an interlaced frame.
+ * UBWC decoder block will use UV_Meta_Bottom_Field_Plane data together
+ * with UV_UBWC_Bottom_Field_Plane data to produce loss-less
+ * uncompressed 8 bit subsampled color difference samples for bottom
+ * field of an interlaced frame.
+ *
+ * Each tile in Y_UBWC_Bottom_Field_Plane/UV_UBWC_Bottom_Field_Plane is
+ * independently decodable and randomly accessible. There is no
+ * dependency between tiles.
+ *
+ * <-----Y_TF_Meta_Stride---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Half_height |
+ * M M M M M M M M M M M M . . | Meta_Y_TF_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-Compressed tile Y_TF Stride->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_TF_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----UV_TF_Meta_Stride---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_TF_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-Compressed tile UV_TF Stride->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_TF_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-----Y_BF_Meta_Stride---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Half_height |
+ * M M M M M M M M M M M M . . | Meta_Y_BF_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-Compressed tile Y_BF Stride->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_BF_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----UV_BF_Meta_Stride---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_BF_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-Compressed tile UV_BF Stride->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_BF_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ * Half_height = (Height+1)>>1
+ * Y_TF_Stride = align(Width, 128)
+ * UV_TF_Stride = align(Width, 128)
+ * Y_TF_Scanlines = align(Half_height, 32)
+ * UV_TF_Scanlines = align((Half_height+1)/2, 32)
+ * Y_UBWC_TF_Plane_size = align(Y_TF_Stride * Y_TF_Scanlines, 4096)
+ * UV_UBWC_TF_Plane_size = align(UV_TF_Stride * UV_TF_Scanlines, 4096)
+ * Y_TF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_TF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+ * Y_TF_Meta_Plane_size =
+ * align(Y_TF_Meta_Stride * Y_TF_Meta_Scanlines, 4096)
+ * UV_TF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_TF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+ * UV_TF_Meta_Plane_size =
+ * align(UV_TF_Meta_Stride * UV_TF_Meta_Scanlines, 4096)
+ * Y_BF_Stride = align(Width, 128)
+ * UV_BF_Stride = align(Width, 128)
+ * Y_BF_Scanlines = align(Half_height, 32)
+ * UV_BF_Scanlines = align((Half_height+1)/2, 32)
+ * Y_UBWC_BF_Plane_size = align(Y_BF_Stride * Y_BF_Scanlines, 4096)
+ * UV_UBWC_BF_Plane_size = align(UV_BF_Stride * UV_BF_Scanlines, 4096)
+ * Y_BF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_BF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+ * Y_BF_Meta_Plane_size =
+ * align(Y_BF_Meta_Stride * Y_BF_Meta_Scanlines, 4096)
+ * UV_BF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_BF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+ * UV_BF_Meta_Plane_size =
+ * align(UV_BF_Meta_Stride * UV_BF_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align( Y_UBWC_TF_Plane_size + UV_UBWC_TF_Plane_size +
+ * Y_TF_Meta_Plane_size + UV_TF_Meta_Plane_size +
+ * Y_UBWC_BF_Plane_size + UV_UBWC_BF_Plane_size +
+ * Y_BF_Meta_Plane_size + UV_BF_Meta_Plane_size
+ * + max(Extradata, Y_TF_Stride * 48), 4096)
+ */
+ COLOR_FMT_NV12_UBWC,
+ /* Venus NV12 10-bit UBWC:
+ * Compressed Macro-tile format for 10-bit NV12.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ----->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ *
+ * Y_Stride = align(align(Width, 192) * 4/3, 256)
+ * UV_Stride = align(align(Width, 192) * 4/3, 256)
+ * Y_Scanlines = align(Height, 16)
+ * UV_Scanlines = align(Height/2, 16)
+ * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size
+ * + max(Extradata, Y_Stride * 48), 4096)
+ */
+ COLOR_FMT_NV12_BPP10_UBWC,
+ /* Venus RGBA8888 format:
+ * Contains 1 plane in the following order -
+ * (A) RGBA plane
+ *
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 128)
+ * RGB_Scanlines = align(Height, 32)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Plane_size + Extradata, 4096)
+ */
+ COLOR_FMT_RGBA8888,
+ /* Venus RGBA8888 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGBA plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 128)
+ * RGB_Scanlines = align(Height, 32)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+ * Extradata, 4096)
+ */
+ COLOR_FMT_RGBA8888_UBWC,
+ /* Venus RGBA1010102 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGBA plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 256)
+ * RGB_Scanlines = align(Height, 16)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+ * Extradata, 4096)
+ */
+ COLOR_FMT_RGBA1010102_UBWC,
+ /* Venus RGB565 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGB plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 2, 128)
+ * RGB_Scanlines = align(Height, 16)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+ * Extradata, 4096)
+ */
+ COLOR_FMT_RGB565_UBWC,
+ /* Venus P010 UBWC:
+ * Compressed Macro-tile format for P010.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ----->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ *
+ * Y_Stride = align(Width * 2, 256)
+ * UV_Stride = align(Width * 2, 256)
+ * Y_Scanlines = align(Height, 16)
+ * UV_Scanlines = align(Height/2, 16)
+ * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size
+ * + max(Extradata, Y_Stride * 48), 4096)
+ */
+ COLOR_FMT_P010_UBWC,
+ /* Venus P010:
+ * YUV 4:2:0 image with a plane of 10 bit Y samples followed
+ * by an interleaved U/V plane containing 10 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * U V U V U V U V U V U V . . . . ^
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width * 2 aligned to 128
+ * UV_Stride : Width * 2 aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Extradata: Arbitrary (software-imposed) padding
+ * Total size = align(Y_Stride * Y_Scanlines
+ *          + UV_Stride * UV_Scanlines
+ *          + max(Extradata, Y_Stride * 8), 4096)
+ */
+ COLOR_FMT_P010,
+};
+
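+/*
+ * Note: the self-referential defines below are most likely markers so
+ * that callers can detect the availability of a given format at
+ * preprocessing time with #ifdef, which is not possible with bare
+ * enum values.
+ */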
+#define COLOR_FMT_RGBA1010102_UBWC COLOR_FMT_RGBA1010102_UBWC
+#define COLOR_FMT_RGB565_UBWC COLOR_FMT_RGB565_UBWC
+#define COLOR_FMT_P010_UBWC COLOR_FMT_P010_UBWC
+#define COLOR_FMT_P010 COLOR_FMT_P010
+
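To make the NV12 size formula from the enum comment concrete, a standalone sketch for a 1920x1080 frame (Extradata is taken as 8k here purely for illustration; the comment above calls it arbitrary):

#include <stdio.h>

#define ALIGN_UP(sz, a) ((((sz) + (a) - 1) / (a)) * (a))
#define MAX2(a, b)      ((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned int width = 1920, height = 1080;
	unsigned int y_stride   = ALIGN_UP(width, 128);            /* 1920 */
	unsigned int uv_stride  = ALIGN_UP(width, 128);            /* 1920 */
	unsigned int y_sclines  = ALIGN_UP(height, 32);            /* 1088 */
	unsigned int uv_sclines = ALIGN_UP((height + 1) / 2, 16);  /*  544 */
	unsigned int extradata  = 8 * 1024;  /* illustrative value only */
	unsigned int total = ALIGN_UP(y_stride * y_sclines +
				      uv_stride * uv_sclines +
				      MAX2(extradata, y_stride * 8), 4096);

	printf("NV12 1920x1080 buffer size: %u bytes\n", total);  /* 3149824 */
	return 0;
}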
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
+{
+ unsigned int stride = 0;
+
+ if (!width)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ stride = MSM_MEDIA_ALIGN(width, 128);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ stride = MSM_MEDIA_ALIGN(width, 192);
+ stride = MSM_MEDIA_ALIGN(stride * 4 / 3, 256);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ stride = MSM_MEDIA_ALIGN(width * 2, 256);
+ break;
+ case COLOR_FMT_P010:
+ stride = MSM_MEDIA_ALIGN(width * 2, 128);
+ break;
+ }
+
+ return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
+{
+ unsigned int stride = 0;
+
+ if (!width)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ stride = MSM_MEDIA_ALIGN(width, 128);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ stride = MSM_MEDIA_ALIGN(width, 192);
+ stride = MSM_MEDIA_ALIGN(stride * 4 / 3, 256);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ stride = MSM_MEDIA_ALIGN(width * 2, 256);
+ break;
+ case COLOR_FMT_P010:
+ stride = MSM_MEDIA_ALIGN(width * 2, 128);
+ break;
+ }
+
+ return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
+{
+ unsigned int sclines = 0;
+
+ if (!height)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010:
+ sclines = MSM_MEDIA_ALIGN(height, 32);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ sclines = MSM_MEDIA_ALIGN(height, 16);
+ break;
+ }
+
+ return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
+{
+ unsigned int sclines = 0;
+
+ if (!height)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ case COLOR_FMT_P010:
+ sclines = MSM_MEDIA_ALIGN((height + 1) >> 1, 16);
+ break;
+ case COLOR_FMT_NV12_UBWC:
+ sclines = MSM_MEDIA_ALIGN((height + 1) >> 1, 32);
+ break;
+ }
+
+ return sclines;
+}
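Taken together, the four helpers above are enough to locate the chroma data: for the linear NV12-style layouts the interleaved U/V plane starts right after the padded luma plane, at Y_Stride * Y_Scanlines. A sketch, assuming this header is on the include path (values for 1280x720 in the comments):

/* Sketch only: derive the NV12 plane layout from the helpers above. */
static void example_nv12_layout(unsigned int width, unsigned int height)
{
	unsigned int y_stride  = VENUS_Y_STRIDE(COLOR_FMT_NV12, width);      /* 1280 */
	unsigned int y_lines   = VENUS_Y_SCANLINES(COLOR_FMT_NV12, height);  /*  736 */
	unsigned int uv_stride = VENUS_UV_STRIDE(COLOR_FMT_NV12, width);     /* 1280 */
	unsigned int uv_offset = y_stride * y_lines;                     /* 942080 */

	/* uv_offset is the byte offset of the interleaved U/V plane. */
	(void)uv_stride;
	(void)uv_offset;
}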
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
+{
+ int y_tile_width = 0, y_meta_stride;
+
+ if (!width)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ y_tile_width = 32;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ y_tile_width = 48;
+ break;
+ default:
+ return 0;
+ }
+
+ y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width);
+ return MSM_MEDIA_ALIGN(y_meta_stride, 64);
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
+{
+ int y_tile_height = 0, y_meta_scanlines;
+
+ if (!height)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ y_tile_height = 8;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ y_tile_height = 4;
+ break;
+ default:
+ return 0;
+ }
+
+ y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height);
+ return MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
+{
+ int uv_tile_width = 0, uv_meta_stride;
+
+ if (!width)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ uv_tile_width = 16;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ uv_tile_width = 24;
+ break;
+ default:
+ return 0;
+ }
+
+ uv_meta_stride = MSM_MEDIA_ROUNDUP((width+1)>>1, uv_tile_width);
+ return MSM_MEDIA_ALIGN(uv_meta_stride, 64);
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
+{
+ int uv_tile_height = 0, uv_meta_scanlines;
+
+ if (!height)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ uv_tile_height = 8;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ uv_tile_height = 4;
+ break;
+ default:
+ return 0;
+ }
+
+ uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height+1)>>1, uv_tile_height);
+ return MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
+}
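The meta-plane helpers map directly onto the UBWC size formulas in the enum comment: one metadata byte per tile, with the stride and scanline counts padded to 64 and 16 respectively, and each plane rounded to 4096 bytes. A sketch for NV12 UBWC at 1280x720, assuming this header is included:

/* Sketch only: NV12 UBWC meta plane sizes for 1280x720. */
static void example_nv12_ubwc_meta(void)
{
	unsigned int w = 1280, h = 720;
	unsigned int y_ms  = VENUS_Y_META_STRIDE(COLOR_FMT_NV12_UBWC, w);      /* 64 */
	unsigned int y_ml  = VENUS_Y_META_SCANLINES(COLOR_FMT_NV12_UBWC, h);   /* 96 */
	unsigned int y_sz  = MSM_MEDIA_ALIGN(y_ms * y_ml, 4096);               /* 8192 */
	unsigned int uv_ms = VENUS_UV_META_STRIDE(COLOR_FMT_NV12_UBWC, w);     /* 64 */
	unsigned int uv_ml = VENUS_UV_META_SCANLINES(COLOR_FMT_NV12_UBWC, h);  /* 48 */
	unsigned int uv_sz = MSM_MEDIA_ALIGN(uv_ms * uv_ml, 4096);             /* 4096 */

	(void)y_sz;
	(void)uv_sz;
}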
+
+static unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
+{
+ unsigned int alignment = 0, bpp = 4;
+
+ if (!width)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888:
+ alignment = 128;
+ break;
+ case COLOR_FMT_RGB565_UBWC:
+ alignment = 256;
+ bpp = 2;
+ break;
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ alignment = 256;
+ break;
+ default:
+ return 0;
+ }
+
+ return MSM_MEDIA_ALIGN(width * bpp, alignment);
+}
+
+static unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
+{
+ unsigned int alignment = 0;
+
+ if (!height)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888:
+ alignment = 32;
+ break;
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ alignment = 16;
+ break;
+ default:
+ return 0;
+ }
+
+ return MSM_MEDIA_ALIGN(height, alignment);
+}
+
+static unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
+{
+ int rgb_meta_stride;
+
+ if (!width)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, 16);
+ return MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
+ }
+
+ return 0;
+}
+
+static unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
+{
+ int rgb_meta_scanlines;
+
+ if (!height)
+ return 0;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, 4);
+ return MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
+ }
+
+ return 0;
+}
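The RGB helpers compose the same way. A sketch of the RGBA8888 UBWC plane sizes for 1920x1080, assuming this header is included (the 4096 plane alignment and the meta-then-pixel plane order come from the COLOR_FMT_RGBA8888_UBWC comment above):

/* Sketch only: RGBA8888 UBWC plane sizes for 1920x1080. */
static void example_rgba8888_ubwc(void)
{
	unsigned int w = 1920, h = 1080;
	unsigned int stride  = VENUS_RGB_STRIDE(COLOR_FMT_RGBA8888_UBWC, w);          /* 7680 */
	unsigned int lines   = VENUS_RGB_SCANLINES(COLOR_FMT_RGBA8888_UBWC, h);       /* 1088 */
	unsigned int plane   = MSM_MEDIA_ALIGN(stride * lines, 4096);                 /* 8355840 */
	unsigned int mstride = VENUS_RGB_META_STRIDE(COLOR_FMT_RGBA8888_UBWC, w);     /* 128 */
	unsigned int mlines  = VENUS_RGB_META_SCANLINES(COLOR_FMT_RGBA8888_UBWC, h);  /* 272 */
	unsigned int mplane  = MSM_MEDIA_ALIGN(mstride * mlines, 4096);               /* 36864 */

	/* Meta plane first, then the RGBA plane, per the layout comment. */
	(void)(mplane + plane);
}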
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
new file mode 100644
index 000000000..a2b642294
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
@@ -0,0 +1,1181 @@
+#ifndef MDP4_XML
+#define MDP4_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mdp4_pipe {
+ VG1 = 0,
+ VG2 = 1,
+ RGB1 = 2,
+ RGB2 = 3,
+ RGB3 = 4,
+ VG3 = 5,
+ VG4 = 6,
+};
+
+enum mdp4_mixer {
+ MIXER0 = 0,
+ MIXER1 = 1,
+ MIXER2 = 2,
+};
+
+enum mdp4_intf {
+ INTF_LCDC_DTV = 0,
+ INTF_DSI_VIDEO = 1,
+ INTF_DSI_CMD = 2,
+ INTF_EBI2_TV = 3,
+};
+
+enum mdp4_cursor_format {
+ CURSOR_ARGB = 1,
+ CURSOR_XRGB = 2,
+};
+
+enum mdp4_frame_format {
+ FRAME_LINEAR = 0,
+ FRAME_TILE_ARGB_4X4 = 1,
+ FRAME_TILE_YCBCR_420 = 2,
+};
+
+enum mdp4_scale_unit {
+ SCALE_FIR = 0,
+ SCALE_MN_PHASE = 1,
+ SCALE_PIXEL_RPT = 2,
+};
+
+enum mdp4_dma {
+ DMA_P = 0,
+ DMA_S = 1,
+ DMA_E = 2,
+};
+
+#define MDP4_IRQ_OVERLAY0_DONE 0x00000001
+#define MDP4_IRQ_OVERLAY1_DONE 0x00000002
+#define MDP4_IRQ_DMA_S_DONE 0x00000004
+#define MDP4_IRQ_DMA_E_DONE 0x00000008
+#define MDP4_IRQ_DMA_P_DONE 0x00000010
+#define MDP4_IRQ_VG1_HISTOGRAM 0x00000020
+#define MDP4_IRQ_VG2_HISTOGRAM 0x00000040
+#define MDP4_IRQ_PRIMARY_VSYNC 0x00000080
+#define MDP4_IRQ_PRIMARY_INTF_UDERRUN 0x00000100
+#define MDP4_IRQ_EXTERNAL_VSYNC 0x00000200
+#define MDP4_IRQ_EXTERNAL_INTF_UDERRUN 0x00000400
+#define MDP4_IRQ_PRIMARY_RDPTR 0x00000800
+#define MDP4_IRQ_DMA_P_HISTOGRAM 0x00020000
+#define MDP4_IRQ_DMA_S_HISTOGRAM 0x04000000
+#define MDP4_IRQ_OVERLAY2_DONE 0x40000000
+#define REG_MDP4_VERSION 0x00000000
+#define MDP4_VERSION_MINOR__MASK 0x00ff0000
+#define MDP4_VERSION_MINOR__SHIFT 16
+static inline uint32_t MDP4_VERSION_MINOR(uint32_t val)
+{
+ return ((val) << MDP4_VERSION_MINOR__SHIFT) & MDP4_VERSION_MINOR__MASK;
+}
+#define MDP4_VERSION_MAJOR__MASK 0xff000000
+#define MDP4_VERSION_MAJOR__SHIFT 24
+static inline uint32_t MDP4_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << MDP4_VERSION_MAJOR__SHIFT) & MDP4_VERSION_MAJOR__MASK;
+}
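The generated helpers only pack values into a field; reading goes the other way, mask then shift. A sketch of the matching unpack for the version register (the function names are invented for illustration and are not part of this header):

/* Illustrative only: extract the fields that MDP4_VERSION_MAJOR() and
 * MDP4_VERSION_MINOR() pack into REG_MDP4_VERSION. */
static inline uint32_t example_mdp4_version_major(uint32_t reg)
{
	return (reg & MDP4_VERSION_MAJOR__MASK) >> MDP4_VERSION_MAJOR__SHIFT;
}

static inline uint32_t example_mdp4_version_minor(uint32_t reg)
{
	return (reg & MDP4_VERSION_MINOR__MASK) >> MDP4_VERSION_MINOR__SHIFT;
}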
+
+#define REG_MDP4_OVLP0_KICK 0x00000004
+
+#define REG_MDP4_OVLP1_KICK 0x00000008
+
+#define REG_MDP4_OVLP2_KICK 0x000000d0
+
+#define REG_MDP4_DMA_P_KICK 0x0000000c
+
+#define REG_MDP4_DMA_S_KICK 0x00000010
+
+#define REG_MDP4_DMA_E_KICK 0x00000014
+
+#define REG_MDP4_DISP_STATUS 0x00000018
+
+#define REG_MDP4_DISP_INTF_SEL 0x00000038
+#define MDP4_DISP_INTF_SEL_PRIM__MASK 0x00000003
+#define MDP4_DISP_INTF_SEL_PRIM__SHIFT 0
+static inline uint32_t MDP4_DISP_INTF_SEL_PRIM(enum mdp4_intf val)
+{
+ return ((val) << MDP4_DISP_INTF_SEL_PRIM__SHIFT) & MDP4_DISP_INTF_SEL_PRIM__MASK;
+}
+#define MDP4_DISP_INTF_SEL_SEC__MASK 0x0000000c
+#define MDP4_DISP_INTF_SEL_SEC__SHIFT 2
+static inline uint32_t MDP4_DISP_INTF_SEL_SEC(enum mdp4_intf val)
+{
+ return ((val) << MDP4_DISP_INTF_SEL_SEC__SHIFT) & MDP4_DISP_INTF_SEL_SEC__MASK;
+}
+#define MDP4_DISP_INTF_SEL_EXT__MASK 0x00000030
+#define MDP4_DISP_INTF_SEL_EXT__SHIFT 4
+static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val)
+{
+ return ((val) << MDP4_DISP_INTF_SEL_EXT__SHIFT) & MDP4_DISP_INTF_SEL_EXT__MASK;
+}
+#define MDP4_DISP_INTF_SEL_DSI_VIDEO 0x00000040
+#define MDP4_DISP_INTF_SEL_DSI_CMD 0x00000080
+
+#define REG_MDP4_RESET_STATUS 0x0000003c
+
+#define REG_MDP4_READ_CNFG 0x0000004c
+
+#define REG_MDP4_INTR_ENABLE 0x00000050
+
+#define REG_MDP4_INTR_STATUS 0x00000054
+
+#define REG_MDP4_INTR_CLEAR 0x00000058
+
+#define REG_MDP4_EBI2_LCD0 0x00000060
+
+#define REG_MDP4_EBI2_LCD1 0x00000064
+
+#define REG_MDP4_PORTMAP_MODE 0x00000070
+
+#define REG_MDP4_CS_CONTROLLER0 0x000000c0
+
+#define REG_MDP4_CS_CONTROLLER1 0x000000c4
+
+#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE7_MIXER1 0x80000000
+
+#define REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD 0x000100fc
+
+#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100
+#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007
+#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008
+#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070
+#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080
+#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700
+#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800
+#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE7_MIXER1 0x80000000
+
+#define REG_MDP4_VG2_SRC_FORMAT 0x00030050
+
+#define REG_MDP4_VG2_CONST_COLOR 0x00031008
+
+#define REG_MDP4_OVERLAY_FLUSH 0x00018000
+#define MDP4_OVERLAY_FLUSH_OVLP0 0x00000001
+#define MDP4_OVERLAY_FLUSH_OVLP1 0x00000002
+#define MDP4_OVERLAY_FLUSH_VG1 0x00000004
+#define MDP4_OVERLAY_FLUSH_VG2 0x00000008
+#define MDP4_OVERLAY_FLUSH_RGB1 0x00000010
+#define MDP4_OVERLAY_FLUSH_RGB2 0x00000020
+
+static inline uint32_t __offset_OVLP(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00010000;
+ case 1: return 0x00018000;
+ case 2: return 0x00088000;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP4_OVLP(uint32_t i0) { return 0x00000000 + __offset_OVLP(i0); }
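Per-instance overlay registers are therefore just a fixed per-register offset added to the block base from __offset_OVLP(); for example, the REG_MDP4_OVLP_CFG() helper defined just below resolves, for overlay 1, to 0x00018000 + 0x4 = 0x00018004. A sketch of that arithmetic (addresses checked against the offset table above, not against hardware):

/* Sketch only: resolved addresses for overlay pipe 1. */
static inline void example_ovlp1_addrs(void)
{
	uint32_t base = 0x00018000;         /* __offset_OVLP(1) */
	uint32_t cfg  = base + 0x00000004;  /* REG_MDP4_OVLP_CFG(1)  -> 0x18004 */
	uint32_t size = base + 0x00000008;  /* REG_MDP4_OVLP_SIZE(1) -> 0x18008 */

	(void)cfg;
	(void)size;
}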
+
+static inline uint32_t REG_MDP4_OVLP_CFG(uint32_t i0) { return 0x00000004 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_SIZE(uint32_t i0) { return 0x00000008 + __offset_OVLP(i0); }
+#define MDP4_OVLP_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_OVLP_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_OVLP_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_OVLP_SIZE_HEIGHT__SHIFT) & MDP4_OVLP_SIZE_HEIGHT__MASK;
+}
+#define MDP4_OVLP_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_OVLP_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_OVLP_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_OVLP_SIZE_WIDTH__SHIFT) & MDP4_OVLP_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_OVLP_BASE(uint32_t i0) { return 0x0000000c + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_STRIDE(uint32_t i0) { return 0x00000010 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_OPMODE(uint32_t i0) { return 0x00000014 + __offset_OVLP(i0); }
+
+static inline uint32_t __offset_STAGE(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00000104;
+ case 1: return 0x00000124;
+ case 2: return 0x00000144;
+ case 3: return 0x00000160;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003
+#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0
+static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp_alpha_type val)
+{
+ return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK;
+}
+#define MDP4_OVLP_STAGE_OP_FG_INV_ALPHA 0x00000004
+#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008
+#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030
+#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4
+static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp_alpha_type val)
+{
+ return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK;
+}
+#define MDP4_OVLP_STAGE_OP_BG_INV_ALPHA 0x00000040
+#define MDP4_OVLP_STAGE_OP_BG_MOD_ALPHA 0x00000080
+#define MDP4_OVLP_STAGE_OP_FG_TRANSP 0x00000100
+#define MDP4_OVLP_STAGE_OP_BG_TRANSP 0x00000200
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t __offset_STAGE_CO3(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00001004;
+ case 1: return 0x00001404;
+ case 2: return 0x00001804;
+ case 3: return 0x00001b84;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP4_OVLP_STAGE_CO3(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_CO3_SEL(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
+#define MDP4_OVLP_STAGE_CO3_SEL_FG_ALPHA 0x00000001
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW0(uint32_t i0) { return 0x00000180 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW1(uint32_t i0) { return 0x00000184 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH0(uint32_t i0) { return 0x00000188 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH1(uint32_t i0) { return 0x0000018c + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_CONFIG(uint32_t i0) { return 0x00000200 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_CSC(uint32_t i0) { return 0x00002000 + __offset_OVLP(i0); }
+
+
+static inline uint32_t REG_MDP4_OVLP_CSC_MV(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_MV_VAL(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }
+
+#define REG_MDP4_DMA_P_OP_MODE 0x00090070
+
+static inline uint32_t REG_MDP4_LUTN(uint32_t i0) { return 0x00094800 + 0x400*i0; }
+
+static inline uint32_t REG_MDP4_LUTN_LUT(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_LUTN_LUT_VAL(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }
+
+#define REG_MDP4_DMA_S_OP_MODE 0x000a0028
+
+static inline uint32_t REG_MDP4_DMA_E_QUANT(uint32_t i0) { return 0x000b0070 + 0x4*i0; }
+
+static inline uint32_t __offset_DMA(enum mdp4_dma idx)
+{
+ switch (idx) {
+ case DMA_P: return 0x00090000;
+ case DMA_S: return 0x000a0000;
+ case DMA_E: return 0x000b0000;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
+#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003
+#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0
+static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK;
+}
+#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c
+#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2
+static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK;
+}
+#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030
+#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4
+static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK;
+}
+#define MDP4_DMA_CONFIG_PACK_ALIGN_MSB 0x00000080
+#define MDP4_DMA_CONFIG_PACK__MASK 0x0000ff00
+#define MDP4_DMA_CONFIG_PACK__SHIFT 8
+static inline uint32_t MDP4_DMA_CONFIG_PACK(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CONFIG_PACK__SHIFT) & MDP4_DMA_CONFIG_PACK__MASK;
+}
+#define MDP4_DMA_CONFIG_DEFLKR_EN 0x01000000
+#define MDP4_DMA_CONFIG_DITHER_EN 0x01000000
+
+static inline uint32_t REG_MDP4_DMA_SRC_SIZE(enum mdp4_dma i0) { return 0x00000004 + __offset_DMA(i0); }
+#define MDP4_DMA_SRC_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_DMA_SRC_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT) & MDP4_DMA_SRC_SIZE_HEIGHT__MASK;
+}
+#define MDP4_DMA_SRC_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_DMA_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_DMA_SRC_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_DMA_SRC_SIZE_WIDTH__SHIFT) & MDP4_DMA_SRC_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_DMA_SRC_BASE(enum mdp4_dma i0) { return 0x00000008 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_SRC_STRIDE(enum mdp4_dma i0) { return 0x0000000c + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_DST_SIZE(enum mdp4_dma i0) { return 0x00000010 + __offset_DMA(i0); }
+#define MDP4_DMA_DST_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_DMA_DST_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_DMA_DST_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_DMA_DST_SIZE_HEIGHT__SHIFT) & MDP4_DMA_DST_SIZE_HEIGHT__MASK;
+}
+#define MDP4_DMA_DST_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_DMA_DST_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_DMA_DST_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_DMA_DST_SIZE_WIDTH__SHIFT) & MDP4_DMA_DST_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_SIZE(enum mdp4_dma i0) { return 0x00000044 + __offset_DMA(i0); }
+#define MDP4_DMA_CURSOR_SIZE_WIDTH__MASK 0x0000007f
+#define MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_DMA_CURSOR_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT) & MDP4_DMA_CURSOR_SIZE_WIDTH__MASK;
+}
+#define MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK 0x007f0000
+#define MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_DMA_CURSOR_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT) & MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK;
+}
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_BASE(enum mdp4_dma i0) { return 0x00000048 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_POS(enum mdp4_dma i0) { return 0x0000004c + __offset_DMA(i0); }
+#define MDP4_DMA_CURSOR_POS_X__MASK 0x0000ffff
+#define MDP4_DMA_CURSOR_POS_X__SHIFT 0
+static inline uint32_t MDP4_DMA_CURSOR_POS_X(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CURSOR_POS_X__SHIFT) & MDP4_DMA_CURSOR_POS_X__MASK;
+}
+#define MDP4_DMA_CURSOR_POS_Y__MASK 0xffff0000
+#define MDP4_DMA_CURSOR_POS_Y__SHIFT 16
+static inline uint32_t MDP4_DMA_CURSOR_POS_Y(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CURSOR_POS_Y__SHIFT) & MDP4_DMA_CURSOR_POS_Y__MASK;
+}
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_CONFIG(enum mdp4_dma i0) { return 0x00000060 + __offset_DMA(i0); }
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN 0x00000001
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK 0x00000006
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT 1
+static inline uint32_t MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(enum mdp4_cursor_format val)
+{
+ return ((val) << MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT) & MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK;
+}
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_TRANSP_EN 0x00000008
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_PARAM(enum mdp4_dma i0) { return 0x00000064 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_LOW(enum mdp4_dma i0) { return 0x00000068 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_HIGH(enum mdp4_dma i0) { return 0x0000006c + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_FETCH_CONFIG(enum mdp4_dma i0) { return 0x00001004 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_CSC(enum mdp4_dma i0) { return 0x00003000 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_CSC_MV(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_MV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SRC_SIZE_HEIGHT__MASK;
+}
+#define MDP4_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mdp4_pipe i0) { return 0x00020004 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_XY_Y__MASK 0xffff0000
+#define MDP4_PIPE_SRC_XY_Y__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_XY_Y__SHIFT) & MDP4_PIPE_SRC_XY_Y__MASK;
+}
+#define MDP4_PIPE_SRC_XY_X__MASK 0x0000ffff
+#define MDP4_PIPE_SRC_XY_X__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_XY_X(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mdp4_pipe i0) { return 0x00020008 + 0x10000*i0; }
+#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_DST_SIZE_HEIGHT__MASK;
+}
+#define MDP4_PIPE_DST_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_PIPE_DST_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_PIPE_DST_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mdp4_pipe i0) { return 0x0002000c + 0x10000*i0; }
+#define MDP4_PIPE_DST_XY_Y__MASK 0xffff0000
+#define MDP4_PIPE_DST_XY_Y__SHIFT 16
+static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_DST_XY_Y__SHIFT) & MDP4_PIPE_DST_XY_Y__MASK;
+}
+#define MDP4_PIPE_DST_XY_X__MASK 0x0000ffff
+#define MDP4_PIPE_DST_XY_X__SHIFT 0
+static inline uint32_t MDP4_PIPE_DST_XY_X(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mdp4_pipe i0) { return 0x00020010 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mdp4_pipe i0) { return 0x00020014 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mdp4_pipe i0) { return 0x00020018 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRCP3_BASE(enum mdp4_pipe i0) { return 0x0002001c + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mdp4_pipe i0) { return 0x00020040 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
+#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P0__MASK;
+}
+#define MDP4_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000
+#define MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P1(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mdp4_pipe i0) { return 0x00020044 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
+#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P2__MASK;
+}
+#define MDP4_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000
+#define MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SSTILE_FRAME_SIZE(enum mdp4_pipe i0) { return 0x00020048 + 0x10000*i0; }
+#define MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__MASK;
+}
+#define MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
+#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
+#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
+#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
+#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100
+#define MDP4_PIPE_SRC_FORMAT_CPP__MASK 0x00000600
+#define MDP4_PIPE_SRC_FORMAT_CPP__SHIFT 9
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_CPP(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CPP__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_ROTATED_90 0x00001000
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00006000
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 13
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
+#define MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__MASK 0x00180000
+#define MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__SHIFT 19
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__SHIFT) & MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000
+#define MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x0c000000
+#define MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 26
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp_chroma_samp_type val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__MASK 0x60000000
+#define MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__SHIFT 29
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT(enum mdp4_frame_format val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__SHIFT) & MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mdp4_pipe i0) { return 0x00020054 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
+#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM0__MASK;
+}
+#define MDP4_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00
+#define MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT 8
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM1(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM1__MASK;
+}
+#define MDP4_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000
+#define MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM2(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM2__MASK;
+}
+#define MDP4_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000
+#define MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT 24
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mdp4_pipe i0) { return 0x00020058 + 0x10000*i0; }
+#define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001
+#define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002
+#define MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__MASK 0x0000000c
+#define MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__SHIFT 2
+static inline uint32_t MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL(enum mdp4_scale_unit val)
+{
+ return ((val) << MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__SHIFT) & MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__MASK;
+}
+#define MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__MASK 0x00000030
+#define MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__SHIFT 4
+static inline uint32_t MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL(enum mdp4_scale_unit val)
+{
+ return ((val) << MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__SHIFT) & MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__MASK;
+}
+#define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200
+#define MDP4_PIPE_OP_MODE_DST_YCBCR 0x00000400
+#define MDP4_PIPE_OP_MODE_CSC_EN 0x00000800
+#define MDP4_PIPE_OP_MODE_FLIP_LR 0x00002000
+#define MDP4_PIPE_OP_MODE_FLIP_UD 0x00004000
+#define MDP4_PIPE_OP_MODE_DITHER_EN 0x00008000
+#define MDP4_PIPE_OP_MODE_IGC_LUT_EN 0x00010000
+#define MDP4_PIPE_OP_MODE_DEINT_EN 0x00040000
+#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF 0x00080000
+
+static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mdp4_pipe i0) { return 0x0002005c + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mdp4_pipe i0) { return 0x00020060 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mdp4_pipe i0) { return 0x00021004 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mdp4_pipe i0) { return 0x00021008 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC(enum mdp4_pipe i0) { return 0x00024000 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
+
+#define REG_MDP4_LCDC 0x000c0000
+
+#define REG_MDP4_LCDC_ENABLE 0x000c0000
+
+#define REG_MDP4_LCDC_HSYNC_CTRL 0x000c0004
+#define MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
+#define MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT 0
+static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PULSEW(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK;
+}
+#define MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK 0xffff0000
+#define MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT 16
+static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PERIOD(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK;
+}
+
+#define REG_MDP4_LCDC_VSYNC_PERIOD 0x000c0008
+
+#define REG_MDP4_LCDC_VSYNC_LEN 0x000c000c
+
+#define REG_MDP4_LCDC_DISPLAY_HCTRL 0x000c0010
+#define MDP4_LCDC_DISPLAY_HCTRL_START__MASK 0x0000ffff
+#define MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT 0
+static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_START(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_START__MASK;
+}
+#define MDP4_LCDC_DISPLAY_HCTRL_END__MASK 0xffff0000
+#define MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT 16
+static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_END(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_END__MASK;
+}
+
+#define REG_MDP4_LCDC_DISPLAY_VSTART 0x000c0014
+
+#define REG_MDP4_LCDC_DISPLAY_VEND 0x000c0018
+
+#define REG_MDP4_LCDC_ACTIVE_HCTL 0x000c001c
+#define MDP4_LCDC_ACTIVE_HCTL_START__MASK 0x00007fff
+#define MDP4_LCDC_ACTIVE_HCTL_START__SHIFT 0
+static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_ACTIVE_HCTL_START__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_START__MASK;
+}
+#define MDP4_LCDC_ACTIVE_HCTL_END__MASK 0x7fff0000
+#define MDP4_LCDC_ACTIVE_HCTL_END__SHIFT 16
+static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_ACTIVE_HCTL_END__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_END__MASK;
+}
+#define MDP4_LCDC_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
+
+#define REG_MDP4_LCDC_ACTIVE_VSTART 0x000c0020
+
+#define REG_MDP4_LCDC_ACTIVE_VEND 0x000c0024
+
+#define REG_MDP4_LCDC_BORDER_CLR 0x000c0028
+
+#define REG_MDP4_LCDC_UNDERFLOW_CLR 0x000c002c
+#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
+#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT 0
+static inline uint32_t MDP4_LCDC_UNDERFLOW_CLR_COLOR(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK;
+}
+#define MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
+
+#define REG_MDP4_LCDC_HSYNC_SKEW 0x000c0030
+
+#define REG_MDP4_LCDC_TEST_CNTL 0x000c0034
+
+#define REG_MDP4_LCDC_CTRL_POLARITY 0x000c0038
+#define MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW 0x00000001
+#define MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW 0x00000002
+#define MDP4_LCDC_CTRL_POLARITY_DATA_EN_LOW 0x00000004
+
+#define REG_MDP4_LCDC_LVDS_INTF_CTL 0x000c2000
+#define MDP4_LCDC_LVDS_INTF_CTL_MODE_SEL 0x00000004
+#define MDP4_LCDC_LVDS_INTF_CTL_RGB_OUT 0x00000008
+#define MDP4_LCDC_LVDS_INTF_CTL_CH_SWAP 0x00000010
+#define MDP4_LCDC_LVDS_INTF_CTL_CH1_RES_BIT 0x00000020
+#define MDP4_LCDC_LVDS_INTF_CTL_CH2_RES_BIT 0x00000040
+#define MDP4_LCDC_LVDS_INTF_CTL_ENABLE 0x00000080
+#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN 0x00000100
+#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN 0x00000200
+#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN 0x00000400
+#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE3_EN 0x00000800
+#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE0_EN 0x00001000
+#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE1_EN 0x00002000
+#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE2_EN 0x00004000
+#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE3_EN 0x00008000
+#define MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN 0x00010000
+#define MDP4_LCDC_LVDS_INTF_CTL_CH2_CLK_LANE_EN 0x00020000
+
+static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL(uint32_t i0) { return 0x000c2014 + 0x8*i0; }
+
+static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(uint32_t i0) { return 0x000c2014 + 0x8*i0; }
+#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__MASK 0x000000ff
+#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__SHIFT 0
+static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__MASK;
+}
+#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__MASK 0x0000ff00
+#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__SHIFT 8
+static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__MASK;
+}
+#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__MASK 0x00ff0000
+#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__SHIFT 16
+static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__MASK;
+}
+#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__MASK 0xff000000
+#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__SHIFT 24
+static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__MASK;
+}
+
+static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(uint32_t i0) { return 0x000c2018 + 0x8*i0; }
+#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__MASK 0x000000ff
+#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__SHIFT 0
+static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__MASK;
+}
+#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__MASK 0x0000ff00
+#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__SHIFT 8
+static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__MASK;
+}
+#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__MASK 0x00ff0000
+#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__SHIFT 16
+static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__MASK;
+}
+
+#define REG_MDP4_LCDC_LVDS_PHY_RESET 0x000c2034
+
+#define REG_MDP4_LVDS_PHY_PLL_CTRL_0 0x000c3000
+
+#define REG_MDP4_LVDS_PHY_PLL_CTRL_1 0x000c3004
+
+#define REG_MDP4_LVDS_PHY_PLL_CTRL_2 0x000c3008
+
+#define REG_MDP4_LVDS_PHY_PLL_CTRL_3 0x000c300c
+
+#define REG_MDP4_LVDS_PHY_PLL_CTRL_5 0x000c3014
+
+#define REG_MDP4_LVDS_PHY_PLL_CTRL_6 0x000c3018
+
+#define REG_MDP4_LVDS_PHY_PLL_CTRL_7 0x000c301c
+
+#define REG_MDP4_LVDS_PHY_PLL_CTRL_8 0x000c3020
+
+#define REG_MDP4_LVDS_PHY_PLL_CTRL_9 0x000c3024
+
+#define REG_MDP4_LVDS_PHY_PLL_LOCKED 0x000c3080
+
+#define REG_MDP4_LVDS_PHY_CFG2 0x000c3108
+
+#define REG_MDP4_LVDS_PHY_CFG0 0x000c3100
+#define MDP4_LVDS_PHY_CFG0_SERIALIZATION_ENBLE 0x00000010
+#define MDP4_LVDS_PHY_CFG0_CHANNEL0 0x00000040
+#define MDP4_LVDS_PHY_CFG0_CHANNEL1 0x00000080
+
+#define REG_MDP4_DTV 0x000d0000
+
+#define REG_MDP4_DTV_ENABLE 0x000d0000
+
+#define REG_MDP4_DTV_HSYNC_CTRL 0x000d0004
+#define MDP4_DTV_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
+#define MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT 0
+static inline uint32_t MDP4_DTV_HSYNC_CTRL_PULSEW(uint32_t val)
+{
+ return ((val) << MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DTV_HSYNC_CTRL_PULSEW__MASK;
+}
+#define MDP4_DTV_HSYNC_CTRL_PERIOD__MASK 0xffff0000
+#define MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT 16
+static inline uint32_t MDP4_DTV_HSYNC_CTRL_PERIOD(uint32_t val)
+{
+ return ((val) << MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DTV_HSYNC_CTRL_PERIOD__MASK;
+}
+
+#define REG_MDP4_DTV_VSYNC_PERIOD 0x000d0008
+
+#define REG_MDP4_DTV_VSYNC_LEN 0x000d000c
+
+#define REG_MDP4_DTV_DISPLAY_HCTRL 0x000d0018
+#define MDP4_DTV_DISPLAY_HCTRL_START__MASK 0x0000ffff
+#define MDP4_DTV_DISPLAY_HCTRL_START__SHIFT 0
+static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_START(uint32_t val)
+{
+ return ((val) << MDP4_DTV_DISPLAY_HCTRL_START__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_START__MASK;
+}
+#define MDP4_DTV_DISPLAY_HCTRL_END__MASK 0xffff0000
+#define MDP4_DTV_DISPLAY_HCTRL_END__SHIFT 16
+static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_END(uint32_t val)
+{
+ return ((val) << MDP4_DTV_DISPLAY_HCTRL_END__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_END__MASK;
+}
+
+#define REG_MDP4_DTV_DISPLAY_VSTART 0x000d001c
+
+#define REG_MDP4_DTV_DISPLAY_VEND 0x000d0020
+
+#define REG_MDP4_DTV_ACTIVE_HCTL 0x000d002c
+#define MDP4_DTV_ACTIVE_HCTL_START__MASK 0x00007fff
+#define MDP4_DTV_ACTIVE_HCTL_START__SHIFT 0
+static inline uint32_t MDP4_DTV_ACTIVE_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP4_DTV_ACTIVE_HCTL_START__SHIFT) & MDP4_DTV_ACTIVE_HCTL_START__MASK;
+}
+#define MDP4_DTV_ACTIVE_HCTL_END__MASK 0x7fff0000
+#define MDP4_DTV_ACTIVE_HCTL_END__SHIFT 16
+static inline uint32_t MDP4_DTV_ACTIVE_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP4_DTV_ACTIVE_HCTL_END__SHIFT) & MDP4_DTV_ACTIVE_HCTL_END__MASK;
+}
+#define MDP4_DTV_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
+
+#define REG_MDP4_DTV_ACTIVE_VSTART 0x000d0030
+
+#define REG_MDP4_DTV_ACTIVE_VEND 0x000d0038
+
+#define REG_MDP4_DTV_BORDER_CLR 0x000d0040
+
+#define REG_MDP4_DTV_UNDERFLOW_CLR 0x000d0044
+#define MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
+#define MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT 0
+static inline uint32_t MDP4_DTV_UNDERFLOW_CLR_COLOR(uint32_t val)
+{
+ return ((val) << MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK;
+}
+#define MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
+
+#define REG_MDP4_DTV_HSYNC_SKEW 0x000d0048
+
+#define REG_MDP4_DTV_TEST_CNTL 0x000d004c
+
+#define REG_MDP4_DTV_CTRL_POLARITY 0x000d0050
+#define MDP4_DTV_CTRL_POLARITY_HSYNC_LOW 0x00000001
+#define MDP4_DTV_CTRL_POLARITY_VSYNC_LOW 0x00000002
+#define MDP4_DTV_CTRL_POLARITY_DATA_EN_LOW 0x00000004
+
+#define REG_MDP4_DSI 0x000e0000
+
+#define REG_MDP4_DSI_ENABLE 0x000e0000
+
+#define REG_MDP4_DSI_HSYNC_CTRL 0x000e0004
+#define MDP4_DSI_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
+#define MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT 0
+static inline uint32_t MDP4_DSI_HSYNC_CTRL_PULSEW(uint32_t val)
+{
+ return ((val) << MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DSI_HSYNC_CTRL_PULSEW__MASK;
+}
+#define MDP4_DSI_HSYNC_CTRL_PERIOD__MASK 0xffff0000
+#define MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT 16
+static inline uint32_t MDP4_DSI_HSYNC_CTRL_PERIOD(uint32_t val)
+{
+ return ((val) << MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DSI_HSYNC_CTRL_PERIOD__MASK;
+}
+
+#define REG_MDP4_DSI_VSYNC_PERIOD 0x000e0008
+
+#define REG_MDP4_DSI_VSYNC_LEN 0x000e000c
+
+#define REG_MDP4_DSI_DISPLAY_HCTRL 0x000e0010
+#define MDP4_DSI_DISPLAY_HCTRL_START__MASK 0x0000ffff
+#define MDP4_DSI_DISPLAY_HCTRL_START__SHIFT 0
+static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_START(uint32_t val)
+{
+ return ((val) << MDP4_DSI_DISPLAY_HCTRL_START__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_START__MASK;
+}
+#define MDP4_DSI_DISPLAY_HCTRL_END__MASK 0xffff0000
+#define MDP4_DSI_DISPLAY_HCTRL_END__SHIFT 16
+static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_END(uint32_t val)
+{
+ return ((val) << MDP4_DSI_DISPLAY_HCTRL_END__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_END__MASK;
+}
+
+#define REG_MDP4_DSI_DISPLAY_VSTART 0x000e0014
+
+#define REG_MDP4_DSI_DISPLAY_VEND 0x000e0018
+
+#define REG_MDP4_DSI_ACTIVE_HCTL 0x000e001c
+#define MDP4_DSI_ACTIVE_HCTL_START__MASK 0x00007fff
+#define MDP4_DSI_ACTIVE_HCTL_START__SHIFT 0
+static inline uint32_t MDP4_DSI_ACTIVE_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP4_DSI_ACTIVE_HCTL_START__SHIFT) & MDP4_DSI_ACTIVE_HCTL_START__MASK;
+}
+#define MDP4_DSI_ACTIVE_HCTL_END__MASK 0x7fff0000
+#define MDP4_DSI_ACTIVE_HCTL_END__SHIFT 16
+static inline uint32_t MDP4_DSI_ACTIVE_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP4_DSI_ACTIVE_HCTL_END__SHIFT) & MDP4_DSI_ACTIVE_HCTL_END__MASK;
+}
+#define MDP4_DSI_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
+
+#define REG_MDP4_DSI_ACTIVE_VSTART 0x000e0020
+
+#define REG_MDP4_DSI_ACTIVE_VEND 0x000e0024
+
+#define REG_MDP4_DSI_BORDER_CLR 0x000e0028
+
+#define REG_MDP4_DSI_UNDERFLOW_CLR 0x000e002c
+#define MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
+#define MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT 0
+static inline uint32_t MDP4_DSI_UNDERFLOW_CLR_COLOR(uint32_t val)
+{
+ return ((val) << MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK;
+}
+#define MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
+
+#define REG_MDP4_DSI_HSYNC_SKEW 0x000e0030
+
+#define REG_MDP4_DSI_TEST_CNTL 0x000e0034
+
+#define REG_MDP4_DSI_CTRL_POLARITY 0x000e0038
+#define MDP4_DSI_CTRL_POLARITY_HSYNC_LOW 0x00000001
+#define MDP4_DSI_CTRL_POLARITY_VSYNC_LOW 0x00000002
+#define MDP4_DSI_CTRL_POLARITY_DATA_EN_LOW 0x00000004
+
+#endif /* MDP4_XML */
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
new file mode 100644
index 000000000..310095722
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_flip_work.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "mdp4_kms.h"
+#include "msm_gem.h"
+
+struct mdp4_crtc {
+ struct drm_crtc base;
+ char name[8];
+ int id;
+ int ovlp;
+ enum mdp4_dma dma;
+ bool enabled;
+
+ /* which mixer/encoder we route output to: */
+ int mixer;
+
+ struct {
+ spinlock_t lock;
+ bool stale;
+ uint32_t width, height;
+ uint32_t x, y;
+
+ /* next cursor to scan-out: */
+ uint32_t next_iova;
+ struct drm_gem_object *next_bo;
+
+ /* current cursor being scanned out: */
+ struct drm_gem_object *scanout_bo;
+ } cursor;
+
+ /* if there is a pending flip, these will be non-null: */
+ struct drm_pending_vblank_event *event;
+
+	/* Bits that were flushed at the last commit; used to decide
+	 * whether a vsync has happened since then.
+	 */
+ u32 flushed_mask;
+
+#define PENDING_CURSOR 0x1
+#define PENDING_FLIP 0x2
+ atomic_t pending;
+
+ /* for unref'ing cursor bo's after scanout completes: */
+ struct drm_flip_work unref_cursor_work;
+
+ struct mdp_irq vblank;
+ struct mdp_irq err;
+};
+#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)
+
+static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
+}
+
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+ atomic_or(pending, &mdp4_crtc->pending);
+ mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
+}
+
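+/* OR together the flush bits of every pipe attached to this crtc plus
+ * its overlay, and write them to OVERLAY_FLUSH so the double-buffered
+ * register updates latch on the next vblank:
+ */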
+static void crtc_flush(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ struct drm_plane *plane;
+ uint32_t flush = 0;
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+ flush |= pipe2flush(pipe_id);
+ }
+
+ flush |= ovlp2flush(mdp4_crtc->ovlp);
+
+ DBG("%s: flush=%08x", mdp4_crtc->name, flush);
+
+ mdp4_crtc->flushed_mask = flush;
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
+}
+
+/* if file != NULL, this is the preclose path, where a pending flip may be cancelled */
+static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = mdp4_crtc->event;
+ if (event) {
+ mdp4_crtc->event = NULL;
+ DBG("%s: send event: %p", mdp4_crtc->name, event);
+ drm_crtc_send_vblank_event(crtc, event);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static void unref_cursor_worker(struct drm_flip_work *work, void *val)
+{
+ struct mdp4_crtc *mdp4_crtc =
+ container_of(work, struct mdp4_crtc, unref_cursor_work);
+ struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
+ struct msm_kms *kms = &mdp4_kms->base.base;
+
+ msm_gem_unpin_iova(val, kms->aspace);
+ drm_gem_object_put(val);
+}
+
+static void mdp4_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
+
+ kfree(mdp4_crtc);
+}
+
+/* statically (for now) map planes to mixer stage (z-order): */
+static const int idxs[] = {
+ [VG1] = 1,
+ [VG2] = 2,
+ [RGB1] = 0,
+ [RGB2] = 0,
+ [RGB3] = 0,
+ [VG3] = 3,
+	[VG4] = 4,
+};
+
+/* set up the mixer config, which requires considering all CRTCs and
+ * the planes attached to them
+ *
+ * TODO: may need extra locking here
+ */
+static void setup_mixer(struct mdp4_kms *mdp4_kms)
+{
+ struct drm_mode_config *config = &mdp4_kms->dev->mode_config;
+ struct drm_crtc *crtc;
+ uint32_t mixer_cfg = 0;
+ static const enum mdp_mixer_stage_id stages[] = {
+ STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
+ };
+
+ list_for_each_entry(crtc, &config->crtc_list, head) {
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_plane *plane;
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+ int idx = idxs[pipe_id];
+ mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
+ pipe_id, stages[idx]);
+ }
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
+}
+
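+/* program the per-stage blend ops for this crtc's overlay, based on
+ * whether each attached plane's format has an alpha channel:
+ */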
+static void blend_setup(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ struct drm_plane *plane;
+ int i, ovlp = mdp4_crtc->ovlp;
+	bool alpha[4] = { false, false, false, false };
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+ int idx = idxs[pipe_id];
+ if (idx > 0) {
+ const struct mdp_format *format =
+ to_mdp_format(msm_framebuffer_format(plane->state->fb));
+ alpha[idx-1] = format->alpha_enable;
+ }
+ }
+
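+	/* stages fed by an alpha-capable format blend with per-pixel FG
+	 * alpha; the rest fall back to constant alpha:
+	 */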
+ for (i = 0; i < 4; i++) {
+ uint32_t op;
+
+ if (alpha[i]) {
+ op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
+ MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
+ MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
+ } else {
+ op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
+ MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
+ }
+
+ setup_mixer(mdp4_kms);
+}
+
+static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ enum mdp4_dma dma = mdp4_crtc->dma;
+ int ovlp = mdp4_crtc->ovlp;
+ struct drm_display_mode *mode;
+
+ if (WARN_ON(!crtc->state))
+ return;
+
+ mode = &crtc->state->adjusted_mode;
+
+ DBG("%s: set mode: " DRM_MODE_FMT,
+ mdp4_crtc->name, DRM_MODE_ARG(mode));
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
+ MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
+ MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
+
+ /* take data from pipe: */
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
+ MDP4_DMA_DST_SIZE_WIDTH(0) |
+ MDP4_DMA_DST_SIZE_HEIGHT(0));
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
+ MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
+ MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0);
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
+
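+	/* DMA_E (TV-out) additionally needs its per-component
+	 * quantization configured:
+	 */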
+ if (dma == DMA_E) {
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
+ }
+}
+
+static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ unsigned long flags;
+
+ DBG("%s", mdp4_crtc->name);
+
+ if (WARN_ON(!mdp4_crtc->enabled))
+ return;
+
+ /* Disable/save vblank irq handling before power is disabled */
+ drm_crtc_vblank_off(crtc);
+
+ mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
+ mdp4_disable(mdp4_kms);
+
+ if (crtc->state->event && !crtc->state->active) {
+ WARN_ON(mdp4_crtc->event);
+ spin_lock_irqsave(&mdp4_kms->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&mdp4_kms->dev->event_lock, flags);
+ }
+
+ mdp4_crtc->enabled = false;
+}
+
+static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+
+ DBG("%s", mdp4_crtc->name);
+
+ if (WARN_ON(mdp4_crtc->enabled))
+ return;
+
+ mdp4_enable(mdp4_kms);
+
+ /* Restore vblank irq handling after power is enabled */
+ drm_crtc_vblank_on(crtc);
+
+ mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);
+
+ crtc_flush(crtc);
+
+ mdp4_crtc->enabled = true;
+}
+
+static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ DBG("%s: check", mdp4_crtc->name);
+	/* TODO: anything else to check? */
+ return 0;
+}
+
+static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ DBG("%s: begin", mdp4_crtc->name);
+}
+
+static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);
+
+ WARN_ON(mdp4_crtc->event);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ mdp4_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ blend_setup(crtc);
+ crtc_flush(crtc);
+ request_pending(crtc, PENDING_FLIP);
+}
+
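+/* maximum hw cursor dimensions accepted by this driver: */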
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+
+/* called from IRQ to update cursor related registers (if needed). The
+ * cursor registers, other than x/y position, appear not to be double
+ * buffered, and changing them other than from vblank seems to trigger
+ * underflow.
+ */
+static void update_cursor(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ struct msm_kms *kms = &mdp4_kms->base.base;
+ enum mdp4_dma dma = mdp4_crtc->dma;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+ if (mdp4_crtc->cursor.stale) {
+ struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
+ struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
+ uint64_t iova = mdp4_crtc->cursor.next_iova;
+
+ if (next_bo) {
+			/* take an obj ref + iova ref when we start scanning out: */
+ drm_gem_object_get(next_bo);
+ msm_gem_get_and_pin_iova(next_bo, kms->aspace, &iova);
+
+ /* enable cursor: */
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
+ MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
+ MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
+ MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
+ MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
+ } else {
+ /* disable cursor: */
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
+ mdp4_kms->blank_cursor_iova);
+ }
+
+		/* and drop the iova ref + obj ref when done scanning out: */
+ if (prev_bo)
+ drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);
+
+ mdp4_crtc->cursor.scanout_bo = next_bo;
+ mdp4_crtc->cursor.stale = false;
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
+ MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
+ MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));
+
+ spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+}
+
+static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t width, uint32_t height)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ struct msm_kms *kms = &mdp4_kms->base.base;
+ struct drm_device *dev = crtc->dev;
+ struct drm_gem_object *cursor_bo, *old_bo;
+ unsigned long flags;
+ uint64_t iova;
+ int ret;
+
+ if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
+ DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
+ return -EINVAL;
+ }
+
+ if (handle) {
+ cursor_bo = drm_gem_object_lookup(file_priv, handle);
+ if (!cursor_bo)
+ return -ENOENT;
+ } else {
+ cursor_bo = NULL;
+ }
+
+ if (cursor_bo) {
+ ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &iova);
+ if (ret)
+ goto fail;
+ } else {
+ iova = 0;
+ }
+
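+	/* stash the new cursor state; the actual hw update is deferred
+	 * to vblank via update_cursor():
+	 */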
+ spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+ old_bo = mdp4_crtc->cursor.next_bo;
+ mdp4_crtc->cursor.next_bo = cursor_bo;
+ mdp4_crtc->cursor.next_iova = iova;
+ mdp4_crtc->cursor.width = width;
+ mdp4_crtc->cursor.height = height;
+ mdp4_crtc->cursor.stale = true;
+ spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+
+ if (old_bo) {
+ /* drop our previous reference: */
+ drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
+ }
+
+ request_pending(crtc, PENDING_CURSOR);
+
+ return 0;
+
+fail:
+ drm_gem_object_put(cursor_bo);
+ return ret;
+}
+
+static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+ mdp4_crtc->cursor.x = x;
+ mdp4_crtc->cursor.y = y;
+ spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+
+ crtc_flush(crtc);
+ request_pending(crtc, PENDING_CURSOR);
+
+ return 0;
+}
+
+static const struct drm_crtc_funcs mdp4_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = mdp4_crtc_destroy,
+ .page_flip = drm_atomic_helper_page_flip,
+ .cursor_set = mdp4_crtc_cursor_set,
+ .cursor_move = mdp4_crtc_cursor_move,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = msm_crtc_enable_vblank,
+ .disable_vblank = msm_crtc_disable_vblank,
+};
+
+static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
+ .mode_set_nofb = mdp4_crtc_mode_set_nofb,
+ .atomic_check = mdp4_crtc_atomic_check,
+ .atomic_begin = mdp4_crtc_atomic_begin,
+ .atomic_flush = mdp4_crtc_atomic_flush,
+ .atomic_enable = mdp4_crtc_atomic_enable,
+ .atomic_disable = mdp4_crtc_atomic_disable,
+};
+
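+/* vblank irq: runs the deferred flip-completion and cursor work queued
+ * via request_pending():
+ */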
+static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
+ struct drm_crtc *crtc = &mdp4_crtc->base;
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+ unsigned pending;
+
+ mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);
+
+ pending = atomic_xchg(&mdp4_crtc->pending, 0);
+
+ if (pending & PENDING_FLIP) {
+ complete_flip(crtc, NULL);
+ }
+
+ if (pending & PENDING_CURSOR) {
+ update_cursor(crtc);
+ drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
+ }
+}
+
+static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
+ struct drm_crtc *crtc = &mdp4_crtc->base;
+ DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
+ crtc_flush(crtc);
+}
+
+static void mdp4_crtc_wait_for_flush_done(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ int ret;
+
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret)
+ return;
+
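+	/* the flushed bits read back from OVERLAY_FLUSH clear once the
+	 * hardware has latched them, so wait until ours are gone:
+	 */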
+ ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
+ !(mdp4_read(mdp4_kms, REG_MDP4_OVERLAY_FLUSH) &
+ mdp4_crtc->flushed_mask),
+ msecs_to_jiffies(50));
+ if (ret <= 0)
+ dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp4_crtc->id);
+
+ mdp4_crtc->flushed_mask = 0;
+
+ drm_crtc_vblank_put(crtc);
+}
+
+uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ return mdp4_crtc->vblank.irqmask;
+}
+
+/* set dma config, i.e. the format the encoder wants. */
+void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
+}
+
+/* set interface for routing crtc->encoder: */
+void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ uint32_t intf_sel;
+
+ intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);
+
+ switch (mdp4_crtc->dma) {
+ case DMA_P:
+ intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
+ intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
+ break;
+ case DMA_S:
+ intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
+ intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
+ break;
+ case DMA_E:
+ intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
+ intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
+ break;
+ }
+
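+	/* DSI video and command mode selections are mutually exclusive: */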
+ if (intf == INTF_DSI_VIDEO) {
+ intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
+ intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
+ } else if (intf == INTF_DSI_CMD) {
+ intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
+ intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
+ }
+
+ mdp4_crtc->mixer = mixer;
+
+ blend_setup(crtc);
+
+ DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
+}
+
+void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc)
+{
+	/* wait_for_flush_done is the only case for now.
+	 * Later we will have command-mode CRTCs that need to wait
+	 * for other events.
+	 */
+ mdp4_crtc_wait_for_flush_done(crtc);
+}
+
+static const char *dma_names[] = {
+ "DMA_P", "DMA_S", "DMA_E",
+};
+
+/* initialize crtc */
+struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane, int id, int ovlp_id,
+ enum mdp4_dma dma_id)
+{
+ struct drm_crtc *crtc = NULL;
+ struct mdp4_crtc *mdp4_crtc;
+
+ mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
+ if (!mdp4_crtc)
+ return ERR_PTR(-ENOMEM);
+
+ crtc = &mdp4_crtc->base;
+
+ mdp4_crtc->id = id;
+
+ mdp4_crtc->ovlp = ovlp_id;
+ mdp4_crtc->dma = dma_id;
+
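+	/* each DMA engine has dedicated vblank and error irq bits: */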
+ mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
+ mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;
+
+ mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
+ mdp4_crtc->err.irq = mdp4_crtc_err_irq;
+
+ snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
+ dma_names[dma_id], ovlp_id);
+
+ spin_lock_init(&mdp4_crtc->cursor.lock);
+
+ drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
+ "unref cursor", unref_cursor_worker);
+
+ drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
+ NULL);
+ drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
+
+ return crtc;
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
new file mode 100644
index 000000000..39b8fe53c
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, Inforce Computing. All rights reserved.
+ *
+ * Author: Vinay Simha <vinaysimha@inforcecomputing.com>
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_probe_helper.h>
+
+#include "mdp4_kms.h"
+
+#ifdef CONFIG_DRM_MSM_DSI
+
+struct mdp4_dsi_encoder {
+ struct drm_encoder base;
+ struct drm_panel *panel;
+ bool enabled;
+};
+#define to_mdp4_dsi_encoder(x) container_of(x, struct mdp4_dsi_encoder, base)
+
+static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
+}
+
+static void mdp4_dsi_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(mdp4_dsi_encoder);
+}
+
+static const struct drm_encoder_funcs mdp4_dsi_encoder_funcs = {
+ .destroy = mdp4_dsi_encoder_destroy,
+};
+
+static void mdp4_dsi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ uint32_t dsi_hsync_skew, vsync_period, vsync_len, ctrl_pol;
+ uint32_t display_v_start, display_v_end;
+ uint32_t hsync_start_x, hsync_end_x;
+
+ mode = adjusted_mode;
+
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
+
+ ctrl_pol = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl_pol |= MDP4_DSI_CTRL_POLARITY_HSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl_pol |= MDP4_DSI_CTRL_POLARITY_VSYNC_LOW;
+ /* probably need to get DATA_EN polarity from panel.. */
+
+ dsi_hsync_skew = 0; /* get this from panel? */
+
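+	/* horizontal timings are in pixels from the start of hsync;
+	 * vertical timings are programmed in pixel clocks, hence the
+	 * scaling by htotal:
+	 */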
+ hsync_start_x = (mode->htotal - mode->hsync_start);
+ hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
+
+ vsync_period = mode->vtotal * mode->htotal;
+ vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
+ display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dsi_hsync_skew;
+ display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dsi_hsync_skew - 1;
+
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_HSYNC_CTRL,
+ MDP4_DSI_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
+ MDP4_DSI_HSYNC_CTRL_PERIOD(mode->htotal));
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_VSYNC_PERIOD, vsync_period);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_VSYNC_LEN, vsync_len);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_HCTRL,
+ MDP4_DSI_DISPLAY_HCTRL_START(hsync_start_x) |
+ MDP4_DSI_DISPLAY_HCTRL_END(hsync_end_x));
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_VSTART, display_v_start);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_VEND, display_v_end);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_CTRL_POLARITY, ctrl_pol);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_UNDERFLOW_CLR,
+ MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY |
+ MDP4_DSI_UNDERFLOW_CLR_COLOR(0xff));
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_HCTL,
+ MDP4_DSI_ACTIVE_HCTL_START(0) |
+ MDP4_DSI_ACTIVE_HCTL_END(0));
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_HSYNC_SKEW, dsi_hsync_skew);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_BORDER_CLR, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_VSTART, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_VEND, 0);
+}
+
+static void mdp4_dsi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+
+ if (!mdp4_dsi_encoder->enabled)
+ return;
+
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
+
+	/*
+	 * Wait for a vsync so we know ENABLE=0 has latched before the
+	 * (connector) source of the vsyncs gets disabled; otherwise we
+	 * end up in a funny state if we re-enable before the disable
+	 * latches, which results in some of the settings changes for
+	 * the new modeset (like the new scanout buffer) not latching
+	 * properly.
+	 */
+ mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC);
+
+ mdp4_dsi_encoder->enabled = false;
+}
+
+static void mdp4_dsi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+
+ if (mdp4_dsi_encoder->enabled)
+ return;
+
+ mdp4_crtc_set_config(encoder->crtc,
+ MDP4_DMA_CONFIG_PACK_ALIGN_MSB |
+ MDP4_DMA_CONFIG_DEFLKR_EN |
+ MDP4_DMA_CONFIG_DITHER_EN |
+ MDP4_DMA_CONFIG_R_BPC(BPC8) |
+ MDP4_DMA_CONFIG_G_BPC(BPC8) |
+ MDP4_DMA_CONFIG_B_BPC(BPC8) |
+ MDP4_DMA_CONFIG_PACK(0x21));
+
+ mdp4_crtc_set_intf(encoder->crtc, INTF_DSI_VIDEO, 0);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 1);
+
+ mdp4_dsi_encoder->enabled = true;
+}
+
+static const struct drm_encoder_helper_funcs mdp4_dsi_encoder_helper_funcs = {
+ .mode_set = mdp4_dsi_encoder_mode_set,
+ .disable = mdp4_dsi_encoder_disable,
+ .enable = mdp4_dsi_encoder_enable,
+};
+
+/* initialize encoder */
+struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
+{
+ struct drm_encoder *encoder = NULL;
+ struct mdp4_dsi_encoder *mdp4_dsi_encoder;
+ int ret;
+
+ mdp4_dsi_encoder = kzalloc(sizeof(*mdp4_dsi_encoder), GFP_KERNEL);
+ if (!mdp4_dsi_encoder) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ encoder = &mdp4_dsi_encoder->base;
+
+ drm_encoder_init(dev, encoder, &mdp4_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_DSI, NULL);
+ drm_encoder_helper_add(encoder, &mdp4_dsi_encoder_helper_funcs);
+
+ return encoder;
+
+fail:
+ if (encoder)
+ mdp4_dsi_encoder_destroy(encoder);
+
+ return ERR_PTR(ret);
+}
+#endif /* CONFIG_DRM_MSM_DSI */
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
new file mode 100644
index 000000000..88645dbc3
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_probe_helper.h>
+
+#include "mdp4_kms.h"
+
+struct mdp4_dtv_encoder {
+ struct drm_encoder base;
+ struct clk *hdmi_clk;
+ struct clk *mdp_clk;
+	unsigned long pixclock;
+ bool enabled;
+ uint32_t bsc;
+};
+#define to_mdp4_dtv_encoder(x) container_of(x, struct mdp4_dtv_encoder, base)
+
+static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
+}
+
+static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(mdp4_dtv_encoder);
+}
+
+static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = {
+ .destroy = mdp4_dtv_encoder_destroy,
+};
+
+static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
+ uint32_t display_v_start, display_v_end;
+ uint32_t hsync_start_x, hsync_end_x;
+
+ mode = adjusted_mode;
+
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
+
+ mdp4_dtv_encoder->pixclock = mode->clock * 1000;
+
+ DBG("pixclock=%lu", mdp4_dtv_encoder->pixclock);
+
+ ctrl_pol = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl_pol |= MDP4_DTV_CTRL_POLARITY_HSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl_pol |= MDP4_DTV_CTRL_POLARITY_VSYNC_LOW;
+ /* probably need to get DATA_EN polarity from panel.. */
+
+ dtv_hsync_skew = 0; /* get this from panel? */
+
+ hsync_start_x = (mode->htotal - mode->hsync_start);
+ hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
+
+ vsync_period = mode->vtotal * mode->htotal;
+ vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
+ display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
+ display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
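+	/*
+	 * Worked example (illustrative numbers, not from a real panel): for a
+	 * hypothetical 1080p60-style mode with hdisplay=1920, hsync_start=2008,
+	 * htotal=2200, vdisplay=1080, vsync_start=1084, vsync_end=1089,
+	 * vtotal=1125 and dtv_hsync_skew=0:
+	 *   hsync_start_x   = 2200 - 2008                        = 192
+	 *   hsync_end_x     = 2200 - (2008 - 1920) - 1           = 2111
+	 *   vsync_period    = 1125 * 2200                        = 2475000
+	 *   vsync_len       = (1089 - 1084) * 2200               = 11000
+	 *   display_v_start = (1125 - 1084) * 2200               = 90200
+	 *   display_v_end   = 2475000 - (1084 - 1080) * 2200 - 1 = 2466199
+	 */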
+
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_CTRL,
+ MDP4_DTV_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
+ MDP4_DTV_HSYNC_CTRL_PERIOD(mode->htotal));
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_PERIOD, vsync_period);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_LEN, vsync_len);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_HCTRL,
+ MDP4_DTV_DISPLAY_HCTRL_START(hsync_start_x) |
+ MDP4_DTV_DISPLAY_HCTRL_END(hsync_end_x));
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VSTART, display_v_start);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VEND, display_v_end);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_BORDER_CLR, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_UNDERFLOW_CLR,
+ MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY |
+ MDP4_DTV_UNDERFLOW_CLR_COLOR(0xff));
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_SKEW, dtv_hsync_skew);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_CTRL_POLARITY, ctrl_pol);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_HCTL,
+ MDP4_DTV_ACTIVE_HCTL_START(0) |
+ MDP4_DTV_ACTIVE_HCTL_END(0));
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VSTART, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VEND, 0);
+}
+
+static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder)
+{
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+
+ if (WARN_ON(!mdp4_dtv_encoder->enabled))
+ return;
+
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
+
+ /*
+ * Wait for a vsync so we know the ENABLE=0 latched before
+	 * the (connector) source of the vsyncs gets disabled,
+	 * otherwise we end up in a funny state if we re-enable
+	 * before the disable latches, which results in some of
+	 * the settings for the new modeset (like the new
+	 * scanout buffer) not latching properly.
+ */
+ mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC);
+
+ clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
+ clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
+
+ mdp4_dtv_encoder->enabled = false;
+}
+
+static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ unsigned long pc = mdp4_dtv_encoder->pixclock;
+ int ret;
+
+ if (WARN_ON(mdp4_dtv_encoder->enabled))
+ return;
+
+ mdp4_crtc_set_config(encoder->crtc,
+ MDP4_DMA_CONFIG_R_BPC(BPC8) |
+ MDP4_DMA_CONFIG_G_BPC(BPC8) |
+ MDP4_DMA_CONFIG_B_BPC(BPC8) |
+ MDP4_DMA_CONFIG_PACK(0x21));
+ mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 1);
+
+ DBG("setting mdp_clk=%lu", pc);
+
+ ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to set mdp_clk to %lu: %d\n",
+ pc, ret);
+
+ ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
+
+ ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
+
+ mdp4_dtv_encoder->enabled = true;
+}
+
+static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = {
+ .mode_set = mdp4_dtv_encoder_mode_set,
+ .enable = mdp4_dtv_encoder_enable,
+ .disable = mdp4_dtv_encoder_disable,
+};
+
+long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
+{
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ return clk_round_rate(mdp4_dtv_encoder->mdp_clk, rate);
+}
+
+/* initialize encoder */
+struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
+{
+ struct drm_encoder *encoder = NULL;
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder;
+ int ret;
+
+ mdp4_dtv_encoder = kzalloc(sizeof(*mdp4_dtv_encoder), GFP_KERNEL);
+ if (!mdp4_dtv_encoder) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ encoder = &mdp4_dtv_encoder->base;
+
+ drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
+
+ mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
+ if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
+ DRM_DEV_ERROR(dev->dev, "failed to get hdmi_clk\n");
+ ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
+ goto fail;
+ }
+
+ mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk");
+ if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
+ DRM_DEV_ERROR(dev->dev, "failed to get tv_clk\n");
+ ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
+ goto fail;
+ }
+
+ return encoder;
+
+fail:
+ if (encoder)
+ mdp4_dtv_encoder_destroy(encoder);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c
new file mode 100644
index 000000000..4d49f3ba6
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+
+#include "msm_drv.h"
+#include "mdp4_kms.h"
+
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+ uint32_t old_irqmask)
+{
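+	/*
+	 * irqmask ^ (irqmask & old_irqmask) reduces to irqmask & ~old_irqmask,
+	 * i.e. only the interrupts being newly enabled; clear any stale
+	 * latched status for those bits before unmasking them.
+	 */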
+ mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_CLEAR,
+ irqmask ^ (irqmask & old_irqmask));
+ mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask);
+}
+
+static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp4_kms *mdp4_kms = container_of(irq, struct mdp4_kms, error_handler);
+ static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1);
+ extern bool dumpstate;
+
+ DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);
+
+ if (dumpstate && __ratelimit(&rs)) {
+ struct drm_printer p = drm_info_printer(mdp4_kms->dev->dev);
+ drm_state_dump(mdp4_kms->dev, &p);
+ }
+}
+
+void mdp4_irq_preinstall(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ mdp4_enable(mdp4_kms);
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
+ mdp4_disable(mdp4_kms);
+}
+
+int mdp4_irq_postinstall(struct msm_kms *kms)
+{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
+ struct mdp_irq *error_handler = &mdp4_kms->error_handler;
+
+ error_handler->irq = mdp4_irq_error_handler;
+ error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
+ MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
+
+ mdp_irq_register(mdp_kms, error_handler);
+
+ return 0;
+}
+
+void mdp4_irq_uninstall(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ mdp4_enable(mdp4_kms);
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
+ mdp4_disable(mdp4_kms);
+}
+
+irqreturn_t mdp4_irq(struct msm_kms *kms)
+{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
+ struct drm_device *dev = mdp4_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ unsigned int id;
+ uint32_t status, enable;
+
+ enable = mdp4_read(mdp4_kms, REG_MDP4_INTR_ENABLE);
+ status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS) & enable;
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
+
+ VERB("status=%08x", status);
+
+ mdp_dispatch_irqs(mdp_kms, status);
+
+ for (id = 0; id < priv->num_crtcs; id++)
+ if (status & mdp4_crtc_vblank(priv->crtcs[id]))
+ drm_handle_vblank(dev, id);
+
+ return IRQ_HANDLED;
+}
+
+int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+
+ mdp4_enable(mdp4_kms);
+ mdp_update_vblank_mask(to_mdp_kms(kms),
+ mdp4_crtc_vblank(crtc), true);
+ mdp4_disable(mdp4_kms);
+
+ return 0;
+}
+
+void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+
+ mdp4_enable(mdp4_kms);
+ mdp_update_vblank_mask(to_mdp_kms(kms),
+ mdp4_crtc_vblank(crtc), false);
+ mdp4_disable(mdp4_kms);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
new file mode 100644
index 000000000..964573d26
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/delay.h>
+
+#include <drm/drm_vblank.h>
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+#include "mdp4_kms.h"
+
+static int mdp4_hw_init(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ struct drm_device *dev = mdp4_kms->dev;
+ u32 dmap_cfg, vg_cfg;
+ unsigned long clk;
+
+ pm_runtime_get_sync(dev->dev);
+
+ if (mdp4_kms->rev > 1) {
+ mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
+ mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);
+
+ /* max read pending cmd config, 3 pending requests: */
+ mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);
+
+ clk = clk_get_rate(mdp4_kms->clk);
+
+ if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
+ dmap_cfg = 0x47; /* 16 bytes-burst x 8 req */
+		vg_cfg = 0x47;   /* 16 bytes-burst x 8 req */
+ } else {
+ dmap_cfg = 0x27; /* 8 bytes-burst x 8 req */
+ vg_cfg = 0x43; /* 16 bytes-burst x 4 req */
+ }
+
+ DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);
+
+ if (mdp4_kms->rev >= 2)
+ mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
+ mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, 0);
+
+ /* disable CSC matrix / YUV by default: */
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);
+
+ if (mdp4_kms->rev > 1)
+ mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
+
+ pm_runtime_put_sync(dev->dev);
+
+ return 0;
+}
+
+static void mdp4_enable_commit(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ mdp4_enable(mdp4_kms);
+}
+
+static void mdp4_disable_commit(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ mdp4_disable(mdp4_kms);
+}
+
+static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+}
+
+static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
+{
+ /* TODO */
+}
+
+static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ struct drm_crtc *crtc;
+
+ for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask)
+ mdp4_crtc_wait_for_commit_done(crtc);
+}
+
+static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
+{
+}
+
+static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
+ struct drm_encoder *encoder)
+{
+ /* if we had >1 encoder, we'd need something more clever: */
+ switch (encoder->encoder_type) {
+ case DRM_MODE_ENCODER_TMDS:
+ return mdp4_dtv_round_pixclk(encoder, rate);
+ case DRM_MODE_ENCODER_LVDS:
+ case DRM_MODE_ENCODER_DSI:
+ default:
+ return rate;
+ }
+}
+
+static void mdp4_destroy(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ struct device *dev = mdp4_kms->dev->dev;
+ struct msm_gem_address_space *aspace = kms->aspace;
+
+ if (mdp4_kms->blank_cursor_iova)
+ msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
+ drm_gem_object_put(mdp4_kms->blank_cursor_bo);
+
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu);
+ msm_gem_address_space_put(aspace);
+ }
+
+ if (mdp4_kms->rpm_enabled)
+ pm_runtime_disable(dev);
+
+ mdp_kms_destroy(&mdp4_kms->base);
+
+ kfree(mdp4_kms);
+}
+
+static const struct mdp_kms_funcs kms_funcs = {
+ .base = {
+ .hw_init = mdp4_hw_init,
+ .irq_preinstall = mdp4_irq_preinstall,
+ .irq_postinstall = mdp4_irq_postinstall,
+ .irq_uninstall = mdp4_irq_uninstall,
+ .irq = mdp4_irq,
+ .enable_vblank = mdp4_enable_vblank,
+ .disable_vblank = mdp4_disable_vblank,
+ .enable_commit = mdp4_enable_commit,
+ .disable_commit = mdp4_disable_commit,
+ .prepare_commit = mdp4_prepare_commit,
+ .flush_commit = mdp4_flush_commit,
+ .wait_flush = mdp4_wait_flush,
+ .complete_commit = mdp4_complete_commit,
+ .get_format = mdp_get_format,
+ .round_pixclk = mdp4_round_pixclk,
+ .destroy = mdp4_destroy,
+ },
+ .set_irqmask = mdp4_set_irqmask,
+};
+
+int mdp4_disable(struct mdp4_kms *mdp4_kms)
+{
+ DBG("");
+
+ clk_disable_unprepare(mdp4_kms->clk);
+ clk_disable_unprepare(mdp4_kms->pclk);
+ clk_disable_unprepare(mdp4_kms->lut_clk);
+ clk_disable_unprepare(mdp4_kms->axi_clk);
+
+ return 0;
+}
+
+int mdp4_enable(struct mdp4_kms *mdp4_kms)
+{
+ DBG("");
+
+ clk_prepare_enable(mdp4_kms->clk);
+ clk_prepare_enable(mdp4_kms->pclk);
+ clk_prepare_enable(mdp4_kms->lut_clk);
+ clk_prepare_enable(mdp4_kms->axi_clk);
+
+ return 0;
+}
+
+
+static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
+ int intf_type)
+{
+ struct drm_device *dev = mdp4_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct device_node *panel_node;
+ int dsi_id;
+ int ret;
+
+ switch (intf_type) {
+ case DRM_MODE_ENCODER_LVDS:
+ /*
+ * bail out early if there is no panel node (no need to
+ * initialize LCDC encoder and LVDS connector)
+ */
+ panel_node = of_graph_get_remote_node(dev->dev->of_node, 0, 0);
+ if (!panel_node)
+ return 0;
+
+ encoder = mdp4_lcdc_encoder_init(dev, panel_node);
+ if (IS_ERR(encoder)) {
+ DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
+ of_node_put(panel_node);
+ return PTR_ERR(encoder);
+ }
+
+ /* LCDC can be hooked to DMA_P (TODO: Add DMA_S later?) */
+ encoder->possible_crtcs = 1 << DMA_P;
+
+ connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
+ if (IS_ERR(connector)) {
+ DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
+ of_node_put(panel_node);
+ return PTR_ERR(connector);
+ }
+
+ break;
+ case DRM_MODE_ENCODER_TMDS:
+ encoder = mdp4_dtv_encoder_init(dev);
+ if (IS_ERR(encoder)) {
+ DRM_DEV_ERROR(dev->dev, "failed to construct DTV encoder\n");
+ return PTR_ERR(encoder);
+ }
+
+		/* DTV can be hooked to DMA_E (crtc index 1): */
+ encoder->possible_crtcs = 1 << 1;
+
+ if (priv->hdmi) {
+ /* Construct bridge/connector for HDMI: */
+ ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to initialize HDMI: %d\n", ret);
+ return ret;
+ }
+ }
+
+ break;
+ case DRM_MODE_ENCODER_DSI:
+ /* only DSI1 supported for now */
+ dsi_id = 0;
+
+ if (!priv->dsi[dsi_id])
+ break;
+
+ encoder = mdp4_dsi_encoder_init(dev);
+ if (IS_ERR(encoder)) {
+ ret = PTR_ERR(encoder);
+ DRM_DEV_ERROR(dev->dev,
+ "failed to construct DSI encoder: %d\n", ret);
+ return ret;
+ }
+
+ /* TODO: Add DMA_S later? */
+ encoder->possible_crtcs = 1 << DMA_P;
+
+ ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to initialize DSI: %d\n",
+ ret);
+ return ret;
+ }
+
+ break;
+ default:
+ DRM_DEV_ERROR(dev->dev, "Invalid or unsupported interface\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int modeset_init(struct mdp4_kms *mdp4_kms)
+{
+ struct drm_device *dev = mdp4_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ int i, ret;
+ static const enum mdp4_pipe rgb_planes[] = {
+ RGB1, RGB2,
+ };
+ static const enum mdp4_pipe vg_planes[] = {
+ VG1, VG2,
+ };
+ static const enum mdp4_dma mdp4_crtcs[] = {
+ DMA_P, DMA_E,
+ };
+ static const char * const mdp4_crtc_names[] = {
+ "DMA_P", "DMA_E",
+ };
+ static const int mdp4_intfs[] = {
+ DRM_MODE_ENCODER_LVDS,
+ DRM_MODE_ENCODER_DSI,
+ DRM_MODE_ENCODER_TMDS,
+ };
+
+ /* construct non-private planes: */
+ for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
+ plane = mdp4_plane_init(dev, vg_planes[i], false);
+ if (IS_ERR(plane)) {
+ DRM_DEV_ERROR(dev->dev,
+ "failed to construct plane for VG%d\n", i + 1);
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
+ plane = mdp4_plane_init(dev, rgb_planes[i], true);
+ if (IS_ERR(plane)) {
+ DRM_DEV_ERROR(dev->dev,
+ "failed to construct plane for RGB%d\n", i + 1);
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+
+ crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
+ mdp4_crtcs[i]);
+ if (IS_ERR(crtc)) {
+ DRM_DEV_ERROR(dev->dev, "failed to construct crtc for %s\n",
+ mdp4_crtc_names[i]);
+ ret = PTR_ERR(crtc);
+ goto fail;
+ }
+
+ priv->crtcs[priv->num_crtcs++] = crtc;
+ }
+
+ /*
+ * we currently set up two relatively fixed paths:
+ *
+ * LCDC/LVDS path: RGB1 -> DMA_P -> LCDC -> LVDS
+ * or
+ * DSI path: RGB1 -> DMA_P -> DSI1 -> DSI Panel
+ *
+ * DTV/HDMI path: RGB2 -> DMA_E -> DTV -> HDMI
+ */
+
+ for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
+ ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to initialize intf: %d, %d\n",
+ i, ret);
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+static void read_mdp_hw_revision(struct mdp4_kms *mdp4_kms,
+ u32 *major, u32 *minor)
+{
+ struct drm_device *dev = mdp4_kms->dev;
+ u32 version;
+
+ mdp4_enable(mdp4_kms);
+ version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
+ mdp4_disable(mdp4_kms);
+
+ *major = FIELD(version, MDP4_VERSION_MAJOR);
+ *minor = FIELD(version, MDP4_VERSION_MINOR);
+
+ DRM_DEV_INFO(dev->dev, "MDP4 version v%d.%d", *major, *minor);
+}
+
+static int mdp4_kms_init(struct drm_device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev->dev);
+ struct msm_drm_private *priv = dev->dev_private;
+ struct mdp4_kms *mdp4_kms;
+ struct msm_kms *kms = NULL;
+ struct iommu_domain *iommu;
+ struct msm_gem_address_space *aspace;
+ int irq, ret;
+ u32 major, minor;
+ unsigned long max_clk;
+
+	/* TODO: Chips that aren't apq8064 have a 200 MHz max_clk */
+ max_clk = 266667000;
+
+ mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
+ if (!mdp4_kms) {
+ DRM_DEV_ERROR(dev->dev, "failed to allocate kms\n");
+ return -ENOMEM;
+ }
+
+ ret = mdp_kms_init(&mdp4_kms->base, &kms_funcs);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to init kms\n");
+ goto fail;
+ }
+
+ priv->kms = &mdp4_kms->base.base;
+ kms = priv->kms;
+
+ mdp4_kms->dev = dev;
+
+ mdp4_kms->mmio = msm_ioremap(pdev, NULL);
+ if (IS_ERR(mdp4_kms->mmio)) {
+ ret = PTR_ERR(mdp4_kms->mmio);
+ goto fail;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
+ goto fail;
+ }
+
+ kms->irq = irq;
+
+	/* NOTE: the driver for this regulator is still missing upstream, so
+	 * use _get_exclusive() and ignore the error if it does not exist
+	 * (and hope that the bootloader left it on for us)
+	 */
+ mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
+ if (IS_ERR(mdp4_kms->vdd))
+ mdp4_kms->vdd = NULL;
+
+ if (mdp4_kms->vdd) {
+ ret = regulator_enable(mdp4_kms->vdd);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+ goto fail;
+ }
+ }
+
+ mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(mdp4_kms->clk)) {
+ DRM_DEV_ERROR(dev->dev, "failed to get core_clk\n");
+ ret = PTR_ERR(mdp4_kms->clk);
+ goto fail;
+ }
+
+ mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
+ if (IS_ERR(mdp4_kms->pclk))
+ mdp4_kms->pclk = NULL;
+
+ mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
+ if (IS_ERR(mdp4_kms->axi_clk)) {
+ DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
+ ret = PTR_ERR(mdp4_kms->axi_clk);
+ goto fail;
+ }
+
+ clk_set_rate(mdp4_kms->clk, max_clk);
+
+ read_mdp_hw_revision(mdp4_kms, &major, &minor);
+
+ if (major != 4) {
+ DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
+ major, minor);
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ mdp4_kms->rev = minor;
+
+ if (mdp4_kms->rev >= 2) {
+ mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
+ if (IS_ERR(mdp4_kms->lut_clk)) {
+ DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
+ ret = PTR_ERR(mdp4_kms->lut_clk);
+ goto fail;
+ }
+ clk_set_rate(mdp4_kms->lut_clk, max_clk);
+ }
+
+ pm_runtime_enable(dev->dev);
+ mdp4_kms->rpm_enabled = true;
+
+ /* make sure things are off before attaching iommu (bootloader could
+ * have left things on, in which case we'll start getting faults if
+ * we don't disable):
+ */
+ mdp4_enable(mdp4_kms);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
+ mdp4_disable(mdp4_kms);
+ mdelay(16);
+
+ iommu = iommu_domain_alloc(pdev->dev.bus);
+ if (iommu) {
+ struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu);
+
+ aspace = msm_gem_address_space_create(mmu,
+ "mdp4", 0x1000, 0x100000000 - 0x1000);
+
+ if (IS_ERR(aspace)) {
+ if (!IS_ERR(mmu))
+ mmu->funcs->destroy(mmu);
+ ret = PTR_ERR(aspace);
+ goto fail;
+ }
+
+ kms->aspace = aspace;
+ } else {
+ DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
+ "contig buffers for scanout\n");
+ aspace = NULL;
+ }
+
+ ret = modeset_init(mdp4_kms);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "modeset_init failed: %d\n", ret);
+ goto fail;
+ }
+
+ mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC | MSM_BO_SCANOUT);
+ if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
+ ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
+ DRM_DEV_ERROR(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
+ mdp4_kms->blank_cursor_bo = NULL;
+ goto fail;
+ }
+
+ ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
+ &mdp4_kms->blank_cursor_iova);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
+ goto fail;
+ }
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+
+ return 0;
+
+fail:
+ if (kms)
+ mdp4_destroy(kms);
+
+ return ret;
+}
+
+static const struct dev_pm_ops mdp4_pm_ops = {
+ .prepare = msm_pm_prepare,
+ .complete = msm_pm_complete,
+};
+
+static int mdp4_probe(struct platform_device *pdev)
+{
+ return msm_drv_probe(&pdev->dev, mdp4_kms_init);
+}
+
+static int mdp4_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &msm_drm_ops);
+
+ return 0;
+}
+
+static const struct of_device_id mdp4_dt_match[] = {
+ { .compatible = "qcom,mdp4" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mdp4_dt_match);
+
+static struct platform_driver mdp4_platform_driver = {
+ .probe = mdp4_probe,
+ .remove = mdp4_remove,
+ .shutdown = msm_drv_shutdown,
+ .driver = {
+ .name = "mdp4",
+ .of_match_table = mdp4_dt_match,
+ .pm = &mdp4_pm_ops,
+ },
+};
+
+void __init msm_mdp4_register(void)
+{
+ platform_driver_register(&mdp4_platform_driver);
+}
+
+void __exit msm_mdp4_unregister(void)
+{
+ platform_driver_unregister(&mdp4_platform_driver);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
new file mode 100644
index 000000000..01179e764
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __MDP4_KMS_H__
+#define __MDP4_KMS_H__
+
+#include <drm/drm_panel.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "disp/mdp_kms.h"
+#include "mdp4.xml.h"
+
+struct device_node;
+
+struct mdp4_kms {
+ struct mdp_kms base;
+
+ struct drm_device *dev;
+
+ int rev;
+
+ void __iomem *mmio;
+
+ struct regulator *vdd;
+
+ struct clk *clk;
+ struct clk *pclk;
+ struct clk *lut_clk;
+ struct clk *axi_clk;
+
+ struct mdp_irq error_handler;
+
+ bool rpm_enabled;
+
+ /* empty/blank cursor bo to use when cursor is "disabled" */
+ struct drm_gem_object *blank_cursor_bo;
+ uint64_t blank_cursor_iova;
+};
+#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
+
+static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data)
+{
+ msm_writel(data, mdp4_kms->mmio + reg);
+}
+
+static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg)
+{
+ return msm_readl(mdp4_kms->mmio + reg);
+}
+
+static inline uint32_t pipe2flush(enum mdp4_pipe pipe)
+{
+ switch (pipe) {
+ case VG1: return MDP4_OVERLAY_FLUSH_VG1;
+ case VG2: return MDP4_OVERLAY_FLUSH_VG2;
+ case RGB1: return MDP4_OVERLAY_FLUSH_RGB1;
+ case RGB2: return MDP4_OVERLAY_FLUSH_RGB2;
+ default: return 0;
+ }
+}
+
+static inline uint32_t ovlp2flush(int ovlp)
+{
+ switch (ovlp) {
+ case 0: return MDP4_OVERLAY_FLUSH_OVLP0;
+ case 1: return MDP4_OVERLAY_FLUSH_OVLP1;
+ default: return 0;
+ }
+}
+
+static inline uint32_t dma2irq(enum mdp4_dma dma)
+{
+ switch (dma) {
+ case DMA_P: return MDP4_IRQ_DMA_P_DONE;
+ case DMA_S: return MDP4_IRQ_DMA_S_DONE;
+ case DMA_E: return MDP4_IRQ_DMA_E_DONE;
+ default: return 0;
+ }
+}
+
+static inline uint32_t dma2err(enum mdp4_dma dma)
+{
+ switch (dma) {
+ case DMA_P: return MDP4_IRQ_PRIMARY_INTF_UDERRUN;
+	case DMA_S: return 0; /* no underrun irq known for DMA_S */
+ case DMA_E: return MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
+ default: return 0;
+ }
+}
+
+static inline uint32_t mixercfg(uint32_t mixer_cfg, int mixer,
+ enum mdp4_pipe pipe, enum mdp_mixer_stage_id stage)
+{
+ switch (pipe) {
+ case VG1:
+ mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK |
+ MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
+ mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE0(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
+ break;
+ case VG2:
+ mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK |
+ MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
+ mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE1(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
+ break;
+ case RGB1:
+ mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK |
+ MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
+ mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE2(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
+ break;
+ case RGB2:
+ mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK |
+ MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
+ mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE3(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
+ break;
+ case RGB3:
+ mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK |
+ MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
+ mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE4(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
+ break;
+ case VG3:
+ mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK |
+ MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
+ mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE5(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
+ break;
+ case VG4:
+ mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK |
+ MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
+ mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE6(stage) |
+ COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
+ break;
+ default:
+ WARN(1, "invalid pipe");
+ break;
+ }
+
+ return mixer_cfg;
+}
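+
+/*
+ * Illustrative usage (STAGE_BASE comes from the shared mdp_mixer_stage_id
+ * enum):
+ *
+ *   cfg = mixercfg(cfg, 0, VG1, STAGE_BASE);
+ *
+ * stages the VG1 pipe at the base stage of mixer 0; passing mixer == 1
+ * additionally sets the *_MIXER1 bit to route the pipe to the second mixer.
+ */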
+
+int mdp4_disable(struct mdp4_kms *mdp4_kms);
+int mdp4_enable(struct mdp4_kms *mdp4_kms);
+
+void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+ uint32_t old_irqmask);
+void mdp4_irq_preinstall(struct msm_kms *kms);
+int mdp4_irq_postinstall(struct msm_kms *kms);
+void mdp4_irq_uninstall(struct msm_kms *kms);
+irqreturn_t mdp4_irq(struct msm_kms *kms);
+int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+static inline uint32_t mdp4_pipe_caps(enum mdp4_pipe pipe)
+{
+ switch (pipe) {
+ case VG1:
+ case VG2:
+ case VG3:
+ case VG4:
+ return MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;
+ case RGB1:
+ case RGB2:
+ case RGB3:
+ return MDP_PIPE_CAP_SCALE;
+ default:
+ return 0;
+ }
+}
+
+enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
+struct drm_plane *mdp4_plane_init(struct drm_device *dev,
+ enum mdp4_pipe pipe_id, bool private_plane);
+
+uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
+void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
+void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer);
+void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc);
+struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane, int id, int ovlp_id,
+ enum mdp4_dma dma_id);
+
+long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
+struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev);
+
+long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
+struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
+ struct device_node *panel_node);
+
+struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
+ struct device_node *panel_node, struct drm_encoder *encoder);
+
+#ifdef CONFIG_DRM_MSM_DSI
+struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev);
+#else
+static inline struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
+#ifdef CONFIG_COMMON_CLK
+struct clk *mpd4_lvds_pll_init(struct drm_device *dev);
+#else
+static inline struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
+#endif /* __MDP4_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
new file mode 100644
index 000000000..10eb3e5b2
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ * Author: Vinay Simha <vinaysimha@inforcecomputing.com>
+ */
+
+#include <linux/delay.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_probe_helper.h>
+
+#include "mdp4_kms.h"
+
+struct mdp4_lcdc_encoder {
+ struct drm_encoder base;
+ struct device_node *panel_node;
+ struct drm_panel *panel;
+ struct clk *lcdc_clk;
+ unsigned long int pixclock;
+ struct regulator *regs[3];
+ bool enabled;
+ uint32_t bsc;
+};
+#define to_mdp4_lcdc_encoder(x) container_of(x, struct mdp4_lcdc_encoder, base)
+
+static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
+}
+
+static void mdp4_lcdc_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
+ to_mdp4_lcdc_encoder(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(mdp4_lcdc_encoder);
+}
+
+static const struct drm_encoder_funcs mdp4_lcdc_encoder_funcs = {
+ .destroy = mdp4_lcdc_encoder_destroy,
+};
+
+/* this should probably be a helper: */
+static struct drm_connector *get_connector(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder)
+ return connector;
+
+ return NULL;
+}
+
+static void setup_phy(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_connector *connector = get_connector(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ uint32_t lvds_intf = 0, lvds_phy_cfg0 = 0;
+ int bpp, nchan, swap;
+
+ if (!connector)
+ return;
+
+ bpp = 3 * connector->display_info.bpc;
+
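+	/* panels that don't report bits-per-component default to 18bpp (6 bpc) */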
+ if (!bpp)
+ bpp = 18;
+
+ /* TODO, these should come from panel somehow: */
+ nchan = 1;
+ swap = 0;
+
+ switch (bpp) {
+ case 24:
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(0),
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x08) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x05) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x04) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x03));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(0),
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x02) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x01) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x00));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(1),
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x11) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x10) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x0d) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x0c));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(1),
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x0b) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x0a) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x09));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(2),
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x1a) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x19) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x18) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x15));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(2),
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x14) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x13) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x12));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(3),
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x1b) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x17) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x16) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x0f));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(3),
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x0e) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x07) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x06));
+ if (nchan == 2) {
+ lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE3_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE2_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE1_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE0_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE3_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN;
+ } else {
+ lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE3_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN;
+ }
+ break;
+
+ case 18:
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(0),
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x0a) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x07) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x06) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x05));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(0),
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x04) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x03) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x02));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(1),
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x13) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x12) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x0f) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x0e));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(1),
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x0d) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x0c) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x0b));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(2),
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x1a) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x19) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x18) |
+ MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x17));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(2),
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x16) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x15) |
+ MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x14));
+ if (nchan == 2) {
+ lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE2_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE1_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE0_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN;
+ } else {
+ lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN;
+ }
+ lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_RGB_OUT;
+ break;
+
+ default:
+ DRM_DEV_ERROR(dev->dev, "unknown bpp: %d\n", bpp);
+ return;
+ }
+
+ switch (nchan) {
+ case 1:
+ lvds_phy_cfg0 = MDP4_LVDS_PHY_CFG0_CHANNEL0;
+ lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_MODE_SEL;
+ break;
+ case 2:
+ lvds_phy_cfg0 = MDP4_LVDS_PHY_CFG0_CHANNEL0 |
+ MDP4_LVDS_PHY_CFG0_CHANNEL1;
+ lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH2_CLK_LANE_EN |
+ MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN;
+ break;
+ default:
+ DRM_DEV_ERROR(dev->dev, "unknown # of channels: %d\n", nchan);
+ return;
+ }
+
+ if (swap)
+ lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH_SWAP;
+
+ lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_ENABLE;
+
+ mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, lvds_phy_cfg0);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_INTF_CTL, lvds_intf);
+ mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG2, 0x30);
+
+ mb();
+ udelay(1);
+ lvds_phy_cfg0 |= MDP4_LVDS_PHY_CFG0_SERIALIZATION_ENBLE;
+ mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, lvds_phy_cfg0);
+}
+
+static void mdp4_lcdc_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
+ to_mdp4_lcdc_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ uint32_t lcdc_hsync_skew, vsync_period, vsync_len, ctrl_pol;
+ uint32_t display_v_start, display_v_end;
+ uint32_t hsync_start_x, hsync_end_x;
+
+ mode = adjusted_mode;
+
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
+
+ mdp4_lcdc_encoder->pixclock = mode->clock * 1000;
+
+ DBG("pixclock=%lu", mdp4_lcdc_encoder->pixclock);
+
+ ctrl_pol = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl_pol |= MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl_pol |= MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW;
+ /* probably need to get DATA_EN polarity from panel.. */
+
+ lcdc_hsync_skew = 0; /* get this from panel? */
+
+ hsync_start_x = (mode->htotal - mode->hsync_start);
+ hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
+
+ vsync_period = mode->vtotal * mode->htotal;
+ vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
+ display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + lcdc_hsync_skew;
+ display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + lcdc_hsync_skew - 1;
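+	/* same timing derivation as mdp4_dtv_encoder_mode_set(); a worked
+	 * example of these calculations appears there
+	 */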
+
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_HSYNC_CTRL,
+ MDP4_LCDC_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
+ MDP4_LCDC_HSYNC_CTRL_PERIOD(mode->htotal));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_VSYNC_PERIOD, vsync_period);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_VSYNC_LEN, vsync_len);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_DISPLAY_HCTRL,
+ MDP4_LCDC_DISPLAY_HCTRL_START(hsync_start_x) |
+ MDP4_LCDC_DISPLAY_HCTRL_END(hsync_end_x));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_DISPLAY_VSTART, display_v_start);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_DISPLAY_VEND, display_v_end);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_BORDER_CLR, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_UNDERFLOW_CLR,
+ MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY |
+ MDP4_LCDC_UNDERFLOW_CLR_COLOR(0xff));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_HSYNC_SKEW, lcdc_hsync_skew);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_CTRL_POLARITY, ctrl_pol);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_HCTL,
+ MDP4_LCDC_ACTIVE_HCTL_START(0) |
+ MDP4_LCDC_ACTIVE_HCTL_END(0));
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_VSTART, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_VEND, 0);
+}
+
+static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
+ to_mdp4_lcdc_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ struct drm_panel *panel;
+ int i, ret;
+
+ if (WARN_ON(!mdp4_lcdc_encoder->enabled))
+ return;
+
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
+
+ panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
+ if (!IS_ERR(panel)) {
+ drm_panel_disable(panel);
+ drm_panel_unprepare(panel);
+ }
+
+ /*
+ * Wait for a vsync so we know the ENABLE=0 latched before
+	 * the (connector) source of the vsyncs gets disabled,
+	 * otherwise we end up in a funny state if we re-enable
+	 * before the disable latches, which results in some of
+	 * the settings for the new modeset (like the new
+	 * scanout buffer) not latching properly.
+ */
+ mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC);
+
+ clk_disable_unprepare(mdp4_lcdc_encoder->lcdc_clk);
+
+ for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
+ ret = regulator_disable(mdp4_lcdc_encoder->regs[i]);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret);
+ }
+
+ mdp4_lcdc_encoder->enabled = false;
+}
+
+static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
+ to_mdp4_lcdc_encoder(encoder);
+ unsigned long pc = mdp4_lcdc_encoder->pixclock;
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ struct drm_panel *panel;
+ uint32_t config;
+ int i, ret;
+
+ if (WARN_ON(mdp4_lcdc_encoder->enabled))
+ return;
+
+ /* TODO: hard-coded for 18bpp: */
+ config =
+ MDP4_DMA_CONFIG_R_BPC(BPC6) |
+ MDP4_DMA_CONFIG_G_BPC(BPC6) |
+ MDP4_DMA_CONFIG_B_BPC(BPC6) |
+ MDP4_DMA_CONFIG_PACK(0x21) |
+ MDP4_DMA_CONFIG_DEFLKR_EN |
+ MDP4_DMA_CONFIG_DITHER_EN;
+
+ if (!of_property_read_bool(dev->dev->of_node, "qcom,lcdc-align-lsb"))
+ config |= MDP4_DMA_CONFIG_PACK_ALIGN_MSB;
+
+ mdp4_crtc_set_config(encoder->crtc, config);
+ mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0);
+
+ for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
+ ret = regulator_enable(mdp4_lcdc_encoder->regs[i]);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to enable regulator: %d\n", ret);
+ }
+
+ DBG("setting lcdc_clk=%lu", pc);
+ ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
+ ret = clk_prepare_enable(mdp4_lcdc_encoder->lcdc_clk);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
+
+ panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
+ if (!IS_ERR(panel)) {
+ drm_panel_prepare(panel);
+ drm_panel_enable(panel);
+ }
+
+ setup_phy(encoder);
+
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 1);
+
+ mdp4_lcdc_encoder->enabled = true;
+}
+
+static const struct drm_encoder_helper_funcs mdp4_lcdc_encoder_helper_funcs = {
+ .mode_set = mdp4_lcdc_encoder_mode_set,
+ .disable = mdp4_lcdc_encoder_disable,
+ .enable = mdp4_lcdc_encoder_enable,
+};
+
+long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
+{
+ struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
+ to_mdp4_lcdc_encoder(encoder);
+ return clk_round_rate(mdp4_lcdc_encoder->lcdc_clk, rate);
+}
+
+/* initialize encoder */
+struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
+ struct device_node *panel_node)
+{
+ struct drm_encoder *encoder = NULL;
+ struct mdp4_lcdc_encoder *mdp4_lcdc_encoder;
+ struct regulator *reg;
+ int ret;
+
+ mdp4_lcdc_encoder = kzalloc(sizeof(*mdp4_lcdc_encoder), GFP_KERNEL);
+ if (!mdp4_lcdc_encoder) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mdp4_lcdc_encoder->panel_node = panel_node;
+
+ encoder = &mdp4_lcdc_encoder->base;
+
+ drm_encoder_init(dev, encoder, &mdp4_lcdc_encoder_funcs,
+ DRM_MODE_ENCODER_LVDS, NULL);
+ drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs);
+
+ /* TODO: do we need different pll in other cases? */
+ mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev);
+ if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n");
+ ret = PTR_ERR(mdp4_lcdc_encoder->lcdc_clk);
+ goto fail;
+ }
+
+ /* TODO: different regulators in other cases? */
+ reg = devm_regulator_get(dev->dev, "lvds-vccs-3p3v");
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
+ goto fail;
+ }
+ mdp4_lcdc_encoder->regs[0] = reg;
+
+ reg = devm_regulator_get(dev->dev, "lvds-pll-vdda");
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
+ goto fail;
+ }
+ mdp4_lcdc_encoder->regs[1] = reg;
+
+ reg = devm_regulator_get(dev->dev, "lvds-vdda");
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds-vdda: %d\n", ret);
+ goto fail;
+ }
+ mdp4_lcdc_encoder->regs[2] = reg;
+
+ return encoder;
+
+fail:
+ if (encoder)
+ mdp4_lcdc_encoder_destroy(encoder);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
new file mode 100644
index 000000000..7444b75c4
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ * Author: Vinay Simha <vinaysimha@inforcecomputing.com>
+ */
+
+#include "mdp4_kms.h"
+
+struct mdp4_lvds_connector {
+ struct drm_connector base;
+ struct drm_encoder *encoder;
+ struct device_node *panel_node;
+ struct drm_panel *panel;
+};
+#define to_mdp4_lvds_connector(x) container_of(x, struct mdp4_lvds_connector, base)
+
+static enum drm_connector_status mdp4_lvds_connector_detect(
+ struct drm_connector *connector, bool force)
+{
+ struct mdp4_lvds_connector *mdp4_lvds_connector =
+ to_mdp4_lvds_connector(connector);
+
+ if (!mdp4_lvds_connector->panel) {
+ mdp4_lvds_connector->panel =
+ of_drm_find_panel(mdp4_lvds_connector->panel_node);
+ if (IS_ERR(mdp4_lvds_connector->panel))
+ mdp4_lvds_connector->panel = NULL;
+ }
+
+ return mdp4_lvds_connector->panel ?
+ connector_status_connected :
+ connector_status_disconnected;
+}
+
+static void mdp4_lvds_connector_destroy(struct drm_connector *connector)
+{
+ struct mdp4_lvds_connector *mdp4_lvds_connector =
+ to_mdp4_lvds_connector(connector);
+
+ drm_connector_cleanup(connector);
+
+ kfree(mdp4_lvds_connector);
+}
+
+static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
+{
+ struct mdp4_lvds_connector *mdp4_lvds_connector =
+ to_mdp4_lvds_connector(connector);
+ struct drm_panel *panel = mdp4_lvds_connector->panel;
+ int ret = 0;
+
+ if (panel)
+ ret = drm_panel_get_modes(panel, connector);
+
+ return ret;
+}
+
+static enum drm_mode_status
+mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct mdp4_lvds_connector *mdp4_lvds_connector =
+ to_mdp4_lvds_connector(connector);
+ struct drm_encoder *encoder = mdp4_lvds_connector->encoder;
+ long actual, requested;
+
+ requested = 1000 * mode->clock;
+ actual = mdp4_lcdc_round_pixclk(encoder, requested);
+
+ DBG("requested=%ld, actual=%ld", requested, actual);
+
+ if (actual != requested)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
+static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
+ .detect = mdp4_lvds_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = mdp4_lvds_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = {
+ .get_modes = mdp4_lvds_connector_get_modes,
+ .mode_valid = mdp4_lvds_connector_mode_valid,
+};
+
+/* initialize connector */
+struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
+ struct device_node *panel_node, struct drm_encoder *encoder)
+{
+ struct drm_connector *connector = NULL;
+ struct mdp4_lvds_connector *mdp4_lvds_connector;
+
+ mdp4_lvds_connector = kzalloc(sizeof(*mdp4_lvds_connector), GFP_KERNEL);
+ if (!mdp4_lvds_connector)
+ return ERR_PTR(-ENOMEM);
+
+ mdp4_lvds_connector->encoder = encoder;
+ mdp4_lvds_connector->panel_node = panel_node;
+
+ connector = &mdp4_lvds_connector->base;
+
+ drm_connector_init(dev, connector, &mdp4_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ drm_connector_helper_add(connector, &mdp4_lvds_connector_helper_funcs);
+
+ connector->polled = 0;
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ drm_connector_attach_encoder(connector, encoder);
+
+ return connector;
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
new file mode 100644
index 000000000..ab8c0c187
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+#include "mdp4_kms.h"
+
+struct mdp4_lvds_pll {
+ struct clk_hw pll_hw;
+ struct drm_device *dev;
+ unsigned long pixclk;
+};
+#define to_mdp4_lvds_pll(x) container_of(x, struct mdp4_lvds_pll, pll_hw)
+
+static struct mdp4_kms *get_kms(struct mdp4_lvds_pll *lvds_pll)
+{
+ struct msm_drm_private *priv = lvds_pll->dev->dev_private;
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
+}
+
+struct pll_rate {
+ unsigned long rate;
+ struct {
+ uint32_t val;
+ uint32_t reg;
+ } conf[32];
+};
+
+/* NOTE: keep sorted highest freq to lowest: */
+static const struct pll_rate freqtbl[] = {
+ { 72000000, {
+ { 0x8f, REG_MDP4_LVDS_PHY_PLL_CTRL_1 },
+ { 0x30, REG_MDP4_LVDS_PHY_PLL_CTRL_2 },
+ { 0xc6, REG_MDP4_LVDS_PHY_PLL_CTRL_3 },
+ { 0x10, REG_MDP4_LVDS_PHY_PLL_CTRL_5 },
+ { 0x07, REG_MDP4_LVDS_PHY_PLL_CTRL_6 },
+ { 0x62, REG_MDP4_LVDS_PHY_PLL_CTRL_7 },
+ { 0x41, REG_MDP4_LVDS_PHY_PLL_CTRL_8 },
+ { 0x0d, REG_MDP4_LVDS_PHY_PLL_CTRL_9 },
+ { 0, 0 } }
+ },
+};
+
+static const struct pll_rate *find_rate(unsigned long rate)
+{
+ int i;
+ for (i = 1; i < ARRAY_SIZE(freqtbl); i++)
+ if (rate > freqtbl[i].rate)
+ return &freqtbl[i-1];
+ return &freqtbl[i-1];
+}
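+
+/*
+ * find_rate() effectively rounds the requested rate up to the nearest table
+ * entry, clamping to the highest entry for faster requests; with only the
+ * 72 MHz entry above, every request resolves to that configuration.
+ */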
+
+static int mpd4_lvds_pll_enable(struct clk_hw *hw)
+{
+ struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
+ struct mdp4_kms *mdp4_kms = get_kms(lvds_pll);
+ const struct pll_rate *pll_rate = find_rate(lvds_pll->pixclk);
+ int i;
+
+ DBG("pixclk=%lu (%lu)", lvds_pll->pixclk, pll_rate->rate);
+
+ if (WARN_ON(!pll_rate))
+ return -EINVAL;
+
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_PHY_RESET, 0x33);
+
+ for (i = 0; pll_rate->conf[i].reg; i++)
+ mdp4_write(mdp4_kms, pll_rate->conf[i].reg, pll_rate->conf[i].val);
+
+ mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_CTRL_0, 0x01);
+
+ /* Wait until LVDS PLL is locked and ready */
+ while (!mdp4_read(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_LOCKED))
+ cpu_relax();
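+	/* note: no timeout; this assumes the PLL always locks once configured */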
+
+ return 0;
+}
+
+static void mpd4_lvds_pll_disable(struct clk_hw *hw)
+{
+ struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
+ struct mdp4_kms *mdp4_kms = get_kms(lvds_pll);
+
+ DBG("");
+
+ mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, 0x0);
+ mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_CTRL_0, 0x0);
+}
+
+static unsigned long mpd4_lvds_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
+ return lvds_pll->pixclk;
+}
+
+static long mpd4_lvds_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ const struct pll_rate *pll_rate = find_rate(rate);
+ return pll_rate->rate;
+}
+
+static int mpd4_lvds_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
+ lvds_pll->pixclk = rate;
+ return 0;
+}
+
+
+static const struct clk_ops mpd4_lvds_pll_ops = {
+ .enable = mpd4_lvds_pll_enable,
+ .disable = mpd4_lvds_pll_disable,
+ .recalc_rate = mpd4_lvds_pll_recalc_rate,
+ .round_rate = mpd4_lvds_pll_round_rate,
+ .set_rate = mpd4_lvds_pll_set_rate,
+};
+
+static const char *mpd4_lvds_pll_parents[] = {
+ "pxo",
+};
+
+static struct clk_init_data pll_init = {
+ .name = "mpd4_lvds_pll",
+ .ops = &mpd4_lvds_pll_ops,
+ .parent_names = mpd4_lvds_pll_parents,
+ .num_parents = ARRAY_SIZE(mpd4_lvds_pll_parents),
+};
+
+struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
+{
+ struct mdp4_lvds_pll *lvds_pll;
+ struct clk *clk;
+ int ret;
+
+ lvds_pll = devm_kzalloc(dev->dev, sizeof(*lvds_pll), GFP_KERNEL);
+ if (!lvds_pll) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ lvds_pll->dev = dev;
+
+ lvds_pll->pll_hw.init = &pll_init;
+ clk = devm_clk_register(dev->dev, &lvds_pll->pll_hw);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto fail;
+ }
+
+ return clk;
+
+fail:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
new file mode 100644
index 000000000..b689b618d
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+
+#include "mdp4_kms.h"
+
+#define DOWN_SCALE_MAX 8
+#define UP_SCALE_MAX 8
+
+struct mdp4_plane {
+ struct drm_plane base;
+ const char *name;
+
+ enum mdp4_pipe pipe;
+
+ uint32_t caps;
+ uint32_t nformats;
+ uint32_t formats[32];
+
+ bool enabled;
+};
+#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base)
+
+/* MDP format helper functions */
+static inline
+enum mdp4_frame_format mdp4_get_frame_format(struct drm_framebuffer *fb)
+{
+ bool is_tile = false;
+
+ if (fb->modifier == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE)
+ is_tile = true;
+
+ if (fb->format->format == DRM_FORMAT_NV12 && is_tile)
+ return FRAME_TILE_YCBCR_420;
+
+ return FRAME_LINEAR;
+}
+
+static void mdp4_plane_set_scanout(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+static int mdp4_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+
+static struct mdp4_kms *get_kms(struct drm_plane *plane)
+{
+ struct msm_drm_private *priv = plane->dev->dev_private;
+ return to_mdp4_kms(to_mdp_kms(priv->kms));
+}
+
+static void mdp4_plane_destroy(struct drm_plane *plane)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+
+ drm_plane_cleanup(plane);
+
+ kfree(mdp4_plane);
+}
+
+/* helper to install properties which are common to planes and crtcs */
+static void mdp4_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj)
+{
+ // XXX
+}
+
+static int mdp4_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property, uint64_t val)
+{
+ // XXX
+ return -EINVAL;
+}
+
+static const struct drm_plane_funcs mdp4_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = mdp4_plane_destroy,
+ .set_property = mdp4_plane_set_property,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static int mdp4_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct msm_drm_private *priv = plane->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ if (!new_state->fb)
+ return 0;
+
+ drm_gem_plane_helper_prepare_fb(plane, new_state);
+
+ return msm_framebuffer_prepare(new_state->fb, kms->aspace, false);
+}
+
+static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ struct mdp4_kms *mdp4_kms = get_kms(plane);
+ struct msm_kms *kms = &mdp4_kms->base.base;
+ struct drm_framebuffer *fb = old_state->fb;
+
+ if (!fb)
+ return;
+
+ DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
+ msm_framebuffer_cleanup(fb, kms->aspace, false);
+}
+
+
+static int mdp4_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ return 0;
+}
+
+static void mdp4_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ int ret;
+
+ ret = mdp4_plane_mode_set(plane,
+ new_state->crtc, new_state->fb,
+ new_state->crtc_x, new_state->crtc_y,
+ new_state->crtc_w, new_state->crtc_h,
+ new_state->src_x, new_state->src_y,
+ new_state->src_w, new_state->src_h);
+ /* atomic_check should have ensured that this doesn't fail */
+ WARN_ON(ret < 0);
+}
+
+static const struct drm_plane_helper_funcs mdp4_plane_helper_funcs = {
+ .prepare_fb = mdp4_plane_prepare_fb,
+ .cleanup_fb = mdp4_plane_cleanup_fb,
+ .atomic_check = mdp4_plane_atomic_check,
+ .atomic_update = mdp4_plane_atomic_update,
+};
+
+static void mdp4_plane_set_scanout(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ struct mdp4_kms *mdp4_kms = get_kms(plane);
+ struct msm_kms *kms = &mdp4_kms->base.base;
+ enum mdp4_pipe pipe = mdp4_plane->pipe;
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
+ MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
+ MDP4_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_B(pipe),
+ MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
+ MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
+ msm_framebuffer_iova(fb, kms->aspace, 0));
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
+ msm_framebuffer_iova(fb, kms->aspace, 1));
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
+ msm_framebuffer_iova(fb, kms->aspace, 2));
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
+ msm_framebuffer_iova(fb, kms->aspace, 3));
+}
+
+static void mdp4_write_csc_config(struct mdp4_kms *mdp4_kms,
+ enum mdp4_pipe pipe, struct csc_cfg *csc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(csc->matrix); i++) {
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_MV(pipe, i),
+ csc->matrix[i]);
+ }
+
+	for (i = 0; i < ARRAY_SIZE(csc->post_bias); i++) {
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_PRE_BV(pipe, i),
+ csc->pre_bias[i]);
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_POST_BV(pipe, i),
+ csc->post_bias[i]);
+ }
+
+	for (i = 0; i < ARRAY_SIZE(csc->post_clamp); i++) {
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_PRE_LV(pipe, i),
+ csc->pre_clamp[i]);
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_POST_LV(pipe, i),
+ csc->post_clamp[i]);
+ }
+}
+
+#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000
+
+static int mdp4_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ struct mdp4_kms *mdp4_kms = get_kms(plane);
+ enum mdp4_pipe pipe = mdp4_plane->pipe;
+ const struct mdp_format *format;
+ uint32_t op_mode = 0;
+ uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
+ uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
+ enum mdp4_frame_format frame_type;
+
+ if (!(crtc && fb)) {
+ DBG("%s: disabled!", mdp4_plane->name);
+ return 0;
+ }
+
+ frame_type = mdp4_get_frame_format(fb);
+
+ /* src values are in Q16 fixed point, convert to integer: */
+ src_x = src_x >> 16;
+ src_y = src_y >> 16;
+ src_w = src_w >> 16;
+ src_h = src_h >> 16;
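+	/* e.g. a Q16.16 src_w of 0x01400000 becomes 0x0140 = 320 pixels */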
+
+ DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp4_plane->name,
+ fb->base.id, src_x, src_y, src_w, src_h,
+ crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
+
+ format = to_mdp_format(msm_framebuffer_format(fb));
+
+ if (src_w > (crtc_w * DOWN_SCALE_MAX)) {
+ DRM_DEV_ERROR(dev->dev, "Width down scaling exceeds limits!\n");
+ return -ERANGE;
+ }
+
+ if (src_h > (crtc_h * DOWN_SCALE_MAX)) {
+ DRM_DEV_ERROR(dev->dev, "Height down scaling exceeds limits!\n");
+ return -ERANGE;
+ }
+
+ if (crtc_w > (src_w * UP_SCALE_MAX)) {
+ DRM_DEV_ERROR(dev->dev, "Width up scaling exceeds limits!\n");
+ return -ERANGE;
+ }
+
+ if (crtc_h > (src_h * UP_SCALE_MAX)) {
+ DRM_DEV_ERROR(dev->dev, "Height up scaling exceeds limits!\n");
+ return -ERANGE;
+ }
+
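+	/*
+	 * For YUV sources the scale phase step is derived from the 1:1
+	 * default (0x20000000) via mult_frac(default, src, crtc); e.g. a
+	 * 2x upscale (crtc = 2 * src) halves the step to 0x10000000.
+	 */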
+ if (src_w != crtc_w) {
+ uint32_t sel_unit = SCALE_FIR;
+ op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN;
+
+ if (MDP_FORMAT_IS_YUV(format)) {
+ if (crtc_w > src_w)
+ sel_unit = SCALE_PIXEL_RPT;
+ else if (crtc_w <= (src_w / 4))
+ sel_unit = SCALE_MN_PHASE;
+
+ op_mode |= MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL(sel_unit);
+ phasex_step = mult_frac(MDP4_VG_PHASE_STEP_DEFAULT,
+ src_w, crtc_w);
+ }
+ }
+
+ if (src_h != crtc_h) {
+ uint32_t sel_unit = SCALE_FIR;
+ op_mode |= MDP4_PIPE_OP_MODE_SCALEY_EN;
+
+ if (MDP_FORMAT_IS_YUV(format)) {
+ if (crtc_h > src_h)
+ sel_unit = SCALE_PIXEL_RPT;
+ else if (crtc_h <= (src_h / 4))
+ sel_unit = SCALE_MN_PHASE;
+
+ op_mode |= MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL(sel_unit);
+ phasey_step = mult_frac(MDP4_VG_PHASE_STEP_DEFAULT,
+ src_h, crtc_h);
+ }
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_SIZE(pipe),
+ MDP4_PIPE_SRC_SIZE_WIDTH(src_w) |
+ MDP4_PIPE_SRC_SIZE_HEIGHT(src_h));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_XY(pipe),
+ MDP4_PIPE_SRC_XY_X(src_x) |
+ MDP4_PIPE_SRC_XY_Y(src_y));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_SIZE(pipe),
+ MDP4_PIPE_DST_SIZE_WIDTH(crtc_w) |
+ MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
+ MDP4_PIPE_DST_XY_X(crtc_x) |
+ MDP4_PIPE_DST_XY_Y(crtc_y));
+
+ mdp4_plane_set_scanout(plane, fb);
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe),
+ MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
+ MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
+ MDP4_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
+ MDP4_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
+ COND(format->alpha_enable, MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
+ MDP4_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
+ MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
+ MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(format->fetch_type) |
+ MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample) |
+ MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT(frame_type) |
+ COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe),
+ MDP4_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
+ MDP4_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
+ MDP4_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
+ MDP4_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
+
+ if (MDP_FORMAT_IS_YUV(format)) {
+ struct csc_cfg *csc = mdp_get_default_csc_cfg(CSC_YUV2RGB);
+
+ op_mode |= MDP4_PIPE_OP_MODE_SRC_YCBCR;
+ op_mode |= MDP4_PIPE_OP_MODE_CSC_EN;
+ mdp4_write_csc_config(mdp4_kms, pipe, csc);
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(pipe), op_mode);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
+
+ if (frame_type != FRAME_LINEAR)
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SSTILE_FRAME_SIZE(pipe),
+ MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH(src_w) |
+ MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT(src_h));
+
+ return 0;
+}
+
+static const char *pipe_names[] = {
+ "VG1", "VG2",
+ "RGB1", "RGB2", "RGB3",
+ "VG3", "VG4",
+};
+
+enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ return mdp4_plane->pipe;
+}
+
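+/*
+ * The modifier array handed to drm_universal_plane_init() must be
+ * terminated by DRM_FORMAT_MOD_INVALID.
+ */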
+static const uint64_t supported_format_modifiers[] = {
+ DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+/* initialize plane */
+struct drm_plane *mdp4_plane_init(struct drm_device *dev,
+ enum mdp4_pipe pipe_id, bool private_plane)
+{
+ struct drm_plane *plane = NULL;
+ struct mdp4_plane *mdp4_plane;
+ int ret;
+ enum drm_plane_type type;
+
+ mdp4_plane = kzalloc(sizeof(*mdp4_plane), GFP_KERNEL);
+ if (!mdp4_plane) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ plane = &mdp4_plane->base;
+
+ mdp4_plane->pipe = pipe_id;
+ mdp4_plane->name = pipe_names[pipe_id];
+ mdp4_plane->caps = mdp4_pipe_caps(pipe_id);
+
+ mdp4_plane->nformats = mdp_get_formats(mdp4_plane->formats,
+ ARRAY_SIZE(mdp4_plane->formats),
+ !pipe_supports_yuv(mdp4_plane->caps));
+
+ type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
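+	/* 0xff == possible_crtcs bitmask: allow binding to any CRTC */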
+ ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
+ mdp4_plane->formats, mdp4_plane->nformats,
+ supported_format_modifiers, type, NULL);
+ if (ret)
+ goto fail;
+
+ drm_plane_helper_add(plane, &mdp4_plane_helper_funcs);
+
+ mdp4_plane_install_properties(plane, &plane->base);
+
+ drm_plane_enable_fb_damage_clips(plane);
+
+ return plane;
+
+fail:
+ if (plane)
+ mdp4_plane_destroy(plane);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
new file mode 100644
index 000000000..86fc44b51
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
@@ -0,0 +1,1979 @@
+#ifndef MDP5_XML
+#define MDP5_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mdp5_intf_type {
+ INTF_DISABLED = 0,
+ INTF_DSI = 1,
+ INTF_HDMI = 3,
+ INTF_LCDC = 5,
+ INTF_eDP = 9,
+ INTF_VIRTUAL = 100,
+ INTF_WB = 101,
+};
+
+enum mdp5_intfnum {
+ NO_INTF = 0,
+ INTF0 = 1,
+ INTF1 = 2,
+ INTF2 = 3,
+ INTF3 = 4,
+};
+
+enum mdp5_pipe {
+ SSPP_NONE = 0,
+ SSPP_VIG0 = 1,
+ SSPP_VIG1 = 2,
+ SSPP_VIG2 = 3,
+ SSPP_RGB0 = 4,
+ SSPP_RGB1 = 5,
+ SSPP_RGB2 = 6,
+ SSPP_DMA0 = 7,
+ SSPP_DMA1 = 8,
+ SSPP_VIG3 = 9,
+ SSPP_RGB3 = 10,
+ SSPP_CURSOR0 = 11,
+ SSPP_CURSOR1 = 12,
+};
+
+enum mdp5_format {
+ DUMMY = 0,
+};
+
+enum mdp5_ctl_mode {
+ MODE_NONE = 0,
+ MODE_WB_0_BLOCK = 1,
+ MODE_WB_1_BLOCK = 2,
+ MODE_WB_0_LINE = 3,
+ MODE_WB_1_LINE = 4,
+ MODE_WB_2_LINE = 5,
+};
+
+enum mdp5_pack_3d {
+ PACK_3D_FRAME_INT = 0,
+ PACK_3D_H_ROW_INT = 1,
+ PACK_3D_V_ROW_INT = 2,
+ PACK_3D_COL_INT = 3,
+};
+
+enum mdp5_scale_filter {
+ SCALE_FILTER_NEAREST = 0,
+ SCALE_FILTER_BIL = 1,
+ SCALE_FILTER_PCMN = 2,
+ SCALE_FILTER_CA = 3,
+};
+
+enum mdp5_pipe_bwc {
+ BWC_LOSSLESS = 0,
+ BWC_Q_HIGH = 1,
+ BWC_Q_MED = 2,
+};
+
+enum mdp5_cursor_format {
+ CURSOR_FMT_ARGB8888 = 0,
+ CURSOR_FMT_ARGB1555 = 2,
+ CURSOR_FMT_ARGB4444 = 4,
+};
+
+enum mdp5_cursor_alpha {
+ CURSOR_ALPHA_CONST = 0,
+ CURSOR_ALPHA_PER_PIXEL = 2,
+};
+
+enum mdp5_igc_type {
+ IGC_VIG = 0,
+ IGC_RGB = 1,
+ IGC_DMA = 2,
+ IGC_DSPP = 3,
+};
+
+enum mdp5_data_format {
+ DATA_FORMAT_RGB = 0,
+ DATA_FORMAT_YUV = 1,
+};
+
+enum mdp5_block_size {
+ BLOCK_SIZE_64 = 0,
+ BLOCK_SIZE_128 = 1,
+};
+
+enum mdp5_rotate_mode {
+ ROTATE_0 = 0,
+ ROTATE_90 = 1,
+};
+
+enum mdp5_chroma_downsample_method {
+ DS_MTHD_NO_PIXEL_DROP = 0,
+ DS_MTHD_PIXEL_DROP = 1,
+};
+
+#define MDP5_IRQ_WB_0_DONE 0x00000001
+#define MDP5_IRQ_WB_1_DONE 0x00000002
+#define MDP5_IRQ_WB_2_DONE 0x00000010
+#define MDP5_IRQ_PING_PONG_0_DONE 0x00000100
+#define MDP5_IRQ_PING_PONG_1_DONE 0x00000200
+#define MDP5_IRQ_PING_PONG_2_DONE 0x00000400
+#define MDP5_IRQ_PING_PONG_3_DONE 0x00000800
+#define MDP5_IRQ_PING_PONG_0_RD_PTR 0x00001000
+#define MDP5_IRQ_PING_PONG_1_RD_PTR 0x00002000
+#define MDP5_IRQ_PING_PONG_2_RD_PTR 0x00004000
+#define MDP5_IRQ_PING_PONG_3_RD_PTR 0x00008000
+#define MDP5_IRQ_PING_PONG_0_WR_PTR 0x00010000
+#define MDP5_IRQ_PING_PONG_1_WR_PTR 0x00020000
+#define MDP5_IRQ_PING_PONG_2_WR_PTR 0x00040000
+#define MDP5_IRQ_PING_PONG_3_WR_PTR 0x00080000
+#define MDP5_IRQ_PING_PONG_0_AUTO_REF 0x00100000
+#define MDP5_IRQ_PING_PONG_1_AUTO_REF 0x00200000
+#define MDP5_IRQ_PING_PONG_2_AUTO_REF 0x00400000
+#define MDP5_IRQ_PING_PONG_3_AUTO_REF 0x00800000
+#define MDP5_IRQ_INTF0_UNDER_RUN 0x01000000
+#define MDP5_IRQ_INTF0_VSYNC 0x02000000
+#define MDP5_IRQ_INTF1_UNDER_RUN 0x04000000
+#define MDP5_IRQ_INTF1_VSYNC 0x08000000
+#define MDP5_IRQ_INTF2_UNDER_RUN 0x10000000
+#define MDP5_IRQ_INTF2_VSYNC 0x20000000
+#define MDP5_IRQ_INTF3_UNDER_RUN 0x40000000
+#define MDP5_IRQ_INTF3_VSYNC 0x80000000
+#define REG_MDSS_HW_VERSION 0x00000000
+#define MDSS_HW_VERSION_STEP__MASK 0x0000ffff
+#define MDSS_HW_VERSION_STEP__SHIFT 0
+static inline uint32_t MDSS_HW_VERSION_STEP(uint32_t val)
+{
+ return ((val) << MDSS_HW_VERSION_STEP__SHIFT) & MDSS_HW_VERSION_STEP__MASK;
+}
+#define MDSS_HW_VERSION_MINOR__MASK 0x0fff0000
+#define MDSS_HW_VERSION_MINOR__SHIFT 16
+static inline uint32_t MDSS_HW_VERSION_MINOR(uint32_t val)
+{
+ return ((val) << MDSS_HW_VERSION_MINOR__SHIFT) & MDSS_HW_VERSION_MINOR__MASK;
+}
+#define MDSS_HW_VERSION_MAJOR__MASK 0xf0000000
+#define MDSS_HW_VERSION_MAJOR__SHIFT 28
+static inline uint32_t MDSS_HW_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << MDSS_HW_VERSION_MAJOR__SHIFT) & MDSS_HW_VERSION_MAJOR__MASK;
+}
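+
+/*
+ * Illustrative use of the generated helpers: a full register value is
+ * built by OR-ing the masked, shifted fields, e.g.
+ *   MDSS_HW_VERSION_MAJOR(1) | MDSS_HW_VERSION_MINOR(3) |
+ *   MDSS_HW_VERSION_STEP(0)
+ */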
+
+#define REG_MDSS_HW_INTR_STATUS 0x00000010
+#define MDSS_HW_INTR_STATUS_INTR_MDP 0x00000001
+#define MDSS_HW_INTR_STATUS_INTR_DSI0 0x00000010
+#define MDSS_HW_INTR_STATUS_INTR_DSI1 0x00000020
+#define MDSS_HW_INTR_STATUS_INTR_HDMI 0x00000100
+#define MDSS_HW_INTR_STATUS_INTR_EDP 0x00001000
+
+#define REG_MDP5_HW_VERSION 0x00000000
+#define MDP5_HW_VERSION_STEP__MASK 0x0000ffff
+#define MDP5_HW_VERSION_STEP__SHIFT 0
+static inline uint32_t MDP5_HW_VERSION_STEP(uint32_t val)
+{
+ return ((val) << MDP5_HW_VERSION_STEP__SHIFT) & MDP5_HW_VERSION_STEP__MASK;
+}
+#define MDP5_HW_VERSION_MINOR__MASK 0x0fff0000
+#define MDP5_HW_VERSION_MINOR__SHIFT 16
+static inline uint32_t MDP5_HW_VERSION_MINOR(uint32_t val)
+{
+ return ((val) << MDP5_HW_VERSION_MINOR__SHIFT) & MDP5_HW_VERSION_MINOR__MASK;
+}
+#define MDP5_HW_VERSION_MAJOR__MASK 0xf0000000
+#define MDP5_HW_VERSION_MAJOR__SHIFT 28
+static inline uint32_t MDP5_HW_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << MDP5_HW_VERSION_MAJOR__SHIFT) & MDP5_HW_VERSION_MAJOR__MASK;
+}
+
+#define REG_MDP5_DISP_INTF_SEL 0x00000004
+#define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff
+#define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val)
+{
+ return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK;
+}
+#define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
+#define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val)
+{
+ return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK;
+}
+#define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
+#define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val)
+{
+ return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK;
+}
+#define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000
+#define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val)
+{
+ return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK;
+}
+
+#define REG_MDP5_INTR_EN 0x00000010
+
+#define REG_MDP5_INTR_STATUS 0x00000014
+
+#define REG_MDP5_INTR_CLEAR 0x00000018
+
+#define REG_MDP5_HIST_INTR_EN 0x0000001c
+
+#define REG_MDP5_HIST_INTR_STATUS 0x00000020
+
+#define REG_MDP5_HIST_INTR_CLEAR 0x00000024
+
+#define REG_MDP5_SPARE_0 0x00000028
+#define MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000080 + 0x4*i0; }
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000080 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(uint32_t val)
+{
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
+}
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(uint32_t val)
+{
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
+}
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(uint32_t val)
+{
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
+}
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000130 + 0x4*i0; }
+
+static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000130 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(uint32_t val)
+{
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK;
+}
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(uint32_t val)
+{
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK;
+}
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(uint32_t val)
+{
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK;
+}
+
+static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
+{
+ switch (idx) {
+ case IGC_VIG: return 0x00000200;
+ case IGC_RGB: return 0x00000210;
+ case IGC_DMA: return 0x00000220;
+ case IGC_DSPP: return 0x00000300;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); }
+
+static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
+#define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff
+#define MDP5_IGC_LUT_REG_VAL__SHIFT 0
+static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
+{
+ return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK;
+}
+#define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
+
+#define REG_MDP5_SPLIT_DPL_EN 0x000002f4
+
+#define REG_MDP5_SPLIT_DPL_UPPER 0x000002f8
+#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002
+#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004
+#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010
+#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100
+
+#define REG_MDP5_SPLIT_DPL_LOWER 0x000003f0
+#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002
+#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004
+#define MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010
+#define MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100
+
+static inline uint32_t __offset_CTL(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->ctl.base[0]);
+ case 1: return (mdp5_cfg->ctl.base[1]);
+ case 2: return (mdp5_cfg->ctl.base[2]);
+ case 3: return (mdp5_cfg->ctl.base[3]);
+ case 4: return (mdp5_cfg->ctl.base[4]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000000 + __offset_CTL(i0); }
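+
+/*
+ * Note: the CTL (and PIPE/LM below) block bases are not fixed on MDP5;
+ * they are resolved at runtime from the per-SoC mdp5_cfg tables.
+ */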
+
+static inline uint32_t __offset_LAYER(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00000000;
+ case 1: return 0x00000004;
+ case 2: return 0x00000008;
+ case 3: return 0x0000000c;
+ case 4: return 0x00000010;
+ case 5: return 0x00000024;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); }
+
+static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); }
+#define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007
+#define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(uint32_t val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_VIG1__MASK 0x00000038
+#define MDP5_CTL_LAYER_REG_VIG1__SHIFT 3
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(uint32_t val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_VIG2__MASK 0x000001c0
+#define MDP5_CTL_LAYER_REG_VIG2__SHIFT 6
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(uint32_t val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB0__MASK 0x00000e00
+#define MDP5_CTL_LAYER_REG_RGB0__SHIFT 9
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(uint32_t val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB1__MASK 0x00007000
+#define MDP5_CTL_LAYER_REG_RGB1__SHIFT 12
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(uint32_t val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB2__MASK 0x00038000
+#define MDP5_CTL_LAYER_REG_RGB2__SHIFT 15
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(uint32_t val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK;
+}
+#define MDP5_CTL_LAYER_REG_DMA0__MASK 0x001c0000
+#define MDP5_CTL_LAYER_REG_DMA0__SHIFT 18
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(uint32_t val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_DMA1__MASK 0x00e00000
+#define MDP5_CTL_LAYER_REG_DMA1__SHIFT 21
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(uint32_t val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_BORDER_COLOR 0x01000000
+#define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000
+#define MDP5_CTL_LAYER_REG_VIG3__MASK 0x1c000000
+#define MDP5_CTL_LAYER_REG_VIG3__SHIFT 26
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(uint32_t val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_VIG3__SHIFT) & MDP5_CTL_LAYER_REG_VIG3__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB3__MASK 0xe0000000
+#define MDP5_CTL_LAYER_REG_RGB3__SHIFT 29
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(uint32_t val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_RGB3__SHIFT) & MDP5_CTL_LAYER_REG_RGB3__MASK;
+}
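+
+/*
+ * Illustrative: staging VIG0 at mixer stage 2 and RGB0 at stage 3 in
+ * one CTL_LAYER register:
+ *   MDP5_CTL_LAYER_REG_VIG0(2) | MDP5_CTL_LAYER_REG_RGB0(3)
+ */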
+
+static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000014 + __offset_CTL(i0); }
+#define MDP5_CTL_OP_MODE__MASK 0x0000000f
+#define MDP5_CTL_OP_MODE__SHIFT 0
+static inline uint32_t MDP5_CTL_OP_MODE(enum mdp5_ctl_mode val)
+{
+ return ((val) << MDP5_CTL_OP_MODE__SHIFT) & MDP5_CTL_OP_MODE__MASK;
+}
+#define MDP5_CTL_OP_INTF_NUM__MASK 0x00000070
+#define MDP5_CTL_OP_INTF_NUM__SHIFT 4
+static inline uint32_t MDP5_CTL_OP_INTF_NUM(enum mdp5_intfnum val)
+{
+ return ((val) << MDP5_CTL_OP_INTF_NUM__SHIFT) & MDP5_CTL_OP_INTF_NUM__MASK;
+}
+#define MDP5_CTL_OP_CMD_MODE 0x00020000
+#define MDP5_CTL_OP_PACK_3D_ENABLE 0x00080000
+#define MDP5_CTL_OP_PACK_3D__MASK 0x00300000
+#define MDP5_CTL_OP_PACK_3D__SHIFT 20
+static inline uint32_t MDP5_CTL_OP_PACK_3D(enum mdp5_pack_3d val)
+{
+ return ((val) << MDP5_CTL_OP_PACK_3D__SHIFT) & MDP5_CTL_OP_PACK_3D__MASK;
+}
+
+static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000018 + __offset_CTL(i0); }
+#define MDP5_CTL_FLUSH_VIG0 0x00000001
+#define MDP5_CTL_FLUSH_VIG1 0x00000002
+#define MDP5_CTL_FLUSH_VIG2 0x00000004
+#define MDP5_CTL_FLUSH_RGB0 0x00000008
+#define MDP5_CTL_FLUSH_RGB1 0x00000010
+#define MDP5_CTL_FLUSH_RGB2 0x00000020
+#define MDP5_CTL_FLUSH_LM0 0x00000040
+#define MDP5_CTL_FLUSH_LM1 0x00000080
+#define MDP5_CTL_FLUSH_LM2 0x00000100
+#define MDP5_CTL_FLUSH_LM3 0x00000200
+#define MDP5_CTL_FLUSH_LM4 0x00000400
+#define MDP5_CTL_FLUSH_DMA0 0x00000800
+#define MDP5_CTL_FLUSH_DMA1 0x00001000
+#define MDP5_CTL_FLUSH_DSPP0 0x00002000
+#define MDP5_CTL_FLUSH_DSPP1 0x00004000
+#define MDP5_CTL_FLUSH_DSPP2 0x00008000
+#define MDP5_CTL_FLUSH_WB 0x00010000
+#define MDP5_CTL_FLUSH_CTL 0x00020000
+#define MDP5_CTL_FLUSH_VIG3 0x00040000
+#define MDP5_CTL_FLUSH_RGB3 0x00080000
+#define MDP5_CTL_FLUSH_LM5 0x00100000
+#define MDP5_CTL_FLUSH_DSPP3 0x00200000
+#define MDP5_CTL_FLUSH_CURSOR_0 0x00400000
+#define MDP5_CTL_FLUSH_CURSOR_1 0x00800000
+#define MDP5_CTL_FLUSH_CHROMADOWN_0 0x04000000
+#define MDP5_CTL_FLUSH_TIMING_3 0x10000000
+#define MDP5_CTL_FLUSH_TIMING_2 0x20000000
+#define MDP5_CTL_FLUSH_TIMING_1 0x40000000
+#define MDP5_CTL_FLUSH_TIMING_0 0x80000000
+
+static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __offset_CTL(i0); }
+
+static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000020 + __offset_CTL(i0); }
+
+static inline uint32_t __offset_LAYER_EXT(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00000040;
+ case 1: return 0x00000044;
+ case 2: return 0x00000048;
+ case 3: return 0x0000004c;
+ case 4: return 0x00000050;
+ case 5: return 0x00000054;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_CTL_LAYER_EXT(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); }
+
+static inline uint32_t REG_MDP5_CTL_LAYER_EXT_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); }
+#define MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3 0x00000001
+#define MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3 0x00000004
+#define MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3 0x00000010
+#define MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3 0x00000040
+#define MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3 0x00000100
+#define MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3 0x00000400
+#define MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3 0x00001000
+#define MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3 0x00004000
+#define MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3 0x00010000
+#define MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3 0x00040000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK 0x00f00000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT 20
+static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR0(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK;
+}
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK 0x3c000000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT 26
+static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR1(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK;
+}
+
+static inline uint32_t __offset_PIPE(enum mdp5_pipe idx)
+{
+ switch (idx) {
+ case SSPP_NONE: return (INVALID_IDX(idx));
+ case SSPP_VIG0: return (mdp5_cfg->pipe_vig.base[0]);
+ case SSPP_VIG1: return (mdp5_cfg->pipe_vig.base[1]);
+ case SSPP_VIG2: return (mdp5_cfg->pipe_vig.base[2]);
+ case SSPP_RGB0: return (mdp5_cfg->pipe_rgb.base[0]);
+ case SSPP_RGB1: return (mdp5_cfg->pipe_rgb.base[1]);
+ case SSPP_RGB2: return (mdp5_cfg->pipe_rgb.base[2]);
+ case SSPP_DMA0: return (mdp5_cfg->pipe_dma.base[0]);
+ case SSPP_DMA1: return (mdp5_cfg->pipe_dma.base[1]);
+ case SSPP_VIG3: return (mdp5_cfg->pipe_vig.base[3]);
+ case SSPP_RGB3: return (mdp5_cfg->pipe_rgb.base[3]);
+ case SSPP_CURSOR0: return (mdp5_cfg->pipe_cursor.base[0]);
+ case SSPP_CURSOR1: return (mdp5_cfg->pipe_cursor.base[1]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_OP_MODE(enum mdp5_pipe i0) { return 0x00000200 + __offset_PIPE(i0); }
+#define MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__MASK 0x00080000
+#define MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT 19
+static inline uint32_t MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(enum mdp5_data_format val)
+{
+ return ((val) << MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT) & MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__MASK;
+}
+#define MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__MASK 0x00040000
+#define MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT 18
+static inline uint32_t MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(enum mdp5_data_format val)
+{
+ return ((val) << MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT) & MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__MASK;
+}
+#define MDP5_PIPE_OP_MODE_CSC_1_EN 0x00020000
+
+static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000002c4 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000002f0 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00000300 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(enum mdp5_pipe i0) { return 0x00000320 + __offset_PIPE(i0); }
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__MASK 0x00001fff
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__MASK;
+}
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__MASK 0x1fff0000
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__SHIFT 16
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(enum mdp5_pipe i0) { return 0x00000324 + __offset_PIPE(i0); }
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__MASK 0x00001fff
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__MASK;
+}
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__MASK 0x1fff0000
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__SHIFT 16
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(enum mdp5_pipe i0) { return 0x00000328 + __offset_PIPE(i0); }
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__MASK 0x00001fff
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__MASK;
+}
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__MASK 0x1fff0000
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__SHIFT 16
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(enum mdp5_pipe i0) { return 0x0000032c + __offset_PIPE(i0); }
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__MASK 0x00001fff
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__MASK;
+}
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__MASK 0x1fff0000
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__SHIFT 16
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(enum mdp5_pipe i0) { return 0x00000330 + __offset_PIPE(i0); }
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__MASK 0x00001fff
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_CLAMP(enum mdp5_pipe i0, uint32_t i1) { return 0x00000334 + __offset_PIPE(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_CLAMP_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000334 + __offset_PIPE(i0) + 0x4*i1; }
+#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__MASK 0x000000ff
+#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__SHIFT) & MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__MASK;
+}
+#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__MASK 0x0000ff00
+#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__SHIFT 8
+static inline uint32_t MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__SHIFT) & MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_CLAMP(enum mdp5_pipe i0, uint32_t i1) { return 0x00000340 + __offset_PIPE(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_CLAMP_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000340 + __offset_PIPE(i0) + 0x4*i1; }
+#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__MASK 0x000000ff
+#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__SHIFT) & MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__MASK;
+}
+#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__MASK 0x0000ff00
+#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__SHIFT 8
+static inline uint32_t MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__SHIFT) & MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_BIAS(enum mdp5_pipe i0, uint32_t i1) { return 0x0000034c + __offset_PIPE(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_BIAS_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x0000034c + __offset_PIPE(i0) + 0x4*i1; }
+#define MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__MASK 0x000001ff
+#define MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__SHIFT) & MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_BIAS(enum mdp5_pipe i0, uint32_t i1) { return 0x00000358 + __offset_PIPE(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_BIAS_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000358 + __offset_PIPE(i0) + 0x4*i1; }
+#define MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__MASK 0x000001ff
+#define MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__SHIFT) & MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00000004 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00000008 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_XY_Y__MASK 0xffff0000
+#define MDP5_PIPE_SRC_XY_Y__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_XY_Y(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_XY_Y__SHIFT) & MDP5_PIPE_SRC_XY_Y__MASK;
+}
+#define MDP5_PIPE_SRC_XY_X__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_XY_X__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_XY_X(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_XY_X__SHIFT) & MDP5_PIPE_SRC_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000000c + __offset_PIPE(i0); }
+#define MDP5_PIPE_OUT_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_PIPE_OUT_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_OUT_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_OUT_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_PIPE_OUT_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT) & MDP5_PIPE_OUT_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00000010 + __offset_PIPE(i0); }
+#define MDP5_PIPE_OUT_XY_Y__MASK 0xffff0000
+#define MDP5_PIPE_OUT_XY_Y__SHIFT 16
+static inline uint32_t MDP5_PIPE_OUT_XY_Y(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_OUT_XY_Y__SHIFT) & MDP5_PIPE_OUT_XY_Y__MASK;
+}
+#define MDP5_PIPE_OUT_XY_X__MASK 0x0000ffff
+#define MDP5_PIPE_OUT_XY_X__SHIFT 0
+static inline uint32_t MDP5_PIPE_OUT_XY_X(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_OUT_XY_X__SHIFT) & MDP5_PIPE_OUT_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00000014 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00000018 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000001c + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00000020 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00000024 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P0(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P0__MASK;
+}
+#define MDP5_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000
+#define MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P1(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P1__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00000028 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P2(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P2__MASK;
+}
+#define MDP5_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000
+#define MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P3(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P3__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000002c + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00000030 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
+#define MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_G_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
+#define MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_B_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
+#define MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_R_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
+#define MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_A_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100
+#define MDP5_PIPE_SRC_FORMAT_CPP__MASK 0x00000600
+#define MDP5_PIPE_SRC_FORMAT_CPP__SHIFT 9
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_CPP(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CPP__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_ROT90 0x00000800
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00003000
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 12
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
+#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK 0x00180000
+#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT 19
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(enum mdp_fetch_type val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT) & MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x01800000
+#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 23
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp_chroma_samp_type val)
+{
+ return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00000034 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
+#define MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM0__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00
+#define MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT 8
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM1(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM1__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000
+#define MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM2(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM2__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000
+#define MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT 24
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM3__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00000038 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_OP_MODE_BWC_EN 0x00000001
+#define MDP5_PIPE_SRC_OP_MODE_BWC__MASK 0x00000006
+#define MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT 1
+static inline uint32_t MDP5_PIPE_SRC_OP_MODE_BWC(enum mdp5_pipe_bwc val)
+{
+ return ((val) << MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT) & MDP5_PIPE_SRC_OP_MODE_BWC__MASK;
+}
+#define MDP5_PIPE_SRC_OP_MODE_FLIP_LR 0x00002000
+#define MDP5_PIPE_SRC_OP_MODE_FLIP_UD 0x00004000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_EN 0x00010000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_0 0x00020000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_1 0x00040000
+#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE 0x00400000
+#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD 0x00800000
+#define MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE 0x80000000
+
+static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000003c + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00000048 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000004c + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00000050 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00000054 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00000058 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00000070 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000000a4 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000000a8 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000000ac + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000000b0 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000000b4 + __offset_PIPE(i0); }
+#define MDP5_PIPE_DECIMATION_VERT__MASK 0x000000ff
+#define MDP5_PIPE_DECIMATION_VERT__SHIFT 0
+static inline uint32_t MDP5_PIPE_DECIMATION_VERT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_DECIMATION_VERT__SHIFT) & MDP5_PIPE_DECIMATION_VERT__MASK;
+}
+#define MDP5_PIPE_DECIMATION_HORZ__MASK 0x0000ff00
+#define MDP5_PIPE_DECIMATION_HORZ__SHIFT 8
+static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK;
+}
+
+static inline uint32_t __offset_SW_PIX_EXT(enum mdp_component_type idx)
+{
+ switch (idx) {
+ case COMP_0: return 0x00000100;
+ case COMP_1_2: return 0x00000110;
+ case COMP_3: return 0x00000120;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000000 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); }
+
+static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_LR(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000000 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); }
+#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__MASK 0x000000ff
+#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__SHIFT 0
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__MASK;
+}
+#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__MASK 0x0000ff00
+#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__SHIFT 8
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF(int32_t val)
+{
+ return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__MASK;
+}
+#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__MASK 0x00ff0000
+#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__SHIFT 16
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__MASK;
+}
+#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__MASK 0xff000000
+#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__SHIFT 24
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF(int32_t val)
+{
+ return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_TB(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000004 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); }
+#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__MASK 0x000000ff
+#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__SHIFT 0
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__MASK;
+}
+#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__MASK 0x0000ff00
+#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__SHIFT 8
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF(int32_t val)
+{
+ return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__MASK;
+}
+#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__MASK 0x00ff0000
+#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__SHIFT 16
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__MASK;
+}
+#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__MASK 0xff000000
+#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__SHIFT 24
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF(int32_t val)
+{
+ return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000008 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); }
+#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__MASK 0x0000ffff
+#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__SHIFT 0
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__MASK;
+}
+#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__MASK 0xffff0000
+#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__SHIFT 16
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(uint32_t val)
+{
+ return ((val) << MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__SHIFT) & MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00000204 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK 0x00000300
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT 8
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK 0x00000c00
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT 10
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK 0x00003000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT 12
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK 0x0000c000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT 14
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK 0x00030000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT 16
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK 0x000c0000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT 18
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(enum mdp5_scale_filter val)
+{
+ return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000210 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00000214 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000218 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x0000021c + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00000220 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00000224 + __offset_PIPE(i0); }
+
+static inline uint32_t __offset_LM(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->lm.base[0]);
+ case 1: return (mdp5_cfg->lm.base[1]);
+ case 2: return (mdp5_cfg->lm.base[2]);
+ case 3: return (mdp5_cfg->lm.base[3]);
+ case 4: return (mdp5_cfg->lm.base[4]);
+ case 5: return (mdp5_cfg->lm.base[5]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00000000 + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00000000 + __offset_LM(i0); }
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA 0x00000002
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA 0x00000020
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA 0x00000040
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA 0x00000080
+#define MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT 0x80000000
+
+static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00000004 + __offset_LM(i0); }
+#define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_LM_OUT_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_LM_OUT_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP5_LM_OUT_SIZE_HEIGHT__SHIFT) & MDP5_LM_OUT_SIZE_HEIGHT__MASK;
+}
+#define MDP5_LM_OUT_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_LM_OUT_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_LM_OUT_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP5_LM_OUT_SIZE_WIDTH__SHIFT) & MDP5_LM_OUT_SIZE_WIDTH__MASK;
+}
+
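+/*
+ * Illustrative sketch (editorial): the layer-mixer output size is packed
+ * from the two fields above and written through the driver's MMIO helper,
+ * much as the CRTC code does when a mode is set:
+ *
+ *	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
+ *		   MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
+ *		   MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
+ */
+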
+static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00000008 + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00000010 + __offset_LM(i0); }
+
+static inline uint32_t __offset_BLEND(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00000020;
+ case 1: return 0x00000050;
+ case 2: return 0x00000080;
+ case 3: return 0x000000b0;
+ case 4: return 0x00000230;
+ case 5: return 0x00000260;
+ case 6: return 0x00000290;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); }
+#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003
+#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0
+static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val)
+{
+ return ((val) << MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK;
+}
+#define MDP5_LM_BLEND_OP_MODE_FG_INV_ALPHA 0x00000004
+#define MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA 0x00000008
+#define MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA 0x00000010
+#define MDP5_LM_BLEND_OP_MODE_FG_TRANSP_EN 0x00000020
+#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK 0x00000300
+#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT 8
+static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val)
+{
+ return ((val) << MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK;
+}
+#define MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA 0x00000400
+#define MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA 0x00000800
+#define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000
+#define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000
+
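+/*
+ * Illustrative sketch (editorial): per-stage blending ORs together an FG
+ * and a BG alpha source (enum mdp_alpha_type) plus optional modifier
+ * bits, in the spirit of the CRTC blend setup for stage i of mixer lm:
+ *
+ *	blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+ *		   MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
+ *	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm, i), blend_op);
+ */
+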
+static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000001c + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK 0x0000ffff
+#define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_IMG_SIZE_SRC_W(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_IMG_SIZE_SRC_W__SHIFT) & MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK;
+}
+#define MDP5_LM_CURSOR_IMG_SIZE_SRC_H__MASK 0xffff0000
+#define MDP5_LM_CURSOR_IMG_SIZE_SRC_H__SHIFT 16
+static inline uint32_t MDP5_LM_CURSOR_IMG_SIZE_SRC_H(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_IMG_SIZE_SRC_H__SHIFT) & MDP5_LM_CURSOR_IMG_SIZE_SRC_H__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000000e4 + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_SIZE_ROI_W__MASK 0x0000ffff
+#define MDP5_LM_CURSOR_SIZE_ROI_W__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_SIZE_ROI_W(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_SIZE_ROI_W__SHIFT) & MDP5_LM_CURSOR_SIZE_ROI_W__MASK;
+}
+#define MDP5_LM_CURSOR_SIZE_ROI_H__MASK 0xffff0000
+#define MDP5_LM_CURSOR_SIZE_ROI_H__SHIFT 16
+static inline uint32_t MDP5_LM_CURSOR_SIZE_ROI_H(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_SIZE_ROI_H__SHIFT) & MDP5_LM_CURSOR_SIZE_ROI_H__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000000e8 + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_XY_SRC_X__MASK 0x0000ffff
+#define MDP5_LM_CURSOR_XY_SRC_X__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_XY_SRC_X(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_XY_SRC_X__SHIFT) & MDP5_LM_CURSOR_XY_SRC_X__MASK;
+}
+#define MDP5_LM_CURSOR_XY_SRC_Y__MASK 0xffff0000
+#define MDP5_LM_CURSOR_XY_SRC_Y__SHIFT 16
+static inline uint32_t MDP5_LM_CURSOR_XY_SRC_Y(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_XY_SRC_Y__SHIFT) & MDP5_LM_CURSOR_XY_SRC_Y__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000000dc + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_STRIDE_STRIDE__MASK 0x0000ffff
+#define MDP5_LM_CURSOR_STRIDE_STRIDE__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_STRIDE_STRIDE(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_STRIDE_STRIDE__SHIFT) & MDP5_LM_CURSOR_STRIDE_STRIDE__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000000ec + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_FORMAT_FORMAT__MASK 0x00000007
+#define MDP5_LM_CURSOR_FORMAT_FORMAT__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_FORMAT_FORMAT(enum mdp5_cursor_format val)
+{
+ return ((val) << MDP5_LM_CURSOR_FORMAT_FORMAT__SHIFT) & MDP5_LM_CURSOR_FORMAT_FORMAT__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000000f0 + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000000f4 + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_START_XY_X_START__MASK 0x0000ffff
+#define MDP5_LM_CURSOR_START_XY_X_START__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_START_XY_X_START(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_START_XY_X_START__SHIFT) & MDP5_LM_CURSOR_START_XY_X_START__MASK;
+}
+#define MDP5_LM_CURSOR_START_XY_Y_START__MASK 0xffff0000
+#define MDP5_LM_CURSOR_START_XY_Y_START__SHIFT 16
+static inline uint32_t MDP5_LM_CURSOR_START_XY_Y_START(uint32_t val)
+{
+ return ((val) << MDP5_LM_CURSOR_START_XY_Y_START__SHIFT) & MDP5_LM_CURSOR_START_XY_Y_START__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000000f8 + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN 0x00000001
+#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__MASK 0x00000006
+#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__SHIFT 1
+static inline uint32_t MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(enum mdp5_cursor_alpha val)
+{
+ return ((val) << MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__SHIFT) & MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__MASK;
+}
+#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN 0x00000008
+
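+/*
+ * Illustrative sketch (editorial): a minimal cursor setup touches the
+ * CURSOR_* registers above roughly in this order (cf. the CRTC cursor
+ * code; width, height, stride and cursor_addr are caller-supplied):
+ *
+ *	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
+ *	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
+ *		   MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
+ *	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
+ *		   MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
+ *		   MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
+ *	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);
+ *	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm),
+ *		   MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN |
+ *		   MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(
+ *				CURSOR_ALPHA_PER_PIXEL));
+ */
+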
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000000fc + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00000100 + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00000104 + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00000108 + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000010c + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00000110 + __offset_LM(i0); }
+
+static inline uint32_t __offset_DSPP(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->dspp.base[0]);
+ case 1: return (mdp5_cfg->dspp.base[1]);
+ case 2: return (mdp5_cfg->dspp.base[2]);
+ case 3: return (mdp5_cfg->dspp.base[3]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); }
+#define MDP5_DSPP_OP_MODE_IGC_LUT_EN 0x00000001
+#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK 0x0000000e
+#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT 1
+static inline uint32_t MDP5_DSPP_OP_MODE_IGC_TBL_IDX(uint32_t val)
+{
+ return ((val) << MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT) & MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK;
+}
+#define MDP5_DSPP_OP_MODE_PCC_EN 0x00000010
+#define MDP5_DSPP_OP_MODE_DITHER_EN 0x00000100
+#define MDP5_DSPP_OP_MODE_HIST_EN 0x00010000
+#define MDP5_DSPP_OP_MODE_AUTO_CLEAR 0x00020000
+#define MDP5_DSPP_OP_MODE_HIST_LUT_EN 0x00080000
+#define MDP5_DSPP_OP_MODE_PA_EN 0x00100000
+#define MDP5_DSPP_OP_MODE_GAMUT_EN 0x00800000
+#define MDP5_DSPP_OP_MODE_GAMUT_ORDER 0x01000000
+
+static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00000030 + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00000150 + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00000210 + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00000230 + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00000234 + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00000238 + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000002dc + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000002b0 + __offset_DSPP(i0); }
+
+static inline uint32_t __offset_PP(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->pp.base[0]);
+ case 1: return (mdp5_cfg->pp.base[1]);
+ case 2: return (mdp5_cfg->pp.base[2]);
+ case 3: return (mdp5_cfg->pp.base[3]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_PP(uint32_t i0) { return 0x00000000 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_TEAR_CHECK_EN(uint32_t i0) { return 0x00000000 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_VSYNC(uint32_t i0) { return 0x00000004 + __offset_PP(i0); }
+#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK 0x0007ffff
+#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT 0
+static inline uint32_t MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT) & MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK;
+}
+#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN 0x00080000
+#define MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN 0x00100000
+
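+/*
+ * Illustrative sketch (editorial): command-mode tear check enables the
+ * vsync counter via the fields above, in the spirit of the command
+ * encoder's pingpong setup (vclks_line, the vsync-clock ticks per line,
+ * is derived by the encoder code and assumed here):
+ *
+ *	mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id),
+ *		   MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line) |
+ *		   MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN |
+ *		   MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN);
+ */
+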
+static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_HEIGHT(uint32_t i0) { return 0x00000008 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_SYNC_WRCOUNT(uint32_t i0) { return 0x0000000c + __offset_PP(i0); }
+#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK 0x0000ffff
+#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT 0
+static inline uint32_t MDP5_PP_SYNC_WRCOUNT_LINE_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK;
+}
+#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK 0xffff0000
+#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT 16
+static inline uint32_t MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK;
+}
+
+static inline uint32_t REG_MDP5_PP_VSYNC_INIT_VAL(uint32_t i0) { return 0x00000010 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_INT_COUNT_VAL(uint32_t i0) { return 0x00000014 + __offset_PP(i0); }
+#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK 0x0000ffff
+#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT 0
+static inline uint32_t MDP5_PP_INT_COUNT_VAL_LINE_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK;
+}
+#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK 0xffff0000
+#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT 16
+static inline uint32_t MDP5_PP_INT_COUNT_VAL_FRAME_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK;
+}
+
+static inline uint32_t REG_MDP5_PP_SYNC_THRESH(uint32_t i0) { return 0x00000018 + __offset_PP(i0); }
+#define MDP5_PP_SYNC_THRESH_START__MASK 0x0000ffff
+#define MDP5_PP_SYNC_THRESH_START__SHIFT 0
+static inline uint32_t MDP5_PP_SYNC_THRESH_START(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_THRESH_START__SHIFT) & MDP5_PP_SYNC_THRESH_START__MASK;
+}
+#define MDP5_PP_SYNC_THRESH_CONTINUE__MASK 0xffff0000
+#define MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT 16
+static inline uint32_t MDP5_PP_SYNC_THRESH_CONTINUE(uint32_t val)
+{
+ return ((val) << MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT) & MDP5_PP_SYNC_THRESH_CONTINUE__MASK;
+}
+
+static inline uint32_t REG_MDP5_PP_START_POS(uint32_t i0) { return 0x0000001c + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_RD_PTR_IRQ(uint32_t i0) { return 0x00000020 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_WR_PTR_IRQ(uint32_t i0) { return 0x00000024 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_OUT_LINE_COUNT(uint32_t i0) { return 0x00000028 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_PP_LINE_COUNT(uint32_t i0) { return 0x0000002c + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_AUTOREFRESH_CONFIG(uint32_t i0) { return 0x00000030 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_FBC_MODE(uint32_t i0) { return 0x00000034 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_FBC_BUDGET_CTL(uint32_t i0) { return 0x00000038 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_FBC_LOSSY_MODE(uint32_t i0) { return 0x0000003c + __offset_PP(i0); }
+
+static inline uint32_t __offset_WB(uint32_t idx)
+{
+ switch (idx) {
+#if 0 /* TEMPORARY until patch that adds wb.base[] is merged */
+ case 0: return (mdp5_cfg->wb.base[0]);
+ case 1: return (mdp5_cfg->wb.base[1]);
+ case 2: return (mdp5_cfg->wb.base[2]);
+ case 3: return (mdp5_cfg->wb.base[3]);
+ case 4: return (mdp5_cfg->wb.base[4]);
+#endif
+ default: return INVALID_IDX(idx);
+ }
+}
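+/*
+ * Editorial note: with the wb.base[] lookup compiled out above, every
+ * writeback index falls through to INVALID_IDX(), so the WB register
+ * helpers below cannot resolve a real base until that patch is merged.
+ */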
+static inline uint32_t REG_MDP5_WB(uint32_t i0) { return 0x00000000 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST_FORMAT(uint32_t i0) { return 0x00000000 + __offset_WB(i0); }
+#define MDP5_WB_DST_FORMAT_DSTC0_OUT__MASK 0x00000003
+#define MDP5_WB_DST_FORMAT_DSTC0_OUT__SHIFT 0
+static inline uint32_t MDP5_WB_DST_FORMAT_DSTC0_OUT(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_FORMAT_DSTC0_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC0_OUT__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DSTC1_OUT__MASK 0x0000000c
+#define MDP5_WB_DST_FORMAT_DSTC1_OUT__SHIFT 2
+static inline uint32_t MDP5_WB_DST_FORMAT_DSTC1_OUT(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_FORMAT_DSTC1_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC1_OUT__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DSTC2_OUT__MASK 0x00000030
+#define MDP5_WB_DST_FORMAT_DSTC2_OUT__SHIFT 4
+static inline uint32_t MDP5_WB_DST_FORMAT_DSTC2_OUT(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_FORMAT_DSTC2_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC2_OUT__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DSTC3_OUT__MASK 0x000000c0
+#define MDP5_WB_DST_FORMAT_DSTC3_OUT__SHIFT 6
+static inline uint32_t MDP5_WB_DST_FORMAT_DSTC3_OUT(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_FORMAT_DSTC3_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC3_OUT__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DSTC3_EN 0x00000100
+#define MDP5_WB_DST_FORMAT_DST_BPP__MASK 0x00000600
+#define MDP5_WB_DST_FORMAT_DST_BPP__SHIFT 9
+static inline uint32_t MDP5_WB_DST_FORMAT_DST_BPP(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_FORMAT_DST_BPP__SHIFT) & MDP5_WB_DST_FORMAT_DST_BPP__MASK;
+}
+#define MDP5_WB_DST_FORMAT_PACK_COUNT__MASK 0x00003000
+#define MDP5_WB_DST_FORMAT_PACK_COUNT__SHIFT 12
+static inline uint32_t MDP5_WB_DST_FORMAT_PACK_COUNT(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_FORMAT_PACK_COUNT__SHIFT) & MDP5_WB_DST_FORMAT_PACK_COUNT__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DST_ALPHA_X 0x00004000
+#define MDP5_WB_DST_FORMAT_PACK_TIGHT 0x00020000
+#define MDP5_WB_DST_FORMAT_PACK_ALIGN_MSB 0x00040000
+#define MDP5_WB_DST_FORMAT_WRITE_PLANES__MASK 0x00180000
+#define MDP5_WB_DST_FORMAT_WRITE_PLANES__SHIFT 19
+static inline uint32_t MDP5_WB_DST_FORMAT_WRITE_PLANES(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_FORMAT_WRITE_PLANES__SHIFT) & MDP5_WB_DST_FORMAT_WRITE_PLANES__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DST_DITHER_EN 0x00400000
+#define MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__MASK 0x03800000
+#define MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__SHIFT 23
+static inline uint32_t MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__SHIFT) & MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__MASK 0x3c000000
+#define MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__SHIFT 26
+static inline uint32_t MDP5_WB_DST_FORMAT_DST_CHROMA_SITE(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__SHIFT) & MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__MASK;
+}
+#define MDP5_WB_DST_FORMAT_FRAME_FORMAT__MASK 0xc0000000
+#define MDP5_WB_DST_FORMAT_FRAME_FORMAT__SHIFT 30
+static inline uint32_t MDP5_WB_DST_FORMAT_FRAME_FORMAT(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_FORMAT_FRAME_FORMAT__SHIFT) & MDP5_WB_DST_FORMAT_FRAME_FORMAT__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_DST_OP_MODE(uint32_t i0) { return 0x00000004 + __offset_WB(i0); }
+#define MDP5_WB_DST_OP_MODE_BWC_ENC_EN 0x00000001
+#define MDP5_WB_DST_OP_MODE_BWC_ENC_OP__MASK 0x00000006
+#define MDP5_WB_DST_OP_MODE_BWC_ENC_OP__SHIFT 1
+static inline uint32_t MDP5_WB_DST_OP_MODE_BWC_ENC_OP(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_OP_MODE_BWC_ENC_OP__SHIFT) & MDP5_WB_DST_OP_MODE_BWC_ENC_OP__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_BLOCK_SIZE__MASK 0x00000010
+#define MDP5_WB_DST_OP_MODE_BLOCK_SIZE__SHIFT 4
+static inline uint32_t MDP5_WB_DST_OP_MODE_BLOCK_SIZE(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_OP_MODE_BLOCK_SIZE__SHIFT) & MDP5_WB_DST_OP_MODE_BLOCK_SIZE__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_ROT_MODE__MASK 0x00000020
+#define MDP5_WB_DST_OP_MODE_ROT_MODE__SHIFT 5
+static inline uint32_t MDP5_WB_DST_OP_MODE_ROT_MODE(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_OP_MODE_ROT_MODE__SHIFT) & MDP5_WB_DST_OP_MODE_ROT_MODE__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_ROT_EN 0x00000040
+#define MDP5_WB_DST_OP_MODE_CSC_EN 0x00000100
+#define MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__MASK 0x00000200
+#define MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT 9
+static inline uint32_t MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__MASK 0x00000400
+#define MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT 10
+static inline uint32_t MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_EN 0x00000800
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__MASK 0x00001000
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__SHIFT 12
+static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__MASK 0x00002000
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__SHIFT 13
+static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__MASK 0x00004000
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__SHIFT 14
+static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_DST_PACK_PATTERN(uint32_t i0) { return 0x00000008 + __offset_WB(i0); }
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT0__MASK 0x00000003
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT0__SHIFT 0
+static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT0(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT0__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT0__MASK;
+}
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT1__MASK 0x00000300
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT1__SHIFT 8
+static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT1(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT1__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT1__MASK;
+}
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT2__MASK 0x00030000
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT2__SHIFT 16
+static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT2(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT2__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT2__MASK;
+}
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT3__MASK 0x03000000
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT3__SHIFT 24
+static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT3(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT3__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT3__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_DST0_ADDR(uint32_t i0) { return 0x0000000c + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST1_ADDR(uint32_t i0) { return 0x00000010 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST2_ADDR(uint32_t i0) { return 0x00000014 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST3_ADDR(uint32_t i0) { return 0x00000018 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST_YSTRIDE0(uint32_t i0) { return 0x0000001c + __offset_WB(i0); }
+#define MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__MASK 0x0000ffff
+#define MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__SHIFT 0
+static inline uint32_t MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__MASK;
+}
+#define MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__MASK 0xffff0000
+#define MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__SHIFT 16
+static inline uint32_t MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_DST_YSTRIDE1(uint32_t i0) { return 0x00000020 + __offset_WB(i0); }
+#define MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__MASK 0x0000ffff
+#define MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__SHIFT 0
+static inline uint32_t MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__MASK;
+}
+#define MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__MASK 0xffff0000
+#define MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__SHIFT 16
+static inline uint32_t MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE(uint32_t val)
+{
+ return ((val) << MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_DST_DITHER_BITDEPTH(uint32_t i0) { return 0x00000024 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW0(uint32_t i0) { return 0x00000030 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW1(uint32_t i0) { return 0x00000034 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW2(uint32_t i0) { return 0x00000038 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW3(uint32_t i0) { return 0x0000003c + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST_WRITE_CONFIG(uint32_t i0) { return 0x00000048 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_ROTATION_DNSCALER(uint32_t i0) { return 0x00000050 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_X_0_3(uint32_t i0) { return 0x00000060 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_X_1_2(uint32_t i0) { return 0x00000064 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_Y_0_3(uint32_t i0) { return 0x00000068 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_Y_1_2(uint32_t i0) { return 0x0000006c + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_OUT_SIZE(uint32_t i0) { return 0x00000074 + __offset_WB(i0); }
+#define MDP5_WB_OUT_SIZE_DST_W__MASK 0x0000ffff
+#define MDP5_WB_OUT_SIZE_DST_W__SHIFT 0
+static inline uint32_t MDP5_WB_OUT_SIZE_DST_W(uint32_t val)
+{
+ return ((val) << MDP5_WB_OUT_SIZE_DST_W__SHIFT) & MDP5_WB_OUT_SIZE_DST_W__MASK;
+}
+#define MDP5_WB_OUT_SIZE_DST_H__MASK 0xffff0000
+#define MDP5_WB_OUT_SIZE_DST_H__SHIFT 16
+static inline uint32_t MDP5_WB_OUT_SIZE_DST_H(uint32_t val)
+{
+ return ((val) << MDP5_WB_OUT_SIZE_DST_H__SHIFT) & MDP5_WB_OUT_SIZE_DST_H__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_ALPHA_X_VALUE(uint32_t i0) { return 0x00000078 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_0(uint32_t i0) { return 0x00000260 + __offset_WB(i0); }
+#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__MASK 0x00001fff
+#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__MASK;
+}
+#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__MASK 0x1fff0000
+#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__SHIFT 16
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_1(uint32_t i0) { return 0x00000264 + __offset_WB(i0); }
+#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__MASK 0x00001fff
+#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__MASK;
+}
+#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__MASK 0x1fff0000
+#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__SHIFT 16
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_2(uint32_t i0) { return 0x00000268 + __offset_WB(i0); }
+#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__MASK 0x00001fff
+#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__MASK;
+}
+#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__MASK 0x1fff0000
+#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__SHIFT 16
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_3(uint32_t i0) { return 0x0000026c + __offset_WB(i0); }
+#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__MASK 0x00001fff
+#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__MASK;
+}
+#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__MASK 0x1fff0000
+#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__SHIFT 16
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_4(uint32_t i0) { return 0x00000270 + __offset_WB(i0); }
+#define MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__MASK 0x00001fff
+#define MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_PRECLAMP(uint32_t i0, uint32_t i1) { return 0x00000274 + __offset_WB(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_PRECLAMP_REG(uint32_t i0, uint32_t i1) { return 0x00000274 + __offset_WB(i0) + 0x4*i1; }
+#define MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__MASK 0x000000ff
+#define MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__SHIFT) & MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__MASK;
+}
+#define MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__MASK 0x0000ff00
+#define MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__SHIFT 8
+static inline uint32_t MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__SHIFT) & MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTCLAMP(uint32_t i0, uint32_t i1) { return 0x00000280 + __offset_WB(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTCLAMP_REG(uint32_t i0, uint32_t i1) { return 0x00000280 + __offset_WB(i0) + 0x4*i1; }
+#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__MASK 0x000000ff
+#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__SHIFT) & MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__MASK;
+}
+#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__MASK 0x0000ff00
+#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__SHIFT 8
+static inline uint32_t MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__SHIFT) & MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_PREBIAS(uint32_t i0, uint32_t i1) { return 0x0000028c + __offset_WB(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_PREBIAS_REG(uint32_t i0, uint32_t i1) { return 0x0000028c + __offset_WB(i0) + 0x4*i1; }
+#define MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__MASK 0x000001ff
+#define MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__SHIFT) & MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTBIAS(uint32_t i0, uint32_t i1) { return 0x00000298 + __offset_WB(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTBIAS_REG(uint32_t i0, uint32_t i1) { return 0x00000298 + __offset_WB(i0) + 0x4*i1; }
+#define MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__MASK 0x000001ff
+#define MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE(uint32_t val)
+{
+ return ((val) << MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__SHIFT) & MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__MASK;
+}
+
+static inline uint32_t __offset_INTF(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->intf.base[0]);
+ case 1: return (mdp5_cfg->intf.base[1]);
+ case 2: return (mdp5_cfg->intf.base[2]);
+ case 3: return (mdp5_cfg->intf.base[3]);
+ case 4: return (mdp5_cfg->intf.base[4]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00000004 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00000008 + __offset_INTF(i0); }
+#define MDP5_INTF_HSYNC_CTL_PULSEW__MASK 0x0000ffff
+#define MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT 0
+static inline uint32_t MDP5_INTF_HSYNC_CTL_PULSEW(uint32_t val)
+{
+ return ((val) << MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT) & MDP5_INTF_HSYNC_CTL_PULSEW__MASK;
+}
+#define MDP5_INTF_HSYNC_CTL_PERIOD__MASK 0xffff0000
+#define MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT 16
+static inline uint32_t MDP5_INTF_HSYNC_CTL_PERIOD(uint32_t val)
+{
+ return ((val) << MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT) & MDP5_INTF_HSYNC_CTL_PERIOD__MASK;
+}
+
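+/*
+ * Illustrative sketch (editorial): video-mode timing programs the hsync
+ * pulse width and period straight from the DRM display mode, e.g.:
+ *
+ *	mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
+ *		   MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end -
+ *					      mode->hsync_start) |
+ *		   MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
+ */
+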
+static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0000000c + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00000010 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00000014 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00000018 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0000001c + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00000020 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00000024 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00000028 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0000002c + __offset_INTF(i0); }
+#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK 0x7fffffff
+#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT 0
+static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val)
+{
+ return ((val) << MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK;
+}
+#define MDP5_INTF_ACTIVE_VSTART_F0_ACTIVE_V_ENABLE 0x80000000
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00000030 + __offset_INTF(i0); }
+#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK 0x7fffffff
+#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT 0
+static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val)
+{
+ return ((val) << MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK;
+}
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00000034 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00000038 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0000003c + __offset_INTF(i0); }
+#define MDP5_INTF_DISPLAY_HCTL_START__MASK 0x0000ffff
+#define MDP5_INTF_DISPLAY_HCTL_START__SHIFT 0
+static inline uint32_t MDP5_INTF_DISPLAY_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP5_INTF_DISPLAY_HCTL_START__SHIFT) & MDP5_INTF_DISPLAY_HCTL_START__MASK;
+}
+#define MDP5_INTF_DISPLAY_HCTL_END__MASK 0xffff0000
+#define MDP5_INTF_DISPLAY_HCTL_END__SHIFT 16
+static inline uint32_t MDP5_INTF_DISPLAY_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP5_INTF_DISPLAY_HCTL_END__SHIFT) & MDP5_INTF_DISPLAY_HCTL_END__MASK;
+}
+
+static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00000040 + __offset_INTF(i0); }
+#define MDP5_INTF_ACTIVE_HCTL_START__MASK 0x00007fff
+#define MDP5_INTF_ACTIVE_HCTL_START__SHIFT 0
+static inline uint32_t MDP5_INTF_ACTIVE_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP5_INTF_ACTIVE_HCTL_START__SHIFT) & MDP5_INTF_ACTIVE_HCTL_START__MASK;
+}
+#define MDP5_INTF_ACTIVE_HCTL_END__MASK 0x7fff0000
+#define MDP5_INTF_ACTIVE_HCTL_END__SHIFT 16
+static inline uint32_t MDP5_INTF_ACTIVE_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP5_INTF_ACTIVE_HCTL_END__SHIFT) & MDP5_INTF_ACTIVE_HCTL_END__MASK;
+}
+#define MDP5_INTF_ACTIVE_HCTL_ACTIVE_H_ENABLE 0x80000000
+
+static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00000044 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00000048 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0000004c + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00000050 + __offset_INTF(i0); }
+#define MDP5_INTF_POLARITY_CTL_HSYNC_LOW 0x00000001
+#define MDP5_INTF_POLARITY_CTL_VSYNC_LOW 0x00000002
+#define MDP5_INTF_POLARITY_CTL_DATA_EN_LOW 0x00000004
+
+static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00000054 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00000058 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0000005c + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00000084 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00000090 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000000a8 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000000ac + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000000b0 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000000f0 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000000f4 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000000f8 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00000100 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00000104 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00000108 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0000010c + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00000110 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00000114 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00000118 + __offset_INTF(i0); }
+
+static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0000011c + __offset_INTF(i0); }
+
+static inline uint32_t __offset_AD(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->ad.base[0]);
+ case 1: return (mdp5_cfg->ad.base[1]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00000000 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00000000 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00000004 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00000008 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0000000c + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00000010 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00000014 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00000018 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0000001c + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00000020 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00000024 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00000028 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0000002c + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00000030 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00000034 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00000038 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0000007c + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000000c8 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000000cc + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000000d0 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000000d4 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000000d8 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000000dc + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000000e0 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000000e8 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000000ec + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000000f0 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000000f4 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000000f8 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00000100 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00000144 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00000148 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0000014c + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00000154 + __offset_AD(i0); }
+
+static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00000158 + __offset_AD(i0); }
+
+
+#endif /* MDP5_XML */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
new file mode 100644
index 000000000..1f1555aa0
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -0,0 +1,1333 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ */
+
+#include "mdp5_kms.h"
+#include "mdp5_cfg.h"
+
+struct mdp5_cfg_handler {
+ int revision;
+ struct mdp5_cfg config;
+};
+
+/* mdp5_cfg must be exposed (used in mdp5.xml.h) */
+const struct mdp5_cfg_hw *mdp5_cfg = NULL;
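+/*
+ * Editorial note: mdp5_cfg_init() later in this file assigns this pointer
+ * once the hardware revision has been matched, roughly:
+ *
+ *	mdp5_cfg = mdp5_cfg_handler->config.hw;
+ *
+ * after which the __offset_*() helpers in mdp5.xml.h can resolve
+ * per-instance register bases from the matched config.
+ */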
+
+static const struct mdp5_cfg_hw msm8x74v1_config = {
+ .name = "msm8x74v1",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_SMP |
+ 0,
+ },
+ .smp = {
+ .mmb_count = 22,
+ .mmb_size = 4096,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7,
+ [SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+ [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18,
+ },
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
+ .flush_hw_mask = 0x0003ffff,
+ },
+ .pipe_vig = {
+ .count = 3,
+ .base = { 0x01100, 0x01500, 0x01900 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 3,
+ .base = { 0x01d00, 0x02100, 0x02500 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x02900, 0x02d00 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ 0,
+ },
+ .lm = {
+ .count = 5,
+ .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = 2,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ },
+ .nb_stages = 5,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 3,
+ .base = { 0x04500, 0x04900, 0x04d00 },
+ },
+ .pp = {
+ .count = 3,
+ .base = { 0x21a00, 0x21b00, 0x21c00 },
+ },
+ .intf = {
+ .base = { 0x21000, 0x21200, 0x21400, 0x21600 },
+ .connect = {
+ [0] = INTF_eDP,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ },
+ .perf = {
+ .ab_inefficiency = 200,
+ .ib_inefficiency = 120,
+ .clk_inefficiency = 125
+ },
+ .max_clk = 200000000,
+};
+
+static const struct mdp5_cfg_hw msm8x74v2_config = {
+ .name = "msm8x74",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_SMP |
+ 0,
+ },
+ .smp = {
+ .mmb_count = 22,
+ .mmb_size = 4096,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7,
+ [SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+ [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18,
+ },
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
+ .flush_hw_mask = 0x0003ffff,
+ },
+ .pipe_vig = {
+ .count = 3,
+ .base = { 0x01100, 0x01500, 0x01900 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_rgb = {
+ .count = 3,
+ .base = { 0x01d00, 0x02100, 0x02500 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x02900, 0x02d00 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
+ },
+ .lm = {
+ .count = 5,
+ .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = 2,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ },
+ .nb_stages = 5,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 3,
+ .base = { 0x04500, 0x04900, 0x04d00 },
+ },
+ .ad = {
+ .count = 2,
+ .base = { 0x13000, 0x13200 },
+ },
+ .pp = {
+ .count = 3,
+ .base = { 0x12c00, 0x12d00, 0x12e00 },
+ },
+ .intf = {
+ .base = { 0x12400, 0x12600, 0x12800, 0x12a00 },
+ .connect = {
+ [0] = INTF_eDP,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ },
+ .perf = {
+ .ab_inefficiency = 200,
+ .ib_inefficiency = 120,
+ .clk_inefficiency = 125
+ },
+ .max_clk = 320000000,
+};
+
+static const struct mdp5_cfg_hw apq8084_config = {
+ .name = "apq8084",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_SMP |
+ MDP_CAP_SRC_SPLIT |
+ 0,
+ },
+ .smp = {
+ .mmb_count = 44,
+ .mmb_size = 8192,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_VIG1] = 4,
+ [SSPP_VIG2] = 7, [SSPP_VIG3] = 19,
+ [SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+ [SSPP_RGB0] = 16, [SSPP_RGB1] = 17,
+ [SSPP_RGB2] = 18, [SSPP_RGB3] = 22,
+ },
+ .reserved_state[0] = GENMASK(7, 0), /* first 8 MMBs */
+ .reserved = {
+ /* Two SMP blocks are statically tied to RGB pipes: */
+ [16] = 2, [17] = 2, [18] = 2, [22] = 2,
+ },
+ },
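+	/*
+	 * Editorial note on the .smp block above: reserved_state[0] pre-marks
+	 * those MMBs as taken at init, and .reserved[] records how many MMBs
+	 * each client ID owns statically, so the SMP allocator never hands
+	 * them out dynamically.
+	 */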
+ .ctl = {
+ .count = 5,
+ .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
+ .flush_hw_mask = 0x003fffff,
+ },
+ .pipe_vig = {
+ .count = 4,
+ .base = { 0x01100, 0x01500, 0x01900, 0x01d00 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x02100, 0x02500, 0x02900, 0x02d00 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x03100, 0x03500 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
+ },
+ .lm = {
+ .count = 6,
+ .base = { 0x03900, 0x03d00, 0x04100, 0x04500, 0x04900, 0x04d00 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = 2,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 5, .pp = 3, .dspp = 3,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ },
+ .nb_stages = 5,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 4,
+ .base = { 0x05100, 0x05500, 0x05900, 0x05d00 },
+
+ },
+ .ad = {
+ .count = 3,
+ .base = { 0x13400, 0x13600, 0x13800 },
+ },
+ .pp = {
+ .count = 4,
+ .base = { 0x12e00, 0x12f00, 0x13000, 0x13100 },
+ },
+ .intf = {
+ .base = { 0x12400, 0x12600, 0x12800, 0x12a00, 0x12c00 },
+ .connect = {
+ [0] = INTF_eDP,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ },
+ .perf = {
+ .ab_inefficiency = 200,
+ .ib_inefficiency = 120,
+ .clk_inefficiency = 105
+ },
+ .max_clk = 320000000,
+};
+
+static const struct mdp5_cfg_hw msm8x16_config = {
+ .name = "msm8x16",
+ .mdp = {
+ .count = 1,
+ .base = { 0x0 },
+ .caps = MDP_CAP_SMP |
+ 0,
+ },
+ .smp = {
+ .mmb_count = 8,
+ .mmb_size = 8192,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_DMA0] = 4,
+ [SSPP_RGB0] = 7, [SSPP_RGB1] = 8,
+ },
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
+ .flush_hw_mask = 0x4003ffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x04000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x14000, 0x16000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x24000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
+ },
+ .lm = {
+ .count = 2, /* LM0 and LM3 */
+ .base = { 0x44000, 0x47000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ },
+ .nb_stages = 8,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+
+ },
+ .intf = {
+ .base = { 0x00000, 0x6a800 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ },
+ },
+ .perf = {
+ .ab_inefficiency = 100,
+ .ib_inefficiency = 200,
+ .clk_inefficiency = 105
+ },
+ .max_clk = 320000000,
+};
+
+static const struct mdp5_cfg_hw msm8x36_config = {
+ .name = "msm8x36",
+ .mdp = {
+ .count = 1,
+ .base = { 0x0 },
+ .caps = MDP_CAP_SMP |
+ 0,
+ },
+ .smp = {
+ .mmb_count = 8,
+ .mmb_size = 10240,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_DMA0] = 4,
+ [SSPP_RGB0] = 7, [SSPP_RGB1] = 8,
+ },
+ },
+ .ctl = {
+ .count = 3,
+ .base = { 0x01000, 0x01200, 0x01400 },
+ .flush_hw_mask = 0x4003ffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x04000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x14000, 0x16000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x24000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
+ },
+ .lm = {
+ .count = 2,
+ .base = { 0x44000, 0x47000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ },
+ .nb_stages = 8,
+ .max_width = 2560,
+ .max_height = 0xFFFF,
+ },
+ .pp = {
+ .count = 1,
+ .base = { 0x70000 },
+ },
+ .ad = {
+ .count = 1,
+ .base = { 0x78000 },
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+ },
+ .intf = {
+ .base = { 0x00000, 0x6a800, 0x6b000 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ },
+ },
+ .perf = {
+ .ab_inefficiency = 100,
+ .ib_inefficiency = 200,
+ .clk_inefficiency = 105
+ },
+ .max_clk = 366670000,
+};
+
+static const struct mdp5_cfg_hw msm8x94_config = {
+ .name = "msm8x94",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_SMP |
+ MDP_CAP_SRC_SPLIT |
+ 0,
+ },
+ .smp = {
+ .mmb_count = 44,
+ .mmb_size = 8192,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_VIG1] = 4,
+ [SSPP_VIG2] = 7, [SSPP_VIG3] = 19,
+ [SSPP_DMA0] = 10, [SSPP_DMA1] = 13,
+ [SSPP_RGB0] = 16, [SSPP_RGB1] = 17,
+ [SSPP_RGB2] = 18, [SSPP_RGB3] = 22,
+ },
+ .reserved_state[0] = GENMASK(23, 0), /* first 24 MMBs */
+ .reserved = {
+ [1] = 1, [4] = 1, [7] = 1, [19] = 1,
+ [16] = 5, [17] = 5, [18] = 5, [22] = 5,
+ },
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
+ .flush_hw_mask = 0xf0ffffff,
+ },
+ .pipe_vig = {
+ .count = 4,
+ .base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x24000, 0x26000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
+ },
+ .lm = {
+ .count = 6,
+ .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = 2,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 5, .pp = 3, .dspp = 3,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ },
+ .nb_stages = 8,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 4,
+ .base = { 0x54000, 0x56000, 0x58000, 0x5a000 },
+
+ },
+ .ad = {
+ .count = 3,
+ .base = { 0x78000, 0x78800, 0x79000 },
+ },
+ .pp = {
+ .count = 4,
+ .base = { 0x70000, 0x70800, 0x71000, 0x71800 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ },
+ .perf = {
+ .ab_inefficiency = 100,
+ .ib_inefficiency = 100,
+ .clk_inefficiency = 105
+ },
+ .max_clk = 400000000,
+};
+
+static const struct mdp5_cfg_hw msm8x96_config = {
+ .name = "msm8x96",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_DSC |
+ MDP_CAP_CDM |
+ MDP_CAP_SRC_SPLIT |
+ 0,
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
+ .flush_hw_mask = 0xf4ffffff,
+ },
+ .pipe_vig = {
+ .count = 4,
+ .base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x24000, 0x26000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 2,
+ .base = { 0x34000, 0x36000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
+ .lm = {
+ .count = 6,
+ .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 5, .pp = 3, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ },
+ .nb_stages = 8,
+ .max_width = 2560,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 2,
+ .base = { 0x54000, 0x56000 },
+ },
+ .ad = {
+ .count = 3,
+ .base = { 0x78000, 0x78800, 0x79000 },
+ },
+ .pp = {
+ .count = 4,
+ .base = { 0x70000, 0x70800, 0x71000, 0x71800 },
+ },
+ .cdm = {
+ .count = 1,
+ .base = { 0x79200 },
+ },
+ .dsc = {
+ .count = 2,
+ .base = { 0x80000, 0x80400 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ },
+ .perf = {
+ .ab_inefficiency = 100,
+ .ib_inefficiency = 200,
+ .clk_inefficiency = 105
+ },
+ .max_clk = 412500000,
+};
+
+static const struct mdp5_cfg_hw msm8x76_config = {
+ .name = "msm8x76",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_SMP |
+ MDP_CAP_DSC |
+ MDP_CAP_SRC_SPLIT |
+ 0,
+ },
+ .ctl = {
+ .count = 3,
+ .base = { 0x01000, 0x01200, 0x01400 },
+ .flush_hw_mask = 0xffffffff,
+ },
+ .smp = {
+ .mmb_count = 10,
+ .mmb_size = 10240,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_VIG1] = 9,
+ [SSPP_DMA0] = 4,
+ [SSPP_RGB0] = 7, [SSPP_RGB1] = 8,
+ },
+ },
+ .pipe_vig = {
+ .count = 2,
+ .base = { 0x04000, 0x06000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x14000, 0x16000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x24000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 1,
+ .base = { 0x440DC },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+ .lm = {
+ .count = 2,
+ .base = { 0x44000, 0x45000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ },
+ .nb_stages = 8,
+ .max_width = 2560,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+ },
+ .pp = {
+ .count = 3,
+ .base = { 0x70000, 0x70800, 0x72000 },
+ },
+ .dsc = {
+ .count = 2,
+ .base = { 0x80000, 0x80400 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800, 0x6b000 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ },
+ },
+ .max_clk = 360000000,
+};
+
+static const struct mdp5_cfg_hw msm8x53_config = {
+ .name = "msm8x53",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_CDM |
+ MDP_CAP_SRC_SPLIT,
+ },
+ .ctl = {
+ .count = 3,
+ .base = { 0x01000, 0x01200, 0x01400 },
+ .flush_hw_mask = 0xffffffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x04000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x14000, 0x16000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x24000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 1,
+ .base = { 0x34000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
+ .lm = {
+ .count = 3,
+ .base = { 0x44000, 0x45000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR },
+ { .id = 1, .pp = 1, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY },
+ },
+ .nb_stages = 5,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+ },
+ .pp = {
+ .count = 2,
+ .base = { 0x70000, 0x70800 },
+ },
+ .cdm = {
+ .count = 1,
+ .base = { 0x79200 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800, 0x6b000 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ },
+ },
+ .perf = {
+ .ab_inefficiency = 100,
+ .ib_inefficiency = 200,
+ .clk_inefficiency = 105
+ },
+ .max_clk = 400000000,
+};
+
+static const struct mdp5_cfg_hw msm8917_config = {
+ .name = "msm8917",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_CDM,
+ },
+ .ctl = {
+ .count = 3,
+ .base = { 0x01000, 0x01200, 0x01400 },
+ .flush_hw_mask = 0xffffffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x04000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x14000, 0x16000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x24000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 1,
+ .base = { 0x34000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
+ .lm = {
+ .count = 2,
+ .base = { 0x44000, 0x45000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ },
+ .nb_stages = 8,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+ },
+ .pp = {
+ .count = 1,
+ .base = { 0x70000 },
+ },
+ .cdm = {
+ .count = 1,
+ .base = { 0x79200 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ },
+ },
+ .max_clk = 320000000,
+};
+
+static const struct mdp5_cfg_hw msm8998_config = {
+ .name = "msm8998",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_DSC |
+ MDP_CAP_CDM |
+ MDP_CAP_SRC_SPLIT |
+ 0,
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
+ .flush_hw_mask = 0xf7ffffff,
+ },
+ .pipe_vig = {
+ .count = 4,
+ .base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 2, /* driver supports max of 2 currently */
+ .base = { 0x24000, 0x26000, 0x28000, 0x2a000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 2,
+ .base = { 0x34000, 0x36000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
+ .lm = {
+ .count = 6,
+ .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 5, .pp = 3, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ },
+ .nb_stages = 8,
+ .max_width = 2560,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 2,
+ .base = { 0x54000, 0x56000 },
+ },
+ .ad = {
+ .count = 3,
+ .base = { 0x78000, 0x78800, 0x79000 },
+ },
+ .pp = {
+ .count = 4,
+ .base = { 0x70000, 0x70800, 0x71000, 0x71800 },
+ },
+ .cdm = {
+ .count = 1,
+ .base = { 0x79200 },
+ },
+ .dsc = {
+ .count = 2,
+ .base = { 0x80000, 0x80400 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
+ .connect = {
+ [0] = INTF_eDP,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ },
+ .max_clk = 412500000,
+};
+
+static const struct mdp5_cfg_hw sdm630_config = {
+ .name = "sdm630",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_CDM |
+ MDP_CAP_SRC_SPLIT |
+ 0,
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
+ .flush_hw_mask = 0xf4ffffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x04000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 2, /* driver supports max of 2 currently */
+ .base = { 0x24000, 0x26000, 0x28000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 1,
+ .base = { 0x34000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
+ .lm = {
+ .count = 2,
+ .base = { 0x44000, 0x46000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 1, .pp = 1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ },
+ .nb_stages = 8,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+ },
+ .ad = {
+ .count = 2,
+ .base = { 0x78000, 0x78800 },
+ },
+ .pp = {
+ .count = 3,
+ .base = { 0x70000, 0x71000, 0x72000 },
+ },
+ .cdm = {
+ .count = 1,
+ .base = { 0x79200 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ },
+ },
+ .max_clk = 412500000,
+};
+
+static const struct mdp5_cfg_hw sdm660_config = {
+ .name = "sdm660",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_DSC |
+ MDP_CAP_CDM |
+ MDP_CAP_SRC_SPLIT |
+ 0,
+ },
+ .ctl = {
+ .count = 5,
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
+ .flush_hw_mask = 0xf4ffffff,
+ },
+ .pipe_vig = {
+ .count = 2,
+		.base = { 0x04000, 0x06000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 2, /* driver supports max of 2 currently */
+ .base = { 0x24000, 0x26000, 0x28000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 1,
+ .base = { 0x34000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
+ .lm = {
+ .count = 4,
+ .base = { 0x44000, 0x45000, 0x46000, 0x49000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 3, .pp = 3, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ },
+ .nb_stages = 8,
+ .max_width = 2560,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 2,
+ .base = { 0x54000, 0x56000 },
+ },
+ .ad = {
+ .count = 2,
+ .base = { 0x78000, 0x78800 },
+ },
+ .pp = {
+ .count = 5,
+ .base = { 0x70000, 0x70800, 0x71000, 0x71800, 0x72000 },
+ },
+ .cdm = {
+ .count = 1,
+ .base = { 0x79200 },
+ },
+ .dsc = {
+ .count = 2,
+ .base = { 0x80000, 0x80400 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ [3] = INTF_HDMI,
+ },
+ },
+ .max_clk = 412500000,
+};
+
+static const struct mdp5_cfg_handler cfg_handlers_v1[] = {
+ { .revision = 0, .config = { .hw = &msm8x74v1_config } },
+ { .revision = 2, .config = { .hw = &msm8x74v2_config } },
+ { .revision = 3, .config = { .hw = &apq8084_config } },
+ { .revision = 6, .config = { .hw = &msm8x16_config } },
+ { .revision = 8, .config = { .hw = &msm8x36_config } },
+ { .revision = 9, .config = { .hw = &msm8x94_config } },
+ { .revision = 7, .config = { .hw = &msm8x96_config } },
+ { .revision = 11, .config = { .hw = &msm8x76_config } },
+ { .revision = 15, .config = { .hw = &msm8917_config } },
+ { .revision = 16, .config = { .hw = &msm8x53_config } },
+};
+
+static const struct mdp5_cfg_handler cfg_handlers_v3[] = {
+ { .revision = 0, .config = { .hw = &msm8998_config } },
+ { .revision = 2, .config = { .hw = &sdm660_config } },
+ { .revision = 3, .config = { .hw = &sdm630_config } },
+};
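+
+/*
+ * Example, reading the tables above: a device probed as MDP5 major 1,
+ * minor 7 selects msm8x96_config from cfg_handlers_v1, while major 3,
+ * minor 0 selects msm8998_config from cfg_handlers_v3.
+ */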
+
+const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler)
+{
+ return cfg_handler->config.hw;
+}
+
+struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_handler)
+{
+ return &cfg_handler->config;
+}
+
+int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_handler)
+{
+ return cfg_handler->revision;
+}
+
+void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler)
+{
+ kfree(cfg_handler);
+}
+
+struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
+ uint32_t major, uint32_t minor)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ struct mdp5_cfg_handler *cfg_handler;
+ const struct mdp5_cfg_handler *cfg_handlers;
+ int i, ret = 0, num_handlers;
+
+ cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL);
+ if (unlikely(!cfg_handler)) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ switch (major) {
+ case 1:
+ cfg_handlers = cfg_handlers_v1;
+ num_handlers = ARRAY_SIZE(cfg_handlers_v1);
+ break;
+ case 3:
+ cfg_handlers = cfg_handlers_v3;
+ num_handlers = ARRAY_SIZE(cfg_handlers_v3);
+ break;
+ default:
+ DRM_DEV_ERROR(dev->dev, "unexpected MDP major version: v%d.%d\n",
+ major, minor);
+ ret = -ENXIO;
+ goto fail;
+ }
+
+	/* the hw can only be accessed after the global mdp5_cfg pointer is set */
+ for (i = 0; i < num_handlers; i++) {
+ if (cfg_handlers[i].revision != minor)
+ continue;
+ mdp5_cfg = cfg_handlers[i].config.hw;
+
+ break;
+ }
+ if (unlikely(!mdp5_cfg)) {
+ DRM_DEV_ERROR(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
+ major, minor);
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ cfg_handler->revision = minor;
+ cfg_handler->config.hw = mdp5_cfg;
+
+ DBG("MDP5: %s hw config selected", mdp5_cfg->name);
+
+ return cfg_handler;
+
+fail:
+ if (cfg_handler)
+ mdp5_cfg_destroy(cfg_handler);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
new file mode 100644
index 000000000..c2502cc33
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MDP5_CFG_H__
+#define __MDP5_CFG_H__
+
+#include "msm_drv.h"
+
+/*
+ * mdp5_cfg
+ *
+ * This module configures the dynamic offsets used by mdp5.xml.h
+ * (initialized in mdp5_cfg.c)
+ */
+extern const struct mdp5_cfg_hw *mdp5_cfg;
+
+#define MAX_CTL 8
+#define MAX_BASES 8
+#define MAX_SMP_BLOCKS 44
+#define MAX_CLIENTS 32
+
+typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
+
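+/*
+ * Fields shared by every register sub-block: the number of instances
+ * present and the register base offset of each instance.
+ */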
+#define MDP5_SUB_BLOCK_DEFINITION \
+ unsigned int count; \
+ uint32_t base[MAX_BASES]
+
+struct mdp5_sub_block {
+ MDP5_SUB_BLOCK_DEFINITION;
+};
+
+struct mdp5_lm_instance {
+ int id;
+ int pp;
+ int dspp;
+ uint32_t caps;
+};
+
+struct mdp5_lm_block {
+ MDP5_SUB_BLOCK_DEFINITION;
+ struct mdp5_lm_instance instances[MAX_BASES];
+ uint32_t nb_stages; /* number of stages per blender */
+ uint32_t max_width; /* Maximum output resolution */
+ uint32_t max_height;
+};
+
+struct mdp5_pipe_block {
+ MDP5_SUB_BLOCK_DEFINITION;
+ uint32_t caps; /* pipe capabilities */
+};
+
+struct mdp5_ctl_block {
+ MDP5_SUB_BLOCK_DEFINITION;
+ uint32_t flush_hw_mask; /* FLUSH register's hardware mask */
+};
+
+struct mdp5_smp_block {
+ int mmb_count; /* number of SMP MMBs */
+ int mmb_size; /* MMB: size in bytes */
+	uint32_t clients[MAX_CLIENTS];	/* SMP port allocation per pipe */
+ mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */
+ uint8_t reserved[MAX_CLIENTS]; /* # of MMBs allocated per client */
+};
+
+struct mdp5_mdp_block {
+ MDP5_SUB_BLOCK_DEFINITION;
+ uint32_t caps; /* MDP capabilities: MDP_CAP_xxx bits */
+};
+
+#define MDP5_INTF_NUM_MAX 5
+
+struct mdp5_intf_block {
+ uint32_t base[MAX_BASES];
+ u32 connect[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */
+};
+
+struct mdp5_perf_block {
+ u32 ab_inefficiency;
+ u32 ib_inefficiency;
+ u32 clk_inefficiency;
+};
+
+struct mdp5_cfg_hw {
+ char *name;
+
+ struct mdp5_mdp_block mdp;
+ struct mdp5_smp_block smp;
+ struct mdp5_ctl_block ctl;
+ struct mdp5_pipe_block pipe_vig;
+ struct mdp5_pipe_block pipe_rgb;
+ struct mdp5_pipe_block pipe_dma;
+ struct mdp5_pipe_block pipe_cursor;
+ struct mdp5_lm_block lm;
+ struct mdp5_sub_block dspp;
+ struct mdp5_sub_block ad;
+ struct mdp5_sub_block pp;
+ struct mdp5_sub_block dsc;
+ struct mdp5_sub_block cdm;
+ struct mdp5_intf_block intf;
+ struct mdp5_perf_block perf;
+
+ uint32_t max_clk;
+};
+
+struct mdp5_cfg {
+ const struct mdp5_cfg_hw *hw;
+};
+
+struct mdp5_kms;
+struct mdp5_cfg_handler;
+
+const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hnd);
+struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd);
+int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd);
+
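+/*
+ * Note: the statement expression below evaluates intf_type only once,
+ * so the macro is safe for arguments with side effects.
+ */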
+#define mdp5_cfg_intf_is_virtual(intf_type) ({ \
+ typeof(intf_type) __val = (intf_type); \
+ (__val) >= INTF_VIRTUAL ? true : false; })
+
+struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
+ uint32_t major, uint32_t minor);
+void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd);
+
+#endif /* __MDP5_CFG_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
new file mode 100644
index 000000000..a640af22e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_probe_helper.h>
+
+#include "mdp5_kms.h"
+
+#ifdef CONFIG_DRM_MSM_DSI
+
+static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+#define VSYNC_CLK_RATE 19200000
+static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ struct device *dev = encoder->dev->dev;
+ u32 total_lines, vclks_line, cfg;
+ long vsync_clk_speed;
+ struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+ int pp_id = mixer->pp;
+
+ if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
+ DRM_DEV_ERROR(dev, "vsync_clk is not initialized\n");
+ return -EINVAL;
+ }
+
+ total_lines = mode->vtotal * drm_mode_vrefresh(mode);
+ if (!total_lines) {
+ DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
+ __func__, mode->vtotal, drm_mode_vrefresh(mode));
+ return -EINVAL;
+ }
+
+ vsync_clk_speed = clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE);
+ if (vsync_clk_speed <= 0) {
+ DRM_DEV_ERROR(dev, "vsync_clk round rate failed %ld\n",
+ vsync_clk_speed);
+ return -EINVAL;
+ }
+ vclks_line = vsync_clk_speed / total_lines;
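+	/*
+	 * Worked example (hypothetical panel timing): vtotal = 2000 lines
+	 * at 60 Hz gives total_lines = 120000, so a 19.2 MHz vsync clock
+	 * yields vclks_line = 19200000 / 120000 = 160 ticks per line.
+	 */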
+
+ cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN
+ | MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN;
+ cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line);
+
+ /*
+ * Tearcheck emits a blanking signal every vclks_line * vtotal * 2 ticks on
+ * the vsync_clk equating to roughly half the desired panel refresh rate.
+	 * This is only necessary as a stability fallback if interrupts from the
+ * panel arrive too late or not at all, but is currently used by default
+ * because these panel interrupts are not wired up yet.
+ */
+ mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg);
+ mdp5_write(mdp5_kms,
+ REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), (2 * mode->vtotal));
+
+ mdp5_write(mdp5_kms,
+ REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay);
+ mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1);
+ mdp5_write(mdp5_kms, REG_MDP5_PP_START_POS(pp_id), mode->vdisplay);
+ mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_THRESH(pp_id),
+ MDP5_PP_SYNC_THRESH_START(4) |
+ MDP5_PP_SYNC_THRESH_CONTINUE(4));
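+	/* clear AUTOREFRESH_CONFIG so hardware autorefresh stays disabled */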
+ mdp5_write(mdp5_kms, REG_MDP5_PP_AUTOREFRESH_CONFIG(pp_id), 0x0);
+
+ return 0;
+}
+
+static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+ int pp_id = mixer->pp;
+ int ret;
+
+ ret = clk_set_rate(mdp5_kms->vsync_clk,
+ clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE));
+ if (ret) {
+ DRM_DEV_ERROR(encoder->dev->dev,
+ "vsync_clk clk_set_rate failed, %d\n", ret);
+ return ret;
+ }
+ ret = clk_prepare_enable(mdp5_kms->vsync_clk);
+ if (ret) {
+ DRM_DEV_ERROR(encoder->dev->dev,
+ "vsync_clk clk_prepare_enable failed, %d\n", ret);
+ return ret;
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 1);
+
+ return 0;
+}
+
+static void pingpong_tearcheck_disable(struct drm_encoder *encoder)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+ int pp_id = mixer->pp;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 0);
+ clk_disable_unprepare(mdp5_kms->vsync_clk);
+}
+
+void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ mode = adjusted_mode;
+
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
+ pingpong_tearcheck_setup(encoder, mode);
+ mdp5_crtc_set_pipeline(encoder->crtc);
+}
+
+void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
+ struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
+ struct mdp5_interface *intf = mdp5_cmd_enc->intf;
+ struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
+
+ if (WARN_ON(!mdp5_cmd_enc->enabled))
+ return;
+
+ pingpong_tearcheck_disable(encoder);
+
+ mdp5_ctl_set_encoder_state(ctl, pipeline, false);
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
+
+ mdp5_cmd_enc->enabled = false;
+}
+
+void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
+ struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
+ struct mdp5_interface *intf = mdp5_cmd_enc->intf;
+ struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
+
+ if (WARN_ON(mdp5_cmd_enc->enabled))
+ return;
+
+ if (pingpong_tearcheck_enable(encoder))
+ return;
+
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
+
+ mdp5_ctl_set_encoder_state(ctl, pipeline, true);
+
+ mdp5_cmd_enc->enabled = true;
+}
+
+int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder)
+{
+ struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms;
+ struct device *dev;
+ int intf_num;
+ u32 data = 0;
+
+ if (!encoder || !slave_encoder)
+ return -EINVAL;
+
+ mdp5_kms = get_kms(encoder);
+ intf_num = mdp5_cmd_enc->intf->num;
+
+ /* Switch slave encoder's trigger MUX, to use the master's
+ * start signal for the slave encoder
+ */
+ if (intf_num == 1)
+ data |= MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX;
+ else if (intf_num == 2)
+ data |= MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX;
+ else
+ return -EINVAL;
+
+ /* Smart Panel, Sync mode */
+ data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL;
+
+ dev = &mdp5_kms->pdev->dev;
+
+	/* Make sure clocks are on when connectors call this function. */
+ pm_runtime_get_sync(dev);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data);
+
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
+ MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+#endif /* CONFIG_DRM_MSM_DSI */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
new file mode 100644
index 000000000..86036dd4e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -0,0 +1,1360 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/sort.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_flip_work.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "mdp5_kms.h"
+#include "msm_gem.h"
+
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+
+struct mdp5_crtc {
+ struct drm_crtc base;
+ int id;
+ bool enabled;
+
+ spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */
+
+ /* if there is a pending flip, these will be non-null: */
+ struct drm_pending_vblank_event *event;
+
+ /* Bits have been flushed at the last commit,
+ * used to decide if a vsync has happened since last commit.
+ */
+ u32 flushed_mask;
+
+#define PENDING_CURSOR 0x1
+#define PENDING_FLIP 0x2
+ atomic_t pending;
+
+ /* for unref'ing cursor bo's after scanout completes: */
+ struct drm_flip_work unref_cursor_work;
+
+ struct mdp_irq vblank;
+ struct mdp_irq err;
+ struct mdp_irq pp_done;
+
+ struct completion pp_completion;
+
+ bool lm_cursor_enabled;
+
+ struct {
+		/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo */
+ spinlock_t lock;
+
+ /* current cursor being scanned out: */
+ struct drm_gem_object *scanout_bo;
+ uint64_t iova;
+ uint32_t width, height;
+ int x, y;
+ } cursor;
+};
+#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
+
+static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc);
+
+static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+ atomic_or(pending, &mdp5_crtc->pending);
+ mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
+}
+
+static void request_pp_done_pending(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ reinit_completion(&mdp5_crtc->pp_completion);
+}
+
+static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_ctl *ctl = mdp5_cstate->ctl;
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+ bool start = !mdp5_cstate->defer_start;
+
+ mdp5_cstate->defer_start = false;
+
+ DBG("%s: flush=%08x", crtc->name, flush_mask);
+
+ return mdp5_ctl_commit(ctl, pipeline, flush_mask, start);
+}
+
+/*
+ * flush updates, to make sure hw is updated to new scanout fb,
+ * so that we can safely queue unref to current fb (ie. next
+ * vblank we know hw is done w/ previous scanout_fb).
+ */
+static u32 crtc_flush_all(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_hw_mixer *mixer, *r_mixer;
+ struct drm_plane *plane;
+ uint32_t flush_mask = 0;
+
+ /* this should not happen: */
+ if (WARN_ON(!mdp5_cstate->ctl))
+ return 0;
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ if (!plane->state->visible)
+ continue;
+ flush_mask |= mdp5_plane_get_flush(plane);
+ }
+
+ mixer = mdp5_cstate->pipeline.mixer;
+ flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);
+
+ r_mixer = mdp5_cstate->pipeline.r_mixer;
+ if (r_mixer)
+ flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
+
+ return crtc_flush(crtc, flush_mask);
+}
+
+/* if file!=NULL, this is preclose potential cancel-flip path */
+static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_ctl *ctl = mdp5_cstate->ctl;
+ struct drm_device *dev = crtc->dev;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = mdp5_crtc->event;
+ if (event) {
+ mdp5_crtc->event = NULL;
+ DBG("%s: send event: %p", crtc->name, event);
+ drm_crtc_send_vblank_event(crtc, event);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ if (ctl && !crtc->state->enable) {
+ /* set STAGE_UNUSED for all layers */
+ mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
+ /* XXX: What to do here? */
+ /* mdp5_crtc->ctl = NULL; */
+ }
+}
+
+static void unref_cursor_worker(struct drm_flip_work *work, void *val)
+{
+ struct mdp5_crtc *mdp5_crtc =
+ container_of(work, struct mdp5_crtc, unref_cursor_work);
+ struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
+ struct msm_kms *kms = &mdp5_kms->base.base;
+
+ msm_gem_unpin_iova(val, kms->aspace);
+ drm_gem_object_put(val);
+}
+
+static void mdp5_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);
+
+ kfree(mdp5_crtc);
+}
+
+static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
+{
+ switch (stage) {
+ case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
+ case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
+ case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
+ case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
+ case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
+ case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
+ case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * left/right pipe offsets for the stage array used in blend_setup()
+ */
+#define PIPE_LEFT 0
+#define PIPE_RIGHT 1
+
+/*
+ * blend_setup() - blend all the planes of a CRTC
+ *
+ * If no base layer is available, border will be enabled as the base layer.
+ * Otherwise all layers will be blended based on their stage calculated
+ * in mdp5_crtc_atomic_check.
+ */
+static void blend_setup(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct drm_plane *plane;
+ struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
+ const struct mdp_format *format;
+ struct mdp5_hw_mixer *mixer = pipeline->mixer;
+ uint32_t lm = mixer->lm;
+ struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
+ uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
+ struct mdp5_ctl *ctl = mdp5_cstate->ctl;
+ uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
+ unsigned long flags;
+ enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
+ enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
+ int i, plane_cnt = 0;
+ bool bg_alpha_enabled = false;
+ u32 mixer_op_mode = 0;
+ u32 val;
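+/* hw blender registers are indexed from 0, while stage ids start at STAGE0 */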
+#define blender(stage) ((stage) - STAGE0)
+
+ spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
+
+ /* ctl could be released already when we are shutting down: */
+ /* XXX: Can this happen now? */
+ if (!ctl)
+ goto out;
+
+ /* Collect all plane information */
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ enum mdp5_pipe right_pipe;
+
+ if (!plane->state->visible)
+ continue;
+
+ pstate = to_mdp5_plane_state(plane->state);
+ pstates[pstate->stage] = pstate;
+ stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
+ /*
+ * if we have a right mixer, stage the same pipe as we
+ * have on the left mixer
+ */
+ if (r_mixer)
+ r_stage[pstate->stage][PIPE_LEFT] =
+ mdp5_plane_pipe(plane);
+ /*
+		 * if we have a right pipe (i.e. the plane comprises two
+		 * hwpipes), then stage the right pipe on the right side of
+		 * both layer mixers
+ */
+ right_pipe = mdp5_plane_right_pipe(plane);
+ if (right_pipe) {
+ stage[pstate->stage][PIPE_RIGHT] = right_pipe;
+ r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
+ }
+
+ plane_cnt++;
+ }
+
+ if (!pstates[STAGE_BASE]) {
+ ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
+ DBG("Border Color is enabled");
+ } else if (plane_cnt) {
+ format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));
+
+ if (format->alpha_enable)
+ bg_alpha_enabled = true;
+ }
+
+ /* The reset for blending */
+ for (i = STAGE0; i <= STAGE_MAX; i++) {
+ if (!pstates[i])
+ continue;
+
+ format = to_mdp_format(
+ msm_framebuffer_format(pstates[i]->base.fb));
+ plane = pstates[i]->base.plane;
+ blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+ MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
+ fg_alpha = pstates[i]->base.alpha >> 8;
+ bg_alpha = 0xFF - fg_alpha;
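+		/*
+		 * The DRM plane alpha property is 16 bits wide (0xffff is
+		 * opaque); the >> 8 above folds it to the 8-bit constant
+		 * alpha the blender expects, with bg_alpha as complement.
+		 */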
+
+ if (!format->alpha_enable && bg_alpha_enabled)
+ mixer_op_mode = 0;
+ else
+ mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);
+
+ DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);
+
+ if (format->alpha_enable &&
+ pstates[i]->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
+ blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+ MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
+ if (fg_alpha != 0xff) {
+ bg_alpha = fg_alpha;
+ blend_op |=
+ MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
+ MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
+ } else {
+ blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
+ }
+ } else if (format->alpha_enable &&
+ pstates[i]->base.pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
+ blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
+ MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
+ if (fg_alpha != 0xff) {
+ bg_alpha = fg_alpha;
+ blend_op |=
+ MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
+ MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
+ MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
+ MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
+ } else {
+ blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
+ }
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
+ blender(i)), blend_op);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
+ blender(i)), fg_alpha);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
+ blender(i)), bg_alpha);
+ if (r_mixer) {
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
+ blender(i)), blend_op);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
+ blender(i)), fg_alpha);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
+ blender(i)), bg_alpha);
+ }
+ }
+
+ val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
+ val | mixer_op_mode);
+ if (r_mixer) {
+ val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
+ val | mixer_op_mode);
+ }
+
+ mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
+ ctl_blend_flags);
+out:
+ spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
+}
+
+static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
+ struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
+ uint32_t lm = mixer->lm;
+ u32 mixer_width, val;
+ unsigned long flags;
+ struct drm_display_mode *mode;
+
+ if (WARN_ON(!crtc->state))
+ return;
+
+ mode = &crtc->state->adjusted_mode;
+
+ DBG("%s: set mode: " DRM_MODE_FMT, crtc->name, DRM_MODE_ARG(mode));
+
+ mixer_width = mode->hdisplay;
+ if (r_mixer)
+ mixer_width /= 2;
+
+ spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
+ MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
+ MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
+
+ /* Assign mixer to LEFT side in source split mode */
+ val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
+ val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);
+
+ if (r_mixer) {
+ u32 r_lm = r_mixer->lm;
+
+ mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
+ MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
+ MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
+
+ /* Assign mixer to RIGHT side in source split mode */
+ val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
+ val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
+ }
+
+ spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
+}
+
+static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder(encoder, dev)
+ if (encoder->crtc == crtc)
+ return encoder;
+
+ return NULL;
+}
+
+static bool mdp5_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ unsigned int pipe = crtc->index;
+ struct drm_encoder *encoder;
+ int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
+
+ encoder = get_encoder_from_crtc(crtc);
+ if (!encoder) {
+ DRM_ERROR("no encoder found for crtc %d\n", pipe);
+ return false;
+ }
+
+ vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
+
+ /*
+ * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
+ * the end of VFP. Translate the porch values relative to the line
+ * counter positions.
+ */
+
+ vactive_start = vsw + vbp + 1;
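+	/*
+	 * e.g. with hypothetical timings vsw = 4 and vbp = 36, we get
+	 * vactive_start = 41, so a raw line count of 41 maps to vpos 0.
+	 */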
+
+ vactive_end = vactive_start + mode->crtc_vdisplay;
+
+ /* last scan line before VSYNC */
+ vfp_end = mode->crtc_vtotal;
+
+ if (stime)
+ *stime = ktime_get();
+
+ line = mdp5_encoder_get_linecount(encoder);
+
+ if (line < vactive_start)
+ line -= vactive_start;
+ else if (line > vactive_end)
+ line = line - vfp_end - vactive_start;
+ else
+ line -= vactive_start;
+
+ *vpos = line;
+ *hpos = 0;
+
+ if (etime)
+ *etime = ktime_get();
+
+ return true;
+}
+
+static u32 mdp5_crtc_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+
+ encoder = get_encoder_from_crtc(crtc);
+ if (!encoder)
+ return 0;
+
+ return mdp5_encoder_get_framecount(encoder);
+}
+
+static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct device *dev = &mdp5_kms->pdev->dev;
+ unsigned long flags;
+
+ DBG("%s", crtc->name);
+
+ if (WARN_ON(!mdp5_crtc->enabled))
+ return;
+
+ /* Disable/save vblank irq handling before power is disabled */
+ drm_crtc_vblank_off(crtc);
+
+ if (mdp5_cstate->cmd_mode)
+ mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);
+
+ mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
+ pm_runtime_put_sync(dev);
+
+ if (crtc->state->event && !crtc->state->active) {
+ WARN_ON(mdp5_crtc->event);
+ spin_lock_irqsave(&mdp5_kms->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&mdp5_kms->dev->event_lock, flags);
+ }
+
+ mdp5_crtc->enabled = false;
+}
+
+static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
+ u32 count;
+
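+	/*
+	 * Command mode panels have no reliable free-running frame counter;
+	 * a max vblank count of 0 tells the vblank core not to use one.
+	 */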
+ count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff;
+ drm_crtc_set_max_vblank_count(crtc, count);
+
+ drm_crtc_vblank_on(crtc);
+}
+
+static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct device *dev = &mdp5_kms->pdev->dev;
+
+ DBG("%s", crtc->name);
+
+ if (WARN_ON(mdp5_crtc->enabled))
+ return;
+
+ pm_runtime_get_sync(dev);
+
+ if (mdp5_crtc->lm_cursor_enabled) {
+ /*
+ * Restore LM cursor state, as it might have been lost
+ * with suspend:
+ */
+ if (mdp5_crtc->cursor.iova) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
+ mdp5_crtc_restore_cursor(crtc);
+ spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
+
+ mdp5_ctl_set_cursor(mdp5_cstate->ctl,
+ &mdp5_cstate->pipeline, 0, true);
+ } else {
+ mdp5_ctl_set_cursor(mdp5_cstate->ctl,
+ &mdp5_cstate->pipeline, 0, false);
+ }
+ }
+
+ /* Restore vblank irq handling after power is enabled */
+ mdp5_crtc_vblank_on(crtc);
+
+ mdp5_crtc_mode_set_nofb(crtc);
+
+ mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
+
+ if (mdp5_cstate->cmd_mode)
+ mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);
+
+ mdp5_crtc->enabled = true;
+}
+
+static int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
+ struct drm_crtc_state *new_crtc_state,
+ bool need_right_mixer)
+{
+ struct mdp5_crtc_state *mdp5_cstate =
+ to_mdp5_crtc_state(new_crtc_state);
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+ struct mdp5_interface *intf;
+ bool new_mixer = false;
+
+ new_mixer = !pipeline->mixer;
+
+ if ((need_right_mixer && !pipeline->r_mixer) ||
+ (!need_right_mixer && pipeline->r_mixer))
+ new_mixer = true;
+
+ if (new_mixer) {
+ struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
+ struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
+ u32 caps;
+ int ret;
+
+ caps = MDP_LM_CAP_DISPLAY;
+ if (need_right_mixer)
+ caps |= MDP_LM_CAP_PAIR;
+
+ ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
+ &pipeline->mixer, need_right_mixer ?
+ &pipeline->r_mixer : NULL);
+ if (ret)
+ return ret;
+
+ ret = mdp5_mixer_release(new_crtc_state->state, old_mixer);
+ if (ret)
+ return ret;
+
+ if (old_r_mixer) {
+ ret = mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
+ if (ret)
+ return ret;
+
+ if (!need_right_mixer)
+ pipeline->r_mixer = NULL;
+ }
+ }
+
+ /*
+ * these should have been already set up in the encoder's atomic
+ * check (called by drm_atomic_helper_check_modeset)
+ */
+ intf = pipeline->intf;
+
+ mdp5_cstate->err_irqmask = intf2err(intf->num);
+ mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);
+
+ if ((intf->type == INTF_DSI) &&
+ (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
+ mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
+ mdp5_cstate->cmd_mode = true;
+ } else {
+ mdp5_cstate->pp_done_irqmask = 0;
+ mdp5_cstate->cmd_mode = false;
+ }
+
+ return 0;
+}
+
+struct plane_state {
+ struct drm_plane *plane;
+ struct mdp5_plane_state *state;
+};
+
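+/* sort comparator: order plane states bottom-up by normalized zpos */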
+static int pstate_cmp(const void *a, const void *b)
+{
+ struct plane_state *pa = (struct plane_state *)a;
+ struct plane_state *pb = (struct plane_state *)b;
+ return pa->state->base.normalized_zpos - pb->state->base.normalized_zpos;
+}
+
+/* is there a helper for this? */
+static bool is_fullscreen(struct drm_crtc_state *cstate,
+ struct drm_plane_state *pstate)
+{
+ return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
+ ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
+ ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
+}
+
+static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
+ struct drm_crtc_state *new_crtc_state,
+ struct drm_plane_state *bpstate)
+{
+ struct mdp5_crtc_state *mdp5_cstate =
+ to_mdp5_crtc_state(new_crtc_state);
+
+ /*
+ * if we're in source split mode, it's mandatory to have
+ * border out on the base stage
+ */
+ if (mdp5_cstate->pipeline.r_mixer)
+ return STAGE0;
+
+	/* if the bottom-most layer is not fullscreen, we need the base
+	 * stage free for solid (border) color:
+ */
+ if (!is_fullscreen(new_crtc_state, bpstate))
+ return STAGE0;
+
+ return STAGE_BASE;
+}
+
+static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc_state);
+ struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct drm_plane *plane;
+ struct drm_device *dev = crtc->dev;
+ struct plane_state pstates[STAGE_MAX + 1];
+ const struct mdp5_cfg_hw *hw_cfg;
+ const struct drm_plane_state *pstate;
+ const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ bool cursor_plane = false;
+ bool need_right_mixer = false;
+ int cnt = 0, i;
+ int ret;
+ enum mdp_mixer_stage_id start;
+
+ DBG("%s: check", crtc->name);
+
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
+ struct mdp5_plane_state *mdp5_pstate =
+ to_mdp5_plane_state(pstate);
+
+ if (!pstate->visible)
+ continue;
+
+ pstates[cnt].plane = plane;
+ pstates[cnt].state = to_mdp5_plane_state(pstate);
+
+ mdp5_pstate->needs_dirtyfb =
+ intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
+
+ /*
+ * if any plane on this crtc uses 2 hwpipes, then we need
+ * the crtc to have a right hwmixer.
+ */
+ if (pstates[cnt].state->r_hwpipe)
+ need_right_mixer = true;
+ cnt++;
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ cursor_plane = true;
+ }
+
+ /* bail out early if there aren't any planes */
+ if (!cnt)
+ return 0;
+
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+ /*
+ * we need a right hwmixer if the mode's width is greater than a single
+ * LM's max width
+ */
+ if (mode->hdisplay > hw_cfg->lm.max_width)
+ need_right_mixer = true;
+
+ ret = mdp5_crtc_setup_pipeline(crtc, crtc_state, need_right_mixer);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
+ return ret;
+ }
+
+ /* assign a stage based on sorted zpos property */
+ sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
+
+ /* trigger a warning if cursor isn't the highest zorder */
+ WARN_ON(cursor_plane &&
+ (pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));
+
+ start = get_start_stage(crtc, crtc_state, &pstates[0].state->base);
+
+ /* verify that there are not too many planes attached to crtc
+ * and that we don't have conflicting mixer stages:
+ */
+ if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
+ DRM_DEV_ERROR(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
+ cnt, start);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ if (cursor_plane && (i == (cnt - 1)))
+ pstates[i].state->stage = hw_cfg->lm.nb_stages;
+ else
+ pstates[i].state->stage = start + i;
+ DBG("%s: assign pipe %s on stage=%d", crtc->name,
+ pstates[i].plane->name,
+ pstates[i].state->stage);
+ }
+
+ return 0;
+}
+
+static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ DBG("%s: begin", crtc->name);
+}
+
+static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ DBG("%s: event: %p", crtc->name, crtc->state->event);
+
+ WARN_ON(mdp5_crtc->event);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ mdp5_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ /*
+ * If no CTL has been allocated in mdp5_crtc_atomic_check(),
+ * it means we are trying to flush a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ /* XXX: Can this happen now ? */
+ if (unlikely(!mdp5_cstate->ctl))
+ return;
+
+ blend_setup(crtc);
+
+ /* PP_DONE irq is only used by command mode for now.
+ * It is better to request pending before FLUSH and START trigger
+	 * to make sure no pp_done irq is missed.
+ * This is safe because no pp_done will happen before SW trigger
+ * in command mode.
+ */
+ if (mdp5_cstate->cmd_mode)
+ request_pp_done_pending(crtc);
+
+ mdp5_crtc->flushed_mask = crtc_flush_all(crtc);
+
+ /* XXX are we leaking out state here? */
+ mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
+ mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
+ mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;
+
+ request_pending(crtc, PENDING_FLIP);
+}
+
+static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ uint32_t xres = crtc->mode.hdisplay;
+ uint32_t yres = crtc->mode.vdisplay;
+
+ /*
+	 * The cursor Region Of Interest (ROI) is the part of the cursor
+	 * buffer that is read out and rendered. It is determined by the
+	 * visibility of the cursor point, which in the default cursor image
+	 * sits at the top left of the image.
+ *
+ * Without rotation:
+ * If the cursor point reaches the right (xres - x < cursor.width) or
+ * bottom (yres - y < cursor.height) boundary of the screen, then ROI
+ * width and ROI height need to be evaluated to crop the cursor image
+ * accordingly.
+ * (xres-x) will be new cursor width when x > (xres - cursor.width)
+ * (yres-y) will be new cursor height when y > (yres - cursor.height)
+ *
+ * With rotation:
+ * We get negative x and/or y coordinates.
+ * (cursor.width - abs(x)) will be new cursor width when x < 0
+	 * (cursor.height - abs(y)) will be new cursor height when y < 0
+ */
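+	/*
+	 * e.g. (hypothetical values) a 64x64 cursor at x = 1900 on a 1920
+	 * pixel wide screen is cropped to roi_w = 1920 - 1900 = 20, while
+	 * at x = -16 it is cropped to roi_w = 64 - 16 = 48.
+	 */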
+ if (mdp5_crtc->cursor.x >= 0)
+ *roi_w = min(mdp5_crtc->cursor.width, xres -
+ mdp5_crtc->cursor.x);
+ else
+ *roi_w = mdp5_crtc->cursor.width - abs(mdp5_crtc->cursor.x);
+ if (mdp5_crtc->cursor.y >= 0)
+ *roi_h = min(mdp5_crtc->cursor.height, yres -
+ mdp5_crtc->cursor.y);
+ else
+ *roi_h = mdp5_crtc->cursor.height - abs(mdp5_crtc->cursor.y);
+}
+
+static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_ARGB8888);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
+ uint32_t blendcfg, stride;
+ uint32_t x, y, src_x, src_y, width, height;
+ uint32_t roi_w, roi_h;
+ int lm;
+
+ assert_spin_locked(&mdp5_crtc->cursor.lock);
+
+ lm = mdp5_cstate->pipeline.mixer->lm;
+
+ x = mdp5_crtc->cursor.x;
+ y = mdp5_crtc->cursor.y;
+ width = mdp5_crtc->cursor.width;
+ height = mdp5_crtc->cursor.height;
+
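+	/* the cursor format is ARGB8888, so cpp[0] is 4 bytes per pixel */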
+ stride = width * info->cpp[0];
+
+ get_roi(crtc, &roi_w, &roi_h);
+
+	/* If the cursor buffer overlaps the upper or left screen
+	 * border due to rotation, the ROI's pixel offset inside the
+	 * cursor buffer is the positive overlap distance.
+ */
+ if (mdp5_crtc->cursor.x < 0) {
+ src_x = abs(mdp5_crtc->cursor.x);
+ x = 0;
+ } else {
+ src_x = 0;
+ }
+ if (mdp5_crtc->cursor.y < 0) {
+ src_y = abs(mdp5_crtc->cursor.y);
+ y = 0;
+ } else {
+ src_y = 0;
+ }
+ DBG("%s: x=%d, y=%d roi_w=%d roi_h=%d src_x=%d src_y=%d",
+ crtc->name, x, y, roi_w, roi_h, src_x, src_y);
+
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
+ MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
+ MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
+ MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
+ MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
+ MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
+ MDP5_LM_CURSOR_START_XY_Y_START(y) |
+ MDP5_LM_CURSOR_START_XY_X_START(x));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_XY(lm),
+ MDP5_LM_CURSOR_XY_SRC_Y(src_y) |
+ MDP5_LM_CURSOR_XY_SRC_X(src_x));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
+ mdp5_crtc->cursor.iova);
+
+ blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
+ blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
+}
+
+static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file, uint32_t handle,
+ uint32_t width, uint32_t height)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+ struct drm_device *dev = crtc->dev;
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct platform_device *pdev = mdp5_kms->pdev;
+ struct msm_kms *kms = &mdp5_kms->base.base;
+ struct drm_gem_object *cursor_bo, *old_bo = NULL;
+ struct mdp5_ctl *ctl;
+ int ret;
+ uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
+ bool cursor_enable = true;
+ unsigned long flags;
+
+ if (!mdp5_crtc->lm_cursor_enabled) {
+ dev_warn(dev->dev,
+ "cursor_set is deprecated with cursor planes\n");
+ return -EINVAL;
+ }
+
+ if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
+ DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
+ return -EINVAL;
+ }
+
+ ctl = mdp5_cstate->ctl;
+ if (!ctl)
+ return -EINVAL;
+
+ /* don't support LM cursors when we have source split enabled */
+ if (mdp5_cstate->pipeline.r_mixer)
+ return -EINVAL;
+
+ if (!handle) {
+ DBG("Cursor off");
+ cursor_enable = false;
+ mdp5_crtc->cursor.iova = 0;
+ pm_runtime_get_sync(&pdev->dev);
+ goto set_cursor;
+ }
+
+ cursor_bo = drm_gem_object_lookup(file, handle);
+ if (!cursor_bo)
+ return -ENOENT;
+
+ ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
+ &mdp5_crtc->cursor.iova);
+ if (ret) {
+ drm_gem_object_put(cursor_bo);
+ return -EINVAL;
+ }
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
+ old_bo = mdp5_crtc->cursor.scanout_bo;
+
+ mdp5_crtc->cursor.scanout_bo = cursor_bo;
+ mdp5_crtc->cursor.width = width;
+ mdp5_crtc->cursor.height = height;
+
+ mdp5_crtc_restore_cursor(crtc);
+
+ spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
+
+set_cursor:
+ ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to %sable cursor: %d\n",
+ cursor_enable ? "en" : "dis", ret);
+ goto end;
+ }
+
+ crtc_flush(crtc, flush_mask);
+
+end:
+ pm_runtime_put_sync(&pdev->dev);
+ if (old_bo) {
+ drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
+ /* enable vblank to complete cursor work: */
+ request_pending(crtc, PENDING_CURSOR);
+	/* Bits that were flushed at the last commit; used to decide
+	 * whether a vsync has happened since the last commit.
+}
+
+static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
+ struct drm_device *dev = crtc->dev;
+ uint32_t roi_w;
+ uint32_t roi_h;
+ unsigned long flags;
+
+ if (!mdp5_crtc->lm_cursor_enabled) {
+ dev_warn(dev->dev,
+ "cursor_move is deprecated with cursor planes\n");
+ return -EINVAL;
+ }
+
+ /* don't support LM cursors when we have source split enabled */
+ if (mdp5_cstate->pipeline.r_mixer)
+ return -EINVAL;
+
+ /* In case the CRTC is disabled, just drop the cursor update */
+ if (unlikely(!crtc->state->enable))
+ return 0;
+
+ /* accept negative x/y coordinates up to maximum cursor overlap */
+ mdp5_crtc->cursor.x = x = max(x, -(int)mdp5_crtc->cursor.width);
+ mdp5_crtc->cursor.y = y = max(y, -(int)mdp5_crtc->cursor.height);
+
+ get_roi(crtc, &roi_w, &roi_h);
+
+ pm_runtime_get_sync(&mdp5_kms->pdev->dev);
+
+ spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
+ mdp5_crtc_restore_cursor(crtc);
+ spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
+
+ crtc_flush(crtc, flush_mask);
+
+ pm_runtime_put_sync(&mdp5_kms->pdev->dev);
+
+ return 0;
+}
+
+static void
+mdp5_crtc_atomic_print_state(struct drm_printer *p,
+ const struct drm_crtc_state *state)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+ struct mdp5_kms *mdp5_kms = get_kms(state->crtc);
+
+ if (WARN_ON(!pipeline))
+ return;
+
+ if (mdp5_cstate->ctl)
+ drm_printf(p, "\tctl=%d\n", mdp5_ctl_get_ctl_id(mdp5_cstate->ctl));
+
+ drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
+ pipeline->mixer->name : "(null)");
+
+ if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
+ drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
+ pipeline->r_mixer->name : "(null)");
+
+ drm_printf(p, "\tcmd_mode=%d\n", mdp5_cstate->cmd_mode);
+}
+
+static struct drm_crtc_state *
+mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
+ sizeof(*mdp5_cstate), GFP_KERNEL);
+ if (!mdp5_cstate)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);
+
+ return &mdp5_cstate->base;
+}
+
+static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
+
+ __drm_atomic_helper_crtc_destroy_state(state);
+
+ kfree(mdp5_cstate);
+}
+
+static void mdp5_crtc_reset(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate =
+ kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);
+
+ if (crtc->state)
+ mdp5_crtc_destroy_state(crtc, crtc->state);
+
+ if (mdp5_cstate)
+ __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
+}
+
+static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = mdp5_crtc_destroy,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = mdp5_crtc_reset,
+ .atomic_duplicate_state = mdp5_crtc_duplicate_state,
+ .atomic_destroy_state = mdp5_crtc_destroy_state,
+ .atomic_print_state = mdp5_crtc_atomic_print_state,
+ .get_vblank_counter = mdp5_crtc_get_vblank_counter,
+ .enable_vblank = msm_crtc_enable_vblank,
+ .disable_vblank = msm_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_funcs mdp5_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = mdp5_crtc_destroy,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = mdp5_crtc_reset,
+ .atomic_duplicate_state = mdp5_crtc_duplicate_state,
+ .atomic_destroy_state = mdp5_crtc_destroy_state,
+ .cursor_set = mdp5_crtc_cursor_set,
+ .cursor_move = mdp5_crtc_cursor_move,
+ .atomic_print_state = mdp5_crtc_atomic_print_state,
+ .get_vblank_counter = mdp5_crtc_get_vblank_counter,
+ .enable_vblank = msm_crtc_enable_vblank,
+ .disable_vblank = msm_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+};
+
+static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
+ .mode_set_nofb = mdp5_crtc_mode_set_nofb,
+ .atomic_check = mdp5_crtc_atomic_check,
+ .atomic_begin = mdp5_crtc_atomic_begin,
+ .atomic_flush = mdp5_crtc_atomic_flush,
+ .atomic_enable = mdp5_crtc_atomic_enable,
+ .atomic_disable = mdp5_crtc_atomic_disable,
+ .get_scanout_position = mdp5_crtc_get_scanout_position,
+};
+
+static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
+ struct drm_crtc *crtc = &mdp5_crtc->base;
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+ unsigned pending;
+
+ mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
+
+ pending = atomic_xchg(&mdp5_crtc->pending, 0);
+
+ if (pending & PENDING_FLIP) {
+ complete_flip(crtc, NULL);
+ }
+
+ if (pending & PENDING_CURSOR)
+ drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
+}
+
+static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
+
+ DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
+}
+
+static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
+ pp_done);
+
+ complete_all(&mdp5_crtc->pp_completion);
+}
+
+static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ int ret;
+
+ ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
+ msecs_to_jiffies(50));
+ if (ret == 0)
+		dev_warn_ratelimited(dev->dev, "pp done timeout, lm=%d\n",
+ mdp5_cstate->pipeline.mixer->lm);
+}
+
+static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_ctl *ctl = mdp5_cstate->ctl;
+ int ret;
+
+ /* Should not call this function if crtc is disabled. */
+ if (!ctl)
+ return;
+
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret)
+ return;
+
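+	/* wait until the hw has latched (cleared) all the bits we flushed: */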
+ ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
+ ((mdp5_ctl_get_commit_status(ctl) &
+ mdp5_crtc->flushed_mask) == 0),
+ msecs_to_jiffies(50));
+ if (ret <= 0)
+		dev_warn(dev->dev, "vblank timeout, crtc=%d\n", mdp5_crtc->id);
+
+ mdp5_crtc->flushed_mask = 0;
+
+ drm_crtc_vblank_put(crtc);
+}
+
+uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ return mdp5_crtc->vblank.irqmask;
+}
+
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+
+	/* should this be done elsewhere? */
+ mdp_irq_update(&mdp5_kms->base);
+
+ mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
+}
+
+struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+
+ return mdp5_cstate->ctl;
+}
+
+struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate;
+
+ if (WARN_ON(!crtc))
+ return ERR_PTR(-EINVAL);
+
+ mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+
+ return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
+ ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
+}
+
+struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate;
+
+ if (WARN_ON(!crtc))
+ return ERR_PTR(-EINVAL);
+
+ mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+
+ return &mdp5_cstate->pipeline;
+}
+
+void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+
+ if (mdp5_cstate->cmd_mode)
+ mdp5_crtc_wait_for_pp_done(crtc);
+ else
+ mdp5_crtc_wait_for_flush_done(crtc);
+}
+
+/* initialize crtc */
+struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane,
+ struct drm_plane *cursor_plane, int id)
+{
+ struct drm_crtc *crtc = NULL;
+ struct mdp5_crtc *mdp5_crtc;
+
+ mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
+ if (!mdp5_crtc)
+ return ERR_PTR(-ENOMEM);
+
+ crtc = &mdp5_crtc->base;
+
+ mdp5_crtc->id = id;
+
+ spin_lock_init(&mdp5_crtc->lm_lock);
+ spin_lock_init(&mdp5_crtc->cursor.lock);
+ init_completion(&mdp5_crtc->pp_completion);
+
+ mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
+ mdp5_crtc->err.irq = mdp5_crtc_err_irq;
+ mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
+
+ mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;
+
+ drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
+ cursor_plane ?
+ &mdp5_crtc_no_lm_cursor_funcs :
+ &mdp5_crtc_funcs, NULL);
+
+ drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
+ "unref cursor", unref_cursor_worker);
+
+ drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
+
+ return crtc;
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
new file mode 100644
index 000000000..1220f2b20
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
@@ -0,0 +1,764 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ */
+
+#include "mdp5_kms.h"
+#include "mdp5_ctl.h"
+
+/*
+ * CTL - MDP Control Pool Manager
+ *
+ * Controls are shared between all display interfaces.
+ *
+ * They are intended to be used for data path configuration.
+ * The top level register programming describes the complete data path for
+ * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
+ *
+ * Hardware capabilities determine the number of concurrent data paths.
+ *
+ * In certain use cases (high-resolution dual pipe), one single CTL can be
+ * shared across multiple CRTCs.
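+ *
+ * A minimal usage sketch (illustrative only; the real call sites in the
+ * CRTC/encoder code are more involved): a client requests a CTL once,
+ * attaches it to a pipeline, then programs and flushes it per frame:
+ *
+ *	ctl = mdp5_ctlm_request(ctl_mgr, intf->num);
+ *	mdp5_ctl_set_pipeline(ctl, &pipeline);
+ *	mdp5_ctl_blend(ctl, &pipeline, stage, r_stage, stage_cnt, flags);
+ *	mdp5_ctl_commit(ctl, &pipeline, flush_mask, true);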
+ */
+
+#define CTL_STAT_BUSY 0x1
+#define CTL_STAT_BOOKED 0x2
+
+struct mdp5_ctl {
+ struct mdp5_ctl_manager *ctlm;
+
+ u32 id;
+
+ /* CTL status bitmask */
+ u32 status;
+
+ bool encoder_enabled;
+
+ /* pending flush_mask bits */
+ u32 flush_mask;
+
+ /* REG_MDP5_CTL_*(<id>) registers access info + lock: */
+ spinlock_t hw_lock;
+ u32 reg_offset;
+
+ /* when do CTL registers need to be flushed? (mask of trigger bits) */
+ u32 pending_ctl_trigger;
+
+ bool cursor_on;
+
+ /* True if the current CTL has FLUSH bits pending for single FLUSH. */
+ bool flush_pending;
+
+ struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
+};
+
+struct mdp5_ctl_manager {
+ struct drm_device *dev;
+
+ /* number of CTL / Layer Mixers in this hw config: */
+ u32 nlm;
+ u32 nctl;
+
+ /* to filter out non-present bits in the current hardware config */
+ u32 flush_hw_mask;
+
+ /* status for single FLUSH */
+ bool single_flush_supported;
+ u32 single_flush_pending_mask;
+
+ /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
+ spinlock_t pool_lock;
+ struct mdp5_ctl ctls[MAX_CTL];
+};
+
+static inline
+struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
+{
+ struct msm_drm_private *priv = ctl_mgr->dev->dev_private;
+
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static inline
+void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
+
+ (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
+ mdp5_write(mdp5_kms, reg, data);
+}
+
+static inline
+u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
+
+	(void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
+ return mdp5_read(mdp5_kms, reg);
+}
+
+static void set_display_intf(struct mdp5_kms *mdp5_kms,
+ struct mdp5_interface *intf)
+{
+ unsigned long flags;
+ u32 intf_sel;
+
+ spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
+ intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
+
+ switch (intf->num) {
+ case 0:
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type);
+ break;
+ case 1:
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type);
+ break;
+ case 2:
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type);
+ break;
+ case 3:
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
+ spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
+}
+
+static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
+{
+ unsigned long flags;
+ struct mdp5_interface *intf = pipeline->intf;
+ u32 ctl_op = 0;
+
+ if (!mdp5_cfg_intf_is_virtual(intf->type))
+ ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);
+
+ switch (intf->type) {
+ case INTF_DSI:
+ if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
+ ctl_op |= MDP5_CTL_OP_CMD_MODE;
+ break;
+
+ case INTF_WB:
+ if (intf->mode == MDP5_INTF_WB_MODE_LINE)
+ ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
+ break;
+
+ default:
+ break;
+ }
+
+ if (pipeline->r_mixer)
+ ctl_op |= MDP5_CTL_OP_PACK_3D_ENABLE |
+ MDP5_CTL_OP_PACK_3D(1);
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+}
+
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
+ struct mdp5_interface *intf = pipeline->intf;
+
+ /* Virtual interfaces need not set a display intf (e.g.: Writeback) */
+ if (!mdp5_cfg_intf_is_virtual(intf->type))
+ set_display_intf(mdp5_kms, intf);
+
+ set_ctl_op(ctl, pipeline);
+
+ return 0;
+}
+
+static bool start_signal_needed(struct mdp5_ctl *ctl,
+ struct mdp5_pipeline *pipeline)
+{
+ struct mdp5_interface *intf = pipeline->intf;
+
+ if (!ctl->encoder_enabled)
+ return false;
+
+ switch (intf->type) {
+ case INTF_WB:
+ return true;
+ case INTF_DSI:
+ return intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
+ default:
+ return false;
+ }
+}
+
+/*
+ * send_start_signal() - Overlay Processor Start Signal
+ *
+ * For a given control operation (display pipeline), a START signal needs to
+ * be sent in order to kick off the operation and activate all layers.
+ * e.g.: DSI command mode, Writeback
+ */
+static void send_start_signal(struct mdp5_ctl *ctl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+}
+
+/**
+ * mdp5_ctl_set_encoder_state() - set the encoder state
+ *
+ * @ctl: the CTL instance
+ * @pipeline: the encoder's INTF + MIXER configuration
+ * @enabled: true when the encoder is ready for data streaming; false otherwise.
+ *
+ * Note:
+ * This encoder state is needed to trigger START signal (data path kickoff).
+ */
+int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
+ struct mdp5_pipeline *pipeline,
+ bool enabled)
+{
+ struct mdp5_interface *intf = pipeline->intf;
+
+ if (WARN_ON(!ctl))
+ return -EINVAL;
+
+ ctl->encoder_enabled = enabled;
+ DBG("intf_%d: %s", intf->num, enabled ? "on" : "off");
+
+ if (start_signal_needed(ctl, pipeline)) {
+ send_start_signal(ctl);
+ }
+
+ return 0;
+}
+
+/*
+ * Note:
+ * CTL registers need to be flushed after calling this function
+ * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
+ */
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ int cursor_id, bool enable)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ unsigned long flags;
+ u32 blend_cfg;
+ struct mdp5_hw_mixer *mixer = pipeline->mixer;
+
+ if (WARN_ON(!mixer)) {
+		DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM\n",
+ ctl->id);
+ return -EINVAL;
+ }
+
+ if (pipeline->r_mixer) {
+		DRM_DEV_ERROR(ctl_mgr->dev->dev, "unsupported configuration\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+
+ blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm));
+
+ if (enable)
+ blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
+ else
+ blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
+
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
+ ctl->cursor_on = enable;
+
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+
+ ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
+
+ return 0;
+}
+
+static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
+ enum mdp_mixer_stage_id stage)
+{
+ switch (pipe) {
+ case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
+ case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
+ case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
+ case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
+ case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
+ case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
+ case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
+ case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
+ case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
+ case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
+ case SSPP_CURSOR0:
+ case SSPP_CURSOR1:
+ default: return 0;
+ }
+}
+
+static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
+ enum mdp_mixer_stage_id stage)
+{
+ if (stage < STAGE6 && (pipe != SSPP_CURSOR0 && pipe != SSPP_CURSOR1))
+ return 0;
+
+ switch (pipe) {
+ case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3;
+ case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3;
+ case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3;
+ case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3;
+ case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3;
+ case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3;
+ case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3;
+ case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
+ case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
+ case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
+ case SSPP_CURSOR0: return MDP5_CTL_LAYER_EXT_REG_CURSOR0(stage);
+ case SSPP_CURSOR1: return MDP5_CTL_LAYER_EXT_REG_CURSOR1(stage);
+ default: return 0;
+ }
+}
+
+static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl)
+{
+ unsigned long flags;
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ int i;
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+
+ for (i = 0; i < ctl_mgr->nlm; i++) {
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0);
+ }
+
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+}
+
+#define PIPE_LEFT 0
+#define PIPE_RIGHT 1
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ enum mdp5_pipe stage[][MAX_PIPE_STAGE],
+ enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
+ u32 stage_cnt, u32 ctl_blend_op_flags)
+{
+ struct mdp5_hw_mixer *mixer = pipeline->mixer;
+ struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
+ unsigned long flags;
+ u32 blend_cfg = 0, blend_ext_cfg = 0;
+ u32 r_blend_cfg = 0, r_blend_ext_cfg = 0;
+ int i, start_stage;
+
+ mdp5_ctl_reset_blend_regs(ctl);
+
+ if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
+ start_stage = STAGE0;
+ blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
+ if (r_mixer)
+ r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
+ } else {
+ start_stage = STAGE_BASE;
+ }
+
+ for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
+ blend_cfg |=
+ mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) |
+ mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i);
+ blend_ext_cfg |=
+ mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) |
+ mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i);
+ if (r_mixer) {
+ r_blend_cfg |=
+ mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) |
+ mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i);
+ r_blend_ext_cfg |=
+ mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) |
+ mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i);
+ }
+ }
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ if (ctl->cursor_on)
+ blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
+
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
+ blend_ext_cfg);
+ if (r_mixer) {
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
+ r_blend_cfg);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
+ r_blend_ext_cfg);
+ }
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+
+ ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
+ if (r_mixer)
+ ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);
+
+ DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm,
+ blend_cfg, blend_ext_cfg);
+ if (r_mixer)
+ DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x",
+ r_mixer->lm, r_blend_cfg, r_blend_ext_cfg);
+
+ return 0;
+}
+
+u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
+{
+ if (intf->type == INTF_WB)
+ return MDP5_CTL_FLUSH_WB;
+
+ switch (intf->num) {
+ case 0: return MDP5_CTL_FLUSH_TIMING_0;
+ case 1: return MDP5_CTL_FLUSH_TIMING_1;
+ case 2: return MDP5_CTL_FLUSH_TIMING_2;
+ case 3: return MDP5_CTL_FLUSH_TIMING_3;
+ default: return 0;
+ }
+}
+
+u32 mdp_ctl_flush_mask_cursor(int cursor_id)
+{
+ switch (cursor_id) {
+ case 0: return MDP5_CTL_FLUSH_CURSOR_0;
+ case 1: return MDP5_CTL_FLUSH_CURSOR_1;
+ default: return 0;
+ }
+}
+
+u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
+{
+ switch (pipe) {
+ case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
+ case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
+ case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
+ case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
+ case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
+ case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
+ case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
+ case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
+ case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
+ case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
+ case SSPP_CURSOR0: return MDP5_CTL_FLUSH_CURSOR_0;
+ case SSPP_CURSOR1: return MDP5_CTL_FLUSH_CURSOR_1;
+ default: return 0;
+ }
+}
+
+u32 mdp_ctl_flush_mask_lm(int lm)
+{
+ switch (lm) {
+ case 0: return MDP5_CTL_FLUSH_LM0;
+ case 1: return MDP5_CTL_FLUSH_LM1;
+ case 2: return MDP5_CTL_FLUSH_LM2;
+ case 3: return MDP5_CTL_FLUSH_LM3;
+ case 4: return MDP5_CTL_FLUSH_LM4;
+ case 5: return MDP5_CTL_FLUSH_LM5;
+ default: return 0;
+ }
+}
+
+static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ u32 flush_mask)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ u32 sw_mask = 0;
+#define BIT_NEEDS_SW_FIX(bit) \
+ (!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))
+
+ /* for some targets, cursor bit is the same as LM bit */
+ if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
+ sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm);
+
+ return sw_mask;
+}
+
+static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
+ u32 *flush_id)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+
+ if (ctl->pair) {
+ DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
+ ctl->flush_pending = true;
+ ctl_mgr->single_flush_pending_mask |= (*flush_mask);
+ *flush_mask = 0;
+
+ if (ctl->pair->flush_pending) {
+ *flush_id = min_t(u32, ctl->id, ctl->pair->id);
+ *flush_mask = ctl_mgr->single_flush_pending_mask;
+
+ ctl->flush_pending = false;
+ ctl->pair->flush_pending = false;
+ ctl_mgr->single_flush_pending_mask = 0;
+
+ DBG("Single FLUSH mask %x,ID %d", *flush_mask,
+ *flush_id);
+ }
+ }
+}
+
+/**
+ * mdp5_ctl_commit() - Register Flush
+ *
+ * @ctl: the CTL instance
+ * @pipeline: the encoder's INTF + MIXER configuration
+ * @flush_mask: bitmask of display controller hw blocks to flush
+ * @start: if true, immediately update flush registers and set START
+ * bit, otherwise accumulate flush_mask bits until we are
+ * ready to START
+ *
+ * The flush register is used to indicate several registers are all
+ * programmed, and are safe to update to the back copy of the double
+ * buffered registers.
+ *
+ * Some registers' FLUSH bits are shared when the hardware does not have
+ * dedicated bits for them; handling these is the job of fix_sw_flush().
+ *
+ * CTL registers need to be flushed in some circumstances; if that is the
+ * case, some trigger bits will be present in both flush mask and
+ * ctl->pending_ctl_trigger.
+ *
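+ * Illustrative two-phase usage (a sketch; real call sites differ):
+ *
+ *	// accumulate only, the FLUSH register is not written yet:
+ *	mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_lm(lm), false);
+ *	// write all accumulated bits and, if needed, hit START:
+ *	mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
+ *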
+ * Return: H/W flushed bit mask.
+ */
+u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
+ struct mdp5_pipeline *pipeline,
+ u32 flush_mask, bool start)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ unsigned long flags;
+ u32 flush_id = ctl->id;
+ u32 curr_ctl_flush_mask;
+
+ VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger);
+
+ if (ctl->pending_ctl_trigger & flush_mask) {
+ flush_mask |= MDP5_CTL_FLUSH_CTL;
+ ctl->pending_ctl_trigger = 0;
+ }
+
+ flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);
+
+ flush_mask &= ctl_mgr->flush_hw_mask;
+
+ curr_ctl_flush_mask = flush_mask;
+
+ fix_for_single_flush(ctl, &flush_mask, &flush_id);
+
+ if (!start) {
+ ctl->flush_mask |= flush_mask;
+ return curr_ctl_flush_mask;
+ } else {
+ flush_mask |= ctl->flush_mask;
+ ctl->flush_mask = 0;
+ }
+
+ if (flush_mask) {
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+ }
+
+ if (start_signal_needed(ctl, pipeline)) {
+ send_start_signal(ctl);
+ }
+
+ return curr_ctl_flush_mask;
+}
+
+u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
+{
+ return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
+}
+
+int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
+{
+ return WARN_ON(!ctl) ? -EINVAL : ctl->id;
+}
+
+/*
+ * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH
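+ *
+ * For example (a sketch; 'master_ctl'/'slave_ctl' are hypothetical
+ * names), split-DPL setup pairs the two DSI CTLs once:
+ *
+ *	mdp5_ctl_pair(master_ctl, slave_ctl, true);
+ *
+ * after which mdp5_ctl_commit() on either CTL accumulates into the
+ * shared single-FLUSH pending mask until both halves have flushed.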
+ */
+int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
+{
+ struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
+ struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+
+ /* do nothing silently if hw doesn't support */
+ if (!ctl_mgr->single_flush_supported)
+ return 0;
+
+ if (!enable) {
+ ctlx->pair = NULL;
+ ctly->pair = NULL;
+ mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
+ return 0;
+ } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTLs already paired\n");
+ return -EINVAL;
+ } else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
+ return -EINVAL;
+ }
+
+ ctlx->pair = ctly;
+ ctly->pair = ctlx;
+
+ mdp5_write(mdp5_kms, REG_MDP5_SPARE_0,
+ MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
+
+ return 0;
+}
+
+/*
+ * mdp5_ctlm_request() - CTL allocation
+ *
+ * Try to return a booked CTL when @intf_num is 1 or 2, and an unbooked
+ * one for other INTFs.  If no CTL is available in the preferred
+ * category, allocate from the other one.
+ *
+ * Return: NULL if no CTL is available.
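+ *
+ * (e.g. in a bonded-DSI configuration, requesting a CTL for INTF1 or
+ * INTF2 prefers the CTLs booked at init time, CTL0/CTL1, so that both
+ * DSI pipes land on the pair that supports single FLUSH.)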
+ */
+struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
+ int intf_num)
+{
+ struct mdp5_ctl *ctl = NULL;
+ const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
+ u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
+ unsigned long flags;
+ int c;
+
+ spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
+
+ /* search the preferred */
+ for (c = 0; c < ctl_mgr->nctl; c++)
+ if ((ctl_mgr->ctls[c].status & checkm) == match)
+ goto found;
+
+ dev_warn(ctl_mgr->dev->dev,
+ "fall back to the other CTL category for INTF %d!\n", intf_num);
+
+ match ^= CTL_STAT_BOOKED;
+ for (c = 0; c < ctl_mgr->nctl; c++)
+ if ((ctl_mgr->ctls[c].status & checkm) == match)
+ goto found;
+
+	DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!\n");
+ goto unlock;
+
+found:
+ ctl = &ctl_mgr->ctls[c];
+ ctl->status |= CTL_STAT_BUSY;
+ ctl->pending_ctl_trigger = 0;
+ DBG("CTL %d allocated", ctl->id);
+
+unlock:
+ spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
+ return ctl;
+}
+
+void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
+{
+ unsigned long flags;
+ int c;
+
+ for (c = 0; c < ctl_mgr->nctl; c++) {
+ struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+ ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+ }
+}
+
+void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
+{
+ kfree(ctl_mgr);
+}
+
+struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
+ void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
+{
+ struct mdp5_ctl_manager *ctl_mgr;
+ const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
+ int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
+ unsigned dsi_cnt = 0;
+ const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
+ unsigned long flags;
+ int c, ret;
+
+ ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
+ if (!ctl_mgr) {
+ DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (WARN_ON(ctl_cfg->count > MAX_CTL)) {
+ DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n",
+ ctl_cfg->count);
+ ret = -ENOSPC;
+ goto fail;
+ }
+
+ /* initialize the CTL manager: */
+ ctl_mgr->dev = dev;
+ ctl_mgr->nlm = hw_cfg->lm.count;
+ ctl_mgr->nctl = ctl_cfg->count;
+ ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
+ spin_lock_init(&ctl_mgr->pool_lock);
+
+ /* initialize each CTL of the pool: */
+ spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
+ for (c = 0; c < ctl_mgr->nctl; c++) {
+ struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
+
+ if (WARN_ON(!ctl_cfg->base[c])) {
+ DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c);
+ ret = -EINVAL;
+ spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
+ goto fail;
+ }
+ ctl->ctlm = ctl_mgr;
+ ctl->id = c;
+ ctl->reg_offset = ctl_cfg->base[c];
+ ctl->status = 0;
+ spin_lock_init(&ctl->hw_lock);
+ }
+
+ /*
+ * In bonded DSI case, CTL0 and CTL1 are always assigned to two DSI
+ * interfaces to support single FLUSH feature (Flush CTL0 and CTL1 when
+ * only write into CTL0's FLUSH register) to keep two DSI pipes in sync.
+ * Single FLUSH is supported from hw rev v3.0.
+ */
+ for (c = 0; c < ARRAY_SIZE(hw_cfg->intf.connect); c++)
+ if (hw_cfg->intf.connect[c] == INTF_DSI)
+ dsi_cnt++;
+ if ((rev >= 3) && (dsi_cnt > 1)) {
+ ctl_mgr->single_flush_supported = true;
+ /* Reserve CTL0/1 for INTF1/2 */
+ ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
+ ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
+ }
+ spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
+ DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
+
+ return ctl_mgr;
+
+fail:
+ if (ctl_mgr)
+ mdp5_ctlm_destroy(ctl_mgr);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
new file mode 100644
index 000000000..c2af68aa7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MDP5_CTL_H__
+#define __MDP5_CTL_H__
+
+#include "msm_drv.h"
+
+/*
+ * CTL Manager prototypes:
+ * mdp5_ctlm_init() returns a ctlm (CTL Manager) handler,
+ * which is then used to call the other mdp5_ctlm_*(ctlm, ...) functions.
+ */
+struct mdp5_ctl_manager;
+struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
+ void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd);
+void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
+void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
+
+/*
+ * CTL prototypes:
+ * mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler,
+ * which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
+ */
+struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, int intf_num);
+
+int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);
+
+struct mdp5_interface;
+struct mdp5_pipeline;
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *p);
+int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, struct mdp5_pipeline *p,
+ bool enabled);
+
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ int cursor_id, bool enable);
+int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
+
+#define MAX_PIPE_STAGE 2
+
+/*
+ * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM)
+ *
+ * @stage: array to contain the pipe num for each stage
+ * @stage_cnt: valid stage number in stage array
+ * @ctl_blend_op_flags: blender operation mode flags
+ *
+ * Note:
+ * CTL registers need to be flushed after calling this function
+ * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
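+ *
+ * A minimal staging sketch (single LM, base layer plus one overlay;
+ * illustrative values, not taken from a real call site):
+ *
+ *	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
+ *
+ *	stage[STAGE0][0] = SSPP_RGB0;	(base layer, left pipe)
+ *	stage[STAGE1][0] = SSPP_VIG0;	(overlay, left pipe)
+ *	mdp5_ctl_blend(ctl, pipeline, stage, NULL, 2, 0);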
+ */
+#define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0)
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ enum mdp5_pipe stage[][MAX_PIPE_STAGE],
+ enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
+ u32 stage_cnt, u32 ctl_blend_op_flags);
+
+/**
+ * mdp_ctl_flush_mask...() - Register FLUSH masks
+ *
+ * These masks are used to specify which block(s) need to be flushed
+ * through @flush_mask parameter in mdp5_ctl_commit(.., flush_mask).
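+ *
+ * e.g. (sketch) flushing one LM together with a pipe staged on it:
+ *
+ *	flush_mask = mdp_ctl_flush_mask_lm(mixer->lm) |
+ *		     mdp_ctl_flush_mask_pipe(pipe);
+ *	mdp5_ctl_commit(ctl, pipeline, flush_mask, true);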
+ */
+u32 mdp_ctl_flush_mask_lm(int lm);
+u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe);
+u32 mdp_ctl_flush_mask_cursor(int cursor_id);
+u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
+
+/* @flush_mask: see the CTL flush mask helpers declared above */
+u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ u32 flush_mask, bool start);
+u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl);
+
+#endif /* __MDP5_CTL_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
new file mode 100644
index 000000000..79d67c495
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_probe_helper.h>
+
+#include "mdp5_kms.h"
+
+static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static void mdp5_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(mdp5_encoder);
+}
+
+static const struct drm_encoder_funcs mdp5_encoder_funcs = {
+ .destroy = mdp5_encoder_destroy,
+};
+
+static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct drm_connector *connector;
+ int intf = mdp5_encoder->intf->num;
+ uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
+ uint32_t display_v_start, display_v_end;
+ uint32_t hsync_start_x, hsync_end_x;
+ uint32_t format = 0x2100;
+ unsigned long flags;
+
+ mode = adjusted_mode;
+
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
+
+ ctrl_pol = 0;
+
+ /* DSI controller cannot handle active-low sync signals. */
+ if (mdp5_encoder->intf->type != INTF_DSI) {
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl_pol |= MDP5_INTF_POLARITY_CTL_VSYNC_LOW;
+ }
+ /* probably need to get DATA_EN polarity from panel.. */
+
+ dtv_hsync_skew = 0; /* get this from panel? */
+
+ /* Get color format from panel, default is 8bpc */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ switch (connector->display_info.bpc) {
+ case 4:
+ format |= 0;
+ break;
+ case 5:
+ format |= 0x15;
+ break;
+ case 6:
+ format |= 0x2A;
+ break;
+ case 8:
+ default:
+ format |= 0x3F;
+ break;
+ }
+ break;
+ }
+ }
+
+ hsync_start_x = (mode->htotal - mode->hsync_start);
+ hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
+
+ vsync_period = mode->vtotal * mode->htotal;
+ vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
+ display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
+ display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
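+	/*
+	 * e.g. for a standard 1080p60 mode (htotal=2200, vtotal=1125,
+	 * vsync_start=1084, vsync_end=1089, vdisplay=1080, skew=0):
+	 * vsync_period = 1125 * 2200 = 2475000,
+	 * vsync_len = 5 * 2200 = 11000,
+	 * display_v_start = 41 * 2200 = 90200,
+	 * display_v_end = 2475000 - (4 * 2200) - 1 = 2466199.
+	 */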
+
+ /*
+ * For edp only:
+ * DISPLAY_V_START = (VBP * HCYCLE) + HBP
+ * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
+ */
+ if (mdp5_encoder->intf->type == INTF_eDP) {
+ display_v_start += mode->htotal - mode->hsync_start;
+ display_v_end -= mode->hsync_start - mode->hdisplay;
+ }
+
+ spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
+
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
+ MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) |
+ MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_PERIOD_F0(intf), vsync_period);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_LEN_F0(intf), vsync_len);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_HCTL(intf),
+ MDP5_INTF_DISPLAY_HCTL_START(hsync_start_x) |
+ MDP5_INTF_DISPLAY_HCTL_END(hsync_end_x));
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VSTART_F0(intf), display_v_start);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VEND_F0(intf), display_v_end);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_BORDER_COLOR(intf), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_UNDERFLOW_COLOR(intf), 0xff);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_SKEW(intf), dtv_hsync_skew);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_POLARITY_CTL(intf), ctrl_pol);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_HCTL(intf),
+ MDP5_INTF_ACTIVE_HCTL_START(0) |
+ MDP5_INTF_ACTIVE_HCTL_END(0));
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VSTART_F0(intf), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */
+
+ spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+
+ mdp5_crtc_set_pipeline(encoder->crtc);
+}
+
+static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ struct mdp5_ctl *ctl = mdp5_encoder->ctl;
+ struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
+ struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+ struct mdp5_interface *intf = mdp5_encoder->intf;
+ int intfn = mdp5_encoder->intf->num;
+ unsigned long flags;
+
+ if (WARN_ON(!mdp5_encoder->enabled))
+ return;
+
+ mdp5_ctl_set_encoder_state(ctl, pipeline, false);
+
+ spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0);
+ spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
+
+ /*
+	 * Wait for a vsync so we know that ENABLE=0 has latched before
+	 * the (connector) source of the vsyncs gets disabled. Otherwise
+	 * we end up in a funny state if we re-enable before the disable
+	 * latches, with the result that some of the setting changes for
+	 * the new modeset (like the new scanout buffer) don't latch
+	 * properly.
+ */
+ mdp_irq_wait(&mdp5_kms->base, intf2vblank(mixer, intf));
+
+ mdp5_encoder->enabled = false;
+}
+
+static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ struct mdp5_ctl *ctl = mdp5_encoder->ctl;
+ struct mdp5_interface *intf = mdp5_encoder->intf;
+ struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
+ int intfn = intf->num;
+ unsigned long flags;
+
+ if (WARN_ON(mdp5_encoder->enabled))
+ return;
+
+ spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
+ spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
+
+ mdp5_ctl_set_encoder_state(ctl, pipeline, true);
+
+ mdp5_encoder->enabled = true;
+}
+
+static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_interface *intf = mdp5_encoder->intf;
+
+ if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
+ mdp5_cmd_encoder_mode_set(encoder, mode, adjusted_mode);
+ else
+ mdp5_vid_encoder_mode_set(encoder, mode, adjusted_mode);
+}
+
+static void mdp5_encoder_disable(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_interface *intf = mdp5_encoder->intf;
+
+ if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
+ mdp5_cmd_encoder_disable(encoder);
+ else
+ mdp5_vid_encoder_disable(encoder);
+}
+
+static void mdp5_encoder_enable(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_interface *intf = mdp5_encoder->intf;
+ /* this isn't right I think */
+ struct drm_crtc_state *cstate = encoder->crtc->state;
+
+ mdp5_encoder_mode_set(encoder, &cstate->mode, &cstate->adjusted_mode);
+
+ if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
+ mdp5_cmd_encoder_enable(encoder);
+ else
+ mdp5_vid_encoder_enable(encoder);
+}
+
+static int mdp5_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc_state);
+ struct mdp5_interface *intf = mdp5_encoder->intf;
+ struct mdp5_ctl *ctl = mdp5_encoder->ctl;
+
+ mdp5_cstate->ctl = ctl;
+ mdp5_cstate->pipeline.intf = intf;
+
+ /*
+ * This is a bit awkward, but we want to flush the CTL and hit the
+	 * START bit at most once for an atomic update. In the non-full-
+	 * modeset case this is done from crtc->atomic_flush(), but that
+	 * is too early for a full modeset, where we instead defer it to
+	 * encoder->enable(). But we need to *know* whether
+ * encoder->enable() will be called to do this:
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ mdp5_cstate->defer_start = true;
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
+ .disable = mdp5_encoder_disable,
+ .enable = mdp5_encoder_enable,
+ .atomic_check = mdp5_encoder_atomic_check,
+};
+
+int mdp5_encoder_get_linecount(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ int intf = mdp5_encoder->intf->num;
+
+ return mdp5_read(mdp5_kms, REG_MDP5_INTF_LINE_COUNT(intf));
+}
+
+u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_kms *mdp5_kms = get_kms(encoder);
+ int intf = mdp5_encoder->intf->num;
+
+ return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf));
+}
+
+int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder);
+ struct mdp5_kms *mdp5_kms;
+ struct device *dev;
+ int intf_num;
+ u32 data = 0;
+
+ if (!encoder || !slave_encoder)
+ return -EINVAL;
+
+ mdp5_kms = get_kms(encoder);
+ intf_num = mdp5_encoder->intf->num;
+
+	/* Switch the slave encoder's TimingGen to Sync mode, so that it
+	 * uses the master's enable signal.
+	 */
+ if (intf_num == 1)
+ data |= MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC;
+ else if (intf_num == 2)
+ data |= MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC;
+ else
+ return -EINVAL;
+
+ dev = &mdp5_kms->pdev->dev;
+	/* Make sure clocks are on when connectors call this function. */
+ pm_runtime_get_sync(dev);
+
+ /* Dumb Panel, Sync mode */
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
+
+ mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_interface *intf = mdp5_encoder->intf;
+
+ /* TODO: Expand this to set writeback modes too */
+ if (cmd_mode) {
+ WARN_ON(intf->type != INTF_DSI);
+ intf->mode = MDP5_INTF_DSI_MODE_COMMAND;
+ } else {
+ if (intf->type == INTF_DSI)
+ intf->mode = MDP5_INTF_DSI_MODE_VIDEO;
+ else
+ intf->mode = MDP5_INTF_MODE_NONE;
+ }
+}
+
+/* initialize encoder */
+struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
+ struct mdp5_interface *intf,
+ struct mdp5_ctl *ctl)
+{
+ struct drm_encoder *encoder = NULL;
+ struct mdp5_encoder *mdp5_encoder;
+ int enc_type = (intf->type == INTF_DSI) ?
+ DRM_MODE_ENCODER_DSI : DRM_MODE_ENCODER_TMDS;
+ int ret;
+
+ mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL);
+ if (!mdp5_encoder) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ encoder = &mdp5_encoder->base;
+ mdp5_encoder->ctl = ctl;
+ mdp5_encoder->intf = intf;
+
+ spin_lock_init(&mdp5_encoder->intf_lock);
+
+ drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type, NULL);
+
+ drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
+
+ return encoder;
+
+fail:
+ if (encoder)
+ mdp5_encoder_destroy(encoder);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c
new file mode 100644
index 000000000..9b4c8d92f
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/irq.h>
+
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+
+#include "msm_drv.h"
+#include "mdp5_kms.h"
+
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+ uint32_t old_irqmask)
+{
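+	/*
+	 * Clear stale status only for the interrupts being newly enabled:
+	 * irqmask ^ (irqmask & old_irqmask) keeps the bits that are set in
+	 * irqmask but were not set in old_irqmask.
+	 */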
+ mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_CLEAR,
+ irqmask ^ (irqmask & old_irqmask));
+ mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask);
+}
+
+static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp5_kms *mdp5_kms = container_of(irq, struct mdp5_kms, error_handler);
+ static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1);
+ extern bool dumpstate;
+
+ DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);
+
+ if (dumpstate && __ratelimit(&rs)) {
+ struct drm_printer p = drm_info_printer(mdp5_kms->dev->dev);
+ drm_state_dump(mdp5_kms->dev, &p);
+ if (mdp5_kms->smp)
+ mdp5_smp_dump(mdp5_kms->smp, &p);
+ }
+}
+
+void mdp5_irq_preinstall(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct device *dev = &mdp5_kms->pdev->dev;
+
+ pm_runtime_get_sync(dev);
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
+ pm_runtime_put_sync(dev);
+}
+
+int mdp5_irq_postinstall(struct msm_kms *kms)
+{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+ struct device *dev = &mdp5_kms->pdev->dev;
+ struct mdp_irq *error_handler = &mdp5_kms->error_handler;
+
+ error_handler->irq = mdp5_irq_error_handler;
+ error_handler->irqmask = MDP5_IRQ_INTF0_UNDER_RUN |
+ MDP5_IRQ_INTF1_UNDER_RUN |
+ MDP5_IRQ_INTF2_UNDER_RUN |
+ MDP5_IRQ_INTF3_UNDER_RUN;
+
+ pm_runtime_get_sync(dev);
+ mdp_irq_register(mdp_kms, error_handler);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+void mdp5_irq_uninstall(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct device *dev = &mdp5_kms->pdev->dev;
+
+ pm_runtime_get_sync(dev);
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
+ pm_runtime_put_sync(dev);
+}
+
+irqreturn_t mdp5_irq(struct msm_kms *kms)
+{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+ struct drm_device *dev = mdp5_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ unsigned int id;
+ uint32_t status, enable;
+
+ enable = mdp5_read(mdp5_kms, REG_MDP5_INTR_EN);
+ status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS) & enable;
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);
+
+ VERB("status=%08x", status);
+
+ mdp_dispatch_irqs(mdp_kms, status);
+
+ for (id = 0; id < priv->num_crtcs; id++)
+ if (status & mdp5_crtc_vblank(priv->crtcs[id]))
+ drm_handle_vblank(dev, id);
+
+ return IRQ_HANDLED;
+}
+
+int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct device *dev = &mdp5_kms->pdev->dev;
+
+ pm_runtime_get_sync(dev);
+ mdp_update_vblank_mask(to_mdp_kms(kms),
+ mdp5_crtc_vblank(crtc), true);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct device *dev = &mdp5_kms->pdev->dev;
+
+ pm_runtime_get_sync(dev);
+ mdp_update_vblank_mask(to_mdp_kms(kms),
+ mdp5_crtc_vblank(crtc), false);
+ pm_runtime_put_sync(dev);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
new file mode 100644
index 000000000..29ae5c961
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -0,0 +1,1009 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/interconnect.h>
+#include <linux/of_irq.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_vblank.h>
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+#include "mdp5_kms.h"
+
+static int mdp5_hw_init(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct device *dev = &mdp5_kms->pdev->dev;
+ unsigned long flags;
+
+ pm_runtime_get_sync(dev);
+
+ /* Magic unknown register writes:
+ *
+ * W VBIF:0x004 00000001 (mdss_mdp.c:839)
+ * W MDP5:0x2e0 0xe9 (mdss_mdp.c:839)
+ * W MDP5:0x2e4 0x55 (mdss_mdp.c:839)
+ * W MDP5:0x3ac 0xc0000ccc (mdss_mdp.c:839)
+ * W MDP5:0x3b4 0xc0000ccc (mdss_mdp.c:839)
+ * W MDP5:0x3bc 0xcccccc (mdss_mdp.c:839)
+ * W MDP5:0x4a8 0xcccc0c0 (mdss_mdp.c:839)
+ * W MDP5:0x4b0 0xccccc0c0 (mdss_mdp.c:839)
+ * W MDP5:0x4b8 0xccccc000 (mdss_mdp.c:839)
+ *
+	 * The downstream fbdev driver gets these register offsets/values
+	 * from DT.. not really sure what these registers are, or whether
+	 * different boards/SoCs need different values.  I guess they are
+	 * the golden registers.
+ *
+ * Not setting these does not seem to cause any problem. But
+ * we may be getting lucky with the bootloader initializing
+ * them for us. OTOH, if we can always count on the bootloader
+ * setting the golden registers, then perhaps we don't need to
+ * care.
+ */
+
+ spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
+ mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
+ spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
+
+ mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+/* Global/shared object state funcs */
+
+/*
+ * This is a helper that returns the private state currently in operation.
+ * Note that this would return the "old_state" if called in the atomic check
+ * path, and the "new_state" after the atomic swap has been done.
+ */
+struct mdp5_global_state *
+mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms)
+{
+ return to_mdp5_global_state(mdp5_kms->glob_state.state);
+}
+
+/*
+ * This acquires the modeset lock set aside for global state and
+ * creates a new duplicated private object state.
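+ *
+ * A minimal usage sketch (from an atomic_check-time caller):
+ *
+ *	global_state = mdp5_get_global_state(state);
+ *	if (IS_ERR(global_state))
+ *		return PTR_ERR(global_state);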
+ */
+struct mdp5_global_state *mdp5_get_global_state(struct drm_atomic_state *s)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct drm_private_state *priv_state;
+ int ret;
+
+ ret = drm_modeset_lock(&mdp5_kms->glob_state_lock, s->acquire_ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ priv_state = drm_atomic_get_private_obj_state(s, &mdp5_kms->glob_state);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
+
+ return to_mdp5_global_state(priv_state);
+}
+
+static struct drm_private_state *
+mdp5_global_duplicate_state(struct drm_private_obj *obj)
+{
+ struct mdp5_global_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ return &state->base;
+}
+
+static void mdp5_global_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state);
+
+ kfree(mdp5_state);
+}
+
+static const struct drm_private_state_funcs mdp5_global_state_funcs = {
+ .atomic_duplicate_state = mdp5_global_duplicate_state,
+ .atomic_destroy_state = mdp5_global_destroy_state,
+};
+
+static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms)
+{
+ struct mdp5_global_state *state;
+
+ drm_modeset_lock_init(&mdp5_kms->glob_state_lock);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->mdp5_kms = mdp5_kms;
+
+ drm_atomic_private_obj_init(mdp5_kms->dev, &mdp5_kms->glob_state,
+ &state->base,
+ &mdp5_global_state_funcs);
+ return 0;
+}
+
+static void mdp5_enable_commit(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ pm_runtime_get_sync(&mdp5_kms->pdev->dev);
+}
+
+static void mdp5_disable_commit(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ pm_runtime_put_sync(&mdp5_kms->pdev->dev);
+}
+
+static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct mdp5_global_state *global_state;
+
+ global_state = mdp5_get_existing_global_state(mdp5_kms);
+
+ if (mdp5_kms->smp)
+ mdp5_smp_prepare_commit(mdp5_kms->smp, &global_state->smp);
+}
+
+static void mdp5_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
+{
+ /* TODO */
+}
+
+static void mdp5_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct drm_crtc *crtc;
+
+ for_each_crtc_mask(mdp5_kms->dev, crtc, crtc_mask)
+ mdp5_crtc_wait_for_commit_done(crtc);
+}
+
+static void mdp5_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct mdp5_global_state *global_state;
+
+ global_state = mdp5_get_existing_global_state(mdp5_kms);
+
+ if (mdp5_kms->smp)
+ mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp);
+}
+
+static int mdp5_set_split_display(struct msm_kms *kms,
+ struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder,
+ bool is_cmd_mode)
+{
+ if (is_cmd_mode)
+ return mdp5_cmd_encoder_set_split_display(encoder,
+ slave_encoder);
+ else
+ return mdp5_vid_encoder_set_split_display(encoder,
+ slave_encoder);
+}
+
+static void mdp5_destroy(struct mdp5_kms *mdp5_kms);
+
+static void mdp5_kms_destroy(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct msm_gem_address_space *aspace = kms->aspace;
+ int i;
+
+ for (i = 0; i < mdp5_kms->num_hwmixers; i++)
+ mdp5_mixer_destroy(mdp5_kms->hwmixers[i]);
+
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++)
+ mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);
+
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu);
+ msm_gem_address_space_put(aspace);
+ }
+
+ mdp_kms_destroy(&mdp5_kms->base);
+ mdp5_destroy(mdp5_kms);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int smp_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ if (!mdp5_kms->smp) {
+ drm_printf(&p, "no SMP pool\n");
+ return 0;
+ }
+
+ mdp5_smp_dump(mdp5_kms->smp, &p);
+
+ return 0;
+}
+
+static struct drm_info_list mdp5_debugfs_list[] = {
+ {"smp", smp_show },
+};
+
+static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
+{
+ drm_debugfs_create_files(mdp5_debugfs_list,
+ ARRAY_SIZE(mdp5_debugfs_list),
+ minor->debugfs_root, minor);
+
+ return 0;
+}
+#endif
+
+static const struct mdp_kms_funcs kms_funcs = {
+ .base = {
+ .hw_init = mdp5_hw_init,
+ .irq_preinstall = mdp5_irq_preinstall,
+ .irq_postinstall = mdp5_irq_postinstall,
+ .irq_uninstall = mdp5_irq_uninstall,
+ .irq = mdp5_irq,
+ .enable_vblank = mdp5_enable_vblank,
+ .disable_vblank = mdp5_disable_vblank,
+ .flush_commit = mdp5_flush_commit,
+ .enable_commit = mdp5_enable_commit,
+ .disable_commit = mdp5_disable_commit,
+ .prepare_commit = mdp5_prepare_commit,
+ .wait_flush = mdp5_wait_flush,
+ .complete_commit = mdp5_complete_commit,
+ .get_format = mdp_get_format,
+ .set_split_display = mdp5_set_split_display,
+ .destroy = mdp5_kms_destroy,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = mdp5_kms_debugfs_init,
+#endif
+ },
+ .set_irqmask = mdp5_set_irqmask,
+};
+
+static int mdp5_disable(struct mdp5_kms *mdp5_kms)
+{
+ DBG("");
+
+ mdp5_kms->enable_count--;
+ WARN_ON(mdp5_kms->enable_count < 0);
+
+ clk_disable_unprepare(mdp5_kms->tbu_rt_clk);
+ clk_disable_unprepare(mdp5_kms->tbu_clk);
+ clk_disable_unprepare(mdp5_kms->ahb_clk);
+ clk_disable_unprepare(mdp5_kms->axi_clk);
+ clk_disable_unprepare(mdp5_kms->core_clk);
+ clk_disable_unprepare(mdp5_kms->lut_clk);
+
+ return 0;
+}
+
+static int mdp5_enable(struct mdp5_kms *mdp5_kms)
+{
+ DBG("");
+
+ mdp5_kms->enable_count++;
+
+ clk_prepare_enable(mdp5_kms->ahb_clk);
+ clk_prepare_enable(mdp5_kms->axi_clk);
+ clk_prepare_enable(mdp5_kms->core_clk);
+ clk_prepare_enable(mdp5_kms->lut_clk);
+ clk_prepare_enable(mdp5_kms->tbu_clk);
+ clk_prepare_enable(mdp5_kms->tbu_rt_clk);
+
+ return 0;
+}
+
+static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
+ struct mdp5_interface *intf,
+ struct mdp5_ctl *ctl)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ struct drm_encoder *encoder;
+
+ encoder = mdp5_encoder_init(dev, intf, ctl);
+ if (IS_ERR(encoder)) {
+ DRM_DEV_ERROR(dev->dev, "failed to construct encoder\n");
+ return encoder;
+ }
+
+ return encoder;
+}
+
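+/*
+ * Map a global INTF number to a DSI index: e.g. if hw_cfg->intf.connect
+ * is { INTF_eDP, INTF_DSI, INTF_DSI, ... }, then intf_num 1 maps to dsi
+ * id 0 and intf_num 2 maps to dsi id 1.
+ */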
+static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
+{
+ const enum mdp5_intf_type *intfs = hw_cfg->intf.connect;
+ const int intf_cnt = ARRAY_SIZE(hw_cfg->intf.connect);
+ int id = 0, i;
+
+ for (i = 0; i < intf_cnt; i++) {
+ if (intfs[i] == INTF_DSI) {
+ if (intf_num == i)
+ return id;
+
+ id++;
+ }
+ }
+
+ return -EINVAL;
+}
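+
+/*
+ * Worked example for get_dsi_id_from_intf(), assuming a hypothetical hw
+ * config with intf.connect = { INTF_DISABLED, INTF_DSI, INTF_HDMI,
+ * INTF_DSI }: intf_num 1 is the first DSI interface and maps to dsi id 0,
+ * intf_num 3 maps to dsi id 1, and any non-DSI intf_num returns -EINVAL.
+ */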
+
+static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
+ struct mdp5_interface *intf)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
+ struct mdp5_ctl *ctl;
+ struct drm_encoder *encoder;
+ int ret = 0;
+
+ switch (intf->type) {
+ case INTF_eDP:
+ DRM_DEV_INFO(dev->dev, "Skipping eDP interface %d\n", intf->num);
+ break;
+ case INTF_HDMI:
+ if (!priv->hdmi)
+ break;
+
+ ctl = mdp5_ctlm_request(ctlm, intf->num);
+ if (!ctl) {
+ ret = -EINVAL;
+ break;
+ }
+
+ encoder = construct_encoder(mdp5_kms, intf, ctl);
+ if (IS_ERR(encoder)) {
+ ret = PTR_ERR(encoder);
+ break;
+ }
+
+ ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
+ break;
+ case INTF_DSI:
+ {
+ const struct mdp5_cfg_hw *hw_cfg =
+ mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+ int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);
+
+ if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
+ DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
+ intf->num);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!priv->dsi[dsi_id])
+ break;
+
+ ctl = mdp5_ctlm_request(ctlm, intf->num);
+ if (!ctl) {
+ ret = -EINVAL;
+ break;
+ }
+
+ encoder = construct_encoder(mdp5_kms, intf, ctl);
+ if (IS_ERR(encoder)) {
+ ret = PTR_ERR(encoder);
+ break;
+ }
+
+ ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
+ if (!ret)
+ mdp5_encoder_set_intf_mode(encoder, msm_dsi_is_cmd_mode(priv->dsi[dsi_id]));
+
+ break;
+ }
+ default:
+ DRM_DEV_ERROR(dev->dev, "unknown intf: %d\n", intf->type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int modeset_init(struct mdp5_kms *mdp5_kms)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ unsigned int num_crtcs;
+ int i, ret, pi = 0, ci = 0;
+ struct drm_plane *primary[MAX_BASES] = { NULL };
+ struct drm_plane *cursor[MAX_BASES] = { NULL };
+ struct drm_encoder *encoder;
+ unsigned int num_encoders;
+
+ /*
+ * Construct encoders and perform modeset initialization of connector
+ * devices for each external display interface.
+ */
+ for (i = 0; i < mdp5_kms->num_intfs; i++) {
+ ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]);
+ if (ret)
+ goto fail;
+ }
+
+ num_encoders = 0;
+ drm_for_each_encoder(encoder, dev)
+ num_encoders++;
+
+ /*
+ * We should ideally have fewer encoders (set up by parsing the MDP5
+ * interfaces) than layer mixers present in HW, but let's be safe here
+ * anyway.
+ */
+ num_crtcs = min(num_encoders, mdp5_kms->num_hwmixers);
+
+ /*
+ * Construct planes equaling the number of hw pipes, and CRTCs for the
+ * N encoders set up by the driver. The first N planes become primary
+ * planes for the CRTCs, with the remainder as overlay planes:
+ */
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
+ struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
+ struct drm_plane *plane;
+ enum drm_plane_type type;
+
+ if (i < num_crtcs)
+ type = DRM_PLANE_TYPE_PRIMARY;
+ else if (hwpipe->caps & MDP_PIPE_CAP_CURSOR)
+ type = DRM_PLANE_TYPE_CURSOR;
+ else
+ type = DRM_PLANE_TYPE_OVERLAY;
+
+ plane = mdp5_plane_init(dev, type);
+ if (IS_ERR(plane)) {
+ ret = PTR_ERR(plane);
+ DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
+ goto fail;
+ }
+
+ if (type == DRM_PLANE_TYPE_PRIMARY)
+ primary[pi++] = plane;
+ if (type == DRM_PLANE_TYPE_CURSOR)
+ cursor[ci++] = plane;
+ }
+
+ for (i = 0; i < num_crtcs; i++) {
+ struct drm_crtc *crtc;
+
+ crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i);
+ if (IS_ERR(crtc)) {
+ ret = PTR_ERR(crtc);
+ DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
+ goto fail;
+ }
+ priv->crtcs[priv->num_crtcs++] = crtc;
+ }
+
+ /*
+ * Now that we know the number of crtcs we've created, set the possible
+ * crtcs for the encoders
+ */
+ drm_for_each_encoder(encoder, dev)
+ encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
+
+ return 0;
+
+fail:
+ return ret;
+}
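+
+/*
+ * Example of the plane/CRTC allocation above, for a hypothetical config
+ * with 2 encoders and 6 hwpipes of which only the last is cursor-capable:
+ * num_crtcs = 2, so hwpipes 0..1 back the primary planes (one per CRTC),
+ * the cursor-capable pipe becomes a cursor plane, and the remaining pipes
+ * become overlay planes.
+ */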
+
+static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
+ u32 *major, u32 *minor)
+{
+ struct device *dev = &mdp5_kms->pdev->dev;
+ u32 version;
+
+ pm_runtime_get_sync(dev);
+ version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
+ pm_runtime_put_sync(dev);
+
+ *major = FIELD(version, MDP5_HW_VERSION_MAJOR);
+ *minor = FIELD(version, MDP5_HW_VERSION_MINOR);
+
+ DRM_DEV_INFO(dev, "MDP5 version v%d.%d", *major, *minor);
+}
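+
+/*
+ * FIELD() extracts a bitfield using the xml-generated <name>__MASK and
+ * <name>__SHIFT pairs, roughly:
+ *
+ *   #define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)
+ *
+ * so e.g. a HW_VERSION value of 0x10000000, with MAJOR in the top nibble,
+ * yields major = 1 (illustrative layout; see mdp5.xml.h for the actual
+ * field definitions).
+ */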
+
+static int get_clk(struct platform_device *pdev, struct clk **clkp,
+ const char *name, bool mandatory)
+{
+ struct device *dev = &pdev->dev;
+ struct clk *clk = msm_clk_get(pdev, name);
+ if (IS_ERR(clk) && mandatory) {
+ DRM_DEV_ERROR(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+ if (IS_ERR(clk))
+ DBG("skipping %s", name);
+ else
+ *clkp = clk;
+
+ return 0;
+}
+
+static int mdp5_init(struct platform_device *pdev, struct drm_device *dev);
+
+static int mdp5_kms_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev;
+ struct mdp5_kms *mdp5_kms;
+ struct mdp5_cfg *config;
+ struct msm_kms *kms;
+ struct msm_gem_address_space *aspace;
+ int irq, i, ret;
+
+ ret = mdp5_init(to_platform_device(dev->dev), dev);
+ if (ret)
+ return ret;
+
+ /* priv->kms has been populated by mdp5_init() above */
+ kms = priv->kms;
+ if (!kms)
+ return -ENOMEM;
+
+ mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ pdev = mdp5_kms->pdev;
+
+ ret = mdp_kms_init(&mdp5_kms->base, &kms_funcs);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "failed to init kms\n");
+ goto fail;
+ }
+
+ irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!irq) {
+ ret = -EINVAL;
+ DRM_DEV_ERROR(&pdev->dev, "failed to get irq\n");
+ goto fail;
+ }
+
+ kms->irq = irq;
+
+ config = mdp5_cfg_get_config(mdp5_kms->cfg);
+
+ /* Make sure things are off before attaching the IOMMU (the bootloader
+ * could have left things on, in which case we'll start getting faults
+ * if we don't disable them):
+ */
+ pm_runtime_get_sync(&pdev->dev);
+ for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
+ if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
+ !config->hw->intf.base[i])
+ continue;
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
+
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
+ }
+ mdelay(16);
+
+ aspace = msm_kms_init_aspace(mdp5_kms->dev);
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
+ goto fail;
+ }
+
+ kms->aspace = aspace;
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ ret = modeset_init(mdp5_kms);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret);
+ goto fail;
+ }
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = 0xffff;
+ dev->mode_config.max_height = 0xffff;
+
+ dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
+ dev->vblank_disable_immediate = true;
+
+ return 0;
+fail:
+ if (kms)
+ mdp5_kms_destroy(kms);
+
+ return ret;
+}
+
+static void mdp5_destroy(struct mdp5_kms *mdp5_kms)
+{
+ int i;
+
+ if (mdp5_kms->ctlm)
+ mdp5_ctlm_destroy(mdp5_kms->ctlm);
+ if (mdp5_kms->smp)
+ mdp5_smp_destroy(mdp5_kms->smp);
+ if (mdp5_kms->cfg)
+ mdp5_cfg_destroy(mdp5_kms->cfg);
+
+ for (i = 0; i < mdp5_kms->num_intfs; i++)
+ kfree(mdp5_kms->intfs[i]);
+
+ if (mdp5_kms->rpm_enabled)
+ pm_runtime_disable(&mdp5_kms->pdev->dev);
+
+ drm_atomic_private_obj_fini(&mdp5_kms->glob_state);
+ drm_modeset_lock_fini(&mdp5_kms->glob_state_lock);
+}
+
+static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
+ const enum mdp5_pipe *pipes, const uint32_t *offsets,
+ uint32_t caps)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ int i, ret;
+
+ for (i = 0; i < cnt; i++) {
+ struct mdp5_hw_pipe *hwpipe;
+
+ hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
+ if (IS_ERR(hwpipe)) {
+ ret = PTR_ERR(hwpipe);
+ DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
+ pipe2name(pipes[i]), ret);
+ return ret;
+ }
+ hwpipe->idx = mdp5_kms->num_hwpipes;
+ mdp5_kms->hwpipes[mdp5_kms->num_hwpipes++] = hwpipe;
+ }
+
+ return 0;
+}
+
+static int hwpipe_init(struct mdp5_kms *mdp5_kms)
+{
+ static const enum mdp5_pipe rgb_planes[] = {
+ SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
+ };
+ static const enum mdp5_pipe vig_planes[] = {
+ SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
+ };
+ static const enum mdp5_pipe dma_planes[] = {
+ SSPP_DMA0, SSPP_DMA1,
+ };
+ static const enum mdp5_pipe cursor_planes[] = {
+ SSPP_CURSOR0, SSPP_CURSOR1,
+ };
+ const struct mdp5_cfg_hw *hw_cfg;
+ int ret;
+
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+ /* Construct RGB pipes: */
+ ret = construct_pipes(mdp5_kms, hw_cfg->pipe_rgb.count, rgb_planes,
+ hw_cfg->pipe_rgb.base, hw_cfg->pipe_rgb.caps);
+ if (ret)
+ return ret;
+
+ /* Construct video (VIG) pipes: */
+ ret = construct_pipes(mdp5_kms, hw_cfg->pipe_vig.count, vig_planes,
+ hw_cfg->pipe_vig.base, hw_cfg->pipe_vig.caps);
+ if (ret)
+ return ret;
+
+ /* Construct DMA pipes: */
+ ret = construct_pipes(mdp5_kms, hw_cfg->pipe_dma.count, dma_planes,
+ hw_cfg->pipe_dma.base, hw_cfg->pipe_dma.caps);
+ if (ret)
+ return ret;
+
+ /* Construct cursor pipes: */
+ ret = construct_pipes(mdp5_kms, hw_cfg->pipe_cursor.count,
+ cursor_planes, hw_cfg->pipe_cursor.base,
+ hw_cfg->pipe_cursor.caps);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int hwmixer_init(struct mdp5_kms *mdp5_kms)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ const struct mdp5_cfg_hw *hw_cfg;
+ int i, ret;
+
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+ for (i = 0; i < hw_cfg->lm.count; i++) {
+ struct mdp5_hw_mixer *mixer;
+
+ mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
+ if (IS_ERR(mixer)) {
+ ret = PTR_ERR(mixer);
+ DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
+ i, ret);
+ return ret;
+ }
+
+ mixer->idx = mdp5_kms->num_hwmixers;
+ mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer;
+ }
+
+ return 0;
+}
+
+static int interface_init(struct mdp5_kms *mdp5_kms)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ const struct mdp5_cfg_hw *hw_cfg;
+ const enum mdp5_intf_type *intf_types;
+ int i;
+
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+ intf_types = hw_cfg->intf.connect;
+
+ for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
+ struct mdp5_interface *intf;
+
+ if (intf_types[i] == INTF_DISABLED)
+ continue;
+
+ intf = kzalloc(sizeof(*intf), GFP_KERNEL);
+ if (!intf) {
+ DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
+ return -ENOMEM;
+ }
+
+ intf->num = i;
+ intf->type = intf_types[i];
+ intf->mode = MDP5_INTF_MODE_NONE;
+ intf->idx = mdp5_kms->num_intfs;
+ mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf;
+ }
+
+ return 0;
+}
+
+static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct mdp5_kms *mdp5_kms;
+ struct mdp5_cfg *config;
+ u32 major, minor;
+ int ret;
+
+ mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
+ if (!mdp5_kms) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ spin_lock_init(&mdp5_kms->resource_lock);
+
+ mdp5_kms->dev = dev;
+ mdp5_kms->pdev = pdev;
+
+ ret = mdp5_global_obj_init(mdp5_kms);
+ if (ret)
+ goto fail;
+
+ mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys");
+ if (IS_ERR(mdp5_kms->mmio)) {
+ ret = PTR_ERR(mdp5_kms->mmio);
+ goto fail;
+ }
+
+ /* mandatory clocks: */
+ ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
+ if (ret)
+ goto fail;
+ ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
+ if (ret)
+ goto fail;
+ ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
+ if (ret)
+ goto fail;
+ ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
+ if (ret)
+ goto fail;
+
+ /* optional clocks: */
+ get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
+ get_clk(pdev, &mdp5_kms->tbu_clk, "tbu", false);
+ get_clk(pdev, &mdp5_kms->tbu_rt_clk, "tbu_rt", false);
+
+ /* We need to set a default rate before enabling: set a safe rate
+ * first, then figure out the hw revision, and then set a more
+ * optimal rate.
+ */
+ clk_set_rate(mdp5_kms->core_clk, 200000000);
+
+ /* set the not-yet-initialized kms */
+ priv->kms = &mdp5_kms->base.base;
+
+ pm_runtime_enable(&pdev->dev);
+ mdp5_kms->rpm_enabled = true;
+
+ read_mdp_hw_revision(mdp5_kms, &major, &minor);
+
+ mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
+ if (IS_ERR(mdp5_kms->cfg)) {
+ ret = PTR_ERR(mdp5_kms->cfg);
+ mdp5_kms->cfg = NULL;
+ goto fail;
+ }
+
+ config = mdp5_cfg_get_config(mdp5_kms->cfg);
+ mdp5_kms->caps = config->hw->mdp.caps;
+
+ /* TODO: compute core clock rate at runtime */
+ clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk);
+
+ /*
+ * Some chipsets have a Shared Memory Pool (SMP), while others
+ * have dedicated latency buffering per source pipe instead;
+ * this section initializes the SMP:
+ */
+ if (mdp5_kms->caps & MDP_CAP_SMP) {
+ mdp5_kms->smp = mdp5_smp_init(mdp5_kms, &config->hw->smp);
+ if (IS_ERR(mdp5_kms->smp)) {
+ ret = PTR_ERR(mdp5_kms->smp);
+ mdp5_kms->smp = NULL;
+ goto fail;
+ }
+ }
+
+ mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg);
+ if (IS_ERR(mdp5_kms->ctlm)) {
+ ret = PTR_ERR(mdp5_kms->ctlm);
+ mdp5_kms->ctlm = NULL;
+ goto fail;
+ }
+
+ ret = hwpipe_init(mdp5_kms);
+ if (ret)
+ goto fail;
+
+ ret = hwmixer_init(mdp5_kms);
+ if (ret)
+ goto fail;
+
+ ret = interface_init(mdp5_kms);
+ if (ret)
+ goto fail;
+
+ return 0;
+fail:
+ if (mdp5_kms)
+ mdp5_destroy(mdp5_kms);
+ return ret;
+}
+
+static int mdp5_setup_interconnect(struct platform_device *pdev)
+{
+ struct icc_path *path0 = msm_icc_get(&pdev->dev, "mdp0-mem");
+ struct icc_path *path1 = msm_icc_get(&pdev->dev, "mdp1-mem");
+ struct icc_path *path_rot = msm_icc_get(&pdev->dev, "rotator-mem");
+
+ if (IS_ERR(path0))
+ return PTR_ERR(path0);
+
+ if (!path0) {
+ /* Missing interconnect support is not necessarily a fatal
+ * condition; the platform may simply not have an
+ * interconnect driver yet. But warn about it, in case the
+ * bootloader didn't set up bus clocks high enough for
+ * scanout.
+ */
+ dev_warn(&pdev->dev, "No interconnect support may cause display underflows!\n");
+ return 0;
+ }
+
+ icc_set_bw(path0, 0, MBps_to_icc(6400));
+
+ if (!IS_ERR_OR_NULL(path1))
+ icc_set_bw(path1, 0, MBps_to_icc(6400));
+ if (!IS_ERR_OR_NULL(path_rot))
+ icc_set_bw(path_rot, 0, MBps_to_icc(6400));
+
+ return 0;
+}
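+
+/*
+ * icc_set_bw() takes average and peak bandwidth in the interconnect
+ * framework's kBps units; MBps_to_icc() scales MB/s into those units, so
+ * the calls above request a 6400 MB/s peak with no average-bandwidth
+ * floor. A minimal sketch of the same pattern for a single path:
+ *
+ *   struct icc_path *path = msm_icc_get(&pdev->dev, "mdp0-mem");
+ *   if (!IS_ERR_OR_NULL(path))
+ *           icc_set_bw(path, 0, MBps_to_icc(6400));
+ */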
+
+static int mdp5_dev_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ DBG("");
+
+ ret = mdp5_setup_interconnect(pdev);
+ if (ret)
+ return ret;
+
+ return msm_drv_probe(&pdev->dev, mdp5_kms_init);
+}
+
+static int mdp5_dev_remove(struct platform_device *pdev)
+{
+ DBG("");
+ component_master_del(&pdev->dev, &msm_drm_ops);
+ return 0;
+}
+
+static __maybe_unused int mdp5_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+
+ DBG("");
+
+ return mdp5_disable(mdp5_kms);
+}
+
+static __maybe_unused int mdp5_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+
+ DBG("");
+
+ return mdp5_enable(mdp5_kms);
+}
+
+static const struct dev_pm_ops mdp5_pm_ops = {
+ SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL)
+ .prepare = msm_pm_prepare,
+ .complete = msm_pm_complete,
+};
+
+static const struct of_device_id mdp5_dt_match[] = {
+ { .compatible = "qcom,mdp5", },
+ /* to support downstream DT files */
+ { .compatible = "qcom,mdss_mdp", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mdp5_dt_match);
+
+static struct platform_driver mdp5_driver = {
+ .probe = mdp5_dev_probe,
+ .remove = mdp5_dev_remove,
+ .shutdown = msm_drv_shutdown,
+ .driver = {
+ .name = "msm_mdp",
+ .of_match_table = mdp5_dt_match,
+ .pm = &mdp5_pm_ops,
+ },
+};
+
+void __init msm_mdp_register(void)
+{
+ DBG("");
+ platform_driver_register(&mdp5_driver);
+}
+
+void __exit msm_mdp_unregister(void)
+{
+ DBG("");
+ platform_driver_unregister(&mdp5_driver);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
new file mode 100644
index 000000000..29bf11f08
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
@@ -0,0 +1,327 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __MDP5_KMS_H__
+#define __MDP5_KMS_H__
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "disp/mdp_kms.h"
+#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */
+#include "mdp5.xml.h"
+#include "mdp5_pipe.h"
+#include "mdp5_mixer.h"
+#include "mdp5_ctl.h"
+#include "mdp5_smp.h"
+
+struct mdp5_kms {
+ struct mdp_kms base;
+
+ struct drm_device *dev;
+
+ struct platform_device *pdev;
+
+ unsigned num_hwpipes;
+ struct mdp5_hw_pipe *hwpipes[SSPP_MAX];
+
+ unsigned num_hwmixers;
+ struct mdp5_hw_mixer *hwmixers[8];
+
+ unsigned num_intfs;
+ struct mdp5_interface *intfs[5];
+
+ struct mdp5_cfg_handler *cfg;
+ uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */
+
+ /*
+ * Global private object state. Do not access directly; use
+ * mdp5_global_get_state().
+ */
+ struct drm_modeset_lock glob_state_lock;
+ struct drm_private_obj glob_state;
+
+ struct mdp5_smp *smp;
+ struct mdp5_ctl_manager *ctlm;
+
+ /* io/register spaces: */
+ void __iomem *mmio;
+
+ struct clk *axi_clk;
+ struct clk *ahb_clk;
+ struct clk *core_clk;
+ struct clk *lut_clk;
+ struct clk *tbu_clk;
+ struct clk *tbu_rt_clk;
+ struct clk *vsync_clk;
+
+ /*
+ * lock to protect access to global resources, i.e. the following
+ * register:
+ * - REG_MDP5_DISP_INTF_SEL
+ */
+ spinlock_t resource_lock;
+
+ bool rpm_enabled;
+
+ struct mdp_irq error_handler;
+
+ int enable_count;
+};
+#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
+
+/* Global private object state for tracking resources that are shared across
+ * multiple kms objects (planes/crtcs/etc).
+ */
+#define to_mdp5_global_state(x) container_of(x, struct mdp5_global_state, base)
+struct mdp5_global_state {
+ struct drm_private_state base;
+
+ struct drm_atomic_state *state;
+ struct mdp5_kms *mdp5_kms;
+
+ struct mdp5_hw_pipe_state hwpipe;
+ struct mdp5_hw_mixer_state hwmixer;
+ struct mdp5_smp_state smp;
+};
+
+struct mdp5_global_state * mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms);
+struct mdp5_global_state *__must_check mdp5_get_global_state(struct drm_atomic_state *s);
+
+/* Atomic plane state. Subclasses the base drm_plane_state in order to
+ * track assigned hwpipe and hw specific state.
+ */
+struct mdp5_plane_state {
+ struct drm_plane_state base;
+
+ struct mdp5_hw_pipe *hwpipe;
+ struct mdp5_hw_pipe *r_hwpipe; /* right hwpipe */
+
+ /* assigned by crtc blender */
+ enum mdp_mixer_stage_id stage;
+
+ /* whether the attached CRTC needs pixel data explicitly flushed to
+ * the display (e.g. a DSI command mode display)
+ */
+ bool needs_dirtyfb;
+};
+#define to_mdp5_plane_state(x) \
+ container_of(x, struct mdp5_plane_state, base)
+
+struct mdp5_pipeline {
+ struct mdp5_interface *intf;
+ struct mdp5_hw_mixer *mixer;
+ struct mdp5_hw_mixer *r_mixer; /* right mixer */
+};
+
+struct mdp5_crtc_state {
+ struct drm_crtc_state base;
+
+ struct mdp5_ctl *ctl;
+ struct mdp5_pipeline pipeline;
+
+ /* these are derived from the intf/mixer state in mdp5_pipeline */
+ u32 vblank_irqmask;
+ u32 err_irqmask;
+ u32 pp_done_irqmask;
+
+ bool cmd_mode;
+
+ /* Should we skip writing the CTL[n].START register on flush? This
+ * is set to true when the encoder has changed: encoder->enable()
+ * is called after the crtc state is committed, but we only want to
+ * write CTL[n].START once, so we defer the write until
+ * encoder->enable().
+ */
+ bool defer_start;
+};
+#define to_mdp5_crtc_state(x) \
+ container_of(x, struct mdp5_crtc_state, base)
+
+enum mdp5_intf_mode {
+ MDP5_INTF_MODE_NONE = 0,
+
+ /* Modes used for DSI interface (INTF_DSI type): */
+ MDP5_INTF_DSI_MODE_VIDEO,
+ MDP5_INTF_DSI_MODE_COMMAND,
+
+ /* Modes used for WB interface (INTF_WB type): */
+ MDP5_INTF_WB_MODE_BLOCK,
+ MDP5_INTF_WB_MODE_LINE,
+};
+
+struct mdp5_interface {
+ int idx;
+ int num; /* display interface number */
+ enum mdp5_intf_type type;
+ enum mdp5_intf_mode mode;
+};
+
+struct mdp5_encoder {
+ struct drm_encoder base;
+ spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
+ bool enabled;
+ uint32_t bsc;
+
+ struct mdp5_interface *intf;
+ struct mdp5_ctl *ctl;
+};
+#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
+
+static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
+{
+ WARN_ON(mdp5_kms->enable_count <= 0);
+ msm_writel(data, mdp5_kms->mmio + reg);
+}
+
+static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg)
+{
+ WARN_ON(mdp5_kms->enable_count <= 0);
+ return msm_readl(mdp5_kms->mmio + reg);
+}
+
+static inline const char *stage2name(enum mdp_mixer_stage_id stage)
+{
+ static const char *names[] = {
+#define NAME(n) [n] = #n
+ NAME(STAGE_UNUSED), NAME(STAGE_BASE),
+ NAME(STAGE0), NAME(STAGE1), NAME(STAGE2),
+ NAME(STAGE3), NAME(STAGE4), NAME(STAGE6),
+#undef NAME
+ };
+ return names[stage];
+}
+
+static inline const char *pipe2name(enum mdp5_pipe pipe)
+{
+ static const char *names[] = {
+#define NAME(n) [SSPP_ ## n] = #n
+ NAME(VIG0), NAME(VIG1), NAME(VIG2),
+ NAME(RGB0), NAME(RGB1), NAME(RGB2),
+ NAME(DMA0), NAME(DMA1),
+ NAME(VIG3), NAME(RGB3),
+ NAME(CURSOR0), NAME(CURSOR1),
+#undef NAME
+ };
+ return names[pipe];
+}
+
+static inline int pipe2nclients(enum mdp5_pipe pipe)
+{
+ switch (pipe) {
+ case SSPP_RGB0:
+ case SSPP_RGB1:
+ case SSPP_RGB2:
+ case SSPP_RGB3:
+ return 1;
+ default:
+ return 3;
+ }
+}
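+
+/*
+ * pipe2nclients() reflects how many SMP client IDs a pipe consumes: RGB
+ * pipes fetch a single plane, while the other pipes can fetch planar YUV
+ * and so have separate clients for the Y, U and V components.
+ */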
+
+static inline uint32_t intf2err(int intf_num)
+{
+ switch (intf_num) {
+ case 0: return MDP5_IRQ_INTF0_UNDER_RUN;
+ case 1: return MDP5_IRQ_INTF1_UNDER_RUN;
+ case 2: return MDP5_IRQ_INTF2_UNDER_RUN;
+ case 3: return MDP5_IRQ_INTF3_UNDER_RUN;
+ default: return 0;
+ }
+}
+
+static inline uint32_t intf2vblank(struct mdp5_hw_mixer *mixer,
+ struct mdp5_interface *intf)
+{
+ /*
+ * In the DSI command mode case, the ping-pong read pointer IRQ
+ * acts as a vblank signal. The ping-pong buffer used is bound to
+ * the layer mixer.
+ */
+
+ if ((intf->type == INTF_DSI) &&
+ (intf->mode == MDP5_INTF_DSI_MODE_COMMAND))
+ return MDP5_IRQ_PING_PONG_0_RD_PTR << mixer->pp;
+
+ if (intf->type == INTF_WB)
+ return MDP5_IRQ_WB_2_DONE;
+
+ switch (intf->num) {
+ case 0: return MDP5_IRQ_INTF0_VSYNC;
+ case 1: return MDP5_IRQ_INTF1_VSYNC;
+ case 2: return MDP5_IRQ_INTF2_VSYNC;
+ case 3: return MDP5_IRQ_INTF3_VSYNC;
+ default: return 0;
+ }
+}
+
+static inline uint32_t lm2ppdone(struct mdp5_hw_mixer *mixer)
+{
+ return MDP5_IRQ_PING_PONG_0_DONE << mixer->pp;
+}
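+
+/*
+ * The PING_PONG IRQ status bits are laid out consecutively, so shifting
+ * the PING_PONG_0 bit by the mixer's ping-pong index selects the right
+ * one: e.g. for mixer->pp == 2, MDP5_IRQ_PING_PONG_0_DONE << 2 is the
+ * PING_PONG_2 "done" bit (the RD_PTR case above relies on the same
+ * layout).
+ */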
+
+void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
+ uint32_t old_irqmask);
+void mdp5_irq_preinstall(struct msm_kms *kms);
+int mdp5_irq_postinstall(struct msm_kms *kms);
+void mdp5_irq_uninstall(struct msm_kms *kms);
+irqreturn_t mdp5_irq(struct msm_kms *kms);
+int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
+void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
+
+uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
+enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
+enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane);
+struct drm_plane *mdp5_plane_init(struct drm_device *dev,
+ enum drm_plane_type type);
+
+struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
+uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
+
+struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc);
+struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc);
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc);
+void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
+struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane,
+ struct drm_plane *cursor_plane, int id);
+
+struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
+ struct mdp5_interface *intf, struct mdp5_ctl *ctl);
+int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder);
+void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode);
+int mdp5_encoder_get_linecount(struct drm_encoder *encoder);
+u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder);
+
+#ifdef CONFIG_DRM_MSM_DSI
+void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+void mdp5_cmd_encoder_disable(struct drm_encoder *encoder);
+void mdp5_cmd_encoder_enable(struct drm_encoder *encoder);
+int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder);
+#else
+static inline void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+static inline void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
+{
+}
+static inline void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
+{
+}
+static inline int mdp5_cmd_encoder_set_split_display(
+ struct drm_encoder *encoder, struct drm_encoder *slave_encoder)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* __MDP5_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
new file mode 100644
index 000000000..2536def2a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017 The Linux Foundation. All rights reserved.
+ */
+
+#include "mdp5_kms.h"
+
+/*
+ * As of now, there are only 2 combinations possible for source split:
+ *
+ * Left | Right
+ * -----|------
+ * LM0 | LM1
+ * LM2 | LM5
+ *
+ */
+static int lm_right_pair[] = { 1, -1, 5, -1, -1, -1 };
+
+static int get_right_pair_idx(struct mdp5_kms *mdp5_kms, int lm)
+{
+ int i;
+ int pair_lm;
+
+ pair_lm = lm_right_pair[lm];
+ if (pair_lm < 0)
+ return -EINVAL;
+
+ for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
+ struct mdp5_hw_mixer *mixer = mdp5_kms->hwmixers[i];
+
+ if (mixer->lm == pair_lm)
+ return mixer->idx;
+ }
+
+ return -1;
+}
+
+int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
+ uint32_t caps, struct mdp5_hw_mixer **mixer,
+ struct mdp5_hw_mixer **r_mixer)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct mdp5_global_state *global_state = mdp5_get_global_state(s);
+ struct mdp5_hw_mixer_state *new_state;
+ int i;
+
+ if (IS_ERR(global_state))
+ return PTR_ERR(global_state);
+
+ new_state = &global_state->hwmixer;
+
+ for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
+ struct mdp5_hw_mixer *cur = mdp5_kms->hwmixers[i];
+
+ /*
+ * skip if already in use by a different CRTC. If there is a
+ * mixer already assigned to this CRTC, it means this call is
+ * a request for an additional right mixer. Assume that the
+ * existing mixer is the 'left' one, and try to see if we can
+ * get its corresponding 'right' pair.
+ */
+ if (new_state->hwmixer_to_crtc[cur->idx] &&
+ new_state->hwmixer_to_crtc[cur->idx] != crtc)
+ continue;
+
+ /* skip if doesn't support some required caps: */
+ if (caps & ~cur->caps)
+ continue;
+
+ if (r_mixer) {
+ int pair_idx;
+
+ pair_idx = get_right_pair_idx(mdp5_kms, cur->lm);
+ if (pair_idx < 0)
+ return -EINVAL;
+
+ if (new_state->hwmixer_to_crtc[pair_idx])
+ continue;
+
+ *r_mixer = mdp5_kms->hwmixers[pair_idx];
+ }
+
+ /*
+ * Prefer a pairable LM over an unpairable one. We can
+ * switch the CRTC from Normal mode to Source Split mode
+ * without requiring a full modeset if we had already
+ * assigned this CRTC a pairable LM.
+ *
+ * TODO: There will be assignment sequences which would
+ * result in the CRTC requiring a full modeset, even
+ * if we have the LM resources to prevent it. For a platform
+ * with a few displays, we don't run out of pairable LMs
+ * so easily. For now, ignore the possibility of requiring
+ * a full modeset.
+ */
+ if (!(*mixer) || cur->caps & MDP_LM_CAP_PAIR)
+ *mixer = cur;
+ }
+
+ if (!(*mixer))
+ return -ENOMEM;
+
+ if (r_mixer && !(*r_mixer))
+ return -ENOMEM;
+
+ DBG("assigning Layer Mixer %d to crtc %s", (*mixer)->lm, crtc->name);
+
+ new_state->hwmixer_to_crtc[(*mixer)->idx] = crtc;
+ if (r_mixer) {
+ DBG("assigning Right Layer Mixer %d to crtc %s", (*r_mixer)->lm,
+ crtc->name);
+ new_state->hwmixer_to_crtc[(*r_mixer)->idx] = crtc;
+ }
+
+ return 0;
+}
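+
+/*
+ * Example: given the pairing table above, a CRTC that asks for a right
+ * mixer (r_mixer != NULL) can only be satisfied by LM0 (paired with LM1)
+ * or LM2 (paired with LM5); a candidate LM with no entry in
+ * lm_right_pair[] makes the assignment fail with -EINVAL.
+ */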
+
+int mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
+{
+ struct mdp5_global_state *global_state = mdp5_get_global_state(s);
+ struct mdp5_hw_mixer_state *new_state;
+
+ if (!mixer)
+ return 0;
+
+ if (IS_ERR(global_state))
+ return PTR_ERR(global_state);
+
+ new_state = &global_state->hwmixer;
+
+ if (WARN_ON(!new_state->hwmixer_to_crtc[mixer->idx]))
+ return -EINVAL;
+
+ DBG("%s: release from crtc %s", mixer->name,
+ new_state->hwmixer_to_crtc[mixer->idx]->name);
+
+ new_state->hwmixer_to_crtc[mixer->idx] = NULL;
+
+ return 0;
+}
+
+void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer)
+{
+ kfree(mixer);
+}
+
+static const char * const mixer_names[] = {
+ "LM0", "LM1", "LM2", "LM3", "LM4", "LM5",
+};
+
+struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm)
+{
+ struct mdp5_hw_mixer *mixer;
+
+ mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
+ if (!mixer)
+ return ERR_PTR(-ENOMEM);
+
+ mixer->name = mixer_names[lm->id];
+ mixer->lm = lm->id;
+ mixer->caps = lm->caps;
+ mixer->pp = lm->pp;
+ mixer->dspp = lm->dspp;
+ mixer->flush_mask = mdp_ctl_flush_mask_lm(lm->id);
+
+ return mixer;
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
new file mode 100644
index 000000000..545ee223b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MDP5_LM_H__
+#define __MDP5_LM_H__
+
+/* represents a hw Layer Mixer; one (or more) is dynamically assigned to a crtc */
+struct mdp5_hw_mixer {
+ int idx;
+
+ const char *name;
+
+ int lm; /* the LM instance # */
+ uint32_t caps;
+ int pp;
+ int dspp;
+
+ uint32_t flush_mask; /* used to commit LM registers */
+};
+
+/* global atomic state of assignment between CRTCs and Layer Mixers: */
+struct mdp5_hw_mixer_state {
+ struct drm_crtc *hwmixer_to_crtc[8];
+};
+
+struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm);
+void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm);
+int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
+ uint32_t caps, struct mdp5_hw_mixer **mixer,
+ struct mdp5_hw_mixer **r_mixer);
+int mdp5_mixer_release(struct drm_atomic_state *s,
+ struct mdp5_hw_mixer *mixer);
+
+#endif /* __MDP5_LM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
new file mode 100644
index 000000000..e4b8a7898
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include "mdp5_kms.h"
+
+int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
+ uint32_t caps, uint32_t blkcfg,
+ struct mdp5_hw_pipe **hwpipe,
+ struct mdp5_hw_pipe **r_hwpipe)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct mdp5_global_state *new_global_state, *old_global_state;
+ struct mdp5_hw_pipe_state *old_state, *new_state;
+ int i, j;
+
+ new_global_state = mdp5_get_global_state(s);
+ if (IS_ERR(new_global_state))
+ return PTR_ERR(new_global_state);
+
+ /* grab old_state after mdp5_get_global_state(), since we now hold the lock: */
+ old_global_state = mdp5_get_existing_global_state(mdp5_kms);
+
+ old_state = &old_global_state->hwpipe;
+ new_state = &new_global_state->hwpipe;
+
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
+ struct mdp5_hw_pipe *cur = mdp5_kms->hwpipes[i];
+
+ /* skip if already in use; check both new and old state,
+ * since in some cases we cannot immediately re-use a pipe
+ * that is released in the current update:
+ * (1) mdp5 can have SMP (non-double-buffered)
+ * (2) the hw pipe was previously assigned to a different
+ * CRTC (vblanks might not be aligned)
+ */
+ if (new_state->hwpipe_to_plane[cur->idx] ||
+ old_state->hwpipe_to_plane[cur->idx])
+ continue;
+
+ /* skip if doesn't support some required caps: */
+ if (caps & ~cur->caps)
+ continue;
+
+ /*
+ * don't assign a cursor pipe to a plane that isn't going to
+ * be used as a cursor
+ */
+ if (cur->caps & MDP_PIPE_CAP_CURSOR &&
+ plane->type != DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ /* possible candidate, take the one with the
+ * fewest unneeded caps bits set:
+ */
+ if (!(*hwpipe) || (hweight_long(cur->caps & ~caps) <
+ hweight_long((*hwpipe)->caps & ~caps))) {
+ bool r_found = false;
+
+ if (r_hwpipe) {
+ for (j = i + 1; j < mdp5_kms->num_hwpipes;
+ j++) {
+ struct mdp5_hw_pipe *r_cur =
+ mdp5_kms->hwpipes[j];
+
+ /* reject different types of hwpipes */
+ if (r_cur->caps != cur->caps)
+ continue;
+
+ /* respect priority, e.g. VIG0 > VIG1 */
+ if (cur->pipe > r_cur->pipe)
+ continue;
+
+ *r_hwpipe = r_cur;
+ r_found = true;
+ break;
+ }
+ }
+
+ if (!r_hwpipe || r_found)
+ *hwpipe = cur;
+ }
+ }
+
+ if (!(*hwpipe))
+ return -ENOMEM;
+
+ if (r_hwpipe && !(*r_hwpipe))
+ return -ENOMEM;
+
+ if (mdp5_kms->smp) {
+ int ret;
+
+ /* We don't support SMP and 2 hwpipes/plane together */
+ WARN_ON(r_hwpipe);
+
+ DBG("%s: alloc SMP blocks", (*hwpipe)->name);
+ ret = mdp5_smp_assign(mdp5_kms->smp, &new_global_state->smp,
+ (*hwpipe)->pipe, blkcfg);
+ if (ret)
+ return -ENOMEM;
+
+ (*hwpipe)->blkcfg = blkcfg;
+ }
+
+ DBG("%s: assign to plane %s for caps %x",
+ (*hwpipe)->name, plane->name, caps);
+ new_state->hwpipe_to_plane[(*hwpipe)->idx] = plane;
+
+ if (r_hwpipe) {
+ DBG("%s: assign to right of plane %s for caps %x",
+ (*r_hwpipe)->name, plane->name, caps);
+ new_state->hwpipe_to_plane[(*r_hwpipe)->idx] = plane;
+ }
+
+ return 0;
+}
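+
+/*
+ * Illustration of the "fewest unneeded caps" selection above: if a plane
+ * only needs caps = MDP_PIPE_CAP_HFLIP, and both an RGB pipe with
+ * HFLIP|VFLIP and a VIG pipe with HFLIP|VFLIP|SCALE|CSC are free,
+ * hweight_long() counts fewer leftover bits on the RGB pipe, so it wins
+ * and the scaler-capable VIG pipe stays available for planes that need
+ * it (caps values here are hypothetical; the real ones come from the
+ * per-SoC hw config).
+ */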
+
+int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct mdp5_global_state *state;
+ struct mdp5_hw_pipe_state *new_state;
+
+ if (!hwpipe)
+ return 0;
+
+ state = mdp5_get_global_state(s);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ new_state = &state->hwpipe;
+
+ if (WARN_ON(!new_state->hwpipe_to_plane[hwpipe->idx]))
+ return -EINVAL;
+
+ DBG("%s: release from plane %s", hwpipe->name,
+ new_state->hwpipe_to_plane[hwpipe->idx]->name);
+
+ if (mdp5_kms->smp) {
+ DBG("%s: free SMP blocks", hwpipe->name);
+ mdp5_smp_release(mdp5_kms->smp, &state->smp, hwpipe->pipe);
+ }
+
+ new_state->hwpipe_to_plane[hwpipe->idx] = NULL;
+
+ return 0;
+}
+
+void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe)
+{
+ kfree(hwpipe);
+}
+
+struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
+ uint32_t reg_offset, uint32_t caps)
+{
+ struct mdp5_hw_pipe *hwpipe;
+
+ hwpipe = kzalloc(sizeof(*hwpipe), GFP_KERNEL);
+ if (!hwpipe)
+ return ERR_PTR(-ENOMEM);
+
+ hwpipe->name = pipe2name(pipe);
+ hwpipe->pipe = pipe;
+ hwpipe->reg_offset = reg_offset;
+ hwpipe->caps = caps;
+ hwpipe->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
+
+ return hwpipe;
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
new file mode 100644
index 000000000..cca67938c
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __MDP5_PIPE_H__
+#define __MDP5_PIPE_H__
+
+/* TODO: Add SSPP_MAX in mdp5.xml.h */
+#define SSPP_MAX (SSPP_CURSOR1 + 1)
+
+/* represents a hw pipe, which is dynamically assigned to a plane */
+struct mdp5_hw_pipe {
+ int idx;
+
+ const char *name;
+ enum mdp5_pipe pipe;
+
+ uint32_t reg_offset;
+ uint32_t caps;
+
+ uint32_t flush_mask; /* used to commit pipe registers */
+
+ /* number of smp blocks per plane, i.e.:
+ * nblks_y | (nblks_u << 8) | (nblks_v << 16)
+ */
+ uint32_t blkcfg;
+};
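+
+/*
+ * blkcfg packing example: a format needing 2 luma blocks and 1 block per
+ * chroma component would be encoded as 2 | (1 << 8) | (1 << 16), i.e.
+ * 0x00010102.
+ */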
+
+/* global atomic state of assignment between pipes and planes: */
+struct mdp5_hw_pipe_state {
+ struct drm_plane *hwpipe_to_plane[SSPP_MAX];
+};
+
+int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
+ uint32_t caps, uint32_t blkcfg,
+ struct mdp5_hw_pipe **hwpipe,
+ struct mdp5_hw_pipe **r_hwpipe);
+int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
+
+struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
+ uint32_t reg_offset, uint32_t caps);
+void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe);
+
+#endif /* __MDP5_PIPE_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
new file mode 100644
index 000000000..0d5ff03cb
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -0,0 +1,1048 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_print.h>
+
+#include "mdp5_kms.h"
+
+struct mdp5_plane {
+ struct drm_plane base;
+
+ uint32_t nformats;
+ uint32_t formats[32];
+};
+#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
+
+static int mdp5_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_rect *src, struct drm_rect *dest);
+
+static struct mdp5_kms *get_kms(struct drm_plane *plane)
+{
+ struct msm_drm_private *priv = plane->dev->dev_private;
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static bool plane_enabled(struct drm_plane_state *state)
+{
+ return state->visible;
+}
+
+static void mdp5_plane_destroy(struct drm_plane *plane)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+ drm_plane_cleanup(plane);
+
+ kfree(mdp5_plane);
+}
+
+/* helper to install properties which are common to planes and crtcs */
+static void mdp5_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj)
+{
+ unsigned int zpos;
+
+ drm_plane_create_rotation_property(plane,
+ DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+ drm_plane_create_alpha_property(plane);
+ drm_plane_create_blend_mode_property(plane,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ zpos = STAGE_BASE;
+ else
+ zpos = STAGE0 + drm_plane_index(plane);
+ drm_plane_create_zpos_property(plane, zpos, 1, 255);
+}
+
+static void
+mdp5_plane_atomic_print_state(struct drm_printer *p,
+ const struct drm_plane_state *state)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
+ struct mdp5_kms *mdp5_kms = get_kms(state->plane);
+
+ drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ?
+ pstate->hwpipe->name : "(null)");
+ if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
+ drm_printf(p, "\tright-hwpipe=%s\n",
+ pstate->r_hwpipe ? pstate->r_hwpipe->name :
+ "(null)");
+ drm_printf(p, "\tblend_mode=%u\n", pstate->base.pixel_blend_mode);
+ drm_printf(p, "\tzpos=%u\n", pstate->base.zpos);
+ drm_printf(p, "\tnormalized_zpos=%u\n", pstate->base.normalized_zpos);
+ drm_printf(p, "\talpha=%u\n", pstate->base.alpha);
+ drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
+}
+
+static void mdp5_plane_reset(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *mdp5_state;
+
+ if (plane->state)
+ __drm_atomic_helper_plane_destroy_state(plane->state);
+
+ kfree(to_mdp5_plane_state(plane->state));
+ plane->state = NULL;
+ mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
+ if (!mdp5_state)
+ return;
+ __drm_atomic_helper_plane_reset(plane, &mdp5_state->base);
+}
+
+static struct drm_plane_state *
+mdp5_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *mdp5_state;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
+ sizeof(*mdp5_state), GFP_KERNEL);
+ if (!mdp5_state)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);
+
+ return &mdp5_state->base;
+}
+
+static void mdp5_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
+
+ __drm_atomic_helper_plane_destroy_state(state);
+
+ kfree(pstate);
+}
+
+static const struct drm_plane_funcs mdp5_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = mdp5_plane_destroy,
+ .reset = mdp5_plane_reset,
+ .atomic_duplicate_state = mdp5_plane_duplicate_state,
+ .atomic_destroy_state = mdp5_plane_destroy_state,
+ .atomic_print_state = mdp5_plane_atomic_print_state,
+};
+
+static int mdp5_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct msm_drm_private *priv = plane->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ bool needs_dirtyfb = to_mdp5_plane_state(new_state)->needs_dirtyfb;
+
+ if (!new_state->fb)
+ return 0;
+
+ drm_gem_plane_helper_prepare_fb(plane, new_state);
+
+ return msm_framebuffer_prepare(new_state->fb, kms->aspace, needs_dirtyfb);
+}
+
+static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ struct msm_kms *kms = &mdp5_kms->base.base;
+ struct drm_framebuffer *fb = old_state->fb;
+ bool needed_dirtyfb = to_mdp5_plane_state(old_state)->needs_dirtyfb;
+
+ if (!fb)
+ return;
+
+ DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id);
+ msm_framebuffer_cleanup(fb, kms->aspace, needed_dirtyfb);
+}
+
+static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *state)
+{
+ struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
+ struct drm_plane *plane = state->plane;
+ struct drm_plane_state *old_state = plane->state;
+ struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg);
+ bool new_hwpipe = false;
+ bool need_right_hwpipe = false;
+ uint32_t max_width, max_height;
+ bool out_of_bounds = false;
+ uint32_t caps = 0;
+ int min_scale, max_scale;
+ int ret;
+
+ DBG("%s: check (%d -> %d)", plane->name,
+ plane_enabled(old_state), plane_enabled(state));
+
+ max_width = config->hw->lm.max_width << 16;
+ max_height = config->hw->lm.max_height << 16;
+
+ /* Make sure source dimensions are within bounds. */
+ if (state->src_h > max_height)
+ out_of_bounds = true;
+
+ if (state->src_w > max_width) {
+ /* If source split is supported, we can go up to 2x
+ * the max LM width, but we'd need to stage another
+ * hwpipe to the right LM. So, the drm_plane would
+ * consist of 2 hwpipes.
+ */
+ if (config->hw->mdp.caps & MDP_CAP_SRC_SPLIT &&
+ (state->src_w <= 2 * max_width))
+ need_right_hwpipe = true;
+ else
+ out_of_bounds = true;
+ }
+
+ if (out_of_bounds) {
+ struct drm_rect src = drm_plane_state_src(state);
+ DBG("Invalid source size "DRM_RECT_FP_FMT,
+ DRM_RECT_FP_ARG(&src));
+ return -ERANGE;
+ }
+
+ min_scale = FRAC_16_16(1, 8);
+ max_scale = FRAC_16_16(8, 1);
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ min_scale, max_scale,
+ true, true);
+ if (ret)
+ return ret;
+
+ if (plane_enabled(state)) {
+ unsigned int rotation;
+ const struct mdp_format *format;
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ uint32_t blkcfg = 0;
+
+ format = to_mdp_format(msm_framebuffer_format(state->fb));
+ if (MDP_FORMAT_IS_YUV(format))
+ caps |= MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;
+
+ if (((state->src_w >> 16) != state->crtc_w) ||
+ ((state->src_h >> 16) != state->crtc_h))
+ caps |= MDP_PIPE_CAP_SCALE;
+
+ rotation = drm_rotation_simplify(state->rotation,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+
+ if (rotation & DRM_MODE_REFLECT_X)
+ caps |= MDP_PIPE_CAP_HFLIP;
+
+ if (rotation & DRM_MODE_REFLECT_Y)
+ caps |= MDP_PIPE_CAP_VFLIP;
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ caps |= MDP_PIPE_CAP_CURSOR;
+
+ /* (re)allocate hw pipe if we don't have one or caps-mismatch: */
+ if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps))
+ new_hwpipe = true;
+
+ /*
+ * (re)allocate the hw pipe(s) if we're either requesting 2 hw
+ * pipes, or switching from 2 hw pipes to 1 because the new src_w
+ * can be supported by 1 hw pipe by itself.
+ */
+ if ((need_right_hwpipe && !mdp5_state->r_hwpipe) ||
+ (!need_right_hwpipe && mdp5_state->r_hwpipe))
+ new_hwpipe = true;
+
+ if (mdp5_kms->smp) {
+ const struct mdp_format *format =
+ to_mdp_format(msm_framebuffer_format(state->fb));
+
+ blkcfg = mdp5_smp_calculate(mdp5_kms->smp, format,
+ state->src_w >> 16, false);
+
+ if (mdp5_state->hwpipe && (mdp5_state->hwpipe->blkcfg != blkcfg))
+ new_hwpipe = true;
+ }
+
+ /* (re)assign hwpipe if needed, otherwise keep old one: */
+ if (new_hwpipe) {
+ /* TODO: maybe we sometimes want to re-assign the hwpipe
+ * when we no longer need some caps, to make it available
+ * for other planes?
+ */
+ struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe;
+ struct mdp5_hw_pipe *old_right_hwpipe =
+ mdp5_state->r_hwpipe;
+ struct mdp5_hw_pipe *new_hwpipe = NULL;
+ struct mdp5_hw_pipe *new_right_hwpipe = NULL;
+
+ ret = mdp5_pipe_assign(state->state, plane, caps,
+ blkcfg, &new_hwpipe,
+ need_right_hwpipe ?
+ &new_right_hwpipe : NULL);
+ if (ret) {
+ DBG("%s: failed to assign hwpipe(s)!",
+ plane->name);
+ return ret;
+ }
+
+ mdp5_state->hwpipe = new_hwpipe;
+ if (need_right_hwpipe)
+ mdp5_state->r_hwpipe = new_right_hwpipe;
+ else
+ /*
+ * set it to NULL so that the driver knows we
+ * don't have a right hwpipe when committing a
+ * new state
+ */
+ mdp5_state->r_hwpipe = NULL;
+
+
+ ret = mdp5_pipe_release(state->state, old_hwpipe);
+ if (ret)
+ return ret;
+
+ ret = mdp5_pipe_release(state->state, old_right_hwpipe);
+ if (ret)
+ return ret;
+
+ }
+ } else {
+ ret = mdp5_pipe_release(state->state, mdp5_state->hwpipe);
+ if (ret)
+ return ret;
+
+ ret = mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+ if (ret)
+ return ret;
+
+ mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
+ }
+
+ return 0;
+}
+
+static int mdp5_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+
+ crtc = new_plane_state->crtc ? new_plane_state->crtc : old_plane_state->crtc;
+ if (!crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ return mdp5_plane_atomic_check_with_state(crtc_state, new_plane_state);
+}
+
+static void mdp5_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+
+ DBG("%s: update", plane->name);
+
+ if (plane_enabled(new_state)) {
+ int ret;
+
+ ret = mdp5_plane_mode_set(plane,
+ new_state->crtc, new_state->fb,
+ &new_state->src, &new_state->dst);
+ /* atomic_check should have ensured that this doesn't fail */
+ WARN_ON(ret < 0);
+ }
+}
+
+static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(new_plane_state);
+ struct drm_crtc_state *crtc_state;
+ int min_scale, max_scale;
+ int ret;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ new_plane_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ if (!crtc_state->active)
+ return -EINVAL;
+
+ /* don't use fast path if we don't have a hwpipe allocated yet */
+ if (!mdp5_state->hwpipe)
+ return -EINVAL;
+
+ /* only allow changing position (crtc x/y or src x/y) in the fast path */
+ if (plane->state->crtc != new_plane_state->crtc ||
+ plane->state->src_w != new_plane_state->src_w ||
+ plane->state->src_h != new_plane_state->src_h ||
+ plane->state->crtc_w != new_plane_state->crtc_w ||
+ plane->state->crtc_h != new_plane_state->crtc_h ||
+ !plane->state->fb ||
+ plane->state->fb != new_plane_state->fb)
+ return -EINVAL;
+
+ min_scale = FRAC_16_16(1, 8);
+ max_scale = FRAC_16_16(8, 1);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+ min_scale, max_scale,
+ true, true);
+ if (ret)
+ return ret;
+
+ /*
+ * If the visibility of the plane changes (i.e., if the cursor is
+ * clipped out completely), we can't take the async path, because
+ * we need to stage/unstage the plane from the Layer Mixer(s) and
+ * assign/unassign the hwpipe(s) tied to the plane. We avoid the
+ * fast path for both these reasons.
+ */
+ if (new_plane_state->visible != plane->state->visible)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_framebuffer *old_fb = plane->state->fb;
+
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+
+ if (plane_enabled(new_state)) {
+ struct mdp5_ctl *ctl;
+ struct mdp5_pipeline *pipeline =
+ mdp5_crtc_get_pipeline(new_state->crtc);
+ int ret;
+
+ ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb,
+ &new_state->src, &new_state->dst);
+ WARN_ON(ret < 0);
+
+ ctl = mdp5_crtc_get_ctl(new_state->crtc);
+
+ mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane), true);
+ }
+
+ *to_mdp5_plane_state(plane->state) =
+ *to_mdp5_plane_state(new_state);
+
+ new_state->fb = old_fb;
+}
+
+static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
+ .prepare_fb = mdp5_plane_prepare_fb,
+ .cleanup_fb = mdp5_plane_cleanup_fb,
+ .atomic_check = mdp5_plane_atomic_check,
+ .atomic_update = mdp5_plane_atomic_update,
+ .atomic_async_check = mdp5_plane_atomic_async_check,
+ .atomic_async_update = mdp5_plane_atomic_async_update,
+};
+
+static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
+ enum mdp5_pipe pipe,
+ struct drm_framebuffer *fb)
+{
+ struct msm_kms *kms = &mdp5_kms->base.base;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
+ MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
+ MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
+ MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
+ MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
+ msm_framebuffer_iova(fb, kms->aspace, 0));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
+ msm_framebuffer_iova(fb, kms->aspace, 1));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
+ msm_framebuffer_iova(fb, kms->aspace, 2));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
+ msm_framebuffer_iova(fb, kms->aspace, 3));
+}
+
+/* Note: mdp5_plane->pipe_lock must be locked */
+static void csc_disable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe)
+{
+ uint32_t value = mdp5_read(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe)) &
+ ~MDP5_PIPE_OP_MODE_CSC_1_EN;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), value);
+}
+
+/* Note: mdp5_plane->pipe_lock must be locked */
+static void csc_enable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
+ struct csc_cfg *csc)
+{
+ uint32_t i, mode = 0; /* RGB, no CSC */
+ uint32_t *matrix;
+
+ if (unlikely(!csc))
+ return;
+
+ if ((csc->type == CSC_YUV2RGB) || (CSC_YUV2YUV == csc->type))
+ mode |= MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(DATA_FORMAT_YUV);
+ if ((csc->type == CSC_RGB2YUV) || (CSC_YUV2YUV == csc->type))
+ mode |= MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(DATA_FORMAT_YUV);
+ mode |= MDP5_PIPE_OP_MODE_CSC_1_EN;
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), mode);
+
+ matrix = csc->matrix;
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(pipe),
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(matrix[0]) |
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(matrix[1]));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(pipe),
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(matrix[2]) |
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(matrix[3]));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(pipe),
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(matrix[4]) |
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(matrix[5]));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(pipe),
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(matrix[6]) |
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(matrix[7]));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(pipe),
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(matrix[8]));
+
+ for (i = 0; i < ARRAY_SIZE(csc->pre_bias); i++) {
+ uint32_t *pre_clamp = csc->pre_clamp;
+ uint32_t *post_clamp = csc->post_clamp;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_CLAMP(pipe, i),
+ MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(pre_clamp[2*i+1]) |
+ MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(pre_clamp[2*i]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_CLAMP(pipe, i),
+ MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(post_clamp[2*i+1]) |
+ MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(post_clamp[2*i]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_BIAS(pipe, i),
+ MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(csc->pre_bias[i]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_BIAS(pipe, i),
+ MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(csc->post_bias[i]));
+ }
+}
+
+#define PHASE_STEP_SHIFT 21
+#define DOWN_SCALE_RATIO_MAX 32 /* 2^(26-21) */
+
+static int calc_phase_step(uint32_t src, uint32_t dst, uint32_t *out_phase)
+{
+ uint32_t unit;
+
+ if (src == 0 || dst == 0)
+ return -EINVAL;
+
+ /*
+ * PHASE_STEP_X/Y is coded on 26 bits (25:0),
+ * where 2^21 represents the unity "1" in fixed-point hardware design.
+ * This leaves 5 bits for the integer part (downscale case):
+ * -> maximum downscale ratio = 0b1_1111 = 31
+ */
+ if (src > (dst * DOWN_SCALE_RATIO_MAX))
+ return -EOVERFLOW;
+
+ unit = 1 << PHASE_STEP_SHIFT;
+ *out_phase = mult_frac(unit, src, dst);
+
+ return 0;
+}
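+
+/*
+ * Worked example: downscaling 1920 -> 960 gives
+ * phase = (1 << 21) * 1920 / 960 = 2 << 21 = 0x400000, i.e. 2.0 in the
+ * hardware's 5.21 fixed-point format, well under the 32x limit.
+ */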
+
+static int calc_scalex_steps(struct drm_plane *plane,
+ uint32_t pixel_format, uint32_t src, uint32_t dest,
+ uint32_t phasex_steps[COMP_MAX])
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ struct device *dev = mdp5_kms->dev->dev;
+ uint32_t phasex_step;
+ int ret;
+
+ ret = calc_phase_step(src, dest, &phasex_step);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
+ return ret;
+ }
+
+ phasex_steps[COMP_0] = phasex_step;
+ phasex_steps[COMP_3] = phasex_step;
+ phasex_steps[COMP_1_2] = phasex_step / info->hsub;
+
+ return 0;
+}
+
+static int calc_scaley_steps(struct drm_plane *plane,
+ uint32_t pixel_format, uint32_t src, uint32_t dest,
+ uint32_t phasey_steps[COMP_MAX])
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ struct device *dev = mdp5_kms->dev->dev;
+ uint32_t phasey_step;
+ int ret;
+
+ ret = calc_phase_step(src, dest, &phasey_step);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
+ return ret;
+ }
+
+ phasey_steps[COMP_0] = phasey_step;
+ phasey_steps[COMP_3] = phasey_step;
+ phasey_steps[COMP_1_2] = phasey_step / info->vsub;
+
+ return 0;
+}
+
+static uint32_t get_scale_config(const struct mdp_format *format,
+ uint32_t src, uint32_t dst, bool horz)
+{
+ const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
+ bool scaling = format->is_yuv ? true : (src != dst);
+ uint32_t sub;
+ uint32_t ya_filter, uv_filter;
+ bool yuv = format->is_yuv;
+
+ if (!scaling)
+ return 0;
+
+ if (yuv) {
+ sub = horz ? info->hsub : info->vsub;
+ uv_filter = ((src / sub) <= dst) ?
+ SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
+ }
+ ya_filter = (src <= dst) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
+
+ if (horz)
+ return MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(ya_filter) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(ya_filter) |
+ COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(uv_filter));
+ else
+ return MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(ya_filter) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(ya_filter) |
+ COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter));
+}
+
+static void calc_pixel_ext(const struct mdp_format *format,
+ uint32_t src, uint32_t dst, uint32_t phase_step[2],
+ int pix_ext_edge1[COMP_MAX], int pix_ext_edge2[COMP_MAX],
+ bool horz)
+{
+ bool scaling = format->is_yuv ? true : (src != dst);
+ int i;
+
+ /*
+ * Note:
+ * We assume here that:
+ * 1. PCMN filter is used for downscale
+ * 2. bilinear filter is used for upscale
+ * 3. we are in a single pipe configuration
+ */
+
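+	/*
+	 * Under those assumptions each component needs at most one extra
+	 * pixel on the right/bottom edge when scaling, hence edge2 = 1.
+	 */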
+ for (i = 0; i < COMP_MAX; i++) {
+ pix_ext_edge1[i] = 0;
+ pix_ext_edge2[i] = scaling ? 1 : 0;
+ }
+}
+
+static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
+ const struct mdp_format *format,
+ uint32_t src_w, int pe_left[COMP_MAX], int pe_right[COMP_MAX],
+ uint32_t src_h, int pe_top[COMP_MAX], int pe_bottom[COMP_MAX])
+{
+ const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
+ uint32_t lr, tb, req;
+ int i;
+
+ for (i = 0; i < COMP_MAX; i++) {
+ uint32_t roi_w = src_w;
+ uint32_t roi_h = src_h;
+
+ if (format->is_yuv && i == COMP_1_2) {
+ roi_w /= info->hsub;
+ roi_h /= info->vsub;
+ }
+
+ lr = (pe_left[i] >= 0) ?
+ MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT(pe_left[i]) :
+ MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF(pe_left[i]);
+
+ lr |= (pe_right[i] >= 0) ?
+ MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT(pe_right[i]) :
+ MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF(pe_right[i]);
+
+ tb = (pe_top[i] >= 0) ?
+ MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT(pe_top[i]) :
+ MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF(pe_top[i]);
+
+ tb |= (pe_bottom[i] >= 0) ?
+ MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT(pe_bottom[i]) :
+ MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF(pe_bottom[i]);
+
+ req = MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(roi_w +
+ pe_left[i] + pe_right[i]);
+
+ req |= MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(roi_h +
+ pe_top[i] + pe_bottom[i]);
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_LR(pipe, i), lr);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_TB(pipe, i), tb);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(pipe, i), req);
+
+ DBG("comp-%d (L/R): rpt=%d/%d, ovf=%d/%d, req=%d", i,
+ FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT),
+ FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT),
+ FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF),
+ FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF),
+ FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT));
+
+ DBG("comp-%d (T/B): rpt=%d/%d, ovf=%d/%d, req=%d", i,
+ FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT),
+ FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT),
+ FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF),
+ FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF),
+ FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM));
+ }
+}
+
+struct pixel_ext {
+ int left[COMP_MAX];
+ int right[COMP_MAX];
+ int top[COMP_MAX];
+ int bottom[COMP_MAX];
+};
+
+struct phase_step {
+ u32 x[COMP_MAX];
+ u32 y[COMP_MAX];
+};
+
+static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms,
+ struct mdp5_hw_pipe *hwpipe,
+ struct drm_framebuffer *fb,
+ struct phase_step *step,
+ struct pixel_ext *pe,
+ u32 scale_config, u32 hdecm, u32 vdecm,
+ bool hflip, bool vflip,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_img_w, u32 src_img_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h)
+{
+ enum mdp5_pipe pipe = hwpipe->pipe;
+ bool has_pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT;
+ const struct mdp_format *format =
+ to_mdp_format(msm_framebuffer_format(fb));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
+ MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_img_w) |
+ MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_img_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
+ MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
+ MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe),
+ MDP5_PIPE_SRC_XY_X(src_x) |
+ MDP5_PIPE_SRC_XY_Y(src_y));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe),
+ MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
+ MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe),
+ MDP5_PIPE_OUT_XY_X(crtc_x) |
+ MDP5_PIPE_OUT_XY_Y(crtc_y));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
+ MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
+ MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
+ MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
+ MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
+ COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
+ MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
+ MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
+ COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
+ MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
+ MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
+ MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
+ (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
+ (vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
+ COND(has_pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) |
+ MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
+
+ /* not using secure mode: */
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
+
+ if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT)
+ mdp5_write_pixel_ext(mdp5_kms, pipe, format,
+ src_w, pe->left, pe->right,
+ src_h, pe->top, pe->bottom);
+
+ if (hwpipe->caps & MDP_PIPE_CAP_SCALE) {
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
+ step->x[COMP_0]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
+ step->y[COMP_0]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
+ step->x[COMP_1_2]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
+ step->y[COMP_1_2]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
+ MDP5_PIPE_DECIMATION_VERT(vdecm) |
+ MDP5_PIPE_DECIMATION_HORZ(hdecm));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe),
+ scale_config);
+ }
+
+ if (hwpipe->caps & MDP_PIPE_CAP_CSC) {
+ if (MDP_FORMAT_IS_YUV(format))
+ csc_enable(mdp5_kms, pipe,
+ mdp_get_default_csc_cfg(CSC_YUV2RGB));
+ else
+ csc_disable(mdp5_kms, pipe);
+ }
+
+ set_scanout_locked(mdp5_kms, pipe, fb);
+}
+
+static int mdp5_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_rect *src, struct drm_rect *dest)
+{
+ struct drm_plane_state *pstate = plane->state;
+ struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe;
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ enum mdp5_pipe pipe = hwpipe->pipe;
+ struct mdp5_hw_pipe *right_hwpipe;
+ const struct mdp_format *format;
+ uint32_t nplanes, config = 0;
+ struct phase_step step = { { 0 } };
+ struct pixel_ext pe = { { 0 } };
+ uint32_t hdecm = 0, vdecm = 0;
+ uint32_t pix_format;
+ unsigned int rotation;
+ bool vflip, hflip;
+ int crtc_x, crtc_y;
+ unsigned int crtc_w, crtc_h;
+ uint32_t src_x, src_y;
+ uint32_t src_w, src_h;
+ uint32_t src_img_w, src_img_h;
+ int ret;
+
+ nplanes = fb->format->num_planes;
+
+ /* bad formats should already be rejected: */
+ if (WARN_ON(nplanes > pipe2nclients(pipe)))
+ return -EINVAL;
+
+ format = to_mdp_format(msm_framebuffer_format(fb));
+ pix_format = format->base.pixel_format;
+
+ src_x = src->x1;
+ src_y = src->y1;
+ src_w = drm_rect_width(src);
+ src_h = drm_rect_height(src);
+
+ crtc_x = dest->x1;
+ crtc_y = dest->y1;
+ crtc_w = drm_rect_width(dest);
+ crtc_h = drm_rect_height(dest);
+
+ /* src values are in Q16 fixed point, convert to integer: */
+ src_x = src_x >> 16;
+ src_y = src_y >> 16;
+ src_w = src_w >> 16;
+ src_h = src_h >> 16;
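+	/* e.g. a 1920px-wide source arrives as 1920 << 16 == 0x7800000 */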
+
+ src_img_w = min(fb->width, src_w);
+ src_img_h = min(fb->height, src_h);
+
+ DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name,
+ fb->base.id, src_x, src_y, src_w, src_h,
+ crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
+
+ right_hwpipe = to_mdp5_plane_state(pstate)->r_hwpipe;
+ if (right_hwpipe) {
+		/*
+		 * If the plane comprises 2 hw pipes, assume that the width
+		 * is split equally across them. The only parameters that
+		 * vary between the 2 pipes are src_x and crtc_x.
+		 */
+ crtc_w /= 2;
+ src_w /= 2;
+ src_img_w /= 2;
+ }
+
+ ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, step.x);
+ if (ret)
+ return ret;
+
+ ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, step.y);
+ if (ret)
+ return ret;
+
+ if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) {
+ calc_pixel_ext(format, src_w, crtc_w, step.x,
+ pe.left, pe.right, true);
+ calc_pixel_ext(format, src_h, crtc_h, step.y,
+ pe.top, pe.bottom, false);
+ }
+
+ /* TODO calc hdecm, vdecm */
+
+	/* SCALE is used both to scale and to up-sample the chroma components */
+ config |= get_scale_config(format, src_w, crtc_w, true);
+ config |= get_scale_config(format, src_h, crtc_h, false);
+ DBG("scale config = %x", config);
+
+ rotation = drm_rotation_simplify(pstate->rotation,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+ hflip = !!(rotation & DRM_MODE_REFLECT_X);
+ vflip = !!(rotation & DRM_MODE_REFLECT_Y);
+
+ mdp5_hwpipe_mode_set(mdp5_kms, hwpipe, fb, &step, &pe,
+ config, hdecm, vdecm, hflip, vflip,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_img_w, src_img_h,
+ src_x, src_y, src_w, src_h);
+ if (right_hwpipe)
+ mdp5_hwpipe_mode_set(mdp5_kms, right_hwpipe, fb, &step, &pe,
+ config, hdecm, vdecm, hflip, vflip,
+ crtc_x + crtc_w, crtc_y, crtc_w, crtc_h,
+ src_img_w, src_img_h,
+ src_x + src_w, src_y, src_w, src_h);
+
+ return ret;
+}
+
+/*
+ * Use this func and the one below only after the atomic state has been
+ * successfully swapped
+ */
+enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
+
+ if (WARN_ON(!pstate->hwpipe))
+ return SSPP_NONE;
+
+ return pstate->hwpipe->pipe;
+}
+
+enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
+
+ if (!pstate->r_hwpipe)
+ return SSPP_NONE;
+
+ return pstate->r_hwpipe->pipe;
+}
+
+uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
+ u32 mask;
+
+ if (WARN_ON(!pstate->hwpipe))
+ return 0;
+
+ mask = pstate->hwpipe->flush_mask;
+
+ if (pstate->r_hwpipe)
+ mask |= pstate->r_hwpipe->flush_mask;
+
+ return mask;
+}
+
+/* initialize plane */
+struct drm_plane *mdp5_plane_init(struct drm_device *dev,
+ enum drm_plane_type type)
+{
+ struct drm_plane *plane = NULL;
+ struct mdp5_plane *mdp5_plane;
+ int ret;
+
+ mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL);
+ if (!mdp5_plane) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ plane = &mdp5_plane->base;
+
+ mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
+ ARRAY_SIZE(mdp5_plane->formats), false);
+
+ ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
+ mdp5_plane->formats, mdp5_plane->nformats,
+ NULL, type, NULL);
+ if (ret)
+ goto fail;
+
+ drm_plane_helper_add(plane, &mdp5_plane_helper_funcs);
+
+ mdp5_plane_install_properties(plane, &plane->base);
+
+ drm_plane_enable_fb_damage_clips(plane);
+
+ return plane;
+
+fail:
+ if (plane)
+ mdp5_plane_destroy(plane);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
new file mode 100644
index 000000000..56a306354
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_util.h>
+
+#include "mdp5_kms.h"
+#include "mdp5_smp.h"
+
+
+struct mdp5_smp {
+ struct drm_device *dev;
+
+ uint8_t reserved[MAX_CLIENTS]; /* fixed MMBs allocation per client */
+
+ int blk_cnt;
+ int blk_size;
+
+ /* register cache */
+ u32 alloc_w[22];
+ u32 alloc_r[22];
+ u32 pipe_reqprio_fifo_wm0[SSPP_MAX];
+ u32 pipe_reqprio_fifo_wm1[SSPP_MAX];
+ u32 pipe_reqprio_fifo_wm2[SSPP_MAX];
+};
+
+static inline
+struct mdp5_kms *get_kms(struct mdp5_smp *smp)
+{
+ struct msm_drm_private *priv = smp->dev->dev_private;
+
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
+{
+#define CID_UNUSED 0
+
+ if (WARN_ON(plane >= pipe2nclients(pipe)))
+ return CID_UNUSED;
+
+ /*
+ * Note on SMP clients:
+	 * For ViG pipes, the fetch clients for the Y/Cr/Cb components
+	 * are always consecutive, and in that order.
+ *
+ * e.g.:
+ * if mdp5_cfg->smp.clients[SSPP_VIG0] = N,
+ * Y plane's client ID is N
+ * Cr plane's client ID is N + 1
+ * Cb plane's client ID is N + 2
+ */
+
+ return mdp5_cfg->smp.clients[pipe] + plane;
+}
+
+/* allocate blocks for the specified request: */
+static int smp_request_block(struct mdp5_smp *smp,
+ struct mdp5_smp_state *state,
+ u32 cid, int nblks)
+{
+ void *cs = state->client_state[cid];
+ int i, avail, cnt = smp->blk_cnt;
+ uint8_t reserved;
+
+ /* we shouldn't be requesting blocks for an in-use client: */
+ WARN_ON(!bitmap_empty(cs, cnt));
+
+ reserved = smp->reserved[cid];
+
+ if (reserved) {
+ nblks = max(0, nblks - reserved);
+ DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
+ }
+
+ avail = cnt - bitmap_weight(state->state, cnt);
+ if (nblks > avail) {
+ DRM_DEV_ERROR(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
+ nblks, avail);
+ return -ENOSPC;
+ }
+
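+	/* grab the first free blocks from the pool, marking each one in
+	 * both the client's and the global bitmap:
+	 */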
+ for (i = 0; i < nblks; i++) {
+ int blk = find_first_zero_bit(state->state, cnt);
+ set_bit(blk, cs);
+ set_bit(blk, state->state);
+ }
+
+ return 0;
+}
+
+static void set_fifo_thresholds(struct mdp5_smp *smp,
+ enum mdp5_pipe pipe, int nblks)
+{
+ u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
+ u32 val;
+
+	/* watermarks at 1/4, 2/4 and 3/4 of the SMP entries this pipe fetches */
+ val = (nblks * smp_entries_per_blk) / 4;
+
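+	/*
+	 * e.g. with 4096-byte MMBs (blk_size is per-SoC config), each block
+	 * holds 4096 / 16 = 256 entries; a pipe fetching 2 blocks gets
+	 * val = (2 * 256) / 4 = 128, i.e. watermarks at 128/256/384.
+	 */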
+ smp->pipe_reqprio_fifo_wm0[pipe] = val * 1;
+ smp->pipe_reqprio_fifo_wm1[pipe] = val * 2;
+ smp->pipe_reqprio_fifo_wm2[pipe] = val * 3;
+}
+
+/*
+ * NOTE: it looks like if horizontal decimation is used (if we supported
+ * that) then the width used to calculate SMP block requirements is the
+ * post-decimated width. I.e. SMP buffering sits downstream of decimation
+ * (which presumably happens during the DMA from the scanout buffer).
+ */
+uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
+ const struct mdp_format *format,
+ u32 width, bool hdecim)
+{
+ const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
+ int i, hsub, nplanes, nlines;
+ uint32_t blkcfg = 0;
+
+ nplanes = info->num_planes;
+ hsub = info->hsub;
+
+ /* different if BWC (compressed framebuffer?) enabled: */
+ nlines = 2;
+
+	/* Newer MDPs have split/packing logic, which fetches the sub-sampled
+	 * U and V components (splitting them from Y if necessary), packs
+	 * them together, and writes to SMP using a single client.
+	 */
+ if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) {
+ nplanes = 2;
+
+		/* if decimation is enabled, the HW decimates less on the
+		 * sub-sampled chroma components
+		 */
+ if (hdecim && (hsub > 1))
+ hsub = 1;
+ }
+
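+	/*
+	 * e.g. NV12 at width 1920 with 4096-byte MMBs on rev > 0 (chroma
+	 * packed into one client): plane 0 fetches 1920 * 1 * 2 = 3840
+	 * bytes -> 1 block, plane 1 fetches (1920 * 2 / 2) * 2 = 3840
+	 * bytes -> 1 block, giving blkcfg = 0x0101.
+	 */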
+ for (i = 0; i < nplanes; i++) {
+ int n, fetch_stride, cpp;
+
+ cpp = info->cpp[i];
+ fetch_stride = width * cpp / (i ? hsub : 1);
+
+ n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);
+
+ /* for hw rev v1.00 */
+ if (rev == 0)
+ n = roundup_pow_of_two(n);
+
+ blkcfg |= (n << (8 * i));
+ }
+
+ return blkcfg;
+}
+
+int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
+ enum mdp5_pipe pipe, uint32_t blkcfg)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ struct drm_device *dev = mdp5_kms->dev;
+ int i, ret;
+
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ u32 cid = pipe2client(pipe, i);
+ int n = blkcfg & 0xff;
+
+ if (!n)
+ continue;
+
+ DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
+ ret = smp_request_block(smp, state, cid, n);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
+ n, ret);
+ return ret;
+ }
+
+ blkcfg >>= 8;
+ }
+
+ state->assigned |= (1 << pipe);
+
+ return 0;
+}
+
+/* Release SMP blocks for all clients of the pipe */
+void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state,
+ enum mdp5_pipe pipe)
+{
+ int i;
+ int cnt = smp->blk_cnt;
+
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ u32 cid = pipe2client(pipe, i);
+ void *cs = state->client_state[cid];
+
+ /* update global state: */
+ bitmap_andnot(state->state, state->state, cs, cnt);
+
+ /* clear client's state */
+ bitmap_zero(cs, cnt);
+ }
+
+ state->released |= (1 << pipe);
+}
+
+/* NOTE: SMP_ALLOC_* regs are *not* double buffered, so release has to
+ * happen after scanout completes.
+ */
+static unsigned update_smp_state(struct mdp5_smp *smp,
+ u32 cid, mdp5_smp_state_t *assigned)
+{
+ int cnt = smp->blk_cnt;
+ unsigned nblks = 0;
+ u32 blk, val;
+
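+	/* each SMP_ALLOC register carries three client-ID fields, so MMB
+	 * 'blk' lives in register blk / 3, field blk % 3:
+	 */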
+ for_each_set_bit(blk, *assigned, cnt) {
+ int idx = blk / 3;
+ int fld = blk % 3;
+
+ val = smp->alloc_w[idx];
+
+ switch (fld) {
+ case 0:
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
+ break;
+ case 1:
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
+ break;
+ case 2:
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
+ break;
+ }
+
+ smp->alloc_w[idx] = val;
+ smp->alloc_r[idx] = val;
+
+ nblks++;
+ }
+
+ return nblks;
+}
+
+static void write_smp_alloc_regs(struct mdp5_smp *smp)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ int i, num_regs;
+
+ num_regs = smp->blk_cnt / 3 + 1;
+
+ for (i = 0; i < num_regs; i++) {
+ mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(i),
+ smp->alloc_w[i]);
+ mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(i),
+ smp->alloc_r[i]);
+ }
+}
+
+static void write_smp_fifo_regs(struct mdp5_smp *smp)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ int i;
+
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
+ struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
+ enum mdp5_pipe pipe = hwpipe->pipe;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe),
+ smp->pipe_reqprio_fifo_wm0[pipe]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe),
+ smp->pipe_reqprio_fifo_wm1[pipe]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe),
+ smp->pipe_reqprio_fifo_wm2[pipe]);
+ }
+}
+
+void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
+{
+ enum mdp5_pipe pipe;
+
+ for_each_set_bit(pipe, &state->assigned, sizeof(state->assigned) * 8) {
+ unsigned i, nblks = 0;
+
+ for (i = 0; i < pipe2nclients(pipe); i++) {
+ u32 cid = pipe2client(pipe, i);
+ void *cs = state->client_state[cid];
+
+ nblks += update_smp_state(smp, cid, cs);
+
+ DBG("assign %s:%u, %u blks",
+ pipe2name(pipe), i, nblks);
+ }
+
+ set_fifo_thresholds(smp, pipe, nblks);
+ }
+
+ write_smp_alloc_regs(smp);
+ write_smp_fifo_regs(smp);
+
+ state->assigned = 0;
+}
+
+void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
+{
+ enum mdp5_pipe pipe;
+
+ for_each_set_bit(pipe, &state->released, sizeof(state->released) * 8) {
+ DBG("release %s", pipe2name(pipe));
+ set_fifo_thresholds(smp, pipe, 0);
+ }
+
+ write_smp_fifo_regs(smp);
+
+ state->released = 0;
+}
+
+void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ struct mdp5_hw_pipe_state *hwpstate;
+ struct mdp5_smp_state *state;
+ struct mdp5_global_state *global_state;
+ int total = 0, i, j;
+
+ drm_printf(p, "name\tinuse\tplane\n");
+ drm_printf(p, "----\t-----\t-----\n");
+
+ if (drm_can_sleep())
+ drm_modeset_lock(&mdp5_kms->glob_state_lock, NULL);
+
+ global_state = mdp5_get_existing_global_state(mdp5_kms);
+
+ /* grab these *after* we hold the state_lock */
+ hwpstate = &global_state->hwpipe;
+ state = &global_state->smp;
+
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
+ struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
+ struct drm_plane *plane = hwpstate->hwpipe_to_plane[hwpipe->idx];
+ enum mdp5_pipe pipe = hwpipe->pipe;
+ for (j = 0; j < pipe2nclients(pipe); j++) {
+ u32 cid = pipe2client(pipe, j);
+ void *cs = state->client_state[cid];
+ int inuse = bitmap_weight(cs, smp->blk_cnt);
+
+ drm_printf(p, "%s:%d\t%d\t%s\n",
+ pipe2name(pipe), j, inuse,
+ plane ? plane->name : NULL);
+
+ total += inuse;
+ }
+ }
+
+ drm_printf(p, "TOTAL:\t%d\t(of %d)\n", total, smp->blk_cnt);
+ drm_printf(p, "AVAIL:\t%d\n", smp->blk_cnt -
+ bitmap_weight(state->state, smp->blk_cnt));
+
+ if (drm_can_sleep())
+ drm_modeset_unlock(&mdp5_kms->glob_state_lock);
+}
+
+void mdp5_smp_destroy(struct mdp5_smp *smp)
+{
+ kfree(smp);
+}
+
+struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg)
+{
+ struct mdp5_smp_state *state;
+ struct mdp5_global_state *global_state;
+ struct mdp5_smp *smp = NULL;
+ int ret;
+
+ smp = kzalloc(sizeof(*smp), GFP_KERNEL);
+ if (unlikely(!smp)) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ smp->dev = mdp5_kms->dev;
+ smp->blk_cnt = cfg->mmb_count;
+ smp->blk_size = cfg->mmb_size;
+
+ global_state = mdp5_get_existing_global_state(mdp5_kms);
+ state = &global_state->smp;
+
+ /* statically tied MMBs cannot be re-allocated: */
+ bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt);
+ memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));
+
+ return smp;
+fail:
+ if (smp)
+ mdp5_smp_destroy(smp);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
new file mode 100644
index 000000000..ba5618e13
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __MDP5_SMP_H__
+#define __MDP5_SMP_H__
+
+#include <drm/drm_print.h>
+
+#include "msm_drv.h"
+
+/*
+ * SMP - Shared Memory Pool:
+ *
+ * SMP blocks are shared between all the clients, where each plane in
+ * a scanout buffer is an SMP client. I.e. scanout of a 3-plane I420
+ * buffer on pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
+ *
+ * Based on the size of the attached scanout buffer, a certain number
+ * of blocks must be allocated to that client out of the shared pool.
+ *
+ * In some hw, some blocks are statically allocated for certain pipes
+ * and CANNOT be re-allocated (e.g. MMB0 and MMB1 both tied to RGB0).
+ *
+ *
+ * Atomic SMP State:
+ *
+ * On atomic updates that modify the SMP configuration, the state is
+ * cloned (copied) and modified. For test-only commits, or in cases
+ * where the atomic update fails (or we hit the ww_mutex
+ * deadlock/backoff condition), the new state is simply thrown away.
+ *
+ * Because the SMP registers are not double buffered, updates are a
+ * two step process:
+ *
+ * 1) in _prepare_commit() we configure things (via read-modify-write)
+ * for the newly assigned pipes, so we don't take away blocks
+ * assigned to pipes that are still scanning out
+ * 2) in _complete_commit(), after vblank/etc, we clear things for the
+ * released clients, since at that point old pipes are no longer
+ * scanning out.
+ */
+struct mdp5_smp_state {
+ /* global state of what blocks are in use: */
+ mdp5_smp_state_t state;
+
+ /* per client state of what blocks they are using: */
+ mdp5_smp_state_t client_state[MAX_CLIENTS];
+
+ /* assigned pipes (hw updated at _prepare_commit()): */
+ unsigned long assigned;
+
+ /* released pipes (hw updated at _complete_commit()): */
+ unsigned long released;
+};
+
+struct mdp5_kms;
+struct mdp5_smp;
+
+/*
+ * SMP module prototypes:
+ * mdp5_smp_init() returns an SMP handle, which is then used to call
+ * the other mdp5_smp_*(handle, ...) functions.
+ */
+
+struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms,
+ const struct mdp5_smp_block *cfg);
+void mdp5_smp_destroy(struct mdp5_smp *smp);
+
+void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p);
+
+uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
+ const struct mdp_format *format,
+ u32 width, bool hdecim);
+
+int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
+ enum mdp5_pipe pipe, uint32_t blkcfg);
+void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state,
+ enum mdp5_pipe pipe);
+
+void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state);
+void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state);
+
+#endif /* __MDP5_SMP_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp_common.xml.h b/drivers/gpu/drm/msm/disp/mdp_common.xml.h
new file mode 100644
index 000000000..be759106b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp_common.xml.h
@@ -0,0 +1,111 @@
+#ifndef MDP_COMMON_XML
+#define MDP_COMMON_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mdp_chroma_samp_type {
+ CHROMA_FULL = 0,
+ CHROMA_H2V1 = 1,
+ CHROMA_H1V2 = 2,
+ CHROMA_420 = 3,
+};
+
+enum mdp_fetch_type {
+ MDP_PLANE_INTERLEAVED = 0,
+ MDP_PLANE_PLANAR = 1,
+ MDP_PLANE_PSEUDO_PLANAR = 2,
+};
+
+enum mdp_mixer_stage_id {
+ STAGE_UNUSED = 0,
+ STAGE_BASE = 1,
+ STAGE0 = 2,
+ STAGE1 = 3,
+ STAGE2 = 4,
+ STAGE3 = 5,
+ STAGE4 = 6,
+ STAGE5 = 7,
+ STAGE6 = 8,
+ STAGE_MAX = 8,
+};
+
+enum mdp_alpha_type {
+ FG_CONST = 0,
+ BG_CONST = 1,
+ FG_PIXEL = 2,
+ BG_PIXEL = 3,
+};
+
+enum mdp_component_type {
+ COMP_0 = 0,
+ COMP_1_2 = 1,
+ COMP_3 = 2,
+ COMP_MAX = 3,
+};
+
+enum mdp_bpc {
+ BPC1 = 0,
+ BPC5 = 1,
+ BPC6 = 2,
+ BPC8 = 3,
+};
+
+enum mdp_bpc_alpha {
+ BPC1A = 0,
+ BPC4A = 1,
+ BPC6A = 2,
+ BPC8A = 3,
+};
+
+
+#endif /* MDP_COMMON_XML */
diff --git a/drivers/gpu/drm/msm/disp/mdp_format.c b/drivers/gpu/drm/msm/disp/mdp_format.c
new file mode 100644
index 000000000..025595336
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp_format.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+
+#include "msm_drv.h"
+#include "mdp_kms.h"
+
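+/*
+ * The coefficients appear to use a fixed-point format where 0x0200
+ * represents 1.0 (9 fractional bits): e.g. in the YUV2RGB matrix,
+ * 0x0254 / 512 ~= 1.164 and 0x0331 / 512 ~= 1.596, the usual BT.601
+ * limited-range conversion factors.
+ */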
+static struct csc_cfg csc_convert[CSC_MAX] = {
+ [CSC_RGB2RGB] = {
+ .type = CSC_RGB2RGB,
+ .matrix = {
+ 0x0200, 0x0000, 0x0000,
+ 0x0000, 0x0200, 0x0000,
+ 0x0000, 0x0000, 0x0200
+ },
+ .pre_bias = { 0x0, 0x0, 0x0 },
+ .post_bias = { 0x0, 0x0, 0x0 },
+ .pre_clamp = { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff },
+ .post_clamp = { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff },
+ },
+ [CSC_YUV2RGB] = {
+ .type = CSC_YUV2RGB,
+ .matrix = {
+ 0x0254, 0x0000, 0x0331,
+ 0x0254, 0xff37, 0xfe60,
+ 0x0254, 0x0409, 0x0000
+ },
+ .pre_bias = { 0xfff0, 0xff80, 0xff80 },
+ .post_bias = { 0x00, 0x00, 0x00 },
+ .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
+ .post_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
+ },
+ [CSC_RGB2YUV] = {
+ .type = CSC_RGB2YUV,
+ .matrix = {
+ 0x0083, 0x0102, 0x0032,
+ 0x1fb5, 0x1f6c, 0x00e1,
+ 0x00e1, 0x1f45, 0x1fdc
+ },
+ .pre_bias = { 0x00, 0x00, 0x00 },
+ .post_bias = { 0x10, 0x80, 0x80 },
+ .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
+ .post_clamp = { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0 },
+ },
+ [CSC_YUV2YUV] = {
+ .type = CSC_YUV2YUV,
+ .matrix = {
+ 0x0200, 0x0000, 0x0000,
+ 0x0000, 0x0200, 0x0000,
+ 0x0000, 0x0000, 0x0200
+ },
+ .pre_bias = { 0x00, 0x00, 0x00 },
+ .post_bias = { 0x00, 0x00, 0x00 },
+ .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
+ .post_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
+ },
+};
+
+#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs, yuv) { \
+ .base = { .pixel_format = DRM_FORMAT_ ## name }, \
+ .bpc_a = BPC ## a ## A, \
+ .bpc_r = BPC ## r, \
+ .bpc_g = BPC ## g, \
+ .bpc_b = BPC ## b, \
+ .unpack = { e0, e1, e2, e3 }, \
+ .alpha_enable = alpha, \
+ .unpack_tight = tight, \
+ .cpp = c, \
+ .unpack_count = cnt, \
+ .fetch_type = fp, \
+ .chroma_sample = cs, \
+ .is_yuv = yuv, \
+}
+
+#define BPC0A 0
+
+/*
+ * Note: Keep RGB formats first, followed by YUV formats, to avoid
+ * breaking mdp_get_formats(.., rgb_only=true), which stops at the
+ * first YUV format.
+ */
+static const struct mdp_format formats[] = {
+ /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt ... */
+ FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(ABGR8888, 8, 8, 8, 8, 2, 0, 1, 3, true, true, 4, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(RGBA8888, 8, 8, 8, 8, 3, 1, 0, 2, true, true, 4, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(BGRA8888, 8, 8, 8, 8, 3, 2, 0, 1, true, true, 4, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(XBGR8888, 8, 8, 8, 8, 2, 0, 1, 3, false, true, 4, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(RGBX8888, 8, 8, 8, 8, 3, 1, 0, 2, false, true, 4, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(BGRX8888, 8, 8, 8, 8, 3, 2, 0, 1, false, true, 4, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+
+ /* --- RGB formats above / YUV formats below this line --- */
+
+ /* 2 plane YUV */
+ FMT(NV12, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2,
+ MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true),
+ FMT(NV21, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2,
+ MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true),
+ FMT(NV16, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2,
+ MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true),
+ FMT(NV61, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2,
+ MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true),
+ /* 1 plane YUV */
+ FMT(VYUY, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 2, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+ FMT(UYVY, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 2, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+ FMT(YUYV, 0, 8, 8, 8, 0, 1, 0, 2, false, true, 2, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+ FMT(YVYU, 0, 8, 8, 8, 0, 2, 0, 1, false, true, 2, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true),
+ /* 3 plane YUV */
+ FMT(YUV420, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 1, 1,
+ MDP_PLANE_PLANAR, CHROMA_420, true),
+ FMT(YVU420, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 1, 1,
+ MDP_PLANE_PLANAR, CHROMA_420, true),
+};
+
+/*
+ * Note:
+ * @rgb_only must be set to true when requesting the
+ * supported formats for RGB pipes.
+ */
+uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats,
+ bool rgb_only)
+{
+	uint32_t i;
+
+	for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ const struct mdp_format *f = &formats[i];
+
+ if (i == max_formats)
+ break;
+
+ if (rgb_only && MDP_FORMAT_IS_YUV(f))
+ break;
+
+ pixel_formats[i] = f->base.pixel_format;
+ }
+
+ return i;
+}
+
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format,
+ uint64_t modifier)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ const struct mdp_format *f = &formats[i];
+ if (f->base.pixel_format == format)
+ return &f->base;
+ }
+ return NULL;
+}
+
+struct csc_cfg *mdp_get_default_csc_cfg(enum csc_type type)
+{
+ if (WARN_ON(type >= CSC_MAX))
+ return NULL;
+
+ return &csc_convert[type];
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp_kms.c b/drivers/gpu/drm/msm/disp/mdp_kms.c
new file mode 100644
index 000000000..3c35ccfc7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp_kms.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+
+#include "msm_drv.h"
+#include "mdp_kms.h"
+
+
+struct mdp_irq_wait {
+ struct mdp_irq irq;
+ int count;
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_event);
+
+static DEFINE_SPINLOCK(list_lock);
+
+static void update_irq(struct mdp_kms *mdp_kms)
+{
+ struct mdp_irq *irq;
+ uint32_t irqmask = mdp_kms->vblank_mask;
+
+ assert_spin_locked(&list_lock);
+
+ list_for_each_entry(irq, &mdp_kms->irq_list, node)
+ irqmask |= irq->irqmask;
+
+ mdp_kms->funcs->set_irqmask(mdp_kms, irqmask, mdp_kms->cur_irq_mask);
+ mdp_kms->cur_irq_mask = irqmask;
+}
+
+/* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder
+ * link changes, this must be called to figure out the new global irqmask
+ */
+void mdp_irq_update(struct mdp_kms *mdp_kms)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&list_lock, flags);
+ update_irq(mdp_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+}
+
+void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status)
+{
+ struct mdp_irq *handler, *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ mdp_kms->in_irq = true;
+ list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) {
+ if (handler->irqmask & status) {
+ spin_unlock_irqrestore(&list_lock, flags);
+ handler->irq(handler, handler->irqmask & status);
+ spin_lock_irqsave(&list_lock, flags);
+ }
+ }
+ mdp_kms->in_irq = false;
+ update_irq(mdp_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+}
+
+void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ if (enable)
+ mdp_kms->vblank_mask |= mask;
+ else
+ mdp_kms->vblank_mask &= ~mask;
+ update_irq(mdp_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+}
+
+static void wait_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+ struct mdp_irq_wait *wait =
+ container_of(irq, struct mdp_irq_wait, irq);
+ wait->count--;
+ wake_up_all(&wait_event);
+}
+
+void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask)
+{
+ struct mdp_irq_wait wait = {
+ .irq = {
+ .irq = wait_irq,
+ .irqmask = irqmask,
+ },
+ .count = 1,
+ };
+ mdp_irq_register(mdp_kms, &wait.irq);
+ wait_event_timeout(wait_event, (wait.count <= 0),
+ msecs_to_jiffies(100));
+ mdp_irq_unregister(mdp_kms, &wait.irq);
+}
+
+void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
+{
+ unsigned long flags;
+ bool needs_update = false;
+
+ spin_lock_irqsave(&list_lock, flags);
+
+ if (!irq->registered) {
+ irq->registered = true;
+ list_add(&irq->node, &mdp_kms->irq_list);
+ needs_update = !mdp_kms->in_irq;
+ }
+
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ if (needs_update)
+ mdp_irq_update(mdp_kms);
+}
+
+void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
+{
+ unsigned long flags;
+ bool needs_update = false;
+
+ spin_lock_irqsave(&list_lock, flags);
+
+ if (irq->registered) {
+ irq->registered = false;
+ list_del(&irq->node);
+ needs_update = !mdp_kms->in_irq;
+ }
+
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ if (needs_update)
+ mdp_irq_update(mdp_kms);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp_kms.h b/drivers/gpu/drm/msm/disp/mdp_kms.h
new file mode 100644
index 000000000..b0286d5d5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp_kms.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __MDP_KMS_H__
+#define __MDP_KMS_H__
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "mdp_common.xml.h"
+
+struct mdp_kms;
+
+struct mdp_kms_funcs {
+ struct msm_kms_funcs base;
+ void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask,
+ uint32_t old_irqmask);
+};
+
+struct mdp_kms {
+ struct msm_kms base;
+
+ const struct mdp_kms_funcs *funcs;
+
+ /* irq handling: */
+ bool in_irq;
+ struct list_head irq_list; /* list of mdp4_irq */
+ uint32_t vblank_mask; /* irq bits set for userspace vblank */
+ uint32_t cur_irq_mask; /* current irq mask */
+};
+#define to_mdp_kms(x) container_of(x, struct mdp_kms, base)
+
+static inline int mdp_kms_init(struct mdp_kms *mdp_kms,
+ const struct mdp_kms_funcs *funcs)
+{
+ mdp_kms->funcs = funcs;
+ INIT_LIST_HEAD(&mdp_kms->irq_list);
+ return msm_kms_init(&mdp_kms->base, &funcs->base);
+}
+
+static inline void mdp_kms_destroy(struct mdp_kms *mdp_kms)
+{
+ msm_kms_destroy(&mdp_kms->base);
+}
+
+/*
+ * irq helpers:
+ */
+
+/* For transiently registering for different MDP irqs that various parts
+ * of the KMS code need during setup/configuration. These are not
+ * necessarily the same as what drm_vblank_get/put() are requesting, and
+ * the hysteresis in drm_vblank_put() is not necessarily desirable for
+ * internal housekeeping related irq usage.
+ */
+struct mdp_irq {
+ struct list_head node;
+ uint32_t irqmask;
+ bool registered;
+ void (*irq)(struct mdp_irq *irq, uint32_t irqstatus);
+};
+
+void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status);
+void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable);
+void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask);
+void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
+void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
+void mdp_irq_update(struct mdp_kms *mdp_kms);
+
+/*
+ * pixel format helpers:
+ */
+
+struct mdp_format {
+ struct msm_format base;
+ enum mdp_bpc bpc_r, bpc_g, bpc_b;
+ enum mdp_bpc_alpha bpc_a;
+ uint8_t unpack[4];
+ bool alpha_enable, unpack_tight;
+ uint8_t cpp, unpack_count;
+ enum mdp_fetch_type fetch_type;
+ enum mdp_chroma_samp_type chroma_sample;
+ bool is_yuv;
+};
+#define to_mdp_format(x) container_of(x, struct mdp_format, base)
+#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv)
+
+uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only);
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier);
+
+/* MDP capabilities */
+#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */
+#define MDP_CAP_DSC BIT(1) /* VESA Display Stream Compression */
+#define MDP_CAP_CDM BIT(2) /* Chroma Down Module (HDMI 2.0 YUV) */
+#define MDP_CAP_SRC_SPLIT BIT(3) /* Source Split of SSPPs */
+
+/* MDP pipe capabilities */
+#define MDP_PIPE_CAP_HFLIP BIT(0)
+#define MDP_PIPE_CAP_VFLIP BIT(1)
+#define MDP_PIPE_CAP_SCALE BIT(2)
+#define MDP_PIPE_CAP_CSC BIT(3)
+#define MDP_PIPE_CAP_DECIMATION BIT(4)
+#define MDP_PIPE_CAP_SW_PIX_EXT BIT(5)
+#define MDP_PIPE_CAP_CURSOR BIT(6)
+
+/* MDP layer mixer caps */
+#define MDP_LM_CAP_DISPLAY BIT(0)
+#define MDP_LM_CAP_WB BIT(1)
+#define MDP_LM_CAP_PAIR BIT(2)
+
+static inline bool pipe_supports_yuv(uint32_t pipe_caps)
+{
+ return (pipe_caps & MDP_PIPE_CAP_SCALE) &&
+ (pipe_caps & MDP_PIPE_CAP_CSC);
+}
+
+enum csc_type {
+ CSC_RGB2RGB = 0,
+ CSC_YUV2RGB,
+ CSC_RGB2YUV,
+ CSC_YUV2YUV,
+ CSC_MAX
+};
+
+struct csc_cfg {
+ enum csc_type type;
+ uint32_t matrix[9];
+ uint32_t pre_bias[3];
+ uint32_t post_bias[3];
+ uint32_t pre_clamp[6];
+ uint32_t post_clamp[6];
+};
+
+struct csc_cfg *mdp_get_default_csc_cfg(enum csc_type);
+
+#endif /* __MDP_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
new file mode 100644
index 000000000..e75b97127
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include "msm_disp_snapshot.h"
+
+static ssize_t __maybe_unused disp_devcoredump_read(char *buffer, loff_t offset,
+ size_t count, void *data, size_t datalen)
+{
+ struct drm_print_iterator iter;
+ struct drm_printer p;
+ struct msm_disp_state *disp_state;
+
+ disp_state = data;
+
+ iter.data = buffer;
+ iter.offset = 0;
+ iter.start = offset;
+ iter.remain = count;
+
+ p = drm_coredump_printer(&iter);
+
+ msm_disp_state_print(disp_state, &p);
+
+ return count - iter.remain;
+}
+
+struct msm_disp_state *
+msm_disp_snapshot_state_sync(struct msm_kms *kms)
+{
+ struct drm_device *drm_dev = kms->dev;
+ struct msm_disp_state *disp_state;
+
+ WARN_ON(!mutex_is_locked(&kms->dump_mutex));
+
+ disp_state = kzalloc(sizeof(struct msm_disp_state), GFP_KERNEL);
+ if (!disp_state)
+ return ERR_PTR(-ENOMEM);
+
+ disp_state->dev = drm_dev->dev;
+ disp_state->drm_dev = drm_dev;
+
+ INIT_LIST_HEAD(&disp_state->blocks);
+
+ msm_disp_snapshot_capture_state(disp_state);
+
+ return disp_state;
+}
+
+static void _msm_disp_snapshot_work(struct kthread_work *work)
+{
+ struct msm_kms *kms = container_of(work, struct msm_kms, dump_work);
+ struct msm_disp_state *disp_state;
+ struct drm_printer p;
+
+ /* Serialize dumping here */
+ mutex_lock(&kms->dump_mutex);
+ disp_state = msm_disp_snapshot_state_sync(kms);
+ mutex_unlock(&kms->dump_mutex);
+
+ if (IS_ERR(disp_state))
+ return;
+
+ if (MSM_DISP_SNAPSHOT_DUMP_IN_CONSOLE) {
+ p = drm_info_printer(disp_state->drm_dev->dev);
+ msm_disp_state_print(disp_state, &p);
+ }
+
+ /*
+ * If COREDUMP is disabled, the stub will call the free function.
+	 * If there is a coredump pending for the device, dev_coredumpm()
+	 * will also free the new coredump state.
+ */
+ dev_coredumpm(disp_state->dev, THIS_MODULE, disp_state, 0, GFP_KERNEL,
+ disp_devcoredump_read, msm_disp_state_free);
+}
+
+void msm_disp_snapshot_state(struct drm_device *drm_dev)
+{
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+
+ if (!drm_dev) {
+ DRM_ERROR("invalid params\n");
+ return;
+ }
+
+ priv = drm_dev->dev_private;
+ kms = priv->kms;
+
+ kthread_queue_work(kms->dump_worker, &kms->dump_work);
+}
+
+int msm_disp_snapshot_init(struct drm_device *drm_dev)
+{
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+
+ if (!drm_dev) {
+ DRM_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ priv = drm_dev->dev_private;
+ kms = priv->kms;
+
+ mutex_init(&kms->dump_mutex);
+
+ kms->dump_worker = kthread_create_worker(0, "%s", "disp_snapshot");
+ if (IS_ERR(kms->dump_worker))
+ DRM_ERROR("failed to create disp state task\n");
+
+ kthread_init_work(&kms->dump_work, _msm_disp_snapshot_work);
+
+ return 0;
+}
+
+void msm_disp_snapshot_destroy(struct drm_device *drm_dev)
+{
+ struct msm_kms *kms;
+ struct msm_drm_private *priv;
+
+ if (!drm_dev) {
+ DRM_ERROR("invalid params\n");
+ return;
+ }
+
+ priv = drm_dev->dev_private;
+ kms = priv->kms;
+
+ if (kms->dump_worker)
+ kthread_destroy_worker(kms->dump_worker);
+
+ mutex_destroy(&kms->dump_mutex);
+}
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
new file mode 100644
index 000000000..b5f452bd7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef MSM_DISP_SNAPSHOT_H_
+#define MSM_DISP_SNAPSHOT_H_
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_device.h>
+#include "../../../drm_crtc_internal.h"
+#include <drm/drm_print.h>
+#include <drm/drm_atomic.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+#include <linux/slab.h>
+#include <linux/list_sort.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/kthread.h>
+#include <linux/devcoredump.h>
+#include "msm_kms.h"
+
+#define MSM_DISP_SNAPSHOT_MAX_BLKS 10
+
+/* debug option to print the registers in logs */
+#define MSM_DISP_SNAPSHOT_DUMP_IN_CONSOLE 0
+
+/* print debug ranges in groups of 4 u32s */
+#define REG_DUMP_ALIGN 16
+
+/**
+ * struct msm_disp_state - structure to store current dpu state
+ * @dev: device pointer
+ * @drm_dev: drm device pointer
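+ * @blocks: list of hardware-block register dumps captured for this snapshot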
+ * @atomic_state: atomic state duplicated at the time of the error
+ * @time: timestamp at which the coredump was captured
+ */
+struct msm_disp_state {
+ struct device *dev;
+ struct drm_device *drm_dev;
+
+ struct list_head blocks;
+
+ struct drm_atomic_state *atomic_state;
+
+ struct timespec64 time;
+};
+
+/**
+ * struct msm_disp_state_block - structure to store each hardware block state
+ * @name: name of the block
+ * @node: node in the parent &struct msm_disp_state blocks list
+ * @size: size of the register space of this hardware block
+ * @state: array holding the register dump of this hardware block
+ * @base_addr: starting address of this hardware block's register space
+ */
+struct msm_disp_state_block {
+ char name[SZ_128];
+ struct list_head node;
+ unsigned int size;
+ u32 *state;
+ void __iomem *base_addr;
+};
+
+/**
+ * msm_disp_snapshot_init - initialize display snapshot
+ * @drm_dev: drm device handle
+ *
+ * Returns: 0 or -ERROR
+ */
+int msm_disp_snapshot_init(struct drm_device *drm_dev);
+
+/**
+ * msm_disp_snapshot_destroy - destroy the display snapshot
+ * @drm_dev: drm device handle
+ *
+ * Returns: none
+ */
+void msm_disp_snapshot_destroy(struct drm_device *drm_dev);
+
+/**
+ * msm_disp_snapshot_state_sync - synchronously snapshot display state
+ * @kms: the kms object
+ *
+ * Returns state or error
+ *
+ * Must be called with &kms->dump_mutex held
+ */
+struct msm_disp_state *msm_disp_snapshot_state_sync(struct msm_kms *kms);
+
+/**
+ * msm_disp_snapshot_state - trigger to dump the display snapshot
+ * @drm_dev: handle to drm device
+ *
+ * Returns: none
+ */
+void msm_disp_snapshot_state(struct drm_device *drm_dev);
+
+/**
+ * msm_disp_state_print - print out the current dpu state
+ * @disp_state: handle to struct msm_disp_state
+ * @p: handle to drm printer
+ *
+ * Returns: none
+ */
+void msm_disp_state_print(struct msm_disp_state *disp_state, struct drm_printer *p);
+
+/**
+ * msm_disp_snapshot_capture_state - utility to capture atomic state and hw registers
+ * @disp_state: handle to msm_disp_state struct
+ *
+ * Returns: none
+ */
+void msm_disp_snapshot_capture_state(struct msm_disp_state *disp_state);
+
+/**
+ * msm_disp_state_free - free the memory after the coredump has been read
+ * @data: handle to struct msm_disp_state
+ *
+ * Returns: none
+ */
+void msm_disp_state_free(void *data);
+
+/**
+ * msm_disp_snapshot_add_block - add a hardware block with its register dump
+ * @disp_state: handle to struct msm_disp_state
+ * @len: size of the register space of the hardware block
+ * @base_addr: starting address of the register space of the hardware block
+ * @fmt: printf-style format used to build the block name
+ *
+ * Returns: none
+ */
+__printf(4, 5)
+void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
+ void __iomem *base_addr, const char *fmt, ...);
+
+#endif /* MSM_DISP_SNAPSHOT_H_ */
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
new file mode 100644
index 000000000..add72bbc2
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <generated/utsrelease.h>
+
+#include "msm_disp_snapshot.h"
+
+static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *base_addr)
+{
+ u32 len_padded;
+ u32 num_rows;
+ u32 x0, x4, x8, xc;
+ void __iomem *addr;
+ u32 *dump_addr = NULL;
+ void __iomem *end_addr;
+ int i;
+
+ len_padded = aligned_len * REG_DUMP_ALIGN;
+ num_rows = aligned_len / REG_DUMP_ALIGN;
+
+ addr = base_addr;
+ end_addr = base_addr + aligned_len;
+
+ if (!(*reg))
+ *reg = kzalloc(len_padded, GFP_KERNEL);
+
+ if (*reg)
+ dump_addr = *reg;
+
+ for (i = 0; i < num_rows; i++) {
+ x0 = (addr < end_addr) ? readl_relaxed(addr + 0x0) : 0;
+ x4 = (addr + 0x4 < end_addr) ? readl_relaxed(addr + 0x4) : 0;
+ x8 = (addr + 0x8 < end_addr) ? readl_relaxed(addr + 0x8) : 0;
+ xc = (addr + 0xc < end_addr) ? readl_relaxed(addr + 0xc) : 0;
+
+ if (dump_addr) {
+ dump_addr[i * 4] = x0;
+ dump_addr[i * 4 + 1] = x4;
+ dump_addr[i * 4 + 2] = x8;
+ dump_addr[i * 4 + 3] = xc;
+ }
+
+ addr += REG_DUMP_ALIGN;
+ }
+}
+
+static void msm_disp_state_print_regs(u32 **reg, u32 len, void __iomem *base_addr,
+ struct drm_printer *p)
+{
+ int i;
+ u32 *dump_addr = NULL;
+ void __iomem *addr;
+ u32 num_rows;
+
+ addr = base_addr;
+ num_rows = len / REG_DUMP_ALIGN;
+
+	if (*reg)
+		dump_addr = *reg;
+
+	/* bail out if the register snapshot was never captured */
+	if (!dump_addr) {
+		drm_printf(p, "Registers not stored\n");
+		return;
+	}
+
+ for (i = 0; i < num_rows; i++) {
+ drm_printf(p, "0x%lx : %08x %08x %08x %08x\n",
+ (unsigned long)(addr - base_addr),
+ dump_addr[i * 4], dump_addr[i * 4 + 1],
+ dump_addr[i * 4 + 2], dump_addr[i * 4 + 3]);
+ addr += REG_DUMP_ALIGN;
+ }
+}
+
+void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p)
+{
+ struct msm_disp_state_block *block, *tmp;
+
+ if (!p) {
+ DRM_ERROR("invalid drm printer\n");
+ return;
+ }
+
+ drm_printf(p, "---\n");
+ drm_printf(p, "kernel: " UTS_RELEASE "\n");
+ drm_printf(p, "module: " KBUILD_MODNAME "\n");
+ drm_printf(p, "dpu devcoredump\n");
+ drm_printf(p, "time: %lld.%09ld\n",
+ state->time.tv_sec, state->time.tv_nsec);
+
+ list_for_each_entry_safe(block, tmp, &state->blocks, node) {
+ drm_printf(p, "====================%s================\n", block->name);
+ msm_disp_state_print_regs(&block->state, block->size, block->base_addr, p);
+ }
+
+ drm_printf(p, "===================dpu drm state================\n");
+
+ if (state->atomic_state)
+ drm_atomic_print_new_state(state->atomic_state, p);
+}
+
+static void msm_disp_capture_atomic_state(struct msm_disp_state *disp_state)
+{
+ struct drm_device *ddev;
+ struct drm_modeset_acquire_ctx ctx;
+
+ ktime_get_real_ts64(&disp_state->time);
+
+ ddev = disp_state->drm_dev;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ while (drm_modeset_lock_all_ctx(ddev, &ctx) != 0)
+ drm_modeset_backoff(&ctx);
+
+ disp_state->atomic_state = drm_atomic_helper_duplicate_state(ddev,
+ &ctx);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+void msm_disp_snapshot_capture_state(struct msm_disp_state *disp_state)
+{
+ struct msm_drm_private *priv;
+ struct drm_device *drm_dev;
+ struct msm_kms *kms;
+ int i;
+
+ drm_dev = disp_state->drm_dev;
+ priv = drm_dev->dev_private;
+ kms = priv->kms;
+
+ for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
+ if (!priv->dp[i])
+ continue;
+
+ msm_dp_snapshot(disp_state, priv->dp[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+ if (!priv->dsi[i])
+ continue;
+
+ msm_dsi_snapshot(disp_state, priv->dsi[i]);
+ }
+
+ if (kms->funcs->snapshot)
+ kms->funcs->snapshot(disp_state, kms);
+
+ msm_disp_capture_atomic_state(disp_state);
+}
+
+void msm_disp_state_free(void *data)
+{
+ struct msm_disp_state *disp_state = data;
+ struct msm_disp_state_block *block, *tmp;
+
+ if (disp_state->atomic_state) {
+ drm_atomic_state_put(disp_state->atomic_state);
+ disp_state->atomic_state = NULL;
+ }
+
+ list_for_each_entry_safe(block, tmp, &disp_state->blocks, node) {
+ list_del(&block->node);
+ kfree(block->state);
+ kfree(block);
+ }
+
+ kfree(disp_state);
+}
+
+void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
+ void __iomem *base_addr, const char *fmt, ...)
+{
+ struct msm_disp_state_block *new_blk;
+ struct va_format vaf;
+ va_list va;
+
+ new_blk = kzalloc(sizeof(struct msm_disp_state_block), GFP_KERNEL);
+ if (!new_blk)
+ return;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+ snprintf(new_blk->name, sizeof(new_blk->name), "%pV", &vaf);
+
+ va_end(va);
+
+ INIT_LIST_HEAD(&new_blk->node);
+ new_blk->size = ALIGN(len, REG_DUMP_ALIGN);
+ new_blk->base_addr = base_addr;
+
+ msm_disp_state_dump_regs(&new_blk->state, new_blk->size, base_addr);
+ list_add_tail(&new_blk->node, &disp_state->blocks);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
new file mode 100644
index 000000000..1245c7aa4
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -0,0 +1,667 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
+ */
+
+
+#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
+
+#include <linux/of_platform.h>
+
+#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+
+#include "dp_catalog.h"
+#include "dp_audio.h"
+#include "dp_panel.h"
+#include "dp_display.h"
+
+#define HEADER_BYTE_2_BIT 0
+#define PARITY_BYTE_2_BIT 8
+#define HEADER_BYTE_1_BIT 16
+#define PARITY_BYTE_1_BIT 24
+#define HEADER_BYTE_3_BIT 16
+#define PARITY_BYTE_3_BIT 24
+
+struct dp_audio_private {
+ struct platform_device *audio_pdev;
+ struct platform_device *pdev;
+ struct drm_device *drm_dev;
+ struct dp_catalog *catalog;
+ struct dp_panel *panel;
+
+ bool engine_on;
+ u32 channels;
+
+ struct dp_audio dp_audio;
+};
+
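+/*
+ * Parity generation for the SDP header bytes: each byte is run through
+ * the g0/g1 generator functions one nibble at a time, producing the
+ * parity byte that is transmitted alongside the header byte.
+ */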
+static u8 dp_audio_get_g0_value(u8 data)
+{
+ u8 c[4];
+ u8 g[4];
+ u8 ret_data = 0;
+ u8 i;
+
+ for (i = 0; i < 4; i++)
+ c[i] = (data >> i) & 0x01;
+
+ g[0] = c[3];
+ g[1] = c[0] ^ c[3];
+ g[2] = c[1];
+ g[3] = c[2];
+
+ for (i = 0; i < 4; i++)
+ ret_data = ((g[i] & 0x01) << i) | ret_data;
+
+ return ret_data;
+}
+
+static u8 dp_audio_get_g1_value(u8 data)
+{
+ u8 c[4];
+ u8 g[4];
+ u8 ret_data = 0;
+ u8 i;
+
+ for (i = 0; i < 4; i++)
+ c[i] = (data >> i) & 0x01;
+
+ g[0] = c[0] ^ c[3];
+ g[1] = c[0] ^ c[1] ^ c[3];
+ g[2] = c[1] ^ c[2];
+ g[3] = c[2] ^ c[3];
+
+ for (i = 0; i < 4; i++)
+ ret_data = ((g[i] & 0x01) << i) | ret_data;
+
+ return ret_data;
+}
+
+static u8 dp_audio_calculate_parity(u32 data)
+{
+ u8 x0 = 0;
+ u8 x1 = 0;
+ u8 ci = 0;
+ u8 iData = 0;
+ u8 i = 0;
+ u8 parity_byte;
+ u8 num_byte = (data & 0xFF00) > 0 ? 8 : 2;
+
+ for (i = 0; i < num_byte; i++) {
+ iData = (data >> i*4) & 0xF;
+
+ ci = iData ^ x1;
+ x1 = x0 ^ dp_audio_get_g1_value(ci);
+ x0 = dp_audio_get_g0_value(ci);
+ }
+
+ parity_byte = x1 | (x0 << 4);
+
+ return parity_byte;
+}
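+
+/*
+ * Worked example of the computation above (hand-traced, for
+ * illustration only): data = 0x02 gives num_byte = 2, so the loop
+ * runs twice:
+ *
+ *   i = 0: ci = 0x2, x1 = g1(0x2) = 0x6, x0 = g0(0x2) = 0x4
+ *   i = 1: ci = 0x6, x1 = 0x4 ^ g1(0x6) = 0xe, x0 = g0(0x6) = 0xc
+ *
+ * yielding parity_byte = 0xe | (0xc << 4) = 0xce.
+ */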
+
+static u32 dp_audio_get_header(struct dp_catalog *catalog,
+ enum dp_catalog_audio_sdp_type sdp,
+ enum dp_catalog_audio_header_type header)
+{
+ catalog->sdp_type = sdp;
+ catalog->sdp_header = header;
+ dp_catalog_audio_get_header(catalog);
+
+ return catalog->audio_data;
+}
+
+static void dp_audio_set_header(struct dp_catalog *catalog,
+ u32 data,
+ enum dp_catalog_audio_sdp_type sdp,
+ enum dp_catalog_audio_header_type header)
+{
+ catalog->sdp_type = sdp;
+ catalog->sdp_header = header;
+ catalog->audio_data = data;
+ dp_catalog_audio_set_header(catalog);
+}
+
+static void dp_audio_stream_sdp(struct dp_audio_private *audio)
+{
+ struct dp_catalog *catalog = audio->catalog;
+ u32 value, new_value;
+ u8 parity_byte;
+
+ /* Config header and parity byte 1 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
+
+ new_value = 0x02;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_1_BIT)
+ | (parity_byte << PARITY_BYTE_1_BIT));
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
+
+ /* Config header and parity byte 2 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
+ new_value = value;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_2_BIT)
+ | (parity_byte << PARITY_BYTE_2_BIT));
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
+
+ /* Config header and parity byte 3 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
+
+ new_value = audio->channels - 1;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_3_BIT)
+ | (parity_byte << PARITY_BYTE_3_BIT));
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
+}
+
+static void dp_audio_timestamp_sdp(struct dp_audio_private *audio)
+{
+ struct dp_catalog *catalog = audio->catalog;
+ u32 value, new_value;
+ u8 parity_byte;
+
+ /* Config header and parity byte 1 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
+
+ new_value = 0x1;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_1_BIT)
+ | (parity_byte << PARITY_BYTE_1_BIT));
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
+
+ /* Config header and parity byte 2 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
+
+ new_value = 0x17;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_2_BIT)
+ | (parity_byte << PARITY_BYTE_2_BIT));
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
+
+ /* Config header and parity byte 3 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
+
+ new_value = (0x0 | (0x11 << 2));
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_3_BIT)
+ | (parity_byte << PARITY_BYTE_3_BIT));
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
+}
+
+static void dp_audio_infoframe_sdp(struct dp_audio_private *audio)
+{
+ struct dp_catalog *catalog = audio->catalog;
+ u32 value, new_value;
+ u8 parity_byte;
+
+ /* Config header and parity byte 1 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
+
+ new_value = 0x84;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_1_BIT)
+ | (parity_byte << PARITY_BYTE_1_BIT));
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
+
+ /* Config header and parity byte 2 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
+
+ new_value = 0x1b;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_2_BIT)
+ | (parity_byte << PARITY_BYTE_2_BIT));
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
+
+ /* Config header and parity byte 3 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
+
+ new_value = (0x0 | (0x11 << 2));
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_3_BIT)
+ | (parity_byte << PARITY_BYTE_3_BIT));
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
+		value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
+}
+
+static void dp_audio_copy_management_sdp(struct dp_audio_private *audio)
+{
+ struct dp_catalog *catalog = audio->catalog;
+ u32 value, new_value;
+ u8 parity_byte;
+
+ /* Config header and parity byte 1 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
+
+ new_value = 0x05;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_1_BIT)
+ | (parity_byte << PARITY_BYTE_1_BIT));
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
+
+ /* Config header and parity byte 2 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
+
+ new_value = 0x0F;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_2_BIT)
+ | (parity_byte << PARITY_BYTE_2_BIT));
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
+
+ /* Config header and parity byte 3 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
+
+ new_value = 0x0;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_3_BIT)
+ | (parity_byte << PARITY_BYTE_3_BIT));
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
+}
+
+static void dp_audio_isrc_sdp(struct dp_audio_private *audio)
+{
+ struct dp_catalog *catalog = audio->catalog;
+ u32 value, new_value;
+ u8 parity_byte;
+
+ /* Config header and parity byte 1 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
+
+ new_value = 0x06;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_1_BIT)
+ | (parity_byte << PARITY_BYTE_1_BIT));
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
+
+ /* Config header and parity byte 2 */
+ value = dp_audio_get_header(catalog,
+ DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
+
+ new_value = 0x0F;
+ parity_byte = dp_audio_calculate_parity(new_value);
+ value |= ((new_value << HEADER_BYTE_2_BIT)
+ | (parity_byte << PARITY_BYTE_2_BIT));
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
+ dp_audio_set_header(catalog, value,
+ DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
+}
+
+static void dp_audio_setup_sdp(struct dp_audio_private *audio)
+{
+ dp_catalog_audio_config_sdp(audio->catalog);
+
+ dp_audio_stream_sdp(audio);
+ dp_audio_timestamp_sdp(audio);
+ dp_audio_infoframe_sdp(audio);
+ dp_audio_copy_management_sdp(audio);
+ dp_audio_isrc_sdp(audio);
+}
+
+static void dp_audio_setup_acr(struct dp_audio_private *audio)
+{
+ u32 select = 0;
+ struct dp_catalog *catalog = audio->catalog;
+
+ switch (audio->dp_audio.bw_code) {
+ case DP_LINK_BW_1_62:
+ select = 0;
+ break;
+ case DP_LINK_BW_2_7:
+ select = 1;
+ break;
+ case DP_LINK_BW_5_4:
+ select = 2;
+ break;
+ case DP_LINK_BW_8_1:
+ select = 3;
+ break;
+ default:
+ drm_dbg_dp(audio->drm_dev, "Unknown link rate\n");
+ select = 0;
+ break;
+ }
+
+ catalog->audio_data = select;
+ dp_catalog_audio_config_acr(catalog);
+}
+
+static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio)
+{
+ struct dp_catalog *catalog = audio->catalog;
+ u32 safe_to_exit_level = 0;
+
+ switch (audio->dp_audio.lane_count) {
+ case 1:
+ safe_to_exit_level = 14;
+ break;
+ case 2:
+ safe_to_exit_level = 8;
+ break;
+ case 4:
+ safe_to_exit_level = 5;
+ break;
+ default:
+ drm_dbg_dp(audio->drm_dev,
+ "setting the default safe_to_exit_level = %u\n",
+ safe_to_exit_level);
+ safe_to_exit_level = 14;
+ break;
+ }
+
+ catalog->audio_data = safe_to_exit_level;
+ dp_catalog_audio_sfe_level(catalog);
+}
+
+static void dp_audio_enable(struct dp_audio_private *audio, bool enable)
+{
+ struct dp_catalog *catalog = audio->catalog;
+
+ catalog->audio_data = enable;
+ dp_catalog_audio_enable(catalog);
+
+ audio->engine_on = enable;
+}
+
+static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev)
+{
+ struct dp_audio *dp_audio;
+ struct msm_dp *dp_display;
+
+ if (!pdev) {
+ DRM_ERROR("invalid input\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ dp_display = platform_get_drvdata(pdev);
+ if (!dp_display) {
+ DRM_ERROR("invalid input\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ dp_audio = dp_display->dp_audio;
+
+ if (!dp_audio) {
+ DRM_ERROR("invalid dp_audio data\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return container_of(dp_audio, struct dp_audio_private, dp_audio);
+}
+
+static int dp_audio_hook_plugged_cb(struct device *dev, void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct platform_device *pdev;
+ struct msm_dp *dp_display;
+
+ pdev = to_platform_device(dev);
+ if (!pdev) {
+ pr_err("invalid input\n");
+ return -ENODEV;
+ }
+
+ dp_display = platform_get_drvdata(pdev);
+ if (!dp_display) {
+ pr_err("invalid input\n");
+ return -ENODEV;
+ }
+
+ return dp_display_set_plugged_cb(dp_display, fn, codec_dev);
+}
+
+static int dp_audio_get_eld(struct device *dev,
+ void *data, uint8_t *buf, size_t len)
+{
+ struct platform_device *pdev;
+ struct msm_dp *dp_display;
+
+ pdev = to_platform_device(dev);
+
+ if (!pdev) {
+ DRM_ERROR("invalid input\n");
+ return -ENODEV;
+ }
+
+ dp_display = platform_get_drvdata(pdev);
+ if (!dp_display) {
+ DRM_ERROR("invalid input\n");
+ return -ENODEV;
+ }
+
+ memcpy(buf, dp_display->connector->eld,
+ min(sizeof(dp_display->connector->eld), len));
+
+ return 0;
+}
+
+int dp_audio_hw_params(struct device *dev,
+ void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ int rc = 0;
+ struct dp_audio_private *audio;
+ struct platform_device *pdev;
+ struct msm_dp *dp_display;
+
+ pdev = to_platform_device(dev);
+ dp_display = platform_get_drvdata(pdev);
+
+	/*
+	 * The sound card can be opened before DP is ready, or even when
+	 * DP is not connected at all. That can cause unclocked register
+	 * access, since the audio subsystem relies on the DP driver to
+	 * keep the clocks in the correct state. To protect against such
+	 * cases, check the connection status and bail out if the display
+	 * is not powered on.
+	 */
+ if (!dp_display->power_on) {
+ rc = -EINVAL;
+ goto end;
+ }
+
+ audio = dp_audio_get_data(pdev);
+ if (IS_ERR(audio)) {
+ rc = PTR_ERR(audio);
+ goto end;
+ }
+
+ audio->channels = params->channels;
+
+ dp_audio_setup_sdp(audio);
+ dp_audio_setup_acr(audio);
+ dp_audio_safe_to_exit_level(audio);
+ dp_audio_enable(audio, true);
+ dp_display_signal_audio_start(dp_display);
+ dp_display->audio_enabled = true;
+
+end:
+ return rc;
+}
+
+static void dp_audio_shutdown(struct device *dev, void *data)
+{
+ struct dp_audio_private *audio;
+ struct platform_device *pdev;
+ struct msm_dp *dp_display;
+
+ pdev = to_platform_device(dev);
+ dp_display = platform_get_drvdata(pdev);
+ audio = dp_audio_get_data(pdev);
+ if (IS_ERR(audio)) {
+ DRM_ERROR("failed to get audio data\n");
+ return;
+ }
+
+	/*
+	 * If audio was never enabled there is nothing to shut down, so
+	 * we can bail out early. This also makes sure that we don't cause
+	 * an unclocked access when the audio subsystem calls this without
+	 * DP being connected. is_connected cannot be used here as it is
+	 * set to false earlier than this call.
+	 */
+ if (!dp_display->audio_enabled)
+ return;
+
+ dp_audio_enable(audio, false);
+ /* signal the dp display to safely shutdown clocks */
+ dp_display_signal_audio_complete(dp_display);
+}
+
+static const struct hdmi_codec_ops dp_audio_codec_ops = {
+ .hw_params = dp_audio_hw_params,
+ .audio_shutdown = dp_audio_shutdown,
+ .get_eld = dp_audio_get_eld,
+ .hook_plugged_cb = dp_audio_hook_plugged_cb,
+};
+
+static struct hdmi_codec_pdata codec_data = {
+ .ops = &dp_audio_codec_ops,
+ .max_i2s_channels = 8,
+ .i2s = 1,
+};
+
+void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio)
+{
+ struct dp_audio_private *audio_priv;
+
+ audio_priv = container_of(dp_audio, struct dp_audio_private, dp_audio);
+
+ if (audio_priv->audio_pdev) {
+ platform_device_unregister(audio_priv->audio_pdev);
+ audio_priv->audio_pdev = NULL;
+ }
+}
+
+int dp_register_audio_driver(struct device *dev,
+ struct dp_audio *dp_audio)
+{
+ struct dp_audio_private *audio_priv;
+
+ audio_priv = container_of(dp_audio,
+ struct dp_audio_private, dp_audio);
+
+ audio_priv->audio_pdev = platform_device_register_data(dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_data,
+ sizeof(codec_data));
+ return PTR_ERR_OR_ZERO(audio_priv->audio_pdev);
+}
+
+struct dp_audio *dp_audio_get(struct platform_device *pdev,
+ struct dp_panel *panel,
+ struct dp_catalog *catalog)
+{
+ int rc = 0;
+ struct dp_audio_private *audio;
+ struct dp_audio *dp_audio;
+
+ if (!pdev || !panel || !catalog) {
+ DRM_ERROR("invalid input\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ audio = devm_kzalloc(&pdev->dev, sizeof(*audio), GFP_KERNEL);
+ if (!audio) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ audio->pdev = pdev;
+ audio->panel = panel;
+ audio->catalog = catalog;
+
+ dp_audio = &audio->dp_audio;
+
+ dp_catalog_audio_init(catalog);
+
+ return dp_audio;
+error:
+ return ERR_PTR(rc);
+}
+
+void dp_audio_put(struct dp_audio *dp_audio)
+{
+ struct dp_audio_private *audio;
+
+ if (!dp_audio)
+ return;
+
+ audio = container_of(dp_audio, struct dp_audio_private, dp_audio);
+
+ devm_kfree(&audio->pdev->dev, audio);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h
new file mode 100644
index 000000000..4ab78880a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_audio.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_AUDIO_H_
+#define _DP_AUDIO_H_
+
+#include <linux/platform_device.h>
+
+#include "dp_panel.h"
+#include "dp_catalog.h"
+#include <sound/hdmi-codec.h>
+
+/**
+ * struct dp_audio
+ * @lane_count: number of lanes configured in current session
+ * @bw_code: link rate's bandwidth code for current session
+ */
+struct dp_audio {
+ u32 lane_count;
+ u32 bw_code;
+};
+
+/**
+ * dp_audio_get()
+ *
+ * Creates an instance of dp audio.
+ *
+ * @pdev: caller's platform device instance.
+ * @panel: an instance of dp_panel module.
+ * @catalog: an instance of dp_catalog module.
+ *
+ * Returns an error pointer in case of failure, otherwise
+ * an instance of the newly created dp_audio module.
+ */
+struct dp_audio *dp_audio_get(struct platform_device *pdev,
+ struct dp_panel *panel,
+ struct dp_catalog *catalog);
+
+/**
+ * dp_register_audio_driver()
+ *
+ * Registers DP device with hdmi_codec interface.
+ *
+ * @dev: DP device instance.
+ * @dp_audio: an instance of dp_audio module.
+ *
+ * Returns an error code on failure, otherwise
+ * zero on success.
+ */
+int dp_register_audio_driver(struct device *dev,
+ struct dp_audio *dp_audio);
+
+void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio);
+
+/**
+ * dp_audio_put()
+ *
+ * Cleans the dp_audio instance.
+ *
+ * @dp_audio: an instance of dp_audio.
+ */
+void dp_audio_put(struct dp_audio *dp_audio);
+
+int dp_audio_hw_params(struct device *dev,
+ void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params);
+
+#endif /* _DP_AUDIO_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
new file mode 100644
index 000000000..84f9e3e5f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -0,0 +1,541 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <drm/drm_print.h>
+
+#include "dp_reg.h"
+#include "dp_aux.h"
+
+enum msm_dp_aux_err {
+ DP_AUX_ERR_NONE,
+ DP_AUX_ERR_ADDR,
+ DP_AUX_ERR_TOUT,
+ DP_AUX_ERR_NACK,
+ DP_AUX_ERR_DEFER,
+ DP_AUX_ERR_NACK_DEFER,
+ DP_AUX_ERR_PHY,
+};
+
+struct dp_aux_private {
+ struct device *dev;
+ struct dp_catalog *catalog;
+
+ struct mutex mutex;
+ struct completion comp;
+
+ enum msm_dp_aux_err aux_error_num;
+ u32 retry_cnt;
+ bool cmd_busy;
+ bool native;
+ bool read;
+ bool no_send_addr;
+ bool no_send_stop;
+ bool initted;
+ bool is_edp;
+ u32 offset;
+ u32 segment;
+
+ struct drm_dp_aux dp_aux;
+};
+
+#define MAX_AUX_RETRIES 5
+
+static ssize_t dp_aux_write(struct dp_aux_private *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ u8 data[4];
+ u32 reg;
+ ssize_t len;
+ u8 *msgdata = msg->buffer;
+ int const AUX_CMD_FIFO_LEN = 128;
+ int i = 0;
+
+ if (aux->read)
+ len = 0;
+ else
+ len = msg->size;
+
+	/*
+	 * The command FIFO is only 144 bytes deep, so limit the buffer
+	 * length to 128 bytes here.
+	 */
+ if (len > AUX_CMD_FIFO_LEN - 4) {
+ DRM_ERROR("buf size greater than allowed size of 128 bytes\n");
+ return -EINVAL;
+ }
+
+ /* Pack cmd and write to HW */
+ data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */
+ if (aux->read)
+ data[0] |= BIT(4); /* R/W */
+
+ data[1] = msg->address >> 8; /* addr[15:8] */
+ data[2] = msg->address; /* addr[7:0] */
+ data[3] = msg->size - 1; /* len[7:0] */
+
+ for (i = 0; i < len + 4; i++) {
+ reg = (i < 4) ? data[i] : msgdata[i - 4];
+ reg <<= DP_AUX_DATA_OFFSET;
+ reg &= DP_AUX_DATA_MASK;
+ reg |= DP_AUX_DATA_WRITE;
+ /* index = 0, write */
+ if (i == 0)
+ reg |= DP_AUX_DATA_INDEX_WRITE;
+ aux->catalog->aux_data = reg;
+ dp_catalog_aux_write_data(aux->catalog);
+ }
+
+ dp_catalog_aux_clear_trans(aux->catalog, false);
+ dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+
+ reg = 0; /* Transaction number == 1 */
+ if (!aux->native) { /* i2c */
+ reg |= DP_AUX_TRANS_CTRL_I2C;
+
+ if (aux->no_send_addr)
+ reg |= DP_AUX_TRANS_CTRL_NO_SEND_ADDR;
+
+ if (aux->no_send_stop)
+ reg |= DP_AUX_TRANS_CTRL_NO_SEND_STOP;
+ }
+
+ reg |= DP_AUX_TRANS_CTRL_GO;
+ aux->catalog->aux_data = reg;
+ dp_catalog_aux_write_trans(aux->catalog);
+
+ return len;
+}
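+
+/*
+ * Header packing sketch (illustration only): a native read of 16 bytes
+ * from DPCD address 0x00200 queues
+ *
+ *   data[0] = 0x10   addr[19:16] = 0x0, BIT(4) set for a read
+ *   data[1] = 0x02   addr[15:8]
+ *   data[2] = 0x00   addr[7:0]
+ *   data[3] = 0x0f   msg->size - 1
+ */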
+
+static ssize_t dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ ssize_t ret;
+ unsigned long time_left;
+
+ reinit_completion(&aux->comp);
+
+ ret = dp_aux_write(aux, msg);
+ if (ret < 0)
+ return ret;
+
+ time_left = wait_for_completion_timeout(&aux->comp,
+ msecs_to_jiffies(250));
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return ret;
+}
+
+static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ u32 data;
+ u8 *dp;
+ u32 i, actual_i;
+ u32 len = msg->size;
+
+ dp_catalog_aux_clear_trans(aux->catalog, true);
+
+ data = DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */
+ data |= DP_AUX_DATA_READ; /* read */
+
+ aux->catalog->aux_data = data;
+ dp_catalog_aux_write_data(aux->catalog);
+
+ dp = msg->buffer;
+
+ /* discard first byte */
+ data = dp_catalog_aux_read_data(aux->catalog);
+
+ for (i = 0; i < len; i++) {
+ data = dp_catalog_aux_read_data(aux->catalog);
+ *dp++ = (u8)((data >> DP_AUX_DATA_OFFSET) & 0xff);
+
+ actual_i = (data >> DP_AUX_DATA_INDEX_OFFSET) & 0xFF;
+ if (i != actual_i)
+ break;
+ }
+
+ return i;
+}
+
+static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux,
+ struct drm_dp_aux_msg *input_msg)
+{
+ u32 edid_address = 0x50;
+ u32 segment_address = 0x30;
+ bool i2c_read = input_msg->request &
+ (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
+ u8 *data;
+
+ if (aux->native || i2c_read || ((input_msg->address != edid_address) &&
+ (input_msg->address != segment_address)))
+ return;
+
+ data = input_msg->buffer;
+ if (input_msg->address == segment_address)
+ aux->segment = *data;
+ else
+ aux->offset = *data;
+}
+
+/**
+ * dp_aux_transfer_helper() - helper function for EDID read transactions
+ *
+ * @aux: DP AUX private structure
+ * @input_msg: input message from DRM upstream APIs
+ * @send_seg: send the segment to sink
+ *
+ * return: void
+ *
+ * This helper function is used to fix EDID reads for non-compliant
+ * sinks that do not handle the i2c middle-of-transaction flag correctly.
+ */
+static void dp_aux_transfer_helper(struct dp_aux_private *aux,
+ struct drm_dp_aux_msg *input_msg,
+ bool send_seg)
+{
+ struct drm_dp_aux_msg helper_msg;
+ u32 message_size = 0x10;
+ u32 segment_address = 0x30;
+ u32 const edid_block_length = 0x80;
+ bool i2c_mot = input_msg->request & DP_AUX_I2C_MOT;
+ bool i2c_read = input_msg->request &
+ (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
+
+ if (!i2c_mot || !i2c_read || (input_msg->size == 0))
+ return;
+
+ /*
+	 * The upstream DRM EDID driver already sends the segment value
+	 * and EDID offset for each block, so avoid duplicating those
+	 * AUX transactions while reading the first 16 bytes of each
+	 * block.
+ */
+ if (!(aux->offset % edid_block_length) || !send_seg)
+ goto end;
+
+ aux->read = false;
+ aux->cmd_busy = true;
+ aux->no_send_addr = true;
+ aux->no_send_stop = true;
+
+	/*
+	 * Send the segment address for every i2c read in which the
+	 * middle-of-transaction flag is set. This is required to support
+	 * EDID reads of more than 2 blocks, as the segment address is
+	 * reset to 0 because we override the middle-of-transaction flag
+	 * for read transactions.
+	 */
+
+ if (aux->segment) {
+ memset(&helper_msg, 0, sizeof(helper_msg));
+ helper_msg.address = segment_address;
+ helper_msg.buffer = &aux->segment;
+ helper_msg.size = 1;
+ dp_aux_cmd_fifo_tx(aux, &helper_msg);
+ }
+
+	/*
+	 * Send the offset address for every i2c read in which the
+	 * middle-of-transaction flag is set. This ensures that the sink
+	 * updates its read pointer and returns the correct portion of
+	 * the EDID buffer in the subsequent i2c read transaction
+	 * triggered in the native AUX transfer function.
+	 */
+ memset(&helper_msg, 0, sizeof(helper_msg));
+ helper_msg.address = input_msg->address;
+ helper_msg.buffer = &aux->offset;
+ helper_msg.size = 1;
+ dp_aux_cmd_fifo_tx(aux, &helper_msg);
+
+end:
+ aux->offset += message_size;
+ if (aux->offset == 0x80 || aux->offset == 0x100)
+ aux->segment = 0x0; /* reset segment at end of block */
+}
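+
+/*
+ * Example flow (illustration only): to read EDID block 2 the DRM core
+ * writes 0x01 to i2c address 0x30 (segment) and 0x00 to 0x50 (offset).
+ * dp_aux_update_offset_and_segment() caches both values, and this
+ * helper replays them before every 16-byte read whose offset is not
+ * block aligned, so sinks that mishandle the middle-of-transaction
+ * flag still see a consistent segment/offset pair.
+ */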
+
+/*
+ * This function does the real work of processing an AUX transaction.
+ * It resets the AUX channel if the transfer fails while the link is
+ * still connected.
+ */
+static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
+ struct drm_dp_aux_msg *msg)
+{
+ ssize_t ret;
+ int const aux_cmd_native_max = 16;
+ int const aux_cmd_i2c_max = 128;
+ struct dp_aux_private *aux;
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
+
+ /* Ignore address only message */
+ if (msg->size == 0 || !msg->buffer) {
+ msg->reply = aux->native ?
+ DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+ return msg->size;
+ }
+
+ /* msg sanity check */
+ if ((aux->native && msg->size > aux_cmd_native_max) ||
+ msg->size > aux_cmd_i2c_max) {
+ DRM_ERROR("%s: invalid msg: size(%zu), request(%x)\n",
+ __func__, msg->size, msg->request);
+ return -EINVAL;
+ }
+
+ mutex_lock(&aux->mutex);
+ if (!aux->initted) {
+ ret = -EIO;
+ goto exit;
+ }
+
+ /*
+ * For eDP it's important to give a reasonably long wait here for HPD
+ * to be asserted. This is because the panel driver may have _just_
+ * turned on the panel and then tried to do an AUX transfer. The panel
+ * driver has no way of knowing when the panel is ready, so it's up
+ * to us to wait. For DP we never get into this situation so let's
+ * avoid ever doing the extra long wait for DP.
+ */
+ if (aux->is_edp) {
+ ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog);
+ if (ret) {
+ DRM_DEBUG_DP("Panel not ready for aux transactions\n");
+ goto exit;
+ }
+ }
+
+ dp_aux_update_offset_and_segment(aux, msg);
+ dp_aux_transfer_helper(aux, msg, true);
+
+ aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
+ aux->cmd_busy = true;
+
+ if (aux->read) {
+ aux->no_send_addr = true;
+ aux->no_send_stop = false;
+ } else {
+ aux->no_send_addr = true;
+ aux->no_send_stop = true;
+ }
+
+ ret = dp_aux_cmd_fifo_tx(aux, msg);
+ if (ret < 0) {
+ if (aux->native) {
+ aux->retry_cnt++;
+ if (!(aux->retry_cnt % MAX_AUX_RETRIES))
+ dp_catalog_aux_update_cfg(aux->catalog);
+ }
+ /* reset aux if link is in connected state */
+ if (dp_catalog_link_is_connected(aux->catalog))
+ dp_catalog_aux_reset(aux->catalog);
+ } else {
+ aux->retry_cnt = 0;
+ switch (aux->aux_error_num) {
+ case DP_AUX_ERR_NONE:
+ if (aux->read)
+ ret = dp_aux_cmd_fifo_rx(aux, msg);
+ msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+ break;
+ case DP_AUX_ERR_DEFER:
+ msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
+ break;
+ case DP_AUX_ERR_PHY:
+ case DP_AUX_ERR_ADDR:
+ case DP_AUX_ERR_NACK:
+ case DP_AUX_ERR_NACK_DEFER:
+ msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_NACK : DP_AUX_I2C_REPLY_NACK;
+ break;
+ case DP_AUX_ERR_TOUT:
+ ret = -ETIMEDOUT;
+ break;
+ }
+ }
+
+ aux->cmd_busy = false;
+
+exit:
+ mutex_unlock(&aux->mutex);
+
+ return ret;
+}
+
+void dp_aux_isr(struct drm_dp_aux *dp_aux)
+{
+ u32 isr;
+ struct dp_aux_private *aux;
+
+ if (!dp_aux) {
+ DRM_ERROR("invalid input\n");
+ return;
+ }
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ isr = dp_catalog_aux_get_irq(aux->catalog);
+
+ /* no interrupts pending, return immediately */
+ if (!isr)
+ return;
+
+ if (!aux->cmd_busy) {
+ DRM_ERROR("Unexpected DP AUX IRQ %#010x when not busy\n", isr);
+ return;
+ }
+
+ /*
+ * The logic below assumes only one error bit is set (other than "done"
+ * which can apparently be set at the same time as some of the other
+ * bits). Warn if more than one get set so we know we need to improve
+ * the logic.
+ */
+ if (hweight32(isr & ~DP_INTR_AUX_XFER_DONE) > 1)
+ DRM_WARN("Some DP AUX interrupts unhandled: %#010x\n", isr);
+
+ if (isr & DP_INTR_AUX_ERROR) {
+ aux->aux_error_num = DP_AUX_ERR_PHY;
+ dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+ } else if (isr & DP_INTR_NACK_DEFER) {
+ aux->aux_error_num = DP_AUX_ERR_NACK_DEFER;
+ } else if (isr & DP_INTR_WRONG_ADDR) {
+ aux->aux_error_num = DP_AUX_ERR_ADDR;
+ } else if (isr & DP_INTR_TIMEOUT) {
+ aux->aux_error_num = DP_AUX_ERR_TOUT;
+ } else if (!aux->native && (isr & DP_INTR_I2C_NACK)) {
+ aux->aux_error_num = DP_AUX_ERR_NACK;
+ } else if (!aux->native && (isr & DP_INTR_I2C_DEFER)) {
+ if (isr & DP_INTR_AUX_XFER_DONE)
+ aux->aux_error_num = DP_AUX_ERR_NACK;
+ else
+ aux->aux_error_num = DP_AUX_ERR_DEFER;
+ } else if (isr & DP_INTR_AUX_XFER_DONE) {
+ aux->aux_error_num = DP_AUX_ERR_NONE;
+ } else {
+ DRM_WARN("Unexpected interrupt: %#010x\n", isr);
+ return;
+ }
+
+ complete(&aux->comp);
+}
+
+void dp_aux_reconfig(struct drm_dp_aux *dp_aux)
+{
+ struct dp_aux_private *aux;
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ dp_catalog_aux_update_cfg(aux->catalog);
+ dp_catalog_aux_reset(aux->catalog);
+}
+
+void dp_aux_init(struct drm_dp_aux *dp_aux)
+{
+ struct dp_aux_private *aux;
+
+ if (!dp_aux) {
+ DRM_ERROR("invalid input\n");
+ return;
+ }
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ mutex_lock(&aux->mutex);
+
+ dp_catalog_aux_enable(aux->catalog, true);
+ aux->retry_cnt = 0;
+ aux->initted = true;
+
+ mutex_unlock(&aux->mutex);
+}
+
+void dp_aux_deinit(struct drm_dp_aux *dp_aux)
+{
+ struct dp_aux_private *aux;
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ mutex_lock(&aux->mutex);
+
+ aux->initted = false;
+ dp_catalog_aux_enable(aux->catalog, false);
+
+ mutex_unlock(&aux->mutex);
+}
+
+int dp_aux_register(struct drm_dp_aux *dp_aux)
+{
+ struct dp_aux_private *aux;
+ int ret;
+
+ if (!dp_aux) {
+ DRM_ERROR("invalid input\n");
+ return -EINVAL;
+ }
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ aux->dp_aux.name = "dpu_dp_aux";
+ aux->dp_aux.dev = aux->dev;
+ aux->dp_aux.transfer = dp_aux_transfer;
+ ret = drm_dp_aux_register(&aux->dp_aux);
+ if (ret) {
+ DRM_ERROR("%s: failed to register drm aux: %d\n", __func__,
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void dp_aux_unregister(struct drm_dp_aux *dp_aux)
+{
+ drm_dp_aux_unregister(dp_aux);
+}
+
+struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog,
+ bool is_edp)
+{
+ struct dp_aux_private *aux;
+
+ if (!catalog) {
+ DRM_ERROR("invalid input\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL);
+ if (!aux)
+ return ERR_PTR(-ENOMEM);
+
+ init_completion(&aux->comp);
+ aux->cmd_busy = false;
+ aux->is_edp = is_edp;
+ mutex_init(&aux->mutex);
+
+ aux->dev = dev;
+ aux->catalog = catalog;
+ aux->retry_cnt = 0;
+
+ return &aux->dp_aux;
+}
+
+void dp_aux_put(struct drm_dp_aux *dp_aux)
+{
+ struct dp_aux_private *aux;
+
+ if (!dp_aux)
+ return;
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ mutex_destroy(&aux->mutex);
+
+ devm_kfree(aux->dev, aux);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
new file mode 100644
index 000000000..e930974bc
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_AUX_H_
+#define _DP_AUX_H_
+
+#include "dp_catalog.h"
+#include <drm/display/drm_dp_helper.h>
+
+int dp_aux_register(struct drm_dp_aux *dp_aux);
+void dp_aux_unregister(struct drm_dp_aux *dp_aux);
+void dp_aux_isr(struct drm_dp_aux *dp_aux);
+void dp_aux_init(struct drm_dp_aux *dp_aux);
+void dp_aux_deinit(struct drm_dp_aux *dp_aux);
+void dp_aux_reconfig(struct drm_dp_aux *dp_aux);
+
+struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog,
+ bool is_edp);
+void dp_aux_put(struct drm_dp_aux *aux);
+
+#endif /* _DP_AUX_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
new file mode 100644
index 000000000..421391755
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -0,0 +1,1096 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-dp.h>
+#include <linux/rational.h>
+#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_print.h>
+
+#include "dp_catalog.h"
+#include "dp_reg.h"
+
+#define POLLING_SLEEP_US 1000
+#define POLLING_TIMEOUT_US 10000
+
+#define SCRAMBLER_RESET_COUNT_VALUE 0xFC
+
+#define DP_INTERRUPT_STATUS_ACK_SHIFT 1
+#define DP_INTERRUPT_STATUS_MASK_SHIFT 2
+
+#define DP_INTF_CONFIG_DATABUS_WIDEN BIT(4)
+
+#define DP_INTERRUPT_STATUS1 \
+ (DP_INTR_AUX_XFER_DONE| \
+ DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \
+ DP_INTR_NACK_DEFER | DP_INTR_WRONG_DATA_CNT | \
+ DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER | \
+ DP_INTR_PLL_UNLOCKED | DP_INTR_AUX_ERROR)
+
+#define DP_INTERRUPT_STATUS1_ACK \
+ (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_ACK_SHIFT)
+#define DP_INTERRUPT_STATUS1_MASK \
+ (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_MASK_SHIFT)
+
+#define DP_INTERRUPT_STATUS2 \
+ (DP_INTR_READY_FOR_VIDEO | DP_INTR_IDLE_PATTERN_SENT | \
+ DP_INTR_FRAME_END | DP_INTR_CRC_UPDATED)
+
+#define DP_INTERRUPT_STATUS2_ACK \
+ (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_ACK_SHIFT)
+#define DP_INTERRUPT_STATUS2_MASK \
+ (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_MASK_SHIFT)
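+
+/*
+ * Each interrupt uses three adjacent bits in the status registers:
+ * BIT(n) is the raw status, BIT(n + 1) acks it and BIT(n + 2) unmasks
+ * it, which is why the _ACK and _MASK variants above are the status
+ * sets shifted left by 1 and 2 bits respectively.
+ */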
+
+struct dp_catalog_private {
+ struct device *dev;
+ struct drm_device *drm_dev;
+ struct dp_io *io;
+ u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX];
+ struct dp_catalog dp_catalog;
+ u8 aux_lut_cfg_index[PHY_AUX_CFG_MAX];
+};
+
+void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *disp_state)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+ struct dss_io_data *dss = &catalog->io->dp_controller;
+
+ msm_disp_snapshot_add_block(disp_state, dss->ahb.len, dss->ahb.base, "dp_ahb");
+ msm_disp_snapshot_add_block(disp_state, dss->aux.len, dss->aux.base, "dp_aux");
+ msm_disp_snapshot_add_block(disp_state, dss->link.len, dss->link.base, "dp_link");
+ msm_disp_snapshot_add_block(disp_state, dss->p0.len, dss->p0.base, "dp_p0");
+}
+
+static inline u32 dp_read_aux(struct dp_catalog_private *catalog, u32 offset)
+{
+ return readl_relaxed(catalog->io->dp_controller.aux.base + offset);
+}
+
+static inline void dp_write_aux(struct dp_catalog_private *catalog,
+ u32 offset, u32 data)
+{
+ /*
+	 * To make sure aux reg writes happen before any other operation,
+	 * this function uses writel() instead of writel_relaxed()
+ */
+ writel(data, catalog->io->dp_controller.aux.base + offset);
+}
+
+static inline u32 dp_read_ahb(const struct dp_catalog_private *catalog, u32 offset)
+{
+ return readl_relaxed(catalog->io->dp_controller.ahb.base + offset);
+}
+
+static inline void dp_write_ahb(struct dp_catalog_private *catalog,
+ u32 offset, u32 data)
+{
+ /*
+	 * To make sure ahb reg writes happen before any other operation,
+	 * this function uses writel() instead of writel_relaxed()
+ */
+ writel(data, catalog->io->dp_controller.ahb.base + offset);
+}
+
+static inline void dp_write_p0(struct dp_catalog_private *catalog,
+ u32 offset, u32 data)
+{
+ /*
+	 * To make sure interface reg writes happen before any other operation,
+	 * this function uses writel() instead of writel_relaxed()
+ */
+ writel(data, catalog->io->dp_controller.p0.base + offset);
+}
+
+static inline u32 dp_read_p0(struct dp_catalog_private *catalog,
+ u32 offset)
+{
+	/*
+	 * Reads here need no ordering guarantee against other accesses,
+	 * so readl_relaxed() is sufficient.
+	 */
+ return readl_relaxed(catalog->io->dp_controller.p0.base + offset);
+}
+
+static inline u32 dp_read_link(struct dp_catalog_private *catalog, u32 offset)
+{
+ return readl_relaxed(catalog->io->dp_controller.link.base + offset);
+}
+
+static inline void dp_write_link(struct dp_catalog_private *catalog,
+ u32 offset, u32 data)
+{
+ /*
+	 * To make sure link reg writes happen before any other operation,
+	 * this function uses writel() instead of writel_relaxed()
+ */
+ writel(data, catalog->io->dp_controller.link.base + offset);
+}
+
+/* aux related catalog functions */
+u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ return dp_read_aux(catalog, REG_DP_AUX_DATA);
+}
+
+int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ dp_write_aux(catalog, REG_DP_AUX_DATA, dp_catalog->aux_data);
+ return 0;
+}
+
+int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, dp_catalog->aux_data);
+ return 0;
+}
+
+int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read)
+{
+ u32 data;
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ if (read) {
+ data = dp_read_aux(catalog, REG_DP_AUX_TRANS_CTRL);
+ data &= ~DP_AUX_TRANS_CTRL_GO;
+ dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data);
+ } else {
+ dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, 0);
+ }
+ return 0;
+}
+
+int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ dp_read_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_STATUS);
+ dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f);
+ dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f);
+ dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0);
+ return 0;
+}
+
+/**
+ * dp_catalog_aux_reset() - reset AUX controller
+ *
+ * @dp_catalog: DP catalog structure
+ *
+ * return: void
+ *
+ * This function resets the AUX controller.
+ *
+ * NOTE: resetting the AUX controller also clears any pending HPD related
+ * interrupts
+ *
+ */
+void dp_catalog_aux_reset(struct dp_catalog *dp_catalog)
+{
+ u32 aux_ctrl;
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL);
+
+ aux_ctrl |= DP_AUX_CTRL_RESET;
+ dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
+ usleep_range(1000, 1100); /* h/w recommended delay */
+
+ aux_ctrl &= ~DP_AUX_CTRL_RESET;
+ dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
+}
+
+void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable)
+{
+ u32 aux_ctrl;
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL);
+
+ if (enable) {
+ dp_write_aux(catalog, REG_DP_TIMEOUT_COUNT, 0xffff);
+ dp_write_aux(catalog, REG_DP_AUX_LIMITS, 0xffff);
+ aux_ctrl |= DP_AUX_CTRL_ENABLE;
+ } else {
+ aux_ctrl &= ~DP_AUX_CTRL_ENABLE;
+ }
+
+ dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
+}
+
+void dp_catalog_aux_update_cfg(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+ struct dp_io *dp_io = catalog->io;
+ struct phy *phy = dp_io->phy;
+
+ phy_calibrate(phy);
+}
+
+int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog)
+{
+ u32 state;
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ /* poll for hpd connected status every 2ms and timeout after 500ms */
+ return readl_poll_timeout(catalog->io->dp_controller.aux.base +
+ REG_DP_DP_HPD_INT_STATUS,
+ state, state & DP_DP_HPD_STATE_STATUS_CONNECTED,
+ 2000, 500000);
+}
+
+static void dump_regs(void __iomem *base, int len)
+{
+ int i;
+ u32 x0, x4, x8, xc;
+ u32 addr_off = 0;
+
+ len = DIV_ROUND_UP(len, 16);
+ for (i = 0; i < len; i++) {
+ x0 = readl_relaxed(base + addr_off);
+ x4 = readl_relaxed(base + addr_off + 0x04);
+ x8 = readl_relaxed(base + addr_off + 0x08);
+ xc = readl_relaxed(base + addr_off + 0x0c);
+
+		pr_info("%08x: %08x %08x %08x %08x\n", addr_off, x0, x4, x8, xc);
+ addr_off += 16;
+ }
+}
+
+void dp_catalog_dump_regs(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+ struct dss_io_data *io = &catalog->io->dp_controller;
+
+ pr_info("AHB regs\n");
+ dump_regs(io->ahb.base, io->ahb.len);
+
+ pr_info("AUXCLK regs\n");
+ dump_regs(io->aux.base, io->aux.len);
+
+ pr_info("LCLK regs\n");
+ dump_regs(io->link.base, io->link.len);
+
+ pr_info("P0CLK regs\n");
+ dump_regs(io->p0.base, io->p0.len);
+}
+
+u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+ u32 intr, intr_ack;
+
+ intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS);
+ intr &= ~DP_INTERRUPT_STATUS1_MASK;
+ intr_ack = (intr & DP_INTERRUPT_STATUS1)
+ << DP_INTERRUPT_STATUS_ACK_SHIFT;
+ dp_write_ahb(catalog, REG_DP_INTR_STATUS, intr_ack |
+ DP_INTERRUPT_STATUS1_MASK);
+
+ return intr;
+}
+
+/* controller related catalog functions */
+void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog,
+ u32 dp_tu, u32 valid_boundary,
+ u32 valid_boundary2)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ dp_write_link(catalog, REG_DP_VALID_BOUNDARY, valid_boundary);
+ dp_write_link(catalog, REG_DP_TU, dp_tu);
+ dp_write_link(catalog, REG_DP_VALID_BOUNDARY_2, valid_boundary2);
+}
+
+void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ dp_write_link(catalog, REG_DP_STATE_CTRL, state);
+}
+
+void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 cfg)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ drm_dbg_dp(catalog->drm_dev, "DP_CONFIGURATION_CTRL=0x%x\n", cfg);
+
+ dp_write_link(catalog, REG_DP_CONFIGURATION_CTRL, cfg);
+}
+
+void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+ u32 ln_0 = 0, ln_1 = 1, ln_2 = 2, ln_3 = 3; /* One-to-One mapping */
+ u32 ln_mapping;
+
+ ln_mapping = ln_0 << LANE0_MAPPING_SHIFT;
+ ln_mapping |= ln_1 << LANE1_MAPPING_SHIFT;
+ ln_mapping |= ln_2 << LANE2_MAPPING_SHIFT;
+ ln_mapping |= ln_3 << LANE3_MAPPING_SHIFT;
+
+ dp_write_link(catalog, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING,
+ ln_mapping);
+}
+
+void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog,
+ bool enable)
+{
+ u32 mainlink_ctrl;
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ drm_dbg_dp(catalog->drm_dev, "enable=%d\n", enable);
+ if (enable) {
+ /*
+		 * To make sure link reg writes happen before other operations,
+ * dp_write_link() function uses writel()
+ */
+ mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+
+ mainlink_ctrl &= ~(DP_MAINLINK_CTRL_RESET |
+ DP_MAINLINK_CTRL_ENABLE);
+ dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+
+ mainlink_ctrl |= DP_MAINLINK_CTRL_RESET;
+ dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+
+ mainlink_ctrl &= ~DP_MAINLINK_CTRL_RESET;
+ dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+
+ mainlink_ctrl |= (DP_MAINLINK_CTRL_ENABLE |
+ DP_MAINLINK_FB_BOUNDARY_SEL);
+ dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+ } else {
+ mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+ mainlink_ctrl &= ~DP_MAINLINK_CTRL_ENABLE;
+ dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+ }
+}
+
+void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog,
+ u32 colorimetry_cfg,
+ u32 test_bits_depth)
+{
+ u32 misc_val;
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ misc_val = dp_read_link(catalog, REG_DP_MISC1_MISC0);
+
+ /* clear bpp bits */
+ misc_val &= ~(0x07 << DP_MISC0_TEST_BITS_DEPTH_SHIFT);
+ misc_val |= colorimetry_cfg << DP_MISC0_COLORIMETRY_CFG_SHIFT;
+ misc_val |= test_bits_depth << DP_MISC0_TEST_BITS_DEPTH_SHIFT;
+ /* Configure clock to synchronous mode */
+ misc_val |= DP_MISC0_SYNCHRONOUS_CLK;
+
+ drm_dbg_dp(catalog->drm_dev, "misc settings = 0x%x\n", misc_val);
+ dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val);
+}
+
+void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog,
+ u32 rate, u32 stream_rate_khz,
+ bool fixed_nvid)
+{
+ u32 pixel_m, pixel_n;
+ u32 mvid, nvid, pixel_div = 0, dispcc_input_rate;
+ u32 const nvid_fixed = DP_LINK_CONSTANT_N_VALUE;
+ u32 const link_rate_hbr2 = 540000;
+ u32 const link_rate_hbr3 = 810000;
+ unsigned long den, num;
+
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ if (rate == link_rate_hbr3)
+ pixel_div = 6;
+ else if (rate == 162000 || rate == 270000)
+ pixel_div = 2;
+ else if (rate == link_rate_hbr2)
+ pixel_div = 4;
+ else
+ DRM_ERROR("Invalid pixel mux divider\n");
+
+ dispcc_input_rate = (rate * 10) / pixel_div;
+
+ rational_best_approximation(dispcc_input_rate, stream_rate_khz,
+ (unsigned long)(1 << 16) - 1,
+ (unsigned long)(1 << 16) - 1, &den, &num);
+
+ den = ~(den - num);
+ den = den & 0xFFFF;
+ pixel_m = num;
+ pixel_n = den;
+
+ mvid = (pixel_m & 0xFFFF) * 5;
+ nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
+
+ if (nvid < nvid_fixed) {
+ u32 temp;
+
+ temp = (nvid_fixed / nvid) * nvid;
+ mvid = (nvid_fixed / nvid) * mvid;
+ nvid = temp;
+ }
+
+ if (link_rate_hbr2 == rate)
+ nvid *= 2;
+
+ if (link_rate_hbr3 == rate)
+ nvid *= 3;
+
+ drm_dbg_dp(catalog->drm_dev, "mvid=0x%x, nvid=0x%x\n", mvid, nvid);
+ dp_write_link(catalog, REG_DP_SOFTWARE_MVID, mvid);
+ dp_write_link(catalog, REG_DP_SOFTWARE_NVID, nvid);
+ dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0);
+}
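+
+/*
+ * Worked example (hand-traced from the code above, for illustration
+ * only): rate = 270000 and stream_rate_khz = 148500 give pixel_div = 2
+ * and dispcc_input_rate = 1350000.  rational_best_approximation()
+ * reduces 1350000/148500 to 100/11, so mvid = 11 * 5 = 55 and
+ * nvid = 100; both are then scaled by 32768/100 = 327 towards
+ * DP_LINK_CONSTANT_N_VALUE, leaving mvid = 17985 and nvid = 32700.
+ */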
+
+int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog,
+ u32 state_bit)
+{
+ int bit, ret;
+ u32 data;
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ bit = BIT(state_bit - 1);
+ drm_dbg_dp(catalog->drm_dev, "hw: bit=%d train=%d\n", bit, state_bit);
+ dp_catalog_ctrl_state_ctrl(dp_catalog, bit);
+
+ bit = BIT(state_bit - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT;
+
+ /* Poll for mainlink ready status */
+ ret = readx_poll_timeout(readl, catalog->io->dp_controller.link.base +
+ REG_DP_MAINLINK_READY,
+ data, data & bit,
+ POLLING_SLEEP_US, POLLING_TIMEOUT_US);
+ if (ret < 0) {
+ DRM_ERROR("set state_bit for link_train=%d failed\n", state_bit);
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * dp_catalog_hw_revision() - retrieve DP hw revision
+ *
+ * @dp_catalog: DP catalog structure
+ *
+ * Return: DP controller hw revision
+ *
+ */
+u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog)
+{
+ const struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ return dp_read_ahb(catalog, REG_DP_HW_VERSION);
+}
+
+/**
+ * dp_catalog_ctrl_reset() - reset DP controller
+ *
+ * @dp_catalog: DP catalog structure
+ *
+ * return: void
+ *
+ * This function resets the DP controller.
+ *
+ * NOTE: resetting the DP controller also clears any pending HPD related
+ * interrupts
+ *
+ */
+void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog)
+{
+ u32 sw_reset;
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ sw_reset = dp_read_ahb(catalog, REG_DP_SW_RESET);
+
+ sw_reset |= DP_SW_RESET;
+ dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
+ usleep_range(1000, 1100); /* h/w recommended delay */
+
+ sw_reset &= ~DP_SW_RESET;
+ dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
+}
+
+bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog)
+{
+ u32 data;
+ int ret;
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ /* Poll for mainlink ready status */
+ ret = readl_poll_timeout(catalog->io->dp_controller.link.base +
+ REG_DP_MAINLINK_READY,
+ data, data & DP_MAINLINK_READY_FOR_VIDEO,
+ POLLING_SLEEP_US, POLLING_TIMEOUT_US);
+ if (ret < 0) {
+ DRM_ERROR("mainlink not ready\n");
+ return false;
+ }
+
+ return true;
+}
+
+void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog,
+ bool enable)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ if (enable) {
+ dp_write_ahb(catalog, REG_DP_INTR_STATUS,
+ DP_INTERRUPT_STATUS1_MASK);
+ dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
+ DP_INTERRUPT_STATUS2_MASK);
+ } else {
+ dp_write_ahb(catalog, REG_DP_INTR_STATUS, 0x00);
+ dp_write_ahb(catalog, REG_DP_INTR_STATUS2, 0x00);
+ }
+}
+
+void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
+ u32 intr_mask, bool en)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ u32 config = dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);
+
+ config = (en ? config | intr_mask : config & ~intr_mask);
+
+ drm_dbg_dp(catalog->drm_dev, "intr_mask=%#x config=%#x\n",
+ intr_mask, config);
+ dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK,
+ config & DP_DP_HPD_INT_MASK);
+}
+
+void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
+
+ /* Configure REFTIMER and enable it */
+ reftimer |= DP_DP_HPD_REFTIMER_ENABLE;
+ dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
+
+ /* Enable HPD */
+ dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN);
+}
+
+u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+ u32 status;
+
+ status = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
+ drm_dbg_dp(catalog->drm_dev, "aux status: %#x\n", status);
+ status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT;
+ status &= DP_DP_HPD_STATE_STATUS_BITS_MASK;
+
+ return status;
+}
+
+u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+ int isr, mask;
+
+ isr = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
+ dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK,
+ (isr & DP_DP_HPD_INT_MASK));
+ mask = dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);
+
+ /*
+ * We only want to return interrupts that are unmasked to the caller.
+ * However, the interrupt status field also contains other
+ * informational bits about the HPD state status, so we only mask
+ * out the part of the register that tells us about which interrupts
+ * are pending.
+ */
+ return isr & (mask | ~DP_DP_HPD_INT_MASK);
+}
+
+int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+ u32 intr, intr_ack;
+
+ intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS2);
+ intr &= ~DP_INTERRUPT_STATUS2_MASK;
+ intr_ack = (intr & DP_INTERRUPT_STATUS2)
+ << DP_INTERRUPT_STATUS_ACK_SHIFT;
+ dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
+ intr_ack | DP_INTERRUPT_STATUS2_MASK);
+
+ return intr;
+}
+
+void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ dp_write_ahb(catalog, REG_DP_PHY_CTRL,
+ DP_PHY_CTRL_SW_RESET | DP_PHY_CTRL_SW_RESET_PLL);
+ usleep_range(1000, 1100); /* h/w recommended delay */
+ dp_write_ahb(catalog, REG_DP_PHY_CTRL, 0x0);
+}
+
+int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog,
+ u8 v_level, u8 p_level)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+ struct dp_io *dp_io = catalog->io;
+ struct phy *phy = dp_io->phy;
+ struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
+
+ /* TODO: Update for all lanes instead of just first one */
+ opts_dp->voltage[0] = v_level;
+ opts_dp->pre[0] = p_level;
+ opts_dp->set_voltages = 1;
+ phy_configure(phy, &dp_io->phy_opts);
+ opts_dp->set_voltages = 0;
+
+ return 0;
+}
+
+void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
+ u32 pattern)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+ u32 value = 0x0;
+
+ /* Make sure to clear the current pattern before starting a new one */
+ dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0);
+
+ drm_dbg_dp(catalog->drm_dev, "pattern: %#x\n", pattern);
+ switch (pattern) {
+ case DP_PHY_TEST_PATTERN_D10_2:
+ dp_write_link(catalog, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_TRAINING_PATTERN1);
+ break;
+ case DP_PHY_TEST_PATTERN_ERROR_COUNT:
+ value &= ~(1 << 16);
+ dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
+ value |= SCRAMBLER_RESET_COUNT_VALUE;
+ dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
+ dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
+ DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
+ dp_write_link(catalog, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
+ break;
+ case DP_PHY_TEST_PATTERN_PRBS7:
+ dp_write_link(catalog, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_PRBS7);
+ break;
+ case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+ dp_write_link(catalog, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN);
+ /* 00111110000011111000001111100000 */
+ dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0,
+ 0x3E0F83E0);
+ /* 00001111100000111110000011111000 */
+ dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1,
+ 0x0F83E0F8);
+ /* 1111100000111110 */
+ dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2,
+ 0x0000F83E);
+ break;
+ case DP_PHY_TEST_PATTERN_CP2520:
+ value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+ value &= ~DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER;
+ dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
+
+ value = DP_HBR2_ERM_PATTERN;
+ dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
+ value |= SCRAMBLER_RESET_COUNT_VALUE;
+ dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+ value);
+ dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
+ DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
+ dp_write_link(catalog, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
+ value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+ value |= DP_MAINLINK_CTRL_ENABLE;
+ dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
+ break;
+ case DP_PHY_TEST_PATTERN_SEL_MASK:
+ dp_write_link(catalog, REG_DP_MAINLINK_CTRL,
+ DP_MAINLINK_CTRL_ENABLE);
+ dp_write_link(catalog, REG_DP_STATE_CTRL,
+ DP_STATE_CTRL_LINK_TRAINING_PATTERN4);
+ break;
+ default:
+ drm_dbg_dp(catalog->drm_dev,
+ "No valid test pattern requested: %#x\n", pattern);
+ break;
+ }
+}
+
+u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ return dp_read_link(catalog, REG_DP_MAINLINK_READY);
+}
+
+/* panel related catalog functions */
+int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+ u32 reg;
+
+ dp_write_link(catalog, REG_DP_TOTAL_HOR_VER,
+ dp_catalog->total);
+ dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC,
+ dp_catalog->sync_start);
+ dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY,
+ dp_catalog->width_blanking);
+ dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_catalog->dp_active);
+
+ reg = dp_read_p0(catalog, MMSS_DP_INTF_CONFIG);
+
+ if (dp_catalog->wide_bus_en)
+ reg |= DP_INTF_CONFIG_DATABUS_WIDEN;
+ else
+ reg &= ~DP_INTF_CONFIG_DATABUS_WIDEN;
+
+ DRM_DEBUG_DP("wide_bus_en=%d reg=%#x\n", dp_catalog->wide_bus_en, reg);
+
+ dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, reg);
+ return 0;
+}
+
+void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog,
+ struct drm_display_mode *drm_mode)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+ u32 hsync_period, vsync_period;
+ u32 display_v_start, display_v_end;
+ u32 hsync_start_x, hsync_end_x;
+ u32 v_sync_width;
+ u32 hsync_ctl;
+ u32 display_hctl;
+
+	/* TPG config parameters */
+ hsync_period = drm_mode->htotal;
+ vsync_period = drm_mode->vtotal;
+
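+	/*
+	 * The timing engine counts in link-interface pixels, so the
+	 * vertical positions below are scaled by hsync_period and then
+	 * offset by the horizontal sync-to-active distance.
+	 */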
+ display_v_start = ((drm_mode->vtotal - drm_mode->vsync_start) *
+ hsync_period);
+ display_v_end = ((vsync_period - (drm_mode->vsync_start -
+ drm_mode->vdisplay))
+ * hsync_period) - 1;
+
+ display_v_start += drm_mode->htotal - drm_mode->hsync_start;
+ display_v_end -= (drm_mode->hsync_start - drm_mode->hdisplay);
+
+ hsync_start_x = drm_mode->htotal - drm_mode->hsync_start;
+ hsync_end_x = hsync_period - (drm_mode->hsync_start -
+ drm_mode->hdisplay) - 1;
+
+ v_sync_width = drm_mode->vsync_end - drm_mode->vsync_start;
+
+ hsync_ctl = (hsync_period << 16) |
+ (drm_mode->hsync_end - drm_mode->hsync_start);
+ display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+ dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0x0);
+ dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl);
+ dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period *
+ hsync_period);
+ dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width *
+ hsync_period);
+ dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0);
+ dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0);
+ dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl);
+ dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_HCTL, 0);
+ dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F0, display_v_start);
+ dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end);
+ dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F1, 0);
+ dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F1, 0);
+ dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F0, 0);
+ dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F0, 0);
+ dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F1, 0);
+ dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F1, 0);
+ dp_write_p0(catalog, MMSS_DP_INTF_POLARITY_CTL, 0);
+
+ dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL,
+ DP_TPG_CHECKERED_RECT_PATTERN);
+ dp_write_p0(catalog, MMSS_DP_TPG_VIDEO_CONFIG,
+ DP_TPG_VIDEO_CONFIG_BPP_8BIT |
+ DP_TPG_VIDEO_CONFIG_RGB);
+ dp_write_p0(catalog, MMSS_DP_BIST_ENABLE,
+ DP_BIST_ENABLE_DPBIST_EN);
+ dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN,
+ DP_TIMING_ENGINE_EN_EN);
+ drm_dbg_dp(catalog->drm_dev, "%s: enabled tpg\n", __func__);
+}
+
+void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, 0x0);
+ dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, 0x0);
+ dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, 0x0);
+}
+
+struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io)
+{
+ struct dp_catalog_private *catalog;
+
+ if (!io) {
+ DRM_ERROR("invalid input\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ catalog = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL);
+ if (!catalog)
+ return ERR_PTR(-ENOMEM);
+
+ catalog->dev = dev;
+ catalog->io = io;
+
+ return &catalog->dp_catalog;
+}
+
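+/*
+ * The audio helpers below pass their arguments through struct
+ * dp_catalog: sdp_type and sdp_header select the register via
+ * catalog->audio_map, while audio_data carries the value that is
+ * read back or written out.
+ */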
+void dp_catalog_audio_get_header(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog;
+ u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
+ enum dp_catalog_audio_sdp_type sdp;
+ enum dp_catalog_audio_header_type header;
+
+ if (!dp_catalog)
+ return;
+
+ catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ sdp_map = catalog->audio_map;
+ sdp = dp_catalog->sdp_type;
+ header = dp_catalog->sdp_header;
+
+ dp_catalog->audio_data = dp_read_link(catalog,
+ sdp_map[sdp][header]);
+}
+
+void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog;
+ u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
+ enum dp_catalog_audio_sdp_type sdp;
+ enum dp_catalog_audio_header_type header;
+ u32 data;
+
+ if (!dp_catalog)
+ return;
+
+ catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ sdp_map = catalog->audio_map;
+ sdp = dp_catalog->sdp_type;
+ header = dp_catalog->sdp_header;
+ data = dp_catalog->audio_data;
+
+ dp_write_link(catalog, sdp_map[sdp][header], data);
+}
+
+void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog;
+ u32 acr_ctrl, select;
+
+ if (!dp_catalog)
+ return;
+
+ catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ select = dp_catalog->audio_data;
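+	/*
+	 * select picks the ACR N/CTS source; BIT(8), BIT(14) and BIT(31)
+	 * are fixed control/enable bits whose individual names are not
+	 * spelled out in dp_reg.h.
+	 */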
+ acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
+
+ drm_dbg_dp(catalog->drm_dev, "select: %#x, acr_ctrl: %#x\n",
+ select, acr_ctrl);
+
+ dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
+}
+
+void dp_catalog_audio_enable(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog;
+ bool enable;
+ u32 audio_ctrl;
+
+ if (!dp_catalog)
+ return;
+
+ catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ enable = !!dp_catalog->audio_data;
+ audio_ctrl = dp_read_link(catalog, MMSS_DP_AUDIO_CFG);
+
+ if (enable)
+ audio_ctrl |= BIT(0);
+ else
+ audio_ctrl &= ~BIT(0);
+
+ drm_dbg_dp(catalog->drm_dev, "dp_audio_cfg = 0x%x\n", audio_ctrl);
+
+ dp_write_link(catalog, MMSS_DP_AUDIO_CFG, audio_ctrl);
+	/* make sure the audio enable/disable write reaches the hardware */
+ wmb();
+}
+
+void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog;
+ u32 sdp_cfg = 0;
+ u32 sdp_cfg2 = 0;
+
+ if (!dp_catalog)
+ return;
+
+ catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ sdp_cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG);
+ /* AUDIO_TIMESTAMP_SDP_EN */
+ sdp_cfg |= BIT(1);
+ /* AUDIO_STREAM_SDP_EN */
+ sdp_cfg |= BIT(2);
+ /* AUDIO_COPY_MANAGEMENT_SDP_EN */
+ sdp_cfg |= BIT(5);
+ /* AUDIO_ISRC_SDP_EN */
+ sdp_cfg |= BIT(6);
+ /* AUDIO_INFOFRAME_SDP_EN */
+ sdp_cfg |= BIT(20);
+
+ drm_dbg_dp(catalog->drm_dev, "sdp_cfg = 0x%x\n", sdp_cfg);
+
+ dp_write_link(catalog, MMSS_DP_SDP_CFG, sdp_cfg);
+
+ sdp_cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2);
+ /* IFRM_REGSRC -> Do not use reg values */
+ sdp_cfg2 &= ~BIT(0);
+ /* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */
+ sdp_cfg2 &= ~BIT(1);
+
+ drm_dbg_dp(catalog->drm_dev, "sdp_cfg2 = 0x%x\n", sdp_cfg2);
+
+ dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2);
+}
+
+void dp_catalog_audio_init(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog;
+
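+	/*
+	 * One row per SDP type, one column per header word. HEADER_2 and
+	 * HEADER_3 of each SDP share the *_1 register.
+	 */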
+ static u32 sdp_map[][DP_AUDIO_SDP_HEADER_MAX] = {
+ {
+ MMSS_DP_AUDIO_STREAM_0,
+ MMSS_DP_AUDIO_STREAM_1,
+ MMSS_DP_AUDIO_STREAM_1,
+ },
+ {
+ MMSS_DP_AUDIO_TIMESTAMP_0,
+ MMSS_DP_AUDIO_TIMESTAMP_1,
+ MMSS_DP_AUDIO_TIMESTAMP_1,
+ },
+ {
+ MMSS_DP_AUDIO_INFOFRAME_0,
+ MMSS_DP_AUDIO_INFOFRAME_1,
+ MMSS_DP_AUDIO_INFOFRAME_1,
+ },
+ {
+ MMSS_DP_AUDIO_COPYMANAGEMENT_0,
+ MMSS_DP_AUDIO_COPYMANAGEMENT_1,
+ MMSS_DP_AUDIO_COPYMANAGEMENT_1,
+ },
+ {
+ MMSS_DP_AUDIO_ISRC_0,
+ MMSS_DP_AUDIO_ISRC_1,
+ MMSS_DP_AUDIO_ISRC_1,
+ },
+ };
+
+ if (!dp_catalog)
+ return;
+
+ catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ catalog->audio_map = sdp_map;
+}
+
+void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog)
+{
+ struct dp_catalog_private *catalog;
+ u32 mainlink_levels, safe_to_exit_level;
+
+ if (!dp_catalog)
+ return;
+
+ catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ safe_to_exit_level = dp_catalog->audio_data;
+ mainlink_levels = dp_read_link(catalog, REG_DP_MAINLINK_LEVELS);
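+	/* clear the safe-to-exit-level field (bits 4:0) before inserting the new value */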
+ mainlink_levels &= 0xFE0;
+ mainlink_levels |= safe_to_exit_level;
+
+ drm_dbg_dp(catalog->drm_dev,
+ "mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n",
+ mainlink_levels, safe_to_exit_level);
+
+ dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, mainlink_levels);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
new file mode 100644
index 000000000..f36b7b372
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_CATALOG_H_
+#define _DP_CATALOG_H_
+
+#include <drm/drm_modes.h>
+
+#include "dp_parser.h"
+#include "disp/msm_disp_snapshot.h"
+
+/* interrupts */
+#define DP_INTR_HPD BIT(0)
+#define DP_INTR_AUX_XFER_DONE BIT(3)
+#define DP_INTR_WRONG_ADDR BIT(6)
+#define DP_INTR_TIMEOUT BIT(9)
+#define DP_INTR_NACK_DEFER BIT(12)
+#define DP_INTR_WRONG_DATA_CNT BIT(15)
+#define DP_INTR_I2C_NACK BIT(18)
+#define DP_INTR_I2C_DEFER BIT(21)
+#define DP_INTR_PLL_UNLOCKED BIT(24)
+#define DP_INTR_AUX_ERROR BIT(27)
+
+#define DP_INTR_READY_FOR_VIDEO BIT(0)
+#define DP_INTR_IDLE_PATTERN_SENT BIT(3)
+#define DP_INTR_FRAME_END BIT(6)
+#define DP_INTR_CRC_UPDATED BIT(9)
+
+#define DP_AUX_CFG_MAX_VALUE_CNT 3
+
+/* PHY AUX config registers */
+enum dp_phy_aux_config_type {
+ PHY_AUX_CFG0,
+ PHY_AUX_CFG1,
+ PHY_AUX_CFG2,
+ PHY_AUX_CFG3,
+ PHY_AUX_CFG4,
+ PHY_AUX_CFG5,
+ PHY_AUX_CFG6,
+ PHY_AUX_CFG7,
+ PHY_AUX_CFG8,
+ PHY_AUX_CFG9,
+ PHY_AUX_CFG_MAX,
+};
+
+enum dp_catalog_audio_sdp_type {
+ DP_AUDIO_SDP_STREAM,
+ DP_AUDIO_SDP_TIMESTAMP,
+ DP_AUDIO_SDP_INFOFRAME,
+ DP_AUDIO_SDP_COPYMANAGEMENT,
+ DP_AUDIO_SDP_ISRC,
+ DP_AUDIO_SDP_MAX,
+};
+
+enum dp_catalog_audio_header_type {
+ DP_AUDIO_SDP_HEADER_1,
+ DP_AUDIO_SDP_HEADER_2,
+ DP_AUDIO_SDP_HEADER_3,
+ DP_AUDIO_SDP_HEADER_MAX,
+};
+
+struct dp_catalog {
+ u32 aux_data;
+ u32 total;
+ u32 sync_start;
+ u32 width_blanking;
+ u32 dp_active;
+ enum dp_catalog_audio_sdp_type sdp_type;
+ enum dp_catalog_audio_header_type sdp_header;
+ u32 audio_data;
+ bool wide_bus_en;
+};
+
+/* Debug module */
+void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *disp_state);
+
+/* AUX APIs */
+u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog);
+int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog);
+int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog);
+int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read);
+int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog);
+void dp_catalog_aux_reset(struct dp_catalog *dp_catalog);
+void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable);
+void dp_catalog_aux_update_cfg(struct dp_catalog *dp_catalog);
+int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog);
+u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog);
+
+/* DP Controller APIs */
+void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state);
+void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 config);
+void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, bool enable);
+void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, u32 cc, u32 tb);
+void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 rate,
+ u32 stream_rate_khz, bool fixed_nvid);
+int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog, u32 pattern);
+u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog);
+bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable);
+void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
+ u32 intr_mask, bool en);
+void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog);
+u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog);
+u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog);
+int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog, u8 v_level,
+ u8 p_level);
+int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog,
+ u32 dp_tu, u32 valid_boundary,
+ u32 valid_boundary2);
+void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
+ u32 pattern);
+u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog);
+
+/* DP Panel APIs */
+int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog);
+void dp_catalog_dump_regs(struct dp_catalog *dp_catalog);
+void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog,
+ struct drm_display_mode *drm_mode);
+void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog);
+
+struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io);
+
+/* DP Audio APIs */
+void dp_catalog_audio_get_header(struct dp_catalog *catalog);
+void dp_catalog_audio_set_header(struct dp_catalog *catalog);
+void dp_catalog_audio_config_acr(struct dp_catalog *catalog);
+void dp_catalog_audio_enable(struct dp_catalog *catalog);
+void dp_catalog_audio_config_sdp(struct dp_catalog *catalog);
+void dp_catalog_audio_init(struct dp_catalog *catalog);
+void dp_catalog_audio_sfe_level(struct dp_catalog *catalog);
+
+#endif /* _DP_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
new file mode 100644
index 000000000..103eef9f0
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -0,0 +1,2049 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-dp.h>
+#include <linux/pm_opp.h>
+
+#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
+
+#include "dp_reg.h"
+#include "dp_ctrl.h"
+#include "dp_link.h"
+
+#define DP_KHZ_TO_HZ 1000
+#define IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES (30 * HZ / 1000) /* 30 ms */
+#define WAIT_FOR_VIDEO_READY_TIMEOUT_JIFFIES (HZ / 2)
+
+#define DP_CTRL_INTR_READY_FOR_VIDEO BIT(0)
+#define DP_CTRL_INTR_IDLE_PATTERN_SENT BIT(3)
+
+#define MR_LINK_TRAINING1 0x8
+#define MR_LINK_SYMBOL_ERM 0x80
+#define MR_LINK_PRBS7 0x100
+#define MR_LINK_CUSTOM80 0x200
+#define MR_LINK_TRAINING4 0x40
+
+enum {
+ DP_TRAINING_NONE,
+ DP_TRAINING_1,
+ DP_TRAINING_2,
+};
+
+struct dp_tu_calc_input {
+	u64 lclk; /* link symbol clock in MHz: 162, 270, 540 or 810 */
+	u64 pclk_khz; /* pixel clock in kHz */
+	u64 hactive; /* active h-width in pixels */
+	u64 hporch; /* hbp + hfp + hsync pulse */
+	int nlanes; /* number of lanes */
+	int bpp; /* bits per pixel */
+	int pixel_enc; /* 444, 420, 422 */
+	int dsc_en; /* dsc on/off */
+	int async_en; /* async mode */
+	int fec_en; /* forward error correction on/off */
+	int compress_ratio; /* 2:1 = 200, 3:1 = 300, 3.75:1 = 375 */
+	int num_of_dsc_slices; /* number of slices per line */
+};
+
+struct dp_vc_tu_mapping_table {
+ u32 vic;
+ u8 lanes;
+	u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20), 810(30) */
+ u8 bpp;
+ u8 valid_boundary_link;
+ u16 delay_start_link;
+ bool boundary_moderation_en;
+ u8 valid_lower_boundary_link;
+ u8 upper_boundary_count;
+ u8 lower_boundary_count;
+ u8 tu_size_minus1;
+};
+
+struct dp_ctrl_private {
+ struct dp_ctrl dp_ctrl;
+ struct drm_device *drm_dev;
+ struct device *dev;
+ struct drm_dp_aux *aux;
+ struct dp_panel *panel;
+ struct dp_link *link;
+ struct dp_power *power;
+ struct dp_parser *parser;
+ struct dp_catalog *catalog;
+
+ struct completion idle_comp;
+ struct completion video_comp;
+};
+
+static int dp_aux_link_configure(struct drm_dp_aux *aux,
+ struct dp_link_info *link)
+{
+ u8 values[2];
+ int err;
+
+ values[0] = drm_dp_link_rate_to_bw_code(link->rate);
+ values[1] = link->num_lanes;
+
+ if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
+{
+ struct dp_ctrl_private *ctrl;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+ reinit_completion(&ctrl->idle_comp);
+ dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_PUSH_IDLE);
+
+ if (!wait_for_completion_timeout(&ctrl->idle_comp,
+ IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES))
+		pr_warn("PUSH_IDLE pattern timed out\n");
+
+ drm_dbg_dp(ctrl->drm_dev, "mainlink off\n");
+}
+
+static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
+{
+ u32 config = 0, tbd;
+ const u8 *dpcd = ctrl->panel->dpcd;
+
+ /* Default-> LSCLK DIV: 1/4 LCLK */
+ config |= (2 << DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT);
+
+ /* Scrambler reset enable */
+ if (drm_dp_alternate_scrambler_reset_cap(dpcd))
+ config |= DP_CONFIGURATION_CTRL_ASSR;
+
+ tbd = dp_link_get_test_bits_depth(ctrl->link,
+ ctrl->panel->dp_mode.bpp);
+
+ if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN) {
+ pr_debug("BIT_DEPTH not set. Configure default\n");
+ tbd = DP_TEST_BIT_DEPTH_8;
+ }
+
+ config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT;
+
+ /* Num of Lanes */
+ config |= ((ctrl->link->link_params.num_lanes - 1)
+ << DP_CONFIGURATION_CTRL_NUM_OF_LANES_SHIFT);
+
+ if (drm_dp_enhanced_frame_cap(dpcd))
+ config |= DP_CONFIGURATION_CTRL_ENHANCED_FRAMING;
+
+ config |= DP_CONFIGURATION_CTRL_P_INTERLACED; /* progressive video */
+
+ /* sync clock & static Mvid */
+ config |= DP_CONFIGURATION_CTRL_STATIC_DYNAMIC_CN;
+ config |= DP_CONFIGURATION_CTRL_SYNC_ASYNC_CLK;
+
+ dp_catalog_ctrl_config_ctrl(ctrl->catalog, config);
+}
+
+static void dp_ctrl_configure_source_params(struct dp_ctrl_private *ctrl)
+{
+ u32 cc, tb;
+
+ dp_catalog_ctrl_lane_mapping(ctrl->catalog);
+ dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
+
+ dp_ctrl_config_ctrl(ctrl);
+
+ tb = dp_link_get_test_bits_depth(ctrl->link,
+ ctrl->panel->dp_mode.bpp);
+ cc = dp_link_get_colorimetry_config(ctrl->link);
+ dp_catalog_ctrl_config_misc(ctrl->catalog, cc, tb);
+ dp_panel_timing_cfg(ctrl->panel);
+}
+
+/*
+ * The structure and the few functions below are an IP/hardware-specific
+ * implementation of the transfer unit (TU) calculation. Most of it
+ * mirrors the hardware algorithm directly and is therefore sparsely
+ * commented.
+ */
+struct tu_algo_data {
+ s64 lclk_fp;
+ s64 pclk_fp;
+ s64 lwidth;
+ s64 lwidth_fp;
+ s64 hbp_relative_to_pclk;
+ s64 hbp_relative_to_pclk_fp;
+ int nlanes;
+ int bpp;
+ int pixelEnc;
+ int dsc_en;
+ int async_en;
+ int bpc;
+
+ uint delay_start_link_extra_pixclk;
+ int extra_buffer_margin;
+ s64 ratio_fp;
+ s64 original_ratio_fp;
+
+ s64 err_fp;
+ s64 n_err_fp;
+ s64 n_n_err_fp;
+ int tu_size;
+ int tu_size_desired;
+ int tu_size_minus1;
+
+ int valid_boundary_link;
+ s64 resulting_valid_fp;
+ s64 total_valid_fp;
+ s64 effective_valid_fp;
+ s64 effective_valid_recorded_fp;
+ int n_tus;
+ int n_tus_per_lane;
+ int paired_tus;
+ int remainder_tus;
+ int remainder_tus_upper;
+ int remainder_tus_lower;
+ int extra_bytes;
+ int filler_size;
+ int delay_start_link;
+
+ int extra_pclk_cycles;
+ int extra_pclk_cycles_in_link_clk;
+ s64 ratio_by_tu_fp;
+ s64 average_valid2_fp;
+ int new_valid_boundary_link;
+ int remainder_symbols_exist;
+ int n_symbols;
+ s64 n_remainder_symbols_per_lane_fp;
+ s64 last_partial_tu_fp;
+ s64 TU_ratio_err_fp;
+
+ int n_tus_incl_last_incomplete_tu;
+ int extra_pclk_cycles_tmp;
+ int extra_pclk_cycles_in_link_clk_tmp;
+ int extra_required_bytes_new_tmp;
+ int filler_size_tmp;
+ int lower_filler_size_tmp;
+ int delay_start_link_tmp;
+
+ bool boundary_moderation_en;
+ int boundary_mod_lower_err;
+ int upper_boundary_count;
+ int lower_boundary_count;
+ int i_upper_boundary_count;
+ int i_lower_boundary_count;
+ int valid_lower_boundary_link;
+ int even_distribution_BF;
+ int even_distribution_legacy;
+ int even_distribution;
+ int min_hblank_violated;
+ s64 delay_start_time_fp;
+ s64 hbp_time_fp;
+ s64 hactive_time_fp;
+ s64 diff_abs_fp;
+
+ s64 ratio;
+};
+
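+/*
+ * Compare two 32.32 signed fixed-point values.
+ * Returns 0 if a == b, 1 if a > b and 2 if a < b.
+ */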
+static int _tu_param_compare(s64 a, s64 b)
+{
+ u32 a_sign;
+ u32 b_sign;
+ s64 a_temp, b_temp, minus_1;
+
+ if (a == b)
+ return 0;
+
+ minus_1 = drm_fixp_from_fraction(-1, 1);
+
+ a_sign = (a >> 32) & 0x80000000 ? 1 : 0;
+
+ b_sign = (b >> 32) & 0x80000000 ? 1 : 0;
+
+ if (a_sign > b_sign)
+ return 2;
+ else if (b_sign > a_sign)
+ return 1;
+
+ if (!a_sign && !b_sign) { /* positive */
+ if (a > b)
+ return 1;
+ else
+ return 2;
+ } else { /* negative */
+ a_temp = drm_fixp_mul(a, minus_1);
+ b_temp = drm_fixp_mul(b, minus_1);
+
+ if (a_temp > b_temp)
+ return 2;
+ else
+ return 1;
+ }
+}
+
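+/*
+ * Fold the raw input timings into the fixed-point quantities the TU
+ * search operates on: 4:2:0/4:2:2 pixel-encoding adjustments, the DSC
+ * compressed line width, and the FEC link-clock overhead (x0.976).
+ */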
+static void dp_panel_update_tu_timings(struct dp_tu_calc_input *in,
+ struct tu_algo_data *tu)
+{
+ int nlanes = in->nlanes;
+ int dsc_num_slices = in->num_of_dsc_slices;
+ int dsc_num_bytes = 0;
+ int numerator;
+ s64 pclk_dsc_fp;
+ s64 dwidth_dsc_fp;
+ s64 hbp_dsc_fp;
+
+ int tot_num_eoc_symbols = 0;
+ int tot_num_hor_bytes = 0;
+ int tot_num_dummy_bytes = 0;
+ int dwidth_dsc_bytes = 0;
+ int eoc_bytes = 0;
+
+ s64 temp1_fp, temp2_fp, temp3_fp;
+
+ tu->lclk_fp = drm_fixp_from_fraction(in->lclk, 1);
+ tu->pclk_fp = drm_fixp_from_fraction(in->pclk_khz, 1000);
+ tu->lwidth = in->hactive;
+ tu->hbp_relative_to_pclk = in->hporch;
+ tu->nlanes = in->nlanes;
+ tu->bpp = in->bpp;
+ tu->pixelEnc = in->pixel_enc;
+ tu->dsc_en = in->dsc_en;
+ tu->async_en = in->async_en;
+ tu->lwidth_fp = drm_fixp_from_fraction(in->hactive, 1);
+ tu->hbp_relative_to_pclk_fp = drm_fixp_from_fraction(in->hporch, 1);
+
+ if (tu->pixelEnc == 420) {
+ temp1_fp = drm_fixp_from_fraction(2, 1);
+ tu->pclk_fp = drm_fixp_div(tu->pclk_fp, temp1_fp);
+ tu->lwidth_fp = drm_fixp_div(tu->lwidth_fp, temp1_fp);
+		tu->hbp_relative_to_pclk_fp =
+				drm_fixp_div(tu->hbp_relative_to_pclk_fp,
+					     temp1_fp);
+ }
+
+ if (tu->pixelEnc == 422) {
+ switch (tu->bpp) {
+ case 24:
+ tu->bpp = 16;
+ tu->bpc = 8;
+ break;
+ case 30:
+ tu->bpp = 20;
+ tu->bpc = 10;
+ break;
+ default:
+ tu->bpp = 16;
+ tu->bpc = 8;
+ break;
+ }
+ } else {
+ tu->bpc = tu->bpp/3;
+ }
+
+ if (!in->dsc_en)
+ goto fec_check;
+
+ temp1_fp = drm_fixp_from_fraction(in->compress_ratio, 100);
+ temp2_fp = drm_fixp_from_fraction(in->bpp, 1);
+ temp3_fp = drm_fixp_div(temp2_fp, temp1_fp);
+ temp2_fp = drm_fixp_mul(tu->lwidth_fp, temp3_fp);
+
+ temp1_fp = drm_fixp_from_fraction(8, 1);
+ temp3_fp = drm_fixp_div(temp2_fp, temp1_fp);
+
+ numerator = drm_fixp2int(temp3_fp);
+
+ dsc_num_bytes = numerator / dsc_num_slices;
+ eoc_bytes = dsc_num_bytes % nlanes;
+ tot_num_eoc_symbols = nlanes * dsc_num_slices;
+ tot_num_hor_bytes = dsc_num_bytes * dsc_num_slices;
+ tot_num_dummy_bytes = (nlanes - eoc_bytes) * dsc_num_slices;
+
+ if (dsc_num_bytes == 0)
+		pr_info("incorrect number of bytes per slice=%d\n", dsc_num_bytes);
+
+ dwidth_dsc_bytes = (tot_num_hor_bytes +
+ tot_num_eoc_symbols +
+ (eoc_bytes == 0 ? 0 : tot_num_dummy_bytes));
+
+ dwidth_dsc_fp = drm_fixp_from_fraction(dwidth_dsc_bytes, 3);
+
+ temp2_fp = drm_fixp_mul(tu->pclk_fp, dwidth_dsc_fp);
+ temp1_fp = drm_fixp_div(temp2_fp, tu->lwidth_fp);
+ pclk_dsc_fp = temp1_fp;
+
+ temp1_fp = drm_fixp_div(pclk_dsc_fp, tu->pclk_fp);
+ temp2_fp = drm_fixp_mul(tu->hbp_relative_to_pclk_fp, temp1_fp);
+ hbp_dsc_fp = temp2_fp;
+
+ /* output */
+ tu->pclk_fp = pclk_dsc_fp;
+ tu->lwidth_fp = dwidth_dsc_fp;
+ tu->hbp_relative_to_pclk_fp = hbp_dsc_fp;
+
+fec_check:
+ if (in->fec_en) {
+ temp1_fp = drm_fixp_from_fraction(976, 1000); /* 0.976 */
+ tu->lclk_fp = drm_fixp_mul(tu->lclk_fp, temp1_fp);
+ }
+}
+
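+/*
+ * Evaluate one (tu_size, upper/lower boundary count) candidate of the
+ * brute-force search and record it as the best solution so far when it
+ * reduces the error while meeting the hblank and delay_start constraints.
+ */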
+static void _tu_valid_boundary_calc(struct tu_algo_data *tu)
+{
+ s64 temp1_fp, temp2_fp, temp, temp1, temp2;
+ int compare_result_1, compare_result_2, compare_result_3;
+
+ temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+ temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
+
+ tu->new_valid_boundary_link = drm_fixp2int_ceil(temp2_fp);
+
+ temp = (tu->i_upper_boundary_count *
+ tu->new_valid_boundary_link +
+ tu->i_lower_boundary_count *
+ (tu->new_valid_boundary_link-1));
+ tu->average_valid2_fp = drm_fixp_from_fraction(temp,
+ (tu->i_upper_boundary_count +
+ tu->i_lower_boundary_count));
+
+ temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
+ temp2_fp = tu->lwidth_fp;
+ temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+ temp2_fp = drm_fixp_div(temp1_fp, tu->average_valid2_fp);
+ tu->n_tus = drm_fixp2int(temp2_fp);
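+	/* round up when the fraction is within 2^-20 of the next integer */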
+ if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000)
+ tu->n_tus += 1;
+
+ temp1_fp = drm_fixp_from_fraction(tu->n_tus, 1);
+ temp2_fp = drm_fixp_mul(temp1_fp, tu->average_valid2_fp);
+ temp1_fp = drm_fixp_from_fraction(tu->n_symbols, 1);
+ temp2_fp = temp1_fp - temp2_fp;
+ temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1);
+ temp2_fp = drm_fixp_div(temp2_fp, temp1_fp);
+ tu->n_remainder_symbols_per_lane_fp = temp2_fp;
+
+ temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+ tu->last_partial_tu_fp =
+ drm_fixp_div(tu->n_remainder_symbols_per_lane_fp,
+ temp1_fp);
+
+ if (tu->n_remainder_symbols_per_lane_fp != 0)
+ tu->remainder_symbols_exist = 1;
+ else
+ tu->remainder_symbols_exist = 0;
+
+ temp1_fp = drm_fixp_from_fraction(tu->n_tus, tu->nlanes);
+ tu->n_tus_per_lane = drm_fixp2int(temp1_fp);
+
+ tu->paired_tus = (int)((tu->n_tus_per_lane) /
+ (tu->i_upper_boundary_count +
+ tu->i_lower_boundary_count));
+
+ tu->remainder_tus = tu->n_tus_per_lane - tu->paired_tus *
+ (tu->i_upper_boundary_count +
+ tu->i_lower_boundary_count);
+
+ if ((tu->remainder_tus - tu->i_upper_boundary_count) > 0) {
+ tu->remainder_tus_upper = tu->i_upper_boundary_count;
+ tu->remainder_tus_lower = tu->remainder_tus -
+ tu->i_upper_boundary_count;
+ } else {
+ tu->remainder_tus_upper = tu->remainder_tus;
+ tu->remainder_tus_lower = 0;
+ }
+
+ temp = tu->paired_tus * (tu->i_upper_boundary_count *
+ tu->new_valid_boundary_link +
+ tu->i_lower_boundary_count *
+ (tu->new_valid_boundary_link - 1)) +
+ (tu->remainder_tus_upper *
+ tu->new_valid_boundary_link) +
+ (tu->remainder_tus_lower *
+ (tu->new_valid_boundary_link - 1));
+ tu->total_valid_fp = drm_fixp_from_fraction(temp, 1);
+
+ if (tu->remainder_symbols_exist) {
+ temp1_fp = tu->total_valid_fp +
+ tu->n_remainder_symbols_per_lane_fp;
+ temp2_fp = drm_fixp_from_fraction(tu->n_tus_per_lane, 1);
+ temp2_fp = temp2_fp + tu->last_partial_tu_fp;
+ temp1_fp = drm_fixp_div(temp1_fp, temp2_fp);
+ } else {
+ temp2_fp = drm_fixp_from_fraction(tu->n_tus_per_lane, 1);
+ temp1_fp = drm_fixp_div(tu->total_valid_fp, temp2_fp);
+ }
+ tu->effective_valid_fp = temp1_fp;
+
+ temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+ temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
+ tu->n_n_err_fp = tu->effective_valid_fp - temp2_fp;
+
+ temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+ temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
+ tu->n_err_fp = tu->average_valid2_fp - temp2_fp;
+
+ tu->even_distribution = tu->n_tus % tu->nlanes == 0 ? 1 : 0;
+
+ temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
+ temp2_fp = tu->lwidth_fp;
+ temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+ temp2_fp = drm_fixp_div(temp1_fp, tu->average_valid2_fp);
+
+ if (temp2_fp)
+ tu->n_tus_incl_last_incomplete_tu = drm_fixp2int_ceil(temp2_fp);
+ else
+ tu->n_tus_incl_last_incomplete_tu = 0;
+
+ temp1 = 0;
+ temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+ temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
+ temp1_fp = tu->average_valid2_fp - temp2_fp;
+ temp2_fp = drm_fixp_from_fraction(tu->n_tus_incl_last_incomplete_tu, 1);
+ temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+
+ if (temp1_fp)
+ temp1 = drm_fixp2int_ceil(temp1_fp);
+
+ temp = tu->i_upper_boundary_count * tu->nlanes;
+ temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+ temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
+ temp1_fp = drm_fixp_from_fraction(tu->new_valid_boundary_link, 1);
+ temp2_fp = temp1_fp - temp2_fp;
+ temp1_fp = drm_fixp_from_fraction(temp, 1);
+ temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp);
+
+ if (temp2_fp)
+ temp2 = drm_fixp2int_ceil(temp2_fp);
+ else
+ temp2 = 0;
+ tu->extra_required_bytes_new_tmp = (int)(temp1 + temp2);
+
+ temp1_fp = drm_fixp_from_fraction(8, tu->bpp);
+ temp2_fp = drm_fixp_from_fraction(
+ tu->extra_required_bytes_new_tmp, 1);
+ temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+
+ if (temp1_fp)
+ tu->extra_pclk_cycles_tmp = drm_fixp2int_ceil(temp1_fp);
+ else
+ tu->extra_pclk_cycles_tmp = 0;
+
+ temp1_fp = drm_fixp_from_fraction(tu->extra_pclk_cycles_tmp, 1);
+ temp2_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp);
+ temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp);
+
+ if (temp1_fp)
+ tu->extra_pclk_cycles_in_link_clk_tmp =
+ drm_fixp2int_ceil(temp1_fp);
+ else
+ tu->extra_pclk_cycles_in_link_clk_tmp = 0;
+
+ tu->filler_size_tmp = tu->tu_size - tu->new_valid_boundary_link;
+
+ tu->lower_filler_size_tmp = tu->filler_size_tmp + 1;
+
+ tu->delay_start_link_tmp = tu->extra_pclk_cycles_in_link_clk_tmp +
+ tu->lower_filler_size_tmp +
+ tu->extra_buffer_margin;
+
+ temp1_fp = drm_fixp_from_fraction(tu->delay_start_link_tmp, 1);
+ tu->delay_start_time_fp = drm_fixp_div(temp1_fp, tu->lclk_fp);
+
+ compare_result_1 = _tu_param_compare(tu->n_n_err_fp, tu->diff_abs_fp);
+ if (compare_result_1 == 2)
+ compare_result_1 = 1;
+ else
+ compare_result_1 = 0;
+
+ compare_result_2 = _tu_param_compare(tu->n_n_err_fp, tu->err_fp);
+ if (compare_result_2 == 2)
+ compare_result_2 = 1;
+ else
+ compare_result_2 = 0;
+
+ compare_result_3 = _tu_param_compare(tu->hbp_time_fp,
+ tu->delay_start_time_fp);
+ if (compare_result_3 == 2)
+ compare_result_3 = 0;
+ else
+ compare_result_3 = 1;
+
+ if (((tu->even_distribution == 1) ||
+ ((tu->even_distribution_BF == 0) &&
+ (tu->even_distribution_legacy == 0))) &&
+ tu->n_err_fp >= 0 && tu->n_n_err_fp >= 0 &&
+ compare_result_2 &&
+ (compare_result_1 || (tu->min_hblank_violated == 1)) &&
+ (tu->new_valid_boundary_link - 1) > 0 &&
+ compare_result_3 &&
+ (tu->delay_start_link_tmp <= 1023)) {
+ tu->upper_boundary_count = tu->i_upper_boundary_count;
+ tu->lower_boundary_count = tu->i_lower_boundary_count;
+ tu->err_fp = tu->n_n_err_fp;
+ tu->boundary_moderation_en = true;
+ tu->tu_size_desired = tu->tu_size;
+ tu->valid_boundary_link = tu->new_valid_boundary_link;
+ tu->effective_valid_recorded_fp = tu->effective_valid_fp;
+ tu->even_distribution_BF = 1;
+ tu->delay_start_link = tu->delay_start_link_tmp;
+ } else if (tu->boundary_mod_lower_err == 0) {
+ compare_result_1 = _tu_param_compare(tu->n_n_err_fp,
+ tu->diff_abs_fp);
+ if (compare_result_1 == 2)
+ tu->boundary_mod_lower_err = 1;
+ }
+}
+
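+/*
+ * Core TU computation. ratio = (pclk * bpp / 8) / (nlanes * lclk) is the
+ * fraction of each transfer unit that carries pixel data. A first pass
+ * scans tu_size 32..64 for the size minimizing the quantization error;
+ * when that solution violates the minimum-hblank or error constraints,
+ * a brute-force pass searches boundary-moderation parameters instead.
+ */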
+static void _dp_ctrl_calc_tu(struct dp_ctrl_private *ctrl,
+ struct dp_tu_calc_input *in,
+ struct dp_vc_tu_mapping_table *tu_table)
+{
+ struct tu_algo_data *tu;
+ int compare_result_1, compare_result_2;
+ u64 temp = 0;
+ s64 temp_fp = 0, temp1_fp = 0, temp2_fp = 0;
+
+ s64 LCLK_FAST_SKEW_fp = drm_fixp_from_fraction(6, 10000); /* 0.0006 */
+ s64 const_p49_fp = drm_fixp_from_fraction(49, 100); /* 0.49 */
+ s64 const_p56_fp = drm_fixp_from_fraction(56, 100); /* 0.56 */
+ s64 RATIO_SCALE_fp = drm_fixp_from_fraction(1001, 1000);
+
+ u8 DP_BRUTE_FORCE = 1;
+ s64 BRUTE_FORCE_THRESHOLD_fp = drm_fixp_from_fraction(1, 10); /* 0.1 */
+ uint EXTRA_PIXCLK_CYCLE_DELAY = 4;
+ uint HBLANK_MARGIN = 4;
+
+ tu = kzalloc(sizeof(*tu), GFP_KERNEL);
+ if (!tu)
+ return;
+
+ dp_panel_update_tu_timings(in, tu);
+
+ tu->err_fp = drm_fixp_from_fraction(1000, 1); /* 1000 */
+
+ temp1_fp = drm_fixp_from_fraction(4, 1);
+ temp2_fp = drm_fixp_mul(temp1_fp, tu->lclk_fp);
+ temp_fp = drm_fixp_div(temp2_fp, tu->pclk_fp);
+ tu->extra_buffer_margin = drm_fixp2int_ceil(temp_fp);
+
+ temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
+ temp2_fp = drm_fixp_mul(tu->pclk_fp, temp1_fp);
+ temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1);
+ temp2_fp = drm_fixp_div(temp2_fp, temp1_fp);
+ tu->ratio_fp = drm_fixp_div(temp2_fp, tu->lclk_fp);
+
+ tu->original_ratio_fp = tu->ratio_fp;
+ tu->boundary_moderation_en = false;
+ tu->upper_boundary_count = 0;
+ tu->lower_boundary_count = 0;
+ tu->i_upper_boundary_count = 0;
+ tu->i_lower_boundary_count = 0;
+ tu->valid_lower_boundary_link = 0;
+ tu->even_distribution_BF = 0;
+ tu->even_distribution_legacy = 0;
+ tu->even_distribution = 0;
+ tu->delay_start_time_fp = 0;
+
+ tu->err_fp = drm_fixp_from_fraction(1000, 1);
+ tu->n_err_fp = 0;
+ tu->n_n_err_fp = 0;
+
+ tu->ratio = drm_fixp2int(tu->ratio_fp);
+ temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1);
+ div64_u64_rem(tu->lwidth_fp, temp1_fp, &temp2_fp);
+ if (temp2_fp != 0 &&
+ !tu->ratio && tu->dsc_en == 0) {
+ tu->ratio_fp = drm_fixp_mul(tu->ratio_fp, RATIO_SCALE_fp);
+ tu->ratio = drm_fixp2int(tu->ratio_fp);
+ if (tu->ratio)
+ tu->ratio_fp = drm_fixp_from_fraction(1, 1);
+ }
+
+ if (tu->ratio > 1)
+ tu->ratio = 1;
+
+ if (tu->ratio == 1)
+ goto tu_size_calc;
+
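+	/* compare_result_1: ratio >= 0.49, compare_result_2: ratio <= 0.56 */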
+ compare_result_1 = _tu_param_compare(tu->ratio_fp, const_p49_fp);
+ if (!compare_result_1 || compare_result_1 == 1)
+ compare_result_1 = 1;
+ else
+ compare_result_1 = 0;
+
+ compare_result_2 = _tu_param_compare(tu->ratio_fp, const_p56_fp);
+ if (!compare_result_2 || compare_result_2 == 2)
+ compare_result_2 = 1;
+ else
+ compare_result_2 = 0;
+
+ if (tu->dsc_en && compare_result_1 && compare_result_2) {
+ HBLANK_MARGIN += 4;
+ drm_dbg_dp(ctrl->drm_dev,
+ "increase HBLANK_MARGIN to %d\n", HBLANK_MARGIN);
+ }
+
+tu_size_calc:
+ for (tu->tu_size = 32; tu->tu_size <= 64; tu->tu_size++) {
+ temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+ temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
+ temp = drm_fixp2int_ceil(temp2_fp);
+ temp1_fp = drm_fixp_from_fraction(temp, 1);
+ tu->n_err_fp = temp1_fp - temp2_fp;
+
+ if (tu->n_err_fp < tu->err_fp) {
+ tu->err_fp = tu->n_err_fp;
+ tu->tu_size_desired = tu->tu_size;
+ }
+ }
+
+ tu->tu_size_minus1 = tu->tu_size_desired - 1;
+
+ temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1);
+ temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
+ tu->valid_boundary_link = drm_fixp2int_ceil(temp2_fp);
+
+ temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
+ temp2_fp = tu->lwidth_fp;
+ temp2_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+
+ temp1_fp = drm_fixp_from_fraction(tu->valid_boundary_link, 1);
+ temp2_fp = drm_fixp_div(temp2_fp, temp1_fp);
+ tu->n_tus = drm_fixp2int(temp2_fp);
+ if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000)
+ tu->n_tus += 1;
+
+ tu->even_distribution_legacy = tu->n_tus % tu->nlanes == 0 ? 1 : 0;
+
+ drm_dbg_dp(ctrl->drm_dev,
+ "n_sym = %d, num_of_tus = %d\n",
+ tu->valid_boundary_link, tu->n_tus);
+
+ temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1);
+ temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
+ temp1_fp = drm_fixp_from_fraction(tu->valid_boundary_link, 1);
+ temp2_fp = temp1_fp - temp2_fp;
+ temp1_fp = drm_fixp_from_fraction(tu->n_tus + 1, 1);
+ temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp);
+
+ temp = drm_fixp2int(temp2_fp);
+ if (temp && temp2_fp)
+ tu->extra_bytes = drm_fixp2int_ceil(temp2_fp);
+ else
+ tu->extra_bytes = 0;
+
+ temp1_fp = drm_fixp_from_fraction(tu->extra_bytes, 1);
+ temp2_fp = drm_fixp_from_fraction(8, tu->bpp);
+ temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp);
+
+ if (temp && temp1_fp)
+ tu->extra_pclk_cycles = drm_fixp2int_ceil(temp1_fp);
+ else
+ tu->extra_pclk_cycles = drm_fixp2int(temp1_fp);
+
+ temp1_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp);
+ temp2_fp = drm_fixp_from_fraction(tu->extra_pclk_cycles, 1);
+ temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+
+ if (temp1_fp)
+ tu->extra_pclk_cycles_in_link_clk = drm_fixp2int_ceil(temp1_fp);
+ else
+ tu->extra_pclk_cycles_in_link_clk = drm_fixp2int(temp1_fp);
+
+ tu->filler_size = tu->tu_size_desired - tu->valid_boundary_link;
+
+ temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1);
+ tu->ratio_by_tu_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
+
+ tu->delay_start_link = tu->extra_pclk_cycles_in_link_clk +
+ tu->filler_size + tu->extra_buffer_margin;
+
+ tu->resulting_valid_fp =
+ drm_fixp_from_fraction(tu->valid_boundary_link, 1);
+
+ temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1);
+ temp2_fp = drm_fixp_div(tu->resulting_valid_fp, temp1_fp);
+ tu->TU_ratio_err_fp = temp2_fp - tu->original_ratio_fp;
+
+ temp1_fp = drm_fixp_from_fraction(HBLANK_MARGIN, 1);
+ temp1_fp = tu->hbp_relative_to_pclk_fp - temp1_fp;
+ tu->hbp_time_fp = drm_fixp_div(temp1_fp, tu->pclk_fp);
+
+ temp1_fp = drm_fixp_from_fraction(tu->delay_start_link, 1);
+ tu->delay_start_time_fp = drm_fixp_div(temp1_fp, tu->lclk_fp);
+
+ compare_result_1 = _tu_param_compare(tu->hbp_time_fp,
+ tu->delay_start_time_fp);
+ if (compare_result_1 == 2) /* if (hbp_time_fp < delay_start_time_fp) */
+ tu->min_hblank_violated = 1;
+
+ tu->hactive_time_fp = drm_fixp_div(tu->lwidth_fp, tu->pclk_fp);
+
+ compare_result_2 = _tu_param_compare(tu->hactive_time_fp,
+ tu->delay_start_time_fp);
+ if (compare_result_2 == 2)
+ tu->min_hblank_violated = 1;
+
+ tu->delay_start_time_fp = 0;
+
+ /* brute force */
+
+ tu->delay_start_link_extra_pixclk = EXTRA_PIXCLK_CYCLE_DELAY;
+ tu->diff_abs_fp = tu->resulting_valid_fp - tu->ratio_by_tu_fp;
+
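+	/* treat a difference smaller than 2^-16 as zero */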
+ temp = drm_fixp2int(tu->diff_abs_fp);
+ if (!temp && tu->diff_abs_fp <= 0xffff)
+ tu->diff_abs_fp = 0;
+
+	/* if (diff_abs < 0) diff_abs *= -1; plain negation keeps the 32.32 format */
+	if (tu->diff_abs_fp < 0)
+		tu->diff_abs_fp = -tu->diff_abs_fp;
+
+ tu->boundary_mod_lower_err = 0;
+ if ((tu->diff_abs_fp != 0 &&
+ ((tu->diff_abs_fp > BRUTE_FORCE_THRESHOLD_fp) ||
+ (tu->even_distribution_legacy == 0) ||
+ (DP_BRUTE_FORCE == 1))) ||
+ (tu->min_hblank_violated == 1)) {
+ do {
+ tu->err_fp = drm_fixp_from_fraction(1000, 1);
+
+ temp1_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp);
+ temp2_fp = drm_fixp_from_fraction(
+ tu->delay_start_link_extra_pixclk, 1);
+ temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+
+ if (temp1_fp)
+ tu->extra_buffer_margin =
+ drm_fixp2int_ceil(temp1_fp);
+ else
+ tu->extra_buffer_margin = 0;
+
+ temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
+ temp1_fp = drm_fixp_mul(tu->lwidth_fp, temp1_fp);
+
+ if (temp1_fp)
+ tu->n_symbols = drm_fixp2int_ceil(temp1_fp);
+ else
+ tu->n_symbols = 0;
+
+ for (tu->tu_size = 32; tu->tu_size <= 64; tu->tu_size++) {
+ for (tu->i_upper_boundary_count = 1;
+ tu->i_upper_boundary_count <= 15;
+ tu->i_upper_boundary_count++) {
+ for (tu->i_lower_boundary_count = 1;
+ tu->i_lower_boundary_count <= 15;
+ tu->i_lower_boundary_count++) {
+ _tu_valid_boundary_calc(tu);
+ }
+ }
+ }
+ tu->delay_start_link_extra_pixclk--;
+ } while (tu->boundary_moderation_en != true &&
+ tu->boundary_mod_lower_err == 1 &&
+ tu->delay_start_link_extra_pixclk != 0);
+
+ if (tu->boundary_moderation_en == true) {
+ temp1_fp = drm_fixp_from_fraction(
+ (tu->upper_boundary_count *
+ tu->valid_boundary_link +
+ tu->lower_boundary_count *
+ (tu->valid_boundary_link - 1)), 1);
+ temp2_fp = drm_fixp_from_fraction(
+ (tu->upper_boundary_count +
+ tu->lower_boundary_count), 1);
+ tu->resulting_valid_fp =
+ drm_fixp_div(temp1_fp, temp2_fp);
+
+ temp1_fp = drm_fixp_from_fraction(
+ tu->tu_size_desired, 1);
+ tu->ratio_by_tu_fp =
+ drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
+
+ tu->valid_lower_boundary_link =
+ tu->valid_boundary_link - 1;
+
+ temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
+ temp1_fp = drm_fixp_mul(tu->lwidth_fp, temp1_fp);
+ temp2_fp = drm_fixp_div(temp1_fp,
+ tu->resulting_valid_fp);
+ tu->n_tus = drm_fixp2int(temp2_fp);
+
+ tu->tu_size_minus1 = tu->tu_size_desired - 1;
+ tu->even_distribution_BF = 1;
+
+ temp1_fp =
+ drm_fixp_from_fraction(tu->tu_size_desired, 1);
+ temp2_fp =
+ drm_fixp_div(tu->resulting_valid_fp, temp1_fp);
+ tu->TU_ratio_err_fp = temp2_fp - tu->original_ratio_fp;
+ }
+ }
+
+ temp2_fp = drm_fixp_mul(LCLK_FAST_SKEW_fp, tu->lwidth_fp);
+
+ if (temp2_fp)
+ temp = drm_fixp2int_ceil(temp2_fp);
+ else
+ temp = 0;
+
+ temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1);
+ temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
+ temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
+ temp2_fp = drm_fixp_div(temp1_fp, temp2_fp);
+ temp1_fp = drm_fixp_from_fraction(temp, 1);
+ temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp);
+ temp = drm_fixp2int(temp2_fp);
+
+ if (tu->async_en)
+ tu->delay_start_link += (int)temp;
+
+ temp1_fp = drm_fixp_from_fraction(tu->delay_start_link, 1);
+ tu->delay_start_time_fp = drm_fixp_div(temp1_fp, tu->lclk_fp);
+
+ /* OUTPUTS */
+ tu_table->valid_boundary_link = tu->valid_boundary_link;
+ tu_table->delay_start_link = tu->delay_start_link;
+ tu_table->boundary_moderation_en = tu->boundary_moderation_en;
+ tu_table->valid_lower_boundary_link = tu->valid_lower_boundary_link;
+ tu_table->upper_boundary_count = tu->upper_boundary_count;
+ tu_table->lower_boundary_count = tu->lower_boundary_count;
+ tu_table->tu_size_minus1 = tu->tu_size_minus1;
+
+ drm_dbg_dp(ctrl->drm_dev, "TU: valid_boundary_link: %d\n",
+ tu_table->valid_boundary_link);
+ drm_dbg_dp(ctrl->drm_dev, "TU: delay_start_link: %d\n",
+ tu_table->delay_start_link);
+ drm_dbg_dp(ctrl->drm_dev, "TU: boundary_moderation_en: %d\n",
+ tu_table->boundary_moderation_en);
+ drm_dbg_dp(ctrl->drm_dev, "TU: valid_lower_boundary_link: %d\n",
+ tu_table->valid_lower_boundary_link);
+ drm_dbg_dp(ctrl->drm_dev, "TU: upper_boundary_count: %d\n",
+ tu_table->upper_boundary_count);
+ drm_dbg_dp(ctrl->drm_dev, "TU: lower_boundary_count: %d\n",
+ tu_table->lower_boundary_count);
+ drm_dbg_dp(ctrl->drm_dev, "TU: tu_size_minus1: %d\n",
+ tu_table->tu_size_minus1);
+
+ kfree(tu);
+}
+
+static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl,
+ struct dp_vc_tu_mapping_table *tu_table)
+{
+ struct dp_tu_calc_input in;
+ struct drm_display_mode *drm_mode;
+
+ drm_mode = &ctrl->panel->dp_mode.drm_mode;
+
+ in.lclk = ctrl->link->link_params.rate / 1000;
+ in.pclk_khz = drm_mode->clock;
+ in.hactive = drm_mode->hdisplay;
+ in.hporch = drm_mode->htotal - drm_mode->hdisplay;
+ in.nlanes = ctrl->link->link_params.num_lanes;
+ in.bpp = ctrl->panel->dp_mode.bpp;
+ in.pixel_enc = 444;
+ in.dsc_en = 0;
+ in.async_en = 0;
+ in.fec_en = 0;
+ in.num_of_dsc_slices = 0;
+ in.compress_ratio = 100;
+
+ _dp_ctrl_calc_tu(ctrl, &in, tu_table);
+}
+
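+/*
+ * Pack the computed TU parameters into the three values the catalog
+ * programs: tu_size_minus1, valid_boundary (symbols per TU plus
+ * delay_start in the upper half) and valid_boundary2 (boundary
+ * moderation enable, lower boundary and the upper/lower counts).
+ */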
+static void dp_ctrl_setup_tr_unit(struct dp_ctrl_private *ctrl)
+{
+ u32 dp_tu = 0x0;
+ u32 valid_boundary = 0x0;
+ u32 valid_boundary2 = 0x0;
+ struct dp_vc_tu_mapping_table tu_calc_table;
+
+ dp_ctrl_calc_tu_parameters(ctrl, &tu_calc_table);
+
+ dp_tu |= tu_calc_table.tu_size_minus1;
+ valid_boundary |= tu_calc_table.valid_boundary_link;
+ valid_boundary |= (tu_calc_table.delay_start_link << 16);
+
+ valid_boundary2 |= (tu_calc_table.valid_lower_boundary_link << 1);
+ valid_boundary2 |= (tu_calc_table.upper_boundary_count << 16);
+ valid_boundary2 |= (tu_calc_table.lower_boundary_count << 20);
+
+ if (tu_calc_table.boundary_moderation_en)
+ valid_boundary2 |= BIT(0);
+
+ pr_debug("dp_tu=0x%x, valid_boundary=0x%x, valid_boundary2=0x%x\n",
+ dp_tu, valid_boundary, valid_boundary2);
+
+ dp_catalog_ctrl_update_transfer_unit(ctrl->catalog,
+ dp_tu, valid_boundary, valid_boundary2);
+}
+
+static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl)
+{
+ int ret = 0;
+
+ if (!wait_for_completion_timeout(&ctrl->video_comp,
+ WAIT_FOR_VIDEO_READY_TIMEOUT_JIFFIES)) {
+		DRM_ERROR("wait4video timed out\n");
+ ret = -ETIMEDOUT;
+ }
+ return ret;
+}
+
+static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
+{
+ struct dp_link *link = ctrl->link;
+ int ret = 0, lane, lane_cnt;
+ u8 buf[4];
+ u32 max_level_reached = 0;
+ u32 voltage_swing_level = link->phy_params.v_level;
+ u32 pre_emphasis_level = link->phy_params.p_level;
+
+ drm_dbg_dp(ctrl->drm_dev,
+ "voltage level: %d emphasis level: %d\n",
+ voltage_swing_level, pre_emphasis_level);
+ ret = dp_catalog_ctrl_update_vx_px(ctrl->catalog,
+ voltage_swing_level, pre_emphasis_level);
+
+ if (ret)
+ return ret;
+
+ if (voltage_swing_level >= DP_TRAIN_VOLTAGE_SWING_MAX) {
+ drm_dbg_dp(ctrl->drm_dev,
+ "max. voltage swing level reached %d\n",
+ voltage_swing_level);
+ max_level_reached |= DP_TRAIN_MAX_SWING_REACHED;
+ }
+
+ if (pre_emphasis_level >= DP_TRAIN_PRE_EMPHASIS_MAX) {
+ drm_dbg_dp(ctrl->drm_dev,
+ "max. pre-emphasis level reached %d\n",
+ pre_emphasis_level);
+ max_level_reached |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+ }
+
+ pre_emphasis_level <<= DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ lane_cnt = ctrl->link->link_params.num_lanes;
+ for (lane = 0; lane < lane_cnt; lane++)
+ buf[lane] = voltage_swing_level | pre_emphasis_level
+ | max_level_reached;
+
+ drm_dbg_dp(ctrl->drm_dev, "sink: p|v=0x%x\n",
+ voltage_swing_level | pre_emphasis_level);
+ ret = drm_dp_dpcd_write(ctrl->aux, DP_TRAINING_LANE0_SET,
+ buf, lane_cnt);
+ if (ret == lane_cnt)
+ ret = 0;
+
+ return ret;
+}
+
+static bool dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
+ u8 pattern)
+{
+ u8 buf;
+ int ret = 0;
+
+ drm_dbg_dp(ctrl->drm_dev, "sink: pattern=%x\n", pattern);
+
+ buf = pattern;
+
+ if (pattern && pattern != DP_TRAINING_PATTERN_4)
+ buf |= DP_LINK_SCRAMBLING_DISABLE;
+
+ ret = drm_dp_dpcd_writeb(ctrl->aux, DP_TRAINING_PATTERN_SET, buf);
+ return ret == 1;
+}
+
+static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl,
+ u8 *link_status)
+{
+ int ret = 0, len;
+
+ len = drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
+ if (len != DP_LINK_STATUS_SIZE) {
+ DRM_ERROR("DP link status read failed, err: %d\n", len);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
+ int *training_step)
+{
+ int tries, old_v_level, ret = 0;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ int const maximum_retries = 4;
+
+ dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+
+ *training_step = DP_TRAINING_1;
+
+ ret = dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, 1);
+ if (ret)
+ return ret;
+ dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE);
+
+ ret = dp_ctrl_update_vx_px(ctrl);
+ if (ret)
+ return ret;
+
+ old_v_level = ctrl->link->phy_params.v_level;
+ for (tries = 0; tries < maximum_retries; tries++) {
+ drm_dp_link_train_clock_recovery_delay(ctrl->aux, ctrl->panel->dpcd);
+
+ ret = dp_ctrl_read_link_status(ctrl, link_status);
+ if (ret)
+ return ret;
+
+ if (drm_dp_clock_recovery_ok(link_status,
+ ctrl->link->link_params.num_lanes)) {
+ return 0;
+ }
+
+ if (ctrl->link->phy_params.v_level >=
+ DP_TRAIN_VOLTAGE_SWING_MAX) {
+ DRM_ERROR_RATELIMITED("max v_level reached\n");
+ return -EAGAIN;
+ }
+
+ if (old_v_level != ctrl->link->phy_params.v_level) {
+ tries = 0;
+ old_v_level = ctrl->link->phy_params.v_level;
+ }
+
+ dp_link_adjust_levels(ctrl->link, link_status);
+ ret = dp_ctrl_update_vx_px(ctrl);
+ if (ret)
+ return ret;
+ }
+
+ DRM_ERROR("max tries reached\n");
+ return -ETIMEDOUT;
+}
+
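+/* Step down to the next lower standard DP link rate; -EINVAL once at RBR. */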
+static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
+{
+ int ret = 0;
+
+ switch (ctrl->link->link_params.rate) {
+ case 810000:
+ ctrl->link->link_params.rate = 540000;
+ break;
+ case 540000:
+ ctrl->link->link_params.rate = 270000;
+ break;
+ case 270000:
+ ctrl->link->link_params.rate = 162000;
+ break;
+ case 162000:
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ drm_dbg_dp(ctrl->drm_dev, "new rate=0x%x\n",
+ ctrl->link->link_params.rate);
+ }
+
+ return ret;
+}
+
+static int dp_ctrl_link_lane_down_shift(struct dp_ctrl_private *ctrl)
+{
+ if (ctrl->link->link_params.num_lanes == 1)
+ return -1;
+
+ ctrl->link->link_params.num_lanes /= 2;
+ ctrl->link->link_params.rate = ctrl->panel->link_info.rate;
+
+ ctrl->link->phy_params.p_level = 0;
+ ctrl->link->phy_params.v_level = 0;
+
+ return 0;
+}
+
+static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
+{
+ dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE);
+ drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
+}
+
+static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
+ int *training_step)
+{
+ int tries = 0, ret = 0;
+ u8 pattern;
+ u32 state_ctrl_bit;
+ int const maximum_retries = 5;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+
+ *training_step = DP_TRAINING_2;
+
+ if (drm_dp_tps4_supported(ctrl->panel->dpcd)) {
+ pattern = DP_TRAINING_PATTERN_4;
+ state_ctrl_bit = 4;
+ } else if (drm_dp_tps3_supported(ctrl->panel->dpcd)) {
+ pattern = DP_TRAINING_PATTERN_3;
+ state_ctrl_bit = 3;
+ } else {
+ pattern = DP_TRAINING_PATTERN_2;
+ state_ctrl_bit = 2;
+ }
+
+ ret = dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, state_ctrl_bit);
+ if (ret)
+ return ret;
+
+ dp_ctrl_train_pattern_set(ctrl, pattern);
+
+ for (tries = 0; tries <= maximum_retries; tries++) {
+ drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
+
+ ret = dp_ctrl_read_link_status(ctrl, link_status);
+ if (ret)
+ return ret;
+
+ if (drm_dp_channel_eq_ok(link_status,
+ ctrl->link->link_params.num_lanes)) {
+ return 0;
+ }
+
+ dp_link_adjust_levels(ctrl->link, link_status);
+ ret = dp_ctrl_update_vx_px(ctrl);
+ if (ret)
+ return ret;
+
+
+ return -ETIMEDOUT;
+}
+
+static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
+ int *training_step)
+{
+ int ret = 0;
+ const u8 *dpcd = ctrl->panel->dpcd;
+ u8 encoding[] = { 0, DP_SET_ANSI_8B10B };
+ u8 assr;
+ struct dp_link_info link_info = {0};
+
+ dp_ctrl_config_ctrl(ctrl);
+
+ link_info.num_lanes = ctrl->link->link_params.num_lanes;
+ link_info.rate = ctrl->link->link_params.rate;
+ link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING;
+
+ dp_aux_link_configure(ctrl->aux, &link_info);
+
+ if (drm_dp_max_downspread(dpcd))
+ encoding[0] |= DP_SPREAD_AMP_0_5;
+
+ /* config DOWNSPREAD_CTRL and MAIN_LINK_CHANNEL_CODING_SET */
+ drm_dp_dpcd_write(ctrl->aux, DP_DOWNSPREAD_CTRL, encoding, 2);
+
+ if (drm_dp_alternate_scrambler_reset_cap(dpcd)) {
+ assr = DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;
+ drm_dp_dpcd_write(ctrl->aux, DP_EDP_CONFIGURATION_SET,
+ &assr, 1);
+ }
+
+ ret = dp_ctrl_link_train_1(ctrl, training_step);
+ if (ret) {
+ DRM_ERROR("link training #1 failed. ret=%d\n", ret);
+ goto end;
+ }
+
+	/* print success info as this is a result of a user-initiated action */
+ drm_dbg_dp(ctrl->drm_dev, "link training #1 successful\n");
+
+ ret = dp_ctrl_link_train_2(ctrl, training_step);
+ if (ret) {
+ DRM_ERROR("link training #2 failed. ret=%d\n", ret);
+ goto end;
+ }
+
+	/* print success info as this is a result of a user-initiated action */
+ drm_dbg_dp(ctrl->drm_dev, "link training #2 successful\n");
+
+end:
+ dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+
+ return ret;
+}
+
+static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
+ int *training_step)
+{
+ int ret = 0;
+
+ dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
+
+ if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
+ return ret;
+
+ /*
+ * As part of previous calls, DP controller state might have
+ * transitioned to PUSH_IDLE. In order to start transmitting
+	 * a link training pattern, we have to first do a soft reset.
+ */
+
+ ret = dp_ctrl_link_train(ctrl, training_step);
+
+ return ret;
+}
+
+static void dp_ctrl_set_clock_rate(struct dp_ctrl_private *ctrl,
+ enum dp_pm_type module, char *name, unsigned long rate)
+{
+ u32 num = ctrl->parser->mp[module].num_clk;
+ struct clk_bulk_data *cfg = ctrl->parser->mp[module].clocks;
+
+ while (num && strcmp(cfg->id, name)) {
+ num--;
+ cfg++;
+ }
+
+ drm_dbg_dp(ctrl->drm_dev, "setting rate=%lu on clk=%s\n",
+ rate, name);
+
+ if (num)
+ clk_set_rate(cfg->clk, rate);
+ else
+		DRM_ERROR("%s clock doesn't exist, can't set rate %lu\n",
+				name, rate);
+}
+
+static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
+{
+ int ret = 0;
+ struct dp_io *dp_io = &ctrl->parser->io;
+ struct phy *phy = dp_io->phy;
+ struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
+ const u8 *dpcd = ctrl->panel->dpcd;
+
+ opts_dp->lanes = ctrl->link->link_params.num_lanes;
+ opts_dp->link_rate = ctrl->link->link_params.rate / 100;
+ opts_dp->ssc = drm_dp_max_downspread(dpcd);
+
+ phy_configure(phy, &dp_io->phy_opts);
+ phy_power_on(phy);
+
+ dev_pm_opp_set_rate(ctrl->dev, ctrl->link->link_params.rate * 1000);
+ ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, true);
+ if (ret)
+ DRM_ERROR("Unable to start link clocks. ret=%d\n", ret);
+
+ drm_dbg_dp(ctrl->drm_dev, "link rate=%d\n", ctrl->link->link_params.rate);
+
+ return ret;
+}
+
+void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable)
+{
+ struct dp_ctrl_private *ctrl;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+ dp_catalog_ctrl_reset(ctrl->catalog);
+
+ /*
+ * all dp controller programmable registers will not
+ * be reset to default value after DP_SW_RESET
+ * therefore interrupt mask bits have to be updated
+ * to enable/disable interrupts
+ */
+ dp_catalog_ctrl_enable_irq(ctrl->catalog, enable);
+}
+
+void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl)
+{
+ struct dp_ctrl_private *ctrl;
+ struct dp_io *dp_io;
+ struct phy *phy;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ dp_io = &ctrl->parser->io;
+ phy = dp_io->phy;
+
+ dp_catalog_ctrl_phy_reset(ctrl->catalog);
+ phy_init(phy);
+
+ drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
+ phy, phy->init_count, phy->power_count);
+}
+
+void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl)
+{
+ struct dp_ctrl_private *ctrl;
+ struct dp_io *dp_io;
+ struct phy *phy;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ dp_io = &ctrl->parser->io;
+ phy = dp_io->phy;
+
+ dp_catalog_ctrl_phy_reset(ctrl->catalog);
+ phy_exit(phy);
+ drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
+ phy, phy->init_count, phy->power_count);
+}
+
+static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl)
+{
+ const u8 *dpcd = ctrl->panel->dpcd;
+
+ /*
+	 * For better interop experience, use a fixed NVID=0x8000
+ * whenever connected to a VGA dongle downstream.
+ */
+ if (drm_dp_is_branch(dpcd))
+ return (drm_dp_has_quirk(&ctrl->panel->desc,
+ DP_DPCD_QUIRK_CONSTANT_N));
+
+ return false;
+}
+
+static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl)
+{
+ int ret = 0;
+ struct dp_io *dp_io = &ctrl->parser->io;
+ struct phy *phy = dp_io->phy;
+ struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
+
+ dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+ opts_dp->lanes = ctrl->link->link_params.num_lanes;
+ phy_configure(phy, &dp_io->phy_opts);
+ /*
+ * Disable and re-enable the mainlink clock since the
+ * link clock might have been adjusted as part of the
+ * link maintenance.
+ */
+ dev_pm_opp_set_rate(ctrl->dev, 0);
+ ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
+ if (ret) {
+ DRM_ERROR("Failed to disable clocks. ret=%d\n", ret);
+ return ret;
+ }
+ phy_power_off(phy);
+ /* hw recommended delay before re-enabling clocks */
+ msleep(20);
+
+ ret = dp_ctrl_enable_mainlink_clocks(ctrl);
+ if (ret) {
+ DRM_ERROR("Failed to enable mainlink clks. ret=%d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl)
+{
+ struct dp_io *dp_io;
+ struct phy *phy;
+ int ret;
+
+ dp_io = &ctrl->parser->io;
+ phy = dp_io->phy;
+
+ dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+
+ dp_catalog_ctrl_reset(ctrl->catalog);
+
+ dev_pm_opp_set_rate(ctrl->dev, 0);
+ ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
+	if (ret)
+		DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
+
+ phy_power_off(phy);
+
+ /* aux channel down, reinit phy */
+ phy_exit(phy);
+ phy_init(phy);
+
+ drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
+ phy, phy->init_count, phy->power_count);
+ return 0;
+}
+
+static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
+{
+ int ret = 0;
+ int training_step = DP_TRAINING_NONE;
+
+ dp_ctrl_push_idle(&ctrl->dp_ctrl);
+
+ ctrl->link->phy_params.p_level = 0;
+ ctrl->link->phy_params.v_level = 0;
+
+ ret = dp_ctrl_setup_main_link(ctrl, &training_step);
+ if (ret)
+ goto end;
+
+ dp_ctrl_clear_training_pattern(ctrl);
+
+ dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+
+ ret = dp_ctrl_wait4video_ready(ctrl);
+end:
+ return ret;
+}
+
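+/*
+ * Program the PHY test pattern requested by the sink, then read back
+ * REG_DP_MAINLINK_READY and verify that the pattern-specific MR_LINK_*
+ * bit confirms the pattern is actually on the wire.
+ */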
+static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
+{
+ bool success = false;
+ u32 pattern_sent = 0x0;
+ u32 pattern_requested = ctrl->link->phy_params.phy_test_pattern_sel;
+
+ drm_dbg_dp(ctrl->drm_dev, "request: 0x%x\n", pattern_requested);
+
+ if (dp_catalog_ctrl_update_vx_px(ctrl->catalog,
+ ctrl->link->phy_params.v_level,
+ ctrl->link->phy_params.p_level)) {
+ DRM_ERROR("Failed to set v/p levels\n");
+ return false;
+ }
+ dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested);
+ dp_ctrl_update_vx_px(ctrl);
+ dp_link_send_test_response(ctrl->link);
+
+ pattern_sent = dp_catalog_ctrl_read_phy_pattern(ctrl->catalog);
+
+ switch (pattern_sent) {
+ case MR_LINK_TRAINING1:
+ success = (pattern_requested ==
+ DP_PHY_TEST_PATTERN_D10_2);
+ break;
+ case MR_LINK_SYMBOL_ERM:
+ success = ((pattern_requested ==
+ DP_PHY_TEST_PATTERN_ERROR_COUNT) ||
+ (pattern_requested ==
+ DP_PHY_TEST_PATTERN_CP2520));
+ break;
+ case MR_LINK_PRBS7:
+ success = (pattern_requested ==
+ DP_PHY_TEST_PATTERN_PRBS7);
+ break;
+ case MR_LINK_CUSTOM80:
+ success = (pattern_requested ==
+ DP_PHY_TEST_PATTERN_80BIT_CUSTOM);
+ break;
+ case MR_LINK_TRAINING4:
+ success = (pattern_requested ==
+ DP_PHY_TEST_PATTERN_SEL_MASK);
+ break;
+ default:
+ success = false;
+ }
+
+ drm_dbg_dp(ctrl->drm_dev, "%s: test->0x%x\n",
+ success ? "success" : "failed", pattern_requested);
+ return success;
+}
+
+static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
+{
+ int ret;
+ unsigned long pixel_rate;
+
+ if (!ctrl->link->phy_params.phy_test_pattern_sel) {
+ drm_dbg_dp(ctrl->drm_dev,
+ "no test pattern selected by sink\n");
+ return 0;
+ }
+
+ /*
+ * The global reset will need DP link related clocks to be
+ * running. Add the global reset just before disabling the
+ * link clocks and core clocks.
+ */
+ ret = dp_ctrl_off(&ctrl->dp_ctrl);
+ if (ret) {
+ DRM_ERROR("failed to disable DP controller\n");
+ return ret;
+ }
+
+ ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
+ if (ret) {
+ DRM_ERROR("failed to enable DP link controller\n");
+ return ret;
+ }
+
+ pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+ dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel", pixel_rate * 1000);
+
+ ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true);
+ if (ret) {
+ DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
+ return ret;
+ }
+
+ dp_ctrl_send_phy_test_pattern(ctrl);
+
+ return 0;
+}
+
+void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl)
+{
+ struct dp_ctrl_private *ctrl;
+ u32 sink_request = 0x0;
+
+ if (!dp_ctrl) {
+ DRM_ERROR("invalid input\n");
+ return;
+ }
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ sink_request = ctrl->link->sink_request;
+
+ if (sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
+ drm_dbg_dp(ctrl->drm_dev, "PHY_TEST_PATTERN request\n");
+ if (dp_ctrl_process_phy_test_request(ctrl)) {
+ DRM_ERROR("process phy_test_req failed\n");
+ return;
+ }
+ }
+
+ if (sink_request & DP_LINK_STATUS_UPDATED) {
+ if (dp_ctrl_link_maintenance(ctrl)) {
+ DRM_ERROR("LM failed: TEST_LINK_TRAINING\n");
+ return;
+ }
+ }
+
+ if (sink_request & DP_TEST_LINK_TRAINING) {
+ dp_link_send_test_response(ctrl->link);
+ if (dp_ctrl_link_maintenance(ctrl)) {
+ DRM_ERROR("LM failed: TEST_LINK_TRAINING\n");
+ return;
+ }
+ }
+}
+
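+/*
+ * Check whether the lanes that would remain after halving the lane count
+ * have completed clock recovery. Used to decide whether a lane-count
+ * downshift is worth attempting once training has failed at the lowest
+ * link rate.
+ */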
+static bool dp_ctrl_clock_recovery_any_ok(
+ const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ int reduced_cnt;
+
+ if (lane_count <= 1)
+ return false;
+
+ /*
+ * only check the lanes that would remain after reducing the count:
+ * lane_count = 4 -> check 2 lanes
+ * lane_count = 2 -> check 1 lane
+ */
+ reduced_cnt = lane_count >> 1;
+
+ return drm_dp_clock_recovery_ok(link_status, reduced_cnt);
+}
+
+static bool dp_ctrl_channel_eq_ok(struct dp_ctrl_private *ctrl)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ int num_lanes = ctrl->link->link_params.num_lanes;
+
+ dp_ctrl_read_link_status(ctrl, link_status);
+
+ return drm_dp_channel_eq_ok(link_status, num_lanes);
+}
+
+int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
+{
+ int rc = 0;
+ struct dp_ctrl_private *ctrl;
+ u32 rate;
+ int link_train_max_retries = 5;
+ u32 const phy_cts_pixel_clk_khz = 148500;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ unsigned int training_step;
+ unsigned long pixel_rate;
+
+ if (!dp_ctrl)
+ return -EINVAL;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+ rate = ctrl->panel->link_info.rate;
+ pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+
+ dp_power_clk_enable(ctrl->power, DP_CORE_PM, true);
+
+ if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
+ drm_dbg_dp(ctrl->drm_dev,
+ "using phy test link parameters\n");
+ if (!pixel_rate)
+ pixel_rate = phy_cts_pixel_clk_khz;
+ } else {
+ ctrl->link->link_params.rate = rate;
+ ctrl->link->link_params.num_lanes =
+ ctrl->panel->link_info.num_lanes;
+ }
+
+ drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n",
+ ctrl->link->link_params.rate, ctrl->link->link_params.num_lanes,
+ pixel_rate);
+
+ rc = dp_ctrl_enable_mainlink_clocks(ctrl);
+ if (rc)
+ return rc;
+
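+ /*
+ * Retry link training up to four times. On a training-1 (clock
+ * recovery) failure, drop to a lower link rate first; once at the
+ * lowest rate, drop the lane count instead, provided the remaining
+ * lanes completed clock recovery. On a training-2 (channel EQ)
+ * failure, drop the rate if clock recovery was lost, otherwise drop
+ * the lane count. Each retry reinitializes the mainlink with the
+ * new parameters.
+ */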
+ while (--link_train_max_retries) {
+ training_step = DP_TRAINING_NONE;
+ rc = dp_ctrl_setup_main_link(ctrl, &training_step);
+ if (rc == 0) {
+ /* training completed successfully */
+ break;
+ } else if (training_step == DP_TRAINING_1) {
+ /* link train_1 failed */
+ if (!dp_catalog_link_is_connected(ctrl->catalog))
+ break;
+
+ dp_ctrl_read_link_status(ctrl, link_status);
+
+ rc = dp_ctrl_link_rate_down_shift(ctrl);
+ if (rc < 0) { /* already in RBR = 1.6G */
+ if (dp_ctrl_clock_recovery_any_ok(link_status,
+ ctrl->link->link_params.num_lanes)) {
+ /*
+ * some lanes are ready,
+ * reduce lane number
+ */
+ rc = dp_ctrl_link_lane_down_shift(ctrl);
+ if (rc < 0) { /* lane == 1 already */
+ /* end with failure */
+ break;
+ }
+ } else {
+ /* no lane completed clock recovery, end with failure */
+ break;
+ }
+ }
+ } else if (training_step == DP_TRAINING_2) {
+ /* link train_2 failed */
+ if (!dp_catalog_link_is_connected(ctrl->catalog))
+ break;
+
+ dp_ctrl_read_link_status(ctrl, link_status);
+
+ if (!drm_dp_clock_recovery_ok(link_status,
+ ctrl->link->link_params.num_lanes))
+ rc = dp_ctrl_link_rate_down_shift(ctrl);
+ else
+ rc = dp_ctrl_link_lane_down_shift(ctrl);
+
+ if (rc < 0) {
+ /* end with failure */
+ break; /* lane == 1 already */
+ }
+
+ /* stop link training before starting retraining */
+ dp_ctrl_clear_training_pattern(ctrl);
+ }
+
+ rc = dp_ctrl_reinitialize_mainlink(ctrl);
+ if (rc) {
+ DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n", rc);
+ break;
+ }
+ }
+
+ if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
+ return rc;
+
+ if (rc == 0) { /* link trained successfully */
+ /*
+ * Do not stop the training pattern here; link training is
+ * stopped in dp_ctrl_on_stream() to pass compliance tests.
+ */
+ } else {
+ /*
+ * Link training failed; stop transmitting the training
+ * pattern here.
+ */
+ dp_ctrl_clear_training_pattern(ctrl);
+
+ dp_ctrl_deinitialize_mainlink(ctrl);
+ rc = -ECONNRESET;
+ }
+
+ return rc;
+}
+
+static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl)
+{
+ int training_step = DP_TRAINING_NONE;
+
+ return dp_ctrl_setup_main_link(ctrl, &training_step);
+}
+
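+/*
+ * Bring up the video stream: ensure the link and pixel clocks are running,
+ * retrain the link if channel EQ is no longer clean (or if the caller
+ * forces it), then program the MSA and transfer-unit values and switch the
+ * controller state machine to sending video. In wide-bus mode the stream
+ * pixel clock runs at half the mode clock.
+ */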
+int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
+{
+ int ret = 0;
+ bool mainlink_ready = false;
+ struct dp_ctrl_private *ctrl;
+ unsigned long pixel_rate;
+ unsigned long pixel_rate_orig;
+
+ if (!dp_ctrl)
+ return -EINVAL;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+ pixel_rate = pixel_rate_orig = ctrl->panel->dp_mode.drm_mode.clock;
+
+ if (dp_ctrl->wide_bus_en)
+ pixel_rate >>= 1;
+
+ drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n",
+ ctrl->link->link_params.rate,
+ ctrl->link->link_params.num_lanes, pixel_rate);
+
+ if (!dp_power_clk_status(ctrl->power, DP_CTRL_PM)) { /* link clk is off */
+ ret = dp_ctrl_enable_mainlink_clocks(ctrl);
+ if (ret) {
+ DRM_ERROR("Failed to start link clocks. ret=%d\n", ret);
+ goto end;
+ }
+ }
+
+ dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel", pixel_rate * 1000);
+
+ ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true);
+ if (ret) {
+ DRM_ERROR("Unable to start pixel clocks. ret=%d\n", ret);
+ goto end;
+ }
+
+ if (force_link_train || !dp_ctrl_channel_eq_ok(ctrl))
+ dp_ctrl_link_retrain(ctrl);
+
+ /* stop transmitting the training pattern to end link training */
+ dp_ctrl_clear_training_pattern(ctrl);
+
+ /*
+ * Set up transfer unit values and set controller state to send
+ * video.
+ */
+ reinit_completion(&ctrl->video_comp);
+
+ dp_ctrl_configure_source_params(ctrl);
+
+ dp_catalog_ctrl_config_msa(ctrl->catalog,
+ ctrl->link->link_params.rate,
+ pixel_rate_orig, dp_ctrl_use_fixed_nvid(ctrl));
+
+ dp_ctrl_setup_tr_unit(ctrl);
+
+ dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+
+ ret = dp_ctrl_wait4video_ready(ctrl);
+ if (ret)
+ return ret;
+
+ mainlink_ready = dp_catalog_ctrl_mainlink_ready(ctrl->catalog);
+ drm_dbg_dp(ctrl->drm_dev,
+ "mainlink %s\n", mainlink_ready ? "READY" : "NOT READY");
+
+end:
+ return ret;
+}
+
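+/*
+ * Power down both link and stream: put the sink into D3, stop the pixel
+ * and link clocks, power off the PHY, and cycle the PHY through exit/init
+ * so the AUX channel comes back up in a clean state.
+ */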
+int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
+{
+ struct dp_ctrl_private *ctrl;
+ struct dp_io *dp_io;
+ struct phy *phy;
+ int ret;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ dp_io = &ctrl->parser->io;
+ phy = dp_io->phy;
+
+ /* set dongle to D3 (power off) mode */
+ dp_link_psm_config(ctrl->link, &ctrl->panel->link_info, true);
+
+ dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+
+ if (dp_power_clk_status(ctrl->power, DP_STREAM_PM)) {
+ ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, false);
+ if (ret) {
+ DRM_ERROR("Failed to disable pclk. ret=%d\n", ret);
+ return ret;
+ }
+ }
+
+ dev_pm_opp_set_rate(ctrl->dev, 0);
+ ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
+ if (ret) {
+ DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
+ return ret;
+ }
+
+ phy_power_off(phy);
+
+ /* aux channel down, reinit phy */
+ phy_exit(phy);
+ phy_init(phy);
+
+ drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
+ phy, phy->init_count, phy->power_count);
+ return ret;
+}
+
+int dp_ctrl_off_link(struct dp_ctrl *dp_ctrl)
+{
+ struct dp_ctrl_private *ctrl;
+ struct dp_io *dp_io;
+ struct phy *phy;
+ int ret;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ dp_io = &ctrl->parser->io;
+ phy = dp_io->phy;
+
+ dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+
+ ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
+ if (ret) {
+ DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
+ }
+
+ DRM_DEBUG_DP("Before, phy=%p init_count=%d power_on=%d\n",
+ phy, phy->init_count, phy->power_count);
+
+ phy_power_off(phy);
+
+ DRM_DEBUG_DP("After, phy=%p init_count=%d power_on=%d\n",
+ phy, phy->init_count, phy->power_count);
+
+ return ret;
+}
+
+int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
+{
+ struct dp_ctrl_private *ctrl;
+ struct dp_io *dp_io;
+ struct phy *phy;
+ int ret = 0;
+
+ if (!dp_ctrl)
+ return -EINVAL;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ dp_io = &ctrl->parser->io;
+ phy = dp_io->phy;
+
+ dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+
+ dp_catalog_ctrl_reset(ctrl->catalog);
+
+ ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, false);
+ if (ret)
+ DRM_ERROR("Failed to disable pixel clocks. ret=%d\n", ret);
+
+ dev_pm_opp_set_rate(ctrl->dev, 0);
+ ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
+ if (ret) {
+ DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
+ }
+
+ phy_power_off(phy);
+ drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
+ phy, phy->init_count, phy->power_count);
+
+ return ret;
+}
+
+void dp_ctrl_isr(struct dp_ctrl *dp_ctrl)
+{
+ struct dp_ctrl_private *ctrl;
+ u32 isr;
+
+ if (!dp_ctrl)
+ return;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+ isr = dp_catalog_ctrl_get_interrupt(ctrl->catalog);
+
+ if (isr & DP_CTRL_INTR_READY_FOR_VIDEO) {
+ drm_dbg_dp(ctrl->drm_dev, "dp_video_ready\n");
+ complete(&ctrl->video_comp);
+ }
+
+ if (isr & DP_CTRL_INTR_IDLE_PATTERN_SENT) {
+ drm_dbg_dp(ctrl->drm_dev, "idle_patterns_sent\n");
+ complete(&ctrl->idle_comp);
+ }
+}
+
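+/*
+ * Allocate and wire up the controller. The "ctrl_link" clock is bound to
+ * the OPP framework so that link-rate changes can be requested through
+ * dev_pm_opp_set_rate(); the OPP table itself is optional.
+ */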
+struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
+ struct dp_panel *panel, struct drm_dp_aux *aux,
+ struct dp_power *power, struct dp_catalog *catalog,
+ struct dp_parser *parser)
+{
+ struct dp_ctrl_private *ctrl;
+ int ret;
+
+ if (!dev || !panel || !aux ||
+ !link || !catalog) {
+ DRM_ERROR("invalid input\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl) {
+ DRM_ERROR("Mem allocation failure\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = devm_pm_opp_set_clkname(dev, "ctrl_link");
+ if (ret) {
+ dev_err(dev, "invalid DP OPP table in device tree\n");
+ /* the caller will extract the error with PTR_ERR() */
+ return (struct dp_ctrl *)ERR_PTR(ret);
+ }
+
+ /* OPP table is optional */
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret)
+ dev_err(dev, "failed to add DP OPP table\n");
+
+ init_completion(&ctrl->idle_comp);
+ init_completion(&ctrl->video_comp);
+
+ /* in parameters */
+ ctrl->parser = parser;
+ ctrl->panel = panel;
+ ctrl->power = power;
+ ctrl->aux = aux;
+ ctrl->link = link;
+ ctrl->catalog = catalog;
+ ctrl->dev = dev;
+
+ return &ctrl->dp_ctrl;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
new file mode 100644
index 000000000..9f29734af
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_CTRL_H_
+#define _DP_CTRL_H_
+
+#include "dp_aux.h"
+#include "dp_panel.h"
+#include "dp_link.h"
+#include "dp_parser.h"
+#include "dp_power.h"
+#include "dp_catalog.h"
+
+struct dp_ctrl {
+ bool orientation;
+ atomic_t aborted;
+ bool wide_bus_en;
+};
+
+int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train);
+int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_off_link(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_isr(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl);
+struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
+ struct dp_panel *panel, struct drm_dp_aux *aux,
+ struct dp_power *power, struct dp_catalog *catalog,
+ struct dp_parser *parser);
+
+void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable);
+void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_irq_phy_exit(struct dp_ctrl *dp_ctrl);
+
+#endif /* _DP_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
new file mode 100644
index 000000000..5e35033ba
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
+
+#include <linux/debugfs.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_file.h>
+
+#include "dp_parser.h"
+#include "dp_catalog.h"
+#include "dp_aux.h"
+#include "dp_ctrl.h"
+#include "dp_debug.h"
+#include "dp_display.h"
+
+#define DEBUG_NAME "msm_dp"
+
+struct dp_debug_private {
+ struct dentry *root;
+
+ struct dp_usbpd *usbpd;
+ struct dp_link *link;
+ struct dp_panel *panel;
+ struct drm_connector *connector;
+ struct device *dev;
+ struct drm_device *drm_dev;
+
+ struct dp_debug dp_debug;
+};
+
+static int dp_debug_show(struct seq_file *seq, void *p)
+{
+ struct dp_debug_private *debug = seq->private;
+ u64 lclk = 0;
+ u32 link_params_rate;
+ const struct drm_display_mode *drm_mode;
+
+ if (!debug)
+ return -ENODEV;
+
+ drm_mode = &debug->panel->dp_mode.drm_mode;
+
+ seq_printf(seq, "\tname = %s\n", DEBUG_NAME);
+ seq_printf(seq, "\tdrm_dp_link\n\t\trate = %u\n",
+ debug->panel->link_info.rate);
+ seq_printf(seq, "\t\tnum_lanes = %u\n",
+ debug->panel->link_info.num_lanes);
+ seq_printf(seq, "\t\tcapabilities = %lu\n",
+ debug->panel->link_info.capabilities);
+ seq_printf(seq, "\tdp_panel_info:\n\t\tactive = %dx%d\n",
+ drm_mode->hdisplay,
+ drm_mode->vdisplay);
+ seq_printf(seq, "\t\tback_porch = %dx%d\n",
+ drm_mode->htotal - drm_mode->hsync_end,
+ drm_mode->vtotal - drm_mode->vsync_end);
+ seq_printf(seq, "\t\tfront_porch = %dx%d\n",
+ drm_mode->hsync_start - drm_mode->hdisplay,
+ drm_mode->vsync_start - drm_mode->vdisplay);
+ seq_printf(seq, "\t\tsync_width = %dx%d\n",
+ drm_mode->hsync_end - drm_mode->hsync_start,
+ drm_mode->vsync_end - drm_mode->vsync_start);
+ seq_printf(seq, "\t\tactive_low = %dx%d\n",
+ debug->panel->dp_mode.h_active_low,
+ debug->panel->dp_mode.v_active_low);
+ seq_printf(seq, "\t\th_skew = %d\n",
+ drm_mode->hskew);
+ seq_printf(seq, "\t\trefresh rate = %d\n",
+ drm_mode_vrefresh(drm_mode));
+ seq_printf(seq, "\t\tpixel clock khz = %d\n",
+ drm_mode->clock);
+ seq_printf(seq, "\t\tbpp = %d\n",
+ debug->panel->dp_mode.bpp);
+
+ /* Link Information */
+ seq_printf(seq, "\tdp_link:\n\t\ttest_requested = %d\n",
+ debug->link->sink_request);
+ seq_printf(seq, "\t\tnum_lanes = %d\n",
+ debug->link->link_params.num_lanes);
+ link_params_rate = debug->link->link_params.rate;
+ seq_printf(seq, "\t\tbw_code = %d\n",
+ drm_dp_link_rate_to_bw_code(link_params_rate));
+ lclk = debug->link->link_params.rate * 1000;
+ seq_printf(seq, "\t\tlclk = %lld\n", lclk);
+ seq_printf(seq, "\t\tv_level = %d\n",
+ debug->link->phy_params.v_level);
+ seq_printf(seq, "\t\tp_level = %d\n",
+ debug->link->phy_params.p_level);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dp_debug);
+
+static int dp_test_data_show(struct seq_file *m, void *data)
+{
+ const struct dp_debug_private *debug = m->private;
+ const struct drm_connector *connector = debug->connector;
+ u32 bpc;
+
+ if (connector->status == connector_status_connected) {
+ bpc = debug->link->test_video.test_bit_depth;
+ seq_printf(m, "hdisplay: %d\n",
+ debug->link->test_video.test_h_width);
+ seq_printf(m, "vdisplay: %d\n",
+ debug->link->test_video.test_v_height);
+ seq_printf(m, "bpc: %u\n",
+ dp_link_bit_depth_to_bpc(bpc));
+ } else {
+ seq_puts(m, "0");
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dp_test_data);
+
+static int dp_test_type_show(struct seq_file *m, void *data)
+{
+ const struct dp_debug_private *debug = m->private;
+ const struct drm_connector *connector = debug->connector;
+
+ if (connector->status == connector_status_connected)
+ seq_printf(m, "%02x", DP_TEST_LINK_VIDEO_PATTERN);
+ else
+ seq_puts(m, "0");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dp_test_type);
+
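+/*
+ * debugfs write handler for msm_dp_test_active: parse a decimal value from
+ * userspace and arm the compliance video-test path only when the value is
+ * exactly 1; any other value clears it.
+ */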
+static ssize_t dp_test_active_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ char *input_buffer;
+ int status = 0;
+ const struct dp_debug_private *debug;
+ const struct drm_connector *connector;
+ int val = 0;
+
+ debug = ((struct seq_file *)file->private_data)->private;
+ connector = debug->connector;
+
+ if (len == 0)
+ return 0;
+
+ input_buffer = memdup_user_nul(ubuf, len);
+ if (IS_ERR(input_buffer))
+ return PTR_ERR(input_buffer);
+
+ DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
+
+ if (connector->status == connector_status_connected) {
+ status = kstrtoint(input_buffer, 10, &val);
+ if (status < 0) {
+ kfree(input_buffer);
+ return status;
+ }
+ DRM_DEBUG_DRIVER("Got %d for test active\n", val);
+ /* To prevent erroneous activation of the compliance
+ * testing code, only accept an actual value of 1 here
+ */
+ if (val == 1)
+ debug->panel->video_test = true;
+ else
+ debug->panel->video_test = false;
+ }
+ kfree(input_buffer);
+
+ *offp += len;
+ return len;
+}
+
+static int dp_test_active_show(struct seq_file *m, void *data)
+{
+ struct dp_debug_private *debug = m->private;
+ struct drm_connector *connector = debug->connector;
+
+ if (connector->status == connector_status_connected) {
+ if (debug->panel->video_test)
+ seq_puts(m, "1");
+ else
+ seq_puts(m, "0");
+ } else {
+ seq_puts(m, "0");
+ }
+
+ return 0;
+}
+
+static int dp_test_active_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, dp_test_active_show,
+ inode->i_private);
+}
+
+static const struct file_operations test_active_fops = {
+ .owner = THIS_MODULE,
+ .open = dp_test_active_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = dp_test_active_write
+};
+
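+/*
+ * Create the per-connector debugfs directory (msm_dp-<connector name>) and
+ * populate it with the dp_debug, msm_dp_test_active, msm_dp_test_data and
+ * msm_dp_test_type nodes used for DP compliance testing.
+ */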
+static void dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor)
+{
+ char path[64];
+ struct dp_debug_private *debug = container_of(dp_debug,
+ struct dp_debug_private, dp_debug);
+
+ snprintf(path, sizeof(path), "msm_dp-%s", debug->connector->name);
+
+ debug->root = debugfs_create_dir(path, minor->debugfs_root);
+
+ debugfs_create_file("dp_debug", 0444, debug->root,
+ debug, &dp_debug_fops);
+
+ debugfs_create_file("msm_dp_test_active", 0444,
+ debug->root,
+ debug, &test_active_fops);
+
+ debugfs_create_file("msm_dp_test_data", 0444,
+ debug->root,
+ debug, &dp_test_data_fops);
+
+ debugfs_create_file("msm_dp_test_type", 0444,
+ debug->root,
+ debug, &dp_test_type_fops);
+}
+
+struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
+ struct dp_usbpd *usbpd, struct dp_link *link,
+ struct drm_connector *connector, struct drm_minor *minor)
+{
+ struct dp_debug_private *debug;
+ struct dp_debug *dp_debug;
+ int rc;
+
+ if (!dev || !panel || !usbpd || !link) {
+ DRM_ERROR("invalid input\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ debug = devm_kzalloc(dev, sizeof(*debug), GFP_KERNEL);
+ if (!debug) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ debug->dp_debug.debug_en = false;
+ debug->usbpd = usbpd;
+ debug->link = link;
+ debug->panel = panel;
+ debug->dev = dev;
+ debug->drm_dev = minor->dev;
+ debug->connector = connector;
+
+ dp_debug = &debug->dp_debug;
+ dp_debug->vdisplay = 0;
+ dp_debug->hdisplay = 0;
+ dp_debug->vrefresh = 0;
+
+ dp_debug_init(dp_debug, minor);
+
+ return dp_debug;
+ error:
+ return ERR_PTR(rc);
+}
+
+static int dp_debug_deinit(struct dp_debug *dp_debug)
+{
+ struct dp_debug_private *debug;
+
+ if (!dp_debug)
+ return -EINVAL;
+
+ debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
+
+ debugfs_remove_recursive(debug->root);
+
+ return 0;
+}
+
+void dp_debug_put(struct dp_debug *dp_debug)
+{
+ struct dp_debug_private *debug;
+
+ if (!dp_debug)
+ return;
+
+ debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
+
+ dp_debug_deinit(dp_debug);
+
+ devm_kfree(debug->dev, debug);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
new file mode 100644
index 000000000..8c0d0b517
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_debug.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_DEBUG_H_
+#define _DP_DEBUG_H_
+
+#include "dp_panel.h"
+#include "dp_link.h"
+
+/**
+ * struct dp_debug
+ * @debug_en: specifies whether debug mode is enabled
+ * @aspect_ratio: used to filter out aspect_ratio value
+ * @vdisplay: used to filter out vdisplay value
+ * @hdisplay: used to filter out hdisplay value
+ * @vrefresh: used to filter out vrefresh value
+ */
+struct dp_debug {
+ bool debug_en;
+ int aspect_ratio;
+ int vdisplay;
+ int hdisplay;
+ int vrefresh;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * dp_debug_get() - configure and get the DisplayPort debug module data
+ *
+ * @dev: device instance of the caller
+ * @panel: instance of panel module
+ * @usbpd: instance of usbpd module
+ * @link: instance of link module
+ * @connector: pointer to display connector
+ * @minor: pointer to drm minor after device registration
+ * Return: pointer to allocated debug module data
+ *
+ * This function sets up the debug module and provides a way
+ * for debugfs input to be communicated to existing modules
+ */
+struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
+ struct dp_usbpd *usbpd, struct dp_link *link,
+ struct drm_connector *connector,
+ struct drm_minor *minor);
+
+/**
+ * dp_debug_put()
+ *
+ * Cleans up dp_debug instance
+ *
+ * @dp_debug: instance of dp_debug
+ */
+void dp_debug_put(struct dp_debug *dp_debug);
+
+#else
+
+static inline
+struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
+ struct dp_usbpd *usbpd, struct dp_link *link,
+ struct drm_connector *connector, struct drm_minor *minor)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void dp_debug_put(struct dp_debug *dp_debug)
+{
+}
+
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* _DP_DEBUG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
new file mode 100644
index 000000000..d16c12351
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -0,0 +1,1784 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/component.h>
+#include <linux/of_irq.h>
+#include <linux/delay.h>
+#include <drm/display/drm_dp_aux_bus.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "dp_hpd.h"
+#include "dp_parser.h"
+#include "dp_power.h"
+#include "dp_catalog.h"
+#include "dp_aux.h"
+#include "dp_reg.h"
+#include "dp_link.h"
+#include "dp_panel.h"
+#include "dp_ctrl.h"
+#include "dp_display.h"
+#include "dp_drm.h"
+#include "dp_audio.h"
+#include "dp_debug.h"
+
+#define HPD_STRING_SIZE 30
+
+enum {
+ ISR_DISCONNECTED,
+ ISR_CONNECT_PENDING,
+ ISR_CONNECTED,
+ ISR_HPD_REPLUG_COUNT,
+ ISR_IRQ_HPD_PULSE_COUNT,
+ ISR_HPD_LO_GLITH_COUNT,
+};
+
+/* event thread connection state */
+enum {
+ ST_DISCONNECTED,
+ ST_MAINLINK_READY,
+ ST_CONNECTED,
+ ST_DISCONNECT_PENDING,
+ ST_DISPLAY_OFF,
+ ST_SUSPENDED,
+};
+
+enum {
+ EV_NO_EVENT,
+ /* hpd events */
+ EV_HPD_INIT_SETUP,
+ EV_HPD_PLUG_INT,
+ EV_IRQ_HPD_INT,
+ EV_HPD_UNPLUG_INT,
+ EV_USER_NOTIFICATION,
+};
+
+#define EVENT_TIMEOUT (HZ/10) /* 100ms */
+#define DP_EVENT_Q_MAX 8
+
+#define DP_TIMEOUT_NONE 0
+
+#define WAIT_FOR_RESUME_TIMEOUT_JIFFIES (HZ / 2)
+
+struct dp_event {
+ u32 event_id;
+ u32 data;
+ u32 delay;
+};
+
+struct dp_display_private {
+ char *name;
+ int irq;
+
+ unsigned int id;
+
+ /* state variables */
+ bool core_initialized;
+ bool phy_initialized;
+ bool hpd_irq_on;
+ bool audio_supported;
+
+ struct drm_device *drm_dev;
+ struct platform_device *pdev;
+ struct dentry *root;
+
+ struct dp_usbpd *usbpd;
+ struct dp_parser *parser;
+ struct dp_power *power;
+ struct dp_catalog *catalog;
+ struct drm_dp_aux *aux;
+ struct dp_link *link;
+ struct dp_panel *panel;
+ struct dp_ctrl *ctrl;
+ struct dp_debug *debug;
+
+ struct dp_usbpd_cb usbpd_cb;
+ struct dp_display_mode dp_mode;
+ struct msm_dp dp_display;
+
+ /* wait for audio signaling */
+ struct completion audio_comp;
+
+ /* event related only access by event thread */
+ struct mutex event_mutex;
+ wait_queue_head_t event_q;
+ u32 hpd_state;
+ u32 event_pndx;
+ u32 event_gndx;
+ struct task_struct *ev_tsk;
+ struct dp_event event_list[DP_EVENT_Q_MAX];
+ spinlock_t event_lock;
+
+ bool wide_bus_en;
+
+ struct dp_audio *audio;
+};
+
+struct msm_dp_desc {
+ phys_addr_t io_start;
+ unsigned int connector_type;
+ bool wide_bus_en;
+};
+
+struct msm_dp_config {
+ const struct msm_dp_desc *descs;
+ size_t num_descs;
+};
+
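+/*
+ * Per-SoC controller tables: dp_display_get_desc() matches the MMIO base
+ * from the device tree "reg" property against io_start to select the
+ * controller instance (and its index) for this platform.
+ */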
+static const struct msm_dp_desc sc7180_dp_descs[] = {
+ [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
+};
+
+static const struct msm_dp_config sc7180_dp_cfg = {
+ .descs = sc7180_dp_descs,
+ .num_descs = ARRAY_SIZE(sc7180_dp_descs),
+};
+
+static const struct msm_dp_desc sc7280_dp_descs[] = {
+ [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
+ [MSM_DP_CONTROLLER_1] = { .io_start = 0x0aea0000, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_en = true },
+};
+
+static const struct msm_dp_config sc7280_dp_cfg = {
+ .descs = sc7280_dp_descs,
+ .num_descs = ARRAY_SIZE(sc7280_dp_descs),
+};
+
+static const struct msm_dp_desc sc8180x_dp_descs[] = {
+ [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
+ [MSM_DP_CONTROLLER_1] = { .io_start = 0x0ae98000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
+ [MSM_DP_CONTROLLER_2] = { .io_start = 0x0ae9a000, .connector_type = DRM_MODE_CONNECTOR_eDP },
+};
+
+static const struct msm_dp_config sc8180x_dp_cfg = {
+ .descs = sc8180x_dp_descs,
+ .num_descs = ARRAY_SIZE(sc8180x_dp_descs),
+};
+
+static const struct msm_dp_desc sm8350_dp_descs[] = {
+ [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
+};
+
+static const struct msm_dp_config sm8350_dp_cfg = {
+ .descs = sm8350_dp_descs,
+ .num_descs = ARRAY_SIZE(sm8350_dp_descs),
+};
+
+static const struct of_device_id dp_dt_match[] = {
+ { .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_cfg },
+ { .compatible = "qcom,sc7280-dp", .data = &sc7280_dp_cfg },
+ { .compatible = "qcom,sc7280-edp", .data = &sc7280_dp_cfg },
+ { .compatible = "qcom,sc8180x-dp", .data = &sc8180x_dp_cfg },
+ { .compatible = "qcom,sc8180x-edp", .data = &sc8180x_dp_cfg },
+ { .compatible = "qcom,sm8350-dp", .data = &sm8350_dp_cfg },
+ {}
+};
+
+static struct dp_display_private *dev_get_dp_display_private(struct device *dev)
+{
+ struct msm_dp *dp = dev_get_drvdata(dev);
+
+ return container_of(dp, struct dp_display_private, dp_display);
+}
+
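+/*
+ * The HPD event queue is a fixed-size ring: event_pndx is the producer
+ * index, advanced here, and event_gndx is the consumer index, advanced by
+ * the event thread. The ring counts as full when advancing the producer
+ * would make it catch up with the consumer, so one slot always stays
+ * empty.
+ */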
+static int dp_add_event(struct dp_display_private *dp_priv, u32 event,
+ u32 data, u32 delay)
+{
+ unsigned long flag;
+ struct dp_event *todo;
+ int pndx;
+
+ spin_lock_irqsave(&dp_priv->event_lock, flag);
+ pndx = dp_priv->event_pndx + 1;
+ pndx %= DP_EVENT_Q_MAX;
+ if (pndx == dp_priv->event_gndx) {
+ pr_err("event_q is full: pndx=%d gndx=%d\n",
+ dp_priv->event_pndx, dp_priv->event_gndx);
+ spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+ return -EPERM;
+ }
+ todo = &dp_priv->event_list[dp_priv->event_pndx++];
+ dp_priv->event_pndx %= DP_EVENT_Q_MAX;
+ todo->event_id = event;
+ todo->data = data;
+ todo->delay = delay;
+ wake_up(&dp_priv->event_q);
+ spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+
+ return 0;
+}
+
+static int dp_del_event(struct dp_display_private *dp_priv, u32 event)
+{
+ unsigned long flag;
+ struct dp_event *todo;
+ u32 gndx;
+
+ spin_lock_irqsave(&dp_priv->event_lock, flag);
+ if (dp_priv->event_pndx == dp_priv->event_gndx) {
+ spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+ return -ENOENT;
+ }
+
+ gndx = dp_priv->event_gndx;
+ while (dp_priv->event_pndx != gndx) {
+ todo = &dp_priv->event_list[gndx];
+ if (todo->event_id == event) {
+ todo->event_id = EV_NO_EVENT; /* deleted */
+ todo->delay = 0;
+ }
+ gndx++;
+ gndx %= DP_EVENT_Q_MAX;
+ }
+ spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+
+ return 0;
+}
+
+void dp_display_signal_audio_start(struct msm_dp *dp_display)
+{
+ struct dp_display_private *dp;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ reinit_completion(&dp->audio_comp);
+}
+
+void dp_display_signal_audio_complete(struct msm_dp *dp_display)
+{
+ struct dp_display_private *dp;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ complete_all(&dp->audio_comp);
+}
+
+static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv);
+
+static int dp_display_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ int rc = 0;
+ struct dp_display_private *dp = dev_get_dp_display_private(dev);
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+ struct drm_device *drm = priv->dev;
+
+ dp->dp_display.drm_dev = drm;
+ priv->dp[dp->id] = &dp->dp_display;
+
+ rc = dp->parser->parse(dp->parser);
+ if (rc) {
+ DRM_ERROR("device tree parsing failed\n");
+ goto end;
+ }
+
+ dp->drm_dev = drm;
+ dp->aux->drm_dev = drm;
+ rc = dp_aux_register(dp->aux);
+ if (rc) {
+ DRM_ERROR("DRM DP AUX register failed\n");
+ goto end;
+ }
+
+ rc = dp_power_client_init(dp->power);
+ if (rc) {
+ DRM_ERROR("Power client create failed\n");
+ goto end;
+ }
+
+ rc = dp_register_audio_driver(dev, dp->audio);
+ if (rc) {
+ DRM_ERROR("Audio registration Dp failed\n");
+ goto end;
+ }
+
+ rc = dp_hpd_event_thread_start(dp);
+ if (rc) {
+ DRM_ERROR("Event thread create failed\n");
+ goto end;
+ }
+
+ return 0;
+end:
+ return rc;
+}
+
+static void dp_display_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct dp_display_private *dp = dev_get_dp_display_private(dev);
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+
+ /* disable all HPD interrupts */
+ if (dp->core_initialized)
+ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
+
+ kthread_stop(dp->ev_tsk);
+
+ dp_power_client_deinit(dp->power);
+ dp_unregister_audio_driver(dev, dp->audio);
+ dp_aux_unregister(dp->aux);
+ dp->drm_dev = NULL;
+ dp->aux->drm_dev = NULL;
+ priv->dp[dp->id] = NULL;
+}
+
+static const struct component_ops dp_display_comp_ops = {
+ .bind = dp_display_bind,
+ .unbind = dp_display_unbind,
+};
+
+static bool dp_display_is_ds_bridge(struct dp_panel *panel)
+{
+ return (panel->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+ DP_DWN_STRM_PORT_PRESENT);
+}
+
+static bool dp_display_is_sink_count_zero(struct dp_display_private *dp)
+{
+ drm_dbg_dp(dp->drm_dev, "present=%#x sink_count=%d\n",
+ dp->panel->dpcd[DP_DOWNSTREAMPORT_PRESENT],
+ dp->link->sink_count);
+ return dp_display_is_ds_bridge(dp->panel) &&
+ (dp->link->sink_count == 0);
+}
+
+static void dp_display_send_hpd_event(struct msm_dp *dp_display)
+{
+ struct dp_display_private *dp;
+ struct drm_connector *connector;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ connector = dp->dp_display.connector;
+ drm_helper_hpd_irq_event(connector->dev);
+}
+
+static int dp_display_send_hpd_notification(struct dp_display_private *dp,
+ bool hpd)
+{
+ if ((hpd && dp->dp_display.is_connected) ||
+ (!hpd && !dp->dp_display.is_connected)) {
+ drm_dbg_dp(dp->drm_dev, "HPD already %s\n",
+ (hpd ? "on" : "off"));
+ return 0;
+ }
+
+ /* reset video pattern flag on disconnect */
+ if (!hpd)
+ dp->panel->video_test = false;
+
+ dp->dp_display.is_connected = hpd;
+
+ drm_dbg_dp(dp->drm_dev, "type=%d hpd=%d\n",
+ dp->dp_display.connector_type, hpd);
+ dp_display_send_hpd_event(&dp->dp_display);
+
+ return 0;
+}
+
+static int dp_display_process_hpd_high(struct dp_display_private *dp)
+{
+ int rc = 0;
+ struct edid *edid;
+
+ dp->panel->max_dp_lanes = dp->parser->max_dp_lanes;
+
+ rc = dp_panel_read_sink_caps(dp->panel, dp->dp_display.connector);
+ if (rc)
+ goto end;
+
+ dp_link_process_request(dp->link);
+
+ edid = dp->panel->edid;
+
+ dp->audio_supported = drm_detect_monitor_audio(edid);
+ dp_panel_handle_sink_request(dp->panel);
+
+ dp->dp_display.max_dp_lanes = dp->parser->max_dp_lanes;
+
+ /*
+ * set sink to normal operation mode -- D0
+ * before dpcd read
+ */
+ dp_link_psm_config(dp->link, &dp->panel->link_info, false);
+
+ dp_link_reset_phy_params_vx_px(dp->link);
+ rc = dp_ctrl_on_link(dp->ctrl);
+ if (rc) {
+ DRM_ERROR("failed to complete DP link training\n");
+ goto end;
+ }
+
+ dp_add_event(dp, EV_USER_NOTIFICATION, true, 0);
+
+end:
+ return rc;
+}
+
+static void dp_display_host_phy_init(struct dp_display_private *dp)
+{
+ drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n",
+ dp->dp_display.connector_type, dp->core_initialized,
+ dp->phy_initialized);
+
+ if (!dp->phy_initialized) {
+ dp_ctrl_phy_init(dp->ctrl);
+ dp->phy_initialized = true;
+ }
+}
+
+static void dp_display_host_phy_exit(struct dp_display_private *dp)
+{
+ drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n",
+ dp->dp_display.connector_type, dp->core_initialized,
+ dp->phy_initialized);
+
+ if (dp->phy_initialized) {
+ dp_ctrl_phy_exit(dp->ctrl);
+ dp->phy_initialized = false;
+ }
+}
+
+static void dp_display_host_init(struct dp_display_private *dp)
+{
+ drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n",
+ dp->dp_display.connector_type, dp->core_initialized,
+ dp->phy_initialized);
+
+ dp_power_init(dp->power, false);
+ dp_ctrl_reset_irq_ctrl(dp->ctrl, true);
+ dp_aux_init(dp->aux);
+ dp->core_initialized = true;
+}
+
+static void dp_display_host_deinit(struct dp_display_private *dp)
+{
+ drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n",
+ dp->dp_display.connector_type, dp->core_initialized,
+ dp->phy_initialized);
+
+ dp_ctrl_reset_irq_ctrl(dp->ctrl, false);
+ dp_aux_deinit(dp->aux);
+ dp_power_deinit(dp->power);
+ dp->core_initialized = false;
+}
+
+static int dp_display_usbpd_configure_cb(struct device *dev)
+{
+ struct dp_display_private *dp = dev_get_dp_display_private(dev);
+
+ dp_display_host_phy_init(dp);
+
+ return dp_display_process_hpd_high(dp);
+}
+
+static int dp_display_usbpd_disconnect_cb(struct device *dev)
+{
+ return 0;
+}
+
+static int dp_display_notify_disconnect(struct device *dev)
+{
+ struct dp_display_private *dp = dev_get_dp_display_private(dev);
+
+ dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
+
+ return 0;
+}
+
+static void dp_display_handle_video_request(struct dp_display_private *dp)
+{
+ if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) {
+ dp->panel->video_test = true;
+ dp_link_send_test_response(dp->link);
+ }
+}
+
+static int dp_display_handle_port_status_changed(struct dp_display_private *dp)
+{
+ int rc = 0;
+
+ if (dp_display_is_sink_count_zero(dp)) {
+ drm_dbg_dp(dp->drm_dev, "sink count is zero, nothing to do\n");
+ if (dp->hpd_state != ST_DISCONNECTED) {
+ dp->hpd_state = ST_DISCONNECT_PENDING;
+ dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
+ }
+ } else {
+ if (dp->hpd_state == ST_DISCONNECTED) {
+ dp->hpd_state = ST_MAINLINK_READY;
+ rc = dp_display_process_hpd_high(dp);
+ if (rc)
+ dp->hpd_state = ST_DISCONNECTED;
+ }
+ }
+
+ return rc;
+}
+
+static int dp_display_handle_irq_hpd(struct dp_display_private *dp)
+{
+ u32 sink_request = dp->link->sink_request;
+
+ drm_dbg_dp(dp->drm_dev, "%d\n", sink_request);
+ if (dp->hpd_state == ST_DISCONNECTED) {
+ if (sink_request & DP_LINK_STATUS_UPDATED) {
+ drm_dbg_dp(dp->drm_dev, "Disconnected sink_request: %d\n",
+ sink_request);
+ DRM_ERROR("Disconnected, no DP_LINK_STATUS_UPDATED\n");
+ return -EINVAL;
+ }
+ }
+
+ dp_ctrl_handle_sink_request(dp->ctrl);
+
+ if (sink_request & DP_TEST_LINK_VIDEO_PATTERN)
+ dp_display_handle_video_request(dp);
+
+ return 0;
+}
+
+static int dp_display_usbpd_attention_cb(struct device *dev)
+{
+ int rc = 0;
+ u32 sink_request;
+ struct dp_display_private *dp = dev_get_dp_display_private(dev);
+
+ /* check for any test request issued by sink */
+ rc = dp_link_process_request(dp->link);
+ if (!rc) {
+ sink_request = dp->link->sink_request;
+ drm_dbg_dp(dp->drm_dev, "hpd_state=%d sink_request=%d\n",
+ dp->hpd_state, sink_request);
+ if (sink_request & DS_PORT_STATUS_CHANGED)
+ rc = dp_display_handle_port_status_changed(dp);
+ else
+ rc = dp_display_handle_irq_hpd(dp);
+ }
+
+ return rc;
+}
+
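+/*
+ * Plug handler, run from the event thread. It is ignored while the display
+ * is off, suspended, or already connected, and is re-queued as a delayed
+ * event while a disconnect is still pending. Otherwise it trains the link
+ * and moves the state machine to ST_MAINLINK_READY, leaving the uevent to
+ * complete the connection.
+ */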
+static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
+{
+ struct dp_usbpd *hpd = dp->usbpd;
+ u32 state;
+ int ret;
+
+ if (!hpd)
+ return 0;
+
+ mutex_lock(&dp->event_mutex);
+
+ state = dp->hpd_state;
+ drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
+ dp->dp_display.connector_type, state);
+
+ if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+
+ if (state == ST_MAINLINK_READY || state == ST_CONNECTED) {
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+
+ if (state == ST_DISCONNECT_PENDING) {
+ /* wait until ST_DISCONNECTED */
+ dp_add_event(dp, EV_HPD_PLUG_INT, 0, 1); /* delay = 1 */
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+
+ ret = dp_display_usbpd_configure_cb(&dp->pdev->dev);
+ if (ret) { /* link train failed */
+ dp->hpd_state = ST_DISCONNECTED;
+ } else {
+ dp->hpd_state = ST_MAINLINK_READY;
+ }
+
+ /* enable HPD irq_hpd/replug interrupts */
+ dp_catalog_hpd_config_intr(dp->catalog,
+ DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, true);
+
+ drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
+ dp->dp_display.connector_type, state);
+ mutex_unlock(&dp->event_mutex);
+
+ /* uevent will complete connection part */
+ return 0;
+}
+
+static void dp_display_handle_plugged_change(struct msm_dp *dp_display,
+ bool plugged)
+{
+ struct dp_display_private *dp;
+
+ dp = container_of(dp_display,
+ struct dp_display_private, dp_display);
+
+ /* notify audio subsystem only if sink supports audio */
+ if (dp_display->plugged_cb && dp_display->codec_dev &&
+ dp->audio_supported)
+ dp_display->plugged_cb(dp_display->codec_dev, plugged);
+}
+
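+/*
+ * Unplug handler. irq_hpd/replug interrupts are masked first and any queued
+ * irq_hpd events dropped; the link is then torn down according to the
+ * current state, userspace is notified, and the plug interrupt is
+ * re-enabled (external DP only) for the next connection.
+ */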
+static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
+{
+ struct dp_usbpd *hpd = dp->usbpd;
+ u32 state;
+
+ if (!hpd)
+ return 0;
+
+ mutex_lock(&dp->event_mutex);
+
+ state = dp->hpd_state;
+
+ drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
+ dp->dp_display.connector_type, state);
+
+ /* disable irq_hpd/replug interrupts */
+ dp_catalog_hpd_config_intr(dp->catalog,
+ DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, false);
+
+ /* unplugged, no more irq_hpd handle */
+ dp_del_event(dp, EV_IRQ_HPD_INT);
+
+ if (state == ST_DISCONNECTED) {
+ /* triggered by irq_hpd with sink_count = 0 */
+ if (dp->link->sink_count == 0) {
+ dp_display_host_phy_exit(dp);
+ }
+ dp_display_notify_disconnect(&dp->pdev->dev);
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ } else if (state == ST_DISCONNECT_PENDING) {
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ } else if (state == ST_MAINLINK_READY) {
+ dp_ctrl_off_link(dp->ctrl);
+ dp_display_host_phy_exit(dp);
+ dp->hpd_state = ST_DISCONNECTED;
+ dp_display_notify_disconnect(&dp->pdev->dev);
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+
+ /* disable HPD plug interrupts */
+ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, false);
+
+ /*
+ * We don't need separate work for disconnect as
+ * connect/attention interrupts are disabled
+ */
+ dp_display_notify_disconnect(&dp->pdev->dev);
+
+ if (state == ST_DISPLAY_OFF) {
+ dp->hpd_state = ST_DISCONNECTED;
+ } else {
+ dp->hpd_state = ST_DISCONNECT_PENDING;
+ }
+
+ /* signal the disconnect event early to ensure proper teardown */
+ dp_display_handle_plugged_change(&dp->dp_display, false);
+
+ /* enable HPD plug interrupt to prepare for the next plug-in */
+ if (!dp->dp_display.is_edp)
+ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, true);
+
+ drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
+ dp->dp_display.connector_type, state);
+
+ /* uevent will complete disconnection part */
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+}
+
+static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
+{
+ u32 state;
+
+ mutex_lock(&dp->event_mutex);
+
+ /* irq_hpd can happen at either connected or disconnected state */
+ state = dp->hpd_state;
+ drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
+ dp->dp_display.connector_type, state);
+
+ if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+
+ if (state == ST_MAINLINK_READY || state == ST_DISCONNECT_PENDING) {
+ /* wait until ST_CONNECTED */
+ dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+
+ dp_display_usbpd_attention_cb(&dp->pdev->dev);
+
+ drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
+ dp->dp_display.connector_type, state);
+
+ mutex_unlock(&dp->event_mutex);
+
+ return 0;
+}
+
+static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
+{
+ dp_debug_put(dp->debug);
+ dp_audio_put(dp->audio);
+ dp_panel_put(dp->panel);
+ dp_aux_put(dp->aux);
+}
+
+static int dp_init_sub_modules(struct dp_display_private *dp)
+{
+ int rc = 0;
+ struct device *dev = &dp->pdev->dev;
+ struct dp_usbpd_cb *cb = &dp->usbpd_cb;
+ struct dp_panel_in panel_in = {
+ .dev = dev,
+ };
+
+ /* Callback APIs used for cable status change event */
+ cb->configure = dp_display_usbpd_configure_cb;
+ cb->disconnect = dp_display_usbpd_disconnect_cb;
+ cb->attention = dp_display_usbpd_attention_cb;
+
+ dp->usbpd = dp_hpd_get(dev, cb);
+ if (IS_ERR(dp->usbpd)) {
+ rc = PTR_ERR(dp->usbpd);
+ DRM_ERROR("failed to initialize hpd, rc = %d\n", rc);
+ dp->usbpd = NULL;
+ goto error;
+ }
+
+ dp->parser = dp_parser_get(dp->pdev);
+ if (IS_ERR(dp->parser)) {
+ rc = PTR_ERR(dp->parser);
+ DRM_ERROR("failed to initialize parser, rc = %d\n", rc);
+ dp->parser = NULL;
+ goto error;
+ }
+
+ dp->catalog = dp_catalog_get(dev, &dp->parser->io);
+ if (IS_ERR(dp->catalog)) {
+ rc = PTR_ERR(dp->catalog);
+ DRM_ERROR("failed to initialize catalog, rc = %d\n", rc);
+ dp->catalog = NULL;
+ goto error;
+ }
+
+ dp->power = dp_power_get(dev, dp->parser);
+ if (IS_ERR(dp->power)) {
+ rc = PTR_ERR(dp->power);
+ DRM_ERROR("failed to initialize power, rc = %d\n", rc);
+ dp->power = NULL;
+ goto error;
+ }
+
+ dp->aux = dp_aux_get(dev, dp->catalog, dp->dp_display.is_edp);
+ if (IS_ERR(dp->aux)) {
+ rc = PTR_ERR(dp->aux);
+ DRM_ERROR("failed to initialize aux, rc = %d\n", rc);
+ dp->aux = NULL;
+ goto error;
+ }
+
+ dp->link = dp_link_get(dev, dp->aux);
+ if (IS_ERR(dp->link)) {
+ rc = PTR_ERR(dp->link);
+ DRM_ERROR("failed to initialize link, rc = %d\n", rc);
+ dp->link = NULL;
+ goto error_link;
+ }
+
+ panel_in.aux = dp->aux;
+ panel_in.catalog = dp->catalog;
+ panel_in.link = dp->link;
+
+ dp->panel = dp_panel_get(&panel_in);
+ if (IS_ERR(dp->panel)) {
+ rc = PTR_ERR(dp->panel);
+ DRM_ERROR("failed to initialize panel, rc = %d\n", rc);
+ dp->panel = NULL;
+ goto error_link;
+ }
+
+ dp->ctrl = dp_ctrl_get(dev, dp->link, dp->panel, dp->aux,
+ dp->power, dp->catalog, dp->parser);
+ if (IS_ERR(dp->ctrl)) {
+ rc = PTR_ERR(dp->ctrl);
+ DRM_ERROR("failed to initialize ctrl, rc = %d\n", rc);
+ dp->ctrl = NULL;
+ goto error_ctrl;
+ }
+
+ dp->audio = dp_audio_get(dp->pdev, dp->panel, dp->catalog);
+ if (IS_ERR(dp->audio)) {
+ rc = PTR_ERR(dp->audio);
+ pr_err("failed to initialize audio, rc = %d\n", rc);
+ dp->audio = NULL;
+ goto error_ctrl;
+ }
+
+ /* propagate wide_bus_en to the different layers */
+ dp->ctrl->wide_bus_en = dp->wide_bus_en;
+ dp->catalog->wide_bus_en = dp->wide_bus_en;
+
+ return rc;
+
+error_ctrl:
+ dp_panel_put(dp->panel);
+error_link:
+ dp_aux_put(dp->aux);
+error:
+ return rc;
+}
+
+static int dp_display_set_mode(struct msm_dp *dp_display,
+ struct dp_display_mode *mode)
+{
+ struct dp_display_private *dp;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ drm_mode_copy(&dp->panel->dp_mode.drm_mode, &mode->drm_mode);
+ dp->panel->dp_mode.bpp = mode->bpp;
+ dp->panel->dp_mode.capabilities = mode->capabilities;
+ dp_panel_init_panel_info(dp->panel);
+ return 0;
+}
+
+static int dp_display_enable(struct dp_display_private *dp, bool force_link_train)
+{
+ int rc = 0;
+ struct msm_dp *dp_display = &dp->dp_display;
+
+ drm_dbg_dp(dp->drm_dev, "sink_count=%d\n", dp->link->sink_count);
+ if (dp_display->power_on) {
+ drm_dbg_dp(dp->drm_dev, "Link already setup, return\n");
+ return 0;
+ }
+
+ rc = dp_ctrl_on_stream(dp->ctrl, force_link_train);
+ if (!rc)
+ dp_display->power_on = true;
+
+ return rc;
+}
+
+static int dp_display_post_enable(struct msm_dp *dp_display)
+{
+ struct dp_display_private *dp;
+ u32 rate;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ rate = dp->link->link_params.rate;
+
+ if (dp->audio_supported) {
+ dp->audio->bw_code = drm_dp_link_rate_to_bw_code(rate);
+ dp->audio->lane_count = dp->link->link_params.num_lanes;
+ }
+
+ /* signal the connect event late to synchronize video and display */
+ dp_display_handle_plugged_change(dp_display, true);
+ return 0;
+}
+
+static int dp_display_disable(struct dp_display_private *dp)
+{
+ struct msm_dp *dp_display = &dp->dp_display;
+
+ if (!dp_display->power_on)
+ return 0;
+
+ /* wait only if audio was enabled */
+ if (dp_display->audio_enabled) {
+ /* signal the disconnect event */
+ dp_display_handle_plugged_change(dp_display, false);
+ if (!wait_for_completion_timeout(&dp->audio_comp,
+ HZ * 5))
+ DRM_ERROR("audio comp timeout\n");
+ }
+
+ dp_display->audio_enabled = false;
+
+ if (dp->link->sink_count == 0) {
+ /*
+ * irq_hpd with sink_count = 0:
+ * HDMI was unplugged from the dongle
+ */
+ dp_ctrl_off_link_stream(dp->ctrl);
+ } else {
+ /*
+ * unplug interrupt:
+ * the dongle was unplugged from the DUT
+ */
+ dp_ctrl_off(dp->ctrl);
+ dp_display_host_phy_exit(dp);
+ }
+
+ dp_display->power_on = false;
+
+ drm_dbg_dp(dp->drm_dev, "sink count: %d\n", dp->link->sink_count);
+ return 0;
+}
+
+int dp_display_set_plugged_cb(struct msm_dp *dp_display,
+ hdmi_codec_plugged_cb fn, struct device *codec_dev)
+{
+ bool plugged;
+
+ dp_display->plugged_cb = fn;
+ dp_display->codec_dev = codec_dev;
+ plugged = dp_display->is_connected;
+ dp_display_handle_plugged_change(dp_display, plugged);
+
+ return 0;
+}
+
+/**
+ * dp_bridge_mode_valid - callback to determine if specified mode is valid
+ * @bridge: Pointer to drm bridge structure
+ * @info: display info
+ * @mode: Pointer to drm mode structure
+ * Returns: Validity status for specified mode
+ */
+enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ const u32 num_components = 3, default_bpp = 24;
+ struct dp_display_private *dp_display;
+ struct dp_link_info *link_info;
+ u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0;
+ struct msm_dp *dp;
+ int mode_pclk_khz = mode->clock;
+
+ dp = to_dp_bridge(bridge)->dp_display;
+
+ if (!dp || !mode_pclk_khz || !dp->connector) {
+ DRM_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ /*
+ * The eDP controller currently does not have a reliable way of
+ * enabling panel power to read sink capabilities. So, we rely
+ * on the panel driver to populate only supported modes for now.
+ */
+ if (dp->is_edp)
+ return MODE_OK;
+
+ if (mode->clock > DP_MAX_PIXEL_CLK_KHZ)
+ return MODE_CLOCK_HIGH;
+
+ dp_display = container_of(dp, struct dp_display_private, dp_display);
+ link_info = &dp_display->panel->link_info;
+
+ mode_bpp = dp->connector->display_info.bpc * num_components;
+ if (!mode_bpp)
+ mode_bpp = default_bpp;
+
+ mode_bpp = dp_panel_get_mode_bpp(dp_display->panel,
+ mode_bpp, mode_pclk_khz);
+
+ mode_rate_khz = mode_pclk_khz * mode_bpp;
+ supported_rate_khz = link_info->num_lanes * link_info->rate * 8;
+
+ if (mode_rate_khz > supported_rate_khz)
+ return MODE_BAD;
+
+ return MODE_OK;
+}
+
+int dp_display_get_modes(struct msm_dp *dp)
+{
+ struct dp_display_private *dp_display;
+
+ if (!dp) {
+ DRM_ERROR("invalid params\n");
+ return 0;
+ }
+
+ dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+ return dp_panel_get_modes(dp_display->panel,
+ dp->connector);
+}
+
+bool dp_display_check_video_test(struct msm_dp *dp)
+{
+ struct dp_display_private *dp_display;
+
+ dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+ return dp_display->panel->video_test;
+}
+
+int dp_display_get_test_bpp(struct msm_dp *dp)
+{
+ struct dp_display_private *dp_display;
+
+ if (!dp) {
+ DRM_ERROR("invalid params\n");
+ return 0;
+ }
+
+ dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+ return dp_link_bit_depth_to_bpp(
+ dp_display->link->test_video.test_bit_depth);
+}
+
+void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
+{
+ struct dp_display_private *dp_display;
+
+ dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+ /*
+ * Reading registers requires the link clocks to be on, but until a
+ * DP cable is connected they will not be, since the resolution to
+ * power up with is unknown. Hence check the power_on status before
+ * dumping DP registers to avoid a crash due to unclocked access.
+ */
+ mutex_lock(&dp_display->event_mutex);
+
+ if (!dp->power_on) {
+ mutex_unlock(&dp_display->event_mutex);
+ return;
+ }
+
+ dp_catalog_snapshot(dp_display->catalog, disp_state);
+
+ mutex_unlock(&dp_display->event_mutex);
+}
+
+static void dp_display_config_hpd(struct dp_display_private *dp)
+{
+ dp_display_host_init(dp);
+ dp_catalog_ctrl_hpd_config(dp->catalog);
+
+ /* Enable plug and unplug interrupts only for external DisplayPort */
+ if (!dp->dp_display.is_edp)
+ dp_catalog_hpd_config_intr(dp->catalog,
+ DP_DP_HPD_PLUG_INT_MASK |
+ DP_DP_HPD_UNPLUG_INT_MASK,
+ true);
+
+ /*
+ * Enable the interrupt only this first time: DP clocks are left on
+ * during disconnect and the interrupt is never disabled.
+ */
+ enable_irq(dp->irq);
+}
+
+static int hpd_event_thread(void *data)
+{
+ struct dp_display_private *dp_priv;
+ unsigned long flag;
+ struct dp_event *todo;
+ int timeout_mode = 0;
+
+ dp_priv = (struct dp_display_private *)data;
+
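+ /*
+ * A delayed event is handled by re-queueing it with its delay
+ * decremented and switching the thread to a timeout-based wait
+ * (EVENT_TIMEOUT, ~100ms per tick), so an event posted with
+ * delay N is processed roughly N ticks later.
+ */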
+ while (1) {
+ if (timeout_mode) {
+ wait_event_timeout(dp_priv->event_q,
+ (dp_priv->event_pndx == dp_priv->event_gndx) ||
+ kthread_should_stop(), EVENT_TIMEOUT);
+ } else {
+ wait_event_interruptible(dp_priv->event_q,
+ (dp_priv->event_pndx != dp_priv->event_gndx) ||
+ kthread_should_stop());
+ }
+
+ if (kthread_should_stop())
+ break;
+
+ spin_lock_irqsave(&dp_priv->event_lock, flag);
+ todo = &dp_priv->event_list[dp_priv->event_gndx];
+ if (todo->delay) {
+ struct dp_event *todo_next;
+
+ dp_priv->event_gndx++;
+ dp_priv->event_gndx %= DP_EVENT_Q_MAX;
+
+ /* re-queue the delayed event */
+ todo_next = &dp_priv->event_list[dp_priv->event_pndx++];
+ dp_priv->event_pndx %= DP_EVENT_Q_MAX;
+ todo_next->event_id = todo->event_id;
+ todo_next->data = todo->data;
+ todo_next->delay = todo->delay - 1;
+
+ /* clean up older event */
+ todo->event_id = EV_NO_EVENT;
+ todo->delay = 0;
+
+ /* switch to timeout mode */
+ timeout_mode = 1;
+ spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+ continue;
+ }
+
+ /* timeout with no events in q */
+ if (dp_priv->event_pndx == dp_priv->event_gndx) {
+ spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+ continue;
+ }
+
+ dp_priv->event_gndx++;
+ dp_priv->event_gndx %= DP_EVENT_Q_MAX;
+ timeout_mode = 0;
+ spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+
+ switch (todo->event_id) {
+ case EV_HPD_INIT_SETUP:
+ dp_display_config_hpd(dp_priv);
+ break;
+ case EV_HPD_PLUG_INT:
+ dp_hpd_plug_handle(dp_priv, todo->data);
+ break;
+ case EV_HPD_UNPLUG_INT:
+ dp_hpd_unplug_handle(dp_priv, todo->data);
+ break;
+ case EV_IRQ_HPD_INT:
+ dp_irq_hpd_handle(dp_priv, todo->data);
+ break;
+ case EV_USER_NOTIFICATION:
+ dp_display_send_hpd_notification(dp_priv,
+ todo->data);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv)
+{
+ /* set event q to empty */
+ dp_priv->event_gndx = 0;
+ dp_priv->event_pndx = 0;
+
+ dp_priv->ev_tsk = kthread_run(hpd_event_thread, dp_priv, "dp_hpd_handler");
+ if (IS_ERR(dp_priv->ev_tsk))
+ return PTR_ERR(dp_priv->ev_tsk);
+
+ return 0;
+}
+
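+/*
+ * Top-level ISR: demultiplex the HPD status bits into events for the event
+ * thread (a replug posts an unplug followed by a plug delayed by three
+ * ticks), then chain into the controller and AUX ISRs.
+ */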
+static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
+{
+ struct dp_display_private *dp = dev_id;
+ irqreturn_t ret = IRQ_HANDLED;
+ u32 hpd_isr_status;
+
+ if (!dp) {
+ DRM_ERROR("invalid data\n");
+ return IRQ_NONE;
+ }
+
+ hpd_isr_status = dp_catalog_hpd_get_intr_status(dp->catalog);
+
+ if (hpd_isr_status & 0x0F) {
+ drm_dbg_dp(dp->drm_dev, "type=%d isr=0x%x\n",
+ dp->dp_display.connector_type, hpd_isr_status);
+ /* hpd related interrupts */
+ if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK)
+ dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
+
+ if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) {
+ dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0);
+ }
+
+ if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) {
+ dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
+ dp_add_event(dp, EV_HPD_PLUG_INT, 0, 3);
+ }
+
+ if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK)
+ dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
+ }
+
+ /* DP controller isr */
+ dp_ctrl_isr(dp->ctrl);
+
+ /* DP aux isr */
+ dp_aux_isr(dp->aux);
+
+ return ret;
+}
+
+int dp_display_request_irq(struct msm_dp *dp_display)
+{
+ int rc = 0;
+ struct dp_display_private *dp;
+
+ if (!dp_display) {
+ DRM_ERROR("invalid input\n");
+ return -EINVAL;
+ }
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ dp->irq = irq_of_parse_and_map(dp->pdev->dev.of_node, 0);
+ if (!dp->irq) {
+ DRM_ERROR("failed to get irq\n");
+ return -EINVAL;
+ }
+
+ rc = devm_request_irq(dp_display->drm_dev->dev, dp->irq,
+ dp_display_irq_handler,
+ IRQF_TRIGGER_HIGH, "dp_display_isr", dp);
+ if (rc < 0) {
+ DRM_ERROR("failed to request IRQ%u: %d\n",
+ dp->irq, rc);
+ return rc;
+ }
+ disable_irq(dp->irq);
+
+ return 0;
+}
+
+static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pdev,
+ unsigned int *id)
+{
+ const struct msm_dp_config *cfg = of_device_get_match_data(&pdev->dev);
+ struct resource *res;
+ int i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return NULL;
+
+ for (i = 0; i < cfg->num_descs; i++) {
+ if (cfg->descs[i].io_start == res->start) {
+ *id = i;
+ return &cfg->descs[i];
+ }
+ }
+
+ dev_err(&pdev->dev, "unknown displayport instance\n");
+ return NULL;
+}
+
+static int dp_display_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ struct dp_display_private *dp;
+ const struct msm_dp_desc *desc;
+
+ if (!pdev || !pdev->dev.of_node) {
+ DRM_ERROR("pdev not found\n");
+ return -ENODEV;
+ }
+
+ dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+
+ desc = dp_display_get_desc(pdev, &dp->id);
+ if (!desc)
+ return -EINVAL;
+
+ dp->pdev = pdev;
+ dp->name = "drm_dp";
+ dp->dp_display.connector_type = desc->connector_type;
+ dp->wide_bus_en = desc->wide_bus_en;
+ dp->dp_display.is_edp =
+ (dp->dp_display.connector_type == DRM_MODE_CONNECTOR_eDP);
+
+ rc = dp_init_sub_modules(dp);
+ if (rc) {
+ DRM_ERROR("init sub module failed\n");
+ return -EPROBE_DEFER;
+ }
+
+ /* setup event q */
+ mutex_init(&dp->event_mutex);
+ init_waitqueue_head(&dp->event_q);
+ spin_lock_init(&dp->event_lock);
+
+ /* Store DP audio handle inside DP display */
+ dp->dp_display.dp_audio = dp->audio;
+
+ init_completion(&dp->audio_comp);
+
+ platform_set_drvdata(pdev, &dp->dp_display);
+
+ rc = component_add(&pdev->dev, &dp_display_comp_ops);
+ if (rc) {
+ DRM_ERROR("component add failed, rc=%d\n", rc);
+ dp_display_deinit_sub_modules(dp);
+ }
+
+ return rc;
+}
+
+static int dp_display_remove(struct platform_device *pdev)
+{
+ struct dp_display_private *dp = dev_get_dp_display_private(&pdev->dev);
+
+ component_del(&pdev->dev, &dp_display_comp_ops);
+ dp_display_deinit_sub_modules(dp);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static int dp_pm_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_dp *dp_display = platform_get_drvdata(pdev);
+ struct dp_display_private *dp;
+ int sink_count = 0;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ mutex_lock(&dp->event_mutex);
+
+ drm_dbg_dp(dp->drm_dev,
+ "Before, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
+ dp->dp_display.connector_type, dp->core_initialized,
+ dp->phy_initialized, dp_display->power_on);
+
+ /* start from disconnected state */
+ dp->hpd_state = ST_DISCONNECTED;
+
+ /* turn on dp ctrl/phy */
+ dp_display_host_init(dp);
+
+ dp_catalog_ctrl_hpd_config(dp->catalog);
+
+ if (!dp->dp_display.is_edp)
+ dp_catalog_hpd_config_intr(dp->catalog,
+ DP_DP_HPD_PLUG_INT_MASK |
+ DP_DP_HPD_UNPLUG_INT_MASK,
+ true);
+
+ if (dp_catalog_link_is_connected(dp->catalog)) {
+ /*
+ * set sink to normal operation mode -- D0
+ * before dpcd read
+ */
+ dp_display_host_phy_init(dp);
+ dp_link_psm_config(dp->link, &dp->panel->link_info, false);
+ sink_count = drm_dp_read_sink_count(dp->aux);
+ if (sink_count < 0)
+ sink_count = 0;
+
+ dp_display_host_phy_exit(dp);
+ }
+
+ dp->link->sink_count = sink_count;
+	/*
+	 * We cannot declare the display connected unless the cable is
+	 * plugged in and the dongle's sink_count has become 1; also,
+	 * only signal the audio path here on disconnect.
+	 */
+ if (dp->link->sink_count) {
+ dp->dp_display.is_connected = true;
+ } else {
+ dp->dp_display.is_connected = false;
+ dp_display_handle_plugged_change(dp_display, false);
+ }
+
+ drm_dbg_dp(dp->drm_dev,
+ "After, type=%d sink=%d conn=%d core_init=%d phy_init=%d power=%d\n",
+ dp->dp_display.connector_type, dp->link->sink_count,
+ dp->dp_display.is_connected, dp->core_initialized,
+ dp->phy_initialized, dp_display->power_on);
+
+ mutex_unlock(&dp->event_mutex);
+
+ return 0;
+}
+
+static int dp_pm_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_dp *dp_display = platform_get_drvdata(pdev);
+ struct dp_display_private *dp;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ mutex_lock(&dp->event_mutex);
+
+ drm_dbg_dp(dp->drm_dev,
+ "Before, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
+ dp->dp_display.connector_type, dp->core_initialized,
+ dp->phy_initialized, dp_display->power_on);
+
+	/* if the mainlink is still enabled, turn off the link stream */
+ if (dp_power_clk_status(dp->power, DP_CTRL_PM))
+ dp_ctrl_off_link_stream(dp->ctrl);
+
+ dp_display_host_phy_exit(dp);
+
+ /* host_init will be called at pm_resume */
+ dp_display_host_deinit(dp);
+
+ dp->hpd_state = ST_SUSPENDED;
+
+ drm_dbg_dp(dp->drm_dev,
+ "After, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
+ dp->dp_display.connector_type, dp->core_initialized,
+ dp->phy_initialized, dp_display->power_on);
+
+ mutex_unlock(&dp->event_mutex);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dp_pm_ops = {
+ .suspend = dp_pm_suspend,
+ .resume = dp_pm_resume,
+};
+
+static struct platform_driver dp_display_driver = {
+ .probe = dp_display_probe,
+ .remove = dp_display_remove,
+ .driver = {
+ .name = "msm-dp-display",
+ .of_match_table = dp_dt_match,
+ .suppress_bind_attrs = true,
+ .pm = &dp_pm_ops,
+ },
+};
+
+int __init msm_dp_register(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&dp_display_driver);
+ if (ret)
+		DRM_ERROR("DP display driver register failed\n");
+
+ return ret;
+}
+
+void __exit msm_dp_unregister(void)
+{
+ platform_driver_unregister(&dp_display_driver);
+}
+
+void msm_dp_irq_postinstall(struct msm_dp *dp_display)
+{
+ struct dp_display_private *dp;
+
+ if (!dp_display)
+ return;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ if (!dp_display->is_edp)
+ dp_add_event(dp, EV_HPD_INIT_SETUP, 0, 100);
+}
+
+bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
+{
+ struct dp_display_private *dp;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ return dp->wide_bus_en;
+}
+
+void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor)
+{
+ struct dp_display_private *dp;
+ struct device *dev;
+ int rc;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+ dev = &dp->pdev->dev;
+
+ dp->debug = dp_debug_get(dev, dp->panel, dp->usbpd,
+ dp->link, dp->dp_display.connector,
+ minor);
+ if (IS_ERR(dp->debug)) {
+ rc = PTR_ERR(dp->debug);
+ DRM_ERROR("failed to initialize debug, rc = %d\n", rc);
+ dp->debug = NULL;
+ }
+}
+
+static void of_dp_aux_depopulate_bus_void(void *data)
+{
+ of_dp_aux_depopulate_bus(data);
+}
+
+static int dp_display_get_next_bridge(struct msm_dp *dp)
+{
+ int rc;
+ struct dp_display_private *dp_priv;
+ struct device_node *aux_bus;
+ struct device *dev;
+
+ dp_priv = container_of(dp, struct dp_display_private, dp_display);
+ dev = &dp_priv->pdev->dev;
+ aux_bus = of_get_child_by_name(dev->of_node, "aux-bus");
+
+ if (aux_bus && dp->is_edp) {
+ dp_display_host_init(dp_priv);
+ dp_catalog_ctrl_hpd_config(dp_priv->catalog);
+ dp_display_host_phy_init(dp_priv);
+ enable_irq(dp_priv->irq);
+
+ /*
+ * The code below assumes that the panel will finish probing
+		 * by the time of_dp_aux_populate_bus() returns.
+ * This isn't a great assumption since it will fail if the
+ * panel driver is probed asynchronously but is the best we
+ * can do without a bigger driver reorganization.
+ */
+ rc = of_dp_aux_populate_bus(dp_priv->aux, NULL);
+ of_node_put(aux_bus);
+ if (rc)
+ goto error;
+
+ rc = devm_add_action_or_reset(dp->drm_dev->dev,
+ of_dp_aux_depopulate_bus_void,
+ dp_priv->aux);
+ if (rc)
+ goto error;
+ } else if (dp->is_edp) {
+ DRM_ERROR("eDP aux_bus not found\n");
+ return -ENODEV;
+ }
+
+ /*
+ * External bridges are mandatory for eDP interfaces: one has to
+ * provide at least an eDP panel (which gets wrapped into panel-bridge).
+ *
+ * For DisplayPort interfaces external bridges are optional, so
+ * silently ignore an error if one is not present (-ENODEV).
+ */
+ rc = devm_dp_parser_find_next_bridge(dp->drm_dev->dev, dp_priv->parser);
+ if (!dp->is_edp && rc == -ENODEV)
+ return 0;
+
+ if (!rc) {
+ dp->next_bridge = dp_priv->parser->next_bridge;
+ return 0;
+ }
+
+error:
+ if (dp->is_edp) {
+ disable_irq(dp_priv->irq);
+ dp_display_host_phy_exit(dp_priv);
+ dp_display_host_deinit(dp_priv);
+ }
+ return rc;
+}
+
+int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
+ struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv;
+ struct dp_display_private *dp_priv;
+ int ret;
+
+ if (WARN_ON(!encoder) || WARN_ON(!dp_display) || WARN_ON(!dev))
+ return -EINVAL;
+
+ priv = dev->dev_private;
+
+ if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
+ DRM_DEV_ERROR(dev->dev, "too many bridges\n");
+ return -ENOSPC;
+ }
+
+ dp_display->drm_dev = dev;
+
+ dp_priv = container_of(dp_display, struct dp_display_private, dp_display);
+
+ ret = dp_display_request_irq(dp_display);
+ if (ret) {
+ DRM_ERROR("request_irq failed, ret=%d\n", ret);
+ return ret;
+ }
+
+ ret = dp_display_get_next_bridge(dp_display);
+ if (ret)
+ return ret;
+
+ dp_display->bridge = dp_bridge_init(dp_display, dev, encoder);
+ if (IS_ERR(dp_display->bridge)) {
+ ret = PTR_ERR(dp_display->bridge);
+ DRM_DEV_ERROR(dev->dev,
+ "failed to create dp bridge: %d\n", ret);
+ dp_display->bridge = NULL;
+ return ret;
+ }
+
+ priv->bridges[priv->num_bridges++] = dp_display->bridge;
+
+ dp_display->connector = dp_drm_connector_init(dp_display, encoder);
+ if (IS_ERR(dp_display->connector)) {
+ ret = PTR_ERR(dp_display->connector);
+ DRM_DEV_ERROR(dev->dev,
+ "failed to create dp connector: %d\n", ret);
+ dp_display->connector = NULL;
+ return ret;
+ }
+
+ dp_priv->panel->connector = dp_display->connector;
+
+ return 0;
+}
+
+void dp_bridge_enable(struct drm_bridge *drm_bridge)
+{
+ struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
+ struct msm_dp *dp = dp_bridge->dp_display;
+ int rc = 0;
+ struct dp_display_private *dp_display;
+ u32 state;
+ bool force_link_train = false;
+
+ dp_display = container_of(dp, struct dp_display_private, dp_display);
+ if (!dp_display->dp_mode.drm_mode.clock) {
+ DRM_ERROR("invalid params\n");
+ return;
+ }
+
+ if (dp->is_edp)
+ dp_hpd_plug_handle(dp_display, 0);
+
+ mutex_lock(&dp_display->event_mutex);
+
+ state = dp_display->hpd_state;
+ if (state != ST_DISPLAY_OFF && state != ST_MAINLINK_READY) {
+ mutex_unlock(&dp_display->event_mutex);
+ return;
+ }
+
+ rc = dp_display_set_mode(dp, &dp_display->dp_mode);
+ if (rc) {
+ DRM_ERROR("Failed to perform a mode set, rc=%d\n", rc);
+ mutex_unlock(&dp_display->event_mutex);
+ return;
+ }
+
+ state = dp_display->hpd_state;
+
+ if (state == ST_DISPLAY_OFF) {
+ dp_display_host_phy_init(dp_display);
+ force_link_train = true;
+ }
+
+ dp_display_enable(dp_display, force_link_train);
+
+ rc = dp_display_post_enable(dp);
+ if (rc) {
+ DRM_ERROR("DP display post enable failed, rc=%d\n", rc);
+ dp_display_disable(dp_display);
+ }
+
+ /* completed connection */
+ dp_display->hpd_state = ST_CONNECTED;
+
+ drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type);
+ mutex_unlock(&dp_display->event_mutex);
+}
+
+void dp_bridge_disable(struct drm_bridge *drm_bridge)
+{
+ struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
+ struct msm_dp *dp = dp_bridge->dp_display;
+ struct dp_display_private *dp_display;
+
+ dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+ dp_ctrl_push_idle(dp_display->ctrl);
+}
+
+void dp_bridge_post_disable(struct drm_bridge *drm_bridge)
+{
+ struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
+ struct msm_dp *dp = dp_bridge->dp_display;
+ u32 state;
+ struct dp_display_private *dp_display;
+
+ dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+ if (dp->is_edp)
+ dp_hpd_unplug_handle(dp_display, 0);
+
+ mutex_lock(&dp_display->event_mutex);
+
+ state = dp_display->hpd_state;
+ if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED) {
+ mutex_unlock(&dp_display->event_mutex);
+ return;
+ }
+
+ dp_display_disable(dp_display);
+
+ state = dp_display->hpd_state;
+ if (state == ST_DISCONNECT_PENDING) {
+ /* completed disconnection */
+ dp_display->hpd_state = ST_DISCONNECTED;
+ } else {
+ dp_display->hpd_state = ST_DISPLAY_OFF;
+ }
+
+ drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type);
+ mutex_unlock(&dp_display->event_mutex);
+}
+
+void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
+ struct msm_dp *dp = dp_bridge->dp_display;
+ struct dp_display_private *dp_display;
+
+ dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+ memset(&dp_display->dp_mode, 0x0, sizeof(struct dp_display_mode));
+
+ if (dp_display_check_video_test(dp))
+ dp_display->dp_mode.bpp = dp_display_get_test_bpp(dp);
+ else /* Default num_components per pixel = 3 */
+ dp_display->dp_mode.bpp = dp->connector->display_info.bpc * 3;
+
+ if (!dp_display->dp_mode.bpp)
+ dp_display->dp_mode.bpp = 24; /* Default bpp */
+
+ drm_mode_copy(&dp_display->dp_mode.drm_mode, adjusted_mode);
+
+ dp_display->dp_mode.v_active_low =
+ !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NVSYNC);
+
+ dp_display->dp_mode.h_active_low =
+ !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NHSYNC);
+}
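
dp_bridge_mode_set() computes bits per pixel as bpc × 3 (three color components), falls back to 24 bpp when the sink reports no bpc, and latches active-low sync polarity from the mode flags. The same arithmetic in isolation — the flag bits below are stand-ins for DRM_MODE_FLAG_NHSYNC/NVSYNC:

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for DRM_MODE_FLAG_NHSYNC / DRM_MODE_FLAG_NVSYNC; the real
 * bit positions live in drm_modes.h. */
#define MODE_FLAG_NHSYNC (1 << 0)
#define MODE_FLAG_NVSYNC (1 << 1)

struct demo_mode {
	unsigned int bpc;	/* bits per color component from the sink */
	unsigned int flags;
};

int main(void)
{
	struct demo_mode m = { .bpc = 0, .flags = MODE_FLAG_NVSYNC };
	unsigned int bpp = m.bpc * 3;	/* three components per pixel */

	if (!bpp)
		bpp = 24;	/* default when the sink reports no bpc */

	bool h_active_low = !!(m.flags & MODE_FLAG_NHSYNC);
	bool v_active_low = !!(m.flags & MODE_FLAG_NVSYNC);

	printf("bpp=%u h_active_low=%d v_active_low=%d\n",
	       bpp, h_active_low, v_active_low);
	return 0;
}
```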
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
new file mode 100644
index 000000000..dcedf021f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_DISPLAY_H_
+#define _DP_DISPLAY_H_
+
+#include "dp_panel.h"
+#include <sound/hdmi-codec.h>
+#include "disp/msm_disp_snapshot.h"
+
+struct msm_dp {
+ struct drm_device *drm_dev;
+ struct device *codec_dev;
+ struct drm_bridge *bridge;
+ struct drm_connector *connector;
+ struct drm_bridge *next_bridge;
+ bool is_connected;
+ bool audio_enabled;
+ bool power_on;
+ unsigned int connector_type;
+ bool is_edp;
+
+ hdmi_codec_plugged_cb plugged_cb;
+
+ bool wide_bus_en;
+
+ u32 max_dp_lanes;
+ struct dp_audio *dp_audio;
+};
+
+int dp_display_set_plugged_cb(struct msm_dp *dp_display,
+ hdmi_codec_plugged_cb fn, struct device *codec_dev);
+int dp_display_get_modes(struct msm_dp *dp_display);
+int dp_display_request_irq(struct msm_dp *dp_display);
+bool dp_display_check_video_test(struct msm_dp *dp_display);
+int dp_display_get_test_bpp(struct msm_dp *dp_display);
+void dp_display_signal_audio_start(struct msm_dp *dp_display);
+void dp_display_signal_audio_complete(struct msm_dp *dp_display);
+
+#endif /* _DP_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
new file mode 100644
index 000000000..6db82f9b0
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_crtc.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "dp_drm.h"
+
+/**
+ * dp_bridge_detect - callback to determine if connector is connected
+ * @bridge: Pointer to drm bridge structure
+ * Returns: Bridge's 'is connected' status
+ */
+static enum drm_connector_status dp_bridge_detect(struct drm_bridge *bridge)
+{
+ struct msm_dp *dp;
+
+ dp = to_dp_bridge(bridge)->dp_display;
+
+ drm_dbg_dp(dp->drm_dev, "is_connected = %s\n",
+ (dp->is_connected) ? "true" : "false");
+
+ return (dp->is_connected) ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+static int dp_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct msm_dp *dp;
+
+ dp = to_dp_bridge(bridge)->dp_display;
+
+ drm_dbg_dp(dp->drm_dev, "is_connected = %s\n",
+ (dp->is_connected) ? "true" : "false");
+
+ /*
+ * There is no protection in the DRM framework to check if the display
+ * pipeline has been already disabled before trying to disable it again.
+ * Hence if the sink is unplugged, the pipeline gets disabled, but the
+ * crtc->active is still true. Any attempt to set the mode or manually
+	 * disable this encoder will result in a crash.
+ *
+ * TODO: add support for telling the DRM subsystem that the pipeline is
+ * disabled by the hardware and thus all access to it should be forbidden.
+ * After that this piece of code can be removed.
+ */
+ if (bridge->ops & DRM_BRIDGE_OP_HPD)
+ return (dp->is_connected) ? 0 : -ENOTCONN;
+
+ return 0;
+}
+
+
+/**
+ * dp_bridge_get_modes - callback to add drm modes via drm_mode_probed_add()
+ * @bridge: Pointer to drm bridge
+ * @connector: Pointer to drm connector structure
+ * Returns: Number of modes added
+ */
+static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector)
+{
+ int rc = 0;
+ struct msm_dp *dp;
+
+ if (!connector)
+ return 0;
+
+ dp = to_dp_bridge(bridge)->dp_display;
+
+	/* for the pluggable case, the EDID is assumed to be read on HPD */
+ if (dp->is_connected) {
+ rc = dp_display_get_modes(dp);
+ if (rc <= 0) {
+ DRM_ERROR("failed to get DP sink modes, rc=%d\n", rc);
+ return rc;
+ }
+ } else {
+ drm_dbg_dp(connector->dev, "No sink connected\n");
+ }
+ return rc;
+}
+
+static const struct drm_bridge_funcs dp_bridge_ops = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .enable = dp_bridge_enable,
+ .disable = dp_bridge_disable,
+ .post_disable = dp_bridge_post_disable,
+ .mode_set = dp_bridge_mode_set,
+ .mode_valid = dp_bridge_mode_valid,
+ .get_modes = dp_bridge_get_modes,
+ .detect = dp_bridge_detect,
+ .atomic_check = dp_bridge_atomic_check,
+};
+
+struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
+ struct drm_encoder *encoder)
+{
+ int rc;
+ struct msm_dp_bridge *dp_bridge;
+ struct drm_bridge *bridge;
+
+ dp_bridge = devm_kzalloc(dev->dev, sizeof(*dp_bridge), GFP_KERNEL);
+ if (!dp_bridge)
+ return ERR_PTR(-ENOMEM);
+
+ dp_bridge->dp_display = dp_display;
+
+ bridge = &dp_bridge->bridge;
+ bridge->funcs = &dp_bridge_ops;
+ bridge->type = dp_display->connector_type;
+
+ /*
+ * Many ops only make sense for DP. Why?
+ * - Detect/HPD are used by DRM to know if a display is _physically_
+ * there, not whether the display is powered on / finished initting.
+ * On eDP we assume the display is always there because you can't
+ * know until power is applied. If we don't implement the ops DRM will
+ * assume our display is always there.
+ * - Currently eDP mode reading is driven by the panel driver. This
+ * allows the panel driver to properly power itself on to read the
+ * modes.
+ */
+ if (!dp_display->is_edp) {
+ bridge->ops =
+ DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_HPD |
+ DRM_BRIDGE_OP_MODES;
+ }
+
+ drm_bridge_add(bridge);
+
+ rc = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (rc) {
+ DRM_ERROR("failed to attach bridge, rc=%d\n", rc);
+ drm_bridge_remove(bridge);
+
+ return ERR_PTR(rc);
+ }
+
+ if (dp_display->next_bridge) {
+ rc = drm_bridge_attach(encoder,
+ dp_display->next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (rc < 0) {
+ DRM_ERROR("failed to attach panel bridge: %d\n", rc);
+ drm_bridge_remove(bridge);
+ return ERR_PTR(rc);
+ }
+ }
+
+ return bridge;
+}
+
+/* connector initialization */
+struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder)
+{
+ struct drm_connector *connector = NULL;
+
+ connector = drm_bridge_connector_init(dp_display->drm_dev, encoder);
+ if (IS_ERR(connector))
+ return connector;
+
+ drm_connector_attach_encoder(connector, encoder);
+
+ return connector;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
new file mode 100644
index 000000000..82035dbb0
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_DRM_H_
+#define _DP_DRM_H_
+
+#include <linux/types.h>
+#include <drm/drm_bridge.h>
+
+#include "msm_drv.h"
+#include "dp_display.h"
+
+struct msm_dp_bridge {
+ struct drm_bridge bridge;
+ struct msm_dp *dp_display;
+};
+
+#define to_dp_bridge(x) container_of((x), struct msm_dp_bridge, bridge)
+
+struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder);
+struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
+ struct drm_encoder *encoder);
+
+void dp_bridge_enable(struct drm_bridge *drm_bridge);
+void dp_bridge_disable(struct drm_bridge *drm_bridge);
+void dp_bridge_post_disable(struct drm_bridge *drm_bridge);
+enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode);
+void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode);
+
+#endif /* _DP_DRM_H_ */
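
to_dp_bridge() above relies on container_of() to recover the driver-private wrapper from the embedded drm_bridge handed to every callback. A minimal userspace rendition of that embed-and-recover pattern:

```c
/* Userspace sketch of the container_of pattern behind to_dp_bridge(). */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_bridge { int type; };

struct demo_dp_bridge {
	struct demo_bridge bridge;	/* embedded base object */
	int dp_id;			/* private data */
};

#define to_demo_dp_bridge(x) container_of((x), struct demo_dp_bridge, bridge)

int main(void)
{
	struct demo_dp_bridge dp = { .bridge = { .type = 10 }, .dp_id = 7 };
	struct demo_bridge *b = &dp.bridge;	/* what callbacks receive */

	printf("recovered dp_id=%d\n", to_demo_dp_bridge(b)->dp_id);
	return 0;
}
```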
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c
new file mode 100644
index 000000000..db98a1d43
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_hpd.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include "dp_hpd.h"
+
+/* DP specific VDM commands */
+#define DP_USBPD_VDM_STATUS 0x10
+#define DP_USBPD_VDM_CONFIGURE 0x11
+
+/* USBPD-TypeC specific Macros */
+#define VDM_VERSION 0x0
+#define USB_C_DP_SID 0xFF01
+
+struct dp_hpd_private {
+ struct device *dev;
+ struct dp_usbpd_cb *dp_cb;
+ struct dp_usbpd dp_usbpd;
+};
+
+int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
+{
+ int rc = 0;
+ struct dp_hpd_private *hpd_priv;
+
+ hpd_priv = container_of(dp_usbpd, struct dp_hpd_private,
+ dp_usbpd);
+
+ if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure
+ || !hpd_priv->dp_cb->disconnect) {
+ pr_err("hpd dp_cb not initialized\n");
+ return -EINVAL;
+ }
+ if (hpd)
+ hpd_priv->dp_cb->configure(hpd_priv->dev);
+ else
+ hpd_priv->dp_cb->disconnect(hpd_priv->dev);
+
+ return rc;
+}
+
+struct dp_usbpd *dp_hpd_get(struct device *dev, struct dp_usbpd_cb *cb)
+{
+ struct dp_hpd_private *dp_hpd;
+
+ if (!cb) {
+ pr_err("invalid cb data\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ dp_hpd = devm_kzalloc(dev, sizeof(*dp_hpd), GFP_KERNEL);
+ if (!dp_hpd)
+ return ERR_PTR(-ENOMEM);
+
+ dp_hpd->dev = dev;
+ dp_hpd->dp_cb = cb;
+
+ dp_hpd->dp_usbpd.connect = dp_hpd_connect;
+
+ return &dp_hpd->dp_usbpd;
+}
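
dp_hpd_connect() simply dispatches to the configure/disconnect callbacks supplied through dp_hpd_get(): configure() on HPD assert, disconnect() on deassert. A hedged sketch of that dispatch with stub callbacks (the stubs stand in for the real handlers, which live elsewhere in the driver):

```c
#include <stdbool.h>
#include <stdio.h>

struct demo_cb {
	int (*configure)(void *dev);
	int (*disconnect)(void *dev);
};

static int demo_configure(void *dev)  { (void)dev; puts("configure");  return 0; }
static int demo_disconnect(void *dev) { (void)dev; puts("disconnect"); return 0; }

static int demo_hpd_connect(const struct demo_cb *cb, void *dev, bool hpd)
{
	if (!cb || !cb->configure || !cb->disconnect)
		return -1;	/* mirrors the driver's -EINVAL check */
	return hpd ? cb->configure(dev) : cb->disconnect(dev);
}

int main(void)
{
	const struct demo_cb cb = { demo_configure, demo_disconnect };

	demo_hpd_connect(&cb, NULL, true);	/* plug */
	demo_hpd_connect(&cb, NULL, false);	/* unplug */
	return 0;
}
```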
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.h b/drivers/gpu/drm/msm/dp/dp_hpd.h
new file mode 100644
index 000000000..8feec5aa5
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_hpd.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_HPD_H_
+#define _DP_HPD_H_
+
+//#include <linux/usb/usbpd.h>
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+enum plug_orientation {
+ ORIENTATION_NONE,
+ ORIENTATION_CC1,
+ ORIENTATION_CC2,
+};
+
+/**
+ * struct dp_usbpd - DisplayPort status
+ *
+ * @orientation: plug orientation configuration
+ * @low_pow_st: low power state
+ * @adaptor_dp_en: adaptor functionality enabled
+ * @multi_func: multi-function preferred
+ * @usb_config_req: request to switch to usb
+ * @exit_dp_mode: request exit from displayport mode
+ * @hpd_irq: Change in the status since last message
+ * @alt_mode_cfg_done: bool to specify alt mode status
+ * @debug_en: bool to specify debug mode
+ * @connect: simulate disconnect or connect for debug mode
+ */
+struct dp_usbpd {
+ enum plug_orientation orientation;
+ bool low_pow_st;
+ bool adaptor_dp_en;
+ bool multi_func;
+ bool usb_config_req;
+ bool exit_dp_mode;
+ bool hpd_irq;
+ bool alt_mode_cfg_done;
+ bool debug_en;
+
+ int (*connect)(struct dp_usbpd *dp_usbpd, bool hpd);
+};
+
+/**
+ * struct dp_usbpd_cb - callback functions provided by the client
+ *
+ * @configure: called by the usbpd module when PD communication has
+ * been completed and the usb peripheral has been configured in
+ * DP mode.
+ * @disconnect: notify the cable disconnect issued by usb.
+ * @attention: notify any attention message issued by usb.
+ */
+struct dp_usbpd_cb {
+ int (*configure)(struct device *dev);
+ int (*disconnect)(struct device *dev);
+ int (*attention)(struct device *dev);
+};
+
+/**
+ * dp_hpd_get() - setup hpd module
+ *
+ * @dev: device instance of the caller
+ * @cb: struct containing callback function pointers.
+ *
+ * This function allows the client to initialize the usbpd
+ * module. The module will communicate with HPD module.
+ */
+struct dp_usbpd *dp_hpd_get(struct device *dev, struct dp_usbpd_cb *cb);
+
+int dp_hpd_register(struct dp_usbpd *dp_usbpd);
+void dp_hpd_unregister(struct dp_usbpd *dp_usbpd);
+int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd);
+
+#endif /* _DP_HPD_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
new file mode 100644
index 000000000..cb66d1126
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -0,0 +1,1223 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
+
+#include <drm/drm_print.h>
+
+#include "dp_link.h"
+#include "dp_panel.h"
+
+#define DP_TEST_REQUEST_MASK 0x7F
+
+enum audio_sample_rate {
+ AUDIO_SAMPLE_RATE_32_KHZ = 0x00,
+ AUDIO_SAMPLE_RATE_44_1_KHZ = 0x01,
+ AUDIO_SAMPLE_RATE_48_KHZ = 0x02,
+ AUDIO_SAMPLE_RATE_88_2_KHZ = 0x03,
+ AUDIO_SAMPLE_RATE_96_KHZ = 0x04,
+ AUDIO_SAMPLE_RATE_176_4_KHZ = 0x05,
+ AUDIO_SAMPLE_RATE_192_KHZ = 0x06,
+};
+
+enum audio_pattern_type {
+ AUDIO_TEST_PATTERN_OPERATOR_DEFINED = 0x00,
+ AUDIO_TEST_PATTERN_SAWTOOTH = 0x01,
+};
+
+struct dp_link_request {
+ u32 test_requested;
+ u32 test_link_rate;
+ u32 test_lane_count;
+};
+
+struct dp_link_private {
+ u32 prev_sink_count;
+ struct device *dev;
+ struct drm_device *drm_dev;
+ struct drm_dp_aux *aux;
+ struct dp_link dp_link;
+
+ struct dp_link_request request;
+ struct mutex psm_mutex;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+};
+
+static int dp_aux_link_power_up(struct drm_dp_aux *aux,
+ struct dp_link_info *link)
+{
+ u8 value;
+ int err;
+
+ if (link->revision < 0x11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
+static int dp_aux_link_power_down(struct drm_dp_aux *aux,
+ struct dp_link_info *link)
+{
+ u8 value;
+ int err;
+
+ if (link->revision < 0x11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D3;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
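
Both power helpers perform a read-modify-write of the sink's SET_POWER register: clear the power-state field, then set D0 to wake the sink or D3 to power it down. The bit manipulation on its own (the 0x3/0x1/0x2 encodings follow the DPCD SET_POWER field layout; treat them here as illustrative):

```c
#include <stdio.h>

#define SET_POWER_MASK 0x3	/* power-state field */
#define SET_POWER_D0   0x1	/* normal operation */
#define SET_POWER_D3   0x2	/* power-down state */

static unsigned char set_power_state(unsigned char reg, unsigned char state)
{
	reg &= ~SET_POWER_MASK;	/* clear the old state */
	reg |= state;		/* request the new one */
	return reg;
}

int main(void)
{
	unsigned char reg = 0x40 | SET_POWER_D3;	/* pretend readback */

	reg = set_power_state(reg, SET_POWER_D0);
	printf("SET_POWER now 0x%02x\n", reg);	/* 0x41 */
	return 0;
}
```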
+
+static int dp_link_get_period(struct dp_link_private *link, int const addr)
+{
+ int ret = 0;
+ u8 data;
+ u32 const max_audio_period = 0xA;
+
+ /* TEST_AUDIO_PERIOD_CH_XX */
+ if (drm_dp_dpcd_readb(link->aux, addr, &data) < 0) {
+ DRM_ERROR("failed to read test_audio_period (0x%x)\n", addr);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* Period - Bits 3:0 */
+ data = data & 0xF;
+ if ((int)data > max_audio_period) {
+ DRM_ERROR("invalid test_audio_period_ch_1 = 0x%x\n", data);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = data;
+exit:
+ return ret;
+}
+
+static int dp_link_parse_audio_channel_period(struct dp_link_private *link)
+{
+ int ret = 0;
+ struct dp_link_test_audio *req = &link->dp_link.test_audio;
+
+ ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH1);
+ if (ret == -EINVAL)
+ goto exit;
+
+ req->test_audio_period_ch_1 = ret;
+ drm_dbg_dp(link->drm_dev, "test_audio_period_ch_1 = 0x%x\n", ret);
+
+ ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH2);
+ if (ret == -EINVAL)
+ goto exit;
+
+ req->test_audio_period_ch_2 = ret;
+ drm_dbg_dp(link->drm_dev, "test_audio_period_ch_2 = 0x%x\n", ret);
+
+ /* TEST_AUDIO_PERIOD_CH_3 (Byte 0x275) */
+ ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH3);
+ if (ret == -EINVAL)
+ goto exit;
+
+ req->test_audio_period_ch_3 = ret;
+ drm_dbg_dp(link->drm_dev, "test_audio_period_ch_3 = 0x%x\n", ret);
+
+ ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH4);
+ if (ret == -EINVAL)
+ goto exit;
+
+ req->test_audio_period_ch_4 = ret;
+ drm_dbg_dp(link->drm_dev, "test_audio_period_ch_4 = 0x%x\n", ret);
+
+ ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH5);
+ if (ret == -EINVAL)
+ goto exit;
+
+ req->test_audio_period_ch_5 = ret;
+ drm_dbg_dp(link->drm_dev, "test_audio_period_ch_5 = 0x%x\n", ret);
+
+ ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH6);
+ if (ret == -EINVAL)
+ goto exit;
+
+ req->test_audio_period_ch_6 = ret;
+ drm_dbg_dp(link->drm_dev, "test_audio_period_ch_6 = 0x%x\n", ret);
+
+ ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH7);
+ if (ret == -EINVAL)
+ goto exit;
+
+ req->test_audio_period_ch_7 = ret;
+ drm_dbg_dp(link->drm_dev, "test_audio_period_ch_7 = 0x%x\n", ret);
+
+ ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH8);
+ if (ret == -EINVAL)
+ goto exit;
+
+ req->test_audio_period_ch_8 = ret;
+ drm_dbg_dp(link->drm_dev, "test_audio_period_ch_8 = 0x%x\n", ret);
+exit:
+ return ret;
+}
+
+static int dp_link_parse_audio_pattern_type(struct dp_link_private *link)
+{
+ int ret = 0;
+ u8 data;
+ ssize_t rlen;
+ int const max_audio_pattern_type = 0x1;
+
+ rlen = drm_dp_dpcd_readb(link->aux,
+ DP_TEST_AUDIO_PATTERN_TYPE, &data);
+ if (rlen < 0) {
+ DRM_ERROR("failed to read link audio mode. rlen=%zd\n", rlen);
+ return rlen;
+ }
+
+ /* Audio Pattern Type - Bits 7:0 */
+ if ((int)data > max_audio_pattern_type) {
+ DRM_ERROR("invalid audio pattern type = 0x%x\n", data);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ link->dp_link.test_audio.test_audio_pattern_type = data;
+ drm_dbg_dp(link->drm_dev, "audio pattern type = 0x%x\n", data);
+exit:
+ return ret;
+}
+
+static int dp_link_parse_audio_mode(struct dp_link_private *link)
+{
+ int ret = 0;
+ u8 data;
+ ssize_t rlen;
+ int const max_audio_sampling_rate = 0x6;
+ int const max_audio_channel_count = 0x8;
+ int sampling_rate = 0x0;
+ int channel_count = 0x0;
+
+ rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_AUDIO_MODE, &data);
+ if (rlen < 0) {
+ DRM_ERROR("failed to read link audio mode. rlen=%zd\n", rlen);
+ return rlen;
+ }
+
+ /* Sampling Rate - Bits 3:0 */
+ sampling_rate = data & 0xF;
+ if (sampling_rate > max_audio_sampling_rate) {
+ DRM_ERROR("sampling rate (0x%x) greater than max (0x%x)\n",
+ sampling_rate, max_audio_sampling_rate);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* Channel Count - Bits 7:4 */
+ channel_count = ((data & 0xF0) >> 4) + 1;
+ if (channel_count > max_audio_channel_count) {
+ DRM_ERROR("channel_count (0x%x) greater than max (0x%x)\n",
+ channel_count, max_audio_channel_count);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ link->dp_link.test_audio.test_audio_sampling_rate = sampling_rate;
+ link->dp_link.test_audio.test_audio_channel_count = channel_count;
+ drm_dbg_dp(link->drm_dev,
+ "sampling_rate = 0x%x, channel_count = 0x%x\n",
+ sampling_rate, channel_count);
+exit:
+ return ret;
+}
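
The TEST_AUDIO_MODE byte packs the sampling-rate code into bits 3:0 and the channel count minus one into bits 7:4, which is why the parser adds 1 after shifting. The unpacking by itself:

```c
#include <stdio.h>

int main(void)
{
	unsigned char data = 0x32;	/* example TEST_AUDIO_MODE byte */

	int sampling_rate = data & 0xF;			/* bits 3:0 */
	int channel_count = ((data & 0xF0) >> 4) + 1;	/* bits 7:4, stored as n-1 */

	/* 0x32 -> rate code 0x2 (48 kHz per the enum above), 4 channels */
	printf("rate=0x%x channels=%d\n", sampling_rate, channel_count);
	return 0;
}
```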
+
+static int dp_link_parse_audio_pattern_params(struct dp_link_private *link)
+{
+ int ret = 0;
+
+ ret = dp_link_parse_audio_mode(link);
+ if (ret)
+ goto exit;
+
+ ret = dp_link_parse_audio_pattern_type(link);
+ if (ret)
+ goto exit;
+
+ ret = dp_link_parse_audio_channel_period(link);
+
+exit:
+ return ret;
+}
+
+static bool dp_link_is_video_pattern_valid(u32 pattern)
+{
+ switch (pattern) {
+ case DP_NO_TEST_PATTERN:
+ case DP_COLOR_RAMP:
+ case DP_BLACK_AND_WHITE_VERTICAL_LINES:
+ case DP_COLOR_SQUARE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * dp_link_is_bit_depth_valid() - validates the bit depth requested
+ * @tbd: bit depth requested by the sink
+ *
+ * Returns true if the requested bit depth is supported.
+ */
+static bool dp_link_is_bit_depth_valid(u32 tbd)
+{
+	/* any bit depth other than 6, 8 or 10 bpc is treated as invalid */
+ switch (tbd) {
+ case DP_TEST_BIT_DEPTH_6:
+ case DP_TEST_BIT_DEPTH_8:
+ case DP_TEST_BIT_DEPTH_10:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int dp_link_parse_timing_params1(struct dp_link_private *link,
+ int addr, int len, u32 *val)
+{
+ u8 bp[2];
+ int rlen;
+
+ if (len != 2)
+ return -EINVAL;
+
+	/* Read the two-byte timing parameter at the given DPCD address. */
+ rlen = drm_dp_dpcd_read(link->aux, addr, bp, len);
+ if (rlen < len) {
+ DRM_ERROR("failed to read 0x%x\n", addr);
+ return -EINVAL;
+ }
+
+ *val = bp[1] | (bp[0] << 8);
+
+ return 0;
+}
+
+static int dp_link_parse_timing_params2(struct dp_link_private *link,
+ int addr, int len,
+ u32 *val1, u32 *val2)
+{
+ u8 bp[2];
+ int rlen;
+
+ if (len != 2)
+ return -EINVAL;
+
+	/* Read the polarity/width pair at the given DPCD address. */
+ rlen = drm_dp_dpcd_read(link->aux, addr, bp, len);
+ if (rlen < len) {
+ DRM_ERROR("failed to read 0x%x\n", addr);
+ return -EINVAL;
+ }
+
+ *val1 = (bp[0] & BIT(7)) >> 7;
+ *val2 = bp[1] | ((bp[0] & 0x7F) << 8);
+
+ return 0;
+}
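
dp_link_parse_timing_params2() splits a two-byte DPCD field into a sync polarity (bit 7 of the high byte) and a 15-bit width (the remaining bits, high byte first). The same unpacking as a standalone check:

```c
#include <stdio.h>

int main(void)
{
	/* Example DPCD byte pair, e.g. from the HSYNC hi/lo registers */
	unsigned char bp[2] = { 0x80 | 0x01, 0x2C };

	unsigned int pol   = (bp[0] & 0x80) >> 7;	     /* bit 7: polarity */
	unsigned int width = bp[1] | ((bp[0] & 0x7F) << 8);  /* 15-bit value */

	printf("pol=%u width=%u\n", pol, width);	/* pol=1 width=300 */
	return 0;
}
```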
+
+static int dp_link_parse_timing_params3(struct dp_link_private *link,
+ int addr, u32 *val)
+{
+ u8 bp;
+ u32 len = 1;
+ int rlen;
+
+ rlen = drm_dp_dpcd_read(link->aux, addr, &bp, len);
+ if (rlen < 1) {
+ DRM_ERROR("failed to read 0x%x\n", addr);
+ return -EINVAL;
+ }
+ *val = bp;
+
+ return 0;
+}
+
+/**
+ * dp_link_parse_video_pattern_params() - parses video pattern parameters from DPCD
+ * @link: Display Port Driver data
+ *
+ * Returns 0 if it successfully parses the video link pattern and the link
+ * bit depth requested by the sink, and if the values parsed are valid.
+ */
+static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
+{
+ int ret = 0;
+ ssize_t rlen;
+ u8 bp;
+
+ rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_PATTERN, &bp);
+ if (rlen < 0) {
+ DRM_ERROR("failed to read link video pattern. rlen=%zd\n",
+ rlen);
+ return rlen;
+ }
+
+ if (!dp_link_is_video_pattern_valid(bp)) {
+ DRM_ERROR("invalid link video pattern = 0x%x\n", bp);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ link->dp_link.test_video.test_video_pattern = bp;
+
+ /* Read the requested color bit depth and dynamic range (Byte 0x232) */
+ rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_MISC0, &bp);
+ if (rlen < 0) {
+ DRM_ERROR("failed to read link bit depth. rlen=%zd\n", rlen);
+ return rlen;
+ }
+
+ /* Dynamic Range */
+ link->dp_link.test_video.test_dyn_range =
+ (bp & DP_TEST_DYNAMIC_RANGE_CEA);
+
+ /* Color bit depth */
+ bp &= DP_TEST_BIT_DEPTH_MASK;
+ if (!dp_link_is_bit_depth_valid(bp)) {
+ DRM_ERROR("invalid link bit depth = 0x%x\n", bp);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ link->dp_link.test_video.test_bit_depth = bp;
+
+ /* resolution timing params */
+ ret = dp_link_parse_timing_params1(link, DP_TEST_H_TOTAL_HI, 2,
+ &link->dp_link.test_video.test_h_total);
+ if (ret) {
+ DRM_ERROR("failed to parse test_htotal(DP_TEST_H_TOTAL_HI)\n");
+ return ret;
+ }
+
+ ret = dp_link_parse_timing_params1(link, DP_TEST_V_TOTAL_HI, 2,
+ &link->dp_link.test_video.test_v_total);
+ if (ret) {
+ DRM_ERROR("failed to parse test_v_total(DP_TEST_V_TOTAL_HI)\n");
+ return ret;
+ }
+
+ ret = dp_link_parse_timing_params1(link, DP_TEST_H_START_HI, 2,
+ &link->dp_link.test_video.test_h_start);
+ if (ret) {
+ DRM_ERROR("failed to parse test_h_start(DP_TEST_H_START_HI)\n");
+ return ret;
+ }
+
+ ret = dp_link_parse_timing_params1(link, DP_TEST_V_START_HI, 2,
+ &link->dp_link.test_video.test_v_start);
+ if (ret) {
+ DRM_ERROR("failed to parse test_v_start(DP_TEST_V_START_HI)\n");
+ return ret;
+ }
+
+ ret = dp_link_parse_timing_params2(link, DP_TEST_HSYNC_HI, 2,
+ &link->dp_link.test_video.test_hsync_pol,
+ &link->dp_link.test_video.test_hsync_width);
+ if (ret) {
+ DRM_ERROR("failed to parse (DP_TEST_HSYNC_HI)\n");
+ return ret;
+ }
+
+ ret = dp_link_parse_timing_params2(link, DP_TEST_VSYNC_HI, 2,
+ &link->dp_link.test_video.test_vsync_pol,
+ &link->dp_link.test_video.test_vsync_width);
+ if (ret) {
+ DRM_ERROR("failed to parse (DP_TEST_VSYNC_HI)\n");
+ return ret;
+ }
+
+ ret = dp_link_parse_timing_params1(link, DP_TEST_H_WIDTH_HI, 2,
+ &link->dp_link.test_video.test_h_width);
+ if (ret) {
+ DRM_ERROR("failed to parse test_h_width(DP_TEST_H_WIDTH_HI)\n");
+ return ret;
+ }
+
+ ret = dp_link_parse_timing_params1(link, DP_TEST_V_HEIGHT_HI, 2,
+ &link->dp_link.test_video.test_v_height);
+ if (ret) {
+ DRM_ERROR("failed to parse test_v_height\n");
+ return ret;
+ }
+
+ ret = dp_link_parse_timing_params3(link, DP_TEST_MISC1,
+ &link->dp_link.test_video.test_rr_d);
+ link->dp_link.test_video.test_rr_d &= DP_TEST_REFRESH_DENOMINATOR;
+ if (ret) {
+ DRM_ERROR("failed to parse test_rr_d (DP_TEST_MISC1)\n");
+ return ret;
+ }
+
+ ret = dp_link_parse_timing_params3(link, DP_TEST_REFRESH_RATE_NUMERATOR,
+ &link->dp_link.test_video.test_rr_n);
+ if (ret) {
+ DRM_ERROR("failed to parse test_rr_n\n");
+ return ret;
+ }
+
+ drm_dbg_dp(link->drm_dev,
+ "link video pattern = 0x%x\n"
+ "link dynamic range = 0x%x\n"
+ "link bit depth = 0x%x\n"
+ "TEST_H_TOTAL = %d, TEST_V_TOTAL = %d\n"
+ "TEST_H_START = %d, TEST_V_START = %d\n"
+ "TEST_HSYNC_POL = %d\n"
+ "TEST_HSYNC_WIDTH = %d\n"
+ "TEST_VSYNC_POL = %d\n"
+ "TEST_VSYNC_WIDTH = %d\n"
+ "TEST_H_WIDTH = %d\n"
+ "TEST_V_HEIGHT = %d\n"
+ "TEST_REFRESH_DENOMINATOR = %d\n"
+ "TEST_REFRESH_NUMERATOR = %d\n",
+ link->dp_link.test_video.test_video_pattern,
+ link->dp_link.test_video.test_dyn_range,
+ link->dp_link.test_video.test_bit_depth,
+ link->dp_link.test_video.test_h_total,
+ link->dp_link.test_video.test_v_total,
+ link->dp_link.test_video.test_h_start,
+ link->dp_link.test_video.test_v_start,
+ link->dp_link.test_video.test_hsync_pol,
+ link->dp_link.test_video.test_hsync_width,
+ link->dp_link.test_video.test_vsync_pol,
+ link->dp_link.test_video.test_vsync_width,
+ link->dp_link.test_video.test_h_width,
+ link->dp_link.test_video.test_v_height,
+ link->dp_link.test_video.test_rr_d,
+ link->dp_link.test_video.test_rr_n);
+
+ return ret;
+}
+
+/**
+ * dp_link_parse_link_training_params() - parses link training parameters from
+ * DPCD
+ * @link: Display Port Driver data
+ *
+ * Returns 0 if it successfully parses the link rate (Byte 0x219) and lane
+ * count (Byte 0x220), and if the parsed values are valid.
+ */
+static int dp_link_parse_link_training_params(struct dp_link_private *link)
+{
+ u8 bp;
+ ssize_t rlen;
+
+ rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_LINK_RATE, &bp);
+ if (rlen < 0) {
+ DRM_ERROR("failed to read link rate. rlen=%zd\n", rlen);
+ return rlen;
+ }
+
+ if (!is_link_rate_valid(bp)) {
+ DRM_ERROR("invalid link rate = 0x%x\n", bp);
+ return -EINVAL;
+ }
+
+ link->request.test_link_rate = bp;
+ drm_dbg_dp(link->drm_dev, "link rate = 0x%x\n",
+ link->request.test_link_rate);
+
+ rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_LANE_COUNT, &bp);
+ if (rlen < 0) {
+ DRM_ERROR("failed to read lane count. rlen=%zd\n", rlen);
+ return rlen;
+ }
+ bp &= DP_MAX_LANE_COUNT_MASK;
+
+ if (!is_lane_count_valid(bp)) {
+ DRM_ERROR("invalid lane count = 0x%x\n", bp);
+ return -EINVAL;
+ }
+
+ link->request.test_lane_count = bp;
+ drm_dbg_dp(link->drm_dev, "lane count = 0x%x\n",
+ link->request.test_lane_count);
+ return 0;
+}
+
+/**
+ * dp_link_parse_phy_test_params() - parses the phy link parameters
+ * @link: Display Port Driver data
+ *
+ * Parses the DPCD (Byte 0x248) for the DP PHY link pattern that is being
+ * requested.
+ */
+static int dp_link_parse_phy_test_params(struct dp_link_private *link)
+{
+ u8 data;
+ ssize_t rlen;
+
+ rlen = drm_dp_dpcd_readb(link->aux, DP_PHY_TEST_PATTERN,
+ &data);
+ if (rlen < 0) {
+ DRM_ERROR("failed to read phy link pattern. rlen=%zd\n", rlen);
+ return rlen;
+ }
+
+ link->dp_link.phy_params.phy_test_pattern_sel = data & 0x07;
+
+ drm_dbg_dp(link->drm_dev, "phy_test_pattern_sel = 0x%x\n", data);
+
+ switch (data) {
+ case DP_PHY_TEST_PATTERN_SEL_MASK:
+ case DP_PHY_TEST_PATTERN_NONE:
+ case DP_PHY_TEST_PATTERN_D10_2:
+ case DP_PHY_TEST_PATTERN_ERROR_COUNT:
+ case DP_PHY_TEST_PATTERN_PRBS7:
+ case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+ case DP_PHY_TEST_PATTERN_CP2520:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * dp_link_is_video_audio_test_requested() - checks for audio/video test request
+ * @link: test request bitmask from the sink
+ *
+ * Returns true if the requested test is a permitted audio/video test.
+ */
+static bool dp_link_is_video_audio_test_requested(u32 link)
+{
+ u8 video_audio_test = (DP_TEST_LINK_VIDEO_PATTERN |
+ DP_TEST_LINK_AUDIO_PATTERN |
+ DP_TEST_LINK_AUDIO_DISABLED_VIDEO);
+
+ return ((link & video_audio_test) &&
+ !(link & ~video_audio_test));
+}
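
The test above is the usual "non-empty subset of a mask" idiom: at least one bit from the permitted set must be on, and no bit outside it may be. In isolation, with stand-in values for the DP_TEST_LINK_* bits:

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the DP_TEST_LINK_* bits; the real values are in drm_dp.h. */
#define TEST_VIDEO_PATTERN        (1 << 1)
#define TEST_AUDIO_PATTERN        (1 << 5)
#define TEST_AUDIO_DISABLED_VIDEO (1 << 6)

static bool is_video_audio_test(unsigned int req)
{
	unsigned int mask = TEST_VIDEO_PATTERN | TEST_AUDIO_PATTERN |
			    TEST_AUDIO_DISABLED_VIDEO;

	/* something from the mask is set, and nothing outside it is */
	return (req & mask) && !(req & ~mask);
}

int main(void)
{
	printf("%d\n", is_video_audio_test(TEST_AUDIO_PATTERN));	/* 1 */
	printf("%d\n", is_video_audio_test(TEST_AUDIO_PATTERN | 1));	/* 0 */
	printf("%d\n", is_video_audio_test(0));				/* 0 */
	return 0;
}
```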
+
+/**
+ * dp_link_parse_request() - parses link request parameters from sink
+ * @link: Display Port Driver data
+ *
+ * Parses the DPCD to check if an automated test is requested (Byte 0x201),
+ * and what type of test automation is being requested (Byte 0x218).
+ */
+static int dp_link_parse_request(struct dp_link_private *link)
+{
+ int ret = 0;
+ u8 data;
+ ssize_t rlen;
+
+ /**
+	 * Read the device service IRQ vector (Byte 0x201) to determine
+	 * whether an automated test has been requested by the sink.
+ */
+ rlen = drm_dp_dpcd_readb(link->aux,
+ DP_DEVICE_SERVICE_IRQ_VECTOR, &data);
+ if (rlen < 0) {
+ DRM_ERROR("aux read failed. rlen=%zd\n", rlen);
+ return rlen;
+ }
+
+ drm_dbg_dp(link->drm_dev, "device service irq vector = 0x%x\n", data);
+
+ if (!(data & DP_AUTOMATED_TEST_REQUEST)) {
+ drm_dbg_dp(link->drm_dev, "no test requested\n");
+ return 0;
+ }
+
+ /**
+	 * Read the test request byte (Byte 0x218) to determine what type
+	 * of automated test has been requested by the sink.
+ */
+ rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_REQUEST, &data);
+ if (rlen < 0) {
+ DRM_ERROR("aux read failed. rlen=%zd\n", rlen);
+ return rlen;
+ }
+
+ if (!data || (data == DP_TEST_LINK_FAUX_PATTERN)) {
+		drm_dbg_dp(link->drm_dev, "test 0x%x not supported\n", data);
+ goto end;
+ }
+
+ drm_dbg_dp(link->drm_dev, "Test:(0x%x) requested\n", data);
+ link->request.test_requested = data;
+ if (link->request.test_requested == DP_TEST_LINK_PHY_TEST_PATTERN) {
+ ret = dp_link_parse_phy_test_params(link);
+ if (ret)
+ goto end;
+ ret = dp_link_parse_link_training_params(link);
+ if (ret)
+ goto end;
+ }
+
+ if (link->request.test_requested == DP_TEST_LINK_TRAINING) {
+ ret = dp_link_parse_link_training_params(link);
+ if (ret)
+ goto end;
+ }
+
+ if (dp_link_is_video_audio_test_requested(
+ link->request.test_requested)) {
+ ret = dp_link_parse_video_pattern_params(link);
+ if (ret)
+ goto end;
+
+ ret = dp_link_parse_audio_pattern_params(link);
+ }
+end:
+ /*
+ * Send a DP_TEST_ACK if all link parameters are valid, otherwise send
+ * a DP_TEST_NAK.
+ */
+ if (ret) {
+ link->dp_link.test_response = DP_TEST_NAK;
+ } else {
+ if (link->request.test_requested != DP_TEST_LINK_EDID_READ)
+ link->dp_link.test_response = DP_TEST_ACK;
+ else
+ link->dp_link.test_response =
+ DP_TEST_EDID_CHECKSUM_WRITE;
+ }
+
+ return ret;
+}
+
+/**
+ * dp_link_parse_sink_count() - parses the sink count
+ * @dp_link: pointer to link module data
+ *
+ * Parses the DPCD to check if there is an update to the sink count
+ * (Byte 0x200), and whether all the sink devices connected have Content
+ * Protection enabled.
+ */
+static int dp_link_parse_sink_count(struct dp_link *dp_link)
+{
+ ssize_t rlen;
+ bool cp_ready;
+
+ struct dp_link_private *link = container_of(dp_link,
+ struct dp_link_private, dp_link);
+
+ rlen = drm_dp_dpcd_readb(link->aux, DP_SINK_COUNT,
+ &link->dp_link.sink_count);
+ if (rlen < 0) {
+ DRM_ERROR("sink count read failed. rlen=%zd\n", rlen);
+ return rlen;
+ }
+
+ cp_ready = link->dp_link.sink_count & DP_SINK_CP_READY;
+
+ link->dp_link.sink_count =
+ DP_GET_SINK_COUNT(link->dp_link.sink_count);
+
+ drm_dbg_dp(link->drm_dev, "sink_count = 0x%x, cp_ready = 0x%x\n",
+ link->dp_link.sink_count, cp_ready);
+ return 0;
+}
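
The SINK_COUNT byte interleaves the count with the CP_READY flag: count bits 5:0, CP_READY in bit 6, and the count MSB in bit 7, which DP_GET_SINK_COUNT() folds back down next to the low bits. A standalone check of that extraction (the macro body mirrors our reading of drm_dp.h; treat it as illustrative):

```c
#include <stdio.h>

#define SINK_CP_READY (1 << 6)
#define GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f))

int main(void)
{
	unsigned char raw = 0xC1;	/* MSB set, CP ready, low bits 0x01 */
	unsigned int count = GET_SINK_COUNT(raw);

	printf("cp_ready=%d count=%u\n",
	       !!(raw & SINK_CP_READY), count);	/* cp_ready=1 count=65 */
	return 0;
}
```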
+
+static int dp_link_parse_sink_status_field(struct dp_link_private *link)
+{
+ int len = 0;
+
+ link->prev_sink_count = link->dp_link.sink_count;
+ len = dp_link_parse_sink_count(&link->dp_link);
+ if (len < 0) {
+ DRM_ERROR("DP parse sink count failed\n");
+ return len;
+ }
+
+ len = drm_dp_dpcd_read_link_status(link->aux,
+ link->link_status);
+ if (len < DP_LINK_STATUS_SIZE) {
+ DRM_ERROR("DP link status read failed\n");
+ return len;
+ }
+
+ return dp_link_parse_request(link);
+}
+
+/**
+ * dp_link_process_link_training_request() - processes new training requests
+ * @link: Display Port link data
+ *
+ * This function will handle new link training requests that are initiated by
+ * the sink. In particular, it will update the requested lane count and link
+ * rate, and then trigger the link retraining procedure.
+ *
+ * The function will return 0 if a link training request has been processed,
+ * otherwise it will return -EINVAL.
+ */
+static int dp_link_process_link_training_request(struct dp_link_private *link)
+{
+ if (link->request.test_requested != DP_TEST_LINK_TRAINING)
+ return -EINVAL;
+
+ drm_dbg_dp(link->drm_dev,
+ "Test:0x%x link rate = 0x%x, lane count = 0x%x\n",
+ DP_TEST_LINK_TRAINING,
+ link->request.test_link_rate,
+ link->request.test_lane_count);
+
+ link->dp_link.link_params.num_lanes = link->request.test_lane_count;
+ link->dp_link.link_params.rate =
+ drm_dp_bw_code_to_link_rate(link->request.test_link_rate);
+
+ return 0;
+}
+
+bool dp_link_send_test_response(struct dp_link *dp_link)
+{
+ struct dp_link_private *link = NULL;
+ int ret = 0;
+
+ if (!dp_link) {
+ DRM_ERROR("invalid input\n");
+ return false;
+ }
+
+ link = container_of(dp_link, struct dp_link_private, dp_link);
+
+ ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_RESPONSE,
+ dp_link->test_response);
+
+ return ret == 1;
+}
+
+int dp_link_psm_config(struct dp_link *dp_link,
+ struct dp_link_info *link_info, bool enable)
+{
+ struct dp_link_private *link = NULL;
+ int ret = 0;
+
+ if (!dp_link) {
+ DRM_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ link = container_of(dp_link, struct dp_link_private, dp_link);
+
+ mutex_lock(&link->psm_mutex);
+ if (enable)
+ ret = dp_aux_link_power_down(link->aux, link_info);
+ else
+ ret = dp_aux_link_power_up(link->aux, link_info);
+
+ if (ret)
+ DRM_ERROR("Failed to %s low power mode\n", enable ?
+ "enter" : "exit");
+ else
+ dp_link->psm_enabled = enable;
+
+ mutex_unlock(&link->psm_mutex);
+ return ret;
+}
+
+bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum)
+{
+ struct dp_link_private *link = NULL;
+ int ret = 0;
+
+ if (!dp_link) {
+ DRM_ERROR("invalid input\n");
+ return false;
+ }
+
+ link = container_of(dp_link, struct dp_link_private, dp_link);
+
+ ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_EDID_CHECKSUM,
+ checksum);
+ return ret == 1;
+}
+
+static void dp_link_parse_vx_px(struct dp_link_private *link)
+{
+ drm_dbg_dp(link->drm_dev, "vx: 0=%d, 1=%d, 2=%d, 3=%d\n",
+ drm_dp_get_adjust_request_voltage(link->link_status, 0),
+ drm_dp_get_adjust_request_voltage(link->link_status, 1),
+ drm_dp_get_adjust_request_voltage(link->link_status, 2),
+ drm_dp_get_adjust_request_voltage(link->link_status, 3));
+
+ drm_dbg_dp(link->drm_dev, "px: 0=%d, 1=%d, 2=%d, 3=%d\n",
+ drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0),
+ drm_dp_get_adjust_request_pre_emphasis(link->link_status, 1),
+ drm_dp_get_adjust_request_pre_emphasis(link->link_status, 2),
+ drm_dp_get_adjust_request_pre_emphasis(link->link_status, 3));
+
+ /**
+ * Update the voltage and pre-emphasis levels as per DPCD request
+ * vector.
+ */
+ drm_dbg_dp(link->drm_dev,
+ "Current: v_level = 0x%x, p_level = 0x%x\n",
+ link->dp_link.phy_params.v_level,
+ link->dp_link.phy_params.p_level);
+ link->dp_link.phy_params.v_level =
+ drm_dp_get_adjust_request_voltage(link->link_status, 0);
+ link->dp_link.phy_params.p_level =
+ drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0);
+
+ link->dp_link.phy_params.p_level >>= DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ drm_dbg_dp(link->drm_dev,
+ "Requested: v_level = 0x%x, p_level = 0x%x\n",
+ link->dp_link.phy_params.v_level,
+ link->dp_link.phy_params.p_level);
+}
+
+/**
+ * dp_link_process_phy_test_pattern_request() - process new phy link requests
+ * @link: Display Port Driver data
+ *
+ * This function will handle new phy link pattern requests that are initiated
+ * by the sink. The function will return 0 if a phy link pattern has been
+ * processed, otherwise it will return -EINVAL.
+ */
+static int dp_link_process_phy_test_pattern_request(
+ struct dp_link_private *link)
+{
+ if (!(link->request.test_requested & DP_TEST_LINK_PHY_TEST_PATTERN)) {
+ drm_dbg_dp(link->drm_dev, "no phy test\n");
+ return -EINVAL;
+ }
+
+ if (!is_link_rate_valid(link->request.test_link_rate) ||
+ !is_lane_count_valid(link->request.test_lane_count)) {
+ DRM_ERROR("Invalid: link rate = 0x%x,lane count = 0x%x\n",
+ link->request.test_link_rate,
+ link->request.test_lane_count);
+ return -EINVAL;
+ }
+
+ drm_dbg_dp(link->drm_dev,
+ "Current: rate = 0x%x, lane count = 0x%x\n",
+ link->dp_link.link_params.rate,
+ link->dp_link.link_params.num_lanes);
+
+ drm_dbg_dp(link->drm_dev,
+ "Requested: rate = 0x%x, lane count = 0x%x\n",
+ link->request.test_link_rate,
+ link->request.test_lane_count);
+
+ link->dp_link.link_params.num_lanes = link->request.test_lane_count;
+ link->dp_link.link_params.rate =
+ drm_dp_bw_code_to_link_rate(link->request.test_link_rate);
+
+ dp_link_parse_vx_px(link);
+
+ return 0;
+}
+
+static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
+/**
+ * dp_link_process_link_status_update() - processes link status updates
+ * @link: Display Port link module data
+ *
+ * This function will check for changes in the link status, e.g. clock
+ * recovery done on all lanes, and trigger link training if there is a
+ * failure/error on the link.
+ *
+ * The function will return 0 if a link status update has been processed,
+ * otherwise it will return -EINVAL.
+ */
+static int dp_link_process_link_status_update(struct dp_link_private *link)
+{
+ bool channel_eq_done = drm_dp_channel_eq_ok(link->link_status,
+ link->dp_link.link_params.num_lanes);
+
+ bool clock_recovery_done = drm_dp_clock_recovery_ok(link->link_status,
+ link->dp_link.link_params.num_lanes);
+
+ drm_dbg_dp(link->drm_dev,
+ "channel_eq_done = %d, clock_recovery_done = %d\n",
+ channel_eq_done, clock_recovery_done);
+
+ if (channel_eq_done && clock_recovery_done)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * dp_link_process_ds_port_status_change() - process port status changes
+ * @link: Display Port Driver data
+ *
+ * This function will handle downstream port updates that are initiated by
+ * the sink. If the downstream port status has changed, the EDID is read via
+ * AUX.
+ *
+ * The function will return 0 if a downstream port update has been
+ * processed, otherwise it will return -EINVAL.
+ */
+static int dp_link_process_ds_port_status_change(struct dp_link_private *link)
+{
+ if (get_link_status(link->link_status, DP_LANE_ALIGN_STATUS_UPDATED) &
+ DP_DOWNSTREAM_PORT_STATUS_CHANGED)
+ goto reset;
+
+ if (link->prev_sink_count == link->dp_link.sink_count)
+ return -EINVAL;
+
+reset:
+ /* reset prev_sink_count */
+ link->prev_sink_count = link->dp_link.sink_count;
+
+ return 0;
+}
+
+static bool dp_link_is_video_pattern_requested(struct dp_link_private *link)
+{
+ return (link->request.test_requested & DP_TEST_LINK_VIDEO_PATTERN)
+ && !(link->request.test_requested &
+ DP_TEST_LINK_AUDIO_DISABLED_VIDEO);
+}
+
+static bool dp_link_is_audio_pattern_requested(struct dp_link_private *link)
+{
+ return (link->request.test_requested & DP_TEST_LINK_AUDIO_PATTERN);
+}
+
+static void dp_link_reset_data(struct dp_link_private *link)
+{
+ link->request = (const struct dp_link_request){ 0 };
+ link->dp_link.test_video = (const struct dp_link_test_video){ 0 };
+ link->dp_link.test_video.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN;
+ link->dp_link.test_audio = (const struct dp_link_test_audio){ 0 };
+ link->dp_link.phy_params.phy_test_pattern_sel = 0;
+ link->dp_link.sink_request = 0;
+ link->dp_link.test_response = 0;
+}
+
+/**
+ * dp_link_process_request() - handle HPD IRQ transition to HIGH
+ * @dp_link: pointer to link module data
+ *
+ * This function will handle the HPD IRQ state transitions from LOW to HIGH
+ * (including cases when there are back to back HPD IRQ HIGH) indicating
+ * the start of a new link training request or sink status update.
+ */
+int dp_link_process_request(struct dp_link *dp_link)
+{
+ int ret = 0;
+ struct dp_link_private *link;
+
+ if (!dp_link) {
+ DRM_ERROR("invalid input\n");
+ return -EINVAL;
+ }
+
+ link = container_of(dp_link, struct dp_link_private, dp_link);
+
+ dp_link_reset_data(link);
+
+ ret = dp_link_parse_sink_status_field(link);
+ if (ret)
+ return ret;
+
+ if (link->request.test_requested == DP_TEST_LINK_EDID_READ) {
+ dp_link->sink_request |= DP_TEST_LINK_EDID_READ;
+ } else if (!dp_link_process_ds_port_status_change(link)) {
+ dp_link->sink_request |= DS_PORT_STATUS_CHANGED;
+ } else if (!dp_link_process_link_training_request(link)) {
+ dp_link->sink_request |= DP_TEST_LINK_TRAINING;
+ } else if (!dp_link_process_phy_test_pattern_request(link)) {
+ dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN;
+ } else {
+ ret = dp_link_process_link_status_update(link);
+ if (!ret) {
+ dp_link->sink_request |= DP_LINK_STATUS_UPDATED;
+ } else {
+ if (dp_link_is_video_pattern_requested(link)) {
+ ret = 0;
+ dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN;
+ }
+ if (dp_link_is_audio_pattern_requested(link)) {
+ dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN;
+ ret = -EINVAL;
+ }
+ }
+ }
+
+ drm_dbg_dp(link->drm_dev, "sink request=%#x\n",
+ dp_link->sink_request);
+ return ret;
+}
+
+int dp_link_get_colorimetry_config(struct dp_link *dp_link)
+{
+ u32 cc;
+ struct dp_link_private *link;
+
+ if (!dp_link) {
+ DRM_ERROR("invalid input\n");
+ return -EINVAL;
+ }
+
+ link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	/*
+	 * Unless a video pattern CTS test is ongoing, use RGB_VESA.
+	 * Only RGB_VESA and RGB_CEA are supported for now.
+	 */
+ if (dp_link_is_video_pattern_requested(link))
+ cc = link->dp_link.test_video.test_dyn_range;
+ else
+ cc = DP_TEST_DYNAMIC_RANGE_VESA;
+
+ return cc;
+}
+
+int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
+{
+ int i;
+ int v_max = 0, p_max = 0;
+ struct dp_link_private *link;
+
+ if (!dp_link) {
+ DRM_ERROR("invalid input\n");
+ return -EINVAL;
+ }
+
+ link = container_of(dp_link, struct dp_link_private, dp_link);
+
+ /* use the max level across lanes */
+ for (i = 0; i < dp_link->link_params.num_lanes; i++) {
+ u8 data_v = drm_dp_get_adjust_request_voltage(link_status, i);
+ u8 data_p = drm_dp_get_adjust_request_pre_emphasis(link_status,
+ i);
+ drm_dbg_dp(link->drm_dev,
+ "lane=%d req_vol_swing=%d req_pre_emphasis=%d\n",
+ i, data_v, data_p);
+ if (v_max < data_v)
+ v_max = data_v;
+ if (p_max < data_p)
+ p_max = data_p;
+ }
+
+ dp_link->phy_params.v_level = v_max >> DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ dp_link->phy_params.p_level = p_max >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ /**
+ * Adjust the voltage swing and pre-emphasis level combination to within
+ * the allowable range.
+ */
+ if (dp_link->phy_params.v_level > DP_TRAIN_VOLTAGE_SWING_MAX) {
+ drm_dbg_dp(link->drm_dev,
+ "Requested vSwingLevel=%d, change to %d\n",
+ dp_link->phy_params.v_level,
+ DP_TRAIN_VOLTAGE_SWING_MAX);
+ dp_link->phy_params.v_level = DP_TRAIN_VOLTAGE_SWING_MAX;
+ }
+
+ if (dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_MAX) {
+ drm_dbg_dp(link->drm_dev,
+ "Requested preEmphasisLevel=%d, change to %d\n",
+ dp_link->phy_params.p_level,
+ DP_TRAIN_PRE_EMPHASIS_MAX);
+ dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_MAX;
+ }
+
+ if ((dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_LVL_1)
+ && (dp_link->phy_params.v_level ==
+ DP_TRAIN_VOLTAGE_SWING_LVL_2)) {
+ drm_dbg_dp(link->drm_dev,
+ "Requested preEmphasisLevel=%d, change to %d\n",
+ dp_link->phy_params.p_level,
+ DP_TRAIN_PRE_EMPHASIS_LVL_1);
+ dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_LVL_1;
+ }
+
+ drm_dbg_dp(link->drm_dev, "adjusted: v_level=%d, p_level=%d\n",
+ dp_link->phy_params.v_level, dp_link->phy_params.p_level);
+
+ return 0;
+}
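
dp_link_adjust_levels() takes the maximum requested swing and pre-emphasis across all active lanes, clamps each to its level-2 ceiling, and further caps pre-emphasis at level 1 once the swing is already at level 2, bounding the combined drive strength. A compact sketch of that policy with made-up per-lane requests:

```c
#include <stdio.h>

#define V_MAX 2	/* max voltage-swing level */
#define P_MAX 2	/* max pre-emphasis level */

int main(void)
{
	/* hypothetical per-lane requests (level codes, not register values) */
	int req_v[4] = { 1, 2, 0, 1 };
	int req_p[4] = { 2, 2, 1, 0 };
	int v = 0, p = 0;

	for (int i = 0; i < 4; i++) {	/* use the max across lanes */
		if (req_v[i] > v) v = req_v[i];
		if (req_p[i] > p) p = req_p[i];
	}

	if (v > V_MAX) v = V_MAX;
	if (p > P_MAX) p = P_MAX;
	if (v == 2 && p > 1)	/* cap the combined drive strength */
		p = 1;

	printf("v_level=%d p_level=%d\n", v, p);	/* v=2 p=1 */
	return 0;
}
```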
+
+void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link)
+{
+ dp_link->phy_params.v_level = 0;
+ dp_link->phy_params.p_level = 0;
+}
+
+u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
+{
+ u32 tbd;
+
+ /*
+	 * A few simplistic rules and assumptions are made here:
+ * 1. Test bit depth is bit depth per color component
+ * 2. Assume 3 color components
+ */
+ switch (bpp) {
+ case 18:
+ tbd = DP_TEST_BIT_DEPTH_6;
+ break;
+ case 24:
+ tbd = DP_TEST_BIT_DEPTH_8;
+ break;
+ case 30:
+ tbd = DP_TEST_BIT_DEPTH_10;
+ break;
+ default:
+ tbd = DP_TEST_BIT_DEPTH_UNKNOWN;
+ break;
+ }
+
+ if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN)
+ tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
+
+ return tbd;
+}
+
+struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux)
+{
+ struct dp_link_private *link;
+ struct dp_link *dp_link;
+
+ if (!dev || !aux) {
+ DRM_ERROR("invalid input\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ link = devm_kzalloc(dev, sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return ERR_PTR(-ENOMEM);
+
+ link->dev = dev;
+ link->aux = aux;
+
+ mutex_init(&link->psm_mutex);
+ dp_link = &link->dp_link;
+
+ return dp_link;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
new file mode 100644
index 000000000..9dd4dd926
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_LINK_H_
+#define _DP_LINK_H_
+
+#include "dp_aux.h"
+
+#define DS_PORT_STATUS_CHANGED 0x200
+#define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF
+#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0)
+
+struct dp_link_info {
+ unsigned char revision;
+ unsigned int rate;
+ unsigned int num_lanes;
+ unsigned long capabilities;
+};
+
+enum dp_link_voltage_level {
+ DP_TRAIN_VOLTAGE_SWING_LVL_0 = 0,
+ DP_TRAIN_VOLTAGE_SWING_LVL_1 = 1,
+ DP_TRAIN_VOLTAGE_SWING_LVL_2 = 2,
+ DP_TRAIN_VOLTAGE_SWING_MAX = DP_TRAIN_VOLTAGE_SWING_LVL_2,
+};
+
+enum dp_link_preemphasis_level {
+ DP_TRAIN_PRE_EMPHASIS_LVL_0 = 0,
+ DP_TRAIN_PRE_EMPHASIS_LVL_1 = 1,
+ DP_TRAIN_PRE_EMPHASIS_LVL_2 = 2,
+ DP_TRAIN_PRE_EMPHASIS_MAX = DP_TRAIN_PRE_EMPHASIS_LVL_2,
+};
+
+struct dp_link_test_video {
+ u32 test_video_pattern;
+ u32 test_bit_depth;
+ u32 test_dyn_range;
+ u32 test_h_total;
+ u32 test_v_total;
+ u32 test_h_start;
+ u32 test_v_start;
+ u32 test_hsync_pol;
+ u32 test_hsync_width;
+ u32 test_vsync_pol;
+ u32 test_vsync_width;
+ u32 test_h_width;
+ u32 test_v_height;
+ u32 test_rr_d;
+ u32 test_rr_n;
+};
+
+struct dp_link_test_audio {
+ u32 test_audio_sampling_rate;
+ u32 test_audio_channel_count;
+ u32 test_audio_pattern_type;
+ u32 test_audio_period_ch_1;
+ u32 test_audio_period_ch_2;
+ u32 test_audio_period_ch_3;
+ u32 test_audio_period_ch_4;
+ u32 test_audio_period_ch_5;
+ u32 test_audio_period_ch_6;
+ u32 test_audio_period_ch_7;
+ u32 test_audio_period_ch_8;
+};
+
+struct dp_link_phy_params {
+ u32 phy_test_pattern_sel;
+ u8 v_level;
+ u8 p_level;
+};
+
+struct dp_link {
+ u32 sink_request;
+ u32 test_response;
+ bool psm_enabled;
+
+ u8 sink_count;
+ struct dp_link_test_video test_video;
+ struct dp_link_test_audio test_audio;
+ struct dp_link_phy_params phy_params;
+ struct dp_link_info link_params;
+};
+
+/**
+ * dp_link_bit_depth_to_bpp() - convert test bit depth to bpp
+ * @tbd: test bit depth
+ *
+ * Returns the bits per pixel (bpp) to be used corresponding to the
+ * given bit depth value. This function assumes that bit depth has
+ * already been validated.
+ */
+static inline u32 dp_link_bit_depth_to_bpp(u32 tbd)
+{
+	/*
+	 * A few simplistic rules and assumptions are made here:
+	 * 1. Bit depth is per color component
+	 * 2. If bit depth is unknown return 0
+	 * 3. Assume 3 color components
+	 */
+ switch (tbd) {
+ case DP_TEST_BIT_DEPTH_6:
+ return 18;
+ case DP_TEST_BIT_DEPTH_8:
+ return 24;
+ case DP_TEST_BIT_DEPTH_10:
+ return 30;
+ case DP_TEST_BIT_DEPTH_UNKNOWN:
+ default:
+ return 0;
+ }
+}
+
+/**
+ * dp_link_bit_depth_to_bpc() - convert test bit depth to bpc
+ * @tbd: test bit depth
+ *
+ * Returns the bits per component (bpc) to be used corresponding to the
+ * bit depth value. This function assumes that bit depth has
+ * already been validated.
+ */
+static inline u32 dp_link_bit_depth_to_bpc(u32 tbd)
+{
+ switch (tbd) {
+ case DP_TEST_BIT_DEPTH_6:
+ return 6;
+ case DP_TEST_BIT_DEPTH_8:
+ return 8;
+ case DP_TEST_BIT_DEPTH_10:
+ return 10;
+ case DP_TEST_BIT_DEPTH_UNKNOWN:
+ default:
+ return 0;
+ }
+}
+
+void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link);
+u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp);
+int dp_link_process_request(struct dp_link *dp_link);
+int dp_link_get_colorimetry_config(struct dp_link *dp_link);
+int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status);
+bool dp_link_send_test_response(struct dp_link *dp_link);
+int dp_link_psm_config(struct dp_link *dp_link,
+ struct dp_link_info *link_info, bool enable);
+bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum);
+
+/**
+ * dp_link_get() - get the functionalities of the dp test module
+ * @dev: device instance of the caller
+ * @aux: DP AUX channel used to communicate with the sink
+ *
+ * return: a pointer to dp_link struct
+ */
+struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux);
+
+#endif /* _DP_LINK_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
new file mode 100644
index 000000000..d38086650
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -0,0 +1,464 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "dp_panel.h"
+
+#include <drm/drm_connector.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
+
+struct dp_panel_private {
+ struct device *dev;
+ struct drm_device *drm_dev;
+ struct dp_panel dp_panel;
+ struct drm_dp_aux *aux;
+ struct dp_link *link;
+ struct dp_catalog *catalog;
+ bool panel_on;
+ bool aux_cfg_update_done;
+};
+
+static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
+{
+ int rc = 0;
+ size_t len;
+ ssize_t rlen;
+ struct dp_panel_private *panel;
+ struct dp_link_info *link_info;
+ u8 *dpcd, major = 0, minor = 0, temp;
+ u32 offset = DP_DPCD_REV;
+
+ dpcd = dp_panel->dpcd;
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ link_info = &dp_panel->link_info;
+
+ rlen = drm_dp_dpcd_read(panel->aux, offset,
+ dpcd, (DP_RECEIVER_CAP_SIZE + 1));
+ if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
+ DRM_ERROR("dpcd read failed, rlen=%zd\n", rlen);
+ if (rlen == -ETIMEDOUT)
+ rc = rlen;
+ else
+ rc = -EINVAL;
+
+ goto end;
+ }
+
+ temp = dpcd[DP_TRAINING_AUX_RD_INTERVAL];
+
+ /* check for EXTENDED_RECEIVER_CAPABILITY_FIELD_PRESENT */
+ if (temp & BIT(7)) {
+ drm_dbg_dp(panel->drm_dev,
+ "using EXTENDED_RECEIVER_CAPABILITY_FIELD\n");
+ offset = DPRX_EXTENDED_DPCD_FIELD;
+ }
+
+ rlen = drm_dp_dpcd_read(panel->aux, offset,
+ dpcd, (DP_RECEIVER_CAP_SIZE + 1));
+ if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
+ DRM_ERROR("dpcd read failed, rlen=%zd\n", rlen);
+ if (rlen == -ETIMEDOUT)
+ rc = rlen;
+ else
+ rc = -EINVAL;
+
+ goto end;
+ }
+
+ link_info->revision = dpcd[DP_DPCD_REV];
+ major = (link_info->revision >> 4) & 0x0f;
+ minor = link_info->revision & 0x0f;
+
+ link_info->rate = drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
+ link_info->num_lanes = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+
+ if (link_info->num_lanes > dp_panel->max_dp_lanes)
+ link_info->num_lanes = dp_panel->max_dp_lanes;
+
+	/* Limit support up to HBR2 until HBR3 support is added */
+ if (link_info->rate >= (drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4)))
+ link_info->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
+
+ drm_dbg_dp(panel->drm_dev, "version: %d.%d\n", major, minor);
+ drm_dbg_dp(panel->drm_dev, "link_rate=%d\n", link_info->rate);
+ drm_dbg_dp(panel->drm_dev, "lane_count=%d\n", link_info->num_lanes);
+
+ if (drm_dp_enhanced_frame_cap(dpcd))
+ link_info->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
+
+ dp_panel->dfp_present = dpcd[DP_DOWNSTREAMPORT_PRESENT];
+ dp_panel->dfp_present &= DP_DWN_STRM_PORT_PRESENT;
+
+ if (dp_panel->dfp_present && (dpcd[DP_DPCD_REV] > 0x10)) {
+ dp_panel->ds_port_cnt = dpcd[DP_DOWN_STREAM_PORT_COUNT];
+ dp_panel->ds_port_cnt &= DP_PORT_COUNT_MASK;
+ len = DP_DOWNSTREAM_PORTS * DP_DOWNSTREAM_CAP_SIZE;
+
+ rlen = drm_dp_dpcd_read(panel->aux,
+ DP_DOWNSTREAM_PORT_0, dp_panel->ds_cap_info, len);
+ if (rlen < len) {
+ DRM_ERROR("ds port status failed, rlen=%zd\n", rlen);
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+
+end:
+ return rc;
+}
+
+static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
+ u32 mode_edid_bpp, u32 mode_pclk_khz)
+{
+ struct dp_link_info *link_info;
+ const u32 max_supported_bpp = 30, min_supported_bpp = 18;
+ u32 bpp = 0, data_rate_khz = 0;
+
+ bpp = min_t(u32, mode_edid_bpp, max_supported_bpp);
+
+ link_info = &dp_panel->link_info;
+ data_rate_khz = link_info->num_lanes * link_info->rate * 8;
+
+ while (bpp > min_supported_bpp) {
+ if (mode_pclk_khz * bpp <= data_rate_khz)
+ break;
+ bpp -= 6;
+ }
+
+ return bpp;
+}
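+
+/*
+ * Worked example (illustrative numbers): a 4-lane HBR2 link has
+ * rate = 540000, so data_rate_khz = 4 * 540000 * 8 = 17,280,000 (the
+ * factor of 8 is the payload bits carried per 8b/10b link symbol). A
+ * 4K60 mode with mode_pclk_khz of roughly 594000 needs
+ * 594000 * 30 = 17,820,000 at 30 bpp, which exceeds the link capacity,
+ * so the loop steps down to 24 bpp (14,256,000), which fits.
+ */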
+
+static int dp_panel_update_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ int rc = 0;
+
+ if (edid) {
+ rc = drm_connector_update_edid_property(connector, edid);
+ if (rc) {
+ DRM_ERROR("failed to update edid property %d\n", rc);
+ return rc;
+ }
+ rc = drm_add_edid_modes(connector, edid);
+ return rc;
+ }
+
+ rc = drm_connector_update_edid_property(connector, NULL);
+ if (rc)
+ DRM_ERROR("failed to update edid property %d\n", rc);
+
+ return rc;
+}
+
+int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
+ struct drm_connector *connector)
+{
+ int rc = 0, bw_code;
+	int rlen;
+	u8 count;
+ struct dp_panel_private *panel;
+
+ if (!dp_panel || !connector) {
+ DRM_ERROR("invalid input\n");
+ return -EINVAL;
+ }
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+ rc = dp_panel_read_dpcd(dp_panel);
+ if (rc) {
+ DRM_ERROR("read dpcd failed %d\n", rc);
+ return rc;
+ }
+
+ bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
+ if (!is_link_rate_valid(bw_code) ||
+ !is_lane_count_valid(dp_panel->link_info.num_lanes) ||
+ (bw_code > dp_panel->max_bw_code)) {
+ DRM_ERROR("Illegal link rate=%d lane=%d\n", dp_panel->link_info.rate,
+ dp_panel->link_info.num_lanes);
+ return -EINVAL;
+ }
+
+ if (dp_panel->dfp_present) {
+ rlen = drm_dp_dpcd_read(panel->aux, DP_SINK_COUNT,
+ &count, 1);
+ if (rlen == 1) {
+ count = DP_GET_SINK_COUNT(count);
+ if (!count) {
+ DRM_ERROR("no downstream ports connected\n");
+ panel->link->sink_count = 0;
+ rc = -ENOTCONN;
+ goto end;
+ }
+ }
+ }
+
+ kfree(dp_panel->edid);
+ dp_panel->edid = NULL;
+
+ dp_panel->edid = drm_get_edid(connector,
+ &panel->aux->ddc);
+ if (!dp_panel->edid) {
+ DRM_ERROR("panel edid read failed\n");
+ /* check edid read fail is due to unplug */
+ if (!dp_catalog_link_is_connected(panel->catalog)) {
+ rc = -ETIMEDOUT;
+ goto end;
+ }
+ }
+
+ if (panel->aux_cfg_update_done) {
+ drm_dbg_dp(panel->drm_dev,
+ "read DPCD with updated AUX config\n");
+ rc = dp_panel_read_dpcd(dp_panel);
+ bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
+ if (rc || !is_link_rate_valid(bw_code) ||
+ !is_lane_count_valid(dp_panel->link_info.num_lanes)
+ || (bw_code > dp_panel->max_bw_code)) {
+ DRM_ERROR("read dpcd failed %d\n", rc);
+ return rc;
+ }
+ panel->aux_cfg_update_done = false;
+ }
+end:
+ return rc;
+}
+
+u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel,
+ u32 mode_edid_bpp, u32 mode_pclk_khz)
+{
+ struct dp_panel_private *panel;
+ u32 bpp;
+
+ if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) {
+ DRM_ERROR("invalid input\n");
+ return 0;
+ }
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+ if (dp_panel->video_test)
+ bpp = dp_link_bit_depth_to_bpp(
+ panel->link->test_video.test_bit_depth);
+ else
+ bpp = dp_panel_get_supported_bpp(dp_panel, mode_edid_bpp,
+ mode_pclk_khz);
+
+ return bpp;
+}
+
+int dp_panel_get_modes(struct dp_panel *dp_panel,
+ struct drm_connector *connector)
+{
+ if (!dp_panel) {
+ DRM_ERROR("invalid input\n");
+ return -EINVAL;
+ }
+
+ if (dp_panel->edid)
+ return dp_panel_update_modes(connector, dp_panel->edid);
+
+ return 0;
+}
+
+static u8 dp_panel_get_edid_checksum(struct edid *edid)
+{
+ edid += edid->extensions;
+
+ return edid->checksum;
+}
+
+void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
+{
+ struct dp_panel_private *panel;
+
+ if (!dp_panel) {
+ DRM_ERROR("invalid input\n");
+ return;
+ }
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+ if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) {
+ u8 checksum;
+
+ if (dp_panel->edid)
+ checksum = dp_panel_get_edid_checksum(dp_panel->edid);
+ else
+ checksum = dp_panel->connector->real_edid_checksum;
+
+ dp_link_send_edid_checksum(panel->link, checksum);
+ dp_link_send_test_response(panel->link);
+ }
+}
+
+void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable)
+{
+ struct dp_catalog *catalog;
+ struct dp_panel_private *panel;
+
+ if (!dp_panel) {
+ DRM_ERROR("invalid input\n");
+ return;
+ }
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ catalog = panel->catalog;
+
+ if (!panel->panel_on) {
+ drm_dbg_dp(panel->drm_dev,
+ "DP panel not enabled, handle TPG on next on\n");
+ return;
+ }
+
+ if (!enable) {
+ dp_catalog_panel_tpg_disable(catalog);
+ return;
+ }
+
+ drm_dbg_dp(panel->drm_dev, "calling catalog tpg_enable\n");
+ dp_catalog_panel_tpg_enable(catalog, &panel->dp_panel.dp_mode.drm_mode);
+}
+
+void dp_panel_dump_regs(struct dp_panel *dp_panel)
+{
+ struct dp_catalog *catalog;
+ struct dp_panel_private *panel;
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ catalog = panel->catalog;
+
+ dp_catalog_dump_regs(catalog);
+}
+
+int dp_panel_timing_cfg(struct dp_panel *dp_panel)
+{
+ u32 data, total_ver, total_hor;
+ struct dp_catalog *catalog;
+ struct dp_panel_private *panel;
+ struct drm_display_mode *drm_mode;
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+ catalog = panel->catalog;
+ drm_mode = &panel->dp_panel.dp_mode.drm_mode;
+
+ drm_dbg_dp(panel->drm_dev, "width=%d hporch= %d %d %d\n",
+ drm_mode->hdisplay, drm_mode->htotal - drm_mode->hsync_end,
+ drm_mode->hsync_start - drm_mode->hdisplay,
+ drm_mode->hsync_end - drm_mode->hsync_start);
+
+ drm_dbg_dp(panel->drm_dev, "height=%d vporch= %d %d %d\n",
+ drm_mode->vdisplay, drm_mode->vtotal - drm_mode->vsync_end,
+ drm_mode->vsync_start - drm_mode->vdisplay,
+ drm_mode->vsync_end - drm_mode->vsync_start);
+
+ total_hor = drm_mode->htotal;
+
+ total_ver = drm_mode->vtotal;
+
+ data = total_ver;
+ data <<= 16;
+ data |= total_hor;
+
+ catalog->total = data;
+
+ data = (drm_mode->vtotal - drm_mode->vsync_start);
+ data <<= 16;
+ data |= (drm_mode->htotal - drm_mode->hsync_start);
+
+ catalog->sync_start = data;
+
+ data = drm_mode->vsync_end - drm_mode->vsync_start;
+ data <<= 16;
+ data |= (panel->dp_panel.dp_mode.v_active_low << 31);
+ data |= drm_mode->hsync_end - drm_mode->hsync_start;
+ data |= (panel->dp_panel.dp_mode.h_active_low << 15);
+
+ catalog->width_blanking = data;
+
+ data = drm_mode->vdisplay;
+ data <<= 16;
+ data |= drm_mode->hdisplay;
+
+ catalog->dp_active = data;
+
+ dp_catalog_panel_timing_cfg(catalog);
+ panel->panel_on = true;
+
+ return 0;
+}
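+
+/*
+ * Register packing example (illustrative): for a 1920x1080 mode with
+ * htotal = 2200 and vtotal = 1125, the total register holds
+ * (1125 << 16) | 2200 and dp_active holds (1080 << 16) | 1920, i.e.
+ * vertical timing in the upper half-word, horizontal in the lower.
+ */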
+
+int dp_panel_init_panel_info(struct dp_panel *dp_panel)
+{
+ struct drm_display_mode *drm_mode;
+ struct dp_panel_private *panel;
+
+ drm_mode = &dp_panel->dp_mode.drm_mode;
+
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	/*
+	 * Print the resolution info as this is a result of a user-initiated
+	 * action of cable connection.
+	 */
+ drm_dbg_dp(panel->drm_dev, "SET NEW RESOLUTION:\n");
+ drm_dbg_dp(panel->drm_dev, "%dx%d@%dfps\n",
+ drm_mode->hdisplay, drm_mode->vdisplay, drm_mode_vrefresh(drm_mode));
+ drm_dbg_dp(panel->drm_dev,
+ "h_porches(back|front|width) = (%d|%d|%d)\n",
+ drm_mode->htotal - drm_mode->hsync_end,
+ drm_mode->hsync_start - drm_mode->hdisplay,
+ drm_mode->hsync_end - drm_mode->hsync_start);
+ drm_dbg_dp(panel->drm_dev,
+ "v_porches(back|front|width) = (%d|%d|%d)\n",
+ drm_mode->vtotal - drm_mode->vsync_end,
+ drm_mode->vsync_start - drm_mode->vdisplay,
+ drm_mode->vsync_end - drm_mode->vsync_start);
+	drm_dbg_dp(panel->drm_dev, "pixel clock (kHz)=(%d)\n",
+ drm_mode->clock);
+ drm_dbg_dp(panel->drm_dev, "bpp = %d\n", dp_panel->dp_mode.bpp);
+
+ dp_panel->dp_mode.bpp = max_t(u32, 18,
+ min_t(u32, dp_panel->dp_mode.bpp, 30));
+ drm_dbg_dp(panel->drm_dev, "updated bpp = %d\n",
+ dp_panel->dp_mode.bpp);
+
+ return 0;
+}
+
+struct dp_panel *dp_panel_get(struct dp_panel_in *in)
+{
+ struct dp_panel_private *panel;
+ struct dp_panel *dp_panel;
+
+ if (!in->dev || !in->catalog || !in->aux || !in->link) {
+ DRM_ERROR("invalid input\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ panel = devm_kzalloc(in->dev, sizeof(*panel), GFP_KERNEL);
+ if (!panel)
+ return ERR_PTR(-ENOMEM);
+
+ panel->dev = in->dev;
+ panel->aux = in->aux;
+ panel->catalog = in->catalog;
+ panel->link = in->link;
+
+ dp_panel = &panel->dp_panel;
+ dp_panel->max_bw_code = DP_LINK_BW_8_1;
+ panel->aux_cfg_update_done = false;
+
+ return dp_panel;
+}
+
+void dp_panel_put(struct dp_panel *dp_panel)
+{
+ if (!dp_panel)
+ return;
+
+ kfree(dp_panel->edid);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
new file mode 100644
index 000000000..d861197ac
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_PANEL_H_
+#define _DP_PANEL_H_
+
+#include <drm/msm_drm.h>
+
+#include "dp_aux.h"
+#include "dp_link.h"
+#include "dp_hpd.h"
+
+struct edid;
+
+#define DPRX_EXTENDED_DPCD_FIELD 0x2200
+
+#define DP_DOWNSTREAM_PORTS 4
+#define DP_DOWNSTREAM_CAP_SIZE 4
+
+struct dp_display_mode {
+ struct drm_display_mode drm_mode;
+ u32 capabilities;
+ u32 bpp;
+ u32 h_active_low;
+ u32 v_active_low;
+};
+
+struct dp_panel_in {
+ struct device *dev;
+ struct drm_dp_aux *aux;
+ struct dp_link *link;
+ struct dp_catalog *catalog;
+};
+
+struct dp_panel {
+ /* dpcd raw data */
+ u8 dpcd[DP_RECEIVER_CAP_SIZE + 1];
+ u8 ds_cap_info[DP_DOWNSTREAM_PORTS * DP_DOWNSTREAM_CAP_SIZE];
+ u32 ds_port_cnt;
+ u32 dfp_present;
+
+ struct dp_link_info link_info;
+ struct drm_dp_desc desc;
+ struct edid *edid;
+ struct drm_connector *connector;
+ struct dp_display_mode dp_mode;
+ bool video_test;
+
+ u32 vic;
+ u32 max_dp_lanes;
+
+ u32 max_bw_code;
+};
+
+int dp_panel_init_panel_info(struct dp_panel *dp_panel);
+int dp_panel_deinit(struct dp_panel *dp_panel);
+int dp_panel_timing_cfg(struct dp_panel *dp_panel);
+void dp_panel_dump_regs(struct dp_panel *dp_panel);
+int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
+ struct drm_connector *connector);
+u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,
+ u32 mode_pclk_khz);
+int dp_panel_get_modes(struct dp_panel *dp_panel,
+ struct drm_connector *connector);
+void dp_panel_handle_sink_request(struct dp_panel *dp_panel);
+void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable);
+
+/**
+ * is_link_rate_valid() - validates the link rate
+ * @bw_code: link rate bandwidth code requested by the sink
+ *
+ * Returns true if the requested link rate is supported.
+ */
+static inline bool is_link_rate_valid(u32 bw_code)
+{
+ return (bw_code == DP_LINK_BW_1_62 ||
+ bw_code == DP_LINK_BW_2_7 ||
+ bw_code == DP_LINK_BW_5_4 ||
+ bw_code == DP_LINK_BW_8_1);
+}
+
+/**
+ * is_lane_count_valid() - validates the lane count
+ * @lane_count: lane count requested by the sink
+ *
+ * Returns true if the requested lane count is supported.
+ */
+static inline bool is_lane_count_valid(u32 lane_count)
+{
+ return (lane_count == 1 ||
+ lane_count == 2 ||
+ lane_count == 4);
+}
+
+struct dp_panel *dp_panel_get(struct dp_panel_in *in);
+void dp_panel_put(struct dp_panel *dp_panel);
+#endif /* _DP_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
new file mode 100644
index 000000000..dcbe893d6
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_parser.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of_gpio.h>
+#include <linux/phy/phy.h>
+
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+#include <drm/drm_bridge.h>
+
+#include "dp_parser.h"
+#include "dp_reg.h"
+
+#define DP_DEFAULT_AHB_OFFSET 0x0000
+#define DP_DEFAULT_AHB_SIZE 0x0200
+#define DP_DEFAULT_AUX_OFFSET 0x0200
+#define DP_DEFAULT_AUX_SIZE 0x0200
+#define DP_DEFAULT_LINK_OFFSET 0x0400
+#define DP_DEFAULT_LINK_SIZE 0x0C00
+#define DP_DEFAULT_P0_OFFSET 0x1000
+#define DP_DEFAULT_P0_SIZE 0x0400
+
+static void __iomem *dp_ioremap(struct platform_device *pdev, int idx, size_t *len)
+{
+ struct resource *res;
+ void __iomem *base;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
+ if (!IS_ERR(base))
+ *len = resource_size(res);
+
+ return base;
+}
+
+static int dp_parser_ctrl_res(struct dp_parser *parser)
+{
+ struct platform_device *pdev = parser->pdev;
+ struct dp_io *io = &parser->io;
+ struct dss_io_data *dss = &io->dp_controller;
+
+ dss->ahb.base = dp_ioremap(pdev, 0, &dss->ahb.len);
+ if (IS_ERR(dss->ahb.base))
+ return PTR_ERR(dss->ahb.base);
+
+ dss->aux.base = dp_ioremap(pdev, 1, &dss->aux.len);
+ if (IS_ERR(dss->aux.base)) {
+ /*
+ * The initial binding had a single reg, but in order to
+ * support variation in the sub-region sizes this was split.
+ * dp_ioremap() will fail with -EINVAL here if only a single
+ * reg is specified, so fill in the sub-region offsets and
+ * lengths based on this single region.
+ */
+ if (PTR_ERR(dss->aux.base) == -EINVAL) {
+ if (dss->ahb.len < DP_DEFAULT_P0_OFFSET + DP_DEFAULT_P0_SIZE) {
+ DRM_ERROR("legacy memory region not large enough\n");
+ return -EINVAL;
+ }
+
+ dss->ahb.len = DP_DEFAULT_AHB_SIZE;
+ dss->aux.base = dss->ahb.base + DP_DEFAULT_AUX_OFFSET;
+ dss->aux.len = DP_DEFAULT_AUX_SIZE;
+ dss->link.base = dss->ahb.base + DP_DEFAULT_LINK_OFFSET;
+ dss->link.len = DP_DEFAULT_LINK_SIZE;
+ dss->p0.base = dss->ahb.base + DP_DEFAULT_P0_OFFSET;
+ dss->p0.len = DP_DEFAULT_P0_SIZE;
+ } else {
+ DRM_ERROR("unable to remap aux region: %pe\n", dss->aux.base);
+ return PTR_ERR(dss->aux.base);
+ }
+ } else {
+ dss->link.base = dp_ioremap(pdev, 2, &dss->link.len);
+ if (IS_ERR(dss->link.base)) {
+ DRM_ERROR("unable to remap link region: %pe\n", dss->link.base);
+ return PTR_ERR(dss->link.base);
+ }
+
+ dss->p0.base = dp_ioremap(pdev, 3, &dss->p0.len);
+ if (IS_ERR(dss->p0.base)) {
+ DRM_ERROR("unable to remap p0 region: %pe\n", dss->p0.base);
+ return PTR_ERR(dss->p0.base);
+ }
+ }
+
+ io->phy = devm_phy_get(&pdev->dev, "dp");
+ if (IS_ERR(io->phy))
+ return PTR_ERR(io->phy);
+
+ return 0;
+}
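+
+/*
+ * For reference, the split-region binding parsed above would look roughly
+ * like the following (addresses are illustrative only):
+ *
+ *	reg = <0x0ae90000 0x200>,	(ahb)
+ *	      <0x0ae90200 0x200>,	(aux)
+ *	      <0x0ae90400 0xc00>,	(link)
+ *	      <0x0ae91000 0x400>;	(p0)
+ *
+ * A legacy binding with a single large reg entry instead falls back to
+ * the DP_DEFAULT_* offsets above.
+ */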
+
+static int dp_parser_misc(struct dp_parser *parser)
+{
+ struct device_node *of_node = parser->pdev->dev.of_node;
+ int len;
+
+ len = drm_of_get_data_lanes_count(of_node, 1, DP_MAX_NUM_DP_LANES);
+ if (len < 0) {
+ DRM_WARN("Invalid property \"data-lanes\", default max DP lanes = %d\n",
+ DP_MAX_NUM_DP_LANES);
+ len = DP_MAX_NUM_DP_LANES;
+ }
+
+ parser->max_dp_lanes = len;
+ return 0;
+}
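+
+/*
+ * "data-lanes" is read from the controller node itself, e.g.
+ * (illustrative): data-lanes = <0 1 2 3>; yields max_dp_lanes = 4, while
+ * a missing or invalid property falls back to DP_MAX_NUM_DP_LANES.
+ */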
+
+static inline bool dp_parser_check_prefix(const char *clk_prefix,
+ const char *clk_name)
+{
+ return !strncmp(clk_prefix, clk_name, strlen(clk_prefix));
+}
+
+static int dp_parser_init_clk_data(struct dp_parser *parser)
+{
+ int num_clk, i, rc;
+ int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0;
+ const char *clk_name;
+ struct device *dev = &parser->pdev->dev;
+ struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
+ struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
+ struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM];
+
+ num_clk = of_property_count_strings(dev->of_node, "clock-names");
+ if (num_clk <= 0) {
+ DRM_ERROR("no clocks are defined\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_clk; i++) {
+ rc = of_property_read_string_index(dev->of_node,
+ "clock-names", i, &clk_name);
+ if (rc < 0)
+ return rc;
+
+ if (dp_parser_check_prefix("core", clk_name))
+ core_clk_count++;
+
+ if (dp_parser_check_prefix("ctrl", clk_name))
+ ctrl_clk_count++;
+
+ if (dp_parser_check_prefix("stream", clk_name))
+ stream_clk_count++;
+ }
+
+ /* Initialize the CORE power module */
+ if (core_clk_count == 0) {
+ DRM_ERROR("no core clocks are defined\n");
+ return -EINVAL;
+ }
+
+ core_power->num_clk = core_clk_count;
+ core_power->clocks = devm_kcalloc(dev,
+ core_power->num_clk, sizeof(struct clk_bulk_data),
+ GFP_KERNEL);
+ if (!core_power->clocks)
+ return -ENOMEM;
+
+ /* Initialize the CTRL power module */
+ if (ctrl_clk_count == 0) {
+ DRM_ERROR("no ctrl clocks are defined\n");
+ return -EINVAL;
+ }
+
+ ctrl_power->num_clk = ctrl_clk_count;
+ ctrl_power->clocks = devm_kcalloc(dev,
+ ctrl_power->num_clk, sizeof(struct clk_bulk_data),
+ GFP_KERNEL);
+ if (!ctrl_power->clocks) {
+ ctrl_power->num_clk = 0;
+ return -ENOMEM;
+ }
+
+ /* Initialize the STREAM power module */
+ if (stream_clk_count == 0) {
+ DRM_ERROR("no stream (pixel) clocks are defined\n");
+ return -EINVAL;
+ }
+
+ stream_power->num_clk = stream_clk_count;
+ stream_power->clocks = devm_kcalloc(dev,
+ stream_power->num_clk, sizeof(struct clk_bulk_data),
+ GFP_KERNEL);
+ if (!stream_power->clocks) {
+ stream_power->num_clk = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
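+
+/*
+ * The prefix matching above expects clock-names of the form used by the
+ * qcom DP bindings, e.g. (illustrative):
+ *
+ *	clock-names = "core_iface", "core_aux",
+ *		      "ctrl_link", "ctrl_link_iface",
+ *		      "stream_pixel";
+ *
+ * which yields two core clocks, two ctrl clocks and one stream clock.
+ */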
+
+static int dp_parser_clock(struct dp_parser *parser)
+{
+ int rc = 0, i = 0;
+ int num_clk = 0;
+ int core_clk_index = 0, ctrl_clk_index = 0, stream_clk_index = 0;
+ int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0;
+ const char *clk_name;
+ struct device *dev = &parser->pdev->dev;
+ struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
+ struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
+ struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM];
+
+ rc = dp_parser_init_clk_data(parser);
+ if (rc) {
+ DRM_ERROR("failed to initialize power data %d\n", rc);
+ return -EINVAL;
+ }
+
+ core_clk_count = core_power->num_clk;
+ ctrl_clk_count = ctrl_power->num_clk;
+ stream_clk_count = stream_power->num_clk;
+
+ num_clk = core_clk_count + ctrl_clk_count + stream_clk_count;
+
+ for (i = 0; i < num_clk; i++) {
+ rc = of_property_read_string_index(dev->of_node, "clock-names",
+ i, &clk_name);
+ if (rc) {
+ DRM_ERROR("error reading clock-names %d\n", rc);
+ return rc;
+ }
+ if (dp_parser_check_prefix("core", clk_name) &&
+ core_clk_index < core_clk_count) {
+ core_power->clocks[core_clk_index].id = devm_kstrdup(dev, clk_name, GFP_KERNEL);
+ core_clk_index++;
+ } else if (dp_parser_check_prefix("stream", clk_name) &&
+ stream_clk_index < stream_clk_count) {
+ stream_power->clocks[stream_clk_index].id = devm_kstrdup(dev, clk_name, GFP_KERNEL);
+ stream_clk_index++;
+ } else if (dp_parser_check_prefix("ctrl", clk_name) &&
+ ctrl_clk_index < ctrl_clk_count) {
+ ctrl_power->clocks[ctrl_clk_index].id = devm_kstrdup(dev, clk_name, GFP_KERNEL);
+ ctrl_clk_index++;
+ }
+ }
+
+ return 0;
+}
+
+int devm_dp_parser_find_next_bridge(struct device *dev, struct dp_parser *parser)
+{
+ struct platform_device *pdev = parser->pdev;
+ struct drm_bridge *bridge;
+
+ bridge = devm_drm_of_get_bridge(dev, pdev->dev.of_node, 1, 0);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+
+ parser->next_bridge = bridge;
+
+ return 0;
+}
+
+static int dp_parser_parse(struct dp_parser *parser)
+{
+ int rc = 0;
+
+ if (!parser) {
+ DRM_ERROR("invalid input\n");
+ return -EINVAL;
+ }
+
+ rc = dp_parser_ctrl_res(parser);
+ if (rc)
+ return rc;
+
+ rc = dp_parser_misc(parser);
+ if (rc)
+ return rc;
+
+ rc = dp_parser_clock(parser);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+struct dp_parser *dp_parser_get(struct platform_device *pdev)
+{
+ struct dp_parser *parser;
+
+ parser = devm_kzalloc(&pdev->dev, sizeof(*parser), GFP_KERNEL);
+ if (!parser)
+ return ERR_PTR(-ENOMEM);
+
+ parser->parse = dp_parser_parse;
+ parser->pdev = pdev;
+
+ return parser;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
new file mode 100644
index 000000000..d30ab773d
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_parser.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_PARSER_H_
+#define _DP_PARSER_H_
+
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-dp.h>
+
+#include "msm_drv.h"
+
+#define DP_LABEL "MDSS DP DISPLAY"
+#define DP_MAX_PIXEL_CLK_KHZ 675000
+#define DP_MAX_NUM_DP_LANES 4
+
+enum dp_pm_type {
+ DP_CORE_PM,
+ DP_CTRL_PM,
+ DP_STREAM_PM,
+ DP_PHY_PM,
+ DP_MAX_PM
+};
+
+struct dss_io_region {
+ size_t len;
+ void __iomem *base;
+};
+
+struct dss_io_data {
+ struct dss_io_region ahb;
+ struct dss_io_region aux;
+ struct dss_io_region link;
+ struct dss_io_region p0;
+};
+
+static inline const char *dp_parser_pm_name(enum dp_pm_type module)
+{
+ switch (module) {
+ case DP_CORE_PM: return "DP_CORE_PM";
+ case DP_CTRL_PM: return "DP_CTRL_PM";
+ case DP_STREAM_PM: return "DP_STREAM_PM";
+ case DP_PHY_PM: return "DP_PHY_PM";
+ default: return "???";
+ }
+}
+
+/**
+ * struct dp_display_data - display related device tree data.
+ *
+ * @ctrl_node: reference to controller device
+ * @phy_node: reference to phy device
+ * @is_active: is the controller currently active
+ * @name: name of the display
+ * @display_type: type of the display
+ */
+struct dp_display_data {
+ struct device_node *ctrl_node;
+ struct device_node *phy_node;
+ bool is_active;
+ const char *name;
+ const char *display_type;
+};
+
+/**
+ * struct dp_io - controller's IO related data
+ *
+ * @dp_controller: Display Port controller mapped memory address
+ * @phy: DP PHY handle
+ * @phy_opts: PHY configuration options
+ */
+struct dp_io {
+ struct dss_io_data dp_controller;
+ struct phy *phy;
+ union phy_configure_opts phy_opts;
+};
+
+/**
+ * struct dp_pinctrl - DP's pin control
+ *
+ * @pin: pin-controller's instance
+ * @state_active: active state pin control
+ * @state_hpd_active: hpd active state pin control
+ * @state_suspend: suspend state pin control
+ */
+struct dp_pinctrl {
+ struct pinctrl *pin;
+ struct pinctrl_state *state_active;
+ struct pinctrl_state *state_hpd_active;
+ struct pinctrl_state *state_suspend;
+};
+
+/* Regulators for DP devices */
+struct dp_reg_entry {
+ char name[32];
+ int enable_load;
+ int disable_load;
+};
+
+struct dss_module_power {
+ unsigned int num_clk;
+ struct clk_bulk_data *clocks;
+};
+
+/**
+ * struct dp_parser - DP parser's data exposed to clients
+ *
+ * @pdev: platform data of the client
+ * @mp: clock related data for each power module
+ * @pinctrl: pin-control related data
+ * @io: controller's mapped memory and PHY data
+ * @disp_data: controller's display related data
+ * @max_dp_lanes: maximum number of DP lanes supported
+ * @next_bridge: bridge attached downstream of the DP controller
+ * @parse: function to be called by client to parse device tree.
+ */
+struct dp_parser {
+ struct platform_device *pdev;
+ struct dss_module_power mp[DP_MAX_PM];
+ struct dp_pinctrl pinctrl;
+ struct dp_io io;
+ struct dp_display_data disp_data;
+ u32 max_dp_lanes;
+ struct drm_bridge *next_bridge;
+
+ int (*parse)(struct dp_parser *parser);
+};
+
+/**
+ * dp_parser_get() - get the DP's device tree parser module
+ *
+ * @pdev: platform data of the client
+ * return: pointer to dp_parser structure.
+ *
+ * This function provides the client the capability to parse the
+ * device tree and populate the data structures. The data
+ * related to clocks, regulators, pin-control and others
+ * can be parsed using this module.
+ */
+struct dp_parser *dp_parser_get(struct platform_device *pdev);
+
+/**
+ * devm_dp_parser_find_next_bridge() - find an additional bridge to DP
+ *
+ * @dev: device to tie bridge lifetime to
+ * @parser: dp_parser data from client
+ *
+ * This function is used to find any additional bridge attached to
+ * the DP controller. The eDP interface requires a panel bridge.
+ *
+ * Return: 0 if able to get the bridge, otherwise negative errno for failure.
+ */
+int devm_dp_parser_find_next_bridge(struct device *dev, struct dp_parser *parser);
+
+#endif
diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c
new file mode 100644
index 000000000..c0aaabb03
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_power.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_opp.h>
+#include "dp_power.h"
+#include "msm_drv.h"
+
+struct dp_power_private {
+ struct dp_parser *parser;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct drm_device *drm_dev;
+ struct clk *link_clk_src;
+ struct clk *pixel_provider;
+ struct clk *link_provider;
+
+ struct dp_power dp_power;
+};
+
+static int dp_power_clk_init(struct dp_power_private *power)
+{
+ int rc = 0;
+ struct dss_module_power *core, *ctrl, *stream;
+ struct device *dev = &power->pdev->dev;
+
+ core = &power->parser->mp[DP_CORE_PM];
+ ctrl = &power->parser->mp[DP_CTRL_PM];
+ stream = &power->parser->mp[DP_STREAM_PM];
+
+ rc = devm_clk_bulk_get(dev, core->num_clk, core->clocks);
+ if (rc) {
+ DRM_ERROR("failed to get %s clk. err=%d\n",
+ dp_parser_pm_name(DP_CORE_PM), rc);
+ return rc;
+ }
+
+ rc = devm_clk_bulk_get(dev, ctrl->num_clk, ctrl->clocks);
+ if (rc) {
+ DRM_ERROR("failed to get %s clk. err=%d\n",
+ dp_parser_pm_name(DP_CTRL_PM), rc);
+ return -ENODEV;
+ }
+
+ rc = devm_clk_bulk_get(dev, stream->num_clk, stream->clocks);
+ if (rc) {
+ DRM_ERROR("failed to get %s clk. err=%d\n",
+			dp_parser_pm_name(DP_STREAM_PM), rc);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type)
+{
+ struct dp_power_private *power;
+
+ power = container_of(dp_power, struct dp_power_private, dp_power);
+
+ drm_dbg_dp(power->drm_dev,
+ "core_clk_on=%d link_clk_on=%d stream_clk_on=%d\n",
+ dp_power->core_clks_on, dp_power->link_clks_on, dp_power->stream_clks_on);
+
+ if (pm_type == DP_CORE_PM)
+ return dp_power->core_clks_on;
+
+ if (pm_type == DP_CTRL_PM)
+ return dp_power->link_clks_on;
+
+ if (pm_type == DP_STREAM_PM)
+ return dp_power->stream_clks_on;
+
+ return 0;
+}
+
+int dp_power_clk_enable(struct dp_power *dp_power,
+ enum dp_pm_type pm_type, bool enable)
+{
+ int rc = 0;
+ struct dp_power_private *power;
+ struct dss_module_power *mp;
+
+ power = container_of(dp_power, struct dp_power_private, dp_power);
+
+ if (pm_type != DP_CORE_PM && pm_type != DP_CTRL_PM &&
+ pm_type != DP_STREAM_PM) {
+ DRM_ERROR("unsupported power module: %s\n",
+ dp_parser_pm_name(pm_type));
+ return -EINVAL;
+ }
+
+ if (enable) {
+ if (pm_type == DP_CORE_PM && dp_power->core_clks_on) {
+ drm_dbg_dp(power->drm_dev,
+ "core clks already enabled\n");
+ return 0;
+ }
+
+ if (pm_type == DP_CTRL_PM && dp_power->link_clks_on) {
+ drm_dbg_dp(power->drm_dev,
+				"link clks already enabled\n");
+ return 0;
+ }
+
+ if (pm_type == DP_STREAM_PM && dp_power->stream_clks_on) {
+ drm_dbg_dp(power->drm_dev,
+ "pixel clks already enabled\n");
+ return 0;
+ }
+
+ if ((pm_type == DP_CTRL_PM) && (!dp_power->core_clks_on)) {
+ drm_dbg_dp(power->drm_dev,
+ "Enable core clks before link clks\n");
+ mp = &power->parser->mp[DP_CORE_PM];
+
+ rc = clk_bulk_prepare_enable(mp->num_clk, mp->clocks);
+ if (rc) {
+ DRM_ERROR("fail to enable clks: %s. err=%d\n",
+ dp_parser_pm_name(DP_CORE_PM), rc);
+ return rc;
+ }
+ dp_power->core_clks_on = true;
+ }
+ }
+
+ mp = &power->parser->mp[pm_type];
+ if (enable) {
+ rc = clk_bulk_prepare_enable(mp->num_clk, mp->clocks);
+ if (rc) {
+ DRM_ERROR("failed to enable clks, err: %d\n", rc);
+ return rc;
+ }
+ } else {
+ clk_bulk_disable_unprepare(mp->num_clk, mp->clocks);
+ }
+
+ if (pm_type == DP_CORE_PM)
+ dp_power->core_clks_on = enable;
+ else if (pm_type == DP_STREAM_PM)
+ dp_power->stream_clks_on = enable;
+ else
+ dp_power->link_clks_on = enable;
+
+ drm_dbg_dp(power->drm_dev, "%s clocks for %s\n",
+ enable ? "enable" : "disable",
+ dp_parser_pm_name(pm_type));
+ drm_dbg_dp(power->drm_dev,
+		"stream_clks:%s link_clks:%s core_clks:%s\n",
+ dp_power->stream_clks_on ? "on" : "off",
+ dp_power->link_clks_on ? "on" : "off",
+ dp_power->core_clks_on ? "on" : "off");
+
+ return 0;
+}
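+
+/*
+ * Typical call order (sketch): dp_power_init() brings up the core clocks,
+ * after which dp_power_clk_enable(power, DP_CTRL_PM, true) can enable the
+ * link clocks; if the core clocks happen to be off at that point, the
+ * function above transparently enables them first.
+ */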
+
+int dp_power_client_init(struct dp_power *dp_power)
+{
+ int rc = 0;
+ struct dp_power_private *power;
+
+ if (!dp_power) {
+ DRM_ERROR("invalid power data\n");
+ return -EINVAL;
+ }
+
+ power = container_of(dp_power, struct dp_power_private, dp_power);
+
+ pm_runtime_enable(&power->pdev->dev);
+
+ rc = dp_power_clk_init(power);
+ if (rc)
+ DRM_ERROR("failed to init clocks %d\n", rc);
+
+ return rc;
+}
+
+void dp_power_client_deinit(struct dp_power *dp_power)
+{
+ struct dp_power_private *power;
+
+ if (!dp_power) {
+ DRM_ERROR("invalid power data\n");
+ return;
+ }
+
+ power = container_of(dp_power, struct dp_power_private, dp_power);
+
+ pm_runtime_disable(&power->pdev->dev);
+}
+
+int dp_power_init(struct dp_power *dp_power, bool flip)
+{
+ int rc = 0;
+ struct dp_power_private *power = NULL;
+
+ if (!dp_power) {
+ DRM_ERROR("invalid power data\n");
+ return -EINVAL;
+ }
+
+ power = container_of(dp_power, struct dp_power_private, dp_power);
+
+ pm_runtime_get_sync(&power->pdev->dev);
+
+ rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true);
+ if (rc) {
+ DRM_ERROR("failed to enable DP core clocks, %d\n", rc);
+ goto exit;
+ }
+
+ return 0;
+
+exit:
+ pm_runtime_put_sync(&power->pdev->dev);
+ return rc;
+}
+
+int dp_power_deinit(struct dp_power *dp_power)
+{
+ struct dp_power_private *power;
+
+ power = container_of(dp_power, struct dp_power_private, dp_power);
+
+ dp_power_clk_enable(dp_power, DP_CORE_PM, false);
+ pm_runtime_put_sync(&power->pdev->dev);
+ return 0;
+}
+
+struct dp_power *dp_power_get(struct device *dev, struct dp_parser *parser)
+{
+ struct dp_power_private *power;
+ struct dp_power *dp_power;
+
+ if (!parser) {
+ DRM_ERROR("invalid input\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ power = devm_kzalloc(&parser->pdev->dev, sizeof(*power), GFP_KERNEL);
+ if (!power)
+ return ERR_PTR(-ENOMEM);
+
+ power->parser = parser;
+ power->pdev = parser->pdev;
+ power->dev = dev;
+
+ dp_power = &power->dp_power;
+
+ return dp_power;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_power.h b/drivers/gpu/drm/msm/dp/dp_power.h
new file mode 100644
index 000000000..e3f959ffa
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_power.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_POWER_H_
+#define _DP_POWER_H_
+
+#include "dp_parser.h"
+
+/**
+ * struct dp_power - DisplayPort's power related data
+ *
+ * @core_clks_on: whether the core clocks are currently enabled
+ * @link_clks_on: whether the link clocks are currently enabled
+ * @stream_clks_on: whether the stream (pixel) clocks are currently enabled
+ */
+struct dp_power {
+ bool core_clks_on;
+ bool link_clks_on;
+ bool stream_clks_on;
+};
+
+/**
+ * dp_power_init() - enable power supplies for display controller
+ *
+ * @power: instance of power module
+ * @flip: bool for flipping gpio direction
+ * return: 0 if success or error if failure.
+ *
+ * This API turns on the regulators and configures the
+ * aux/hpd GPIOs.
+ */
+int dp_power_init(struct dp_power *power, bool flip);
+
+/**
+ * dp_power_deinit() - turn off regulators and gpios.
+ *
+ * @power: instance of power module
+ * return: 0 for success
+ *
+ * This API turns off power and regulators.
+ */
+int dp_power_deinit(struct dp_power *power);
+
+/**
+ * dp_power_clk_status() - display controller clocks status
+ *
+ * @power: instance of power module
+ * @pm_type: type of pm, core/ctrl/phy
+ * return: status of power clocks
+ *
+ * This API returns the status of the DP clocks.
+ */
+int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type);
+
+/**
+ * dp_power_clk_enable() - enable display controller clocks
+ *
+ * @power: instance of power module
+ * @pm_type: type of pm, core/ctrl/phy
+ * @enable: enables or disables
+ * return: 0 if success or error if failure.
+ *
+ * This API will call set_rate and enable for the DP clocks.
+ */
+int dp_power_clk_enable(struct dp_power *power, enum dp_pm_type pm_type,
+ bool enable);
+
+/**
+ * dp_power_client_init() - initialize clock and regulator modules
+ *
+ * @power: instance of power module
+ * return: 0 for success, error for failure.
+ *
+ * This API will configure the DisplayPort's clocks and regulator
+ * modules.
+ */
+int dp_power_client_init(struct dp_power *power);
+
+/**
+ * dp_power_client_deinit() - de-initialize clock and regulator modules
+ *
+ * @power: instance of power module
+ *
+ * This API will de-initialize the DisplayPort's clocks and regulator
+ * modules.
+ */
+void dp_power_client_deinit(struct dp_power *power);
+
+/**
+ * dp_power_get() - configure and get the DisplayPort power module data
+ *
+ * @parser: instance of parser module
+ * return: pointer to allocated power module data
+ *
+ * This API will configure the DisplayPort's power module and provides
+ * methods to be called by the client to configure the power related
+ * modules.
+ */
+struct dp_power *dp_power_get(struct device *dev, struct dp_parser *parser);
+
+#endif /* _DP_POWER_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
new file mode 100644
index 000000000..268602803
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_reg.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_REG_H_
+#define _DP_REG_H_
+
+/* DP_TX Registers */
+#define REG_DP_HW_VERSION (0x00000000)
+
+#define REG_DP_SW_RESET (0x00000010)
+#define DP_SW_RESET (0x00000001)
+
+#define REG_DP_PHY_CTRL (0x00000014)
+#define DP_PHY_CTRL_SW_RESET_PLL (0x00000001)
+#define DP_PHY_CTRL_SW_RESET (0x00000004)
+
+#define REG_DP_CLK_CTRL (0x00000018)
+#define REG_DP_CLK_ACTIVE (0x0000001C)
+#define REG_DP_INTR_STATUS (0x00000020)
+#define REG_DP_INTR_STATUS2 (0x00000024)
+#define REG_DP_INTR_STATUS3 (0x00000028)
+
+#define REG_DP_DP_HPD_CTRL (0x00000000)
+#define DP_DP_HPD_CTRL_HPD_EN (0x00000001)
+
+#define REG_DP_DP_HPD_INT_STATUS (0x00000004)
+
+#define REG_DP_DP_HPD_INT_ACK (0x00000008)
+#define DP_DP_HPD_PLUG_INT_ACK (0x00000001)
+#define DP_DP_IRQ_HPD_INT_ACK (0x00000002)
+#define DP_DP_HPD_REPLUG_INT_ACK (0x00000004)
+#define DP_DP_HPD_UNPLUG_INT_ACK (0x00000008)
+#define DP_DP_HPD_STATE_STATUS_BITS_MASK (0x0000000F)
+#define DP_DP_HPD_STATE_STATUS_BITS_SHIFT (0x1C)
+
+#define REG_DP_DP_HPD_INT_MASK (0x0000000C)
+#define DP_DP_HPD_PLUG_INT_MASK (0x00000001)
+#define DP_DP_IRQ_HPD_INT_MASK (0x00000002)
+#define DP_DP_HPD_REPLUG_INT_MASK (0x00000004)
+#define DP_DP_HPD_UNPLUG_INT_MASK (0x00000008)
+#define DP_DP_HPD_INT_MASK (DP_DP_HPD_PLUG_INT_MASK | \
+ DP_DP_IRQ_HPD_INT_MASK | \
+ DP_DP_HPD_REPLUG_INT_MASK | \
+ DP_DP_HPD_UNPLUG_INT_MASK)
+#define DP_DP_HPD_STATE_STATUS_CONNECTED (0x40000000)
+#define DP_DP_HPD_STATE_STATUS_PENDING (0x20000000)
+#define DP_DP_HPD_STATE_STATUS_DISCONNECTED (0x00000000)
+#define DP_DP_HPD_STATE_STATUS_MASK (0xE0000000)
+
+#define REG_DP_DP_HPD_REFTIMER (0x00000018)
+#define DP_DP_HPD_REFTIMER_ENABLE (1 << 16)
+
+#define REG_DP_DP_HPD_EVENT_TIME_0 (0x0000001C)
+#define REG_DP_DP_HPD_EVENT_TIME_1 (0x00000020)
+#define DP_DP_HPD_EVENT_TIME_0_VAL (0x3E800FA)
+#define DP_DP_HPD_EVENT_TIME_1_VAL (0x1F407D0)
+
+#define REG_DP_AUX_CTRL (0x00000030)
+#define DP_AUX_CTRL_ENABLE (0x00000001)
+#define DP_AUX_CTRL_RESET (0x00000002)
+
+#define REG_DP_AUX_DATA (0x00000034)
+#define DP_AUX_DATA_READ (0x00000001)
+#define DP_AUX_DATA_WRITE (0x00000000)
+#define DP_AUX_DATA_OFFSET (0x00000008)
+#define DP_AUX_DATA_INDEX_OFFSET (0x00000010)
+#define DP_AUX_DATA_MASK (0x0000ff00)
+#define DP_AUX_DATA_INDEX_WRITE (0x80000000)
+
+#define REG_DP_AUX_TRANS_CTRL (0x00000038)
+#define DP_AUX_TRANS_CTRL_I2C (0x00000100)
+#define DP_AUX_TRANS_CTRL_GO (0x00000200)
+#define DP_AUX_TRANS_CTRL_NO_SEND_ADDR (0x00000400)
+#define DP_AUX_TRANS_CTRL_NO_SEND_STOP (0x00000800)
+
+#define REG_DP_TIMEOUT_COUNT (0x0000003C)
+#define REG_DP_AUX_LIMITS (0x00000040)
+#define REG_DP_AUX_STATUS (0x00000044)
+
+#define DP_DPCD_CP_IRQ (0x201)
+#define DP_DPCD_RXSTATUS (0x69493)
+
+#define DP_INTERRUPT_TRANS_NUM (0x000000A0)
+
+#define REG_DP_MAINLINK_CTRL (0x00000000)
+#define DP_MAINLINK_CTRL_ENABLE (0x00000001)
+#define DP_MAINLINK_CTRL_RESET (0x00000002)
+#define DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER (0x00000010)
+#define DP_MAINLINK_FB_BOUNDARY_SEL (0x02000000)
+
+#define REG_DP_STATE_CTRL (0x00000004)
+#define DP_STATE_CTRL_LINK_TRAINING_PATTERN1 (0x00000001)
+#define DP_STATE_CTRL_LINK_TRAINING_PATTERN2 (0x00000002)
+#define DP_STATE_CTRL_LINK_TRAINING_PATTERN3 (0x00000004)
+#define DP_STATE_CTRL_LINK_TRAINING_PATTERN4 (0x00000008)
+#define DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE (0x00000010)
+#define DP_STATE_CTRL_LINK_PRBS7 (0x00000020)
+#define DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN (0x00000040)
+#define DP_STATE_CTRL_SEND_VIDEO (0x00000080)
+#define DP_STATE_CTRL_PUSH_IDLE (0x00000100)
+
+#define REG_DP_CONFIGURATION_CTRL (0x00000008)
+#define DP_CONFIGURATION_CTRL_SYNC_ASYNC_CLK (0x00000001)
+#define DP_CONFIGURATION_CTRL_STATIC_DYNAMIC_CN (0x00000002)
+#define DP_CONFIGURATION_CTRL_P_INTERLACED (0x00000004)
+#define DP_CONFIGURATION_CTRL_INTERLACED_BTF (0x00000008)
+#define DP_CONFIGURATION_CTRL_NUM_OF_LANES (0x00000010)
+#define DP_CONFIGURATION_CTRL_ENHANCED_FRAMING (0x00000040)
+#define DP_CONFIGURATION_CTRL_SEND_VSC (0x00000080)
+#define DP_CONFIGURATION_CTRL_BPC (0x00000100)
+#define DP_CONFIGURATION_CTRL_ASSR (0x00000400)
+#define DP_CONFIGURATION_CTRL_RGB_YUV (0x00000800)
+#define DP_CONFIGURATION_CTRL_LSCLK_DIV (0x00002000)
+#define DP_CONFIGURATION_CTRL_NUM_OF_LANES_SHIFT (0x04)
+#define DP_CONFIGURATION_CTRL_BPC_SHIFT (0x08)
+#define DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT (0x0D)
+
+#define REG_DP_SOFTWARE_MVID (0x00000010)
+#define REG_DP_SOFTWARE_NVID (0x00000018)
+#define REG_DP_TOTAL_HOR_VER (0x0000001C)
+#define REG_DP_START_HOR_VER_FROM_SYNC (0x00000020)
+#define REG_DP_HSYNC_VSYNC_WIDTH_POLARITY (0x00000024)
+#define REG_DP_ACTIVE_HOR_VER (0x00000028)
+
+#define REG_DP_MISC1_MISC0 (0x0000002C)
+#define DP_MISC0_SYNCHRONOUS_CLK (0x00000001)
+#define DP_MISC0_COLORIMETRY_CFG_SHIFT (0x00000001)
+#define DP_MISC0_TEST_BITS_DEPTH_SHIFT (0x00000005)
+
+#define REG_DP_VALID_BOUNDARY (0x00000030)
+#define REG_DP_VALID_BOUNDARY_2 (0x00000034)
+
+#define REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING (0x00000038)
+#define LANE0_MAPPING_SHIFT (0x00000000)
+#define LANE1_MAPPING_SHIFT (0x00000002)
+#define LANE2_MAPPING_SHIFT (0x00000004)
+#define LANE3_MAPPING_SHIFT (0x00000006)
+
+#define REG_DP_MAINLINK_READY (0x00000040)
+#define DP_MAINLINK_READY_FOR_VIDEO (0x00000001)
+#define DP_MAINLINK_READY_LINK_TRAINING_SHIFT (0x00000003)
+
+#define REG_DP_MAINLINK_LEVELS (0x00000044)
+#define DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2 (0x00000002)
+
+#define REG_DP_TU (0x0000004C)
+
+#define REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET (0x00000054)
+#define DP_HBR2_ERM_PATTERN (0x00010000)
+
+#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0 (0x000000C0)
+#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1 (0x000000C4)
+#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2 (0x000000C8)
+
+#define MMSS_DP_MISC1_MISC0 (0x0000002C)
+#define MMSS_DP_AUDIO_TIMING_GEN (0x00000080)
+#define MMSS_DP_AUDIO_TIMING_RBR_32 (0x00000084)
+#define MMSS_DP_AUDIO_TIMING_HBR_32 (0x00000088)
+#define MMSS_DP_AUDIO_TIMING_RBR_44 (0x0000008C)
+#define MMSS_DP_AUDIO_TIMING_HBR_44 (0x00000090)
+#define MMSS_DP_AUDIO_TIMING_RBR_48 (0x00000094)
+#define MMSS_DP_AUDIO_TIMING_HBR_48 (0x00000098)
+
+#define MMSS_DP_PSR_CRC_RG (0x00000154)
+#define MMSS_DP_PSR_CRC_B (0x00000158)
+
+#define REG_DP_COMPRESSION_MODE_CTRL (0x00000180)
+
+#define MMSS_DP_AUDIO_CFG (0x00000200)
+#define MMSS_DP_AUDIO_STATUS (0x00000204)
+#define MMSS_DP_AUDIO_PKT_CTRL (0x00000208)
+#define MMSS_DP_AUDIO_PKT_CTRL2 (0x0000020C)
+#define MMSS_DP_AUDIO_ACR_CTRL (0x00000210)
+#define MMSS_DP_AUDIO_CTRL_RESET (0x00000214)
+
+#define MMSS_DP_SDP_CFG (0x00000228)
+#define MMSS_DP_SDP_CFG2 (0x0000022C)
+#define MMSS_DP_AUDIO_TIMESTAMP_0 (0x00000230)
+#define MMSS_DP_AUDIO_TIMESTAMP_1 (0x00000234)
+
+#define MMSS_DP_AUDIO_STREAM_0 (0x00000240)
+#define MMSS_DP_AUDIO_STREAM_1 (0x00000244)
+
+#define MMSS_DP_EXTENSION_0 (0x00000250)
+#define MMSS_DP_EXTENSION_1 (0x00000254)
+#define MMSS_DP_EXTENSION_2 (0x00000258)
+#define MMSS_DP_EXTENSION_3 (0x0000025C)
+#define MMSS_DP_EXTENSION_4 (0x00000260)
+#define MMSS_DP_EXTENSION_5 (0x00000264)
+#define MMSS_DP_EXTENSION_6 (0x00000268)
+#define MMSS_DP_EXTENSION_7 (0x0000026C)
+#define MMSS_DP_EXTENSION_8 (0x00000270)
+#define MMSS_DP_EXTENSION_9 (0x00000274)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_0 (0x00000278)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_1 (0x0000027C)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_2 (0x00000280)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_3 (0x00000284)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_4 (0x00000288)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_5 (0x0000028C)
+#define MMSS_DP_AUDIO_ISRC_0 (0x00000290)
+#define MMSS_DP_AUDIO_ISRC_1 (0x00000294)
+#define MMSS_DP_AUDIO_ISRC_2 (0x00000298)
+#define MMSS_DP_AUDIO_ISRC_3 (0x0000029C)
+#define MMSS_DP_AUDIO_ISRC_4 (0x000002A0)
+#define MMSS_DP_AUDIO_ISRC_5 (0x000002A4)
+#define MMSS_DP_AUDIO_INFOFRAME_0 (0x000002A8)
+#define MMSS_DP_AUDIO_INFOFRAME_1 (0x000002AC)
+#define MMSS_DP_AUDIO_INFOFRAME_2 (0x000002B0)
+
+#define MMSS_DP_GENERIC0_0 (0x00000300)
+#define MMSS_DP_GENERIC0_1 (0x00000304)
+#define MMSS_DP_GENERIC0_2 (0x00000308)
+#define MMSS_DP_GENERIC0_3 (0x0000030C)
+#define MMSS_DP_GENERIC0_4 (0x00000310)
+#define MMSS_DP_GENERIC0_5 (0x00000314)
+#define MMSS_DP_GENERIC0_6 (0x00000318)
+#define MMSS_DP_GENERIC0_7 (0x0000031C)
+#define MMSS_DP_GENERIC0_8 (0x00000320)
+#define MMSS_DP_GENERIC0_9 (0x00000324)
+#define MMSS_DP_GENERIC1_0 (0x00000328)
+#define MMSS_DP_GENERIC1_1 (0x0000032C)
+#define MMSS_DP_GENERIC1_2 (0x00000330)
+#define MMSS_DP_GENERIC1_3 (0x00000334)
+#define MMSS_DP_GENERIC1_4 (0x00000338)
+#define MMSS_DP_GENERIC1_5 (0x0000033C)
+#define MMSS_DP_GENERIC1_6 (0x00000340)
+#define MMSS_DP_GENERIC1_7 (0x00000344)
+#define MMSS_DP_GENERIC1_8 (0x00000348)
+#define MMSS_DP_GENERIC1_9 (0x0000034C)
+
+#define MMSS_DP_VSCEXT_0 (0x000002D0)
+#define MMSS_DP_VSCEXT_1 (0x000002D4)
+#define MMSS_DP_VSCEXT_2 (0x000002D8)
+#define MMSS_DP_VSCEXT_3 (0x000002DC)
+#define MMSS_DP_VSCEXT_4 (0x000002E0)
+#define MMSS_DP_VSCEXT_5 (0x000002E4)
+#define MMSS_DP_VSCEXT_6 (0x000002E8)
+#define MMSS_DP_VSCEXT_7 (0x000002EC)
+#define MMSS_DP_VSCEXT_8 (0x000002F0)
+#define MMSS_DP_VSCEXT_9 (0x000002F4)
+
+#define MMSS_DP_BIST_ENABLE (0x00000000)
+#define DP_BIST_ENABLE_DPBIST_EN (0x00000001)
+
+#define MMSS_DP_TIMING_ENGINE_EN (0x00000010)
+#define DP_TIMING_ENGINE_EN_EN (0x00000001)
+
+#define MMSS_DP_INTF_CONFIG (0x00000014)
+#define MMSS_DP_INTF_HSYNC_CTL (0x00000018)
+#define MMSS_DP_INTF_VSYNC_PERIOD_F0 (0x0000001C)
+#define MMSS_DP_INTF_VSYNC_PERIOD_F1 (0x00000020)
+#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0 (0x00000024)
+#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1 (0x00000028)
+#define MMSS_INTF_DISPLAY_V_START_F0 (0x0000002C)
+#define MMSS_INTF_DISPLAY_V_START_F1 (0x00000030)
+#define MMSS_DP_INTF_DISPLAY_V_END_F0 (0x00000034)
+#define MMSS_DP_INTF_DISPLAY_V_END_F1 (0x00000038)
+#define MMSS_DP_INTF_ACTIVE_V_START_F0 (0x0000003C)
+#define MMSS_DP_INTF_ACTIVE_V_START_F1 (0x00000040)
+#define MMSS_DP_INTF_ACTIVE_V_END_F0 (0x00000044)
+#define MMSS_DP_INTF_ACTIVE_V_END_F1 (0x00000048)
+#define MMSS_DP_INTF_DISPLAY_HCTL (0x0000004C)
+#define MMSS_DP_INTF_ACTIVE_HCTL (0x00000050)
+#define MMSS_DP_INTF_POLARITY_CTL (0x00000058)
+
+#define MMSS_DP_TPG_MAIN_CONTROL (0x00000060)
+#define MMSS_DP_DSC_DTO (0x0000007C)
+#define DP_TPG_CHECKERED_RECT_PATTERN (0x00000100)
+
+#define MMSS_DP_TPG_VIDEO_CONFIG (0x00000064)
+#define DP_TPG_VIDEO_CONFIG_BPP_8BIT (0x00000001)
+#define DP_TPG_VIDEO_CONFIG_RGB (0x00000004)
+
+#define MMSS_DP_ASYNC_FIFO_CONFIG (0x00000088)
+
+#define REG_DP_PHY_AUX_INTERRUPT_CLEAR (0x0000004C)
+#define REG_DP_PHY_AUX_BIST_CFG (0x00000050)
+#define REG_DP_PHY_AUX_INTERRUPT_STATUS (0x000000BC)
+
+/* DP HDCP 1.3 registers */
+#define DP_HDCP_CTRL (0x0A0)
+#define DP_HDCP_STATUS (0x0A4)
+#define DP_HDCP_SW_UPPER_AKSV (0x098)
+#define DP_HDCP_SW_LOWER_AKSV (0x09C)
+#define DP_HDCP_ENTROPY_CTRL0 (0x350)
+#define DP_HDCP_ENTROPY_CTRL1 (0x35C)
+#define DP_HDCP_SHA_STATUS (0x0C8)
+#define DP_HDCP_RCVPORT_DATA2_0 (0x0B0)
+#define DP_HDCP_RCVPORT_DATA3 (0x0A4)
+#define DP_HDCP_RCVPORT_DATA4 (0x0A8)
+#define DP_HDCP_RCVPORT_DATA5 (0x0C0)
+#define DP_HDCP_RCVPORT_DATA6 (0x0C4)
+
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_CTRL (0x024)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_DATA (0x028)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA0 (0x004)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA1 (0x008)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA7 (0x00C)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA8 (0x010)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA9 (0x014)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA10 (0x018)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA11 (0x01C)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA12 (0x020)
+
+#endif /* _DP_REG_H_ */
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
new file mode 100644
index 000000000..e9036e403
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#include "dsi.h"
+#include "dsi_cfg.h"
+
+bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
+{
+ unsigned long host_flags = msm_dsi_host_get_mode_flags(msm_dsi->host);
+
+ return !(host_flags & MIPI_DSI_MODE_VIDEO);
+}
+
+struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
+{
+ return msm_dsi_host_get_dsc_config(msm_dsi->host);
+}
+
+static int dsi_get_phy(struct msm_dsi *msm_dsi)
+{
+ struct platform_device *pdev = msm_dsi->pdev;
+ struct platform_device *phy_pdev;
+ struct device_node *phy_node;
+
+ phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
+ if (!phy_node) {
+ DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n");
+ return -ENXIO;
+ }
+
+ phy_pdev = of_find_device_by_node(phy_node);
+ if (phy_pdev) {
+ msm_dsi->phy = platform_get_drvdata(phy_pdev);
+ msm_dsi->phy_dev = &phy_pdev->dev;
+ }
+
+ of_node_put(phy_node);
+
+ if (!phy_pdev) {
+ DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
+ return -EPROBE_DEFER;
+ }
+ if (!msm_dsi->phy) {
+ put_device(&phy_pdev->dev);
+ DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ return 0;
+}
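+
+/*
+ * The "phys" phandle resolved above comes from the DSI host node, e.g.
+ * (illustrative):
+ *
+ *	dsi@ae94000 {
+ *		compatible = "qcom,mdss-dsi-ctrl";
+ *		phys = <&dsi0_phy>;
+ *		...
+ *	};
+ */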
+
+static void dsi_destroy(struct msm_dsi *msm_dsi)
+{
+ if (!msm_dsi)
+ return;
+
+ msm_dsi_manager_unregister(msm_dsi);
+
+ if (msm_dsi->phy_dev) {
+ put_device(msm_dsi->phy_dev);
+ msm_dsi->phy = NULL;
+ msm_dsi->phy_dev = NULL;
+ }
+
+ if (msm_dsi->host) {
+ msm_dsi_host_destroy(msm_dsi->host);
+ msm_dsi->host = NULL;
+ }
+
+ platform_set_drvdata(msm_dsi->pdev, NULL);
+}
+
+static struct msm_dsi *dsi_init(struct platform_device *pdev)
+{
+ struct msm_dsi *msm_dsi;
+ int ret;
+
+ if (!pdev)
+ return ERR_PTR(-ENXIO);
+
+ msm_dsi = devm_kzalloc(&pdev->dev, sizeof(*msm_dsi), GFP_KERNEL);
+ if (!msm_dsi)
+ return ERR_PTR(-ENOMEM);
+ DBG("dsi probed=%p", msm_dsi);
+
+ msm_dsi->id = -1;
+ msm_dsi->pdev = pdev;
+ platform_set_drvdata(pdev, msm_dsi);
+
+ /* Init dsi host */
+ ret = msm_dsi_host_init(msm_dsi);
+ if (ret)
+ goto destroy_dsi;
+
+ /* GET dsi PHY */
+ ret = dsi_get_phy(msm_dsi);
+ if (ret)
+ goto destroy_dsi;
+
+ /* Register to dsi manager */
+ ret = msm_dsi_manager_register(msm_dsi);
+ if (ret)
+ goto destroy_dsi;
+
+ return msm_dsi;
+
+destroy_dsi:
+ dsi_destroy(msm_dsi);
+ return ERR_PTR(ret);
+}
+
+static int dsi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+ struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
+
+ priv->dsi[msm_dsi->id] = msm_dsi;
+
+ return 0;
+}
+
+static void dsi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+ struct msm_dsi *msm_dsi = dev_get_drvdata(dev);
+
+ msm_dsi_tx_buf_free(msm_dsi->host);
+ priv->dsi[msm_dsi->id] = NULL;
+}
+
+static const struct component_ops dsi_ops = {
+ .bind = dsi_bind,
+ .unbind = dsi_unbind,
+};
+
+int dsi_dev_attach(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &dsi_ops);
+}
+
+void dsi_dev_detach(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dsi_ops);
+}
+
+static int dsi_dev_probe(struct platform_device *pdev)
+{
+ struct msm_dsi *msm_dsi;
+
+ DBG("");
+ msm_dsi = dsi_init(pdev);
+ if (IS_ERR(msm_dsi)) {
+ /* Don't fail the bind if the dsi port is not connected */
+ if (PTR_ERR(msm_dsi) == -ENODEV)
+ return 0;
+ else
+ return PTR_ERR(msm_dsi);
+ }
+
+ return 0;
+}
+
+static int dsi_dev_remove(struct platform_device *pdev)
+{
+ struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
+
+ DBG("");
+ dsi_destroy(msm_dsi);
+
+ return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,mdss-dsi-ctrl", .data = NULL /* autodetect cfg */ },
+ { .compatible = "qcom,dsi-ctrl-6g-qcm2290", .data = &qcm2290_dsi_cfg_handler },
+ {}
+};
+
+static const struct dev_pm_ops dsi_pm_ops = {
+ SET_RUNTIME_PM_OPS(msm_dsi_runtime_suspend, msm_dsi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static struct platform_driver dsi_driver = {
+ .probe = dsi_dev_probe,
+ .remove = dsi_dev_remove,
+ .driver = {
+ .name = "msm_dsi",
+ .of_match_table = dt_match,
+ .pm = &dsi_pm_ops,
+ },
+};
+
+void __init msm_dsi_register(void)
+{
+ DBG("");
+ msm_dsi_phy_driver_register();
+ platform_driver_register(&dsi_driver);
+}
+
+void __exit msm_dsi_unregister(void)
+{
+ DBG("");
+ msm_dsi_phy_driver_unregister();
+ platform_driver_unregister(&dsi_driver);
+}
+
+int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
+ struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv;
+ int ret;
+
+ if (WARN_ON(!encoder) || WARN_ON(!msm_dsi) || WARN_ON(!dev))
+ return -EINVAL;
+
+ priv = dev->dev_private;
+
+ if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
+ DRM_DEV_ERROR(dev->dev, "too many bridges\n");
+ return -ENOSPC;
+ }
+
+ msm_dsi->dev = dev;
+
+ ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to modeset init host: %d\n", ret);
+ goto fail;
+ }
+
+ if (msm_dsi_is_bonded_dsi(msm_dsi) &&
+ !msm_dsi_is_master_dsi(msm_dsi)) {
+ /*
+ * Do not return an error here; just skip creating the
+ * encoder/connector for the slave-DSI.
+ */
+ return 0;
+ }
+
+ msm_dsi->encoder = encoder;
+
+ msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
+ if (IS_ERR(msm_dsi->bridge)) {
+ ret = PTR_ERR(msm_dsi->bridge);
+ DRM_DEV_ERROR(dev->dev, "failed to create dsi bridge: %d\n", ret);
+ msm_dsi->bridge = NULL;
+ goto fail;
+ }
+
+ ret = msm_dsi_manager_ext_bridge_init(msm_dsi->id);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev,
+ "failed to create dsi connector: %d\n", ret);
+ goto fail;
+ }
+
+ priv->bridges[priv->num_bridges++] = msm_dsi->bridge;
+
+ return 0;
+fail:
+ /* bridge/connector are normally destroyed by drm: */
+ if (msm_dsi->bridge) {
+ msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
+ msm_dsi->bridge = NULL;
+ }
+
+ return ret;
+}
+
+void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi)
+{
+ msm_dsi_host_snapshot(disp_state, msm_dsi->host);
+ msm_dsi_phy_snapshot(disp_state, msm_dsi->phy);
+}
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
new file mode 100644
index 000000000..6b239f77f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DSI_CONNECTOR_H__
+#define __DSI_CONNECTOR_H__
+
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+
+#include "msm_drv.h"
+#include "disp/msm_disp_snapshot.h"
+
+#define DSI_0 0
+#define DSI_1 1
+#define DSI_MAX 2
+
+struct msm_dsi_phy_shared_timings;
+struct msm_dsi_phy_clk_request;
+
+enum msm_dsi_phy_usecase {
+ MSM_DSI_PHY_STANDALONE,
+ MSM_DSI_PHY_MASTER,
+ MSM_DSI_PHY_SLAVE,
+};
+
+#define DSI_BUS_CLK_MAX 4
+
+struct msm_dsi {
+ struct drm_device *dev;
+ struct platform_device *pdev;
+
+ /* internal dsi bridge attached to MDP interface */
+ struct drm_bridge *bridge;
+
+ struct mipi_dsi_host *host;
+ struct msm_dsi_phy *phy;
+
+ /* external_bridge connected to dsi bridge output */
+ struct drm_bridge *external_bridge;
+
+ struct device *phy_dev;
+ bool phy_enabled;
+
+ /* the encoder we are hooked to (outside of dsi block) */
+ struct drm_encoder *encoder;
+
+ int id;
+};
+
+/* dsi manager */
+struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
+void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
+int msm_dsi_manager_ext_bridge_init(u8 id);
+int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
+int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
+void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
+void msm_dsi_manager_tpg_enable(void);
+
+/* msm dsi */
+static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
+{
+ return msm_dsi->external_bridge;
+}
+
+/* dsi host */
+struct msm_dsi_host;
+int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg);
+void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg);
+int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg);
+int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg);
+void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
+ u32 dma_base, u32 len);
+int msm_dsi_host_enable(struct mipi_dsi_host *host);
+int msm_dsi_host_disable(struct mipi_dsi_host *host);
+void msm_dsi_host_enable_irq(struct mipi_dsi_host *host);
+void msm_dsi_host_disable_irq(struct mipi_dsi_host *host);
+int msm_dsi_host_power_on(struct mipi_dsi_host *host,
+ struct msm_dsi_phy_shared_timings *phy_shared_timings,
+ bool is_bonded_dsi, struct msm_dsi_phy *phy);
+int msm_dsi_host_power_off(struct mipi_dsi_host *host);
+int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
+ const struct drm_display_mode *mode);
+enum drm_mode_status msm_dsi_host_check_dsc(struct mipi_dsi_host *host,
+ const struct drm_display_mode *mode);
+unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host);
+int msm_dsi_host_register(struct mipi_dsi_host *host);
+void msm_dsi_host_unregister(struct mipi_dsi_host *host);
+void msm_dsi_host_set_phy_mode(struct mipi_dsi_host *host,
+ struct msm_dsi_phy *src_phy);
+int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
+ struct msm_dsi_phy *src_phy);
+void msm_dsi_host_reset_phy(struct mipi_dsi_host *host);
+void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
+ struct msm_dsi_phy_clk_request *clk_req,
+ bool is_bonded_dsi);
+void msm_dsi_host_destroy(struct mipi_dsi_host *host);
+int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
+ struct drm_device *dev);
+int msm_dsi_host_init(struct msm_dsi *msm_dsi);
+int msm_dsi_runtime_suspend(struct device *dev);
+int msm_dsi_runtime_resume(struct device *dev);
+int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host);
+int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host);
+int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host);
+int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host);
+void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host);
+void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host);
+int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size);
+int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size);
+void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host);
+void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host);
+void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host);
+void msm_dsi_tx_buf_free(struct mipi_dsi_host *mipi_host);
+int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
+int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
+int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
+int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host);
+int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
+int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
+void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host);
+void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host);
+struct drm_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host);
+
+/* dsi phy */
+struct msm_dsi_phy;
+struct msm_dsi_phy_shared_timings {
+ u32 clk_post;
+ u32 clk_pre;
+ bool clk_pre_inc_by_2;
+};
+
+struct msm_dsi_phy_clk_request {
+ unsigned long bitclk_rate;
+ unsigned long escclk_rate;
+};
+
+void msm_dsi_phy_driver_register(void);
+void msm_dsi_phy_driver_unregister(void);
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
+ struct msm_dsi_phy_clk_request *clk_req,
+ struct msm_dsi_phy_shared_timings *shared_timings);
+void msm_dsi_phy_disable(struct msm_dsi_phy *phy);
+void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
+ enum msm_dsi_phy_usecase uc);
+void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy);
+int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy);
+void msm_dsi_phy_snapshot(struct msm_disp_state *disp_state, struct msm_dsi_phy *phy);
+bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable);
+
+#endif /* __DSI_CONNECTOR_H__ */
+
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
new file mode 100644
index 000000000..d1b2a17b0
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -0,0 +1,788 @@
+#ifndef DSI_XML
+#define DSI_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum dsi_traffic_mode {
+ NON_BURST_SYNCH_PULSE = 0,
+ NON_BURST_SYNCH_EVENT = 1,
+ BURST_MODE = 2,
+};
+
+enum dsi_vid_dst_format {
+ VID_DST_FORMAT_RGB565 = 0,
+ VID_DST_FORMAT_RGB666 = 1,
+ VID_DST_FORMAT_RGB666_LOOSE = 2,
+ VID_DST_FORMAT_RGB888 = 3,
+};
+
+enum dsi_rgb_swap {
+ SWAP_RGB = 0,
+ SWAP_RBG = 1,
+ SWAP_BGR = 2,
+ SWAP_BRG = 3,
+ SWAP_GRB = 4,
+ SWAP_GBR = 5,
+};
+
+enum dsi_cmd_trigger {
+ TRIGGER_NONE = 0,
+ TRIGGER_SEOF = 1,
+ TRIGGER_TE = 2,
+ TRIGGER_SW = 4,
+ TRIGGER_SW_SEOF = 5,
+ TRIGGER_SW_TE = 6,
+};
+
+enum dsi_cmd_dst_format {
+ CMD_DST_FORMAT_RGB111 = 0,
+ CMD_DST_FORMAT_RGB332 = 3,
+ CMD_DST_FORMAT_RGB444 = 4,
+ CMD_DST_FORMAT_RGB565 = 6,
+ CMD_DST_FORMAT_RGB666 = 7,
+ CMD_DST_FORMAT_RGB888 = 8,
+};
+
+enum dsi_lane_swap {
+ LANE_SWAP_0123 = 0,
+ LANE_SWAP_3012 = 1,
+ LANE_SWAP_2301 = 2,
+ LANE_SWAP_1230 = 3,
+ LANE_SWAP_0321 = 4,
+ LANE_SWAP_1032 = 5,
+ LANE_SWAP_2103 = 6,
+ LANE_SWAP_3210 = 7,
+};
+
+enum video_config_bpp {
+ VIDEO_CONFIG_18BPP = 0,
+ VIDEO_CONFIG_24BPP = 1,
+};
+
+enum video_pattern_sel {
+ VID_PRBS = 0,
+ VID_INCREMENTAL = 1,
+ VID_FIXED = 2,
+ VID_MDSS_GENERAL_PATTERN = 3,
+};
+
+enum cmd_mdp_stream0_pattern_sel {
+ CMD_MDP_PRBS = 0,
+ CMD_MDP_INCREMENTAL = 1,
+ CMD_MDP_FIXED = 2,
+ CMD_MDP_MDSS_GENERAL_PATTERN = 3,
+};
+
+enum cmd_dma_pattern_sel {
+ CMD_DMA_PRBS = 0,
+ CMD_DMA_INCREMENTAL = 1,
+ CMD_DMA_FIXED = 2,
+ CMD_DMA_CUSTOM_PATTERN_DMA_FIFO = 3,
+};
+
+#define DSI_IRQ_CMD_DMA_DONE 0x00000001
+#define DSI_IRQ_MASK_CMD_DMA_DONE 0x00000002
+#define DSI_IRQ_CMD_MDP_DONE 0x00000100
+#define DSI_IRQ_MASK_CMD_MDP_DONE 0x00000200
+#define DSI_IRQ_VIDEO_DONE 0x00010000
+#define DSI_IRQ_MASK_VIDEO_DONE 0x00020000
+#define DSI_IRQ_BTA_DONE 0x00100000
+#define DSI_IRQ_MASK_BTA_DONE 0x00200000
+#define DSI_IRQ_ERROR 0x01000000
+#define DSI_IRQ_MASK_ERROR 0x02000000
+#define REG_DSI_6G_HW_VERSION 0x00000000
+#define DSI_6G_HW_VERSION_MAJOR__MASK 0xf0000000
+#define DSI_6G_HW_VERSION_MAJOR__SHIFT 28
+static inline uint32_t DSI_6G_HW_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << DSI_6G_HW_VERSION_MAJOR__SHIFT) & DSI_6G_HW_VERSION_MAJOR__MASK;
+}
+#define DSI_6G_HW_VERSION_MINOR__MASK 0x0fff0000
+#define DSI_6G_HW_VERSION_MINOR__SHIFT 16
+static inline uint32_t DSI_6G_HW_VERSION_MINOR(uint32_t val)
+{
+ return ((val) << DSI_6G_HW_VERSION_MINOR__SHIFT) & DSI_6G_HW_VERSION_MINOR__MASK;
+}
+#define DSI_6G_HW_VERSION_STEP__MASK 0x0000ffff
+#define DSI_6G_HW_VERSION_STEP__SHIFT 0
+static inline uint32_t DSI_6G_HW_VERSION_STEP(uint32_t val)
+{
+ return ((val) << DSI_6G_HW_VERSION_STEP__SHIFT) & DSI_6G_HW_VERSION_STEP__MASK;
+}
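+
+/*
+ * All MASK/SHIFT pairs in this header follow the same pack/unpack
+ * pattern. An illustrative (hand-written, not generated) use of the
+ * helpers above:
+ *
+ *	u32 packed = DSI_6G_HW_VERSION_MAJOR(1) |
+ *		     DSI_6G_HW_VERSION_MINOR(4) |
+ *		     DSI_6G_HW_VERSION_STEP(2);      -> packed == 0x10040002
+ *	u32 major = (packed & DSI_6G_HW_VERSION_MAJOR__MASK) >>
+ *		     DSI_6G_HW_VERSION_MAJOR__SHIFT; -> major == 1
+ */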
+
+#define REG_DSI_CTRL 0x00000000
+#define DSI_CTRL_ENABLE 0x00000001
+#define DSI_CTRL_VID_MODE_EN 0x00000002
+#define DSI_CTRL_CMD_MODE_EN 0x00000004
+#define DSI_CTRL_LANE0 0x00000010
+#define DSI_CTRL_LANE1 0x00000020
+#define DSI_CTRL_LANE2 0x00000040
+#define DSI_CTRL_LANE3 0x00000080
+#define DSI_CTRL_CLK_EN 0x00000100
+#define DSI_CTRL_ECC_CHECK 0x00100000
+#define DSI_CTRL_CRC_CHECK 0x01000000
+
+#define REG_DSI_STATUS0 0x00000004
+#define DSI_STATUS0_CMD_MODE_ENGINE_BUSY 0x00000001
+#define DSI_STATUS0_CMD_MODE_DMA_BUSY 0x00000002
+#define DSI_STATUS0_CMD_MODE_MDP_BUSY 0x00000004
+#define DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY 0x00000008
+#define DSI_STATUS0_DSI_BUSY 0x00000010
+#define DSI_STATUS0_INTERLEAVE_OP_CONTENTION 0x80000000
+
+#define REG_DSI_FIFO_STATUS 0x00000008
+#define DSI_FIFO_STATUS_VIDEO_MDP_FIFO_OVERFLOW 0x00000001
+#define DSI_FIFO_STATUS_VIDEO_MDP_FIFO_UNDERFLOW 0x00000008
+#define DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW 0x00000080
+#define DSI_FIFO_STATUS_CMD_DMA_FIFO_RD_WATERMARK_REACH 0x00000100
+#define DSI_FIFO_STATUS_CMD_DMA_FIFO_WR_WATERMARK_REACH 0x00000200
+#define DSI_FIFO_STATUS_CMD_DMA_FIFO_UNDERFLOW 0x00000400
+#define DSI_FIFO_STATUS_DLN0_LP_FIFO_EMPTY 0x00001000
+#define DSI_FIFO_STATUS_DLN0_LP_FIFO_FULL 0x00002000
+#define DSI_FIFO_STATUS_DLN0_LP_FIFO_OVERFLOW 0x00004000
+#define DSI_FIFO_STATUS_DLN0_HS_FIFO_EMPTY 0x00010000
+#define DSI_FIFO_STATUS_DLN0_HS_FIFO_FULL 0x00020000
+#define DSI_FIFO_STATUS_DLN0_HS_FIFO_OVERFLOW 0x00040000
+#define DSI_FIFO_STATUS_DLN0_HS_FIFO_UNDERFLOW 0x00080000
+#define DSI_FIFO_STATUS_DLN1_HS_FIFO_EMPTY 0x00100000
+#define DSI_FIFO_STATUS_DLN1_HS_FIFO_FULL 0x00200000
+#define DSI_FIFO_STATUS_DLN1_HS_FIFO_OVERFLOW 0x00400000
+#define DSI_FIFO_STATUS_DLN1_HS_FIFO_UNDERFLOW 0x00800000
+#define DSI_FIFO_STATUS_DLN2_HS_FIFO_EMPTY 0x01000000
+#define DSI_FIFO_STATUS_DLN2_HS_FIFO_FULL 0x02000000
+#define DSI_FIFO_STATUS_DLN2_HS_FIFO_OVERFLOW 0x04000000
+#define DSI_FIFO_STATUS_DLN2_HS_FIFO_UNDERFLOW 0x08000000
+#define DSI_FIFO_STATUS_DLN3_HS_FIFO_EMPTY 0x10000000
+#define DSI_FIFO_STATUS_DLN3_HS_FIFO_FULL 0x20000000
+#define DSI_FIFO_STATUS_DLN3_HS_FIFO_OVERFLOW 0x40000000
+#define DSI_FIFO_STATUS_DLN3_HS_FIFO_UNDERFLOW 0x80000000
+
+#define REG_DSI_VID_CFG0 0x0000000c
+#define DSI_VID_CFG0_VIRT_CHANNEL__MASK 0x00000003
+#define DSI_VID_CFG0_VIRT_CHANNEL__SHIFT 0
+static inline uint32_t DSI_VID_CFG0_VIRT_CHANNEL(uint32_t val)
+{
+ return ((val) << DSI_VID_CFG0_VIRT_CHANNEL__SHIFT) & DSI_VID_CFG0_VIRT_CHANNEL__MASK;
+}
+#define DSI_VID_CFG0_DST_FORMAT__MASK 0x00000030
+#define DSI_VID_CFG0_DST_FORMAT__SHIFT 4
+static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_vid_dst_format val)
+{
+ return ((val) << DSI_VID_CFG0_DST_FORMAT__SHIFT) & DSI_VID_CFG0_DST_FORMAT__MASK;
+}
+#define DSI_VID_CFG0_TRAFFIC_MODE__MASK 0x00000300
+#define DSI_VID_CFG0_TRAFFIC_MODE__SHIFT 8
+static inline uint32_t DSI_VID_CFG0_TRAFFIC_MODE(enum dsi_traffic_mode val)
+{
+ return ((val) << DSI_VID_CFG0_TRAFFIC_MODE__SHIFT) & DSI_VID_CFG0_TRAFFIC_MODE__MASK;
+}
+#define DSI_VID_CFG0_BLLP_POWER_STOP 0x00001000
+#define DSI_VID_CFG0_EOF_BLLP_POWER_STOP 0x00008000
+#define DSI_VID_CFG0_HSA_POWER_STOP 0x00010000
+#define DSI_VID_CFG0_HBP_POWER_STOP 0x00100000
+#define DSI_VID_CFG0_HFP_POWER_STOP 0x01000000
+#define DSI_VID_CFG0_PULSE_MODE_HSA_HE 0x10000000
+
+#define REG_DSI_VID_CFG1 0x0000001c
+#define DSI_VID_CFG1_R_SEL 0x00000001
+#define DSI_VID_CFG1_G_SEL 0x00000010
+#define DSI_VID_CFG1_B_SEL 0x00000100
+#define DSI_VID_CFG1_RGB_SWAP__MASK 0x00007000
+#define DSI_VID_CFG1_RGB_SWAP__SHIFT 12
+static inline uint32_t DSI_VID_CFG1_RGB_SWAP(enum dsi_rgb_swap val)
+{
+ return ((val) << DSI_VID_CFG1_RGB_SWAP__SHIFT) & DSI_VID_CFG1_RGB_SWAP__MASK;
+}
+
+#define REG_DSI_ACTIVE_H 0x00000020
+#define DSI_ACTIVE_H_START__MASK 0x00000fff
+#define DSI_ACTIVE_H_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_H_START(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_H_START__SHIFT) & DSI_ACTIVE_H_START__MASK;
+}
+#define DSI_ACTIVE_H_END__MASK 0x0fff0000
+#define DSI_ACTIVE_H_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_H_END(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_H_END__SHIFT) & DSI_ACTIVE_H_END__MASK;
+}
+
+#define REG_DSI_ACTIVE_V 0x00000024
+#define DSI_ACTIVE_V_START__MASK 0x00000fff
+#define DSI_ACTIVE_V_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_V_START(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_V_START__SHIFT) & DSI_ACTIVE_V_START__MASK;
+}
+#define DSI_ACTIVE_V_END__MASK 0x0fff0000
+#define DSI_ACTIVE_V_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_V_END(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_V_END__SHIFT) & DSI_ACTIVE_V_END__MASK;
+}
+
+#define REG_DSI_TOTAL 0x00000028
+#define DSI_TOTAL_H_TOTAL__MASK 0x00000fff
+#define DSI_TOTAL_H_TOTAL__SHIFT 0
+static inline uint32_t DSI_TOTAL_H_TOTAL(uint32_t val)
+{
+ return ((val) << DSI_TOTAL_H_TOTAL__SHIFT) & DSI_TOTAL_H_TOTAL__MASK;
+}
+#define DSI_TOTAL_V_TOTAL__MASK 0x0fff0000
+#define DSI_TOTAL_V_TOTAL__SHIFT 16
+static inline uint32_t DSI_TOTAL_V_TOTAL(uint32_t val)
+{
+ return ((val) << DSI_TOTAL_V_TOTAL__SHIFT) & DSI_TOTAL_V_TOTAL__MASK;
+}
+
+#define REG_DSI_ACTIVE_HSYNC 0x0000002c
+#define DSI_ACTIVE_HSYNC_START__MASK 0x00000fff
+#define DSI_ACTIVE_HSYNC_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_HSYNC_START(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_HSYNC_START__SHIFT) & DSI_ACTIVE_HSYNC_START__MASK;
+}
+#define DSI_ACTIVE_HSYNC_END__MASK 0x0fff0000
+#define DSI_ACTIVE_HSYNC_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_HSYNC_END(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_HSYNC_END__SHIFT) & DSI_ACTIVE_HSYNC_END__MASK;
+}
+
+#define REG_DSI_ACTIVE_VSYNC_HPOS 0x00000030
+#define DSI_ACTIVE_VSYNC_HPOS_START__MASK 0x00000fff
+#define DSI_ACTIVE_VSYNC_HPOS_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_VSYNC_HPOS_START(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_VSYNC_HPOS_START__SHIFT) & DSI_ACTIVE_VSYNC_HPOS_START__MASK;
+}
+#define DSI_ACTIVE_VSYNC_HPOS_END__MASK 0x0fff0000
+#define DSI_ACTIVE_VSYNC_HPOS_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_VSYNC_HPOS_END(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_VSYNC_HPOS_END__SHIFT) & DSI_ACTIVE_VSYNC_HPOS_END__MASK;
+}
+
+#define REG_DSI_ACTIVE_VSYNC_VPOS 0x00000034
+#define DSI_ACTIVE_VSYNC_VPOS_START__MASK 0x00000fff
+#define DSI_ACTIVE_VSYNC_VPOS_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_VSYNC_VPOS_START(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_VSYNC_VPOS_START__SHIFT) & DSI_ACTIVE_VSYNC_VPOS_START__MASK;
+}
+#define DSI_ACTIVE_VSYNC_VPOS_END__MASK 0x0fff0000
+#define DSI_ACTIVE_VSYNC_VPOS_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_VSYNC_VPOS_END(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_VSYNC_VPOS_END__SHIFT) & DSI_ACTIVE_VSYNC_VPOS_END__MASK;
+}
+
+#define REG_DSI_CMD_DMA_CTRL 0x00000038
+#define DSI_CMD_DMA_CTRL_BROADCAST_EN 0x80000000
+#define DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER 0x10000000
+#define DSI_CMD_DMA_CTRL_LOW_POWER 0x04000000
+
+#define REG_DSI_CMD_CFG0 0x0000003c
+#define DSI_CMD_CFG0_DST_FORMAT__MASK 0x0000000f
+#define DSI_CMD_CFG0_DST_FORMAT__SHIFT 0
+static inline uint32_t DSI_CMD_CFG0_DST_FORMAT(enum dsi_cmd_dst_format val)
+{
+ return ((val) << DSI_CMD_CFG0_DST_FORMAT__SHIFT) & DSI_CMD_CFG0_DST_FORMAT__MASK;
+}
+#define DSI_CMD_CFG0_R_SEL 0x00000010
+#define DSI_CMD_CFG0_G_SEL 0x00000100
+#define DSI_CMD_CFG0_B_SEL 0x00001000
+#define DSI_CMD_CFG0_INTERLEAVE_MAX__MASK 0x00f00000
+#define DSI_CMD_CFG0_INTERLEAVE_MAX__SHIFT 20
+static inline uint32_t DSI_CMD_CFG0_INTERLEAVE_MAX(uint32_t val)
+{
+ return ((val) << DSI_CMD_CFG0_INTERLEAVE_MAX__SHIFT) & DSI_CMD_CFG0_INTERLEAVE_MAX__MASK;
+}
+#define DSI_CMD_CFG0_RGB_SWAP__MASK 0x00070000
+#define DSI_CMD_CFG0_RGB_SWAP__SHIFT 16
+static inline uint32_t DSI_CMD_CFG0_RGB_SWAP(enum dsi_rgb_swap val)
+{
+ return ((val) << DSI_CMD_CFG0_RGB_SWAP__SHIFT) & DSI_CMD_CFG0_RGB_SWAP__MASK;
+}
+
+#define REG_DSI_CMD_CFG1 0x00000040
+#define DSI_CMD_CFG1_WR_MEM_START__MASK 0x000000ff
+#define DSI_CMD_CFG1_WR_MEM_START__SHIFT 0
+static inline uint32_t DSI_CMD_CFG1_WR_MEM_START(uint32_t val)
+{
+ return ((val) << DSI_CMD_CFG1_WR_MEM_START__SHIFT) & DSI_CMD_CFG1_WR_MEM_START__MASK;
+}
+#define DSI_CMD_CFG1_WR_MEM_CONTINUE__MASK 0x0000ff00
+#define DSI_CMD_CFG1_WR_MEM_CONTINUE__SHIFT 8
+static inline uint32_t DSI_CMD_CFG1_WR_MEM_CONTINUE(uint32_t val)
+{
+ return ((val) << DSI_CMD_CFG1_WR_MEM_CONTINUE__SHIFT) & DSI_CMD_CFG1_WR_MEM_CONTINUE__MASK;
+}
+#define DSI_CMD_CFG1_INSERT_DCS_COMMAND 0x00010000
+
+#define REG_DSI_DMA_BASE 0x00000044
+
+#define REG_DSI_DMA_LEN 0x00000048
+
+#define REG_DSI_CMD_MDP_STREAM0_CTRL 0x00000054
+#define DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__MASK 0x0000003f
+#define DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__SHIFT 0
+static inline uint32_t DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__MASK;
+}
+#define DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__MASK 0x00000300
+#define DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__SHIFT 8
+static inline uint32_t DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__MASK;
+}
+#define DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__MASK 0xffff0000
+#define DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__SHIFT 16
+static inline uint32_t DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__MASK;
+}
+
+#define REG_DSI_CMD_MDP_STREAM0_TOTAL 0x00000058
+#define DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__MASK 0x00000fff
+#define DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__SHIFT 0
+static inline uint32_t DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__MASK;
+}
+#define DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__MASK 0x0fff0000
+#define DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__SHIFT 16
+static inline uint32_t DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__MASK;
+}
+
+#define REG_DSI_CMD_MDP_STREAM1_CTRL 0x0000005c
+#define DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__MASK 0x0000003f
+#define DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__SHIFT 0
+static inline uint32_t DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__MASK;
+}
+#define DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__MASK 0x00000300
+#define DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__SHIFT 8
+static inline uint32_t DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__MASK;
+}
+#define DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__MASK 0xffff0000
+#define DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__SHIFT 16
+static inline uint32_t DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__MASK;
+}
+
+#define REG_DSI_CMD_MDP_STREAM1_TOTAL 0x00000060
+#define DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__MASK 0x0000ffff
+#define DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__SHIFT 0
+static inline uint32_t DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__MASK;
+}
+#define DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__MASK 0xffff0000
+#define DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__SHIFT 16
+static inline uint32_t DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL(uint32_t val)
+{
+ return ((val) << DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__MASK;
+}
+
+#define REG_DSI_ACK_ERR_STATUS 0x00000064
+
+static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; }
+
+static inline uint32_t REG_DSI_RDBK_DATA(uint32_t i0) { return 0x00000068 + 0x4*i0; }
+
+#define REG_DSI_TRIG_CTRL 0x00000080
+#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK 0x00000007
+#define DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT 0
+static inline uint32_t DSI_TRIG_CTRL_DMA_TRIGGER(enum dsi_cmd_trigger val)
+{
+ return ((val) << DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT) & DSI_TRIG_CTRL_DMA_TRIGGER__MASK;
+}
+#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK 0x00000070
+#define DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT 4
+static inline uint32_t DSI_TRIG_CTRL_MDP_TRIGGER(enum dsi_cmd_trigger val)
+{
+ return ((val) << DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT) & DSI_TRIG_CTRL_MDP_TRIGGER__MASK;
+}
+#define DSI_TRIG_CTRL_STREAM__MASK 0x00000300
+#define DSI_TRIG_CTRL_STREAM__SHIFT 8
+static inline uint32_t DSI_TRIG_CTRL_STREAM(uint32_t val)
+{
+ return ((val) << DSI_TRIG_CTRL_STREAM__SHIFT) & DSI_TRIG_CTRL_STREAM__MASK;
+}
+#define DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME 0x00001000
+#define DSI_TRIG_CTRL_TE 0x80000000
+
+#define REG_DSI_TRIG_DMA 0x0000008c
+
+#define REG_DSI_DLN0_PHY_ERR 0x000000b0
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_ESC 0x00000001
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC 0x00000010
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL 0x00000100
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 0x00001000
+#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1 0x00010000
+
+#define REG_DSI_LP_TIMER_CTRL 0x000000b4
+#define DSI_LP_TIMER_CTRL_LP_RX_TO__MASK 0x0000ffff
+#define DSI_LP_TIMER_CTRL_LP_RX_TO__SHIFT 0
+static inline uint32_t DSI_LP_TIMER_CTRL_LP_RX_TO(uint32_t val)
+{
+ return ((val) << DSI_LP_TIMER_CTRL_LP_RX_TO__SHIFT) & DSI_LP_TIMER_CTRL_LP_RX_TO__MASK;
+}
+#define DSI_LP_TIMER_CTRL_BTA_TO__MASK 0xffff0000
+#define DSI_LP_TIMER_CTRL_BTA_TO__SHIFT 16
+static inline uint32_t DSI_LP_TIMER_CTRL_BTA_TO(uint32_t val)
+{
+ return ((val) << DSI_LP_TIMER_CTRL_BTA_TO__SHIFT) & DSI_LP_TIMER_CTRL_BTA_TO__MASK;
+}
+
+#define REG_DSI_HS_TIMER_CTRL 0x000000b8
+#define DSI_HS_TIMER_CTRL_HS_TX_TO__MASK 0x0000ffff
+#define DSI_HS_TIMER_CTRL_HS_TX_TO__SHIFT 0
+static inline uint32_t DSI_HS_TIMER_CTRL_HS_TX_TO(uint32_t val)
+{
+ return ((val) << DSI_HS_TIMER_CTRL_HS_TX_TO__SHIFT) & DSI_HS_TIMER_CTRL_HS_TX_TO__MASK;
+}
+#define DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__MASK 0x000f0000
+#define DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__SHIFT 16
+static inline uint32_t DSI_HS_TIMER_CTRL_TIMER_RESOLUTION(uint32_t val)
+{
+ return ((val) << DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__SHIFT) & DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__MASK;
+}
+#define DSI_HS_TIMER_CTRL_HS_TX_TO_STOP_EN 0x10000000
+
+#define REG_DSI_TIMEOUT_STATUS 0x000000bc
+
+#define REG_DSI_CLKOUT_TIMING_CTRL 0x000000c0
+#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK 0x0000003f
+#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT 0
+static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(uint32_t val)
+{
+ return ((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK;
+}
+#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK 0x00003f00
+#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT 8
+static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
+{
+ return ((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK;
+}
+
+#define REG_DSI_EOT_PACKET_CTRL 0x000000c8
+#define DSI_EOT_PACKET_CTRL_TX_EOT_APPEND 0x00000001
+#define DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE 0x00000010
+
+#define REG_DSI_LANE_STATUS 0x000000a4
+#define DSI_LANE_STATUS_DLN0_STOPSTATE 0x00000001
+#define DSI_LANE_STATUS_DLN1_STOPSTATE 0x00000002
+#define DSI_LANE_STATUS_DLN2_STOPSTATE 0x00000004
+#define DSI_LANE_STATUS_DLN3_STOPSTATE 0x00000008
+#define DSI_LANE_STATUS_CLKLN_STOPSTATE 0x00000010
+#define DSI_LANE_STATUS_DLN0_ULPS_ACTIVE_NOT 0x00000100
+#define DSI_LANE_STATUS_DLN1_ULPS_ACTIVE_NOT 0x00000200
+#define DSI_LANE_STATUS_DLN2_ULPS_ACTIVE_NOT 0x00000400
+#define DSI_LANE_STATUS_DLN3_ULPS_ACTIVE_NOT 0x00000800
+#define DSI_LANE_STATUS_CLKLN_ULPS_ACTIVE_NOT 0x00001000
+#define DSI_LANE_STATUS_DLN0_DIRECTION 0x00010000
+
+#define REG_DSI_LANE_CTRL 0x000000a8
+#define DSI_LANE_CTRL_HS_REQ_SEL_PHY 0x01000000
+#define DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST 0x10000000
+
+#define REG_DSI_LANE_SWAP_CTRL 0x000000ac
+#define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK 0x00000007
+#define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT 0
+static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val)
+{
+ return ((val) << DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT) & DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK;
+}
+
+#define REG_DSI_ERR_INT_MASK0 0x00000108
+
+#define REG_DSI_INTR_CTRL 0x0000010c
+
+#define REG_DSI_RESET 0x00000114
+
+#define REG_DSI_CLK_CTRL 0x00000118
+#define DSI_CLK_CTRL_AHBS_HCLK_ON 0x00000001
+#define DSI_CLK_CTRL_AHBM_SCLK_ON 0x00000002
+#define DSI_CLK_CTRL_PCLK_ON 0x00000004
+#define DSI_CLK_CTRL_DSICLK_ON 0x00000008
+#define DSI_CLK_CTRL_BYTECLK_ON 0x00000010
+#define DSI_CLK_CTRL_ESCCLK_ON 0x00000020
+#define DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK 0x00000200
+
+#define REG_DSI_CLK_STATUS 0x0000011c
+#define DSI_CLK_STATUS_DSI_AON_AHBM_HCLK_ACTIVE 0x00000001
+#define DSI_CLK_STATUS_DSI_DYN_AHBM_HCLK_ACTIVE 0x00000002
+#define DSI_CLK_STATUS_DSI_AON_AHBS_HCLK_ACTIVE 0x00000004
+#define DSI_CLK_STATUS_DSI_DYN_AHBS_HCLK_ACTIVE 0x00000008
+#define DSI_CLK_STATUS_DSI_AON_DSICLK_ACTIVE 0x00000010
+#define DSI_CLK_STATUS_DSI_DYN_DSICLK_ACTIVE 0x00000020
+#define DSI_CLK_STATUS_DSI_AON_BYTECLK_ACTIVE 0x00000040
+#define DSI_CLK_STATUS_DSI_DYN_BYTECLK_ACTIVE 0x00000080
+#define DSI_CLK_STATUS_DSI_AON_ESCCLK_ACTIVE 0x00000100
+#define DSI_CLK_STATUS_DSI_AON_PCLK_ACTIVE 0x00000200
+#define DSI_CLK_STATUS_DSI_DYN_PCLK_ACTIVE 0x00000400
+#define DSI_CLK_STATUS_DSI_DYN_CMD_PCLK_ACTIVE 0x00001000
+#define DSI_CLK_STATUS_DSI_CMD_PCLK_ACTIVE 0x00002000
+#define DSI_CLK_STATUS_DSI_VID_PCLK_ACTIVE 0x00004000
+#define DSI_CLK_STATUS_DSI_CAM_BIST_PCLK_ACT 0x00008000
+#define DSI_CLK_STATUS_PLL_UNLOCKED 0x00010000
+
+#define REG_DSI_PHY_RESET 0x00000128
+#define DSI_PHY_RESET_RESET 0x00000001
+
+#define REG_DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL 0x00000160
+
+#define REG_DSI_TPG_MAIN_CONTROL 0x00000198
+#define DSI_TPG_MAIN_CONTROL_CHECKERED_RECTANGLE_PATTERN 0x00000100
+
+#define REG_DSI_TPG_VIDEO_CONFIG 0x000001a0
+#define DSI_TPG_VIDEO_CONFIG_BPP__MASK 0x00000003
+#define DSI_TPG_VIDEO_CONFIG_BPP__SHIFT 0
+static inline uint32_t DSI_TPG_VIDEO_CONFIG_BPP(enum video_config_bpp val)
+{
+ return ((val) << DSI_TPG_VIDEO_CONFIG_BPP__SHIFT) & DSI_TPG_VIDEO_CONFIG_BPP__MASK;
+}
+#define DSI_TPG_VIDEO_CONFIG_RGB 0x00000004
+
+#define REG_DSI_TEST_PATTERN_GEN_CTRL 0x00000158
+#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__MASK 0x00030000
+#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__SHIFT 16
+static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL(enum cmd_dma_pattern_sel val)
+{
+ return ((val) << DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__MASK;
+}
+#define DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__MASK 0x00000300
+#define DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__SHIFT 8
+static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL(enum cmd_mdp_stream0_pattern_sel val)
+{
+ return ((val) << DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__MASK;
+}
+#define DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__MASK 0x00000030
+#define DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__SHIFT 4
+static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL(enum video_pattern_sel val)
+{
+ return ((val) << DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__MASK;
+}
+#define DSI_TEST_PATTERN_GEN_CTRL_TPG_DMA_FIFO_MODE 0x00000004
+#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_TPG_EN 0x00000002
+#define DSI_TEST_PATTERN_GEN_CTRL_EN 0x00000001
+
+#define REG_DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0 0x00000168
+
+#define REG_DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER 0x00000180
+#define DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER 0x00000001
+
+#define REG_DSI_TPG_MAIN_CONTROL2 0x0000019c
+#define DSI_TPG_MAIN_CONTROL2_CMD_MDP0_CHECKERED_RECTANGLE_PATTERN 0x00000080
+#define DSI_TPG_MAIN_CONTROL2_CMD_MDP1_CHECKERED_RECTANGLE_PATTERN 0x00010000
+#define DSI_TPG_MAIN_CONTROL2_CMD_MDP2_CHECKERED_RECTANGLE_PATTERN 0x02000000
+
+#define REG_DSI_T_CLK_PRE_EXTEND 0x0000017c
+#define DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK 0x00000001
+
+#define REG_DSI_CMD_MODE_MDP_CTRL2 0x000001b4
+#define DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__MASK 0x0000000f
+#define DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__SHIFT 0
+static inline uint32_t DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2(enum dsi_cmd_dst_format val)
+{
+ return ((val) << DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__SHIFT) & DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__MASK;
+}
+#define DSI_CMD_MODE_MDP_CTRL2_R_SEL 0x00000010
+#define DSI_CMD_MODE_MDP_CTRL2_G_SEL 0x00000020
+#define DSI_CMD_MODE_MDP_CTRL2_B_SEL 0x00000040
+#define DSI_CMD_MODE_MDP_CTRL2_BYTE_MSB_LSB_FLIP 0x00000080
+#define DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__MASK 0x00000700
+#define DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__SHIFT 8
+static inline uint32_t DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP(enum dsi_rgb_swap val)
+{
+ return ((val) << DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__SHIFT) & DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__MASK;
+}
+#define DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__MASK 0x00007000
+#define DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__SHIFT 12
+static inline uint32_t DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP(enum dsi_rgb_swap val)
+{
+ return ((val) << DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__SHIFT) & DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__MASK;
+}
+#define DSI_CMD_MODE_MDP_CTRL2_BURST_MODE 0x00010000
+
+#define REG_DSI_CMD_MODE_MDP_STREAM2_CTRL 0x000001b8
+#define DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__MASK 0x0000003f
+#define DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__SHIFT 0
+static inline uint32_t DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE(uint32_t val)
+{
+ return ((val) << DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__MASK;
+}
+#define DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__MASK 0x00000300
+#define DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__SHIFT 8
+static inline uint32_t DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL(uint32_t val)
+{
+ return ((val) << DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__MASK;
+}
+#define DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__MASK 0xffff0000
+#define DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__SHIFT 16
+static inline uint32_t DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT(uint32_t val)
+{
+ return ((val) << DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__MASK;
+}
+
+#define REG_DSI_RDBK_DATA_CTRL 0x000001d0
+#define DSI_RDBK_DATA_CTRL_COUNT__MASK 0x00ff0000
+#define DSI_RDBK_DATA_CTRL_COUNT__SHIFT 16
+static inline uint32_t DSI_RDBK_DATA_CTRL_COUNT(uint32_t val)
+{
+ return ((val) << DSI_RDBK_DATA_CTRL_COUNT__SHIFT) & DSI_RDBK_DATA_CTRL_COUNT__MASK;
+}
+#define DSI_RDBK_DATA_CTRL_CLR 0x00000001
+
+#define REG_DSI_VERSION 0x000001f0
+#define DSI_VERSION_MAJOR__MASK 0xff000000
+#define DSI_VERSION_MAJOR__SHIFT 24
+static inline uint32_t DSI_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << DSI_VERSION_MAJOR__SHIFT) & DSI_VERSION_MAJOR__MASK;
+}
+
+#define REG_DSI_CPHY_MODE_CTRL 0x000002d4
+
+#define REG_DSI_VIDEO_COMPRESSION_MODE_CTRL 0x0000029c
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL_WC__MASK 0xffff0000
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL_WC__SHIFT 16
+static inline uint32_t DSI_VIDEO_COMPRESSION_MODE_CTRL_WC(uint32_t val)
+{
+ return ((val) << DSI_VIDEO_COMPRESSION_MODE_CTRL_WC__SHIFT) & DSI_VIDEO_COMPRESSION_MODE_CTRL_WC__MASK;
+}
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE__MASK 0x00003f00
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE__SHIFT 8
+static inline uint32_t DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE(uint32_t val)
+{
+ return ((val) << DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE__SHIFT) & DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE__MASK;
+}
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE__MASK 0x000000c0
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE__SHIFT 6
+static inline uint32_t DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE(uint32_t val)
+{
+ return ((val) << DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE__SHIFT) & DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE__MASK;
+}
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM__MASK 0x00000030
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM__SHIFT 4
+static inline uint32_t DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM(uint32_t val)
+{
+ return ((val) << DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM__SHIFT) & DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM__MASK;
+}
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL_EN 0x00000001
+
+#define REG_DSI_COMMAND_COMPRESSION_MODE_CTRL 0x000002a4
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE__MASK 0x3f000000
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE__SHIFT 24
+static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE(uint32_t val)
+{
+ return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE__MASK;
+}
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE__MASK 0x00c00000
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE__SHIFT 22
+static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE(uint32_t val)
+{
+ return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE__MASK;
+}
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM__MASK 0x00300000
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM__SHIFT 20
+static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM(uint32_t val)
+{
+ return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM__MASK;
+}
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EN 0x00010000
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE__MASK 0x00003f00
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE__SHIFT 8
+static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE(uint32_t val)
+{
+ return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE__MASK;
+}
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE__MASK 0x000000c0
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE__SHIFT 6
+static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE(uint32_t val)
+{
+ return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE__MASK;
+}
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM__MASK 0x00000030
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM__SHIFT 4
+static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM(uint32_t val)
+{
+ return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM__MASK;
+}
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EN 0x00000001
+
+#define REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2 0x000002a8
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH__MASK 0xffff0000
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH__SHIFT 16
+static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH(uint32_t val)
+{
+ return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH__MASK;
+}
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__MASK 0x0000ffff
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__SHIFT 0
+static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH(uint32_t val)
+{
+ return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__MASK;
+}
+
+#endif /* DSI_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
new file mode 100644
index 000000000..e0bd452a9
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#include "dsi_cfg.h"
+
+static const char * const dsi_v2_bus_clk_names[] = {
+ "core_mmss", "iface", "bus",
+};
+
+static const struct regulator_bulk_data apq8064_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
+ { .supply = "avdd", .init_load_uA = 10000 }, /* 3.0 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
+static const struct msm_dsi_config apq8064_dsi_cfg = {
+ .io_offset = 0,
+ .regulator_data = apq8064_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(apq8064_dsi_regulators),
+ .bus_clk_names = dsi_v2_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names),
+ .io_start = { 0x4700000, 0x5800000 },
+ .num_dsi = 2,
+};
+
+static const char * const dsi_6g_bus_clk_names[] = {
+ "mdp_core", "iface", "bus", "core_mmss",
+};
+
+static const struct regulator_bulk_data msm8974_apq8084_regulators[] = {
+ { .supply = "vdd", .init_load_uA = 150000 }, /* 3.0 V */
+ { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
+static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .regulator_data = msm8974_apq8084_regulators,
+ .num_regulators = ARRAY_SIZE(msm8974_apq8084_regulators),
+ .bus_clk_names = dsi_6g_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
+ .io_start = { 0xfd922800, 0xfd922b00 },
+ .num_dsi = 2,
+};
+
+static const char * const dsi_8916_bus_clk_names[] = {
+ "mdp_core", "iface", "bus",
+};
+
+static const struct regulator_bulk_data msm8916_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
+static const struct msm_dsi_config msm8916_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .regulator_data = msm8916_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(msm8916_dsi_regulators),
+ .bus_clk_names = dsi_8916_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_8916_bus_clk_names),
+ .io_start = { 0x1a98000 },
+ .num_dsi = 1,
+};
+
+static const char * const dsi_8976_bus_clk_names[] = {
+ "mdp_core", "iface", "bus",
+};
+
+static const struct regulator_bulk_data msm8976_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
+static const struct msm_dsi_config msm8976_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .regulator_data = msm8976_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(msm8976_dsi_regulators),
+ .bus_clk_names = dsi_8976_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_8976_bus_clk_names),
+ .io_start = { 0x1a94000, 0x1a96000 },
+ .num_dsi = 2,
+};
+
+static const struct regulator_bulk_data msm8994_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 100000 }, /* 1.25 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+ { .supply = "vcca", .init_load_uA = 10000 }, /* 1.0 V */
+ { .supply = "vdd", .init_load_uA = 100000 }, /* 1.8 V */
+ { .supply = "lab_reg", .init_load_uA = -1 },
+ { .supply = "ibb_reg", .init_load_uA = -1 },
+};
+
+static const struct msm_dsi_config msm8994_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .regulator_data = msm8994_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(msm8994_dsi_regulators),
+ .bus_clk_names = dsi_6g_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
+ .io_start = { 0xfd998000, 0xfd9a0000 },
+ .num_dsi = 2,
+};
+
+static const char * const dsi_8996_bus_clk_names[] = {
+ "mdp_core", "iface", "bus", "core_mmss",
+};
+
+static const struct regulator_bulk_data msm8996_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 18160 }, /* 1.25 V */
+ { .supply = "vcca", .init_load_uA = 17000 }, /* 0.925 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
+static const struct msm_dsi_config msm8996_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .regulator_data = msm8996_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(msm8996_dsi_regulators),
+ .bus_clk_names = dsi_8996_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_8996_bus_clk_names),
+ .io_start = { 0x994000, 0x996000 },
+ .num_dsi = 2,
+};
+
+static const char * const dsi_msm8998_bus_clk_names[] = {
+ "iface", "bus", "core",
+};
+
+static const struct regulator_bulk_data msm8998_dsi_regulators[] = {
+ { .supply = "vdd", .init_load_uA = 367000 }, /* 0.9 V */
+ { .supply = "vdda", .init_load_uA = 62800 }, /* 1.2 V */
+};
+
+static const struct msm_dsi_config msm8998_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .regulator_data = msm8998_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(msm8998_dsi_regulators),
+ .bus_clk_names = dsi_msm8998_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_msm8998_bus_clk_names),
+ .io_start = { 0xc994000, 0xc996000 },
+ .num_dsi = 2,
+};
+
+static const char * const dsi_sdm660_bus_clk_names[] = {
+ "iface", "bus", "core", "core_mmss",
+};
+
+static const struct regulator_bulk_data sdm660_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 12560 }, /* 1.2 V */
+};
+
+static const struct msm_dsi_config sdm660_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .regulator_data = sdm660_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sdm660_dsi_regulators),
+ .bus_clk_names = dsi_sdm660_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_sdm660_bus_clk_names),
+ .io_start = { 0xc994000, 0xc996000 },
+ .num_dsi = 2,
+};
+
+static const char * const dsi_sdm845_bus_clk_names[] = {
+ "iface", "bus",
+};
+
+static const char * const dsi_sc7180_bus_clk_names[] = {
+ "iface", "bus",
+};
+
+static const struct regulator_bulk_data sdm845_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */
+};
+
+static const struct msm_dsi_config sdm845_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .regulator_data = sdm845_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sdm845_dsi_regulators),
+ .bus_clk_names = dsi_sdm845_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_sdm845_bus_clk_names),
+ .io_start = { 0xae94000, 0xae96000 },
+ .num_dsi = 2,
+};
+
+static const struct regulator_bulk_data sc7180_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */
+};
+
+static const struct msm_dsi_config sc7180_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .regulator_data = sc7180_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sc7180_dsi_regulators),
+ .bus_clk_names = dsi_sc7180_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_sc7180_bus_clk_names),
+ .io_start = { 0xae94000 },
+ .num_dsi = 1,
+};
+
+static const char * const dsi_sc7280_bus_clk_names[] = {
+ "iface", "bus",
+};
+
+static const struct regulator_bulk_data sc7280_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 8350 }, /* 1.2 V */
+};
+
+static const struct msm_dsi_config sc7280_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .regulator_data = sc7280_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sc7280_dsi_regulators),
+ .bus_clk_names = dsi_sc7280_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_sc7280_bus_clk_names),
+ .io_start = { 0xae94000, 0xae96000 },
+ .num_dsi = 2,
+};
+
+static const char * const dsi_qcm2290_bus_clk_names[] = {
+ "iface", "bus",
+};
+
+static const struct regulator_bulk_data qcm2290_dsi_cfg_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */
+};
+
+static const struct msm_dsi_config qcm2290_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .regulator_data = qcm2290_dsi_cfg_regulators,
+ .num_regulators = ARRAY_SIZE(qcm2290_dsi_cfg_regulators),
+ .bus_clk_names = dsi_qcm2290_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_qcm2290_bus_clk_names),
+ .io_start = { 0x5e94000 },
+ .num_dsi = 1,
+};
+
+static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
+ .link_clk_set_rate = dsi_link_clk_set_rate_v2,
+ .link_clk_enable = dsi_link_clk_enable_v2,
+ .link_clk_disable = dsi_link_clk_disable_v2,
+ .clk_init_ver = dsi_clk_init_v2,
+ .tx_buf_alloc = dsi_tx_buf_alloc_v2,
+ .tx_buf_get = dsi_tx_buf_get_v2,
+ .tx_buf_put = NULL,
+ .dma_base_get = dsi_dma_base_get_v2,
+ .calc_clk_rate = dsi_calc_clk_rate_v2,
+};
+
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
+ .link_clk_set_rate = dsi_link_clk_set_rate_6g,
+ .link_clk_enable = dsi_link_clk_enable_6g,
+ .link_clk_disable = dsi_link_clk_disable_6g,
+ .clk_init_ver = NULL,
+ .tx_buf_alloc = dsi_tx_buf_alloc_6g,
+ .tx_buf_get = dsi_tx_buf_get_6g,
+ .tx_buf_put = dsi_tx_buf_put_6g,
+ .dma_base_get = dsi_dma_base_get_6g,
+ .calc_clk_rate = dsi_calc_clk_rate_6g,
+};
+
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
+ .link_clk_set_rate = dsi_link_clk_set_rate_6g,
+ .link_clk_enable = dsi_link_clk_enable_6g,
+ .link_clk_disable = dsi_link_clk_disable_6g,
+ .clk_init_ver = dsi_clk_init_6g_v2,
+ .tx_buf_alloc = dsi_tx_buf_alloc_6g,
+ .tx_buf_get = dsi_tx_buf_get_6g,
+ .tx_buf_put = dsi_tx_buf_put_6g,
+ .dma_base_get = dsi_dma_base_get_6g,
+ .calc_clk_rate = dsi_calc_clk_rate_6g,
+};
+
+static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
+ {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064,
+ &apq8064_dsi_cfg, &msm_dsi_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1,
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1_1,
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_2,
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3,
+ &msm8994_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1,
+ &msm8916_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1,
+ &msm8996_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_2,
+ &msm8976_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_1_0,
+ &sdm660_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_0,
+ &msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
+ &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_3_0,
+ &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_0,
+ &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1,
+ &sc7180_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_5_0,
+ &sc7280_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+};
+
+const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
+{
+ const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
+ int i;
+
+ for (i = ARRAY_SIZE(dsi_cfg_handlers) - 1; i >= 0; i--) {
+ if ((dsi_cfg_handlers[i].major == major) &&
+ (dsi_cfg_handlers[i].minor == minor)) {
+ cfg_hnd = &dsi_cfg_handlers[i];
+ break;
+ }
+ }
+
+ return cfg_hnd;
+}
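+
+/*
+ * Example lookup (values from dsi_cfg.h): a host reporting major 0x03
+ * (MSM_DSI_VER_MAJOR_6G) and minor 0x20020001 (MSM_DSI_6G_VER_MINOR_V2_2_1)
+ * resolves to the sdm845_dsi_cfg / msm_dsi_6g_v2_host_ops entry above:
+ *
+ *	const struct msm_dsi_cfg_handler *cfg_hnd =
+ *		msm_dsi_cfg_get(MSM_DSI_VER_MAJOR_6G,
+ *				MSM_DSI_6G_VER_MINOR_V2_2_1);
+ */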
+
+/* Non autodetect configs */
+const struct msm_dsi_cfg_handler qcm2290_dsi_cfg_handler = {
+ .cfg = &qcm2290_dsi_cfg,
+ .ops = &msm_dsi_6g_v2_host_ops,
+};
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
new file mode 100644
index 000000000..8f04e685a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MSM_DSI_CFG_H__
+#define __MSM_DSI_CFG_H__
+
+#include "dsi.h"
+
+#define MSM_DSI_VER_MAJOR_V2 0x02
+#define MSM_DSI_VER_MAJOR_6G 0x03
+#define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000
+#define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000
+#define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001
+#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000
+#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000
+#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
+#define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001
+#define MSM_DSI_6G_VER_MINOR_V1_4_2 0x10040002
+#define MSM_DSI_6G_VER_MINOR_V2_1_0 0x20010000
+#define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000
+#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001
+#define MSM_DSI_6G_VER_MINOR_V2_3_0 0x20030000
+#define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000
+#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001
+#define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000
+
+#define MSM_DSI_V2_VER_MINOR_8064 0x0
+
+#define DSI_6G_REG_SHIFT 4
+
+struct msm_dsi_config {
+ u32 io_offset;
+ const struct regulator_bulk_data *regulator_data;
+ int num_regulators;
+ const char * const *bus_clk_names;
+ const int num_bus_clks;
+ const resource_size_t io_start[DSI_MAX];
+ const int num_dsi;
+};
+
+struct msm_dsi_host_cfg_ops {
+ int (*link_clk_set_rate)(struct msm_dsi_host *msm_host);
+ int (*link_clk_enable)(struct msm_dsi_host *msm_host);
+ void (*link_clk_disable)(struct msm_dsi_host *msm_host);
+ int (*clk_init_ver)(struct msm_dsi_host *msm_host);
+ int (*tx_buf_alloc)(struct msm_dsi_host *msm_host, int size);
+ void* (*tx_buf_get)(struct msm_dsi_host *msm_host);
+ void (*tx_buf_put)(struct msm_dsi_host *msm_host);
+ int (*dma_base_get)(struct msm_dsi_host *msm_host, uint64_t *iova);
+ int (*calc_clk_rate)(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
+};
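+
+/*
+ * Not every generation implements every op: in dsi_cfg.c,
+ * msm_dsi_v2_host_ops leaves .tx_buf_put NULL and msm_dsi_6g_host_ops
+ * leaves .clk_init_ver NULL, so callers are expected to NULL-check an op
+ * before invoking it.
+ */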
+
+struct msm_dsi_cfg_handler {
+ u32 major;
+ u32 minor;
+ const struct msm_dsi_config *cfg;
+ const struct msm_dsi_host_cfg_ops *ops;
+};
+
+const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor);
+
+/* Non autodetect configs */
+extern const struct msm_dsi_cfg_handler qcm2290_dsi_cfg_handler;
+
+#endif /* __MSM_DSI_CFG_H__ */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
new file mode 100644
index 000000000..a7c6e8a17
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -0,0 +1,2630 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pm_opp.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spinlock.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/display/drm_dsc_helper.h>
+#include <drm/drm_of.h>
+
+#include "dsi.h"
+#include "dsi.xml.h"
+#include "sfpb.xml.h"
+#include "dsi_cfg.h"
+#include "msm_kms.h"
+#include "msm_gem.h"
+#include "phy/dsi_phy.h"
+
+#define DSI_RESET_TOGGLE_DELAY_MS 20
+
+static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc_config *dsc);
+
+static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
+{
+ u32 ver;
+
+ if (!major || !minor)
+ return -EINVAL;
+
+ /*
+ * From DSI6G(v3) onwards, the addition of a 6G_HW_VERSION register at
+ * offset 0 shifts all the other registers down by 4 bytes.
+ *
+ * To distinguish DSI6G(v3) and beyond from DSIv2 and older, we read the
+ * DSI_VERSION register without any shift (offset 0x1f0). On DSIv2 this
+ * has to be a non-zero value, while on DSI6G it has to be zero (there
+ * the offset points to a scratch register which we never touch)
+ */
+
+ ver = msm_readl(base + REG_DSI_VERSION);
+ if (ver) {
+ /* older dsi host, there is no register shift */
+ ver = FIELD(ver, DSI_VERSION_MAJOR);
+ if (ver <= MSM_DSI_VER_MAJOR_V2) {
+ /* old versions */
+ *major = ver;
+ *minor = 0;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ /*
+ * newer host, offset 0 has 6G_HW_VERSION, the rest of the
+ * registers are shifted down, read DSI_VERSION again with
+ * the shifted offset
+ */
+ ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
+ ver = FIELD(ver, DSI_VERSION_MAJOR);
+ if (ver == MSM_DSI_VER_MAJOR_6G) {
+ /* 6G version */
+ *major = ver;
+ *minor = msm_readl(base + REG_DSI_6G_HW_VERSION);
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+ }
+}
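+
+/*
+ * Illustrative walk-through (values taken from dsi_cfg.h in this patch):
+ * on a DSI6G host the unshifted DSI_VERSION read returns 0, so the
+ * shifted read is decoded instead; FIELD() yields major = 0x03
+ * (MSM_DSI_VER_MAJOR_6G) and the minor is read from REG_DSI_6G_HW_VERSION,
+ * e.g. 0x20040001 (MSM_DSI_6G_VER_MINOR_V2_4_1) on sc7180, which
+ * msm_dsi_cfg_get() then matches to sc7180_dsi_cfg.
+ */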
+
+#define DSI_ERR_STATE_ACK 0x0000
+#define DSI_ERR_STATE_TIMEOUT 0x0001
+#define DSI_ERR_STATE_DLN0_PHY 0x0002
+#define DSI_ERR_STATE_FIFO 0x0004
+#define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW 0x0008
+#define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION 0x0010
+#define DSI_ERR_STATE_PLL_UNLOCKED 0x0020
+
+#define DSI_CLK_CTRL_ENABLE_CLKS \
+ (DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
+ DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
+ DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
+ DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
+
+struct msm_dsi_host {
+ struct mipi_dsi_host base;
+
+ struct platform_device *pdev;
+ struct drm_device *dev;
+
+ int id;
+
+ void __iomem *ctrl_base;
+ phys_addr_t ctrl_size;
+ struct regulator_bulk_data *supplies;
+
+ int num_bus_clks;
+ struct clk_bulk_data bus_clks[DSI_BUS_CLK_MAX];
+
+ struct clk *byte_clk;
+ struct clk *esc_clk;
+ struct clk *pixel_clk;
+ struct clk *byte_clk_src;
+ struct clk *pixel_clk_src;
+ struct clk *byte_intf_clk;
+
+ unsigned long byte_clk_rate;
+ unsigned long pixel_clk_rate;
+ unsigned long esc_clk_rate;
+
+ /* DSI v2 specific clocks */
+ struct clk *src_clk;
+ struct clk *esc_clk_src;
+ struct clk *dsi_clk_src;
+
+ unsigned long src_clk_rate;
+
+ struct gpio_desc *disp_en_gpio;
+ struct gpio_desc *te_gpio;
+
+ const struct msm_dsi_cfg_handler *cfg_hnd;
+
+ struct completion dma_comp;
+ struct completion video_comp;
+ struct mutex dev_mutex;
+ struct mutex cmd_mutex;
+ spinlock_t intr_lock; /* Protect interrupt ctrl register */
+
+ u32 err_work_state;
+ struct work_struct err_work;
+ struct workqueue_struct *workqueue;
+
+ /* DSI 6G TX buffer */
+ struct drm_gem_object *tx_gem_obj;
+ struct msm_gem_address_space *aspace;
+
+ /* DSI v2 TX buffer */
+ void *tx_buf;
+ dma_addr_t tx_buf_paddr;
+
+ int tx_size;
+
+ u8 *rx_buf;
+
+ struct regmap *sfpb;
+
+ struct drm_display_mode *mode;
+ struct drm_dsc_config *dsc;
+
+ /* connected device info */
+ unsigned int channel;
+ unsigned int lanes;
+ enum mipi_dsi_pixel_format format;
+ unsigned long mode_flags;
+
+ /* lane data parsed via DT */
+ int dlane_swap;
+ int num_data_lanes;
+
+ /* from phy DT */
+ bool cphy_mode;
+
+ u32 dma_cmd_ctrl_restore;
+
+ bool registered;
+ bool power_on;
+ bool enabled;
+ int irq;
+};
+
+static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
+{
+ switch (fmt) {
+ case MIPI_DSI_FMT_RGB565: return 16;
+ case MIPI_DSI_FMT_RGB666_PACKED: return 18;
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB888:
+ default: return 24;
+ }
+}
+
+static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
+{
+ return msm_readl(msm_host->ctrl_base + reg);
+}
+
+static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
+{
+ msm_writel(data, msm_host->ctrl_base + reg);
+}
+
+static const struct msm_dsi_cfg_handler *dsi_get_config(
+ struct msm_dsi_host *msm_host)
+{
+ const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
+ struct device *dev = &msm_host->pdev->dev;
+ struct clk *ahb_clk;
+ int ret;
+ u32 major = 0, minor = 0;
+
+ cfg_hnd = device_get_match_data(dev);
+ if (cfg_hnd)
+ return cfg_hnd;
+
+ ahb_clk = msm_clk_get(msm_host->pdev, "iface");
+ if (IS_ERR(ahb_clk)) {
+ pr_err("%s: cannot get interface clock\n", __func__);
+ goto exit;
+ }
+
+ pm_runtime_get_sync(dev);
+
+ ret = clk_prepare_enable(ahb_clk);
+ if (ret) {
+ pr_err("%s: unable to enable ahb_clk\n", __func__);
+ goto runtime_put;
+ }
+
+ ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
+ if (ret) {
+ pr_err("%s: Invalid version\n", __func__);
+ goto disable_clks;
+ }
+
+ cfg_hnd = msm_dsi_cfg_get(major, minor);
+
+ DBG("%s: Version %x:%x\n", __func__, major, minor);
+
+disable_clks:
+ clk_disable_unprepare(ahb_clk);
+runtime_put:
+ pm_runtime_put_sync(dev);
+exit:
+ return cfg_hnd;
+}
+
+static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
+{
+ return container_of(host, struct msm_dsi_host, base);
+}
+
+int dsi_clk_init_v2(struct msm_dsi_host *msm_host)
+{
+ struct platform_device *pdev = msm_host->pdev;
+ int ret = 0;
+
+ msm_host->src_clk = msm_clk_get(pdev, "src");
+
+ if (IS_ERR(msm_host->src_clk)) {
+ ret = PTR_ERR(msm_host->src_clk);
+ pr_err("%s: can't find src clock. ret=%d\n",
+ __func__, ret);
+ msm_host->src_clk = NULL;
+ return ret;
+ }
+
+ msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
+ if (!msm_host->esc_clk_src) {
+ ret = -ENODEV;
+ pr_err("%s: can't get esc clock parent. ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
+ if (!msm_host->dsi_clk_src) {
+ ret = -ENODEV;
+ pr_err("%s: can't get src clock parent. ret=%d\n",
+ __func__, ret);
+ }
+
+ return ret;
+}
+
+int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host)
+{
+ struct platform_device *pdev = msm_host->pdev;
+ int ret = 0;
+
+ msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf");
+ if (IS_ERR(msm_host->byte_intf_clk)) {
+ ret = PTR_ERR(msm_host->byte_intf_clk);
+ pr_err("%s: can't find byte_intf clock. ret=%d\n",
+ __func__, ret);
+ }
+
+ return ret;
+}
+
+static int dsi_clk_init(struct msm_dsi_host *msm_host)
+{
+ struct platform_device *pdev = msm_host->pdev;
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+ const struct msm_dsi_config *cfg = cfg_hnd->cfg;
+ int i, ret = 0;
+
+ /* get bus clocks */
+ for (i = 0; i < cfg->num_bus_clks; i++)
+ msm_host->bus_clks[i].id = cfg->bus_clk_names[i];
+ msm_host->num_bus_clks = cfg->num_bus_clks;
+
+ ret = devm_clk_bulk_get(&pdev->dev, msm_host->num_bus_clks, msm_host->bus_clks);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to get clocks, ret = %d\n", ret);
+ goto exit;
+ }
+
+ /* get link and source clocks */
+ msm_host->byte_clk = msm_clk_get(pdev, "byte");
+ if (IS_ERR(msm_host->byte_clk)) {
+ ret = PTR_ERR(msm_host->byte_clk);
+ pr_err("%s: can't find dsi_byte clock. ret=%d\n",
+ __func__, ret);
+ msm_host->byte_clk = NULL;
+ goto exit;
+ }
+
+ msm_host->pixel_clk = msm_clk_get(pdev, "pixel");
+ if (IS_ERR(msm_host->pixel_clk)) {
+ ret = PTR_ERR(msm_host->pixel_clk);
+ pr_err("%s: can't find dsi_pixel clock. ret=%d\n",
+ __func__, ret);
+ msm_host->pixel_clk = NULL;
+ goto exit;
+ }
+
+ msm_host->esc_clk = msm_clk_get(pdev, "core");
+ if (IS_ERR(msm_host->esc_clk)) {
+ ret = PTR_ERR(msm_host->esc_clk);
+ pr_err("%s: can't find dsi_esc clock. ret=%d\n",
+ __func__, ret);
+ msm_host->esc_clk = NULL;
+ goto exit;
+ }
+
+ msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
+ if (IS_ERR(msm_host->byte_clk_src)) {
+ ret = PTR_ERR(msm_host->byte_clk_src);
+ pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret);
+ goto exit;
+ }
+
+ msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
+ if (IS_ERR(msm_host->pixel_clk_src)) {
+ ret = PTR_ERR(msm_host->pixel_clk_src);
+ pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret);
+ goto exit;
+ }
+
+ if (cfg_hnd->ops->clk_init_ver)
+ ret = cfg_hnd->ops->clk_init_ver(msm_host);
+exit:
+ return ret;
+}
+
+int msm_dsi_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ if (!msm_host->cfg_hnd)
+ return 0;
+
+ clk_bulk_disable_unprepare(msm_host->num_bus_clks, msm_host->bus_clks);
+
+ return 0;
+}
+
+int msm_dsi_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ if (!msm_host->cfg_hnd)
+ return 0;
+
+ return clk_bulk_prepare_enable(msm_host->num_bus_clks, msm_host->bus_clks);
+}
+
+int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
+{
+ unsigned long byte_intf_rate;
+ int ret;
+
+ DBG("Set clk rates: pclk=%d, byteclk=%lu",
+ msm_host->mode->clock, msm_host->byte_clk_rate);
+
+ ret = dev_pm_opp_set_rate(&msm_host->pdev->dev,
+ msm_host->byte_clk_rate);
+ if (ret) {
+ pr_err("%s: dev_pm_opp_set_rate failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
+ if (ret) {
+ pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
+ return ret;
+ }
+
+ if (msm_host->byte_intf_clk) {
+ /* For CPHY, byte_intf_clk is the same as byte_clk */
+ if (msm_host->cphy_mode)
+ byte_intf_rate = msm_host->byte_clk_rate;
+ else
+ byte_intf_rate = msm_host->byte_clk_rate / 2;
+
+ ret = clk_set_rate(msm_host->byte_intf_clk, byte_intf_rate);
+ if (ret) {
+ pr_err("%s: Failed to set rate byte intf clk, %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
+{
+ int ret;
+
+ ret = clk_prepare_enable(msm_host->esc_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable dsi esc clk\n", __func__);
+ goto error;
+ }
+
+ ret = clk_prepare_enable(msm_host->byte_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable dsi byte clk\n", __func__);
+ goto byte_clk_err;
+ }
+
+ ret = clk_prepare_enable(msm_host->pixel_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
+ goto pixel_clk_err;
+ }
+
+ ret = clk_prepare_enable(msm_host->byte_intf_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable byte intf clk\n",
+ __func__);
+ goto byte_intf_clk_err;
+ }
+
+ return 0;
+
+byte_intf_clk_err:
+ clk_disable_unprepare(msm_host->pixel_clk);
+pixel_clk_err:
+ clk_disable_unprepare(msm_host->byte_clk);
+byte_clk_err:
+ clk_disable_unprepare(msm_host->esc_clk);
+error:
+ return ret;
+}
+
+int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host)
+{
+ int ret;
+
+ DBG("Set clk rates: pclk=%d, byteclk=%lu, esc_clk=%lu, dsi_src_clk=%lu",
+ msm_host->mode->clock, msm_host->byte_clk_rate,
+ msm_host->esc_clk_rate, msm_host->src_clk_rate);
+
+ ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
+ if (ret) {
+ pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
+ if (ret) {
+ pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
+ if (ret) {
+ pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
+ if (ret) {
+ pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
+{
+ int ret;
+
+ ret = clk_prepare_enable(msm_host->byte_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable dsi byte clk\n", __func__);
+ goto error;
+ }
+
+ ret = clk_prepare_enable(msm_host->esc_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable dsi esc clk\n", __func__);
+ goto esc_clk_err;
+ }
+
+ ret = clk_prepare_enable(msm_host->src_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable dsi src clk\n", __func__);
+ goto src_clk_err;
+ }
+
+ ret = clk_prepare_enable(msm_host->pixel_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
+ goto pixel_clk_err;
+ }
+
+ return 0;
+
+pixel_clk_err:
+ clk_disable_unprepare(msm_host->src_clk);
+src_clk_err:
+ clk_disable_unprepare(msm_host->esc_clk);
+esc_clk_err:
+ clk_disable_unprepare(msm_host->byte_clk);
+error:
+ return ret;
+}
+
+void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host)
+{
+ /* Drop the performance state vote */
+ dev_pm_opp_set_rate(&msm_host->pdev->dev, 0);
+ clk_disable_unprepare(msm_host->esc_clk);
+ clk_disable_unprepare(msm_host->pixel_clk);
+ clk_disable_unprepare(msm_host->byte_intf_clk);
+ clk_disable_unprepare(msm_host->byte_clk);
+}
+
+void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
+{
+ clk_disable_unprepare(msm_host->pixel_clk);
+ clk_disable_unprepare(msm_host->src_clk);
+ clk_disable_unprepare(msm_host->esc_clk);
+ clk_disable_unprepare(msm_host->byte_clk);
+}
+
+static unsigned long dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+{
+ struct drm_display_mode *mode = msm_host->mode;
+ unsigned long pclk_rate;
+
+ pclk_rate = mode->clock * 1000;
+
+ /*
+ * For bonded DSI mode, the current DRM mode has the complete width of the
+ * panel. Since the complete panel is driven by two DSI controllers, the
+ * clock rates have to be split between them. Adjust the byte and pixel
+ * clock rates for each DSI host accordingly.
+ */
+ if (is_bonded_dsi)
+ pclk_rate /= 2;
+
+ return pclk_rate;
+}
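+
+/*
+ * Illustrative example: for a bonded panel 2160 pixels wide whose DRM mode
+ * clock covers the full width, each of the two hosts ends up with half the
+ * pixel clock, and the byte clock derived from it in dsi_calc_pclk() below
+ * is halved in the same way.
+ */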
+
+static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+{
+ u8 lanes = msm_host->lanes;
+ u32 bpp = dsi_get_bpp(msm_host->format);
+ unsigned long pclk_rate = dsi_get_pclk_rate(msm_host, is_bonded_dsi);
+ u64 pclk_bpp = (u64)pclk_rate * bpp;
+
+ if (lanes == 0) {
+ pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
+ lanes = 1;
+ }
+
+ /* CPHY "byte_clk" is in units of 16 bits */
+ if (msm_host->cphy_mode)
+ do_div(pclk_bpp, (16 * lanes));
+ else
+ do_div(pclk_bpp, (8 * lanes));
+
+ msm_host->pixel_clk_rate = pclk_rate;
+ msm_host->byte_clk_rate = pclk_bpp;
+
+ DBG("pclk=%lu, bclk=%lu", msm_host->pixel_clk_rate,
+ msm_host->byte_clk_rate);
+}
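+
+/*
+ * Illustrative numbers (not from the original source): a 1080p mode with
+ * mode->clock = 148500 kHz, RGB888 (24 bpp), 4 D-PHY lanes and
+ * is_bonded_dsi = false gives pixel_clk_rate = 148500000 Hz and
+ * byte_clk_rate = 148500000 * 24 / (8 * 4) = 111375000 Hz.
+ */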
+
+int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+{
+ if (!msm_host->mode) {
+ pr_err("%s: mode not set\n", __func__);
+ return -EINVAL;
+ }
+
+ dsi_calc_pclk(msm_host, is_bonded_dsi);
+ msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
+ return 0;
+}
+
+int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+{
+ u32 bpp = dsi_get_bpp(msm_host->format);
+ u64 pclk_bpp;
+ unsigned int esc_mhz, esc_div;
+ unsigned long byte_mhz;
+
+ dsi_calc_pclk(msm_host, is_bonded_dsi);
+
+ pclk_bpp = (u64)dsi_get_pclk_rate(msm_host, is_bonded_dsi) * bpp;
+ do_div(pclk_bpp, 8);
+ msm_host->src_clk_rate = pclk_bpp;
+
+ /*
+ * The esc clock is the byte clock followed by a 4-bit divider; we need
+ * to find an escape clock frequency within the MIPI DSI spec range that
+ * the divider can reach. We iterate over escape clock frequencies from
+ * 20 MHz down to 5 MHz and pick the first one our divider can support.
+ */
+
+ byte_mhz = msm_host->byte_clk_rate / 1000000;
+
+ for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
+ esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
+
+ /*
+ * TODO: Ideally, we shouldn't know what sort of divider
+ * is available in mmss_cc, we're just assuming that
+ * it'll always be a 4 bit divider. Need to come up with
+ * a better way here.
+ */
+ if (esc_div >= 1 && esc_div <= 16)
+ break;
+ }
+
+ if (esc_mhz < 5)
+ return -EINVAL;
+
+ msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
+
+ DBG("esc=%lu, src=%lu", msm_host->esc_clk_rate,
+ msm_host->src_clk_rate);
+
+ return 0;
+}
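+
+/*
+ * Continuing the illustrative numbers above: with byte_clk_rate =
+ * 111375000 Hz, byte_mhz = 111; the first candidate esc_mhz = 20 gives
+ * esc_div = DIV_ROUND_UP(111, 20) = 6, which fits the 4-bit divider, so
+ * esc_clk_rate = 111375000 / 6 = 18562500 Hz, inside the 5-20 MHz range.
+ */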
+
+static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
+{
+ u32 intr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msm_host->intr_lock, flags);
+ intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
+
+ if (enable)
+ intr |= mask;
+ else
+ intr &= ~mask;
+
+ DBG("intr=%x enable=%d", intr, enable);
+
+ dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
+ spin_unlock_irqrestore(&msm_host->intr_lock, flags);
+}
+
+static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
+{
+ if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ return BURST_MODE;
+ else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ return NON_BURST_SYNCH_PULSE;
+
+ return NON_BURST_SYNCH_EVENT;
+}
+
+static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
+ const enum mipi_dsi_pixel_format mipi_fmt)
+{
+ switch (mipi_fmt) {
+ case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888;
+ case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666_LOOSE;
+ case MIPI_DSI_FMT_RGB666_PACKED: return VID_DST_FORMAT_RGB666;
+ case MIPI_DSI_FMT_RGB565: return VID_DST_FORMAT_RGB565;
+ default: return VID_DST_FORMAT_RGB888;
+ }
+}
+
+static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
+ const enum mipi_dsi_pixel_format mipi_fmt)
+{
+ switch (mipi_fmt) {
+ case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666;
+ case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
+ default: return CMD_DST_FORMAT_RGB888;
+ }
+}
+
+static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
+ struct msm_dsi_phy_shared_timings *phy_shared_timings, struct msm_dsi_phy *phy)
+{
+ u32 flags = msm_host->mode_flags;
+ enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+ u32 data = 0, lane_ctrl = 0;
+
+ if (!enable) {
+ dsi_write(msm_host, REG_DSI_CTRL, 0);
+ return;
+ }
+
+ if (flags & MIPI_DSI_MODE_VIDEO) {
+ if (flags & MIPI_DSI_MODE_VIDEO_HSE)
+ data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
+ if (flags & MIPI_DSI_MODE_VIDEO_NO_HFP)
+ data |= DSI_VID_CFG0_HFP_POWER_STOP;
+ if (flags & MIPI_DSI_MODE_VIDEO_NO_HBP)
+ data |= DSI_VID_CFG0_HBP_POWER_STOP;
+ if (flags & MIPI_DSI_MODE_VIDEO_NO_HSA)
+ data |= DSI_VID_CFG0_HSA_POWER_STOP;
+ /* Always set low power stop mode for BLLP
+ * to let command engine send packets
+ */
+ data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
+ DSI_VID_CFG0_BLLP_POWER_STOP;
+ data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
+ data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
+ data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
+ dsi_write(msm_host, REG_DSI_VID_CFG0, data);
+
+ /* Do not swap RGB colors */
+ data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
+ dsi_write(msm_host, REG_DSI_VID_CFG1, data);
+ } else {
+ /* Do not swap RGB colors */
+ data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
+ data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
+ dsi_write(msm_host, REG_DSI_CMD_CFG0, data);
+
+ data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
+ DSI_CMD_CFG1_WR_MEM_CONTINUE(
+ MIPI_DCS_WRITE_MEMORY_CONTINUE);
+ /* Always insert DCS command */
+ data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
+ dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
+ }
+
+ dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
+ DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
+ DSI_CMD_DMA_CTRL_LOW_POWER);
+
+ data = 0;
+ /* Always assume dedicated TE pin */
+ data |= DSI_TRIG_CTRL_TE;
+ data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
+ data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
+ data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
+ if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
+ (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
+ data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
+ dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
+
+ data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(phy_shared_timings->clk_post) |
+ DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(phy_shared_timings->clk_pre);
+ dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
+
+ if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
+ (cfg_hnd->minor > MSM_DSI_6G_VER_MINOR_V1_0) &&
+ phy_shared_timings->clk_pre_inc_by_2)
+ dsi_write(msm_host, REG_DSI_T_CLK_PRE_EXTEND,
+ DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK);
+
+ data = 0;
+ if (!(flags & MIPI_DSI_MODE_NO_EOT_PACKET))
+ data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
+ dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);
+
+ /* allow only ack-err-status to generate interrupt */
+ dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);
+
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
+
+ dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
+
+ data = DSI_CTRL_CLK_EN;
+
+ DBG("lane number=%d", msm_host->lanes);
+ data |= ((DSI_CTRL_LANE0 << msm_host->lanes) - DSI_CTRL_LANE0);
+
+ dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
+ DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap));
+
+ if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) {
+ lane_ctrl = dsi_read(msm_host, REG_DSI_LANE_CTRL);
+
+ if (msm_dsi_phy_set_continuous_clock(phy, enable))
+ lane_ctrl &= ~DSI_LANE_CTRL_HS_REQ_SEL_PHY;
+
+ dsi_write(msm_host, REG_DSI_LANE_CTRL,
+ lane_ctrl | DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
+ }
+
+ data |= DSI_CTRL_ENABLE;
+
+ dsi_write(msm_host, REG_DSI_CTRL, data);
+
+ if (msm_host->cphy_mode)
+ dsi_write(msm_host, REG_DSI_CPHY_MODE_CTRL, BIT(0));
+}
+
+static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode, u32 hdisplay)
+{
+ struct drm_dsc_config *dsc = msm_host->dsc;
+ u32 reg, reg_ctrl, reg_ctrl2;
+ u32 slice_per_intf, total_bytes_per_intf;
+ u32 pkt_per_line;
+ u32 eol_byte_num;
+
+ /* first calculate the DSC parameters and then program the
+ * compression mode registers
+ */
+ slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->slice_width);
+
+ total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf;
+
+ eol_byte_num = total_bytes_per_intf % 3;
+
+ /*
+ * Typically, pkt_per_line = slice_per_intf * slice_per_pkt.
+ *
+ * Since the current driver only supports slice_per_pkt = 1,
+ * pkt_per_line will be equal to slice_per_intf for now.
+ */
+ pkt_per_line = slice_per_intf;
+
+ if (is_cmd_mode) /* packet data type */
+ reg = DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE(MIPI_DSI_DCS_LONG_WRITE);
+ else
+ reg = DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE(MIPI_DSI_COMPRESSED_PIXEL_STREAM);
+
+ /* DSI_VIDEO_COMPRESSION_MODE & DSI_COMMAND_COMPRESSION_MODE
+ * registers have similar offsets, so for below common code use
+ * DSI_VIDEO_COMPRESSION_MODE_XXXX for setting bits
+ */
+ reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE(pkt_per_line >> 1);
+ reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM(eol_byte_num);
+ reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_EN;
+
+ if (is_cmd_mode) {
+ reg_ctrl = dsi_read(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL);
+ reg_ctrl2 = dsi_read(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2);
+
+ reg_ctrl &= ~0xffff;
+ reg_ctrl |= reg;
+
+ reg_ctrl2 &= ~DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__MASK;
+ reg_ctrl2 |= DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH(dsc->slice_chunk_size);
+
+ dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL, reg_ctrl);
+ dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2, reg_ctrl2);
+ } else {
+ dsi_write(msm_host, REG_DSI_VIDEO_COMPRESSION_MODE_CTRL, reg);
+ }
+}
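+
+/*
+ * Illustrative example (assumed panel values, not from the original
+ * source): hdisplay = 1080 with dsc->slice_width = 540 and
+ * dsc->slice_chunk_size = 540 (8 bpp) gives slice_per_intf = 2,
+ * total_bytes_per_intf = 1080, eol_byte_num = 1080 % 3 = 0 and
+ * pkt_per_line = 2 (programmed as pkt_per_line >> 1 = 1).
+ */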
+
+static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+{
+ struct drm_display_mode *mode = msm_host->mode;
+ u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
+ u32 h_total = mode->htotal;
+ u32 v_total = mode->vtotal;
+ u32 hs_end = mode->hsync_end - mode->hsync_start;
+ u32 vs_end = mode->vsync_end - mode->vsync_start;
+ u32 ha_start = h_total - mode->hsync_start;
+ u32 ha_end = ha_start + mode->hdisplay;
+ u32 va_start = v_total - mode->vsync_start;
+ u32 va_end = va_start + mode->vdisplay;
+ u32 hdisplay = mode->hdisplay;
+ u32 wc;
+ int ret;
+
+ DBG("");
+
+ /*
+ * For bonded DSI mode, the current DRM mode has
+ * the complete width of the panel. Since, the complete
+ * panel is driven by two DSI controllers, the horizontal
+ * timings have to be split between the two dsi controllers.
+ * Adjust the DSI host timing values accordingly.
+ */
+ if (is_bonded_dsi) {
+ h_total /= 2;
+ hs_end /= 2;
+ ha_start /= 2;
+ ha_end /= 2;
+ hdisplay /= 2;
+ }
+
+ if (msm_host->dsc) {
+ struct drm_dsc_config *dsc = msm_host->dsc;
+
+ /* update dsc params with timing params */
+ if (!dsc || !mode->hdisplay || !mode->vdisplay) {
+ pr_err("DSI: invalid input: pic_width: %d pic_height: %d\n",
+ mode->hdisplay, mode->vdisplay);
+ return;
+ }
+
+ dsc->pic_width = mode->hdisplay;
+ dsc->pic_height = mode->vdisplay;
+ DBG("Mode %dx%d\n", dsc->pic_width, dsc->pic_height);
+
+ /* we do the calculations for dsc parameters here so that
+ * panel can use these parameters
+ */
+ ret = dsi_populate_dsc_params(msm_host, dsc);
+ if (ret)
+ return;
+
+ /* Divide the display width by 3 (the 3:1 compression implied by
+ * 24 bpp in, 8 bpp out) but keep the back/front porch and pulse
+ * width the same
+ */
+ h_total -= hdisplay;
+ hdisplay /= 3;
+ h_total += hdisplay;
+ ha_end = ha_start + hdisplay;
+ }
+
+ if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ if (msm_host->dsc)
+ dsi_update_dsc_timing(msm_host, false, mode->hdisplay);
+
+ dsi_write(msm_host, REG_DSI_ACTIVE_H,
+ DSI_ACTIVE_H_START(ha_start) |
+ DSI_ACTIVE_H_END(ha_end));
+ dsi_write(msm_host, REG_DSI_ACTIVE_V,
+ DSI_ACTIVE_V_START(va_start) |
+ DSI_ACTIVE_V_END(va_end));
+ dsi_write(msm_host, REG_DSI_TOTAL,
+ DSI_TOTAL_H_TOTAL(h_total - 1) |
+ DSI_TOTAL_V_TOTAL(v_total - 1));
+
+ dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
+ DSI_ACTIVE_HSYNC_START(hs_start) |
+ DSI_ACTIVE_HSYNC_END(hs_end));
+ dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
+ dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
+ DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
+ DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
+ } else { /* command mode */
+ if (msm_host->dsc)
+ dsi_update_dsc_timing(msm_host, true, mode->hdisplay);
+
+ /* image data and 1 byte write_memory_start cmd */
+ if (!msm_host->dsc)
+ wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
+ else
+ /*
+ * When DSC is enabled, WC = slice_chunk_size * slice_per_pkt + 1.
+ * Currently, the driver only supports default value of slice_per_pkt = 1
+ *
+ * TODO: Expand mipi_dsi_device struct to hold slice_per_pkt info
+ * and adjust DSC math to account for slice_per_pkt.
+ */
+ wc = msm_host->dsc->slice_chunk_size + 1;
+
+ dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM0_CTRL,
+ DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT(wc) |
+ DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL(
+ msm_host->channel) |
+ DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE(
+ MIPI_DSI_DCS_LONG_WRITE));
+
+ dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM0_TOTAL,
+ DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL(hdisplay) |
+ DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL(mode->vdisplay));
+ }
+}
+
+static void dsi_sw_reset(struct msm_dsi_host *msm_host)
+{
+ u32 ctrl;
+
+ ctrl = dsi_read(msm_host, REG_DSI_CTRL);
+
+ if (ctrl & DSI_CTRL_ENABLE) {
+ dsi_write(msm_host, REG_DSI_CTRL, ctrl & ~DSI_CTRL_ENABLE);
+ /*
+ * the dsi controller needs to be disabled before the
+ * clocks are turned on
+ */
+ wmb();
+ }
+
+ dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
+ wmb(); /* clocks need to be enabled before reset */
+
+ /* dsi controller can only be reset while clocks are running */
+ dsi_write(msm_host, REG_DSI_RESET, 1);
+ msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happens */
+ dsi_write(msm_host, REG_DSI_RESET, 0);
+ wmb(); /* controller out of reset */
+
+ if (ctrl & DSI_CTRL_ENABLE) {
+ dsi_write(msm_host, REG_DSI_CTRL, ctrl);
+ wmb(); /* make sure dsi controller enabled again */
+ }
+}
+
+static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
+ bool video_mode, bool enable)
+{
+ u32 dsi_ctrl;
+
+ dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);
+
+ if (!enable) {
+ dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
+ DSI_CTRL_CMD_MODE_EN);
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
+ DSI_IRQ_MASK_VIDEO_DONE, 0);
+ } else {
+ if (video_mode) {
+ dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
+ } else { /* command mode */
+ dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
+ }
+ dsi_ctrl |= DSI_CTRL_ENABLE;
+ }
+
+ dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
+}
+
+static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
+{
+ u32 data;
+
+ data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
+
+ if (mode == 0)
+ data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
+ else
+ data |= DSI_CMD_DMA_CTRL_LOW_POWER;
+
+ dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
+}
+
+static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
+{
+ u32 ret = 0;
+ struct device *dev = &msm_host->pdev->dev;
+
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
+
+ reinit_completion(&msm_host->video_comp);
+
+ ret = wait_for_completion_timeout(&msm_host->video_comp,
+ msecs_to_jiffies(70));
+
+ if (ret == 0)
+ DRM_DEV_ERROR(dev, "wait for video done timed out\n");
+
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
+}
+
+static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
+{
+ u32 data;
+
+ if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
+ return;
+
+ data = dsi_read(msm_host, REG_DSI_STATUS0);
+
+ /* if the video mode engine is not busy, it's because either the
+ * timing engine was not turned on or the DSI controller has already
+ * finished transmitting the video data, so there is no need to wait
+ * in those cases
+ */
+ if (!(data & DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY))
+ return;
+
+ if (msm_host->power_on && msm_host->enabled) {
+ dsi_wait4video_done(msm_host);
+ /* delay 2-4 ms to skip BLLP */
+ usleep_range(2000, 4000);
+ }
+}
+
+int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
+{
+ struct drm_device *dev = msm_host->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ uint64_t iova;
+ u8 *data;
+
+ msm_host->aspace = msm_gem_address_space_get(priv->kms->aspace);
+
+ data = msm_gem_kernel_new(dev, size, MSM_BO_WC,
+ msm_host->aspace,
+ &msm_host->tx_gem_obj, &iova);
+
+ if (IS_ERR(data)) {
+ msm_host->tx_gem_obj = NULL;
+ return PTR_ERR(data);
+ }
+
+ msm_gem_object_set_name(msm_host->tx_gem_obj, "tx_gem");
+
+ msm_host->tx_size = msm_host->tx_gem_obj->size;
+
+ return 0;
+}
+
+int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
+{
+ struct drm_device *dev = msm_host->dev;
+
+ msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
+ &msm_host->tx_buf_paddr, GFP_KERNEL);
+ if (!msm_host->tx_buf)
+ return -ENOMEM;
+
+ msm_host->tx_size = size;
+
+ return 0;
+}
+
+void msm_dsi_tx_buf_free(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ struct drm_device *dev = msm_host->dev;
+
+ /*
+ * This is possible if we're tearing down before we've had a chance to
+ * fully initialize. A very real possibility if our probe is deferred,
+ * in which case we'll hit msm_dsi_host_destroy() without having run
+ * through the dsi_tx_buf_alloc().
+ */
+ if (!dev)
+ return;
+
+ if (msm_host->tx_gem_obj) {
+ msm_gem_kernel_put(msm_host->tx_gem_obj, msm_host->aspace);
+ msm_gem_address_space_put(msm_host->aspace);
+ msm_host->tx_gem_obj = NULL;
+ msm_host->aspace = NULL;
+ }
+
+ if (msm_host->tx_buf)
+ dma_free_coherent(dev->dev, msm_host->tx_size, msm_host->tx_buf,
+ msm_host->tx_buf_paddr);
+}
+
+void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host)
+{
+ return msm_gem_get_vaddr(msm_host->tx_gem_obj);
+}
+
+void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host)
+{
+ return msm_host->tx_buf;
+}
+
+void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host)
+{
+ msm_gem_put_vaddr(msm_host->tx_gem_obj);
+}
+
+/*
+ * prepare cmd buffer to be txed
+ */
+static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
+ const struct mipi_dsi_msg *msg)
+{
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+ struct mipi_dsi_packet packet;
+ int len;
+ int ret;
+ u8 *data;
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret) {
+ pr_err("%s: create packet failed, %d\n", __func__, ret);
+ return ret;
+ }
+ len = (packet.size + 3) & (~0x3);
+
+ if (len > msm_host->tx_size) {
+ pr_err("%s: packet size is too big\n", __func__);
+ return -EINVAL;
+ }
+
+ data = cfg_hnd->ops->tx_buf_get(msm_host);
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
+ pr_err("%s: get vaddr failed, %d\n", __func__, ret);
+ return ret;
+ }
+
+ /* MSM specific command format in memory */
+ data[0] = packet.header[1];
+ data[1] = packet.header[2];
+ data[2] = packet.header[0];
+ data[3] = BIT(7); /* Last packet */
+ if (mipi_dsi_packet_format_is_long(msg->type))
+ data[3] |= BIT(6);
+ if (msg->rx_buf && msg->rx_len)
+ data[3] |= BIT(5);
+
+ /* Long packet */
+ if (packet.payload && packet.payload_length)
+ memcpy(data + 4, packet.payload, packet.payload_length);
+
+ /* Append 0xff to the end */
+ if (packet.size < len)
+ memset(data + packet.size, 0xff, len - packet.size);
+
+ if (cfg_hnd->ops->tx_buf_put)
+ cfg_hnd->ops->tx_buf_put(msm_host);
+
+ return len;
+}
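+
+/*
+ * Illustrative buffer layout (assuming virtual channel 0): a DCS short
+ * write of MIPI_DCS_SET_DISPLAY_ON (0x29) creates packet.header =
+ * {0x05, 0x29, 0x00}, so the reordered MSM buffer becomes
+ * {0x29, 0x00, 0x05, 0x80} - payload bytes first, data type third and
+ * the "last packet" flag (BIT(7)) in byte 3, with len already 4-byte
+ * aligned.
+ */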
+
+/*
+ * dsi_short_read1_resp: 1 parameter
+ */
+static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
+{
+ u8 *data = msg->rx_buf;
+ if (data && (msg->rx_len >= 1)) {
+ *data = buf[1]; /* strip out dcs type */
+ return 1;
+ } else {
+ pr_err("%s: read data does not match with rx_buf len %zu\n",
+ __func__, msg->rx_len);
+ return -EINVAL;
+ }
+}
+
+/*
+ * dsi_short_read2_resp: 2 parameters
+ */
+static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
+{
+ u8 *data = msg->rx_buf;
+ if (data && (msg->rx_len >= 2)) {
+ data[0] = buf[1]; /* strip out dcs type */
+ data[1] = buf[2];
+ return 2;
+ } else {
+ pr_err("%s: read data does not match with rx_buf len %zu\n",
+ __func__, msg->rx_len);
+ return -EINVAL;
+ }
+}
+
+static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
+{
+ /* strip out 4 byte dcs header */
+ if (msg->rx_buf && msg->rx_len)
+ memcpy(msg->rx_buf, buf + 4, msg->rx_len);
+
+ return msg->rx_len;
+}
+
+int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
+{
+ struct drm_device *dev = msm_host->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ if (!dma_base)
+ return -EINVAL;
+
+ return msm_gem_get_and_pin_iova(msm_host->tx_gem_obj,
+ priv->kms->aspace, dma_base);
+}
+
+int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *dma_base)
+{
+ if (!dma_base)
+ return -EINVAL;
+
+ *dma_base = msm_host->tx_buf_paddr;
+ return 0;
+}
+
+static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
+{
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+ int ret;
+ uint64_t dma_base;
+ bool triggered;
+
+ ret = cfg_hnd->ops->dma_base_get(msm_host, &dma_base);
+ if (ret) {
+ pr_err("%s: failed to get iova: %d\n", __func__, ret);
+ return ret;
+ }
+
+ reinit_completion(&msm_host->dma_comp);
+
+ dsi_wait4video_eng_busy(msm_host);
+
+ triggered = msm_dsi_manager_cmd_xfer_trigger(
+ msm_host->id, dma_base, len);
+ if (triggered) {
+ ret = wait_for_completion_timeout(&msm_host->dma_comp,
+ msecs_to_jiffies(200));
+ DBG("ret=%d", ret);
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+ else
+ ret = len;
+ } else
+ ret = len;
+
+ return ret;
+}
+
+static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
+ u8 *buf, int rx_byte, int pkt_size)
+{
+ u32 *temp, data;
+ int i, j = 0, cnt;
+ u32 read_cnt;
+ u8 reg[16];
+ int repeated_bytes = 0;
+ int buf_offset = buf - msm_host->rx_buf;
+
+ temp = (u32 *)reg;
+ cnt = (rx_byte + 3) >> 2;
+ if (cnt > 4)
+ cnt = 4; /* 4 x 32 bits registers only */
+
+ if (rx_byte == 4)
+ read_cnt = 4;
+ else
+ read_cnt = pkt_size + 6;
+
+ /*
+ * In case of multiple reads from the panel, after the first read there
+ * is a possibility that some payload bytes repeat in the RDBK_DATA
+ * registers, since on every pass we read all the parameters from the
+ * panel starting from the first byte. We need to skip the repeated
+ * bytes and then append only the new parameters to the rx buffer.
+ */
+ if (read_cnt > 16) {
+ int bytes_shifted;
+ /* Any data more than 16 bytes will be shifted out.
+ * The temp read buffer should already contain these bytes.
+ * The remaining bytes in read buffer are the repeated bytes.
+ */
+ bytes_shifted = read_cnt - 16;
+ repeated_bytes = buf_offset - bytes_shifted;
+ }
+
+ for (i = cnt - 1; i >= 0; i--) {
+ data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
+ *temp++ = ntohl(data); /* to host byte order */
+ DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
+ }
+
+ for (i = repeated_bytes; i < 16; i++)
+ buf[j++] = reg[i];
+
+ return j;
+}
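+
+/*
+ * Illustrative example of the repeated-bytes math: for a 20-byte read the
+ * first pass uses pkt_size = 10 (read_cnt = 16, nothing skipped) and the
+ * caller then advances buf by 14 bytes; the second pass re-reads from the
+ * start of the response with pkt_size = 20, so read_cnt = 26 > 16,
+ * bytes_shifted = 10 and repeated_bytes = 14 - 10 = 4, i.e. the first 4
+ * bytes left in the registers are duplicates and only reg[4..15] is
+ * appended.
+ */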
+
+static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
+ const struct mipi_dsi_msg *msg)
+{
+ int len, ret;
+ int bllp_len = msm_host->mode->hdisplay *
+ dsi_get_bpp(msm_host->format) / 8;
+
+ len = dsi_cmd_dma_add(msm_host, msg);
+ if (len < 0) {
+ pr_err("%s: failed to add cmd type = 0x%x\n",
+ __func__, msg->type);
+ return len;
+ }
+
+ /* for video mode, do not send cmds longer than
+ * one pixel line, since they are only transmitted
+ * during the BLLP
+ */
+ /* TODO: if the command is sent in LP mode, the bit rate is only
+ * half of the esc clk rate. In this case, if the video is already
+ * actively streaming, we need to check more carefully whether the
+ * command can fit into one BLLP.
+ */
+ if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
+ pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
+ __func__, len);
+ return -EINVAL;
+ }
+
+ ret = dsi_cmd_dma_tx(msm_host, len);
+ if (ret < 0) {
+ pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d, ret=%d\n",
+ __func__, msg->type, (*(u8 *)(msg->tx_buf)), len, ret);
+ return ret;
+ } else if (ret < len) {
+ pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, ret=%d len=%d\n",
+ __func__, msg->type, (*(u8 *)(msg->tx_buf)), ret, len);
+ return -EIO;
+ }
+
+ return len;
+}
+
+static void dsi_err_worker(struct work_struct *work)
+{
+ struct msm_dsi_host *msm_host =
+ container_of(work, struct msm_dsi_host, err_work);
+ u32 status = msm_host->err_work_state;
+
+ pr_err_ratelimited("%s: status=%x\n", __func__, status);
+ if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
+ dsi_sw_reset(msm_host);
+
+ /* It is safe to clear here because error irq is disabled. */
+ msm_host->err_work_state = 0;
+
+ /* enable dsi error interrupt */
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
+}
+
+static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);
+
+ if (status) {
+ dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
+ /* Writing an extra 0 is needed to clear the error bits */
+ dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
+ msm_host->err_work_state |= DSI_ERR_STATE_ACK;
+ }
+}
+
+static void dsi_timeout_status(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
+
+ if (status) {
+ dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
+ msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
+ }
+}
+
+static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
+
+ if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC |
+ DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC |
+ DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL |
+ DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 |
+ DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) {
+ dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
+ msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
+ }
+}
+
+static void dsi_fifo_status(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);
+
+ /* fifo underflow, overflow */
+ if (status) {
+ dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
+ msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
+ if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
+ msm_host->err_work_state |=
+ DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
+ }
+}
+
+static void dsi_status(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_STATUS0);
+
+ if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
+ dsi_write(msm_host, REG_DSI_STATUS0, status);
+ msm_host->err_work_state |=
+ DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
+ }
+}
+
+static void dsi_clk_status(struct msm_dsi_host *msm_host)
+{
+ u32 status;
+
+ status = dsi_read(msm_host, REG_DSI_CLK_STATUS);
+
+ if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
+ dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
+ msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
+ }
+}
+
+static void dsi_error(struct msm_dsi_host *msm_host)
+{
+ /* disable dsi error interrupt */
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);
+
+ dsi_clk_status(msm_host);
+ dsi_fifo_status(msm_host);
+ dsi_ack_err_status(msm_host);
+ dsi_timeout_status(msm_host);
+ dsi_status(msm_host);
+ dsi_dln0_phy_err(msm_host);
+
+ queue_work(msm_host->workqueue, &msm_host->err_work);
+}
+
+static irqreturn_t dsi_host_irq(int irq, void *ptr)
+{
+ struct msm_dsi_host *msm_host = ptr;
+ u32 isr;
+ unsigned long flags;
+
+ if (!msm_host->ctrl_base)
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&msm_host->intr_lock, flags);
+ isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
+ dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
+ spin_unlock_irqrestore(&msm_host->intr_lock, flags);
+
+ DBG("isr=0x%x, id=%d", isr, msm_host->id);
+
+ if (isr & DSI_IRQ_ERROR)
+ dsi_error(msm_host);
+
+ if (isr & DSI_IRQ_VIDEO_DONE)
+ complete(&msm_host->video_comp);
+
+ if (isr & DSI_IRQ_CMD_DMA_DONE)
+ complete(&msm_host->dma_comp);
+
+ return IRQ_HANDLED;
+}
+
+static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
+ struct device *panel_device)
+{
+ msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device,
+ "disp-enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(msm_host->disp_en_gpio)) {
+ DBG("cannot get disp-enable-gpios %ld",
+ PTR_ERR(msm_host->disp_en_gpio));
+ return PTR_ERR(msm_host->disp_en_gpio);
+ }
+
+ msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te",
+ GPIOD_IN);
+ if (IS_ERR(msm_host->te_gpio)) {
+ DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
+ return PTR_ERR(msm_host->te_gpio);
+ }
+
+ return 0;
+}
+
+static int dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ int ret;
+
+ if (dsi->lanes > msm_host->num_data_lanes)
+ return -EINVAL;
+
+ msm_host->channel = dsi->channel;
+ msm_host->lanes = dsi->lanes;
+ msm_host->format = dsi->format;
+ msm_host->mode_flags = dsi->mode_flags;
+ if (dsi->dsc)
+ msm_host->dsc = dsi->dsc;
+
+ /* Some gpios defined in panel DT need to be controlled by host */
+ ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
+ if (ret)
+ return ret;
+
+ ret = dsi_dev_attach(msm_host->pdev);
+ if (ret)
+ return ret;
+
+ DBG("id=%d", msm_host->id);
+
+ return 0;
+}
+
+static int dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ dsi_dev_detach(msm_host->pdev);
+
+ DBG("id=%d", msm_host->id);
+
+ return 0;
+}
+
+static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ int ret;
+
+ if (!msg || !msm_host->power_on)
+ return -EINVAL;
+
+ mutex_lock(&msm_host->cmd_mutex);
+ ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
+ mutex_unlock(&msm_host->cmd_mutex);
+
+ return ret;
+}
+
+static const struct mipi_dsi_host_ops dsi_host_ops = {
+ .attach = dsi_host_attach,
+ .detach = dsi_host_detach,
+ .transfer = dsi_host_transfer,
+};
+
+/*
+ * List of supported physical to logical lane mappings.
+ * For example, the 2nd entry represents the following mapping:
+ *
+ * "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3;
+ */
+static const int supported_data_lane_swaps[][4] = {
+ { 0, 1, 2, 3 },
+ { 3, 0, 1, 2 },
+ { 2, 3, 0, 1 },
+ { 1, 2, 3, 0 },
+ { 0, 3, 2, 1 },
+ { 1, 0, 3, 2 },
+ { 2, 1, 0, 3 },
+ { 3, 2, 1, 0 },
+};
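+
+/*
+ * Illustrative DT example: data-lanes = <3 0 1 2> (logical 0->phys 3,
+ * 1->phys 0, 2->phys 1, 3->phys 2) satisfies swap[lane_map[j]] == j for
+ * entry { 1, 2, 3, 0 }, so dlane_swap becomes 3.
+ */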
+
+static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
+ struct device_node *ep)
+{
+ struct device *dev = &msm_host->pdev->dev;
+ struct property *prop;
+ u32 lane_map[4];
+ int ret, i, len, num_lanes;
+
+ prop = of_find_property(ep, "data-lanes", &len);
+ if (!prop) {
+ DRM_DEV_DEBUG(dev,
+ "failed to find data lane mapping, using default\n");
+ /* Set the number of data lanes to 4 by default. */
+ msm_host->num_data_lanes = 4;
+ return 0;
+ }
+
+ num_lanes = drm_of_get_data_lanes_count(ep, 1, 4);
+ if (num_lanes < 0) {
+ DRM_DEV_ERROR(dev, "bad number of data lanes\n");
+ return num_lanes;
+ }
+
+ msm_host->num_data_lanes = num_lanes;
+
+ ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
+ num_lanes);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to read lane data\n");
+ return ret;
+ }
+
+ /*
+ * compare DT specified physical-logical lane mappings with the ones
+ * supported by hardware
+ */
+ for (i = 0; i < ARRAY_SIZE(supported_data_lane_swaps); i++) {
+ const int *swap = supported_data_lane_swaps[i];
+ int j;
+
+ /*
+ * the data-lanes array we get from DT has a logical->physical
+ * mapping. The "data lane swap" register field represents
+ * supported configurations in a physical->logical mapping.
+ * Translate the DT mapping to what we understand and find a
+ * configuration that works.
+ */
+ for (j = 0; j < num_lanes; j++) {
+ if (lane_map[j] < 0 || lane_map[j] > 3)
+ DRM_DEV_ERROR(dev, "bad physical lane entry %u\n",
+ lane_map[j]);
+
+ if (swap[lane_map[j]] != j)
+ break;
+ }
+
+ if (j == num_lanes) {
+ msm_host->dlane_swap = i;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static u32 dsi_dsc_rc_buf_thresh[DSC_NUM_BUF_RANGES - 1] = {
+ 0x0e, 0x1c, 0x2a, 0x38, 0x46, 0x54, 0x62,
+ 0x69, 0x70, 0x77, 0x79, 0x7b, 0x7d, 0x7e
+};
+
+/* only the 8 bpc, 8 bpp tables are provided */
+static char min_qp[DSC_NUM_BUF_RANGES] = {
+ 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 13
+};
+
+static char max_qp[DSC_NUM_BUF_RANGES] = {
+ 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 11, 12, 13, 13, 15
+};
+
+static char bpg_offset[DSC_NUM_BUF_RANGES] = {
+ 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
+};
+
+static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc_config *dsc)
+{
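+ /*
+ * drm_dsc_config::bits_per_pixel is in 6.4 fixed-point format, so the
+ * integer part is recovered by shifting right by 4; the check below
+ * rejects any fractional component.
+ */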
+ int i;
+ u16 bpp = dsc->bits_per_pixel >> 4;
+
+ if (dsc->bits_per_pixel & 0xf) {
+ DRM_DEV_ERROR(&msm_host->pdev->dev, "DSI does not support fractional bits_per_pixel\n");
+ return -EINVAL;
+ }
+
+ if (dsc->bits_per_component != 8) {
+ DRM_DEV_ERROR(&msm_host->pdev->dev, "DSI does not support bits_per_component != 8 yet\n");
+ return -EOPNOTSUPP;
+ }
+
+ dsc->rc_model_size = 8192;
+ dsc->first_line_bpg_offset = 12;
+ dsc->rc_edge_factor = 6;
+ dsc->rc_tgt_offset_high = 3;
+ dsc->rc_tgt_offset_low = 3;
+ dsc->simple_422 = 0;
+ dsc->convert_rgb = 1;
+ dsc->vbr_enable = 0;
+
+ /* handle only bpp = bpc = 8 */
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1 ; i++)
+ dsc->rc_buf_thresh[i] = dsi_dsc_rc_buf_thresh[i];
+
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ dsc->rc_range_params[i].range_min_qp = min_qp[i];
+ dsc->rc_range_params[i].range_max_qp = max_qp[i];
+ /*
+ * Range BPG Offset contains two's-complement signed values that fill
+ * 8 bits, yet the registers and DCS PPS field are only 6 bits wide.
+ */
+ dsc->rc_range_params[i].range_bpg_offset = bpg_offset[i] & DSC_RANGE_BPG_OFFSET_MASK;
+ }
+
+ dsc->initial_offset = 6144; /* for bpp = 8 */
+ if (bpp != 8)
+ dsc->initial_offset = 2048; /* for bpp = 12 */
+
+ if (dsc->bits_per_component <= 10)
+ dsc->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
+ else
+ dsc->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC;
+
+ dsc->initial_xmit_delay = 512;
+ dsc->initial_scale_value = 32;
+ dsc->first_line_bpg_offset = 12;
+ dsc->line_buf_depth = dsc->bits_per_component + 1;
+
+ /* bpc 8 */
+ dsc->flatness_min_qp = 3;
+ dsc->flatness_max_qp = 12;
+ dsc->rc_quant_incr_limit0 = 11;
+ dsc->rc_quant_incr_limit1 = 11;
+
+ return drm_dsc_compute_rc_parameters(dsc);
+}
+
+static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
+{
+ struct device *dev = &msm_host->pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *endpoint;
+ int ret = 0;
+
+ /*
+ * Get the endpoint of the output port of the DSI host. In our case,
+ * this is mapped to the port node with reg = 1. Don't return an error if
+ * the remote endpoint isn't defined. It's possible that there is
+ * nothing connected to the dsi output.
+ */
+ endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
+ if (!endpoint) {
+ DRM_DEV_DEBUG(dev, "%s: no endpoint\n", __func__);
+ return 0;
+ }
+
+ ret = dsi_host_parse_lane_data(msm_host, endpoint);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "%s: invalid lane configuration %d\n",
+ __func__, ret);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (of_property_read_bool(np, "syscon-sfpb")) {
+ msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
+ "syscon-sfpb");
+ if (IS_ERR(msm_host->sfpb)) {
+ DRM_DEV_ERROR(dev, "%s: failed to get sfpb regmap\n",
+ __func__);
+ ret = PTR_ERR(msm_host->sfpb);
+ }
+ }
+
+err:
+ of_node_put(endpoint);
+
+ return ret;
+}
+
+static int dsi_host_get_id(struct msm_dsi_host *msm_host)
+{
+ struct platform_device *pdev = msm_host->pdev;
+ const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
+ struct resource *res;
+ int i;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl");
+ if (!res)
+ return -EINVAL;
+
+ for (i = 0; i < cfg->num_dsi; i++) {
+ if (cfg->io_start[i] == res->start)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+int msm_dsi_host_init(struct msm_dsi *msm_dsi)
+{
+ struct msm_dsi_host *msm_host = NULL;
+ struct platform_device *pdev = msm_dsi->pdev;
+ const struct msm_dsi_config *cfg;
+ int ret;
+
+ msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
+ if (!msm_host) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ msm_host->pdev = pdev;
+ msm_dsi->host = &msm_host->base;
+
+ ret = dsi_host_parse_dt(msm_host);
+ if (ret) {
+ pr_err("%s: failed to parse dt\n", __func__);
+ goto fail;
+ }
+
+ msm_host->ctrl_base = msm_ioremap_size(pdev, "dsi_ctrl", &msm_host->ctrl_size);
+ if (IS_ERR(msm_host->ctrl_base)) {
+ pr_err("%s: unable to map Dsi ctrl base\n", __func__);
+ ret = PTR_ERR(msm_host->ctrl_base);
+ goto fail;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ msm_host->cfg_hnd = dsi_get_config(msm_host);
+ if (!msm_host->cfg_hnd) {
+ ret = -EINVAL;
+ pr_err("%s: get config failed\n", __func__);
+ goto fail;
+ }
+ cfg = msm_host->cfg_hnd->cfg;
+
+ msm_host->id = dsi_host_get_id(msm_host);
+ if (msm_host->id < 0) {
+ ret = msm_host->id;
+ pr_err("%s: unable to identify DSI host index\n", __func__);
+ goto fail;
+ }
+
+ /* fixup base address by io offset */
+ msm_host->ctrl_base += cfg->io_offset;
+
+ ret = devm_regulator_bulk_get_const(&pdev->dev, cfg->num_regulators,
+ cfg->regulator_data,
+ &msm_host->supplies);
+ if (ret)
+ goto fail;
+
+ ret = dsi_clk_init(msm_host);
+ if (ret) {
+ pr_err("%s: unable to initialize dsi clks\n", __func__);
+ goto fail;
+ }
+
+ msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
+ if (!msm_host->rx_buf) {
+ ret = -ENOMEM;
+ pr_err("%s: alloc rx temp buf failed\n", __func__);
+ goto fail;
+ }
+
+ ret = devm_pm_opp_set_clkname(&pdev->dev, "byte");
+ if (ret)
+ return ret;
+ /* OPP table is optional */
+ ret = devm_pm_opp_of_add_table(&pdev->dev);
+ if (ret && ret != -ENODEV) {
+ dev_err(&pdev->dev, "invalid OPP table in device tree\n");
+ return ret;
+ }
+
+ msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!msm_host->irq) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return -EINVAL;
+ }
+
+ /* do not autoenable, will be enabled later */
+ ret = devm_request_irq(&pdev->dev, msm_host->irq, dsi_host_irq,
+ IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN,
+ "dsi_isr", msm_host);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
+ msm_host->irq, ret);
+ return ret;
+ }
+
+ init_completion(&msm_host->dma_comp);
+ init_completion(&msm_host->video_comp);
+ mutex_init(&msm_host->dev_mutex);
+ mutex_init(&msm_host->cmd_mutex);
+ spin_lock_init(&msm_host->intr_lock);
+
+ /* setup workqueue */
+ msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
+ if (!msm_host->workqueue)
+ return -ENOMEM;
+
+ INIT_WORK(&msm_host->err_work, dsi_err_worker);
+
+ msm_dsi->id = msm_host->id;
+
+ DBG("Dsi Host %d initialized", msm_host->id);
+ return 0;
+
+fail:
+ return ret;
+}
+
+void msm_dsi_host_destroy(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ DBG("");
+ if (msm_host->workqueue) {
+ destroy_workqueue(msm_host->workqueue);
+ msm_host->workqueue = NULL;
+ }
+
+ mutex_destroy(&msm_host->cmd_mutex);
+ mutex_destroy(&msm_host->dev_mutex);
+
+ pm_runtime_disable(&msm_host->pdev->dev);
+}
+
+int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
+ struct drm_device *dev)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+ int ret;
+
+ msm_host->dev = dev;
+
+ ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K);
+ if (ret) {
+ pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int msm_dsi_host_register(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ int ret;
+
+ /* Register mipi dsi host */
+ if (!msm_host->registered) {
+ host->dev = &msm_host->pdev->dev;
+ host->ops = &dsi_host_ops;
+ ret = mipi_dsi_host_register(host);
+ if (ret)
+ return ret;
+
+ msm_host->registered = true;
+ }
+
+ return 0;
+}
+
+void msm_dsi_host_unregister(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ if (msm_host->registered) {
+ mipi_dsi_host_unregister(host);
+ host->dev = NULL;
+ host->ops = NULL;
+ msm_host->registered = false;
+ }
+}
+
+int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+
+ /* TODO: make sure dsi_cmd_mdp is idle.
+ * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
+ * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
+ * How to handle the old versions? Wait for mdp cmd done?
+ */
+
+ /*
+ * the mdss interrupt is generated in the mdp core clock domain, so the
+ * mdp clock needs to be enabled to receive the dsi interrupt
+ */
+ pm_runtime_get_sync(&msm_host->pdev->dev);
+ cfg_hnd->ops->link_clk_set_rate(msm_host);
+ cfg_hnd->ops->link_clk_enable(msm_host);
+
+ /* TODO: vote for bus bandwidth */
+
+ if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
+ dsi_set_tx_power_mode(0, msm_host);
+
+ msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
+ dsi_write(msm_host, REG_DSI_CTRL,
+ msm_host->dma_cmd_ctrl_restore |
+ DSI_CTRL_CMD_MODE_EN |
+ DSI_CTRL_ENABLE);
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);
+
+ return 0;
+}
+
+void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+
+ dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
+ dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
+
+ if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
+ dsi_set_tx_power_mode(1, msm_host);
+
+ /* TODO: unvote for bus bandwidth */
+
+ cfg_hnd->ops->link_clk_disable(msm_host);
+ pm_runtime_put(&msm_host->pdev->dev);
+}
+
+int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ return dsi_cmds2buf_tx(msm_host, msg);
+}
+
+int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+ int data_byte, rx_byte, dlen, end;
+ int short_response, diff, pkt_size, ret = 0;
+ char cmd;
+ int rlen = msg->rx_len;
+ u8 *buf;
+
+ if (rlen <= 2) {
+ short_response = 1;
+ pkt_size = rlen;
+ rx_byte = 4;
+ } else {
+ short_response = 0;
+ data_byte = 10; /* first read */
+ if (rlen < data_byte)
+ pkt_size = rlen;
+ else
+ pkt_size = data_byte;
+ rx_byte = data_byte + 6; /* 4 header + 2 crc */
+ }
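+
+ /*
+ * Worked example (illustrative): for a long read with rlen = 20, the
+ * first pass reads data_byte = 10 payload bytes, so rx_byte = 16
+ * (4-byte header + 10 payload + 2 CRC), exactly filling the 16-byte
+ * RX FIFO. Later passes use data_byte = 14, since the DCS header is
+ * only retained on the first pass.
+ */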
+
+ buf = msm_host->rx_buf;
+ end = 0;
+ while (!end) {
+ u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
+ struct mipi_dsi_msg max_pkt_size_msg = {
+ .channel = msg->channel,
+ .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
+ .tx_len = 2,
+ .tx_buf = tx,
+ };
+
+ DBG("rlen=%d pkt_size=%d rx_byte=%d",
+ rlen, pkt_size, rx_byte);
+
+ ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
+ if (ret < 2) {
+ pr_err("%s: Set max pkt size failed, %d\n",
+ __func__, ret);
+ return -EINVAL;
+ }
+
+ if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
+ (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
+ /* Clear the RDBK_DATA registers */
+ dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
+ DSI_RDBK_DATA_CTRL_CLR);
+ wmb(); /* make sure the RDBK registers are cleared */
+ dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
+ wmb(); /* release cleared status before transfer */
+ }
+
+ ret = dsi_cmds2buf_tx(msm_host, msg);
+ if (ret < 0) {
+ pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
+ return ret;
+ } else if (ret < msg->tx_len) {
+ pr_err("%s: Read cmd Tx failed, too short: %d\n", __func__, ret);
+ return -ECOMM;
+ }
+
+ /*
+ * Once the cmd_dma_done interrupt is received, the return data
+ * from the client is ready and already stored in the RDBK_DATA
+ * registers. Since the RX FIFO is 16 bytes, the DCS header is
+ * kept only on the first loop; after that the header is lost
+ * during the shift into the registers.
+ */
+ dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);
+
+ if (dlen <= 0)
+ return 0;
+
+ if (short_response)
+ break;
+
+ if (rlen <= data_byte) {
+ diff = data_byte - rlen;
+ end = 1;
+ } else {
+ diff = 0;
+ rlen -= data_byte;
+ }
+
+ if (!end) {
+ dlen -= 2; /* 2 crc */
+ dlen -= diff;
+ buf += dlen; /* next start position */
+ data_byte = 14; /* NOT first read */
+ if (rlen < data_byte)
+ pkt_size += rlen;
+ else
+ pkt_size += data_byte;
+ DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
+ }
+ }
+
+ /*
+ * For a single long read, if the requested rlen < 10, we need to
+ * shift the start position of the rx data buffer to skip the
+ * bytes that were not updated.
+ */
+ if (pkt_size < 10 && !short_response)
+ buf = msm_host->rx_buf + (10 - rlen);
+ else
+ buf = msm_host->rx_buf;
+
+ cmd = buf[0];
+ switch (cmd) {
+ case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+ pr_err("%s: rx ACK_ERR_PACLAGE\n", __func__);
+ ret = 0;
+ break;
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+ ret = dsi_short_read1_resp(buf, msg);
+ break;
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+ ret = dsi_short_read2_resp(buf, msg);
+ break;
+ case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
+ case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
+ ret = dsi_long_read_resp(buf, msg);
+ break;
+ default:
+ pr_warn("%s:Invalid response cmd\n", __func__);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
+ u32 len)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ dsi_write(msm_host, REG_DSI_DMA_BASE, dma_base);
+ dsi_write(msm_host, REG_DSI_DMA_LEN, len);
+ dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
+
+ /* Make sure trigger happens */
+ wmb();
+}
+
+void msm_dsi_host_set_phy_mode(struct mipi_dsi_host *host,
+ struct msm_dsi_phy *src_phy)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ msm_host->cphy_mode = src_phy->cphy_mode;
+}
+
+void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ DBG("");
+ dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
+ /* Make sure fully reset */
+ wmb();
+ udelay(1000);
+ dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
+ udelay(100);
+}
+
+void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
+ struct msm_dsi_phy_clk_request *clk_req,
+ bool is_bonded_dsi)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+ int ret;
+
+ ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_bonded_dsi);
+ if (ret) {
+ pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
+ return;
+ }
+
+ /*
+ * C-PHY transmits 16 bits over 7 clock cycles, and "byte_clk" is in
+ * units of 16 bits (see dsi_calc_pclk), so multiply by 7 to get the
+ * "bitclk rate".
+ */
+ if (msm_host->cphy_mode)
+ clk_req->bitclk_rate = msm_host->byte_clk_rate * 7;
+ else
+ clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
+ clk_req->escclk_rate = msm_host->esc_clk_rate;
+}
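+
+/*
+ * Worked example (illustrative numbers): with byte_clk_rate = 100 MHz,
+ * a D-PHY link requests bitclk_rate = 8 * 100 MHz = 800 MHz, while a
+ * C-PHY link requests 7 * 100 MHz = 700 MHz, per the
+ * 16-bits-over-7-cycles note above.
+ */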
+
+void msm_dsi_host_enable_irq(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ enable_irq(msm_host->irq);
+}
+
+void msm_dsi_host_disable_irq(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ disable_irq(msm_host->irq);
+}
+
+int msm_dsi_host_enable(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ dsi_op_mode_config(msm_host,
+ !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
+
+ /* TODO: the clock should be turned off for command mode, and only
+ * turned on before MDP START. This part of the code should be
+ * enabled once the mdp driver supports it.
+ */
+ /* if (msm_panel->mode == MSM_DSI_CMD_MODE) {
+ * dsi_link_clk_disable(msm_host);
+ * pm_runtime_put(&msm_host->pdev->dev);
+ * }
+ */
+ msm_host->enabled = true;
+ return 0;
+}
+
+int msm_dsi_host_disable(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ msm_host->enabled = false;
+ dsi_op_mode_config(msm_host,
+ !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
+
+ /*
+ * Since we have disabled INTF, the video engine won't stop on its
+ * own, which blocks the cmd engine. Reset to disable the video
+ * engine so that we can send off commands.
+ */
+ dsi_sw_reset(msm_host);
+
+ return 0;
+}
+
+static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
+{
+ enum sfpb_ahb_arb_master_port_en en;
+
+ if (!msm_host->sfpb)
+ return;
+
+ en = enable ? SFPB_MASTER_PORT_ENABLE : SFPB_MASTER_PORT_DISABLE;
+
+ regmap_update_bits(msm_host->sfpb, REG_SFPB_GPREG,
+ SFPB_GPREG_MASTER_PORT_EN__MASK,
+ SFPB_GPREG_MASTER_PORT_EN(en));
+}
+
+int msm_dsi_host_power_on(struct mipi_dsi_host *host,
+ struct msm_dsi_phy_shared_timings *phy_shared_timings,
+ bool is_bonded_dsi, struct msm_dsi_phy *phy)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+ int ret = 0;
+
+ mutex_lock(&msm_host->dev_mutex);
+ if (msm_host->power_on) {
+ DBG("dsi host already on");
+ goto unlock_ret;
+ }
+
+ msm_dsi_sfpb_config(msm_host, true);
+
+ ret = regulator_bulk_enable(msm_host->cfg_hnd->cfg->num_regulators,
+ msm_host->supplies);
+ if (ret) {
+ pr_err("%s:Failed to enable vregs.ret=%d\n",
+ __func__, ret);
+ goto unlock_ret;
+ }
+
+ pm_runtime_get_sync(&msm_host->pdev->dev);
+ ret = cfg_hnd->ops->link_clk_set_rate(msm_host);
+ if (!ret)
+ ret = cfg_hnd->ops->link_clk_enable(msm_host);
+ if (ret) {
+ pr_err("%s: failed to enable link clocks. ret=%d\n",
+ __func__, ret);
+ goto fail_disable_reg;
+ }
+
+ ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
+ if (ret) {
+ pr_err("%s: failed to set pinctrl default state, %d\n",
+ __func__, ret);
+ goto fail_disable_clk;
+ }
+
+ dsi_timing_setup(msm_host, is_bonded_dsi);
+ dsi_sw_reset(msm_host);
+ dsi_ctrl_config(msm_host, true, phy_shared_timings, phy);
+
+ if (msm_host->disp_en_gpio)
+ gpiod_set_value(msm_host->disp_en_gpio, 1);
+
+ msm_host->power_on = true;
+ mutex_unlock(&msm_host->dev_mutex);
+
+ return 0;
+
+fail_disable_clk:
+ cfg_hnd->ops->link_clk_disable(msm_host);
+ pm_runtime_put(&msm_host->pdev->dev);
+fail_disable_reg:
+ regulator_bulk_disable(msm_host->cfg_hnd->cfg->num_regulators,
+ msm_host->supplies);
+unlock_ret:
+ mutex_unlock(&msm_host->dev_mutex);
+ return ret;
+}
+
+int msm_dsi_host_power_off(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+
+ mutex_lock(&msm_host->dev_mutex);
+ if (!msm_host->power_on) {
+ DBG("dsi host already off");
+ goto unlock_ret;
+ }
+
+ dsi_ctrl_config(msm_host, false, NULL, NULL);
+
+ if (msm_host->disp_en_gpio)
+ gpiod_set_value(msm_host->disp_en_gpio, 0);
+
+ pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
+
+ cfg_hnd->ops->link_clk_disable(msm_host);
+ pm_runtime_put(&msm_host->pdev->dev);
+
+ regulator_bulk_disable(msm_host->cfg_hnd->cfg->num_regulators,
+ msm_host->supplies);
+
+ msm_dsi_sfpb_config(msm_host, false);
+
+ DBG("-");
+
+ msm_host->power_on = false;
+
+unlock_ret:
+ mutex_unlock(&msm_host->dev_mutex);
+ return 0;
+}
+
+int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
+ const struct drm_display_mode *mode)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ if (msm_host->mode) {
+ drm_mode_destroy(msm_host->dev, msm_host->mode);
+ msm_host->mode = NULL;
+ }
+
+ msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
+ if (!msm_host->mode) {
+ pr_err("%s: cannot duplicate mode\n", __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+enum drm_mode_status msm_dsi_host_check_dsc(struct mipi_dsi_host *host,
+ const struct drm_display_mode *mode)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ struct drm_dsc_config *dsc = msm_host->dsc;
+ int pic_width = mode->hdisplay;
+ int pic_height = mode->vdisplay;
+
+ if (!msm_host->dsc)
+ return MODE_OK;
+
+ if (pic_width % dsc->slice_width) {
+ pr_err("DSI: pic_width %d has to be multiple of slice %d\n",
+ pic_width, dsc->slice_width);
+ return MODE_H_ILLEGAL;
+ }
+
+ if (pic_height % dsc->slice_height) {
+ pr_err("DSI: pic_height %d has to be multiple of slice %d\n",
+ pic_height, dsc->slice_height);
+ return MODE_V_ILLEGAL;
+ }
+
+ return MODE_OK;
+}
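+
+/*
+ * Worked example (illustrative): a 1080x2400 mode with dsc->slice_width
+ * = 540 and dsc->slice_height = 8 passes both checks (1080 % 540 == 0,
+ * 2400 % 8 == 0); the same mode with slice_width = 720 would be rejected
+ * with MODE_H_ILLEGAL, since 1080 % 720 != 0.
+ */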
+
+unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host)
+{
+ return to_msm_dsi_host(host)->mode_flags;
+}
+
+void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ pm_runtime_get_sync(&msm_host->pdev->dev);
+
+ msm_disp_snapshot_add_block(disp_state, msm_host->ctrl_size,
+ msm_host->ctrl_base, "dsi%d_ctrl", msm_host->id);
+
+ pm_runtime_put_sync(&msm_host->pdev->dev);
+}
+
+static void msm_dsi_host_video_test_pattern_setup(struct msm_dsi_host *msm_host)
+{
+ u32 reg;
+
+ reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL);
+
+ dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL, 0xff);
+ /* draw checkered rectangle pattern */
+ dsi_write(msm_host, REG_DSI_TPG_MAIN_CONTROL,
+ DSI_TPG_MAIN_CONTROL_CHECKERED_RECTANGLE_PATTERN);
+ /* use 24-bit RGB test pattern */
+ dsi_write(msm_host, REG_DSI_TPG_VIDEO_CONFIG,
+ DSI_TPG_VIDEO_CONFIG_BPP(VIDEO_CONFIG_24BPP) |
+ DSI_TPG_VIDEO_CONFIG_RGB);
+
+ reg |= DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL(VID_MDSS_GENERAL_PATTERN);
+ dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, reg);
+
+ DBG("Video test pattern setup done\n");
+}
+
+static void msm_dsi_host_cmd_test_pattern_setup(struct msm_dsi_host *msm_host)
+{
+ u32 reg;
+
+ reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL);
+
+ /* initial value for test pattern */
+ dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0, 0xff);
+
+ reg |= DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL(CMD_MDP_MDSS_GENERAL_PATTERN);
+
+ dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, reg);
+ /* draw checkered rectangle pattern */
+ dsi_write(msm_host, REG_DSI_TPG_MAIN_CONTROL2,
+ DSI_TPG_MAIN_CONTROL2_CMD_MDP0_CHECKERED_RECTANGLE_PATTERN);
+
+ DBG("Cmd test pattern setup done\n");
+}
+
+void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ bool is_video_mode = !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO);
+ u32 reg;
+
+ if (is_video_mode)
+ msm_dsi_host_video_test_pattern_setup(msm_host);
+ else
+ msm_dsi_host_cmd_test_pattern_setup(msm_host);
+
+ reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL);
+ /* enable the test pattern generator */
+ dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, (reg | DSI_TEST_PATTERN_GEN_CTRL_EN));
+
+ /* for command mode, we need to trigger one frame from the tpg */
+ if (!is_video_mode)
+ dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER,
+ DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER);
+}
+
+struct drm_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ return msm_host->dsc;
+}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
new file mode 100644
index 000000000..3a1417397
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -0,0 +1,681 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#include "drm/drm_bridge_connector.h"
+
+#include "msm_kms.h"
+#include "dsi.h"
+
+#define DSI_CLOCK_MASTER DSI_0
+#define DSI_CLOCK_SLAVE DSI_1
+
+#define DSI_LEFT DSI_0
+#define DSI_RIGHT DSI_1
+
+/* According to the current drm framework sequence, take the encoder of
+ * DSI_1 as the master encoder
+ */
+#define DSI_ENCODER_MASTER DSI_1
+#define DSI_ENCODER_SLAVE DSI_0
+
+struct msm_dsi_manager {
+ struct msm_dsi *dsi[DSI_MAX];
+
+ bool is_bonded_dsi;
+ bool is_sync_needed;
+ int master_dsi_link_id;
+};
+
+static struct msm_dsi_manager msm_dsim_glb;
+
+#define IS_BONDED_DSI() (msm_dsim_glb.is_bonded_dsi)
+#define IS_SYNC_NEEDED() (msm_dsim_glb.is_sync_needed)
+#define IS_MASTER_DSI_LINK(id) (msm_dsim_glb.master_dsi_link_id == id)
+
+#ifdef CONFIG_OF
+static bool dsi_mgr_power_on_early(struct drm_bridge *bridge)
+{
+ struct drm_bridge *next_bridge = drm_bridge_get_next_bridge(bridge);
+
+ /*
+ * If the next bridge in the chain is the Parade ps8640 bridge chip
+ * then don't power on early since it seems to violate the expectations
+ * of the firmware that the bridge chip is running.
+ *
+ * NOTE: this is expected to be a temporary special case. It's expected
+ * that we'll eventually have a framework that allows the next level
+ * bridge to indicate whether it needs us to power on before it or
+ * after it. When that framework is in place then we'll use it and
+ * remove this special case.
+ */
+ return !(next_bridge && next_bridge->of_node &&
+ of_device_is_compatible(next_bridge->of_node, "parade,ps8640"));
+}
+#else
+static inline bool dsi_mgr_power_on_early(struct drm_bridge *bridge)
+{
+ return true;
+}
+#endif
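+
+/*
+ * Illustrative DT sketch (hypothetical node name) of the case
+ * special-cased above: a host whose next bridge is the ps8640 keeps the
+ * late power-on path:
+ *
+ * bridge@8 {
+ * compatible = "parade,ps8640";
+ * ...
+ * };
+ */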
+
+static inline struct msm_dsi *dsi_mgr_get_dsi(int id)
+{
+ return msm_dsim_glb.dsi[id];
+}
+
+static inline struct msm_dsi *dsi_mgr_get_other_dsi(int id)
+{
+ return msm_dsim_glb.dsi[(id + 1) % DSI_MAX];
+}
+
+static int dsi_mgr_parse_of(struct device_node *np, int id)
+{
+ struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
+
+ /*
+ * We assume both DSI nodes carry the same bonded-DSI and sync-mode
+ * information, and that only one node specifies the master in the
+ * bonded case.
+ */
+ if (!msm_dsim->is_bonded_dsi)
+ msm_dsim->is_bonded_dsi = of_property_read_bool(np, "qcom,dual-dsi-mode");
+
+ if (msm_dsim->is_bonded_dsi) {
+ if (of_property_read_bool(np, "qcom,master-dsi"))
+ msm_dsim->master_dsi_link_id = id;
+ if (!msm_dsim->is_sync_needed)
+ msm_dsim->is_sync_needed = of_property_read_bool(
+ np, "qcom,sync-dual-dsi");
+ }
+
+ return 0;
+}
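+
+/*
+ * Illustrative DT sketch (hypothetical labels) of the properties parsed
+ * above, for a bonded pair with dsi0 as the link master:
+ *
+ * &dsi0 {
+ * qcom,dual-dsi-mode;
+ * qcom,master-dsi;
+ * qcom,sync-dual-dsi;
+ * };
+ * &dsi1 {
+ * qcom,dual-dsi-mode;
+ * qcom,sync-dual-dsi;
+ * };
+ */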
+
+static int dsi_mgr_setup_components(int id)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+ struct msm_dsi *clk_master_dsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
+ struct msm_dsi *clk_slave_dsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
+ int ret;
+
+ if (!IS_BONDED_DSI()) {
+ ret = msm_dsi_host_register(msm_dsi->host);
+ if (ret)
+ return ret;
+
+ msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
+ msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
+ } else if (other_dsi) {
+ struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ?
+ msm_dsi : other_dsi;
+ struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ?
+ other_dsi : msm_dsi;
+ /*
+ * Register the slave host first, so that the slave DSI device has
+ * a chance to probe without blocking the master DSI device's
+ * probe. Also, do not check for probe deferral on the slave host,
+ * because only the master DSI device adds the panel to the global
+ * panel list; the panel's device is the master DSI device.
+ */
+ ret = msm_dsi_host_register(slave_link_dsi->host);
+ if (ret)
+ return ret;
+ ret = msm_dsi_host_register(master_link_dsi->host);
+ if (ret)
+ return ret;
+
+ /* PLL0 drives both DSI link clocks in bonded DSI mode. */
+ msm_dsi_phy_set_usecase(clk_master_dsi->phy,
+ MSM_DSI_PHY_MASTER);
+ msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
+ MSM_DSI_PHY_SLAVE);
+ msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
+ msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy);
+ }
+
+ return 0;
+}
+
+static int enable_phy(struct msm_dsi *msm_dsi,
+ struct msm_dsi_phy_shared_timings *shared_timings)
+{
+ struct msm_dsi_phy_clk_request clk_req;
+ bool is_bonded_dsi = IS_BONDED_DSI();
+
+ msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_bonded_dsi);
+
+ return msm_dsi_phy_enable(msm_dsi->phy, &clk_req, shared_timings);
+}
+
+static int
+dsi_mgr_phy_enable(int id,
+ struct msm_dsi_phy_shared_timings shared_timings[DSI_MAX])
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
+ struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
+ int ret;
+
+ /* In case of bonded DSI, some registers in PHY1 have been programmed
+ * during PLL0 clock's set_rate. The PHY1 reset called by host1 here
+ * will silently reset those PHY1 registers. Therefore we need to reset
+ * and enable both PHYs before any PLL clock operation.
+ */
+ if (IS_BONDED_DSI() && mdsi && sdsi) {
+ if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
+ msm_dsi_host_reset_phy(mdsi->host);
+ msm_dsi_host_reset_phy(sdsi->host);
+
+ ret = enable_phy(mdsi,
+ &shared_timings[DSI_CLOCK_MASTER]);
+ if (ret)
+ return ret;
+ ret = enable_phy(sdsi,
+ &shared_timings[DSI_CLOCK_SLAVE]);
+ if (ret) {
+ msm_dsi_phy_disable(mdsi->phy);
+ return ret;
+ }
+ }
+ } else {
+ msm_dsi_host_reset_phy(msm_dsi->host);
+ ret = enable_phy(msm_dsi, &shared_timings[id]);
+ if (ret)
+ return ret;
+ }
+
+ msm_dsi->phy_enabled = true;
+
+ return 0;
+}
+
+static void dsi_mgr_phy_disable(int id)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
+ struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
+
+ /*
+ * Disable the DSI PHY. In a bonded DSI configuration, the PHY of
+ * the first controller should only be disabled once the second
+ * controller has been disabled.
+ */
+ msm_dsi->phy_enabled = false;
+ if (IS_BONDED_DSI() && mdsi && sdsi) {
+ if (!mdsi->phy_enabled && !sdsi->phy_enabled) {
+ msm_dsi_phy_disable(sdsi->phy);
+ msm_dsi_phy_disable(mdsi->phy);
+ }
+ } else {
+ msm_dsi_phy_disable(msm_dsi->phy);
+ }
+}
+
+struct dsi_bridge {
+ struct drm_bridge base;
+ int id;
+};
+
+#define to_dsi_bridge(x) container_of(x, struct dsi_bridge, base)
+
+static int dsi_mgr_bridge_get_id(struct drm_bridge *bridge)
+{
+ struct dsi_bridge *dsi_bridge = to_dsi_bridge(bridge);
+ return dsi_bridge->id;
+}
+
+static void msm_dsi_manager_set_split_display(u8 id)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+ struct msm_drm_private *priv = msm_dsi->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ struct msm_dsi *master_dsi, *slave_dsi;
+
+ if (IS_BONDED_DSI() && !IS_MASTER_DSI_LINK(id)) {
+ master_dsi = other_dsi;
+ slave_dsi = msm_dsi;
+ } else {
+ master_dsi = msm_dsi;
+ slave_dsi = other_dsi;
+ }
+
+ if (!msm_dsi->external_bridge || !IS_BONDED_DSI())
+ return;
+
+ /*
+ * Set the split display info in the kms once the bonded DSI panel
+ * is connected to both hosts.
+ */
+ if (other_dsi && other_dsi->external_bridge && kms->funcs->set_split_display) {
+ kms->funcs->set_split_display(kms, master_dsi->encoder,
+ slave_dsi->encoder,
+ msm_dsi_is_cmd_mode(msm_dsi));
+ }
+}
+
+static void dsi_mgr_bridge_power_on(struct drm_bridge *bridge)
+{
+ int id = dsi_mgr_bridge_get_id(bridge);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ struct msm_dsi_phy_shared_timings phy_shared_timings[DSI_MAX];
+ bool is_bonded_dsi = IS_BONDED_DSI();
+ int ret;
+
+ DBG("id=%d", id);
+ if (!msm_dsi_device_connected(msm_dsi))
+ return;
+
+ /* Do nothing with the host if it is the slave DSI in a bonded-DSI setup */
+ if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
+ return;
+
+ ret = dsi_mgr_phy_enable(id, phy_shared_timings);
+ if (ret)
+ goto phy_en_fail;
+
+ ret = msm_dsi_host_power_on(host, &phy_shared_timings[id], is_bonded_dsi, msm_dsi->phy);
+ if (ret) {
+ pr_err("%s: power on host %d failed, %d\n", __func__, id, ret);
+ goto host_on_fail;
+ }
+
+ if (is_bonded_dsi && msm_dsi1) {
+ ret = msm_dsi_host_power_on(msm_dsi1->host,
+ &phy_shared_timings[DSI_1], is_bonded_dsi, msm_dsi1->phy);
+ if (ret) {
+ pr_err("%s: power on host1 failed, %d\n",
+ __func__, ret);
+ goto host1_on_fail;
+ }
+ }
+
+ /*
+ * Enable the IRQ before preparing the panel and disable it after
+ * unpreparing, so that the panel can communicate over the DSI link.
+ */
+ msm_dsi_host_enable_irq(host);
+ if (is_bonded_dsi && msm_dsi1)
+ msm_dsi_host_enable_irq(msm_dsi1->host);
+
+ return;
+
+host1_on_fail:
+ msm_dsi_host_power_off(host);
+host_on_fail:
+ dsi_mgr_phy_disable(id);
+phy_en_fail:
+ return;
+}
+
+static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ int id = dsi_mgr_bridge_get_id(bridge);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ bool is_bonded_dsi = IS_BONDED_DSI();
+ int ret;
+
+ DBG("id=%d", id);
+ if (!msm_dsi_device_connected(msm_dsi))
+ return;
+
+ /* Do nothing with the host if it is the slave DSI in a bonded-DSI setup */
+ if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
+ return;
+
+ if (!dsi_mgr_power_on_early(bridge))
+ dsi_mgr_bridge_power_on(bridge);
+
+ ret = msm_dsi_host_enable(host);
+ if (ret) {
+ pr_err("%s: enable host %d failed, %d\n", __func__, id, ret);
+ goto host_en_fail;
+ }
+
+ if (is_bonded_dsi && msm_dsi1) {
+ ret = msm_dsi_host_enable(msm_dsi1->host);
+ if (ret) {
+ pr_err("%s: enable host1 failed, %d\n", __func__, ret);
+ goto host1_en_fail;
+ }
+ }
+
+ return;
+
+host1_en_fail:
+ msm_dsi_host_disable(host);
+host_en_fail:
+
+ return;
+}
+
+void msm_dsi_manager_tpg_enable(void)
+{
+ struct msm_dsi *m_dsi = dsi_mgr_get_dsi(DSI_0);
+ struct msm_dsi *s_dsi = dsi_mgr_get_dsi(DSI_1);
+
+ /* if bonded DSI, trigger the tpg on the master first, then the slave */
+ if (m_dsi) {
+ msm_dsi_host_test_pattern_en(m_dsi->host);
+ if (IS_BONDED_DSI() && s_dsi)
+ msm_dsi_host_test_pattern_en(s_dsi->host);
+ }
+}
+
+static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
+{
+ int id = dsi_mgr_bridge_get_id(bridge);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ bool is_bonded_dsi = IS_BONDED_DSI();
+ int ret;
+
+ DBG("id=%d", id);
+
+ if (!msm_dsi_device_connected(msm_dsi))
+ return;
+
+ /*
+ * Do nothing with the host if it is the slave DSI in a bonded-DSI
+ * setup. It is safe to call dsi_mgr_phy_disable() here because a
+ * single PHY won't be disabled until both PHYs request disable.
+ */
+ if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
+ goto disable_phy;
+
+ ret = msm_dsi_host_disable(host);
+ if (ret)
+ pr_err("%s: host %d disable failed, %d\n", __func__, id, ret);
+
+ if (is_bonded_dsi && msm_dsi1) {
+ ret = msm_dsi_host_disable(msm_dsi1->host);
+ if (ret)
+ pr_err("%s: host1 disable failed, %d\n", __func__, ret);
+ }
+
+ msm_dsi_host_disable_irq(host);
+ if (is_bonded_dsi && msm_dsi1)
+ msm_dsi_host_disable_irq(msm_dsi1->host);
+
+ /* Save PHY status if it is a clock source */
+ msm_dsi_phy_pll_save_state(msm_dsi->phy);
+
+ ret = msm_dsi_host_power_off(host);
+ if (ret)
+ pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
+
+ if (is_bonded_dsi && msm_dsi1) {
+ ret = msm_dsi_host_power_off(msm_dsi1->host);
+ if (ret)
+ pr_err("%s: host1 power off failed, %d\n",
+ __func__, ret);
+ }
+
+disable_phy:
+ dsi_mgr_phy_disable(id);
+}
+
+static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ int id = dsi_mgr_bridge_get_id(bridge);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ bool is_bonded_dsi = IS_BONDED_DSI();
+
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
+
+ if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
+ return;
+
+ msm_dsi_host_set_display_mode(host, adjusted_mode);
+ if (is_bonded_dsi && other_dsi)
+ msm_dsi_host_set_display_mode(other_dsi->host, adjusted_mode);
+
+ if (dsi_mgr_power_on_early(bridge))
+ dsi_mgr_bridge_power_on(bridge);
+}
+
+static enum drm_mode_status dsi_mgr_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ int id = dsi_mgr_bridge_get_id(bridge);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct mipi_dsi_host *host = msm_dsi->host;
+
+ return msm_dsi_host_check_dsc(host, mode);
+}
+
+static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
+ .pre_enable = dsi_mgr_bridge_pre_enable,
+ .post_disable = dsi_mgr_bridge_post_disable,
+ .mode_set = dsi_mgr_bridge_mode_set,
+ .mode_valid = dsi_mgr_bridge_mode_valid,
+};
+
+/* initialize bridge */
+struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_bridge *bridge = NULL;
+ struct dsi_bridge *dsi_bridge;
+ struct drm_encoder *encoder;
+ int ret;
+
+ dsi_bridge = devm_kzalloc(msm_dsi->dev->dev,
+ sizeof(*dsi_bridge), GFP_KERNEL);
+ if (!dsi_bridge) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ dsi_bridge->id = id;
+
+ encoder = msm_dsi->encoder;
+
+ bridge = &dsi_bridge->base;
+ bridge->funcs = &dsi_mgr_bridge_funcs;
+
+ drm_bridge_add(bridge);
+
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
+ if (ret)
+ goto fail;
+
+ return bridge;
+
+fail:
+ if (bridge)
+ msm_dsi_manager_bridge_destroy(bridge);
+
+ return ERR_PTR(ret);
+}
+
+int msm_dsi_manager_ext_bridge_init(u8 id)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct drm_device *dev = msm_dsi->dev;
+ struct drm_encoder *encoder;
+ struct drm_bridge *int_bridge, *ext_bridge;
+ int ret;
+
+ int_bridge = msm_dsi->bridge;
+ ext_bridge = devm_drm_of_get_bridge(&msm_dsi->pdev->dev,
+ msm_dsi->pdev->dev.of_node, 1, 0);
+ if (IS_ERR(ext_bridge))
+ return PTR_ERR(ext_bridge);
+
+ msm_dsi->external_bridge = ext_bridge;
+
+ encoder = msm_dsi->encoder;
+
+ /*
+ * First try to attach the bridge without it creating its own
+ * connector; currently some bridges support this, others do not,
+ * and some support both modes.
+ */
+ ret = drm_bridge_attach(encoder, ext_bridge, int_bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret == -EINVAL) {
+ /*
+ * Link the internal dsi bridge to the external bridge; the
+ * connector is created by the next bridge.
+ */
+ ret = drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
+ if (ret < 0)
+ return ret;
+ } else {
+ struct drm_connector *connector;
+
+ /* We are in charge of the connector, create one now. */
+ connector = drm_bridge_connector_init(dev, encoder);
+ if (IS_ERR(connector)) {
+ DRM_ERROR("Unable to create bridge connector\n");
+ return PTR_ERR(connector);
+ }
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* The pipeline is ready, ping encoders if necessary */
+ msm_dsi_manager_set_split_display(id);
+
+ return 0;
+}
+
+void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
+{
+ drm_bridge_remove(bridge);
+}
+
+int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ bool is_read = (msg->rx_buf && msg->rx_len);
+ bool need_sync = (IS_SYNC_NEEDED() && !is_read);
+ int ret;
+
+ if (!msg->tx_buf || !msg->tx_len)
+ return 0;
+
+ /*
+ * In the bonded-master case, the panel requires the same commands
+ * to be sent to both DSI links. The host issues the command trigger
+ * to both links when DSI_1 calls the cmd transfer function, no
+ * matter whether that happens before or after the DSI_0 transfer.
+ */
+ if (need_sync && (id == DSI_0))
+ return is_read ? msg->rx_len : msg->tx_len;
+
+ if (need_sync && msm_dsi0) {
+ ret = msm_dsi_host_xfer_prepare(msm_dsi0->host, msg);
+ if (ret) {
+ pr_err("%s: failed to prepare non-trigger host, %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+ ret = msm_dsi_host_xfer_prepare(host, msg);
+ if (ret) {
+ pr_err("%s: failed to prepare host, %d\n", __func__, ret);
+ goto restore_host0;
+ }
+
+ ret = is_read ? msm_dsi_host_cmd_rx(host, msg) :
+ msm_dsi_host_cmd_tx(host, msg);
+
+ msm_dsi_host_xfer_restore(host, msg);
+
+restore_host0:
+ if (need_sync && msm_dsi0)
+ msm_dsi_host_xfer_restore(msm_dsi0->host, msg);
+
+ return ret;
+}
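+
+/*
+ * Illustrative sequence (bonded, sync needed): a transfer issued on
+ * DSI_0 returns immediately with tx_len/rx_len; the DSI_1 call then
+ * prepares host0 and host1, performs the tx/rx (with the commit
+ * triggered on both links via msm_dsi_manager_cmd_xfer_trigger()),
+ * and restores both hosts in reverse order.
+ */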
+
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len)
+{
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
+ struct mipi_dsi_host *host = msm_dsi->host;
+
+ if (IS_SYNC_NEEDED() && (id == DSI_0))
+ return false;
+
+ if (IS_SYNC_NEEDED() && msm_dsi0)
+ msm_dsi_host_cmd_xfer_commit(msm_dsi0->host, dma_base, len);
+
+ msm_dsi_host_cmd_xfer_commit(host, dma_base, len);
+
+ return true;
+}
+
+int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
+{
+ struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
+ int id = msm_dsi->id;
+ int ret;
+
+ if (id >= DSI_MAX) {
+ pr_err("%s: invalid id %d\n", __func__, id);
+ return -EINVAL;
+ }
+
+ if (msm_dsim->dsi[id]) {
+ pr_err("%s: dsi%d already registered\n", __func__, id);
+ return -EBUSY;
+ }
+
+ msm_dsim->dsi[id] = msm_dsi;
+
+ ret = dsi_mgr_parse_of(msm_dsi->pdev->dev.of_node, id);
+ if (ret) {
+ pr_err("%s: failed to parse OF DSI info\n", __func__);
+ goto fail;
+ }
+
+ ret = dsi_mgr_setup_components(id);
+ if (ret) {
+ pr_err("%s: failed to register mipi dsi host for DSI %d: %d\n",
+ __func__, id, ret);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ msm_dsim->dsi[id] = NULL;
+ return ret;
+}
+
+void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi)
+{
+ struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
+
+ if (msm_dsi->host)
+ msm_dsi_host_unregister(msm_dsi->host);
+
+ if (msm_dsi->id >= 0)
+ msm_dsim->dsi[msm_dsi->id] = NULL;
+}
+
+bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi)
+{
+ return IS_BONDED_DSI();
+}
+
+bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi)
+{
+ return IS_MASTER_DSI_LINK(msm_dsi->id);
+}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h
new file mode 100644
index 000000000..8b1be69cc
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h
@@ -0,0 +1,227 @@
+#ifndef DSI_PHY_10NM_XML
+#define DSI_PHY_10NM_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID0 0x00000000
+
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID1 0x00000004
+
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID2 0x00000008
+
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID3 0x0000000c
+
+#define REG_DSI_10nm_PHY_CMN_CLK_CFG0 0x00000010
+
+#define REG_DSI_10nm_PHY_CMN_CLK_CFG1 0x00000014
+
+#define REG_DSI_10nm_PHY_CMN_GLBL_CTRL 0x00000018
+
+#define REG_DSI_10nm_PHY_CMN_RBUF_CTRL 0x0000001c
+
+#define REG_DSI_10nm_PHY_CMN_VREG_CTRL 0x00000020
+
+#define REG_DSI_10nm_PHY_CMN_CTRL_0 0x00000024
+
+#define REG_DSI_10nm_PHY_CMN_CTRL_1 0x00000028
+
+#define REG_DSI_10nm_PHY_CMN_CTRL_2 0x0000002c
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CFG0 0x00000030
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CFG1 0x00000034
+
+#define REG_DSI_10nm_PHY_CMN_PLL_CNTRL 0x00000038
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL0 0x00000098
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL1 0x0000009c
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL2 0x000000a0
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL3 0x000000a4
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL4 0x000000a8
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0 0x000000ac
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1 0x000000b0
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2 0x000000b4
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3 0x000000b8
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4 0x000000bc
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5 0x000000c0
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6 0x000000c4
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7 0x000000c8
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8 0x000000cc
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9 0x000000d0
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10 0x000000d4
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11 0x000000d8
+
+#define REG_DSI_10nm_PHY_CMN_PHY_STATUS 0x000000ec
+
+#define REG_DSI_10nm_PHY_CMN_LANE_STATUS0 0x000000f4
+
+#define REG_DSI_10nm_PHY_CMN_LANE_STATUS1 0x000000f8
+
+static inline uint32_t REG_DSI_10nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000014 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(uint32_t i0) { return 0x0000001c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(uint32_t i0) { return 0x00000020 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(uint32_t i0) { return 0x00000024 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000028 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x0000002c + 0x80*i0; }
+
+#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE 0x00000000
+
+#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO 0x00000004
+
+#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE 0x00000010
+
+#define REG_DSI_10nm_PHY_PLL_DSM_DIVIDER 0x0000001c
+
+#define REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER 0x00000020
+
+#define REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES 0x00000024
+
+#define REG_DSI_10nm_PHY_PLL_CMODE 0x0000002c
+
+#define REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS 0x00000030
+
+#define REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE 0x00000054
+
+#define REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE 0x00000064
+
+#define REG_DSI_10nm_PHY_PLL_PFILT 0x0000007c
+
+#define REG_DSI_10nm_PHY_PLL_IFILT 0x00000080
+
+#define REG_DSI_10nm_PHY_PLL_OUTDIV 0x00000094
+
+#define REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE 0x000000a4
+
+#define REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE 0x000000a8
+
+#define REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO 0x000000b4
+
+#define REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1 0x000000cc
+
+#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1 0x000000d0
+
+#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1 0x000000d4
+
+#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1 0x000000d8
+
+#define REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1 0x0000010c
+
+#define REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1 0x00000110
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1 0x00000114
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1 0x00000118
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1 0x0000011c
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1 0x00000120
+
+#define REG_DSI_10nm_PHY_PLL_SSC_CONTROL 0x0000013c
+
+#define REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE 0x00000140
+
+#define REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1 0x00000144
+
+#define REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1 0x0000014c
+
+#define REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1 0x00000154
+
+#define REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1 0x0000015c
+
+#define REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x00000164
+
+#define REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE 0x00000180
+
+#define REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY 0x00000184
+
+#define REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS 0x0000018c
+
+#define REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE 0x000001a0
+
+
+#endif /* DSI_PHY_10NM_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h
new file mode 100644
index 000000000..515f1fa60
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h
@@ -0,0 +1,309 @@
+#ifndef DSI_PHY_14NM_XML
+#define DSI_PHY_14NM_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID0 0x00000000
+
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID1 0x00000004
+
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID2 0x00000008
+
+#define REG_DSI_14nm_PHY_CMN_REVISION_ID3 0x0000000c
+
+#define REG_DSI_14nm_PHY_CMN_CLK_CFG0 0x00000010
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK 0x000000f0
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT 4
+static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK;
+}
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK 0x000000f0
+#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT 4
+static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK;
+}
+
+#define REG_DSI_14nm_PHY_CMN_CLK_CFG1 0x00000014
+#define DSI_14nm_PHY_CMN_CLK_CFG1_DSICLK_SEL 0x00000001
+
+#define REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL 0x00000018
+#define DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000004
+
+#define REG_DSI_14nm_PHY_CMN_CTRL_0 0x0000001c
+
+#define REG_DSI_14nm_PHY_CMN_CTRL_1 0x00000020
+
+#define REG_DSI_14nm_PHY_CMN_HW_TRIGGER 0x00000024
+
+#define REG_DSI_14nm_PHY_CMN_SW_CFG0 0x00000028
+
+#define REG_DSI_14nm_PHY_CMN_SW_CFG1 0x0000002c
+
+#define REG_DSI_14nm_PHY_CMN_SW_CFG2 0x00000030
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG0 0x00000034
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG1 0x00000038
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG2 0x0000003c
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG3 0x00000040
+
+#define REG_DSI_14nm_PHY_CMN_HW_CFG4 0x00000044
+
+#define REG_DSI_14nm_PHY_CMN_PLL_CNTRL 0x00000048
+#define DSI_14nm_PHY_CMN_PLL_CNTRL_PLL_START 0x00000001
+
+#define REG_DSI_14nm_PHY_CMN_LDO_CNTRL 0x0000004c
+#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK 0x0000003f
+#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT) & DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK 0x000000c0
+#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT 6
+static inline uint32_t DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT) & DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN 0x00000001
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_STR(uint32_t i0) { return 0x00000014 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(uint32_t i0) { return 0x00000018 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(uint32_t i0) { return 0x0000001c + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(uint32_t i0) { return 0x00000020 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(uint32_t i0) { return 0x00000024 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(uint32_t i0) { return 0x00000028 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(uint32_t i0) { return 0x0000002c + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK 0x00000007
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
+#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT 4
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(uint32_t i0) { return 0x00000030 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK 0x00000007
+#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(uint32_t i0) { return 0x00000034 + 0x80*i0; }
+#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
+#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
+static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+ return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(uint32_t i0) { return 0x00000038 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(uint32_t i0) { return 0x0000003c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_14nm_PHY_LN_VREG_CNTRL(uint32_t i0) { return 0x00000064 + 0x80*i0; }
+
+#define REG_DSI_14nm_PHY_PLL_IE_TRIM 0x00000000
+
+#define REG_DSI_14nm_PHY_PLL_IP_TRIM 0x00000004
+
+#define REG_DSI_14nm_PHY_PLL_IPTAT_TRIM 0x00000010
+
+#define REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN 0x0000001c
+
+#define REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET 0x00000028
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL 0x0000002c
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2 0x00000030
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL3 0x00000034
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL4 0x00000038
+
+#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5 0x0000003c
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1 0x00000040
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2 0x00000044
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT1 0x00000048
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT2 0x0000004c
+
+#define REG_DSI_14nm_PHY_PLL_VREF_CFG1 0x0000005c
+
+#define REG_DSI_14nm_PHY_PLL_KVCO_CODE 0x00000058
+
+#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1 0x0000006c
+
+#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2 0x00000070
+
+#define REG_DSI_14nm_PHY_PLL_VCO_COUNT1 0x00000074
+
+#define REG_DSI_14nm_PHY_PLL_VCO_COUNT2 0x00000078
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1 0x0000007c
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2 0x00000080
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3 0x00000084
+
+#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN 0x00000088
+
+#define REG_DSI_14nm_PHY_PLL_PLL_VCO_TUNE 0x0000008c
+
+#define REG_DSI_14nm_PHY_PLL_DEC_START 0x00000090
+
+#define REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER 0x00000094
+
+#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1 0x00000098
+
+#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2 0x0000009c
+
+#define REG_DSI_14nm_PHY_PLL_SSC_PER1 0x000000a0
+
+#define REG_DSI_14nm_PHY_PLL_SSC_PER2 0x000000a4
+
+#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1 0x000000a8
+
+#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2 0x000000ac
+
+#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1 0x000000b4
+
+#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2 0x000000b8
+
+#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3 0x000000bc
+
+#define REG_DSI_14nm_PHY_PLL_TXCLK_EN 0x000000c0
+
+#define REG_DSI_14nm_PHY_PLL_PLL_CRCTRL 0x000000c4
+
+#define REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS 0x000000cc
+
+#define REG_DSI_14nm_PHY_PLL_PLL_MISC1 0x000000e8
+
+#define REG_DSI_14nm_PHY_PLL_CP_SET_CUR 0x000000f0
+
+#define REG_DSI_14nm_PHY_PLL_PLL_ICPMSET 0x000000f4
+
+#define REG_DSI_14nm_PHY_PLL_PLL_ICPCSET 0x000000f8
+
+#define REG_DSI_14nm_PHY_PLL_PLL_ICP_SET 0x000000fc
+
+#define REG_DSI_14nm_PHY_PLL_PLL_LPF1 0x00000100
+
+#define REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV 0x00000104
+
+#define REG_DSI_14nm_PHY_PLL_PLL_BANDGAP 0x00000108
+
+
+#endif /* DSI_PHY_14NM_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h
new file mode 100644
index 000000000..81e4622eb
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h
@@ -0,0 +1,237 @@
+#ifndef DSI_PHY_20NM_XML
+#define DSI_PHY_20NM_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+static inline uint32_t REG_DSI_20nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_20nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; }
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_0 0x00000100
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_1 0x00000104
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_2 0x00000108
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_3 0x0000010c
+
+#define REG_DSI_20nm_PHY_LNCK_CFG_4 0x00000110
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_DATAPATH 0x00000114
+
+#define REG_DSI_20nm_PHY_LNCK_DEBUG_SEL 0x00000118
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_STR0 0x0000011c
+
+#define REG_DSI_20nm_PHY_LNCK_TEST_STR1 0x00000120
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_0 0x00000140
+#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_1 0x00000144
+#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_2 0x00000148
+#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_3 0x0000014c
+#define DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8 0x00000001
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_4 0x00000150
+#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_5 0x00000154
+#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_6 0x00000158
+#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_7 0x0000015c
+#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_8 0x00000160
+#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_9 0x00000164
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
+#define DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_10 0x00000168
+#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007
+#define DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+#define REG_DSI_20nm_PHY_TIMING_CTRL_11 0x0000016c
+#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
+#define DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
+static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+ return ((val) << DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+#define REG_DSI_20nm_PHY_CTRL_0 0x00000170
+
+#define REG_DSI_20nm_PHY_CTRL_1 0x00000174
+
+#define REG_DSI_20nm_PHY_CTRL_2 0x00000178
+
+#define REG_DSI_20nm_PHY_CTRL_3 0x0000017c
+
+#define REG_DSI_20nm_PHY_CTRL_4 0x00000180
+
+#define REG_DSI_20nm_PHY_STRENGTH_0 0x00000184
+
+#define REG_DSI_20nm_PHY_STRENGTH_1 0x00000188
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_0 0x000001b4
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_1 0x000001b8
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_2 0x000001bc
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_3 0x000001c0
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_4 0x000001c4
+
+#define REG_DSI_20nm_PHY_BIST_CTRL_5 0x000001c8
+
+#define REG_DSI_20nm_PHY_GLBL_TEST_CTRL 0x000001d4
+#define DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000001
+
+#define REG_DSI_20nm_PHY_LDO_CNTRL 0x000001dc
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_0 0x00000000
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_1 0x00000004
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_2 0x00000008
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_3 0x0000000c
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_4 0x00000010
+
+#define REG_DSI_20nm_PHY_REGULATOR_CTRL_5 0x00000014
+
+#define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018
+
+
+#endif /* DSI_PHY_20NM_XML */
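[Editor's sketch] Each TIMING_CTRL field above comes as a __MASK/__SHIFT pair plus an inline packer; fields that share a register are OR'd together before the MMIO write. A minimal sketch of that pattern — the helper name is illustrative, not the driver's actual code:

#include <linux/io.h>

/* Illustrative helper: pack both TA fields of TIMING_CTRL_9 and write them
 * in one MMIO access; the packers mask each value to its defined width. */
static void dsi_20nm_set_ta_timing(void __iomem *base, u32 ta_go, u32 ta_sure)
{
	u32 val = DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(ta_go) |
		  DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(ta_sure);

	writel(val, base + REG_DSI_20nm_PHY_TIMING_CTRL_9);
}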
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h
new file mode 100644
index 000000000..8c7db35c1
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h
@@ -0,0 +1,384 @@
+#ifndef DSI_PHY_28NM_XML
+#define DSI_PHY_28NM_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+static inline uint32_t REG_DSI_28nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_3(uint32_t i0) { return 0x0000000c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_CFG_4(uint32_t i0) { return 0x00000010 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000014 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_DEBUG_SEL(uint32_t i0) { return 0x00000018 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x0000001c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000020 + 0x40*i0; }
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_0 0x00000100
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_1 0x00000104
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_2 0x00000108
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_3 0x0000010c
+
+#define REG_DSI_28nm_PHY_LNCK_CFG_4 0x00000110
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_DATAPATH 0x00000114
+
+#define REG_DSI_28nm_PHY_LNCK_DEBUG_SEL 0x00000118
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_STR0 0x0000011c
+
+#define REG_DSI_28nm_PHY_LNCK_TEST_STR1 0x00000120
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_0 0x00000140
+#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_1 0x00000144
+#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_2 0x00000148
+#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_3 0x0000014c
+#define DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8 0x00000001
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_4 0x00000150
+#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_5 0x00000154
+#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_6 0x00000158
+#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_7 0x0000015c
+#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_8 0x00000160
+#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_9 0x00000164
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
+#define DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_10 0x00000168
+#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007
+#define DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+#define REG_DSI_28nm_PHY_TIMING_CTRL_11 0x0000016c
+#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
+#define DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+#define REG_DSI_28nm_PHY_CTRL_0 0x00000170
+
+#define REG_DSI_28nm_PHY_CTRL_1 0x00000174
+
+#define REG_DSI_28nm_PHY_CTRL_2 0x00000178
+
+#define REG_DSI_28nm_PHY_CTRL_3 0x0000017c
+
+#define REG_DSI_28nm_PHY_CTRL_4 0x00000180
+
+#define REG_DSI_28nm_PHY_STRENGTH_0 0x00000184
+
+#define REG_DSI_28nm_PHY_STRENGTH_1 0x00000188
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_0 0x000001b4
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_1 0x000001b8
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_2 0x000001bc
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_3 0x000001c0
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_4 0x000001c4
+
+#define REG_DSI_28nm_PHY_BIST_CTRL_5 0x000001c8
+
+#define REG_DSI_28nm_PHY_GLBL_TEST_CTRL 0x000001d4
+#define DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000001
+
+#define REG_DSI_28nm_PHY_LDO_CNTRL 0x000001dc
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_0 0x00000000
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_1 0x00000004
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_2 0x00000008
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_3 0x0000000c
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_4 0x00000010
+
+#define REG_DSI_28nm_PHY_REGULATOR_CTRL_5 0x00000014
+
+#define REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018
+
+#define REG_DSI_28nm_PHY_PLL_REFCLK_CFG 0x00000000
+#define DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR 0x00000001
+
+#define REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG 0x00000004
+
+#define REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG 0x00000008
+
+#define REG_DSI_28nm_PHY_PLL_VCOLPF_CFG 0x0000000c
+
+#define REG_DSI_28nm_PHY_PLL_VREG_CFG 0x00000010
+#define DSI_28nm_PHY_PLL_VREG_CFG_POSTDIV1_BYPASS_B 0x00000002
+
+#define REG_DSI_28nm_PHY_PLL_PWRGEN_CFG 0x00000014
+
+#define REG_DSI_28nm_PHY_PLL_DMUX_CFG 0x00000018
+
+#define REG_DSI_28nm_PHY_PLL_AMUX_CFG 0x0000001c
+
+#define REG_DSI_28nm_PHY_PLL_GLB_CFG 0x00000020
+#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B 0x00000001
+#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B 0x00000002
+#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B 0x00000004
+#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE 0x00000008
+
+#define REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG 0x00000024
+
+#define REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG 0x00000028
+
+#define REG_DSI_28nm_PHY_PLL_LPFR_CFG 0x0000002c
+
+#define REG_DSI_28nm_PHY_PLL_LPFC1_CFG 0x00000030
+
+#define REG_DSI_28nm_PHY_PLL_LPFC2_CFG 0x00000034
+
+#define REG_DSI_28nm_PHY_PLL_SDM_CFG0 0x00000038
+#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__MASK 0x0000003f
+#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__MASK;
+}
+#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP 0x00000040
+
+#define REG_DSI_28nm_PHY_PLL_SDM_CFG1 0x0000003c
+#define DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK 0x0000003f
+#define DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
+}
+#define DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__MASK 0x00000040
+#define DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__SHIFT 6
+static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__MASK;
+}
+
+#define REG_DSI_28nm_PHY_PLL_SDM_CFG2 0x00000040
+#define DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__MASK 0x000000ff
+#define DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__MASK;
+}
+
+#define REG_DSI_28nm_PHY_PLL_SDM_CFG3 0x00000044
+#define DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__MASK 0x000000ff
+#define DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(uint32_t val)
+{
+ return ((val) << DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__MASK;
+}
+
+#define REG_DSI_28nm_PHY_PLL_SDM_CFG4 0x00000048
+
+#define REG_DSI_28nm_PHY_PLL_SSC_CFG0 0x0000004c
+
+#define REG_DSI_28nm_PHY_PLL_SSC_CFG1 0x00000050
+
+#define REG_DSI_28nm_PHY_PLL_SSC_CFG2 0x00000054
+
+#define REG_DSI_28nm_PHY_PLL_SSC_CFG3 0x00000058
+
+#define REG_DSI_28nm_PHY_PLL_LKDET_CFG0 0x0000005c
+
+#define REG_DSI_28nm_PHY_PLL_LKDET_CFG1 0x00000060
+
+#define REG_DSI_28nm_PHY_PLL_LKDET_CFG2 0x00000064
+
+#define REG_DSI_28nm_PHY_PLL_TEST_CFG 0x00000068
+#define DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET 0x00000001
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG0 0x0000006c
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG1 0x00000070
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG2 0x00000074
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG3 0x00000078
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG4 0x0000007c
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG5 0x00000080
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG6 0x00000084
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG7 0x00000088
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG8 0x0000008c
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG9 0x00000090
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG10 0x00000094
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG11 0x00000098
+
+#define REG_DSI_28nm_PHY_PLL_EFUSE_CFG 0x0000009c
+
+#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS_SEL 0x000000a0
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_42 0x000000a4
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_43 0x000000a8
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_44 0x000000ac
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_45 0x000000b0
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_46 0x000000b4
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_47 0x000000b8
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_48 0x000000bc
+
+#define REG_DSI_28nm_PHY_PLL_STATUS 0x000000c0
+#define DSI_28nm_PHY_PLL_STATUS_PLL_RDY 0x00000001
+
+#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS0 0x000000c4
+
+#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS1 0x000000c8
+
+#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS2 0x000000cc
+
+#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS3 0x000000d0
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_54 0x000000d4
+
+
+#endif /* DSI_PHY_28NM_XML */
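[Editor's sketch] PLL_STATUS exposes a single ready bit, which lends itself to the kernel's iopoll helpers. A sketch, with the poll interval and timeout chosen arbitrarily here:

#include <linux/iopoll.h>

/* Hypothetical helper: poll PLL_STATUS until PLL_RDY is set; returns 0 on
 * lock or -ETIMEDOUT. The 100 us / 10 ms values are illustrative. */
static int dsi_28nm_pll_wait_ready(void __iomem *base)
{
	u32 status;

	return readl_poll_timeout(base + REG_DSI_28nm_PHY_PLL_STATUS, status,
				  status & DSI_28nm_PHY_PLL_STATUS_PLL_RDY,
				  100, 10000);
}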
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h
new file mode 100644
index 000000000..44eeca31a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h
@@ -0,0 +1,286 @@
+#ifndef DSI_PHY_28NM_8960_XML
+#define DSI_PHY_28NM_8960_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+static inline uint32_t REG_DSI_28nm_8960_PHY_LN(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_0(uint32_t i0) { return 0x00000000 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_1(uint32_t i0) { return 0x00000004 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_8960_PHY_LN_CFG_2(uint32_t i0) { return 0x00000008 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000000c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(uint32_t i0) { return 0x00000014 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(uint32_t i0) { return 0x00000018 + 0x40*i0; }
+
+#define REG_DSI_28nm_8960_PHY_LNCK_CFG_0 0x00000100
+
+#define REG_DSI_28nm_8960_PHY_LNCK_CFG_1 0x00000104
+
+#define REG_DSI_28nm_8960_PHY_LNCK_CFG_2 0x00000108
+
+#define REG_DSI_28nm_8960_PHY_LNCK_TEST_DATAPATH 0x0000010c
+
+#define REG_DSI_28nm_8960_PHY_LNCK_TEST_STR0 0x00000114
+
+#define REG_DSI_28nm_8960_PHY_LNCK_TEST_STR1 0x00000118
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_0 0x00000140
+#define DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__MASK 0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT 0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_1 0x00000144
+#define DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK 0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT 0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_2 0x00000148
+#define DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK 0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT 0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_3 0x0000014c
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_4 0x00000150
+#define DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT 0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_5 0x00000154
+#define DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT 0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_6 0x00000158
+#define DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT 0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_7 0x0000015c
+#define DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT 0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_8 0x00000160
+#define DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__SHIFT 0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_9 0x00000164
+#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__MASK 0x00000007
+#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__SHIFT 0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO__MASK;
+}
+#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__MASK 0x00000070
+#define DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__SHIFT 4
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_10 0x00000168
+#define DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__MASK 0x00000007
+#define DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__SHIFT 0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_TIMING_CTRL_11 0x0000016c
+#define DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff
+#define DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0
+static inline uint32_t DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
+{
+ return ((val) << DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD__MASK;
+}
+
+#define REG_DSI_28nm_8960_PHY_CTRL_0 0x00000170
+
+#define REG_DSI_28nm_8960_PHY_CTRL_1 0x00000174
+
+#define REG_DSI_28nm_8960_PHY_CTRL_2 0x00000178
+
+#define REG_DSI_28nm_8960_PHY_CTRL_3 0x0000017c
+
+#define REG_DSI_28nm_8960_PHY_STRENGTH_0 0x00000180
+
+#define REG_DSI_28nm_8960_PHY_STRENGTH_1 0x00000184
+
+#define REG_DSI_28nm_8960_PHY_STRENGTH_2 0x00000188
+
+#define REG_DSI_28nm_8960_PHY_BIST_CTRL_0 0x0000018c
+
+#define REG_DSI_28nm_8960_PHY_BIST_CTRL_1 0x00000190
+
+#define REG_DSI_28nm_8960_PHY_BIST_CTRL_2 0x00000194
+
+#define REG_DSI_28nm_8960_PHY_BIST_CTRL_3 0x00000198
+
+#define REG_DSI_28nm_8960_PHY_BIST_CTRL_4 0x0000019c
+
+#define REG_DSI_28nm_8960_PHY_LDO_CTRL 0x000001b0
+
+#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0 0x00000000
+
+#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1 0x00000004
+
+#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2 0x00000008
+
+#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3 0x0000000c
+
+#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4 0x00000010
+
+#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_5 0x00000014
+
+#define REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG 0x00000018
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER 0x00000028
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_0 0x0000002c
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_1 0x00000030
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2 0x00000034
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_0 0x00000038
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1 0x0000003c
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_2 0x00000040
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_3 0x00000044
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_4 0x00000048
+
+#define REG_DSI_28nm_8960_PHY_MISC_CAL_STATUS 0x00000050
+#define DSI_28nm_8960_PHY_MISC_CAL_STATUS_CAL_BUSY 0x00000010
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_0 0x00000000
+#define DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE 0x00000001
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_1 0x00000004
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_2 0x00000008
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_3 0x0000000c
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_4 0x00000010
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_5 0x00000014
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_6 0x00000018
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_7 0x0000001c
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_8 0x00000020
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_9 0x00000024
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_10 0x00000028
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_11 0x0000002c
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_12 0x00000030
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_13 0x00000034
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_14 0x00000038
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_15 0x0000003c
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_16 0x00000040
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_17 0x00000044
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_18 0x00000048
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_19 0x0000004c
+
+#define REG_DSI_28nm_8960_PHY_PLL_CTRL_20 0x00000050
+
+#define REG_DSI_28nm_8960_PHY_PLL_RDY 0x00000080
+#define DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY 0x00000001
+
+
+#endif /* DSI_PHY_28NM_8960_XML */
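[Editor's sketch] The REG_DSI_28nm_8960_PHY_LN_*() helpers fold the 0x40-byte per-lane stride into the returned offset, so per-lane setup reduces to a loop. A hedged sketch — the function name, and the idea of writing one CFG_0 value to every lane, are illustrative:

#include <linux/io.h>

/* Illustrative helper: apply the same CFG_0 value to each data lane,
 * letting the stride helper compute the lane offsets (0x40 bytes apart). */
static void dsi_8960_phy_init_lanes(void __iomem *base, int nlanes, u32 cfg0)
{
	int i;

	for (i = 0; i < nlanes; i++)
		writel(cfg0, base + REG_DSI_28nm_8960_PHY_LN_CFG_0(i));
}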
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h b/drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h
new file mode 100644
index 000000000..5bc061797
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h
@@ -0,0 +1,483 @@
+#ifndef DSI_PHY_7NM_XML
+#define DSI_PHY_7NM_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22)
+
+Copyright (C) 2013-2022 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define REG_DSI_7nm_PHY_CMN_REVISION_ID0 0x00000000
+
+#define REG_DSI_7nm_PHY_CMN_REVISION_ID1 0x00000004
+
+#define REG_DSI_7nm_PHY_CMN_REVISION_ID2 0x00000008
+
+#define REG_DSI_7nm_PHY_CMN_REVISION_ID3 0x0000000c
+
+#define REG_DSI_7nm_PHY_CMN_CLK_CFG0 0x00000010
+
+#define REG_DSI_7nm_PHY_CMN_CLK_CFG1 0x00000014
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_CTRL 0x00000018
+
+#define REG_DSI_7nm_PHY_CMN_RBUF_CTRL 0x0000001c
+
+#define REG_DSI_7nm_PHY_CMN_VREG_CTRL_0 0x00000020
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_0 0x00000024
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_1 0x00000028
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_2 0x0000002c
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_3 0x00000030
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CFG0 0x00000034
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CFG1 0x00000038
+
+#define REG_DSI_7nm_PHY_CMN_PLL_CNTRL 0x0000003c
+
+#define REG_DSI_7nm_PHY_CMN_DPHY_SOT 0x00000040
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL0 0x000000a0
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL1 0x000000a4
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL2 0x000000a8
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL3 0x000000ac
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL4 0x000000b0
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0 0x000000b4
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1 0x000000b8
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2 0x000000bc
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3 0x000000c0
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4 0x000000c4
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5 0x000000c8
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6 0x000000cc
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7 0x000000d0
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8 0x000000d4
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9 0x000000d8
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10 0x000000dc
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11 0x000000e0
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12 0x000000e4
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13 0x000000e8
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0 0x000000ec
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_1 0x000000f0
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL 0x000000f4
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL 0x000000f8
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_MID_CTRL 0x000000fc
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL 0x00000100
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0 0x00000104
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_1 0x00000108
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL 0x0000010c
+
+#define REG_DSI_7nm_PHY_CMN_VREG_CTRL_1 0x00000110
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_4 0x00000114
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4 0x00000128
+
+#define REG_DSI_7nm_PHY_CMN_PHY_STATUS 0x00000140
+
+#define REG_DSI_7nm_PHY_CMN_LANE_STATUS0 0x00000148
+
+#define REG_DSI_7nm_PHY_CMN_LANE_STATUS1 0x0000014c
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10 0x000001ac
+
+static inline uint32_t REG_DSI_7nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000000c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000010 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000014 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; }
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_ONE 0x00000000
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO 0x00000004
+
+#define REG_DSI_7nm_PHY_PLL_INT_LOOP_SETTINGS 0x00000008
+
+#define REG_DSI_7nm_PHY_PLL_INT_LOOP_SETTINGS_TWO 0x0000000c
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE 0x00000010
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FOUR 0x00000014
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE 0x00000018
+
+#define REG_DSI_7nm_PHY_PLL_INT_LOOP_CONTROLS 0x0000001c
+
+#define REG_DSI_7nm_PHY_PLL_DSM_DIVIDER 0x00000020
+
+#define REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER 0x00000024
+
+#define REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES 0x00000028
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_UPDATE_CONTROL_OVERRIDES 0x0000002c
+
+#define REG_DSI_7nm_PHY_PLL_CMODE 0x00000030
+
+#define REG_DSI_7nm_PHY_PLL_PSM_CTRL 0x00000034
+
+#define REG_DSI_7nm_PHY_PLL_RSM_CTRL 0x00000038
+
+#define REG_DSI_7nm_PHY_PLL_VCO_TUNE_MAP 0x0000003c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_CNTRL 0x00000040
+
+#define REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS 0x00000044
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_TIMER_LOW 0x00000048
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_TIMER_HIGH 0x0000004c
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS 0x00000050
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_MIN 0x00000054
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_MAX 0x00000058
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_PFILT 0x0000005c
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_IFILT 0x00000060
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_TWO 0x00000064
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE 0x00000068
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_FOUR 0x0000006c
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_ICODE_HIGH 0x00000070
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_ICODE_LOW 0x00000074
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE 0x00000078
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DETECT_THRESH 0x0000007c
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DET_REFCLK_HIGH 0x00000080
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DET_REFCLK_LOW 0x00000084
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DET_PLLCLK_HIGH 0x00000088
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DET_PLLCLK_LOW 0x0000008c
+
+#define REG_DSI_7nm_PHY_PLL_PFILT 0x00000090
+
+#define REG_DSI_7nm_PHY_PLL_IFILT 0x00000094
+
+#define REG_DSI_7nm_PHY_PLL_PLL_GAIN 0x00000098
+
+#define REG_DSI_7nm_PHY_PLL_ICODE_LOW 0x0000009c
+
+#define REG_DSI_7nm_PHY_PLL_ICODE_HIGH 0x000000a0
+
+#define REG_DSI_7nm_PHY_PLL_LOCKDET 0x000000a4
+
+#define REG_DSI_7nm_PHY_PLL_OUTDIV 0x000000a8
+
+#define REG_DSI_7nm_PHY_PLL_FASTLOCK_CONTROL 0x000000ac
+
+#define REG_DSI_7nm_PHY_PLL_PASS_OUT_OVERRIDE_ONE 0x000000b0
+
+#define REG_DSI_7nm_PHY_PLL_PASS_OUT_OVERRIDE_TWO 0x000000b4
+
+#define REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE 0x000000b8
+
+#define REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE 0x000000bc
+
+#define REG_DSI_7nm_PHY_PLL_RATE_CHANGE 0x000000c0
+
+#define REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS 0x000000c4
+
+#define REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO 0x000000c8
+
+#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START 0x000000cc
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW 0x000000d0
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID 0x000000d4
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH 0x000000d8
+
+#define REG_DSI_7nm_PHY_PLL_DEC_FRAC_MUXES 0x000000dc
+
+#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1 0x000000e0
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1 0x000000e4
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1 0x000000e8
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1 0x000000ec
+
+#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_2 0x000000f0
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_2 0x000000f4
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_2 0x000000f8
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_2 0x000000fc
+
+#define REG_DSI_7nm_PHY_PLL_MASH_CONTROL 0x00000100
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW 0x00000104
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH 0x00000108
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW 0x0000010c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH 0x00000110
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW 0x00000114
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH 0x00000118
+
+#define REG_DSI_7nm_PHY_PLL_SSC_MUX_CONTROL 0x0000011c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1 0x00000120
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1 0x00000124
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1 0x00000128
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1 0x0000012c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1 0x00000130
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1 0x00000134
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_2 0x00000138
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_2 0x0000013c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_2 0x00000140
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_2 0x00000144
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_2 0x00000148
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_2 0x0000014c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_CONTROL 0x00000150
+
+#define REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE 0x00000154
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1 0x00000158
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_2 0x0000015c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1 0x00000160
+
+#define REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_2 0x00000164
+
+#define REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1 0x00000168
+
+#define REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_2 0x0000016c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1 0x00000170
+
+#define REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_2 0x00000174
+
+#define REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x00000178
+
+#define REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_2 0x0000017c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_FASTLOCK_EN_BAND 0x00000180
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MID 0x00000184
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_HIGH 0x00000188
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MUX 0x0000018c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE 0x00000190
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY 0x00000194
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_MIN_DELAY 0x00000198
+
+#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS 0x0000019c
+
+#define REG_DSI_7nm_PHY_PLL_SPARE_AND_JPC_OVERRIDES 0x000001a0
+
+#define REG_DSI_7nm_PHY_PLL_BIAS_CONTROL_1 0x000001a4
+
+#define REG_DSI_7nm_PHY_PLL_BIAS_CONTROL_2 0x000001a8
+
+#define REG_DSI_7nm_PHY_PLL_ALOG_OBSV_BUS_CTRL_1 0x000001ac
+
+#define REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE 0x000001b0
+
+#define REG_DSI_7nm_PHY_PLL_COMMON_STATUS_TWO 0x000001b4
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL 0x000001b8
+
+#define REG_DSI_7nm_PHY_PLL_ICODE_ACCUM_STATUS_LOW 0x000001bc
+
+#define REG_DSI_7nm_PHY_PLL_ICODE_ACCUM_STATUS_HIGH 0x000001c0
+
+#define REG_DSI_7nm_PHY_PLL_FD_OUT_LOW 0x000001c4
+
+#define REG_DSI_7nm_PHY_PLL_FD_OUT_HIGH 0x000001c8
+
+#define REG_DSI_7nm_PHY_PLL_ALOG_OBSV_BUS_STATUS_1 0x000001cc
+
+#define REG_DSI_7nm_PHY_PLL_PLL_MISC_CONFIG 0x000001d0
+
+#define REG_DSI_7nm_PHY_PLL_FLL_CONFIG 0x000001d4
+
+#define REG_DSI_7nm_PHY_PLL_FLL_FREQ_ACQ_TIME 0x000001d8
+
+#define REG_DSI_7nm_PHY_PLL_FLL_CODE0 0x000001dc
+
+#define REG_DSI_7nm_PHY_PLL_FLL_CODE1 0x000001e0
+
+#define REG_DSI_7nm_PHY_PLL_FLL_GAIN0 0x000001e4
+
+#define REG_DSI_7nm_PHY_PLL_FLL_GAIN1 0x000001e8
+
+#define REG_DSI_7nm_PHY_PLL_SW_RESET 0x000001ec
+
+#define REG_DSI_7nm_PHY_PLL_FAST_PWRUP 0x000001f0
+
+#define REG_DSI_7nm_PHY_PLL_LOCKTIME0 0x000001f4
+
+#define REG_DSI_7nm_PHY_PLL_LOCKTIME1 0x000001f8
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS_SEL 0x000001fc
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS0 0x00000200
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS1 0x00000204
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS2 0x00000208
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS3 0x0000020c
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_FLL_CONTROL_OVERRIDES 0x00000210
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG 0x00000214
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CAL_CODE1_MODE0_STATUS 0x00000218
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CAL_CODE1_MODE1_STATUS 0x0000021c
+
+#define REG_DSI_7nm_PHY_PLL_RESET_SM_STATUS 0x00000220
+
+#define REG_DSI_7nm_PHY_PLL_TDC_OFFSET 0x00000224
+
+#define REG_DSI_7nm_PHY_PLL_PS3_PWRDOWN_CONTROLS 0x00000228
+
+#define REG_DSI_7nm_PHY_PLL_PS4_PWRDOWN_CONTROLS 0x0000022c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_RST_CONTROLS 0x00000230
+
+#define REG_DSI_7nm_PHY_PLL_GEAR_BAND_SELECT_CONTROLS 0x00000234
+
+#define REG_DSI_7nm_PHY_PLL_PSM_CLK_CONTROLS 0x00000238
+
+#define REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES_2 0x0000023c
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1 0x00000240
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG_2 0x00000244
+
+#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_1 0x00000248
+
+#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_2 0x0000024c
+
+#define REG_DSI_7nm_PHY_PLL_CMODE_1 0x00000250
+
+#define REG_DSI_7nm_PHY_PLL_CMODE_2 0x00000254
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1 0x00000258
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_2 0x0000025c
+
+#define REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE 0x00000260
+
+
+#endif /* DSI_PHY_7NM_XML */
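[Editor's sketch] The 7nm fractional divider spans three byte-wide registers. Assuming a low-to-high split in 8-bit chunks (suggested by the _LOW/_MID/_HIGH naming; the authoritative field widths live in the PHY driver, not in this header), programming it might look like:

#include <linux/io.h>

/* Hypothetical helper: split an N.frac divider across the DECIMAL register
 * and the three FRAC byte registers. The 8-bit chunking is an assumption. */
static void dsi_7nm_pll_set_dividers(void __iomem *base, u32 dec, u32 frac)
{
	writel(dec, base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START);
	writel(frac & 0xff, base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW);
	writel((frac >> 8) & 0xff, base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID);
	writel((frac >> 16) & 0xff, base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH);
}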
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
new file mode 100644
index 000000000..03bc322d0
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -0,0 +1,131 @@
+#ifndef MMSS_CC_XML
+#define MMSS_CC_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mmss_cc_clk {
+ CLK = 0,
+ PCLK = 1,
+};
+
+#define REG_MMSS_CC_AHB 0x00000008
+
+static inline uint32_t __offset_CLK(enum mmss_cc_clk idx)
+{
+ switch (idx) {
+ case CLK: return 0x0000004c;
+ case PCLK: return 0x00000130;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MMSS_CC_CLK(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); }
+
+static inline uint32_t REG_MMSS_CC_CLK_CC(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); }
+#define MMSS_CC_CLK_CC_CLK_EN 0x00000001
+#define MMSS_CC_CLK_CC_ROOT_EN 0x00000004
+#define MMSS_CC_CLK_CC_MND_EN 0x00000020
+#define MMSS_CC_CLK_CC_MND_MODE__MASK 0x000000c0
+#define MMSS_CC_CLK_CC_MND_MODE__SHIFT 6
+static inline uint32_t MMSS_CC_CLK_CC_MND_MODE(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_CC_MND_MODE__SHIFT) & MMSS_CC_CLK_CC_MND_MODE__MASK;
+}
+#define MMSS_CC_CLK_CC_PMXO_SEL__MASK 0x00000300
+#define MMSS_CC_CLK_CC_PMXO_SEL__SHIFT 8
+static inline uint32_t MMSS_CC_CLK_CC_PMXO_SEL(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_CC_PMXO_SEL__SHIFT) & MMSS_CC_CLK_CC_PMXO_SEL__MASK;
+}
+
+static inline uint32_t REG_MMSS_CC_CLK_MD(enum mmss_cc_clk i0) { return 0x00000004 + __offset_CLK(i0); }
+#define MMSS_CC_CLK_MD_D__MASK 0x000000ff
+#define MMSS_CC_CLK_MD_D__SHIFT 0
+static inline uint32_t MMSS_CC_CLK_MD_D(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_MD_D__SHIFT) & MMSS_CC_CLK_MD_D__MASK;
+}
+#define MMSS_CC_CLK_MD_M__MASK 0x0000ff00
+#define MMSS_CC_CLK_MD_M__SHIFT 8
+static inline uint32_t MMSS_CC_CLK_MD_M(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_MD_M__SHIFT) & MMSS_CC_CLK_MD_M__MASK;
+}
+
+static inline uint32_t REG_MMSS_CC_CLK_NS(enum mmss_cc_clk i0) { return 0x00000008 + __offset_CLK(i0); }
+#define MMSS_CC_CLK_NS_SRC__MASK 0x0000000f
+#define MMSS_CC_CLK_NS_SRC__SHIFT 0
+static inline uint32_t MMSS_CC_CLK_NS_SRC(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_NS_SRC__SHIFT) & MMSS_CC_CLK_NS_SRC__MASK;
+}
+#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK 0x00fff000
+#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT 12
+static inline uint32_t MMSS_CC_CLK_NS_PRE_DIV_FUNC(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT) & MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK;
+}
+#define MMSS_CC_CLK_NS_VAL__MASK 0xff000000
+#define MMSS_CC_CLK_NS_VAL__SHIFT 24
+static inline uint32_t MMSS_CC_CLK_NS_VAL(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_NS_VAL__SHIFT) & MMSS_CC_CLK_NS_VAL__MASK;
+}
+
+#define REG_MMSS_CC_DSI2_PIXEL_CC 0x00000094
+
+#define REG_MMSS_CC_DSI2_PIXEL_NS 0x000000e4
+
+#define REG_MMSS_CC_DSI2_PIXEL_CC2 0x00000264
+
+
+#endif /* MMSS_CC_XML */
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
new file mode 100644
index 000000000..62bc3756f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -0,0 +1,855 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/phy/phy.h>
+
+#include "dsi_phy.h"
+
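+/*
+ * Division that rounds away from zero for either sign, e.g.
+ * S_DIV_ROUND_UP(7, 2) == 4 and S_DIV_ROUND_UP(-7, 2) == -4 (C integer
+ * division truncates toward zero, so the negative branch biases by
+ * -(d - 1) before dividing).
+ */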
+#define S_DIV_ROUND_UP(n, d) \
+ (((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
+
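+/*
+ * Interpolate "percent" of the way from tmin toward tmax, rounding up and
+ * clamping to min_result; when "even" is set, odd results are rounded down
+ * to the nearest even value. For example, linear_inter(100, 50, 10, 0, false)
+ * yields 50 + S_DIV_ROUND_UP(50 * 10, 100) = 55.
+ */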
+static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
+ s32 min_result, bool even)
+{
+ s32 v;
+
+ v = (tmax - tmin) * percent;
+ v = S_DIV_ROUND_UP(v, 100) + tmin;
+ if (even && (v & 0x1))
+ return max_t(s32, min_result, v - 1);
+ else
+ return max_t(s32, min_result, v);
+}
+
+static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
+ s32 ui, s32 coeff, s32 pcnt)
+{
+ s32 tmax, tmin, clk_z;
+ s32 temp;
+
+ /* reset */
+ temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+ if (tmin > 255) {
+ tmax = 511;
+ clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
+ } else {
+ tmax = 255;
+ clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
+ }
+
+	/* adjust so that hs_rqst + clk_prepare + clk_zero is a multiple of 8 */
+ temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
+ timing->clk_zero = clk_z + 8 - temp;
+}
+
+int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ const unsigned long bit_rate = clk_req->bitclk_rate;
+ const unsigned long esc_rate = clk_req->escclk_rate;
+ s32 ui, lpx;
+ s32 tmax, tmin;
+ s32 pcnt0 = 10;
+ s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
+ s32 pcnt2 = 10;
+ s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
+ s32 coeff = 1000; /* Precision, should avoid overflow */
+ s32 temp;
+
+ if (!bit_rate || !esc_rate)
+ return -EINVAL;
+
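+	/* ui is the bit period and lpx the escape-clock period, both in
+	 * nanoseconds scaled up by coeff to keep the integer math precise
+	 */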
+ ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+ lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
+
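+	/* clk_prepare: the MIPI D-PHY spec bounds TCLK-PREPARE to 38..95 ns */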
+ tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
+ tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
+ timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);
+
+ temp = lpx / ui;
+ if (temp & 0x1)
+ timing->hs_rqst = temp;
+ else
+ timing->hs_rqst = max_t(s32, 0, temp - 2);
+
+ /* Calculate clk_zero after clk_prepare and hs_rqst */
+ dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);
+
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+ tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
+ timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
+
+ temp = 85 * coeff + 6 * ui;
+ tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+ temp = 40 * coeff + 4 * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+ timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);
+
+ tmax = 255;
+ temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
+ temp = 145 * coeff + 10 * ui - temp;
+ tmin = S_DIV_ROUND_UP(temp, ui) - 2;
+ timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);
+
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = S_DIV_ROUND_UP(temp, ui) - 2;
+ temp = 60 * coeff + 4 * ui;
+ tmin = DIV_ROUND_UP(temp, ui) - 2;
+ timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
+
+ tmax = 255;
+ tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
+ timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);
+
+ tmax = 63;
+ temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
+ temp = 60 * coeff + 52 * ui - 24 * ui - temp;
+ tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
+ timing->shared_timings.clk_post = linear_inter(tmax, tmin, pcnt2, 0,
+ false);
+ tmax = 63;
+ temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
+ temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
+ temp += 8 * ui + lpx;
+ tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
+ if (tmin > tmax) {
+ temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
+ timing->shared_timings.clk_pre = temp >> 1;
+ timing->shared_timings.clk_pre_inc_by_2 = true;
+ } else {
+ timing->shared_timings.clk_pre =
+ linear_inter(tmax, tmin, pcnt2, 0, false);
+ timing->shared_timings.clk_pre_inc_by_2 = false;
+ }
+
+ timing->ta_go = 3;
+ timing->ta_sure = 0;
+ timing->ta_get = 4;
+
+ DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+ timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+ timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
+ timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+ timing->hs_zero, timing->hs_prepare, timing->hs_trail,
+ timing->hs_rqst);
+
+ return 0;
+}
+
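+/*
+ * v2 timings: the halfbyte-enable and HS prepare-delay knobs are folded
+ * into the nanosecond budgets as val = (hb_en << 2) + (pd << 1) UI
+ * corrections before interpolating.
+ */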
+int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ const unsigned long bit_rate = clk_req->bitclk_rate;
+ const unsigned long esc_rate = clk_req->escclk_rate;
+ s32 ui, ui_x8;
+ s32 tmax, tmin;
+ s32 pcnt0 = 50;
+ s32 pcnt1 = 50;
+ s32 pcnt2 = 10;
+ s32 pcnt3 = 30;
+ s32 pcnt4 = 10;
+ s32 pcnt5 = 2;
+ s32 coeff = 1000; /* Precision, should avoid overflow */
+ s32 hb_en, hb_en_ckln, pd_ckln, pd;
+ s32 val, val_ckln;
+ s32 temp;
+
+ if (!bit_rate || !esc_rate)
+ return -EINVAL;
+
+ timing->hs_halfbyte_en = 0;
+ hb_en = 0;
+ timing->hs_halfbyte_en_ckln = 0;
+ hb_en_ckln = 0;
+ timing->hs_prep_dly_ckln = (bit_rate > 100000000) ? 0 : 3;
+ pd_ckln = timing->hs_prep_dly_ckln;
+ timing->hs_prep_dly = (bit_rate > 120000000) ? 0 : 1;
+ pd = timing->hs_prep_dly;
+
+ val = (hb_en << 2) + (pd << 1);
+ val_ckln = (hb_en_ckln << 2) + (pd_ckln << 1);
+
+ ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+ ui_x8 = ui << 3;
+
+ temp = S_DIV_ROUND_UP(38 * coeff - val_ckln * ui, ui_x8);
+ tmin = max_t(s32, temp, 0);
+ temp = (95 * coeff - val_ckln * ui) / ui_x8;
+ tmax = max_t(s32, temp, 0);
+ timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);
+
+ temp = 300 * coeff - ((timing->clk_prepare << 3) + val_ckln) * ui;
+ tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
+ tmax = (tmin > 255) ? 511 : 255;
+ timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);
+
+ tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = (temp + 3 * ui) / ui_x8;
+ timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+ temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui - val * ui, ui_x8);
+ tmin = max_t(s32, temp, 0);
+ temp = (85 * coeff + 6 * ui - val * ui) / ui_x8;
+ tmax = max_t(s32, temp, 0);
+ timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);
+
+ temp = 145 * coeff + 10 * ui - ((timing->hs_prepare << 3) + val) * ui;
+ tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
+ tmax = 255;
+ timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);
+
+ tmin = DIV_ROUND_UP(60 * coeff + 4 * ui + 3 * ui, ui_x8);
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = (temp + 3 * ui) / ui_x8;
+ timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+ temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
+ timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
+
+ tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
+ tmax = 255;
+ timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);
+
+ temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
+ timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);
+
+ temp = 60 * coeff + 52 * ui - 43 * ui;
+ tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = 63;
+ timing->shared_timings.clk_post =
+ linear_inter(tmax, tmin, pcnt2, 0, false);
+
+ temp = 8 * ui + ((timing->clk_prepare << 3) + val_ckln) * ui;
+ temp += (((timing->clk_zero + 3) << 3) + 11 - (pd_ckln << 1)) * ui;
+ temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
+ (((timing->hs_rqst_ckln << 3) + 8) * ui);
+ tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = 63;
+ if (tmin > tmax) {
+ temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
+ timing->shared_timings.clk_pre = temp >> 1;
+ timing->shared_timings.clk_pre_inc_by_2 = 1;
+ } else {
+ timing->shared_timings.clk_pre =
+ linear_inter(tmax, tmin, pcnt2, 0, false);
+ timing->shared_timings.clk_pre_inc_by_2 = 0;
+ }
+
+ timing->ta_go = 3;
+ timing->ta_sure = 0;
+ timing->ta_get = 4;
+
+ DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+ timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+ timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
+ timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+ timing->hs_zero, timing->hs_prepare, timing->hs_trail,
+ timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
+ timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
+ timing->hs_prep_dly_ckln);
+
+ return 0;
+}
+
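+/*
+ * v3 timings follow the same structure as v2 but with the prepare-delay
+ * corrections dropped (hb_en is hard-coded to 0 here).
+ */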
+int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ const unsigned long bit_rate = clk_req->bitclk_rate;
+ const unsigned long esc_rate = clk_req->escclk_rate;
+ s32 ui, ui_x8;
+ s32 tmax, tmin;
+ s32 pcnt0 = 50;
+ s32 pcnt1 = 50;
+ s32 pcnt2 = 10;
+ s32 pcnt3 = 30;
+ s32 pcnt4 = 10;
+ s32 pcnt5 = 2;
+ s32 coeff = 1000; /* Precision, should avoid overflow */
+ s32 hb_en, hb_en_ckln;
+ s32 temp;
+
+ if (!bit_rate || !esc_rate)
+ return -EINVAL;
+
+ timing->hs_halfbyte_en = 0;
+ hb_en = 0;
+ timing->hs_halfbyte_en_ckln = 0;
+ hb_en_ckln = 0;
+
+ ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+ ui_x8 = ui << 3;
+
+ temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
+ tmin = max_t(s32, temp, 0);
+ temp = (95 * coeff) / ui_x8;
+ tmax = max_t(s32, temp, 0);
+ timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);
+
+ temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = (tmin > 255) ? 511 : 255;
+ timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);
+
+ tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = (temp + 3 * ui) / ui_x8;
+ timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+ temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
+ tmin = max_t(s32, temp, 0);
+ temp = (85 * coeff + 6 * ui) / ui_x8;
+ tmax = max_t(s32, temp, 0);
+ timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);
+
+ temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = 255;
+ timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);
+
+ tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = (temp / ui_x8) - 1;
+ timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+ temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
+ timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
+
+ tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
+ tmax = 255;
+ timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);
+
+ temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
+ timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);
+
+ temp = 60 * coeff + 52 * ui - 43 * ui;
+ tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = 63;
+ timing->shared_timings.clk_post =
+ linear_inter(tmax, tmin, pcnt2, 0, false);
+
+ temp = 8 * ui + (timing->clk_prepare << 3) * ui;
+ temp += (((timing->clk_zero + 3) << 3) + 11) * ui;
+ temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
+ (((timing->hs_rqst_ckln << 3) + 8) * ui);
+ tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = 63;
+ if (tmin > tmax) {
+ temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
+ timing->shared_timings.clk_pre = temp >> 1;
+ timing->shared_timings.clk_pre_inc_by_2 = 1;
+ } else {
+ timing->shared_timings.clk_pre =
+ linear_inter(tmax, tmin, pcnt2, 0, false);
+ timing->shared_timings.clk_pre_inc_by_2 = 0;
+ }
+
+ timing->ta_go = 3;
+ timing->ta_sure = 0;
+ timing->ta_get = 4;
+
+ DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+ timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+ timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
+ timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+ timing->hs_zero, timing->hs_prepare, timing->hs_trail,
+ timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
+ timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
+ timing->hs_prep_dly_ckln);
+
+ return 0;
+}
+
+int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ const unsigned long bit_rate = clk_req->bitclk_rate;
+ const unsigned long esc_rate = clk_req->escclk_rate;
+ s32 ui, ui_x8;
+ s32 tmax, tmin;
+ s32 pcnt_clk_prep = 50;
+ s32 pcnt_clk_zero = 2;
+ s32 pcnt_clk_trail = 30;
+ s32 pcnt_hs_prep = 50;
+ s32 pcnt_hs_zero = 10;
+ s32 pcnt_hs_trail = 30;
+ s32 pcnt_hs_exit = 10;
+ s32 coeff = 1000; /* Precision, should avoid overflow */
+ s32 hb_en;
+ s32 temp;
+
+ if (!bit_rate || !esc_rate)
+ return -EINVAL;
+
+ hb_en = 0;
+
+ ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+ ui_x8 = ui << 3;
+
+	/* TODO: verify these calculations against the latest downstream driver.
+	 * Everything except clk_post/clk_pre uses the v3 calculations, since
+	 * the downstream driver has the same calculations for v3 and v4.
+	 */
+
+ temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
+ tmin = max_t(s32, temp, 0);
+ temp = (95 * coeff) / ui_x8;
+ tmax = max_t(s32, temp, 0);
+ timing->clk_prepare = linear_inter(tmax, tmin, pcnt_clk_prep, 0, false);
+
+ temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = (tmin > 255) ? 511 : 255;
+ timing->clk_zero = linear_inter(tmax, tmin, pcnt_clk_zero, 0, false);
+
+ tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = (temp + 3 * ui) / ui_x8;
+ timing->clk_trail = linear_inter(tmax, tmin, pcnt_clk_trail, 0, false);
+
+ temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
+ tmin = max_t(s32, temp, 0);
+ temp = (85 * coeff + 6 * ui) / ui_x8;
+ tmax = max_t(s32, temp, 0);
+ timing->hs_prepare = linear_inter(tmax, tmin, pcnt_hs_prep, 0, false);
+
+ temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = 255;
+ timing->hs_zero = linear_inter(tmax, tmin, pcnt_hs_zero, 0, false);
+
+ tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = (temp / ui_x8) - 1;
+ timing->hs_trail = linear_inter(tmax, tmin, pcnt_hs_trail, 0, false);
+
+ temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
+ timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
+
+ tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
+ tmax = 255;
+ timing->hs_exit = linear_inter(tmax, tmin, pcnt_hs_exit, 0, false);
+
+ /* recommended min
+ * = roundup((mipi_min_ns + t_hs_trail_ns)/(16*bit_clk_ns), 0) - 1
+ */
+	temp = 60 * coeff + 52 * ui + (timing->hs_trail + 1) * ui_x8;
+ tmin = DIV_ROUND_UP(temp, 16 * ui) - 1;
+ tmax = 255;
+ timing->shared_timings.clk_post = linear_inter(tmax, tmin, 5, 0, false);
+
+ /* recommended min
+ * val1 = (tlpx_ns + clk_prepare_ns + clk_zero_ns + hs_rqst_ns)
+ * val2 = (16 * bit_clk_ns)
+ * final = roundup(val1/val2, 0) - 1
+ */
+ temp = 52 * coeff + (timing->clk_prepare + timing->clk_zero + 1) * ui_x8 + 54 * coeff;
+ tmin = DIV_ROUND_UP(temp, 16 * ui) - 1;
+ tmax = 255;
+ timing->shared_timings.clk_pre = DIV_ROUND_UP((tmax - tmin) * 125, 10000) + tmin;
+
+ DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+ timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+ timing->clk_zero, timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+ timing->hs_zero, timing->hs_prepare, timing->hs_trail, timing->hs_rqst);
+
+ return 0;
+}
+
+int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ const unsigned long bit_rate = clk_req->bitclk_rate;
+ const unsigned long esc_rate = clk_req->escclk_rate;
+ s32 ui, ui_x7;
+ s32 tmax, tmin;
+ s32 coeff = 1000; /* Precision, should avoid overflow */
+ s32 temp;
+
+ if (!bit_rate || !esc_rate)
+ return -EINVAL;
+
+ ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+ ui_x7 = ui * 7;
+
+ temp = S_DIV_ROUND_UP(38 * coeff, ui_x7);
+ tmin = max_t(s32, temp, 0);
+ temp = (95 * coeff) / ui_x7;
+ tmax = max_t(s32, temp, 0);
+ timing->clk_prepare = linear_inter(tmax, tmin, 50, 0, false);
+
+ tmin = DIV_ROUND_UP(50 * coeff, ui_x7);
+ tmax = 255;
+ timing->hs_rqst = linear_inter(tmax, tmin, 1, 0, false);
+
+ tmin = DIV_ROUND_UP(100 * coeff, ui_x7) - 1;
+ tmax = 255;
+ timing->hs_exit = linear_inter(tmax, tmin, 10, 0, false);
+
+ tmin = 1;
+ tmax = 32;
+ timing->shared_timings.clk_post = linear_inter(tmax, tmin, 80, 0, false);
+
+ tmin = min_t(s32, 64, S_DIV_ROUND_UP(262 * coeff, ui_x7) - 1);
+ tmax = 64;
+ timing->shared_timings.clk_pre = linear_inter(tmax, tmin, 20, 0, false);
+
+ DBG("%d, %d, %d, %d, %d",
+ timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+ timing->clk_prepare, timing->hs_exit, timing->hs_rqst);
+
+ return 0;
+}
+
+static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
+{
+ struct device *dev = &phy->pdev->dev;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(phy->ahb_clk);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
+ pm_runtime_put_sync(dev);
+ }
+
+ return ret;
+}
+
+static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
+{
+ clk_disable_unprepare(phy->ahb_clk);
+ pm_runtime_put(&phy->pdev->dev);
+}
+
+static const struct of_device_id dsi_phy_dt_match[] = {
+#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
+ { .compatible = "qcom,dsi-phy-28nm-hpm",
+ .data = &dsi_phy_28nm_hpm_cfgs },
+ { .compatible = "qcom,dsi-phy-28nm-hpm-fam-b",
+ .data = &dsi_phy_28nm_hpm_famb_cfgs },
+ { .compatible = "qcom,dsi-phy-28nm-lp",
+ .data = &dsi_phy_28nm_lp_cfgs },
+#endif
+#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
+ { .compatible = "qcom,dsi-phy-20nm",
+ .data = &dsi_phy_20nm_cfgs },
+#endif
+#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
+ { .compatible = "qcom,dsi-phy-28nm-8960",
+ .data = &dsi_phy_28nm_8960_cfgs },
+#endif
+#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY
+ { .compatible = "qcom,dsi-phy-14nm",
+ .data = &dsi_phy_14nm_cfgs },
+ { .compatible = "qcom,dsi-phy-14nm-660",
+ .data = &dsi_phy_14nm_660_cfgs },
+ { .compatible = "qcom,dsi-phy-14nm-8953",
+ .data = &dsi_phy_14nm_8953_cfgs },
+#endif
+#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
+ { .compatible = "qcom,dsi-phy-10nm",
+ .data = &dsi_phy_10nm_cfgs },
+ { .compatible = "qcom,dsi-phy-10nm-8998",
+ .data = &dsi_phy_10nm_8998_cfgs },
+#endif
+#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
+ { .compatible = "qcom,dsi-phy-7nm",
+ .data = &dsi_phy_7nm_cfgs },
+ { .compatible = "qcom,dsi-phy-7nm-8150",
+ .data = &dsi_phy_7nm_8150_cfgs },
+ { .compatible = "qcom,sc7280-dsi-phy-7nm",
+ .data = &dsi_phy_7nm_7280_cfgs },
+#endif
+ {}
+};
+
+/*
+ * Currently, we only support one SoC for each PHY type. When we have multiple
+ * SoCs for the same PHY, we can try to make the index searching a bit more
+ * clever.
+ */
+static int dsi_phy_get_id(struct msm_dsi_phy *phy)
+{
+ struct platform_device *pdev = phy->pdev;
+ const struct msm_dsi_phy_cfg *cfg = phy->cfg;
+ struct resource *res;
+ int i;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_phy");
+ if (!res)
+ return -EINVAL;
+
+ for (i = 0; i < cfg->num_dsi_phy; i++) {
+ if (cfg->io_start[i] == res->start)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int dsi_phy_driver_probe(struct platform_device *pdev)
+{
+ struct msm_dsi_phy *phy;
+ struct device *dev = &pdev->dev;
+ u32 phy_type;
+ int ret;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->provided_clocks = devm_kzalloc(dev,
+ struct_size(phy->provided_clocks, hws, NUM_PROVIDED_CLKS),
+ GFP_KERNEL);
+ if (!phy->provided_clocks)
+ return -ENOMEM;
+
+ phy->provided_clocks->num = NUM_PROVIDED_CLKS;
+
+ phy->cfg = of_device_get_match_data(&pdev->dev);
+ if (!phy->cfg)
+ return -ENODEV;
+
+ phy->pdev = pdev;
+
+ phy->id = dsi_phy_get_id(phy);
+ if (phy->id < 0)
+ return dev_err_probe(dev, phy->id,
+ "Couldn't identify PHY index\n");
+
+ phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
+ "qcom,dsi-phy-regulator-ldo-mode");
+ if (!of_property_read_u32(dev->of_node, "phy-type", &phy_type))
+ phy->cphy_mode = (phy_type == PHY_TYPE_CPHY);
+
+ phy->base = msm_ioremap_size(pdev, "dsi_phy", &phy->base_size);
+ if (IS_ERR(phy->base))
+ return dev_err_probe(dev, PTR_ERR(phy->base),
+ "Failed to map phy base\n");
+
+ phy->pll_base = msm_ioremap_size(pdev, "dsi_pll", &phy->pll_size);
+ if (IS_ERR(phy->pll_base))
+ return dev_err_probe(dev, PTR_ERR(phy->pll_base),
+ "Failed to map pll base\n");
+
+ if (phy->cfg->has_phy_lane) {
+ phy->lane_base = msm_ioremap_size(pdev, "dsi_phy_lane", &phy->lane_size);
+ if (IS_ERR(phy->lane_base))
+ return dev_err_probe(dev, PTR_ERR(phy->lane_base),
+ "Failed to map phy lane base\n");
+ }
+
+ if (phy->cfg->has_phy_regulator) {
+ phy->reg_base = msm_ioremap_size(pdev, "dsi_phy_regulator", &phy->reg_size);
+ if (IS_ERR(phy->reg_base))
+ return dev_err_probe(dev, PTR_ERR(phy->reg_base),
+ "Failed to map phy regulator base\n");
+ }
+
+ if (phy->cfg->ops.parse_dt_properties) {
+ ret = phy->cfg->ops.parse_dt_properties(phy);
+ if (ret)
+ return ret;
+ }
+
+ ret = devm_regulator_bulk_get_const(dev, phy->cfg->num_regulators,
+ phy->cfg->regulator_data,
+ &phy->supplies);
+ if (ret)
+ return ret;
+
+ phy->ahb_clk = msm_clk_get(pdev, "iface");
+ if (IS_ERR(phy->ahb_clk))
+ return dev_err_probe(dev, PTR_ERR(phy->ahb_clk),
+ "Unable to get ahb clk\n");
+
+	/*
+	 * PLL init will call into clk_register which requires
+	 * register access, so we need to enable power and ahb clock.
+	 */
+ ret = dsi_phy_enable_resource(phy);
+ if (ret)
+ return ret;
+
+ if (phy->cfg->ops.pll_init) {
+ ret = phy->cfg->ops.pll_init(phy);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "PLL init failed; need separate clk driver\n");
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ phy->provided_clocks);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register clk provider\n");
+
+ dsi_phy_disable_resource(phy);
+
+ platform_set_drvdata(pdev, phy);
+
+ return 0;
+}
+
+static struct platform_driver dsi_phy_platform_driver = {
+ .probe = dsi_phy_driver_probe,
+ .driver = {
+ .name = "msm_dsi_phy",
+ .of_match_table = dsi_phy_dt_match,
+ },
+};
+
+void __init msm_dsi_phy_driver_register(void)
+{
+ platform_driver_register(&dsi_phy_platform_driver);
+}
+
+void __exit msm_dsi_phy_driver_unregister(void)
+{
+ platform_driver_unregister(&dsi_phy_platform_driver);
+}
+
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
+ struct msm_dsi_phy_clk_request *clk_req,
+ struct msm_dsi_phy_shared_timings *shared_timings)
+{
+ struct device *dev;
+ int ret;
+
+ if (!phy || !phy->cfg->ops.enable)
+ return -EINVAL;
+
+ dev = &phy->pdev->dev;
+
+ ret = dsi_phy_enable_resource(phy);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
+ __func__, ret);
+ goto res_en_fail;
+ }
+
+ ret = regulator_bulk_enable(phy->cfg->num_regulators, phy->supplies);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n",
+ __func__, ret);
+ goto reg_en_fail;
+ }
+
+ ret = phy->cfg->ops.enable(phy, clk_req);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "%s: phy enable failed, %d\n", __func__, ret);
+ goto phy_en_fail;
+ }
+
+ memcpy(shared_timings, &phy->timing.shared_timings,
+ sizeof(*shared_timings));
+
+ /*
+ * Resetting DSI PHY silently changes its PLL registers to reset status,
+ * which will confuse clock driver and result in wrong output rate of
+ * link clocks. Restore PLL status if its PLL is being used as clock
+ * source.
+ */
+ if (phy->usecase != MSM_DSI_PHY_SLAVE) {
+ ret = msm_dsi_phy_pll_restore_state(phy);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "%s: failed to restore phy state, %d\n",
+ __func__, ret);
+			goto pll_restore_fail;
+ }
+ }
+
+ return 0;
+
+pll_restore_fail:
+ if (phy->cfg->ops.disable)
+ phy->cfg->ops.disable(phy);
+phy_en_fail:
+ regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
+reg_en_fail:
+ dsi_phy_disable_resource(phy);
+res_en_fail:
+ return ret;
+}
+
+void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
+{
+ if (!phy || !phy->cfg->ops.disable)
+ return;
+
+ phy->cfg->ops.disable(phy);
+
+ regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
+ dsi_phy_disable_resource(phy);
+}
+
+void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
+ enum msm_dsi_phy_usecase uc)
+{
+ if (phy)
+ phy->usecase = uc;
+}
+
+/* Returns true if we have to clear DSI_LANE_CTRL.HS_REQ_SEL_PHY */
+bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
+{
+ if (!phy || !phy->cfg->ops.set_continuous_clock)
+ return false;
+
+ return phy->cfg->ops.set_continuous_clock(phy, enable);
+}
+
+void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy)
+{
+ if (phy->cfg->ops.save_pll_state) {
+ phy->cfg->ops.save_pll_state(phy);
+ phy->state_saved = true;
+ }
+}
+
+int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy)
+{
+ int ret;
+
+ if (phy->cfg->ops.restore_pll_state && phy->state_saved) {
+ ret = phy->cfg->ops.restore_pll_state(phy);
+ if (ret)
+ return ret;
+
+ phy->state_saved = false;
+ }
+
+ return 0;
+}
+
+void msm_dsi_phy_snapshot(struct msm_disp_state *disp_state, struct msm_dsi_phy *phy)
+{
+ msm_disp_snapshot_add_block(disp_state,
+ phy->base_size, phy->base,
+ "dsi%d_phy", phy->id);
+
+ /* Do not try accessing PLL registers if it is switched off */
+ if (phy->pll_on)
+ msm_disp_snapshot_add_block(disp_state,
+ phy->pll_size, phy->pll_base,
+ "dsi%d_pll", phy->id);
+
+ if (phy->lane_base)
+ msm_disp_snapshot_add_block(disp_state,
+ phy->lane_size, phy->lane_base,
+ "dsi%d_lane", phy->id);
+
+ if (phy->reg_base)
+ msm_disp_snapshot_add_block(disp_state,
+ phy->reg_size, phy->reg_base,
+ "dsi%d_reg", phy->id);
+}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
new file mode 100644
index 000000000..60a99c652
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DSI_PHY_H__
+#define __DSI_PHY_H__
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+
+#include "dsi.h"
+
+#define dsi_phy_read(offset) msm_readl((offset))
+#define dsi_phy_write(offset, data) msm_writel((data), (offset))
+#define dsi_phy_write_udelay(offset, data, delay_us) do { msm_writel((data), (offset)); udelay(delay_us); } while (0)
+#define dsi_phy_write_ndelay(offset, data, delay_ns) do { msm_writel((data), (offset)); ndelay(delay_ns); } while (0)
+
+struct msm_dsi_phy_ops {
+ int (*pll_init)(struct msm_dsi_phy *phy);
+ int (*enable)(struct msm_dsi_phy *phy,
+ struct msm_dsi_phy_clk_request *clk_req);
+ void (*disable)(struct msm_dsi_phy *phy);
+ void (*save_pll_state)(struct msm_dsi_phy *phy);
+ int (*restore_pll_state)(struct msm_dsi_phy *phy);
+ bool (*set_continuous_clock)(struct msm_dsi_phy *phy, bool enable);
+ int (*parse_dt_properties)(struct msm_dsi_phy *phy);
+};
+
+struct msm_dsi_phy_cfg {
+ const struct regulator_bulk_data *regulator_data;
+ int num_regulators;
+ struct msm_dsi_phy_ops ops;
+
+ unsigned long min_pll_rate;
+ unsigned long max_pll_rate;
+
+ const resource_size_t io_start[DSI_MAX];
+ const int num_dsi_phy;
+ const int quirks;
+ bool has_phy_regulator;
+ bool has_phy_lane;
+};
+
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs;
+
+struct msm_dsi_dphy_timing {
+ u32 clk_zero;
+ u32 clk_trail;
+ u32 clk_prepare;
+ u32 hs_exit;
+ u32 hs_zero;
+ u32 hs_prepare;
+ u32 hs_trail;
+ u32 hs_rqst;
+ u32 ta_go;
+ u32 ta_sure;
+ u32 ta_get;
+
+ struct msm_dsi_phy_shared_timings shared_timings;
+
+ /* For PHY v2 only */
+ u32 hs_rqst_ckln;
+ u32 hs_prep_dly;
+ u32 hs_prep_dly_ckln;
+ u8 hs_halfbyte_en;
+ u8 hs_halfbyte_en_ckln;
+};
+
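+/*
+ * Indices into the clk_hw_onecell_data table this PHY registers as a clock
+ * provider; DT consumers pick one with a single-cell clock specifier.
+ */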
+#define DSI_BYTE_PLL_CLK 0
+#define DSI_PIXEL_PLL_CLK 1
+#define NUM_PROVIDED_CLKS 2
+
+#define DSI_LANE_MAX 5
+
+struct msm_dsi_phy {
+ struct platform_device *pdev;
+ void __iomem *base;
+ void __iomem *pll_base;
+ void __iomem *reg_base;
+ void __iomem *lane_base;
+ phys_addr_t base_size;
+ phys_addr_t pll_size;
+ phys_addr_t reg_size;
+ phys_addr_t lane_size;
+ int id;
+
+ struct clk *ahb_clk;
+ struct regulator_bulk_data *supplies;
+
+ struct msm_dsi_dphy_timing timing;
+ const struct msm_dsi_phy_cfg *cfg;
+ void *tuning_cfg;
+
+ enum msm_dsi_phy_usecase usecase;
+ bool regulator_ldo_mode;
+ bool cphy_mode;
+
+ struct clk_hw *vco_hw;
+ bool pll_on;
+
+ struct clk_hw_onecell_data *provided_clocks;
+
+ bool state_saved;
+};
+
+/*
+ * PHY internal functions
+ */
+int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req);
+int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req);
+int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req);
+int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req);
+int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req);
+
+#endif /* __DSI_PHY_H__ */
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
new file mode 100644
index 000000000..27b592c77
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -0,0 +1,1061 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/iopoll.h>
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+#include "dsi_phy_10nm.xml.h"
+
+/*
+ * DSI PLL 10nm - clock diagram (e.g. DSI0):
+ *
+ * dsi0_pll_out_div_clk dsi0_pll_bit_clk
+ * | |
+ * | |
+ * +---------+ | +----------+ | +----+
+ * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
+ * +---------+ | +----------+ | +----+
+ * | |
+ * | | dsi0_pll_by_2_bit_clk
+ * | | |
+ * | | +----+ | |\ dsi0_pclk_mux
+ * | |--| /2 |--o--| \ |
+ * | | +----+ | \ | +---------+
+ * | --------------| |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
+ * |------------------------------| / +---------+
+ * | +-----+ | /
+ * -----------| /4? |--o----------|/
+ * +-----+ | |
+ * | |dsiclk_sel
+ * |
+ * dsi0_pll_post_out_div_clk
+ */
+
+#define VCO_REF_CLK_RATE 19200000
+#define FRAC_BITS 18
+
+/* v3.0.0 10nm implementation that requires the old timings settings */
+#define DSI_PHY_10NM_QUIRK_OLD_TIMINGS BIT(0)
+
+struct dsi_pll_config {
+ bool enable_ssc;
+ bool ssc_center;
+ u32 ssc_freq;
+ u32 ssc_offset;
+ u32 ssc_adj_per;
+
+ /* out */
+ u32 pll_prop_gain_rate;
+ u32 decimal_div_start;
+ u32 frac_div_start;
+ u32 pll_clock_inverters;
+ u32 ssc_stepsize;
+ u32 ssc_div_per;
+};
+
+struct pll_10nm_cached_state {
+ unsigned long vco_rate;
+ u8 bit_clk_div;
+ u8 pix_clk_div;
+ u8 pll_out_div;
+ u8 pll_mux;
+};
+
+struct dsi_pll_10nm {
+ struct clk_hw clk_hw;
+
+ struct msm_dsi_phy *phy;
+
+ u64 vco_current_rate;
+
+ /* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */
+ spinlock_t postdiv_lock;
+
+ struct pll_10nm_cached_state cached_state;
+
+ struct dsi_pll_10nm *slave;
+};
+
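+/* recover the wrapping dsi_pll_10nm from an embedded clk_hw */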
+#define to_pll_10nm(x) container_of(x, struct dsi_pll_10nm, clk_hw)
+
+/**
+ * struct dsi_phy_10nm_tuning_cfg - Holds 10nm PHY tuning config parameters.
+ * @rescode_offset_top: Offset for pull-up legs rescode.
+ * @rescode_offset_bot: Offset for pull-down legs rescode.
+ * @vreg_ctrl: vreg ctrl to drive LDO level
+ */
+struct dsi_phy_10nm_tuning_cfg {
+ u8 rescode_offset_top[DSI_LANE_MAX];
+ u8 rescode_offset_bot[DSI_LANE_MAX];
+ u8 vreg_ctrl;
+};
+
+/*
+ * Global list of private DSI PLL struct pointers. We need this for bonded DSI
+ * mode, where the master PLL's clk_ops needs to access the slave's private data
+ */
+static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];
+
+static void dsi_pll_setup_config(struct dsi_pll_config *config)
+{
+ config->ssc_freq = 31500;
+ config->ssc_offset = 5000;
+ config->ssc_adj_per = 2;
+
+ config->enable_ssc = false;
+ config->ssc_center = false;
+}
+
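+/*
+ * Split the target VCO rate into a decimal divider and an 18-bit fraction
+ * of the doubled 19.2 MHz reference. For example, a 1.5 GHz VCO gives
+ * 1500000000 / 38400000 = 39.0625, i.e. decimal_div_start = 39 and
+ * frac_div_start = 0.0625 * 2^18 = 16384.
+ */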
+static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
+{
+ u64 fref = VCO_REF_CLK_RATE;
+ u64 pll_freq;
+ u64 divider;
+ u64 dec, dec_multiple;
+ u32 frac;
+ u64 multiplier;
+
+ pll_freq = pll->vco_current_rate;
+
+ divider = fref * 2;
+
+ multiplier = 1 << FRAC_BITS;
+ dec_multiple = div_u64(pll_freq * multiplier, divider);
+ dec = div_u64_rem(dec_multiple, multiplier, &frac);
+
+ if (pll_freq <= 1900000000UL)
+ config->pll_prop_gain_rate = 8;
+ else if (pll_freq <= 3000000000UL)
+ config->pll_prop_gain_rate = 10;
+ else
+ config->pll_prop_gain_rate = 12;
+ if (pll_freq < 1100000000UL)
+ config->pll_clock_inverters = 8;
+ else
+ config->pll_clock_inverters = 0;
+
+ config->decimal_div_start = dec;
+ config->frac_div_start = frac;
+}
+
+#define SSC_CENTER BIT(0)
+#define SSC_EN BIT(1)
+
+static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
+{
+ u32 ssc_per;
+ u32 ssc_mod;
+ u64 ssc_step_size;
+ u64 frac;
+
+ if (!config->enable_ssc) {
+		DBG("SSC not enabled");
+ return;
+ }
+
+ ssc_per = DIV_ROUND_CLOSEST(VCO_REF_CLK_RATE, config->ssc_freq) / 2 - 1;
+ ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
+ ssc_per -= ssc_mod;
+
+ frac = config->frac_div_start;
+ ssc_step_size = config->decimal_div_start;
+ ssc_step_size *= (1 << FRAC_BITS);
+ ssc_step_size += frac;
+ ssc_step_size *= config->ssc_offset;
+ ssc_step_size *= (config->ssc_adj_per + 1);
+ ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
+ ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
+
+ config->ssc_div_per = ssc_per;
+ config->ssc_stepsize = ssc_step_size;
+
+	pr_debug("SSC: Dec:%d, frac:%llu, frac_bits:%d\n",
+ config->decimal_div_start, frac, FRAC_BITS);
+ pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
+ ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
+}
+
+static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
+{
+ void __iomem *base = pll->phy->pll_base;
+
+ if (config->enable_ssc) {
+ pr_debug("SSC is enabled\n");
+
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
+ config->ssc_stepsize & 0xff);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
+ config->ssc_stepsize >> 8);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
+ config->ssc_div_per & 0xff);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
+ config->ssc_div_per >> 8);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
+ config->ssc_adj_per & 0xff);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
+ config->ssc_adj_per >> 8);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
+ SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
+ }
+}
+
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
+{
+ void __iomem *base = pll->phy->pll_base;
+
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
+ 0xba);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE,
+ 0x0c);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO,
+ 0x08);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1,
+ 0xfa);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
+ 0x4c);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
+}
+
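+/*
+ * Program the computed dividers; the 18-bit fractional value is split
+ * across three registers (8 + 8 + 2 bits), matching FRAC_BITS.
+ */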
+static void dsi_pll_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
+{
+ void __iomem *base = pll->phy->pll_base;
+
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
+ config->decimal_div_start);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
+ config->frac_div_start & 0xff);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
+ (config->frac_div_start & 0xff00) >> 8);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
+ (config->frac_div_start & 0x30000) >> 16);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 64);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
+ config->pll_clock_inverters);
+}
+
+static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
+ struct dsi_pll_config config;
+
+ DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->phy->id, rate,
+ parent_rate);
+
+ pll_10nm->vco_current_rate = rate;
+
+ dsi_pll_setup_config(&config);
+
+ dsi_pll_calc_dec_frac(pll_10nm, &config);
+
+ dsi_pll_calc_ssc(pll_10nm, &config);
+
+ dsi_pll_commit(pll_10nm, &config);
+
+ dsi_pll_config_hzindep_reg(pll_10nm);
+
+ dsi_pll_ssc_commit(pll_10nm, &config);
+
+	/* flush, ensure all register writes are done */
+ wmb();
+
+ return 0;
+}
+
+static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
+{
+ struct device *dev = &pll->phy->pdev->dev;
+ int rc;
+ u32 status = 0;
+ u32 const delay_us = 100;
+ u32 const timeout_us = 5000;
+
+ rc = readl_poll_timeout_atomic(pll->phy->pll_base +
+ REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE,
+ status,
+ ((status & BIT(0)) > 0),
+ delay_us,
+ timeout_us);
+ if (rc)
+ DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n",
+ pll->phy->id, status);
+
+ return rc;
+}
+
+static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
+{
+ u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+ dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
+ dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
+ data & ~BIT(5));
+ ndelay(250);
+}
+
+static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
+{
+ u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+ dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
+ data | BIT(5));
+ dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
+ ndelay(250);
+}
+
+static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
+{
+ u32 data;
+
+ data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+ dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
+ data & ~BIT(5));
+}
+
+static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
+{
+ u32 data;
+
+ data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+ dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
+ data | BIT(5));
+}
+
+static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
+ struct device *dev = &pll_10nm->phy->pdev->dev;
+ int rc;
+
+ dsi_pll_enable_pll_bias(pll_10nm);
+ if (pll_10nm->slave)
+ dsi_pll_enable_pll_bias(pll_10nm->slave);
+
+	rc = dsi_pll_10nm_vco_set_rate(hw, pll_10nm->vco_current_rate, 0);
+ if (rc) {
+ DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Start PLL */
+ dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
+ 0x01);
+
+ /*
+	 * Ensure all PLL configurations are written prior to checking
+ * for PLL lock.
+ */
+ wmb();
+
+ /* Check for PLL lock */
+ rc = dsi_pll_10nm_lock_status(pll_10nm);
+ if (rc) {
+ DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->phy->id);
+ goto error;
+ }
+
+ pll_10nm->phy->pll_on = true;
+
+ dsi_pll_enable_global_clk(pll_10nm);
+ if (pll_10nm->slave)
+ dsi_pll_enable_global_clk(pll_10nm->slave);
+
+ dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
+ 0x01);
+ if (pll_10nm->slave)
+ dsi_phy_write(pll_10nm->slave->phy->base +
+ REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
+
+error:
+ return rc;
+}
+
+static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll)
+{
+ dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0);
+ dsi_pll_disable_pll_bias(pll);
+}
+
+static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
+
+ /*
+	 * To avoid any stray glitches while abruptly powering down the PLL,
+	 * make sure to gate the clock using the clock enable bit before
+	 * powering down the PLL.
+ */
+ dsi_pll_disable_global_clk(pll_10nm);
+ dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0);
+ dsi_pll_disable_sub(pll_10nm);
+ if (pll_10nm->slave) {
+ dsi_pll_disable_global_clk(pll_10nm->slave);
+ dsi_pll_disable_sub(pll_10nm->slave);
+ }
+ /* flush, ensure all register writes are done */
+ wmb();
+ pll_10nm->phy->pll_on = false;
+}
+
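+/*
+ * Reconstruct the VCO rate from the programmed registers:
+ * rate = 2 * ref * (dec + frac / 2^18), the inverse of
+ * dsi_pll_calc_dec_frac() above.
+ */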
+static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
+ void __iomem *base = pll_10nm->phy->pll_base;
+ u64 ref_clk = VCO_REF_CLK_RATE;
+ u64 vco_rate = 0x0;
+ u64 multiplier;
+ u32 frac;
+ u32 dec;
+ u64 pll_freq, tmp64;
+
+ dec = dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
+ dec &= 0xff;
+
+ frac = dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
+ frac |= ((dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &
+ 0xff) << 8);
+ frac |= ((dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
+ 0x3) << 16);
+
+	/* TODO: this assumes the prescaler is disabled */
+ multiplier = 1 << FRAC_BITS;
+ pll_freq = dec * (ref_clk * 2);
+ tmp64 = (ref_clk * 2 * frac);
+ pll_freq += div_u64(tmp64, multiplier);
+
+ vco_rate = pll_freq;
+ pll_10nm->vco_current_rate = vco_rate;
+
+ DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
+ pll_10nm->phy->id, (unsigned long)vco_rate, dec, frac);
+
+ return (unsigned long)vco_rate;
+}
+
+static long dsi_pll_10nm_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *parent_rate)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
+
+ if (rate < pll_10nm->phy->cfg->min_pll_rate)
+ return pll_10nm->phy->cfg->min_pll_rate;
+ else if (rate > pll_10nm->phy->cfg->max_pll_rate)
+ return pll_10nm->phy->cfg->max_pll_rate;
+ else
+ return rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {
+ .round_rate = dsi_pll_10nm_clk_round_rate,
+ .set_rate = dsi_pll_10nm_vco_set_rate,
+ .recalc_rate = dsi_pll_10nm_vco_recalc_rate,
+ .prepare = dsi_pll_10nm_vco_prepare,
+ .unprepare = dsi_pll_10nm_vco_unprepare,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static void dsi_10nm_pll_save_state(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
+ struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
+ void __iomem *phy_base = pll_10nm->phy->base;
+ u32 cmn_clk_cfg0, cmn_clk_cfg1;
+
+ cached->pll_out_div = dsi_phy_read(pll_10nm->phy->pll_base +
+ REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
+ cached->pll_out_div &= 0x3;
+
+ cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
+ cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
+ cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
+
+ cmn_clk_cfg1 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+ cached->pll_mux = cmn_clk_cfg1 & 0x3;
+
+ DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
+ pll_10nm->phy->id, cached->pll_out_div, cached->bit_clk_div,
+ cached->pix_clk_div, cached->pll_mux);
+}
+
+static int dsi_10nm_pll_restore_state(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
+ struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
+ void __iomem *phy_base = pll_10nm->phy->base;
+ u32 val;
+ int ret;
+
+ val = dsi_phy_read(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
+ val &= ~0x3;
+ val |= cached->pll_out_div;
+ dsi_phy_write(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);
+
+ dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+ cached->bit_clk_div | (cached->pix_clk_div << 4));
+
+ val = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+ val &= ~0x3;
+ val |= cached->pll_mux;
+ dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);
+
+ ret = dsi_pll_10nm_vco_set_rate(phy->vco_hw,
+ pll_10nm->vco_current_rate,
+ VCO_REF_CLK_RATE);
+ if (ret) {
+ DRM_DEV_ERROR(&pll_10nm->phy->pdev->dev,
+ "restore vco rate failed. ret=%d\n", ret);
+ return ret;
+ }
+
+ DBG("DSI PLL%d", pll_10nm->phy->id);
+
+ return 0;
+}
+
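+/*
+ * For bonded DSI the master PHY also drives its slave: the master caches a
+ * pointer to the slave PLL (so prepare/enable can mirror the bias and clock
+ * enables), while the slave muxes in the external byte/pixel source via
+ * CMN_CLK_CFG1.
+ */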
+static int dsi_10nm_set_usecase(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
+ void __iomem *base = phy->base;
+ u32 data = 0x0; /* internal PLL */
+
+ DBG("DSI PLL%d", pll_10nm->phy->id);
+
+ switch (phy->usecase) {
+ case MSM_DSI_PHY_STANDALONE:
+ break;
+ case MSM_DSI_PHY_MASTER:
+ pll_10nm->slave = pll_10nm_list[(pll_10nm->phy->id + 1) % DSI_MAX];
+ break;
+ case MSM_DSI_PHY_SLAVE:
+ data = 0x1; /* external PLL */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set PLL src */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2));
+
+ return 0;
+}
+
+/*
+ * The post dividers and mux clocks are created using the standard divider and
+ * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
+ * state to follow the master PLL's divider/mux state. Therefore, we don't
+ * require special clock ops that also configure the slave PLL registers
+ */
+static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **provided_clocks)
+{
+ char clk_name[32];
+ struct clk_init_data vco_init = {
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "ref",
+ },
+ .num_parents = 1,
+ .name = clk_name,
+ .flags = CLK_IGNORE_UNUSED,
+ .ops = &clk_ops_dsi_pll_10nm_vco,
+ };
+ struct device *dev = &pll_10nm->phy->pdev->dev;
+ struct clk_hw *hw, *pll_out_div, *pll_bit, *pll_by_2_bit;
+ struct clk_hw *pll_post_out_div, *pclk_mux;
+ int ret;
+
+ DBG("DSI%d", pll_10nm->phy->id);
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_10nm->phy->id);
+ pll_10nm->clk_hw.init = &vco_init;
+
+ ret = devm_clk_hw_register(dev, &pll_10nm->clk_hw);
+ if (ret)
+ return ret;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
+
+ pll_out_div = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_10nm->clk_hw, CLK_SET_RATE_PARENT,
+ pll_10nm->phy->pll_base +
+ REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
+ 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+ if (IS_ERR(pll_out_div)) {
+ ret = PTR_ERR(pll_out_div);
+ goto fail;
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_bit_clk", pll_10nm->phy->id);
+
+ /* BIT CLK: DIV_CTRL_3_0 */
+ pll_bit = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ pll_out_div, CLK_SET_RATE_PARENT,
+ pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+ 0, 4, CLK_DIVIDER_ONE_BASED, &pll_10nm->postdiv_lock);
+ if (IS_ERR(pll_bit)) {
+ ret = PTR_ERR(pll_bit);
+ goto fail;
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_byteclk", pll_10nm->phy->id);
+
+ /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
+ pll_bit, CLK_SET_RATE_PARENT, 1, 8);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto fail;
+ }
+
+ provided_clocks[DSI_BYTE_PLL_CLK] = hw;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
+
+ pll_by_2_bit = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, pll_bit, 0, 1, 2);
+ if (IS_ERR(pll_by_2_bit)) {
+ ret = PTR_ERR(pll_by_2_bit);
+ goto fail;
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
+
+ pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, pll_out_div, 0, 1, 4);
+ if (IS_ERR(pll_post_out_div)) {
+ ret = PTR_ERR(pll_post_out_div);
+ goto fail;
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_10nm->phy->id);
+
+ pclk_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
+ ((const struct clk_hw *[]){
+ pll_bit,
+ pll_by_2_bit,
+ pll_out_div,
+ pll_post_out_div,
+ }), 4, 0, pll_10nm->phy->base +
+ REG_DSI_10nm_PHY_CMN_CLK_CFG1, 0, 2, 0, NULL);
+ if (IS_ERR(pclk_mux)) {
+ ret = PTR_ERR(pclk_mux);
+ goto fail;
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_dsiclk", pll_10nm->phy->id);
+
+	/* PIX CLK DIV: DIV_CTRL_7_4 */
+ hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name, pclk_mux,
+ 0, pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+ 4, 4, CLK_DIVIDER_ONE_BASED, &pll_10nm->postdiv_lock);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto fail;
+ }
+
+ provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
+
+ return 0;
+
+fail:
+
+ return ret;
+}
+
+static int dsi_pll_10nm_init(struct msm_dsi_phy *phy)
+{
+ struct platform_device *pdev = phy->pdev;
+ struct dsi_pll_10nm *pll_10nm;
+ int ret;
+
+ pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);
+ if (!pll_10nm)
+ return -ENOMEM;
+
+ DBG("DSI PLL%d", phy->id);
+
+ pll_10nm_list[phy->id] = pll_10nm;
+
+ spin_lock_init(&pll_10nm->postdiv_lock);
+
+ pll_10nm->phy = phy;
+
+ ret = pll_10nm_register(pll_10nm, phy->provided_clocks->hws);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+ return ret;
+ }
+
+ phy->vco_hw = &pll_10nm->clk_hw;
+
+ /* TODO: Remove this when we have proper display handover support */
+ msm_dsi_phy_pll_save_state(phy);
+
+ return 0;
+}
+
+static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->base;
+ u32 data = 0;
+
+ data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
+ mb(); /* make sure read happened */
+
+ return (data & BIT(0));
+}
+
+static void dsi_phy_hw_v3_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *lane_base = phy->lane_base;
+ int phy_lane_0 = 0; /* TODO: Support all lane swap configs */
+
+ /*
+	 * LPRX and CDRX need to be enabled only for the physical data lane
+	 * corresponding to logical data lane 0.
+ */
+ if (enable)
+ dsi_phy_write(lane_base +
+ REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
+ else
+ dsi_phy_write(lane_base +
+ REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
+}
+
+static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
+{
+ int i;
+ u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
+ void __iomem *lane_base = phy->lane_base;
+ struct dsi_phy_10nm_tuning_cfg *tuning_cfg = phy->tuning_cfg;
+
+ if (phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)
+ tx_dctrl[3] = 0x02;
+
+ /* Strength ctrl settings */
+ for (i = 0; i < 5; i++) {
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i),
+ 0x55);
+ /*
+		 * Disable LPRX and CDRX for all lanes; they are later enabled
+		 * only for the physical data lane corresponding to logical
+		 * data lane 0.
+ */
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(i), 0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_PIN_SWAP(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(i),
+ 0x88);
+ }
+
+ dsi_phy_hw_v3_0_config_lpcdrx(phy, true);
+
+ /* other settings */
+ for (i = 0; i < 5; i++) {
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG0(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG1(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG2(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG3(i),
+ i == 4 ? 0x80 : 0x0);
+
+ /* platform specific dsi phy drive strength adjustment */
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i),
+ tuning_cfg->rescode_offset_top[i]);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i),
+ tuning_cfg->rescode_offset_bot[i]);
+
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i),
+ tx_dctrl[i]);
+ }
+
+ if (!(phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)) {
+		/* Toggle BIT 0 to release freeze I/O */
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
+ }
+}
+
+static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ int ret;
+ u32 status;
+ u32 const delay_us = 5;
+ u32 const timeout_us = 1000;
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ void __iomem *base = phy->base;
+ struct dsi_phy_10nm_tuning_cfg *tuning_cfg = phy->tuning_cfg;
+ u32 data;
+
+ DBG("");
+
+ if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
+ DRM_DEV_ERROR(&phy->pdev->dev,
+ "%s: D-PHY timing calculation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ if (dsi_phy_hw_v3_0_is_pll_on(phy))
+ pr_warn("PLL turned on before configuring PHY\n");
+
+ /* wait for REFGEN READY */
+ ret = readl_poll_timeout_atomic(base + REG_DSI_10nm_PHY_CMN_PHY_STATUS,
+ status, (status & BIT(0)),
+ delay_us, timeout_us);
+ if (ret) {
+ pr_err("Ref gen not ready. Aborting\n");
+ return -EINVAL;
+ }
+
+ /* de-assert digital and pll power down */
+ data = BIT(6) | BIT(5);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
+
+ /* Assert PLL core reset */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0x00);
+
+ /* turn off resync FIFO */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x00);
+
+ /* Select MS1 byte-clk */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL, 0x10);
+
+ /* Enable LDO with platform specific drive level/amplitude adjustment */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL,
+ tuning_cfg->vreg_ctrl);
+
+ /* Configure PHY lane swap (TODO: we need to calculate this) */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG0, 0x21);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG1, 0x84);
+
+ /* DSI PHY timings */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0,
+ timing->hs_halfbyte_en);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1,
+ timing->clk_zero);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2,
+ timing->clk_prepare);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3,
+ timing->clk_trail);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4,
+ timing->hs_exit);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5,
+ timing->hs_zero);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6,
+ timing->hs_prepare);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7,
+ timing->hs_trail);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8,
+ timing->hs_rqst);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9,
+ timing->ta_go | (timing->ta_sure << 3));
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10,
+ timing->ta_get);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11,
+ 0x00);
+
+ /* Remove power down from all blocks */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x7f);
+
+ /* power up lanes */
+ data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+ /* TODO: only power up lanes that are used */
+ data |= 0x1F;
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0x1F);
+
+ /* Select full-rate mode */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_2, 0x40);
+
+ ret = dsi_10nm_set_usecase(phy);
+ if (ret) {
+ DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* DSI lane settings */
+ dsi_phy_hw_v3_0_lane_settings(phy);
+
+ DBG("DSI%d PHY enabled", phy->id);
+
+ return 0;
+}
+
+static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->base;
+ u32 data;
+
+ DBG("");
+
+ if (dsi_phy_hw_v3_0_is_pll_on(phy))
+ pr_warn("Turning OFF PHY while PLL is on\n");
+
+ dsi_phy_hw_v3_0_config_lpcdrx(phy, false);
+ data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+ /* disable all lanes */
+ data &= ~0x1F;
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0);
+
+ /* Turn off all PHY blocks */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x00);
+ /* make sure phy is turned off */
+ wmb();
+
+ DBG("DSI%d PHY disabled", phy->id);
+}
+
+static int dsi_10nm_phy_parse_dt(struct msm_dsi_phy *phy)
+{
+ struct device *dev = &phy->pdev->dev;
+ struct dsi_phy_10nm_tuning_cfg *tuning_cfg;
+ s8 offset_top[DSI_LANE_MAX] = { 0 }; /* No offset */
+ s8 offset_bot[DSI_LANE_MAX] = { 0 }; /* No offset */
+ u32 ldo_level = 400; /* 400mV */
+ u8 level;
+ int ret, i;
+
+ tuning_cfg = devm_kzalloc(dev, sizeof(*tuning_cfg), GFP_KERNEL);
+ if (!tuning_cfg)
+ return -ENOMEM;
+
+ /* Drive strength adjustment parameters */
+ ret = of_property_read_u8_array(dev->of_node, "qcom,phy-rescode-offset-top",
+ offset_top, DSI_LANE_MAX);
+ if (ret && ret != -EINVAL) {
+ DRM_DEV_ERROR(dev, "failed to parse qcom,phy-rescode-offset-top, %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < DSI_LANE_MAX; i++) {
+ if (offset_top[i] < -32 || offset_top[i] > 31) {
+ DRM_DEV_ERROR(dev,
+ "qcom,phy-rescode-offset-top value %d is not in range [-32..31]\n",
+ offset_top[i]);
+ return -EINVAL;
+ }
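+ /*
+ * Masking with 0x3f truncates the signed offset to its 6-bit
+ * two's-complement representation, matching the [-32..31] range
+ * enforced above.
+ */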
+ tuning_cfg->rescode_offset_top[i] = 0x3f & offset_top[i];
+ }
+
+ ret = of_property_read_u8_array(dev->of_node, "qcom,phy-rescode-offset-bot",
+ offset_bot, DSI_LANE_MAX);
+ if (ret && ret != -EINVAL) {
+ DRM_DEV_ERROR(dev, "failed to parse qcom,phy-rescode-offset-bot, %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < DSI_LANE_MAX; i++) {
+ if (offset_bot[i] < -32 || offset_bot[i] > 31) {
+ DRM_DEV_ERROR(dev,
+ "qcom,phy-rescode-offset-bot value %d is not in range [-32..31]\n",
+ offset_bot[i]);
+ return -EINVAL;
+ }
+ tuning_cfg->rescode_offset_bot[i] = 0x3f & offset_bot[i];
+ }
+
+ /* Drive level/amplitude adjustment parameters */
+ ret = of_property_read_u32(dev->of_node, "qcom,phy-drive-ldo-level", &ldo_level);
+ if (ret && ret != -EINVAL) {
+ DRM_DEV_ERROR(dev, "failed to parse qcom,phy-drive-ldo-level, %d\n", ret);
+ return ret;
+ }
+
+ switch (ldo_level) {
+ case 375:
+ level = 0;
+ break;
+ case 400:
+ level = 1;
+ break;
+ case 425:
+ level = 2;
+ break;
+ case 450:
+ level = 3;
+ break;
+ case 475:
+ level = 4;
+ break;
+ case 500:
+ level = 5;
+ break;
+ default:
+ DRM_DEV_ERROR(dev, "qcom,phy-drive-ldo-level %d is not supported\n", ldo_level);
+ return -EINVAL;
+ }
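+ /*
+ * The low three bits select one of the LDO output levels mapped
+ * above; 0x58 supplies the fixed upper bits of VREG_CTRL that this
+ * driver always uses.
+ */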
+ tuning_cfg->vreg_ctrl = 0x58 | (0x7 & level);
+
+ phy->tuning_cfg = tuning_cfg;
+
+ return 0;
+}
+
+static const struct regulator_bulk_data dsi_phy_10nm_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 36000 },
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_10nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_10nm_regulators),
+ .ops = {
+ .enable = dsi_10nm_phy_enable,
+ .disable = dsi_10nm_phy_disable,
+ .pll_init = dsi_pll_10nm_init,
+ .save_pll_state = dsi_10nm_pll_save_state,
+ .restore_pll_state = dsi_10nm_pll_restore_state,
+ .parse_dt_properties = dsi_10nm_phy_parse_dt,
+ },
+ .min_pll_rate = 1000000000UL,
+ .max_pll_rate = 3500000000UL,
+ .io_start = { 0xae94400, 0xae96400 },
+ .num_dsi_phy = 2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_10nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_10nm_regulators),
+ .ops = {
+ .enable = dsi_10nm_phy_enable,
+ .disable = dsi_10nm_phy_disable,
+ .pll_init = dsi_pll_10nm_init,
+ .save_pll_state = dsi_10nm_pll_save_state,
+ .restore_pll_state = dsi_10nm_pll_restore_state,
+ .parse_dt_properties = dsi_10nm_phy_parse_dt,
+ },
+ .min_pll_rate = 1000000000UL,
+ .max_pll_rate = 3500000000UL,
+ .io_start = { 0xc994400, 0xc996400 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_10NM_QUIRK_OLD_TIMINGS,
+};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
new file mode 100644
index 000000000..f0780c40b
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -0,0 +1,1086 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+#include "dsi_phy_14nm.xml.h"
+
+#define PHY_14NM_CKLN_IDX 4
+
+/*
+ * DSI PLL 14nm - clock diagram (eg: DSI0):
+ *
+ * dsi0n1_postdiv_clk
+ * |
+ * |
+ * +----+ | +----+
+ * dsi0vco_clk ---| n1 |--o--| /8 |-- dsi0pllbyte
+ * +----+ | +----+
+ * | dsi0n1_postdivby2_clk
+ * | +----+ |
+ * o---| /2 |--o--|\
+ * | +----+ | \ +----+
+ * | | |--| n2 |-- dsi0pll
+ * o--------------| / +----+
+ * |/
+ */
+
+#define POLL_MAX_READS 15
+#define POLL_TIMEOUT_US 1000
+
+#define VCO_REF_CLK_RATE 19200000
+#define VCO_MIN_RATE 1300000000UL
+#define VCO_MAX_RATE 2600000000UL
+
+struct dsi_pll_config {
+ u64 vco_current_rate;
+
+ u32 ssc_en; /* SSC enable/disable */
+
+ /* fixed params */
+ u32 plllock_cnt;
+ u32 ssc_center;
+ u32 ssc_adj_period;
+ u32 ssc_spread;
+ u32 ssc_freq;
+
+ /* calculated */
+ u32 dec_start;
+ u32 div_frac_start;
+ u32 ssc_period;
+ u32 ssc_step_size;
+ u32 plllock_cmp;
+ u32 pll_vco_div_ref;
+ u32 pll_vco_count;
+ u32 pll_kvco_div_ref;
+ u32 pll_kvco_count;
+};
+
+struct pll_14nm_cached_state {
+ unsigned long vco_rate;
+ u8 n2postdiv;
+ u8 n1postdiv;
+};
+
+struct dsi_pll_14nm {
+ struct clk_hw clk_hw;
+
+ struct msm_dsi_phy *phy;
+
+ /* protects REG_DSI_14nm_PHY_CMN_CLK_CFG0 register */
+ spinlock_t postdiv_lock;
+
+ struct pll_14nm_cached_state cached_state;
+
+ struct dsi_pll_14nm *slave;
+};
+
+#define to_pll_14nm(x) container_of(x, struct dsi_pll_14nm, clk_hw)
+
+/*
+ * Private struct for N1/N2 post-divider clocks. These clocks are similar to
+ * the generic clk_divider class of clocks. The only difference is that they
+ * also set the slave DSI PLL's post-dividers when in bonded DSI mode
+ */
+struct dsi_pll_14nm_postdiv {
+ struct clk_hw hw;
+
+ /* divider params */
+ u8 shift;
+ u8 width;
+ u8 flags; /* same flags as used by clk_divider struct */
+
+ struct dsi_pll_14nm *pll;
+};
+
+#define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw)
+
+/*
+ * Global list of private DSI PLL struct pointers. We need this for bonded DSI
+ * mode, where the master PLL's clk_ops needs to access the slave's private data
+ */
+static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX];
+
+static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
+ u32 nb_tries, u32 timeout_us)
+{
+ bool pll_locked = false, pll_ready = false;
+ void __iomem *base = pll_14nm->phy->pll_base;
+ u32 tries, val;
+
+ tries = nb_tries;
+ while (tries--) {
+ val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+ pll_locked = !!(val & BIT(5));
+
+ if (pll_locked)
+ break;
+
+ udelay(timeout_us);
+ }
+
+ if (!pll_locked)
+ goto out;
+
+ tries = nb_tries;
+ while (tries--) {
+ val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+ pll_ready = !!(val & BIT(0));
+
+ if (pll_ready)
+ break;
+
+ udelay(timeout_us);
+ }
+
+out:
+ DBG("DSI PLL is %slocked, %sready", pll_locked ? "" : "*not* ", pll_ready ? "" : "*not* ");
+
+ return pll_locked && pll_ready;
+}
+
+static void dsi_pll_14nm_config_init(struct dsi_pll_config *pconf)
+{
+ /* fixed input */
+ pconf->plllock_cnt = 1;
+
+ /*
+ * SSC is enabled by default. We might need DT props for configuring
+ * SSC params such as PPM and center/down spread.
+ */
+ pconf->ssc_en = 1;
+ pconf->ssc_center = 0; /* down spread by default */
+ pconf->ssc_spread = 5; /* PPM / 1000 */
+ pconf->ssc_freq = 31500; /* default recommended */
+ pconf->ssc_adj_period = 37;
+}
+
+#define CEIL(x, y) (((x) + ((y) - 1)) / (y))
+
+static void pll_14nm_ssc_calc(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf)
+{
+ u32 period, ssc_period;
+ u32 ref, rem;
+ u64 step_size;
+
+ DBG("vco=%lld ref=%d", pconf->vco_current_rate, VCO_REF_CLK_RATE);
+
+ ssc_period = pconf->ssc_freq / 500;
+ period = (u32)VCO_REF_CLK_RATE / 1000;
+ ssc_period = CEIL(period, ssc_period);
+ ssc_period -= 1;
+ pconf->ssc_period = ssc_period;
+
+ DBG("ssc freq=%d spread=%d period=%d", pconf->ssc_freq,
+ pconf->ssc_spread, pconf->ssc_period);
+
+ step_size = (u32)pconf->vco_current_rate;
+ ref = VCO_REF_CLK_RATE;
+ ref /= 1000;
+ step_size = div_u64(step_size, ref);
+ step_size <<= 20;
+ step_size = div_u64(step_size, 1000);
+ step_size *= pconf->ssc_spread;
+ step_size = div_u64(step_size, 1000);
+ step_size *= (pconf->ssc_adj_period + 1);
+
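+ /*
+ * In closed form (illustrative), the value accumulated above is
+ * step_size ~= (vco / ref_khz) * 2^20 * spread / 10^6 *
+ * (ssc_adj_period + 1), which is then divided, rounding up, over
+ * the (ssc_period + 1) steps of one SSC period below.
+ */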
+ rem = 0;
+ step_size = div_u64_rem(step_size, ssc_period + 1, &rem);
+ if (rem)
+ step_size++;
+
+ DBG("step_size=%lld", step_size);
+
+ step_size &= 0x0ffff; /* take lower 16 bits */
+
+ pconf->ssc_step_size = step_size;
+}
+
+static void pll_14nm_dec_frac_calc(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf)
+{
+ u64 multiplier = BIT(20);
+ u64 dec_start_multiple, dec_start, pll_comp_val;
+ u32 duration, div_frac_start;
+ u64 vco_clk_rate = pconf->vco_current_rate;
+ u64 fref = VCO_REF_CLK_RATE;
+
+ DBG("vco_clk_rate=%lld ref_clk_rate=%lld", vco_clk_rate, fref);
+
+ dec_start_multiple = div_u64(vco_clk_rate * multiplier, fref);
+ dec_start = div_u64_rem(dec_start_multiple, multiplier, &div_frac_start);
+
+ pconf->dec_start = (u32)dec_start;
+ pconf->div_frac_start = div_frac_start;
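+
+ /*
+ * Worked example (illustrative): vco = 1.8 GHz with fref = 19.2 MHz
+ * gives a feedback ratio of 93.75, so dec_start = 93 and
+ * div_frac_start = 0.75 * 2^20 = 786432.
+ */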
+
+ if (pconf->plllock_cnt == 0)
+ duration = 1024;
+ else if (pconf->plllock_cnt == 1)
+ duration = 256;
+ else if (pconf->plllock_cnt == 2)
+ duration = 128;
+ else
+ duration = 32;
+
+ pll_comp_val = duration * dec_start_multiple;
+ pll_comp_val = div_u64(pll_comp_val, multiplier);
+ do_div(pll_comp_val, 10);
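+ /* i.e. plllock_cmp = duration * (vco / fref) / 10 */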
+
+ pconf->plllock_cmp = (u32)pll_comp_val;
+}
+
+static u32 pll_14nm_kvco_slop(u32 vrate)
+{
+ u32 slop = 0;
+
+ if (vrate > VCO_MIN_RATE && vrate <= 1800000000UL)
+ slop = 600;
+ else if (vrate > 1800000000UL && vrate < 2300000000UL)
+ slop = 400;
+ else if (vrate > 2300000000UL && vrate < VCO_MAX_RATE)
+ slop = 280;
+
+ return slop;
+}
+
+static void pll_14nm_calc_vco_count(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf)
+{
+ u64 vco_clk_rate = pconf->vco_current_rate;
+ u64 fref = VCO_REF_CLK_RATE;
+ u32 vco_measure_time = 5;
+ u32 kvco_measure_time = 5;
+ u64 data;
+ u32 cnt;
+
+ data = fref * vco_measure_time;
+ do_div(data, 1000000);
+ data &= 0x03ff; /* 10 bits */
+ data -= 2;
+ pconf->pll_vco_div_ref = data;
+
+ data = div_u64(vco_clk_rate, 1000000); /* unit is MHz */
+ data *= vco_measure_time;
+ do_div(data, 10);
+ pconf->pll_vco_count = data;
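+ /* i.e. pll_vco_count = vco rate in MHz * vco_measure_time / 10 */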
+
+ data = fref * kvco_measure_time;
+ do_div(data, 1000000);
+ data &= 0x03ff; /* 10 bits */
+ data -= 1;
+ pconf->pll_kvco_div_ref = data;
+
+ cnt = pll_14nm_kvco_slop(vco_clk_rate);
+ cnt *= 2;
+ cnt /= 100;
+ cnt *= kvco_measure_time;
+ pconf->pll_kvco_count = cnt;
+}
+
+static void pll_db_commit_ssc(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf)
+{
+ void __iomem *base = pll->phy->pll_base;
+ u8 data;
+
+ data = pconf->ssc_adj_period;
+ data &= 0x0ff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1, data);
+ data = (pconf->ssc_adj_period >> 8);
+ data &= 0x03;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2, data);
+
+ data = pconf->ssc_period;
+ data &= 0x0ff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER1, data);
+ data = (pconf->ssc_period >> 8);
+ data &= 0x0ff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER2, data);
+
+ data = pconf->ssc_step_size;
+ data &= 0x0ff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1, data);
+ data = (pconf->ssc_step_size >> 8);
+ data &= 0x0ff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2, data);
+
+ data = (pconf->ssc_center & 0x01);
+ data <<= 1;
+ data |= 0x01; /* enable */
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER, data);
+
+ wmb(); /* make sure register committed */
+}
+
+static void pll_db_commit_common(struct dsi_pll_14nm *pll,
+ struct dsi_pll_config *pconf)
+{
+ void __iomem *base = pll->phy->pll_base;
+ u8 data;
+
+ /* configure the non-frequency-dependent PLL registers */
+ data = 0;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET, data);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_TXCLK_EN, 1);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL, 48);
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2, 4 << 3); /* bandgap_timer */
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5, 5); /* pll_wakeup_timer */
+
+ data = pconf->pll_vco_div_ref & 0xff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1, data);
+ data = (pconf->pll_vco_div_ref >> 8) & 0x3;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2, data);
+
+ data = pconf->pll_kvco_div_ref & 0xff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1, data);
+ data = (pconf->pll_kvco_div_ref >> 8) & 0x3;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2, data);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_MISC1, 16);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_IE_TRIM, 4);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_IP_TRIM, 4);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_CP_SET_CUR, 1 << 3 | 1);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPCSET, 0 << 3 | 0);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPMSET, 0 << 3 | 0);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICP_SET, 4 << 3 | 4);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF1, 1 << 4 | 11);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_IPTAT_TRIM, 7);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_CRCTRL, 1 << 4 | 2);
+}
+
+static void pll_14nm_software_reset(struct dsi_pll_14nm *pll_14nm)
+{
+ void __iomem *cmn_base = pll_14nm->phy->base;
+
+ /* de-assert PLL start and apply PLL software reset */
+
+ /* stop pll */
+ dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
+
+ /* pll sw reset */
+ dsi_phy_write_udelay(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x20, 10);
+ wmb(); /* make sure register committed */
+
+ dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0);
+ wmb(); /* make sure register committed */
+}
+
+static void pll_db_commit_14nm(struct dsi_pll_14nm *pll,
+ struct dsi_pll_config *pconf)
+{
+ void __iomem *base = pll->phy->pll_base;
+ void __iomem *cmn_base = pll->phy->base;
+ u8 data;
+
+ DBG("DSI%d PLL", pll->phy->id);
+
+ dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, 0x3c);
+
+ pll_db_commit_common(pll, pconf);
+
+ pll_14nm_software_reset(pll);
+
+ /* Use the /2 path in Mux */
+ dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG1, 1);
+
+ data = 0xff; /* data, clk, pll normal operation */
+ dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_0, data);
+
+ /* configure the frequency dependent pll registers */
+ data = pconf->dec_start;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DEC_START, data);
+
+ data = pconf->div_frac_start & 0xff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1, data);
+ data = (pconf->div_frac_start >> 8) & 0xff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2, data);
+ data = (pconf->div_frac_start >> 16) & 0xf;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3, data);
+
+ data = pconf->plllock_cmp & 0xff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1, data);
+
+ data = (pconf->plllock_cmp >> 8) & 0xff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2, data);
+
+ data = (pconf->plllock_cmp >> 16) & 0x3;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3, data);
+
+ data = pconf->plllock_cnt << 1 | 0 << 3; /* plllock_rng */
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN, data);
+
+ data = pconf->pll_vco_count & 0xff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT1, data);
+ data = (pconf->pll_vco_count >> 8) & 0xff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT2, data);
+
+ data = pconf->pll_kvco_count & 0xff;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT1, data);
+ data = (pconf->pll_kvco_count >> 8) & 0x3;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT2, data);
+
+ /*
+ * High nibble configures the post divider internal to the VCO. It's
+ * fixed to divide by 1 for now.
+ *
+ * 0: divided by 1
+ * 1: divided by 2
+ * 2: divided by 4
+ * 3: divided by 8
+ */
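+ *
+ * The low nibble (3) would be the LPF2 portion of this shared
+ * LPF2/POSTDIV register (an assumption based on the register name);
+ * the driver leaves it at this fixed value.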
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV, 0 << 4 | 3);
+
+ if (pconf->ssc_en)
+ pll_db_commit_ssc(pll, pconf);
+
+ wmb(); /* make sure register committed */
+}
+
+/*
+ * VCO clock Callbacks
+ */
+static int dsi_pll_14nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
+ struct dsi_pll_config conf;
+
+ DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_14nm->phy->id, rate,
+ parent_rate);
+
+ dsi_pll_14nm_config_init(&conf);
+ conf.vco_current_rate = rate;
+
+ pll_14nm_dec_frac_calc(pll_14nm, &conf);
+
+ if (conf.ssc_en)
+ pll_14nm_ssc_calc(pll_14nm, &conf);
+
+ pll_14nm_calc_vco_count(pll_14nm, &conf);
+
+ /* Commit the slave DSI PLL registers if we're the master. Note that
+ * we don't lock the slave PLL: we just ensure that the PLL/PHY
+ * registers of the master and slave are identical
+ */
+ if (pll_14nm->phy->usecase == MSM_DSI_PHY_MASTER) {
+ struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
+
+ pll_db_commit_14nm(pll_14nm_slave, &conf);
+ }
+
+ pll_db_commit_14nm(pll_14nm, &conf);
+
+ return 0;
+}
+
+static unsigned long dsi_pll_14nm_vco_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
+ void __iomem *base = pll_14nm->phy->pll_base;
+ u64 vco_rate, multiplier = BIT(20);
+ u32 div_frac_start;
+ u32 dec_start;
+ u64 ref_clk = parent_rate;
+
+ dec_start = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DEC_START);
+ dec_start &= 0x0ff;
+
+ DBG("dec_start = %x", dec_start);
+
+ div_frac_start = (dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3)
+ & 0xf) << 16;
+ div_frac_start |= (dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2)
+ & 0xff) << 8;
+ div_frac_start |= dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1)
+ & 0xff;
+
+ DBG("div_frac_start = %x", div_frac_start);
+
+ vco_rate = ref_clk * dec_start;
+
+ vco_rate += ((ref_clk * div_frac_start) / multiplier);
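+
+ /*
+ * This mirrors pll_14nm_dec_frac_calc(): e.g. dec_start = 93 and
+ * div_frac_start = 786432 with a 19.2 MHz ref give
+ * 93.75 * 19.2 MHz = 1.8 GHz (illustrative).
+ */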
+
+ /*
+ * Recalculating the rate from dec_start and frac_start doesn't land
+ * exactly on the rate we originally set. Convert the frequency to
+ * kHz, round it up and convert it back to Hz.
+ */
+ vco_rate = DIV_ROUND_UP_ULL(vco_rate, 1000) * 1000;
+
+ DBG("returning vco rate = %lu", (unsigned long)vco_rate);
+
+ return (unsigned long)vco_rate;
+}
+
+static int dsi_pll_14nm_vco_prepare(struct clk_hw *hw)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
+ void __iomem *base = pll_14nm->phy->pll_base;
+ void __iomem *cmn_base = pll_14nm->phy->base;
+ bool locked;
+
+ DBG("");
+
+ if (unlikely(pll_14nm->phy->pll_on))
+ return 0;
+
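+ /*
+ * If the PLL registers read back a rate of zero (nothing has been
+ * programmed yet), program the minimum supported rate first.
+ */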
+ if (dsi_pll_14nm_vco_recalc_rate(hw, VCO_REF_CLK_RATE) == 0)
+ dsi_pll_14nm_vco_set_rate(hw, pll_14nm->phy->cfg->min_pll_rate, VCO_REF_CLK_RATE);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VREF_CFG1, 0x10);
+ dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 1);
+
+ locked = pll_14nm_poll_for_ready(pll_14nm, POLL_MAX_READS,
+ POLL_TIMEOUT_US);
+
+ if (unlikely(!locked)) {
+ DRM_DEV_ERROR(&pll_14nm->phy->pdev->dev, "DSI PLL lock failed\n");
+ return -EINVAL;
+ }
+
+ DBG("DSI PLL lock success");
+ pll_14nm->phy->pll_on = true;
+
+ return 0;
+}
+
+static void dsi_pll_14nm_vco_unprepare(struct clk_hw *hw)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
+ void __iomem *cmn_base = pll_14nm->phy->base;
+
+ DBG("");
+
+ if (unlikely(!pll_14nm->phy->pll_on))
+ return;
+
+ dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
+
+ pll_14nm->phy->pll_on = false;
+}
+
+static long dsi_pll_14nm_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *parent_rate)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
+
+ if (rate < pll_14nm->phy->cfg->min_pll_rate)
+ return pll_14nm->phy->cfg->min_pll_rate;
+ else if (rate > pll_14nm->phy->cfg->max_pll_rate)
+ return pll_14nm->phy->cfg->max_pll_rate;
+ else
+ return rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_14nm_vco = {
+ .round_rate = dsi_pll_14nm_clk_round_rate,
+ .set_rate = dsi_pll_14nm_vco_set_rate,
+ .recalc_rate = dsi_pll_14nm_vco_recalc_rate,
+ .prepare = dsi_pll_14nm_vco_prepare,
+ .unprepare = dsi_pll_14nm_vco_unprepare,
+};
+
+/*
+ * N1 and N2 post-divider clock callbacks
+ */
+#define div_mask(width) ((1 << (width)) - 1)
+static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
+ struct dsi_pll_14nm *pll_14nm = postdiv->pll;
+ void __iomem *base = pll_14nm->phy->base;
+ u8 shift = postdiv->shift;
+ u8 width = postdiv->width;
+ u32 val;
+
+ DBG("DSI%d PLL parent rate=%lu", pll_14nm->phy->id, parent_rate);
+
+ val = dsi_phy_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0) >> shift;
+ val &= div_mask(width);
+
+ return divider_recalc_rate(hw, parent_rate, val, NULL,
+ postdiv->flags, width);
+}
+
+static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *prate)
+{
+ struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
+ struct dsi_pll_14nm *pll_14nm = postdiv->pll;
+
+ DBG("DSI%d PLL rate=%lu", pll_14nm->phy->id, rate);
+
+ return divider_round_rate(hw, rate, prate, NULL,
+ postdiv->width,
+ postdiv->flags);
+}
+
+static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
+ struct dsi_pll_14nm *pll_14nm = postdiv->pll;
+ void __iomem *base = pll_14nm->phy->base;
+ spinlock_t *lock = &pll_14nm->postdiv_lock;
+ u8 shift = postdiv->shift;
+ u8 width = postdiv->width;
+ unsigned int value;
+ unsigned long flags = 0;
+ u32 val;
+
+ DBG("DSI%d PLL rate=%lu parent rate=%lu", pll_14nm->phy->id, rate,
+ parent_rate);
+
+ value = divider_get_val(rate, parent_rate, NULL, postdiv->width,
+ postdiv->flags);
+
+ spin_lock_irqsave(lock, flags);
+
+ val = dsi_phy_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
+ val &= ~(div_mask(width) << shift);
+
+ val |= value << shift;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
+
+ /* If we're the master in bonded DSI mode, then the slave PLL's
+ * post-dividers follow the master's post-dividers
+ */
+ if (pll_14nm->phy->usecase == MSM_DSI_PHY_MASTER) {
+ struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
+ void __iomem *slave_base = pll_14nm_slave->phy->base;
+
+ dsi_phy_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
+ }
+
+ spin_unlock_irqrestore(lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_14nm_postdiv = {
+ .recalc_rate = dsi_pll_14nm_postdiv_recalc_rate,
+ .round_rate = dsi_pll_14nm_postdiv_round_rate,
+ .set_rate = dsi_pll_14nm_postdiv_set_rate,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static void dsi_14nm_pll_save_state(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(phy->vco_hw);
+ struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;
+ void __iomem *cmn_base = pll_14nm->phy->base;
+ u32 data;
+
+ data = dsi_phy_read(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
+
+ cached_state->n1postdiv = data & 0xf;
+ cached_state->n2postdiv = (data >> 4) & 0xf;
+
+ DBG("DSI%d PLL save state %x %x", pll_14nm->phy->id,
+ cached_state->n1postdiv, cached_state->n2postdiv);
+
+ cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
+}
+
+static int dsi_14nm_pll_restore_state(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(phy->vco_hw);
+ struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;
+ void __iomem *cmn_base = pll_14nm->phy->base;
+ u32 data;
+ int ret;
+
+ ret = dsi_pll_14nm_vco_set_rate(phy->vco_hw,
+ cached_state->vco_rate, 0);
+ if (ret) {
+ DRM_DEV_ERROR(&pll_14nm->phy->pdev->dev,
+ "restore vco rate failed. ret=%d\n", ret);
+ return ret;
+ }
+
+ data = cached_state->n1postdiv | (cached_state->n2postdiv << 4);
+
+ DBG("DSI%d PLL restore state %x %x", pll_14nm->phy->id,
+ cached_state->n1postdiv, cached_state->n2postdiv);
+
+ dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
+
+ /* also restore post-dividers for slave DSI PLL */
+ if (phy->usecase == MSM_DSI_PHY_MASTER) {
+ struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
+ void __iomem *slave_base = pll_14nm_slave->phy->base;
+
+ dsi_phy_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
+ }
+
+ return 0;
+}
+
+static int dsi_14nm_set_usecase(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_14nm *pll_14nm = to_pll_14nm(phy->vco_hw);
+ void __iomem *base = phy->pll_base;
+ u32 clkbuflr_en, bandgap = 0;
+
+ switch (phy->usecase) {
+ case MSM_DSI_PHY_STANDALONE:
+ clkbuflr_en = 0x1;
+ break;
+ case MSM_DSI_PHY_MASTER:
+ clkbuflr_en = 0x3;
+ pll_14nm->slave = pll_14nm_list[(pll_14nm->phy->id + 1) % DSI_MAX];
+ break;
+ case MSM_DSI_PHY_SLAVE:
+ clkbuflr_en = 0x0;
+ bandgap = 0x3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN, clkbuflr_en);
+ if (bandgap)
+ dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_BANDGAP, bandgap);
+
+ return 0;
+}
+
+static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm,
+ const char *name,
+ const struct clk_hw *parent_hw,
+ unsigned long flags,
+ u8 shift)
+{
+ struct dsi_pll_14nm_postdiv *pll_postdiv;
+ struct device *dev = &pll_14nm->phy->pdev->dev;
+ struct clk_init_data postdiv_init = {
+ .parent_hws = (const struct clk_hw *[]) { parent_hw },
+ .num_parents = 1,
+ .name = name,
+ .flags = flags,
+ .ops = &clk_ops_dsi_pll_14nm_postdiv,
+ };
+ int ret;
+
+ pll_postdiv = devm_kzalloc(dev, sizeof(*pll_postdiv), GFP_KERNEL);
+ if (!pll_postdiv)
+ return ERR_PTR(-ENOMEM);
+
+ pll_postdiv->pll = pll_14nm;
+ pll_postdiv->shift = shift;
+ /* both N1 and N2 postdividers are 4 bits wide */
+ pll_postdiv->width = 4;
+ /* range of each divider is from 1 to 15 */
+ pll_postdiv->flags = CLK_DIVIDER_ONE_BASED;
+ pll_postdiv->hw.init = &postdiv_init;
+
+ ret = devm_clk_hw_register(dev, &pll_postdiv->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &pll_postdiv->hw;
+}
+
+static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm, struct clk_hw **provided_clocks)
+{
+ char clk_name[32];
+ struct clk_init_data vco_init = {
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "ref",
+ },
+ .num_parents = 1,
+ .name = clk_name,
+ .flags = CLK_IGNORE_UNUSED,
+ .ops = &clk_ops_dsi_pll_14nm_vco,
+ };
+ struct device *dev = &pll_14nm->phy->pdev->dev;
+ struct clk_hw *hw, *n1_postdiv, *n1_postdivby2;
+ int ret;
+
+ DBG("DSI%d", pll_14nm->phy->id);
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_14nm->phy->id);
+ pll_14nm->clk_hw.init = &vco_init;
+
+ ret = devm_clk_hw_register(dev, &pll_14nm->clk_hw);
+ if (ret)
+ return ret;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
+
+ /* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */
+ n1_postdiv = pll_14nm_postdiv_register(pll_14nm, clk_name,
+ &pll_14nm->clk_hw, CLK_SET_RATE_PARENT, 0);
+ if (IS_ERR(n1_postdiv))
+ return PTR_ERR(n1_postdiv);
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_14nm->phy->id);
+
+ /* DSI Byte clock = VCO_CLK / N1 / 8 */
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
+ n1_postdiv, CLK_SET_RATE_PARENT, 1, 8);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ provided_clocks[DSI_BYTE_PLL_CLK] = hw;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dn1_postdivby2_clk", pll_14nm->phy->id);
+
+ /*
+ * Skip the mux for now, force DSICLK_SEL to 1 and add a /2 divider
+ * on the way. Don't let it set the parent's rate.
+ */
+ n1_postdivby2 = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, n1_postdiv, 0, 1, 2);
+ if (IS_ERR(n1_postdivby2))
+ return PTR_ERR(n1_postdivby2);
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_14nm->phy->id);
+
+ /* DSI pixel clock = VCO_CLK / N1 / 2 / N2
+ * This is the output of N2 post-divider, bits 4-7 in
+ * REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set parent.
+ */
+ hw = pll_14nm_postdiv_register(pll_14nm, clk_name, n1_postdivby2,
+ 0, 4);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
+
+ return 0;
+}
+
+static int dsi_pll_14nm_init(struct msm_dsi_phy *phy)
+{
+ struct platform_device *pdev = phy->pdev;
+ struct dsi_pll_14nm *pll_14nm;
+ int ret;
+
+ if (!pdev)
+ return -ENODEV;
+
+ pll_14nm = devm_kzalloc(&pdev->dev, sizeof(*pll_14nm), GFP_KERNEL);
+ if (!pll_14nm)
+ return -ENOMEM;
+
+ DBG("PLL%d", phy->id);
+
+ pll_14nm_list[phy->id] = pll_14nm;
+
+ spin_lock_init(&pll_14nm->postdiv_lock);
+
+ pll_14nm->phy = phy;
+
+ ret = pll_14nm_register(pll_14nm, phy->provided_clocks->hws);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+ return ret;
+ }
+
+ phy->vco_hw = &pll_14nm->clk_hw;
+
+ return 0;
+}
+
+static void dsi_14nm_dphy_set_timing(struct msm_dsi_phy *phy,
+ struct msm_dsi_dphy_timing *timing,
+ int lane_idx)
+{
+ void __iomem *base = phy->lane_base;
+ bool clk_ln = (lane_idx == PHY_14NM_CKLN_IDX);
+ u32 zero = clk_ln ? timing->clk_zero : timing->hs_zero;
+ u32 prepare = clk_ln ? timing->clk_prepare : timing->hs_prepare;
+ u32 trail = clk_ln ? timing->clk_trail : timing->hs_trail;
+ u32 rqst = clk_ln ? timing->hs_rqst_ckln : timing->hs_rqst;
+ u32 prep_dly = clk_ln ? timing->hs_prep_dly_ckln : timing->hs_prep_dly;
+ u32 halfbyte_en = clk_ln ? timing->hs_halfbyte_en_ckln :
+ timing->hs_halfbyte_en;
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(zero));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(prepare));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(trail));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(rqst));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG0(lane_idx),
+ DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(prep_dly));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG1(lane_idx),
+ halfbyte_en ? DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN : 0);
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(lane_idx),
+ DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(0xa0));
+}
+
+static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ u32 data;
+ int i;
+ int ret;
+ void __iomem *base = phy->base;
+ void __iomem *lane_base = phy->lane_base;
+ u32 glbl_test_ctrl;
+
+ if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) {
+ DRM_DEV_ERROR(&phy->pdev->dev,
+ "%s: D-PHY timing calculation failed\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ data = 0x1c;
+ if (phy->usecase != MSM_DSI_PHY_STANDALONE)
+ data |= DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(32);
+ dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data);
+
+ dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0x1);
+
+ /* 4 data lanes + 1 clk lane configuration */
+ for (i = 0; i < 5; i++) {
+ dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_VREG_CNTRL(i),
+ 0x1d);
+
+ dsi_phy_write(lane_base +
+ REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(i), 0xff);
+ dsi_phy_write(lane_base +
+ REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(i),
+ (i == PHY_14NM_CKLN_IDX) ? 0x00 : 0x06);
+
+ dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG3(i),
+ (i == PHY_14NM_CKLN_IDX) ? 0x8f : 0x0f);
+ dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG2(i), 0x10);
+ dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_DATAPATH(i),
+ 0);
+ dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_STR(i),
+ 0x88);
+
+ dsi_14nm_dphy_set_timing(phy, timing, i);
+ }
+
+ /* Make sure the PLL is not started */
+ dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0x00);
+
+ wmb(); /* make sure everything is written before reset and enable */
+
+ /* reset digital block */
+ dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x80);
+ wmb(); /* ensure reset is asserted */
+ udelay(100);
+ dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x00);
+
+ glbl_test_ctrl = dsi_phy_read(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL);
+ if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_SLAVE)
+ glbl_test_ctrl |= DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL;
+ else
+ glbl_test_ctrl &= ~DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL;
+ dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, glbl_test_ctrl);
+ ret = dsi_14nm_set_usecase(phy);
+ if (ret) {
+ DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* Remove power down from PLL and all lanes */
+ dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0xff);
+
+ return 0;
+}
+
+static void dsi_14nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0);
+ dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0);
+
+ /* ensure that the phy is completely disabled */
+ wmb();
+}
+
+static const struct regulator_bulk_data dsi_phy_14nm_17mA_regulators[] = {
+ { .supply = "vcca", .init_load_uA = 17000 },
+};
+
+static const struct regulator_bulk_data dsi_phy_14nm_73p4mA_regulators[] = {
+ { .supply = "vcca", .init_load_uA = 73400 },
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_14nm_17mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_14nm_17mA_regulators),
+ .ops = {
+ .enable = dsi_14nm_phy_enable,
+ .disable = dsi_14nm_phy_disable,
+ .pll_init = dsi_pll_14nm_init,
+ .save_pll_state = dsi_14nm_pll_save_state,
+ .restore_pll_state = dsi_14nm_pll_restore_state,
+ },
+ .min_pll_rate = VCO_MIN_RATE,
+ .max_pll_rate = VCO_MAX_RATE,
+ .io_start = { 0x994400, 0x996400 },
+ .num_dsi_phy = 2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_14nm_73p4mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_14nm_73p4mA_regulators),
+ .ops = {
+ .enable = dsi_14nm_phy_enable,
+ .disable = dsi_14nm_phy_disable,
+ .pll_init = dsi_pll_14nm_init,
+ .save_pll_state = dsi_14nm_pll_save_state,
+ .restore_pll_state = dsi_14nm_pll_restore_state,
+ },
+ .min_pll_rate = VCO_MIN_RATE,
+ .max_pll_rate = VCO_MAX_RATE,
+ .io_start = { 0xc994400, 0xc996400 },
+ .num_dsi_phy = 2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_14nm_17mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_14nm_17mA_regulators),
+ .ops = {
+ .enable = dsi_14nm_phy_enable,
+ .disable = dsi_14nm_phy_disable,
+ .pll_init = dsi_pll_14nm_init,
+ .save_pll_state = dsi_14nm_pll_save_state,
+ .restore_pll_state = dsi_14nm_pll_restore_state,
+ },
+ .min_pll_rate = VCO_MIN_RATE,
+ .max_pll_rate = VCO_MAX_RATE,
+ .io_start = { 0x1a94400, 0x1a96400 },
+ .num_dsi_phy = 2,
+};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
new file mode 100644
index 000000000..c9752b991
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+#include "dsi_phy_20nm.xml.h"
+
+static void dsi_20nm_dphy_set_timing(struct msm_dsi_phy *phy,
+ struct msm_dsi_dphy_timing *timing)
+{
+ void __iomem *base = phy->base;
+
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_0,
+ DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_1,
+ DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_2,
+ DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+ if (timing->clk_zero & BIT(8))
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_3,
+ DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_4,
+ DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_5,
+ DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_6,
+ DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_7,
+ DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_8,
+ DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_9,
+ DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_10,
+ DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_11,
+ DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+}
+
+static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *base = phy->reg_base;
+
+ if (!enable) {
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+ return;
+ }
+
+ if (phy->regulator_ldo_mode) {
+ dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x1d);
+ return;
+ }
+
+ /* non LDO mode */
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_1, 0x03);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_2, 0x03);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_3, 0x00);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_4, 0x20);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0x01);
+ dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x00);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_0, 0x03);
+}
+
+static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ int i;
+ void __iomem *base = phy->base;
+ u32 cfg_4[4] = {0x20, 0x40, 0x20, 0x00};
+ u32 val;
+
+ DBG("");
+
+ if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
+ DRM_DEV_ERROR(&phy->pdev->dev,
+ "%s: D-PHY timing calculation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ dsi_20nm_phy_regulator_ctrl(phy, true);
+
+ dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_0, 0xff);
+
+ val = dsi_phy_read(base + REG_DSI_20nm_PHY_GLBL_TEST_CTRL);
+ if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_STANDALONE)
+ val |= DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
+ else
+ val &= ~DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
+ dsi_phy_write(base + REG_DSI_20nm_PHY_GLBL_TEST_CTRL, val);
+
+ for (i = 0; i < 4; i++) {
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_3(i),
+ (i >> 1) * 0x40);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_0(i), 0x01);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_1(i), 0x46);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_0(i), 0x02);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_1(i), 0xa0);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_4(i), cfg_4[i]);
+ }
+
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_3, 0x80);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR0, 0x01);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR1, 0x46);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_0, 0x00);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_1, 0xa0);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_2, 0x00);
+ dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_4, 0x00);
+
+ dsi_20nm_dphy_set_timing(phy, timing);
+
+ dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_1, 0x00);
+
+ dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_1, 0x06);
+
+ /* make sure everything is written before enable */
+ wmb();
+ dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_0, 0x7f);
+
+ return 0;
+}
+
+static void dsi_20nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ dsi_phy_write(phy->base + REG_DSI_20nm_PHY_CTRL_0, 0);
+ dsi_20nm_phy_regulator_ctrl(phy, false);
+}
+
+static const struct regulator_bulk_data dsi_phy_20nm_regulators[] = {
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+ { .supply = "vcca", .init_load_uA = 10000 }, /* 1.0 V */
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
+ .has_phy_regulator = true,
+ .regulator_data = dsi_phy_20nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_20nm_regulators),
+ .ops = {
+ .enable = dsi_20nm_phy_enable,
+ .disable = dsi_20nm_phy_disable,
+ },
+ .io_start = { 0xfd998500, 0xfd9a0500 },
+ .num_dsi_phy = 2,
+};
+
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
new file mode 100644
index 000000000..4c1bf55c5
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -0,0 +1,822 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+#include "dsi_phy_28nm.xml.h"
+
+/*
+ * DSI PLL 28nm - clock diagram (eg: DSI0):
+ *
+ * dsi0analog_postdiv_clk
+ * | dsi0indirect_path_div2_clk
+ * | |
+ * +------+ | +----+ | |\ dsi0byte_mux
+ * dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \ |
+ * | +------+ +----+ | m| | +----+
+ * | | u|--o--| /4 |-- dsi0pllbyte
+ * | | x| +----+
+ * o--------------------------| /
+ * | |/
+ * | +------+
+ * o----------| DIV3 |------------------------- dsi0pll
+ * +------+
+ */
+
+#define POLL_MAX_READS 10
+#define POLL_TIMEOUT_US 50
+
+#define VCO_REF_CLK_RATE 19200000
+#define VCO_MIN_RATE 350000000
+#define VCO_MAX_RATE 750000000
+
+/* v2.0.0 28nm LP implementation */
+#define DSI_PHY_28NM_QUIRK_PHY_LP BIT(0)
+
+#define LPFR_LUT_SIZE 10
+struct lpfr_cfg {
+ unsigned long vco_rate;
+ u32 resistance;
+};
+
+/* Loop filter resistance, keyed by the maximum VCO rate each entry covers: */
+static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = {
+ { 479500000, 8 },
+ { 480000000, 11 },
+ { 575500000, 8 },
+ { 576000000, 12 },
+ { 610500000, 8 },
+ { 659500000, 9 },
+ { 671500000, 10 },
+ { 672000000, 14 },
+ { 708500000, 10 },
+ { 750000000, 11 },
+};
+
+struct pll_28nm_cached_state {
+ unsigned long vco_rate;
+ u8 postdiv3;
+ u8 postdiv1;
+ u8 byte_mux;
+};
+
+struct dsi_pll_28nm {
+ struct clk_hw clk_hw;
+
+ struct msm_dsi_phy *phy;
+
+ struct pll_28nm_cached_state cached_state;
+};
+
+#define to_pll_28nm(x) container_of(x, struct dsi_pll_28nm, clk_hw)
+
+static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
+ u32 nb_tries, u32 timeout_us)
+{
+ bool pll_locked = false;
+ u32 val;
+
+ while (nb_tries--) {
+ val = dsi_phy_read(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_STATUS);
+ pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);
+
+ if (pll_locked)
+ break;
+
+ udelay(timeout_us);
+ }
+ DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+
+ return pll_locked;
+}
+
+static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
+{
+ void __iomem *base = pll_28nm->phy->pll_base;
+
+ /*
+ * Add HW recommended delays after toggling the software
+ * reset bit off and back on.
+ */
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,
+ DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);
+}
+
+/*
+ * Clock Callbacks
+ */
+static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+ struct device *dev = &pll_28nm->phy->pdev->dev;
+ void __iomem *base = pll_28nm->phy->pll_base;
+ unsigned long div_fbx1000, gen_vco_clk;
+ u32 refclk_cfg, frac_n_mode, frac_n_value;
+ u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
+ u32 cal_cfg10, cal_cfg11;
+ u32 rem;
+ int i;
+
+ VERB("rate=%lu, parent's=%lu", rate, parent_rate);
+
+ /* Force postdiv2 to be div-4 */
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG, 3);
+
+ /* Configure the Loop filter resistance */
+ for (i = 0; i < LPFR_LUT_SIZE; i++)
+ if (rate <= lpfr_lut[i].vco_rate)
+ break;
+ if (i == LPFR_LUT_SIZE) {
+ DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
+ rate);
+ return -EINVAL;
+ }
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFR_CFG, lpfr_lut[i].resistance);
+
+ /* Loop filter capacitance values : c1 and c2 */
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG, 0x70);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG, 0x15);
+
+ rem = rate % VCO_REF_CLK_RATE;
+ if (rem) {
+ refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
+ frac_n_mode = 1;
+ div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);
+ gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);
+ } else {
+ refclk_cfg = 0x0;
+ frac_n_mode = 0;
+ div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);
+ gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);
+ }
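+
+ /*
+ * div_fbx1000 is the feedback divider scaled by 1000. In fractional
+ * mode the ref clock doubler gives a 38.4 MHz comparison clock, so
+ * e.g. rate = 500 MHz yields div_fbx1000 = 13020, i.e. a divider of
+ * 13.020 (illustrative); in integer mode the comparison clock is
+ * 19.2 MHz.
+ */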
+
+ DBG("refclk_cfg = %d", refclk_cfg);
+
+ rem = div_fbx1000 % 1000;
+ frac_n_value = (rem << 16) / 1000;
+
+ DBG("div_fb = %lu", div_fbx1000);
+ DBG("frac_n_value = %d", frac_n_value);
+
+ DBG("Generated VCO Clock: %lu", gen_vco_clk);
+ rem = 0;
+ sdm_cfg1 = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
+ sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
+ if (frac_n_mode) {
+ sdm_cfg0 = 0x0;
+ sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0);
+ sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(
+ (u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
+ sdm_cfg3 = frac_n_value >> 8;
+ sdm_cfg2 = frac_n_value & 0xff;
+ } else {
+ sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP;
+ sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(
+ (u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
+ sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0);
+ sdm_cfg2 = 0;
+ sdm_cfg3 = 0;
+ }
+
+ DBG("sdm_cfg0=%d", sdm_cfg0);
+ DBG("sdm_cfg1=%d", sdm_cfg1);
+ DBG("sdm_cfg2=%d", sdm_cfg2);
+ DBG("sdm_cfg3=%d", sdm_cfg3);
+
+ cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000));
+ cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);
+ DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG, 0x02);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG3, 0x2b);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG4, 0x06);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,
+ DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,
+ DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);
+
+ /* Add hardware recommended delay for correct PLL configuration */
+ if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
+ udelay(1000);
+ else
+ udelay(1);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG, refclk_cfg);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG, 0x31);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0, sdm_cfg0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG0, 0x12);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG6, 0x30);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG7, 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG8, 0x60);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG9, 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG10, cal_cfg10 & 0xff);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG11, cal_cfg11 & 0xff);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG, 0x20);
+
+ return 0;
+}
+
+static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+
+ return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
+ POLL_TIMEOUT_US);
+}
+
+static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+ void __iomem *base = pll_28nm->phy->pll_base;
+ u32 sdm0, doubler, sdm_byp_div;
+ u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
+ u32 ref_clk = VCO_REF_CLK_RATE;
+ unsigned long vco_rate;
+
+ VERB("parent_rate=%lu", parent_rate);
+
+ /* Check to see if the ref clk doubler is enabled */
+ doubler = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &
+ DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
+ ref_clk += (doubler * VCO_REF_CLK_RATE);
+
+ /* see if it is integer mode or sdm mode */
+ sdm0 = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
+ if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {
+ /* integer mode */
+ sdm_byp_div = FIELD(
+ dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),
+ DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;
+ vco_rate = ref_clk * sdm_byp_div;
+ } else {
+ /* sdm mode */
+ sdm_dc_off = FIELD(
+ dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),
+ DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);
+ DBG("sdm_dc_off = %d", sdm_dc_off);
+ sdm2 = FIELD(dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),
+ DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);
+ sdm3 = FIELD(dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),
+ DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);
+ sdm_freq_seed = (sdm3 << 8) | sdm2;
+ DBG("sdm_freq_seed = %d", sdm_freq_seed);
+
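+ /*
+ * vco = ref * (DC_OFFSET + 1) + ref * FREQ_SEED / 2^16, mirroring
+ * the fractional-mode programming in dsi_pll_28nm_clk_set_rate().
+ */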
+ vco_rate = (ref_clk * (sdm_dc_off + 1)) +
+ mult_frac(ref_clk, sdm_freq_seed, BIT(16));
+ DBG("vco rate = %lu", vco_rate);
+ }
+
+ DBG("returning vco rate = %lu", vco_rate);
+
+ return vco_rate;
+}
+
+static int _dsi_pll_28nm_vco_prepare_hpm(struct dsi_pll_28nm *pll_28nm)
+{
+ struct device *dev = &pll_28nm->phy->pdev->dev;
+ void __iomem *base = pll_28nm->phy->pll_base;
+ u32 max_reads = 5, timeout_us = 100;
+ bool locked;
+ u32 val;
+ int i;
+
+ DBG("id=%d", pll_28nm->phy->id);
+
+ pll_28nm_software_reset(pll_28nm);
+
+ /*
+ * PLL power up sequence.
+ * Add necessary delays recommended by hardware.
+ */
+ val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
+
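+ /*
+ * Try the lock-detect sequence up to twice; on failure the retry
+ * path below re-runs the power-up sequence with an extra LDO
+ * power-down toggle.
+ */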
+ for (i = 0; i < 2; i++) {
+ /* DSI Uniphy lock detect setting */
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
+ 0x0c, 100);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
+
+ /* poll for PLL ready status */
+ locked = pll_28nm_poll_for_ready(pll_28nm, max_reads,
+ timeout_us);
+ if (locked)
+ break;
+
+ pll_28nm_software_reset(pll_28nm);
+
+ /*
+ * PLL power up sequence.
+ * Add necessary delays recommended by hardware.
+ */
+ val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 250);
+
+ val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
+ }
+
+ if (unlikely(!locked))
+ DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
+ else
+ DBG("DSI PLL Lock success");
+
+ return locked ? 0 : -EINVAL;
+}
+
+static int dsi_pll_28nm_vco_prepare_hpm(struct clk_hw *hw)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+ int i, ret;
+
+ if (unlikely(pll_28nm->phy->pll_on))
+ return 0;
+
+ for (i = 0; i < 3; i++) {
+ ret = _dsi_pll_28nm_vco_prepare_hpm(pll_28nm);
+ if (!ret) {
+ pll_28nm->phy->pll_on = true;
+ return 0;
+ }
+ }
+
+ return ret;
+}
+
+static int dsi_pll_28nm_vco_prepare_lp(struct clk_hw *hw)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+ struct device *dev = &pll_28nm->phy->pdev->dev;
+ void __iomem *base = pll_28nm->phy->pll_base;
+ bool locked;
+ u32 max_reads = 10, timeout_us = 50;
+ u32 val;
+
+ DBG("id=%d", pll_28nm->phy->id);
+
+ if (unlikely(pll_28nm->phy->pll_on))
+ return 0;
+
+ pll_28nm_software_reset(pll_28nm);
+
+ /*
+ * PLL power up sequence.
+ * Add necessary delays recommended by hardware.
+ */
+ dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34, 500);
+
+ val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
+ dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
+ dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |
+ DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
+ dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+ /* DSI PLL toggle lock detect setting */
+ dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x04, 500);
+ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x05, 512);
+
+ locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
+
+ if (unlikely(!locked)) {
+ DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
+ return -EINVAL;
+ }
+
+ DBG("DSI PLL lock success");
+ pll_28nm->phy->pll_on = true;
+
+ return 0;
+}
+
+static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+
+ DBG("id=%d", pll_28nm->phy->id);
+
+ if (unlikely(!pll_28nm->phy->pll_on))
+ return;
+
+ dsi_phy_write(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_GLB_CFG, 0x00);
+
+ pll_28nm->phy->pll_on = false;
+}
+
+static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *parent_rate)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+
+ if (rate < pll_28nm->phy->cfg->min_pll_rate)
+ return pll_28nm->phy->cfg->min_pll_rate;
+ else if (rate > pll_28nm->phy->cfg->max_pll_rate)
+ return pll_28nm->phy->cfg->max_pll_rate;
+ else
+ return rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_28nm_vco_hpm = {
+ .round_rate = dsi_pll_28nm_clk_round_rate,
+ .set_rate = dsi_pll_28nm_clk_set_rate,
+ .recalc_rate = dsi_pll_28nm_clk_recalc_rate,
+ .prepare = dsi_pll_28nm_vco_prepare_hpm,
+ .unprepare = dsi_pll_28nm_vco_unprepare,
+ .is_enabled = dsi_pll_28nm_clk_is_enabled,
+};
+
+static const struct clk_ops clk_ops_dsi_pll_28nm_vco_lp = {
+ .round_rate = dsi_pll_28nm_clk_round_rate,
+ .set_rate = dsi_pll_28nm_clk_set_rate,
+ .recalc_rate = dsi_pll_28nm_clk_recalc_rate,
+ .prepare = dsi_pll_28nm_vco_prepare_lp,
+ .unprepare = dsi_pll_28nm_vco_unprepare,
+ .is_enabled = dsi_pll_28nm_clk_is_enabled,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static void dsi_28nm_pll_save_state(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
+ struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+ void __iomem *base = pll_28nm->phy->pll_base;
+
+ cached_state->postdiv3 =
+ dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
+ cached_state->postdiv1 =
+ dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
+ cached_state->byte_mux = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
+ if (dsi_pll_28nm_clk_is_enabled(phy->vco_hw))
+ cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
+ else
+ cached_state->vco_rate = 0;
+}
+
+static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
+ struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+ void __iomem *base = pll_28nm->phy->pll_base;
+ int ret;
+
+ ret = dsi_pll_28nm_clk_set_rate(phy->vco_hw,
+ cached_state->vco_rate, 0);
+ if (ret) {
+ DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev,
+ "restore vco rate failed. ret=%d\n", ret);
+ return ret;
+ }
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
+ cached_state->postdiv3);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
+ cached_state->postdiv1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
+ cached_state->byte_mux);
+
+ return 0;
+}
+
+static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
+{
+ char clk_name[32];
+ struct clk_init_data vco_init = {
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "ref", .name = "xo",
+ },
+ .num_parents = 1,
+ .name = clk_name,
+ .flags = CLK_IGNORE_UNUSED,
+ };
+ struct device *dev = &pll_28nm->phy->pdev->dev;
+ struct clk_hw *hw, *analog_postdiv, *indirect_path_div2, *byte_mux;
+ int ret;
+
+ DBG("%d", pll_28nm->phy->id);
+
+ if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
+ vco_init.ops = &clk_ops_dsi_pll_28nm_vco_lp;
+ else
+ vco_init.ops = &clk_ops_dsi_pll_28nm_vco_hpm;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id);
+ pll_28nm->clk_hw.init = &vco_init;
+ ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw);
+ if (ret)
+ return ret;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
+ analog_postdiv = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_28nm->clk_hw, CLK_SET_RATE_PARENT,
+ pll_28nm->phy->pll_base +
+ REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
+ 0, 4, 0, NULL);
+ if (IS_ERR(analog_postdiv))
+ return PTR_ERR(analog_postdiv);
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
+ indirect_path_div2 = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, analog_postdiv, CLK_SET_RATE_PARENT, 1, 2);
+ if (IS_ERR(indirect_path_div2))
+ return PTR_ERR(indirect_path_div2);
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id);
+ hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base +
+ REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
+ 0, 8, 0, NULL);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dbyte_mux", pll_28nm->phy->id);
+ byte_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
+ ((const struct clk_hw *[]){
+ &pll_28nm->clk_hw,
+ indirect_path_div2,
+ }), 2, CLK_SET_RATE_PARENT, pll_28nm->phy->pll_base +
+ REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
+ if (IS_ERR(byte_mux))
+ return PTR_ERR(byte_mux);
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id);
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
+ byte_mux, CLK_SET_RATE_PARENT, 1, 4);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ provided_clocks[DSI_BYTE_PLL_CLK] = hw;
+
+ return 0;
+}
+
+static int dsi_pll_28nm_init(struct msm_dsi_phy *phy)
+{
+ struct platform_device *pdev = phy->pdev;
+ struct dsi_pll_28nm *pll_28nm;
+ int ret;
+
+ if (!pdev)
+ return -ENODEV;
+
+ pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
+ if (!pll_28nm)
+ return -ENOMEM;
+
+ pll_28nm->phy = phy;
+
+ ret = pll_28nm_register(pll_28nm, phy->provided_clocks->hws);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+ return ret;
+ }
+
+ phy->vco_hw = &pll_28nm->clk_hw;
+
+ return 0;
+}
+
+static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
+ struct msm_dsi_dphy_timing *timing)
+{
+ void __iomem *base = phy->base;
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
+ DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
+ DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
+ DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+ if (timing->clk_zero & BIT(8))
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
+ DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
+ DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
+ DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
+ DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
+ DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
+ DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
+ DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
+ DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
+ DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+}
+
+static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->reg_base;
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
+ dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
+}
+
+static void dsi_28nm_phy_regulator_enable_ldo(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->reg_base;
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0x7);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
+
+ if (phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
+ dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x05);
+ else
+ dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x0d);
+}
+
+static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
+{
+ if (!enable) {
+ dsi_phy_write(phy->reg_base +
+ REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
+ return;
+ }
+
+ if (phy->regulator_ldo_mode)
+ dsi_28nm_phy_regulator_enable_ldo(phy);
+ else
+ dsi_28nm_phy_regulator_enable_dcdc(phy);
+}
+
+static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ int i;
+ void __iomem *base = phy->base;
+ u32 val;
+
+ DBG("");
+
+ if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
+ DRM_DEV_ERROR(&phy->pdev->dev,
+ "%s: D-PHY timing calculation failed\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);
+
+ dsi_28nm_phy_regulator_ctrl(phy, true);
+
+ dsi_28nm_dphy_set_timing(phy, timing);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);
+
+ for (i = 0; i < 4; i++) {
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
+ }
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_4, 0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
+
+ dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
+
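+ /*
+ * Select the bit clock source: DSI_1 acting as slave in bonded mode
+ * clears BITCLK_HS_SEL, presumably because the slave PHY sources its
+ * bit clock from the master; standalone/master PHYs set it.
+ */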
+ val = dsi_phy_read(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL);
+ if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_SLAVE)
+ val &= ~DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
+ else
+ val |= DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
+ dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, val);
+
+ return 0;
+}
+
+static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
+ dsi_28nm_phy_regulator_ctrl(phy, false);
+
+ /*
+ * Wait for the register writes to complete in order to
+ * ensure that the PHY is completely disabled
+ */
+ wmb();
+}
+
+static const struct regulator_bulk_data dsi_phy_28nm_regulators[] = {
+ { .supply = "vddio", .init_load_uA = 100000 },
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
+ .has_phy_regulator = true,
+ .regulator_data = dsi_phy_28nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
+ .ops = {
+ .enable = dsi_28nm_phy_enable,
+ .disable = dsi_28nm_phy_disable,
+ .pll_init = dsi_pll_28nm_init,
+ .save_pll_state = dsi_28nm_pll_save_state,
+ .restore_pll_state = dsi_28nm_pll_restore_state,
+ },
+ .min_pll_rate = VCO_MIN_RATE,
+ .max_pll_rate = VCO_MAX_RATE,
+ .io_start = { 0xfd922b00, 0xfd923100 },
+ .num_dsi_phy = 2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
+ .has_phy_regulator = true,
+ .regulator_data = dsi_phy_28nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
+ .ops = {
+ .enable = dsi_28nm_phy_enable,
+ .disable = dsi_28nm_phy_disable,
+ .pll_init = dsi_pll_28nm_init,
+ .save_pll_state = dsi_28nm_pll_save_state,
+ .restore_pll_state = dsi_28nm_pll_restore_state,
+ },
+ .min_pll_rate = VCO_MIN_RATE,
+ .max_pll_rate = VCO_MAX_RATE,
+ .io_start = { 0x1a94400, 0x1a96400 },
+ .num_dsi_phy = 2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
+ .has_phy_regulator = true,
+ .regulator_data = dsi_phy_28nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
+ .ops = {
+ .enable = dsi_28nm_phy_enable,
+ .disable = dsi_28nm_phy_disable,
+ .pll_init = dsi_pll_28nm_init,
+ .save_pll_state = dsi_28nm_pll_save_state,
+ .restore_pll_state = dsi_28nm_pll_restore_state,
+ },
+ .min_pll_rate = VCO_MIN_RATE,
+ .max_pll_rate = VCO_MAX_RATE,
+ .io_start = { 0x1a98500 },
+ .num_dsi_phy = 1,
+ .quirks = DSI_PHY_28NM_QUIRK_PHY_LP,
+};
+
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
new file mode 100644
index 000000000..26c08047e
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -0,0 +1,660 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+#include "dsi_phy_28nm_8960.xml.h"
+
+/*
+ * DSI PLL 28nm (8960/A family) - clock diagram (eg: DSI1):
+ *
+ *
+ * +------+
+ * dsi1vco_clk ----o-----| DIV1 |---dsi1pllbit (not exposed as clock)
+ * F * byte_clk | +------+
+ * | bit clock divider (F / 8)
+ * |
+ * | +------+
+ *                  o-----| DIV2 |---dsi1pllbyte---o---> To byte RCG
+ * | +------+ | (sets parent rate)
+ * | byte clock divider (F) |
+ * | |
+ * | o---> To esc RCG
+ * | (doesn't set parent rate)
+ * |
+ * | +------+
+ *                  o-----| DIV3 |----dsi1pll------o---> To dsi RCG
+ * +------+ | (sets parent rate)
+ * dsi clock divider (F * magic) |
+ * |
+ * o---> To pixel rcg
+ * (doesn't set parent rate)
+ */
+
+#define POLL_MAX_READS 8000
+#define POLL_TIMEOUT_US 1
+
+#define VCO_REF_CLK_RATE 27000000
+#define VCO_MIN_RATE 600000000
+#define VCO_MAX_RATE 1200000000
+
+#define VCO_PREF_DIV_RATIO 27
+
+struct pll_28nm_cached_state {
+ unsigned long vco_rate;
+ u8 postdiv3;
+ u8 postdiv2;
+ u8 postdiv1;
+};
+
+struct clk_bytediv {
+ struct clk_hw hw;
+ void __iomem *reg;
+};
+
+struct dsi_pll_28nm {
+ struct clk_hw clk_hw;
+
+ struct msm_dsi_phy *phy;
+
+ struct pll_28nm_cached_state cached_state;
+};
+
+#define to_pll_28nm(x) container_of(x, struct dsi_pll_28nm, clk_hw)
+
+static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
+ int nb_tries, int timeout_us)
+{
+ bool pll_locked = false;
+ u32 val;
+
+ while (nb_tries--) {
+ val = dsi_phy_read(pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_RDY);
+ pll_locked = !!(val & DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY);
+
+ if (pll_locked)
+ break;
+
+ udelay(timeout_us);
+ }
+ DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+
+ return pll_locked;
+}
+
+/*
+ * Clock Callbacks
+ */
+static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+ void __iomem *base = pll_28nm->phy->pll_base;
+ u32 val, temp, fb_divider;
+
+ DBG("rate=%lu, parent's=%lu", rate, parent_rate);
+
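+ /*
+ * Worked example (illustrative numbers, not from HW docs): for
+ * rate = 1080000000 with the 27 MHz reference,
+ * fb_divider = (108000000 * 27) / 2700000 = 1080, programmed as
+ * 1080 / 2 - 1 = 539 across CTRL_1 (bits 7:0) and CTRL_2 (bits 2:0).
+ */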
+ temp = rate / 10;
+ val = VCO_REF_CLK_RATE / 10;
+ fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;
+ fb_divider = fb_divider / 2 - 1;
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1,
+ fb_divider & 0xff);
+
+ val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);
+
+ val |= (fb_divider >> 8) & 0x07;
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2,
+ val);
+
+ val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
+
+ val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f;
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3,
+ val);
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6,
+ 0xf);
+
+ val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
+ val |= 0x7 << 4;
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
+ val);
+
+ return 0;
+}
+
+static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+
+ return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
+ POLL_TIMEOUT_US);
+}
+
+static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+ void __iomem *base = pll_28nm->phy->pll_base;
+ unsigned long vco_rate;
+ u32 status, fb_divider, temp, ref_divider;
+
+ VERB("parent_rate=%lu", parent_rate);
+
+ status = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0);
+
+ if (status & DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE) {
+ fb_divider = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1);
+ fb_divider &= 0xff;
+ temp = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2) & 0x07;
+ fb_divider = (temp << 8) | fb_divider;
+ fb_divider += 1;
+
+ ref_divider = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
+ ref_divider &= 0x3f;
+ ref_divider += 1;
+
+ /* multiply by 2 */
+ vco_rate = (parent_rate / ref_divider) * fb_divider * 2;
+ } else {
+ vco_rate = 0;
+ }
+
+ DBG("returning vco rate = %lu", vco_rate);
+
+ return vco_rate;
+}
+
+static int dsi_pll_28nm_vco_prepare(struct clk_hw *hw)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+ struct device *dev = &pll_28nm->phy->pdev->dev;
+ void __iomem *base = pll_28nm->phy->pll_base;
+ bool locked;
+ unsigned int bit_div, byte_div;
+ int max_reads = 1000, timeout_us = 100;
+ u32 val;
+
+ DBG("id=%d", pll_28nm->phy->id);
+
+ if (unlikely(pll_28nm->phy->pll_on))
+ return 0;
+
+ /*
+ * before enabling the PLL, configure the bit clock divider since we
+ * don't expose it as a clock to the outside world
+ * 1: read back the byte clock divider that should already be set
+ * 2: divide by 8 to get bit clock divider
+ * 3: write it to POSTDIV1
+ */
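+ /*
+ * Illustrative example: if CTRL_9 reads 0x1f, the byte divider is 32,
+ * the bit divider is 32 / 8 = 4, and 4 - 1 = 3 lands in CTRL_8[3:0].
+ */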
+ val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
+ byte_div = val + 1;
+ bit_div = byte_div / 8;
+
+ val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
+ val &= ~0xf;
+ val |= (bit_div - 1);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8, val);
+
+ /* enable the PLL */
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0,
+ DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);
+
+ locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
+
+ if (unlikely(!locked)) {
+ DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
+ return -EINVAL;
+ }
+
+ DBG("DSI PLL lock success");
+ pll_28nm->phy->pll_on = true;
+
+ return 0;
+}
+
+static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+
+ DBG("id=%d", pll_28nm->phy->id);
+
+ if (unlikely(!pll_28nm->phy->pll_on))
+ return;
+
+ dsi_phy_write(pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0, 0x00);
+
+ pll_28nm->phy->pll_on = false;
+}
+
+static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *parent_rate)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+
+ if (rate < pll_28nm->phy->cfg->min_pll_rate)
+ return pll_28nm->phy->cfg->min_pll_rate;
+ else if (rate > pll_28nm->phy->cfg->max_pll_rate)
+ return pll_28nm->phy->cfg->max_pll_rate;
+ else
+ return rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
+ .round_rate = dsi_pll_28nm_clk_round_rate,
+ .set_rate = dsi_pll_28nm_clk_set_rate,
+ .recalc_rate = dsi_pll_28nm_clk_recalc_rate,
+ .prepare = dsi_pll_28nm_vco_prepare,
+ .unprepare = dsi_pll_28nm_vco_unprepare,
+ .is_enabled = dsi_pll_28nm_clk_is_enabled,
+};
+
+/*
+ * Custom byte clock divider clk_ops
+ *
+ * This clock is the entry point to configuring the PLL. The user (dsi host)
+ * will set this clock's rate to the desired byte clock rate. The VCO lock
+ * frequency is a multiple of the byte clock rate. The multiplication factor
+ * (shown as F in the diagram above) is a function of the byte clock rate.
+ *
+ * This custom divider clock ensures that its parent (VCO) is set to the
+ * desired rate, and that the byte clock postdivider (POSTDIV2) is configured
+ * accordingly.
+ */
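+/*
+ * Illustrative example (numbers are ours, not from HW docs): a requested
+ * byte clock of 60 MHz implies a 480 MHz bit clock, so
+ * get_vco_mul_factor() below returns 16 and clk_bytediv_round_rate()
+ * asks the parent VCO for 60 MHz * 16 = 960 MHz, which is inside the
+ * 600-1200 MHz VCO range.
+ */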
+#define to_clk_bytediv(_hw) container_of(_hw, struct clk_bytediv, hw)
+
+static unsigned long clk_bytediv_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_bytediv *bytediv = to_clk_bytediv(hw);
+ unsigned int div;
+
+ div = dsi_phy_read(bytediv->reg) & 0xff;
+
+ return parent_rate / (div + 1);
+}
+
+/* find the multiplication factor (w.r.t. the byte clock) at which the VCO should be set */
+static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)
+{
+ unsigned long bit_mhz;
+
+ /* convert to the bit clock rate in MHz */
+ bit_mhz = (byte_clk_rate * 8) / 1000000;
+
+ if (bit_mhz < 125)
+ return 64;
+ else if (bit_mhz < 250)
+ return 32;
+ else if (bit_mhz < 600)
+ return 16;
+ else
+ return 8;
+}
+
+static long clk_bytediv_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ unsigned long best_parent;
+ unsigned int factor;
+
+ factor = get_vco_mul_factor(rate);
+
+ best_parent = rate * factor;
+ *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+
+ return *prate / factor;
+}
+
+static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_bytediv *bytediv = to_clk_bytediv(hw);
+ u32 val;
+ unsigned int factor;
+
+ factor = get_vco_mul_factor(rate);
+
+ val = dsi_phy_read(bytediv->reg);
+ val |= (factor - 1) & 0xff;
+ dsi_phy_write(bytediv->reg, val);
+
+ return 0;
+}
+
+/* Our special byte clock divider ops */
+static const struct clk_ops clk_bytediv_ops = {
+ .round_rate = clk_bytediv_round_rate,
+ .set_rate = clk_bytediv_set_rate,
+ .recalc_rate = clk_bytediv_recalc_rate,
+};
+
+/*
+ * PLL Callbacks
+ */
+static void dsi_28nm_pll_save_state(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
+ struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+ void __iomem *base = pll_28nm->phy->pll_base;
+
+ cached_state->postdiv3 =
+ dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10);
+ cached_state->postdiv2 =
+ dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
+ cached_state->postdiv1 =
+ dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
+
+ cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
+}
+
+static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
+ struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+ void __iomem *base = pll_28nm->phy->pll_base;
+ int ret;
+
+ ret = dsi_pll_28nm_clk_set_rate(phy->vco_hw,
+ cached_state->vco_rate, 0);
+ if (ret) {
+ DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev,
+ "restore vco rate failed. ret=%d\n", ret);
+ return ret;
+ }
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
+ cached_state->postdiv3);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9,
+ cached_state->postdiv2);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
+ cached_state->postdiv1);
+
+ return 0;
+}
+
+static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
+{
+ char clk_name[32];
+ struct clk_init_data vco_init = {
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "ref",
+ },
+ .num_parents = 1,
+ .flags = CLK_IGNORE_UNUSED,
+ .ops = &clk_ops_dsi_pll_28nm_vco,
+ };
+ struct device *dev = &pll_28nm->phy->pdev->dev;
+ struct clk_hw *hw;
+ struct clk_bytediv *bytediv;
+ struct clk_init_data bytediv_init = { };
+ int ret;
+
+ DBG("%d", pll_28nm->phy->id);
+
+ bytediv = devm_kzalloc(dev, sizeof(*bytediv), GFP_KERNEL);
+ if (!bytediv)
+ return -ENOMEM;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id);
+ vco_init.name = clk_name;
+
+ pll_28nm->clk_hw.init = &vco_init;
+
+ ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw);
+ if (ret)
+ return ret;
+
+ /* prepare and register bytediv */
+ bytediv->hw.init = &bytediv_init;
+ bytediv->reg = pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id + 1);
+
+ bytediv_init.name = clk_name;
+ bytediv_init.ops = &clk_bytediv_ops;
+ bytediv_init.flags = CLK_SET_RATE_PARENT;
+ bytediv_init.parent_hws = (const struct clk_hw*[]){
+ &pll_28nm->clk_hw,
+ };
+ bytediv_init.num_parents = 1;
+
+ /* DIV2 */
+ ret = devm_clk_hw_register(dev, &bytediv->hw);
+ if (ret)
+ return ret;
+ provided_clocks[DSI_BYTE_PLL_CLK] = &bytediv->hw;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id + 1);
+ /* DIV3 */
+ hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base +
+ REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
+ 0, 8, 0, NULL);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
+
+ return 0;
+}
+
+static int dsi_pll_28nm_8960_init(struct msm_dsi_phy *phy)
+{
+ struct platform_device *pdev = phy->pdev;
+ struct dsi_pll_28nm *pll_28nm;
+ int ret;
+
+ if (!pdev)
+ return -ENODEV;
+
+ pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
+ if (!pll_28nm)
+ return -ENOMEM;
+
+ pll_28nm->phy = phy;
+
+ ret = pll_28nm_register(pll_28nm, phy->provided_clocks->hws);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+ return ret;
+ }
+
+ phy->vco_hw = &pll_28nm->clk_hw;
+
+ return 0;
+}
+
+static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
+ struct msm_dsi_dphy_timing *timing)
+{
+ void __iomem *base = phy->base;
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_0,
+ DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_1,
+ DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_2,
+ DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_3, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_4,
+ DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_5,
+ DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_6,
+ DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_7,
+ DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_8,
+ DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_9,
+ DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_10,
+ DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_11,
+ DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+}
+
+static void dsi_28nm_phy_regulator_init(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->reg_base;
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0, 0x3);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1, 1);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 1);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4,
+ 0x100);
+}
+
+static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->reg_base;
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0, 0x3);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1, 0xa);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 0x4);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4, 0x20);
+}
+
+static void dsi_28nm_phy_calibration(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->reg_base;
+ u32 status;
+ int i = 5000;
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG,
+ 0x3);
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1, 0x5a);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_3, 0x10);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_4, 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_0, 0x1);
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER, 0x1);
+ usleep_range(5000, 6000);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER, 0x0);
+
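+ /* Poll CAL_STATUS until CAL_BUSY clears, for up to ~5 ms (5000 * 1 us) */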
+ do {
+ status = dsi_phy_read(base +
+ REG_DSI_28nm_8960_PHY_MISC_CAL_STATUS);
+
+ if (!(status & DSI_28nm_8960_PHY_MISC_CAL_STATUS_CAL_BUSY))
+ break;
+
+ udelay(1);
+ } while (--i > 0);
+}
+
+static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->base;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_0(i), 0x80);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_1(i), 0x45);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_2(i), 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(i),
+ 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(i),
+ 0x01);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(i),
+ 0x66);
+ }
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_0, 0x40);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_1, 0x67);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_2, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_DATAPATH, 0x0);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR0, 0x1);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR1, 0x88);
+}
+
+static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ void __iomem *base = phy->base;
+
+ DBG("");
+
+ if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
+ DRM_DEV_ERROR(&phy->pdev->dev,
+ "%s: D-PHY timing calculation failed\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ dsi_28nm_phy_regulator_init(phy);
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LDO_CTRL, 0x04);
+
+ /* strength control */
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_0, 0xff);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_1, 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_2, 0x06);
+
+ /* phy ctrl */
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_0, 0x5f);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_1, 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_2, 0x00);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_3, 0x10);
+
+ dsi_28nm_phy_regulator_ctrl(phy);
+
+ dsi_28nm_phy_calibration(phy);
+
+ dsi_28nm_phy_lane_config(phy);
+
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4, 0x0f);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_1, 0x03);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_0, 0x03);
+ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4, 0x0);
+
+ dsi_28nm_dphy_set_timing(phy, timing);
+
+ return 0;
+}
+
+static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ dsi_phy_write(phy->base + REG_DSI_28nm_8960_PHY_CTRL_0, 0x0);
+
+ /*
+ * Wait for the registers writes to complete in order to
+ * ensure that the phy is completely disabled
+ */
+ wmb();
+}
+
+static const struct regulator_bulk_data dsi_phy_28nm_8960_regulators[] = {
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
+ .has_phy_regulator = true,
+ .regulator_data = dsi_phy_28nm_8960_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_28nm_8960_regulators),
+ .ops = {
+ .enable = dsi_28nm_phy_enable,
+ .disable = dsi_28nm_phy_disable,
+ .pll_init = dsi_pll_28nm_8960_init,
+ .save_pll_state = dsi_28nm_pll_save_state,
+ .restore_pll_state = dsi_28nm_pll_restore_state,
+ },
+ .min_pll_rate = VCO_MIN_RATE,
+ .max_pll_rate = VCO_MAX_RATE,
+ .io_start = { 0x4700300, 0x5800300 },
+ .num_dsi_phy = 2,
+};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
new file mode 100644
index 000000000..9e7fa7d88
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -0,0 +1,1104 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/iopoll.h>
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+#include "dsi_phy_7nm.xml.h"
+
+/*
+ * DSI PLL 7nm - clock diagram (eg: DSI0): TODO: update the CPHY diagram
+ *
+ * dsi0_pll_out_div_clk dsi0_pll_bit_clk
+ * | |
+ * | |
+ * +---------+ | +----------+ | +----+
+ * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
+ * +---------+ | +----------+ | +----+
+ * | |
+ * | | dsi0_pll_by_2_bit_clk
+ * | | |
+ * | | +----+ | |\ dsi0_pclk_mux
+ * | |--| /2 |--o--| \ |
+ * | | +----+ | \ | +---------+
+ * | --------------| |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
+ * |------------------------------| / +---------+
+ * | +-----+ | /
+ * -----------| /4? |--o----------|/
+ * +-----+ | |
+ * | |dsiclk_sel
+ * |
+ * dsi0_pll_post_out_div_clk
+ */
+
+#define VCO_REF_CLK_RATE 19200000
+#define FRAC_BITS 18
+
+/* Hardware is V4.1 */
+#define DSI_PHY_7NM_QUIRK_V4_1 BIT(0)
+
+struct dsi_pll_config {
+ bool enable_ssc;
+ bool ssc_center;
+ u32 ssc_freq;
+ u32 ssc_offset;
+ u32 ssc_adj_per;
+
+ /* out */
+ u32 decimal_div_start;
+ u32 frac_div_start;
+ u32 pll_clock_inverters;
+ u32 ssc_stepsize;
+ u32 ssc_div_per;
+};
+
+struct pll_7nm_cached_state {
+ unsigned long vco_rate;
+ u8 bit_clk_div;
+ u8 pix_clk_div;
+ u8 pll_out_div;
+ u8 pll_mux;
+};
+
+struct dsi_pll_7nm {
+ struct clk_hw clk_hw;
+
+ struct msm_dsi_phy *phy;
+
+ u64 vco_current_rate;
+
+ /* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */
+ spinlock_t postdiv_lock;
+
+ struct pll_7nm_cached_state cached_state;
+
+ struct dsi_pll_7nm *slave;
+};
+
+#define to_pll_7nm(x) container_of(x, struct dsi_pll_7nm, clk_hw)
+
+/*
+ * Global list of private DSI PLL struct pointers. We need this for bonded DSI
+ * mode, where the master PLL's clk_ops needs to access the slave's private data.
+ */
+static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX];
+
+static void dsi_pll_setup_config(struct dsi_pll_config *config)
+{
+ config->ssc_freq = 31500;
+ config->ssc_offset = 4800;
+ config->ssc_adj_per = 2;
+
+ /* TODO: ssc enable */
+ config->enable_ssc = false;
+ config->ssc_center = 0;
+}
+
+static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config *config)
+{
+ u64 fref = VCO_REF_CLK_RATE;
+ u64 pll_freq;
+ u64 divider;
+ u64 dec, dec_multiple;
+ u32 frac;
+ u64 multiplier;
+
+ pll_freq = pll->vco_current_rate;
+
+ divider = fref * 2;
+
+ multiplier = 1 << FRAC_BITS;
+ dec_multiple = div_u64(pll_freq * multiplier, divider);
+ dec = div_u64_rem(dec_multiple, multiplier, &frac);
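+ /*
+ * Example (illustrative): for a 1.5 GHz VCO and the 19.2 MHz reference,
+ * pll_freq / (fref * 2) = 39.0625, so dec = 39 and
+ * frac = 0.0625 * 2^18 = 16384.
+ */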
+
+ if (!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1))
+ config->pll_clock_inverters = 0x28;
+ else if (pll_freq <= 1000000000ULL)
+ config->pll_clock_inverters = 0xa0;
+ else if (pll_freq <= 2500000000ULL)
+ config->pll_clock_inverters = 0x20;
+ else if (pll_freq <= 3020000000ULL)
+ config->pll_clock_inverters = 0x00;
+ else
+ config->pll_clock_inverters = 0x40;
+
+ config->decimal_div_start = dec;
+ config->frac_div_start = frac;
+}
+
+#define SSC_CENTER BIT(0)
+#define SSC_EN BIT(1)
+
+static void dsi_pll_calc_ssc(struct dsi_pll_7nm *pll, struct dsi_pll_config *config)
+{
+ u32 ssc_per;
+ u32 ssc_mod;
+ u64 ssc_step_size;
+ u64 frac;
+
+ if (!config->enable_ssc) {
+ DBG("SSC not enabled\n");
+ return;
+ }
+
+ ssc_per = DIV_ROUND_CLOSEST(VCO_REF_CLK_RATE, config->ssc_freq) / 2 - 1;
+ ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
+ ssc_per -= ssc_mod;
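+ /*
+ * Illustrative example with the defaults from dsi_pll_setup_config():
+ * DIV_ROUND_CLOSEST(19200000, 31500) / 2 - 1 = 304, which aligned down
+ * to a multiple of (ssc_adj_per + 1) = 3 yields ssc_per = 302.
+ */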
+
+ frac = config->frac_div_start;
+ ssc_step_size = config->decimal_div_start;
+ ssc_step_size *= (1 << FRAC_BITS);
+ ssc_step_size += frac;
+ ssc_step_size *= config->ssc_offset;
+ ssc_step_size *= (config->ssc_adj_per + 1);
+ ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
+ ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
+
+ config->ssc_div_per = ssc_per;
+ config->ssc_stepsize = ssc_step_size;
+
+ pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
+ config->decimal_div_start, frac, FRAC_BITS);
+ pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
+ ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
+}
+
+static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *config)
+{
+ void __iomem *base = pll->phy->pll_base;
+
+ if (config->enable_ssc) {
+ pr_debug("SSC is enabled\n");
+
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
+ config->ssc_stepsize & 0xff);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
+ config->ssc_stepsize >> 8);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1,
+ config->ssc_div_per & 0xff);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
+ config->ssc_div_per >> 8);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1,
+ config->ssc_adj_per & 0xff);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1,
+ config->ssc_adj_per >> 8);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL,
+ SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
+ }
+}
+
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
+{
+ void __iomem *base = pll->phy->pll_base;
+ u8 analog_controls_five_1 = 0x01, vco_config_1 = 0x00;
+
+ if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
+ if (pll->vco_current_rate >= 3100000000ULL)
+ analog_controls_five_1 = 0x03;
+
+ if (pll->vco_current_rate < 1520000000ULL)
+ vco_config_1 = 0x08;
+ else if (pll->vco_current_rate < 2990000000ULL)
+ vco_config_1 = 0x01;
+ }
+
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1,
+ analog_controls_five_1);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1, vco_config_1);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE, 0x01);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_DSM_DIVIDER, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_OUTDIV, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x0a);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1, 0xc0);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x84);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x82);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x29);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x2f);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_IFILT, 0x2a);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_IFILT,
+ pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1 ? 0x3f : 0x22);
+
+ if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
+ if (pll->slave)
+ dsi_phy_write(pll->slave->phy->pll_base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
+ }
+}
+
+static void dsi_pll_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *config)
+{
+ void __iomem *base = pll->phy->pll_base;
+
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1,
+ config->decimal_div_start);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1,
+ config->frac_div_start & 0xff);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1,
+ (config->frac_div_start & 0xff00) >> 8);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
+ (config->frac_div_start & 0x30000) >> 16);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1,
+ pll->phy->cphy_mode ? 0x00 : 0x10);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS,
+ config->pll_clock_inverters);
+}
+
+static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
+ struct dsi_pll_config config;
+
+ DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_7nm->phy->id, rate,
+ parent_rate);
+
+ pll_7nm->vco_current_rate = rate;
+
+ dsi_pll_setup_config(&config);
+
+ dsi_pll_calc_dec_frac(pll_7nm, &config);
+
+ dsi_pll_calc_ssc(pll_7nm, &config);
+
+ dsi_pll_commit(pll_7nm, &config);
+
+ dsi_pll_config_hzindep_reg(pll_7nm);
+
+ dsi_pll_ssc_commit(pll_7nm, &config);
+
+ /* flush, ensure all register writes are done */
+ wmb();
+
+ return 0;
+}
+
+static int dsi_pll_7nm_lock_status(struct dsi_pll_7nm *pll)
+{
+ int rc;
+ u32 status = 0;
+ u32 const delay_us = 100;
+ u32 const timeout_us = 5000;
+
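+ /* PLL lock is bit 0 of COMMON_STATUS_ONE; poll every 100 us, up to 5 ms */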
+ rc = readl_poll_timeout_atomic(pll->phy->pll_base +
+ REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE,
+ status,
+ ((status & BIT(0)) > 0),
+ delay_us,
+ timeout_us);
+ if (rc)
+ pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
+ pll->phy->id, status);
+
+ return rc;
+}
+
+static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll)
+{
+ u32 data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
+ dsi_phy_write(pll->phy->pll_base + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0);
+ dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0, data & ~BIT(5));
+ ndelay(250);
+}
+
+static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
+{
+ u32 data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
+ dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0, data | BIT(5));
+ dsi_phy_write(pll->phy->pll_base + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
+ ndelay(250);
+}
+
+static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
+{
+ u32 data;
+
+ data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data & ~BIT(5));
+}
+
+static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
+{
+ u32 data;
+
+ dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x04);
+
+ data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1,
+ data | BIT(5) | BIT(4));
+}
+
+static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
+{
+ /*
+ * Reset the PHY digital domain. This would be needed when
+ * coming out of a CX or analog rail power collapse while
+ * ensuring that the pads maintain LP00 or LP11 state
+ */
+ dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, BIT(0));
+ wmb(); /* Ensure that the reset is deasserted */
+ dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, 0x0);
+ wmb(); /* Ensure that the reset is deasserted */
+}
+
+static int dsi_pll_7nm_vco_prepare(struct clk_hw *hw)
+{
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
+ int rc;
+
+ dsi_pll_enable_pll_bias(pll_7nm);
+ if (pll_7nm->slave)
+ dsi_pll_enable_pll_bias(pll_7nm->slave);
+
+ /* Start PLL */
+ dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x01);
+
+ /*
+ * ensure all PLL configurations are written prior to checking
+ * for PLL lock.
+ */
+ wmb();
+
+ /* Check for PLL lock */
+ rc = dsi_pll_7nm_lock_status(pll_7nm);
+ if (rc) {
+ pr_err("PLL(%d) lock failed\n", pll_7nm->phy->id);
+ goto error;
+ }
+
+ pll_7nm->phy->pll_on = true;
+
+ /*
+ * assert power on reset for PHY digital in case the PLL is
+ * enabled after CX or analog domain power collapse. This needs
+ * to be done before enabling the global clk.
+ */
+ dsi_pll_phy_dig_reset(pll_7nm);
+ if (pll_7nm->slave)
+ dsi_pll_phy_dig_reset(pll_7nm->slave);
+
+ dsi_pll_enable_global_clk(pll_7nm);
+ if (pll_7nm->slave)
+ dsi_pll_enable_global_clk(pll_7nm->slave);
+
+error:
+ return rc;
+}
+
+static void dsi_pll_disable_sub(struct dsi_pll_7nm *pll)
+{
+ dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0);
+ dsi_pll_disable_pll_bias(pll);
+}
+
+static void dsi_pll_7nm_vco_unprepare(struct clk_hw *hw)
+{
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
+
+ /*
+ * To avoid any stray glitches while abruptly powering down the PLL,
+ * make sure to gate the clock using the clock enable bit before
+ * powering down the PLL.
+ */
+ dsi_pll_disable_global_clk(pll_7nm);
+ dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0);
+ dsi_pll_disable_sub(pll_7nm);
+ if (pll_7nm->slave) {
+ dsi_pll_disable_global_clk(pll_7nm->slave);
+ dsi_pll_disable_sub(pll_7nm->slave);
+ }
+ /* flush, ensure all register writes are done */
+ wmb();
+ pll_7nm->phy->pll_on = false;
+}
+
+static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
+ void __iomem *base = pll_7nm->phy->pll_base;
+ u64 ref_clk = VCO_REF_CLK_RATE;
+ u64 vco_rate = 0x0;
+ u64 multiplier;
+ u32 frac;
+ u32 dec;
+ u64 pll_freq, tmp64;
+
+ dec = dsi_phy_read(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1);
+ dec &= 0xff;
+
+ frac = dsi_phy_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1);
+ frac |= ((dsi_phy_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1) &
+ 0xff) << 8);
+ frac |= ((dsi_phy_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
+ 0x3) << 16);
+
+ /*
+ * TODO:
+ * 1. Assumes prescaler is disabled
+ */
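+ /*
+ * The computation below inverts dsi_pll_calc_dec_frac(): e.g. with the
+ * illustrative dec = 39 and frac = 16384 it recovers
+ * 39 * 38.4 MHz + (38.4 MHz * 16384) / 2^18 = 1.5 GHz.
+ */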
+ multiplier = 1 << FRAC_BITS;
+ pll_freq = dec * (ref_clk * 2);
+ tmp64 = (ref_clk * 2 * frac);
+ pll_freq += div_u64(tmp64, multiplier);
+
+ vco_rate = pll_freq;
+ pll_7nm->vco_current_rate = vco_rate;
+
+ DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
+ pll_7nm->phy->id, (unsigned long)vco_rate, dec, frac);
+
+ return (unsigned long)vco_rate;
+}
+
+static long dsi_pll_7nm_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *parent_rate)
+{
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
+
+ if (rate < pll_7nm->phy->cfg->min_pll_rate)
+ return pll_7nm->phy->cfg->min_pll_rate;
+ else if (rate > pll_7nm->phy->cfg->max_pll_rate)
+ return pll_7nm->phy->cfg->max_pll_rate;
+ else
+ return rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_7nm_vco = {
+ .round_rate = dsi_pll_7nm_clk_round_rate,
+ .set_rate = dsi_pll_7nm_vco_set_rate,
+ .recalc_rate = dsi_pll_7nm_vco_recalc_rate,
+ .prepare = dsi_pll_7nm_vco_prepare,
+ .unprepare = dsi_pll_7nm_vco_unprepare,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static void dsi_7nm_pll_save_state(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
+ struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
+ void __iomem *phy_base = pll_7nm->phy->base;
+ u32 cmn_clk_cfg0, cmn_clk_cfg1;
+
+ cached->pll_out_div = dsi_phy_read(pll_7nm->phy->pll_base +
+ REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
+ cached->pll_out_div &= 0x3;
+
+ cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
+ cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
+ cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
+
+ cmn_clk_cfg1 = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ cached->pll_mux = cmn_clk_cfg1 & 0x3;
+
+ DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
+ pll_7nm->phy->id, cached->pll_out_div, cached->bit_clk_div,
+ cached->pix_clk_div, cached->pll_mux);
+}
+
+static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
+ struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
+ void __iomem *phy_base = pll_7nm->phy->base;
+ u32 val;
+ int ret;
+
+ val = dsi_phy_read(pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
+ val &= ~0x3;
+ val |= cached->pll_out_div;
+ dsi_phy_write(pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, val);
+
+ dsi_phy_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+ cached->bit_clk_div | (cached->pix_clk_div << 4));
+
+ val = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ val &= ~0x3;
+ val |= cached->pll_mux;
+ dsi_phy_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, val);
+
+ ret = dsi_pll_7nm_vco_set_rate(phy->vco_hw,
+ pll_7nm->vco_current_rate,
+ VCO_REF_CLK_RATE);
+ if (ret) {
+ DRM_DEV_ERROR(&pll_7nm->phy->pdev->dev,
+ "restore vco rate failed. ret=%d\n", ret);
+ return ret;
+ }
+
+ DBG("DSI PLL%d", pll_7nm->phy->id);
+
+ return 0;
+}
+
+static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
+{
+ struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
+ void __iomem *base = phy->base;
+ u32 data = 0x0; /* internal PLL */
+
+ DBG("DSI PLL%d", pll_7nm->phy->id);
+
+ switch (phy->usecase) {
+ case MSM_DSI_PHY_STANDALONE:
+ break;
+ case MSM_DSI_PHY_MASTER:
+ pll_7nm->slave = pll_7nm_list[(pll_7nm->phy->id + 1) % DSI_MAX];
+ break;
+ case MSM_DSI_PHY_SLAVE:
+ data = 0x1; /* external PLL */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set PLL src */
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, (data << 2));
+
+ return 0;
+}
+
+/*
+ * The post dividers and mux clocks are created using the standard divider and
+ * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
+ * state to follow the master PLL's divider/mux state. Therefore, we don't
+ * require special clock ops that also configure the slave PLL registers
+ */
+static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provided_clocks)
+{
+ char clk_name[32];
+ struct clk_init_data vco_init = {
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "ref",
+ },
+ .num_parents = 1,
+ .name = clk_name,
+ .flags = CLK_IGNORE_UNUSED,
+ .ops = &clk_ops_dsi_pll_7nm_vco,
+ };
+ struct device *dev = &pll_7nm->phy->pdev->dev;
+ struct clk_hw *hw, *pll_out_div, *pll_bit, *pll_by_2_bit;
+ struct clk_hw *pll_post_out_div, *phy_pll_out_dsi_parent;
+ int ret;
+
+ DBG("DSI%d", pll_7nm->phy->id);
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_7nm->phy->id);
+ pll_7nm->clk_hw.init = &vco_init;
+
+ ret = devm_clk_hw_register(dev, &pll_7nm->clk_hw);
+ if (ret)
+ return ret;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
+
+ pll_out_div = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_7nm->clk_hw, CLK_SET_RATE_PARENT,
+ pll_7nm->phy->pll_base +
+ REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE,
+ 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+ if (IS_ERR(pll_out_div)) {
+ ret = PTR_ERR(pll_out_div);
+ goto fail;
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_bit_clk", pll_7nm->phy->id);
+
+ /* BIT CLK: DIV_CTRL_3_0 */
+ pll_bit = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ pll_out_div, CLK_SET_RATE_PARENT,
+ pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+ 0, 4, CLK_DIVIDER_ONE_BASED, &pll_7nm->postdiv_lock);
+ if (IS_ERR(pll_bit)) {
+ ret = PTR_ERR(pll_bit);
+ goto fail;
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_byteclk", pll_7nm->phy->id);
+
+ /* DSI byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 (D-PHY) or / 7 (C-PHY) */
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
+ pll_bit, CLK_SET_RATE_PARENT, 1,
+ pll_7nm->phy->cphy_mode ? 7 : 8);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto fail;
+ }
+
+ provided_clocks[DSI_BYTE_PLL_CLK] = hw;
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id);
+
+ pll_by_2_bit = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, pll_bit, 0, 1, 2);
+ if (IS_ERR(pll_by_2_bit)) {
+ ret = PTR_ERR(pll_by_2_bit);
+ goto fail;
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
+
+ if (pll_7nm->phy->cphy_mode)
+ pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(
+ dev, clk_name, pll_out_div, 0, 2, 7);
+ else
+ pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(
+ dev, clk_name, pll_out_div, 0, 1, 4);
+ if (IS_ERR(pll_post_out_div)) {
+ ret = PTR_ERR(pll_post_out_div);
+ goto fail;
+ }
+
+ /*
+ * In CPHY mode, pclk_mux will always have post_out_div as its parent,
+ * so don't register a pclk_mux clock and just use post_out_div instead.
+ */
+ if (pll_7nm->phy->cphy_mode) {
+ u32 data;
+
+ data = dsi_phy_read(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data | 3);
+
+ phy_pll_out_dsi_parent = pll_post_out_div;
+ } else {
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_7nm->phy->id);
+
+ hw = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
+ ((const struct clk_hw *[]){
+ pll_bit,
+ pll_by_2_bit,
+ }), 2, 0, pll_7nm->phy->base +
+ REG_DSI_7nm_PHY_CMN_CLK_CFG1,
+ 0, 1, 0, NULL);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto fail;
+ }
+
+ phy_pll_out_dsi_parent = hw;
+ }
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_dsiclk", pll_7nm->phy->id);
+
+ /* PIX CLK DIV: DIV_CTRL_7_4 */
+ hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ phy_pll_out_dsi_parent, 0,
+ pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+ 4, 4, CLK_DIVIDER_ONE_BASED, &pll_7nm->postdiv_lock);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto fail;
+ }
+
+ provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
+
+ return 0;
+
+fail:
+
+ return ret;
+}
+
+static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
+{
+ struct platform_device *pdev = phy->pdev;
+ struct dsi_pll_7nm *pll_7nm;
+ int ret;
+
+ pll_7nm = devm_kzalloc(&pdev->dev, sizeof(*pll_7nm), GFP_KERNEL);
+ if (!pll_7nm)
+ return -ENOMEM;
+
+ DBG("DSI PLL%d", phy->id);
+
+ pll_7nm_list[phy->id] = pll_7nm;
+
+ spin_lock_init(&pll_7nm->postdiv_lock);
+
+ pll_7nm->phy = phy;
+
+ ret = pll_7nm_register(pll_7nm, phy->provided_clocks->hws);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+ return ret;
+ }
+
+ phy->vco_hw = &pll_7nm->clk_hw;
+
+ /* TODO: Remove this when we have proper display handover support */
+ msm_dsi_phy_pll_save_state(phy);
+
+ return 0;
+}
+
+static int dsi_phy_hw_v4_0_is_pll_on(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->base;
+ u32 data = 0;
+
+ data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
+ mb(); /* make sure read happened */
+
+ return (data & BIT(0));
+}
+
+static void dsi_phy_hw_v4_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *lane_base = phy->lane_base;
+ int phy_lane_0 = 0; /* TODO: Support all lane swap configs */
+
+ /*
+ * LPRX and CDRX need to be enabled only for the physical data lane
+ * corresponding to the logical data lane 0
+ */
+ if (enable)
+ dsi_phy_write(lane_base +
+ REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
+ else
+ dsi_phy_write(lane_base +
+ REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
+}
+
+static void dsi_phy_hw_v4_0_lane_settings(struct msm_dsi_phy *phy)
+{
+ int i;
+ const u8 tx_dctrl_0[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
+ const u8 tx_dctrl_1[] = { 0x40, 0x40, 0x40, 0x46, 0x41 };
+ const u8 *tx_dctrl = tx_dctrl_0;
+ void __iomem *lane_base = phy->lane_base;
+
+ if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1)
+ tx_dctrl = tx_dctrl_1;
+
+ /* Strength ctrl settings */
+ for (i = 0; i < 5; i++) {
+ /*
+ * Disable LPRX and CDRX for all lanes. Later on, they will only be
+ * enabled for the physical data lane corresponding to the logical
+ * data lane 0
+ */
+ dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_LPRX_CTRL(i), 0);
+ dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_PIN_SWAP(i), 0x0);
+ }
+
+ dsi_phy_hw_v4_0_config_lpcdrx(phy, true);
+
+ /* other per-lane settings; entry 4 is the clock lane */
+ for (i = 0; i < 5; i++) {
+ dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG0(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG1(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG2(i), i == 4 ? 0x8a : 0xa);
+ dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_TX_DCTRL(i), tx_dctrl[i]);
+ }
+}
+
+static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ int ret;
+ u32 status;
+ u32 const delay_us = 5;
+ u32 const timeout_us = 1000;
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ void __iomem *base = phy->base;
+ bool less_than_1500_mhz;
+ u32 vreg_ctrl_0, vreg_ctrl_1, lane_ctrl0;
+ u32 glbl_pemph_ctrl_0;
+ u32 glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
+ u32 glbl_rescode_top_ctrl, glbl_rescode_bot_ctrl;
+ u32 data;
+
+ DBG("");
+
+ if (phy->cphy_mode)
+ ret = msm_dsi_cphy_timing_calc_v4(timing, clk_req);
+ else
+ ret = msm_dsi_dphy_timing_calc_v4(timing, clk_req);
+ if (ret) {
+ DRM_DEV_ERROR(&phy->pdev->dev,
+ "%s: PHY timing calculation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ if (dsi_phy_hw_v4_0_is_pll_on(phy))
+ pr_warn("PLL turned on before configuring PHY\n");
+
+ /* wait for REFGEN READY */
+ ret = readl_poll_timeout_atomic(base + REG_DSI_7nm_PHY_CMN_PHY_STATUS,
+ status, (status & BIT(0)),
+ delay_us, timeout_us);
+ if (ret) {
+ pr_err("Ref gen not ready. Aborting\n");
+ return -EINVAL;
+ }
+
+ /* TODO: CPHY enable path (this is for DPHY only) */
+
+ /* Alter the PHY configuration if the data rate is less than 1.5 GHz */
+ less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000);
+
+ if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
+ vreg_ctrl_0 = less_than_1500_mhz ? 0x53 : 0x52;
+ if (phy->cphy_mode) {
+ glbl_rescode_top_ctrl = 0x00;
+ glbl_rescode_bot_ctrl = 0x3c;
+ } else {
+ glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00;
+ glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x39 : 0x3c;
+ }
+ glbl_str_swi_cal_sel_ctrl = 0x00;
+ glbl_hstx_str_ctrl_0 = 0x88;
+ } else {
+ vreg_ctrl_0 = less_than_1500_mhz ? 0x5B : 0x59;
+ if (phy->cphy_mode) {
+ glbl_str_swi_cal_sel_ctrl = 0x03;
+ glbl_hstx_str_ctrl_0 = 0x66;
+ } else {
+ glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00;
+ glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88;
+ }
+ glbl_rescode_top_ctrl = 0x03;
+ glbl_rescode_bot_ctrl = 0x3c;
+ }
+
+ if (phy->cphy_mode) {
+ vreg_ctrl_0 = 0x51;
+ vreg_ctrl_1 = 0x55;
+ glbl_pemph_ctrl_0 = 0x11;
+ lane_ctrl0 = 0x17;
+ } else {
+ vreg_ctrl_1 = 0x5c;
+ glbl_pemph_ctrl_0 = 0x00;
+ lane_ctrl0 = 0x1f;
+ }
+
+ /* de-assert digital and pll power down */
+ data = BIT(6) | BIT(5);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, data);
+
+ /* Assert PLL core reset */
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x00);
+
+ /* turn off resync FIFO */
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0x00);
+
+ /* program CMN_CTRL_4 for minor_ver 2 chipsets */
+ data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_REVISION_ID0);
+ data &= 0xf0;
+ if (data == 0x20)
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_4, 0x04);
+
+ /* Configure PHY lane swap (TODO: we need to calculate this) */
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG0, 0x21);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG1, 0x84);
+
+ if (phy->cphy_mode)
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_CTRL, BIT(6));
+
+ /* Enable LDO */
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_0, vreg_ctrl_0);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_1, vreg_ctrl_1);
+
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL,
+ glbl_str_swi_cal_sel_ctrl);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0,
+ glbl_hstx_str_ctrl_0);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0,
+ glbl_pemph_ctrl_0);
+ if (phy->cphy_mode)
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_1, 0x01);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL,
+ glbl_rescode_top_ctrl);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL,
+ glbl_rescode_bot_ctrl);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL, 0x55);
+
+ /* Remove power down from all blocks */
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, 0x7f);
+
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, lane_ctrl0);
+
+ /* Select full-rate mode */
+ if (!phy->cphy_mode)
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_2, 0x40);
+
+ ret = dsi_7nm_set_usecase(phy);
+ if (ret) {
+ DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* DSI PHY timings */
+ if (phy->cphy_mode) {
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5,
+ timing->shared_timings.clk_pre);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->clk_prepare);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7,
+ timing->shared_timings.clk_post);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
+ } else {
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1, timing->clk_zero);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2, timing->clk_prepare);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3, timing->clk_trail);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5, timing->hs_zero);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->hs_prepare);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7, timing->hs_trail);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12,
+ timing->shared_timings.clk_pre);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13,
+ timing->shared_timings.clk_post);
+ }
+
+ /* DSI lane settings */
+ dsi_phy_hw_v4_0_lane_settings(phy);
+
+ DBG("DSI%d PHY enabled", phy->id);
+
+ return 0;
+}
+
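+/*
+ * BIT(5) and BIT(6) of CMN_LANE_CTRL1 control the continuous-clock
+ * behaviour; the function returns the state it was asked to program.
+ */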
+static bool dsi_7nm_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *base = phy->base;
+ u32 data;
+
+ data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL1);
+ if (enable)
+ data |= BIT(5) | BIT(6);
+ else
+ data &= ~(BIT(5) | BIT(6));
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL1, data);
+
+ return enable;
+}
+
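+/*
+ * Tear-down mirror of dsi_7nm_phy_enable(): drop LPRX/CDRX, disable
+ * the lanes and then power down every PHY block via CMN_CTRL_0.
+ */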
+static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->base;
+ u32 data;
+
+ DBG("");
+
+ if (dsi_phy_hw_v4_0_is_pll_on(phy))
+ pr_warn("Turning OFF PHY while PLL is on\n");
+
+ dsi_phy_hw_v4_0_config_lpcdrx(phy, false);
+ data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
+ /* disable all lanes */
+ data &= ~0x1F;
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, data);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, 0);
+
+ /* Turn off all PHY blocks */
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, 0x00);
+ /* make sure phy is turned off */
+ wmb();
+
+ DBG("DSI%d PHY disabled", phy->id);
+}
+
+static const struct regulator_bulk_data dsi_phy_7nm_36mA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 36000 },
+};
+
+static const struct regulator_bulk_data dsi_phy_7nm_37750uA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 37550 },
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_36mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_36mA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae94400, 0xae96400 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V4_1,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_36mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_36mA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 1000000000UL,
+ .max_pll_rate = 3500000000UL,
+ .io_start = { 0xae94400, 0xae96400 },
+ .num_dsi_phy = 2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_37750uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_37750uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae94400 },
+ .num_dsi_phy = 1,
+ .quirks = DSI_PHY_7NM_QUIRK_V4_1,
+};
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
new file mode 100644
index 000000000..2ae711cbe
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -0,0 +1,70 @@
+#ifndef SFPB_XML
+#define SFPB_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum sfpb_ahb_arb_master_port_en {
+ SFPB_MASTER_PORT_ENABLE = 3,
+ SFPB_MASTER_PORT_DISABLE = 0,
+};
+
+#define REG_SFPB_GPREG 0x00000058
+#define SFPB_GPREG_MASTER_PORT_EN__MASK 0x00001800
+#define SFPB_GPREG_MASTER_PORT_EN__SHIFT 11
+static inline uint32_t SFPB_GPREG_MASTER_PORT_EN(enum sfpb_ahb_arb_master_port_en val)
+{
+ return ((val) << SFPB_GPREG_MASTER_PORT_EN__SHIFT) & SFPB_GPREG_MASTER_PORT_EN__MASK;
+}
+
+
+#endif /* SFPB_XML */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
new file mode 100644
index 000000000..333cedc11
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -0,0 +1,614 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_of.h>
+
+#include <sound/hdmi-codec.h>
+#include "hdmi.h"
+
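+/*
+ * Flip the controller between enabled and disabled under reg_lock.
+ * Note the DVI case (!hdmi_mode) when powering on: the HDMI bit is
+ * pulsed high for one write and then dropped again.
+ */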
+void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
+{
+ uint32_t ctrl = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ if (power_on) {
+ ctrl |= HDMI_CTRL_ENABLE;
+ if (!hdmi->hdmi_mode) {
+ ctrl |= HDMI_CTRL_HDMI;
+ hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+ ctrl &= ~HDMI_CTRL_HDMI;
+ } else {
+ ctrl |= HDMI_CTRL_HDMI;
+ }
+ } else {
+ ctrl = HDMI_CTRL_HDMI;
+ }
+
+ hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+ DBG("HDMI Core: %s, HDMI_CTRL=0x%08x",
+ power_on ? "Enable" : "Disable", ctrl);
+}
+
+static irqreturn_t msm_hdmi_irq(int irq, void *dev_id)
+{
+ struct hdmi *hdmi = dev_id;
+
+ /* Process HPD: */
+ msm_hdmi_hpd_irq(hdmi->bridge);
+
+ /* Process DDC: */
+ msm_hdmi_i2c_irq(hdmi->i2c);
+
+ /* Process HDCP: */
+ if (hdmi->hdcp_ctrl)
+ msm_hdmi_hdcp_irq(hdmi->hdcp_ctrl);
+
+ /* TODO audio.. */
+
+ return IRQ_HANDLED;
+}
+
+static void msm_hdmi_destroy(struct hdmi *hdmi)
+{
+ /*
+ * At this point HPD has been disabled; once the workqueue is
+ * flushed it is safe to deinit HDCP.
+ */
+ if (hdmi->workq)
+ destroy_workqueue(hdmi->workq);
+ msm_hdmi_hdcp_destroy(hdmi);
+
+ if (hdmi->phy_dev) {
+ put_device(hdmi->phy_dev);
+ hdmi->phy = NULL;
+ hdmi->phy_dev = NULL;
+ }
+
+ if (hdmi->i2c)
+ msm_hdmi_i2c_destroy(hdmi->i2c);
+
+ platform_set_drvdata(hdmi->pdev, NULL);
+}
+
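+/*
+ * Resolve the PHY from the "phys" phandle and defer probe until the
+ * PHY driver has bound and populated its drvdata.
+ */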
+static int msm_hdmi_get_phy(struct hdmi *hdmi)
+{
+ struct platform_device *pdev = hdmi->pdev;
+ struct platform_device *phy_pdev;
+ struct device_node *phy_node;
+
+ phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
+ if (!phy_node) {
+ DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n");
+ return -ENXIO;
+ }
+
+ phy_pdev = of_find_device_by_node(phy_node);
+ if (phy_pdev)
+ hdmi->phy = platform_get_drvdata(phy_pdev);
+
+ of_node_put(phy_node);
+
+ if (!phy_pdev) {
+ DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
+ return -EPROBE_DEFER;
+ }
+ if (!hdmi->phy) {
+ DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
+ put_device(&phy_pdev->dev);
+ return -EPROBE_DEFER;
+ }
+
+ hdmi->phy_dev = get_device(&phy_pdev->dev);
+
+ return 0;
+}
+
+/* Construct hdmi at bind/probe time and grab all the resources. If
+ * we are going to EPROBE_DEFER, we want to do it here rather than
+ * later at modeset_init() time.
+ */
+static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
+{
+ struct hdmi_platform_config *config = pdev->dev.platform_data;
+ struct hdmi *hdmi = NULL;
+ struct resource *res;
+ int i, ret;
+
+ hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ hdmi->pdev = pdev;
+ hdmi->config = config;
+ spin_lock_init(&hdmi->reg_lock);
+
+ ret = drm_of_find_panel_or_bridge(pdev->dev.of_node, 1, 0, NULL, &hdmi->next_bridge);
+ if (ret && ret != -ENODEV)
+ goto fail;
+
+ hdmi->mmio = msm_ioremap(pdev, config->mmio_name);
+ if (IS_ERR(hdmi->mmio)) {
+ ret = PTR_ERR(hdmi->mmio);
+ goto fail;
+ }
+
+ /* HDCP needs physical address of hdmi register */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ config->mmio_name);
+ if (!res) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ hdmi->mmio_phy_addr = res->start;
+
+ hdmi->qfprom_mmio = msm_ioremap(pdev, config->qfprom_mmio_name);
+ if (IS_ERR(hdmi->qfprom_mmio)) {
+ DRM_DEV_INFO(&pdev->dev, "can't find qfprom resource\n");
+ hdmi->qfprom_mmio = NULL;
+ }
+
+ hdmi->hpd_regs = devm_kcalloc(&pdev->dev,
+ config->hpd_reg_cnt,
+ sizeof(hdmi->hpd_regs[0]),
+ GFP_KERNEL);
+ if (!hdmi->hpd_regs) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ for (i = 0; i < config->hpd_reg_cnt; i++)
+ hdmi->hpd_regs[i].supply = config->hpd_reg_names[i];
+
+ ret = devm_regulator_bulk_get(&pdev->dev, config->hpd_reg_cnt, hdmi->hpd_regs);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "failed to get hpd regulator: %d\n", ret);
+ goto fail;
+ }
+
+ hdmi->pwr_regs = devm_kcalloc(&pdev->dev,
+ config->pwr_reg_cnt,
+ sizeof(hdmi->pwr_regs[0]),
+ GFP_KERNEL);
+ if (!hdmi->pwr_regs) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ for (i = 0; i < config->pwr_reg_cnt; i++)
+ hdmi->pwr_regs[i].supply = config->pwr_reg_names[i];
+
+ ret = devm_regulator_bulk_get(&pdev->dev, config->pwr_reg_cnt, hdmi->pwr_regs);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "failed to get pwr regulator: %d\n", ret);
+ goto fail;
+ }
+
+ hdmi->hpd_clks = devm_kcalloc(&pdev->dev,
+ config->hpd_clk_cnt,
+ sizeof(hdmi->hpd_clks[0]),
+ GFP_KERNEL);
+ if (!hdmi->hpd_clks) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ for (i = 0; i < config->hpd_clk_cnt; i++) {
+ struct clk *clk;
+
+ clk = msm_clk_get(pdev, config->hpd_clk_names[i]);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ DRM_DEV_ERROR(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ goto fail;
+ }
+
+ hdmi->hpd_clks[i] = clk;
+ }
+
+ hdmi->pwr_clks = devm_kcalloc(&pdev->dev,
+ config->pwr_clk_cnt,
+ sizeof(hdmi->pwr_clks[0]),
+ GFP_KERNEL);
+ if (!hdmi->pwr_clks) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ for (i = 0; i < config->pwr_clk_cnt; i++) {
+ struct clk *clk;
+
+ clk = msm_clk_get(pdev, config->pwr_clk_names[i]);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ DRM_DEV_ERROR(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
+ config->pwr_clk_names[i], ret);
+ goto fail;
+ }
+
+ hdmi->pwr_clks[i] = clk;
+ }
+
+ hdmi->hpd_gpiod = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN);
+ /* This will catch e.g. -EPROBE_DEFER */
+ if (IS_ERR(hdmi->hpd_gpiod)) {
+ ret = PTR_ERR(hdmi->hpd_gpiod);
+ DRM_DEV_ERROR(&pdev->dev, "failed to get hpd gpio: (%d)\n", ret);
+ goto fail;
+ }
+
+ if (hdmi->hpd_gpiod)
+ gpiod_set_consumer_name(hdmi->hpd_gpiod, "HDMI_HPD");
+ else
+ DBG("failed to get HPD gpio");
+
+ devm_pm_runtime_enable(&pdev->dev);
+
+ hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
+ if (!hdmi->workq) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ hdmi->i2c = msm_hdmi_i2c_init(hdmi);
+ if (IS_ERR(hdmi->i2c)) {
+ ret = PTR_ERR(hdmi->i2c);
+ DRM_DEV_ERROR(&pdev->dev, "failed to get i2c: %d\n", ret);
+ hdmi->i2c = NULL;
+ goto fail;
+ }
+
+ ret = msm_hdmi_get_phy(hdmi);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "failed to get phy\n");
+ goto fail;
+ }
+
+ hdmi->hdcp_ctrl = msm_hdmi_hdcp_init(hdmi);
+ if (IS_ERR(hdmi->hdcp_ctrl)) {
+ dev_warn(&pdev->dev, "failed to init hdcp: disabled\n");
+ hdmi->hdcp_ctrl = NULL;
+ }
+
+ return hdmi;
+
+fail:
+ if (hdmi)
+ msm_hdmi_destroy(hdmi);
+
+ return ERR_PTR(ret);
+}
+
+/* Second part of initialization, the drm/kms level modeset_init,
+ * constructs/initializes mode objects, etc, is called from master
+ * driver (not hdmi sub-device's probe/bind!)
+ *
+ * Any resource (regulator/clk/etc) which could be missing at boot
+ * should be handled in msm_hdmi_init() so that failure happens from
+ * hdmi sub-device's probe.
+ */
+int msm_hdmi_modeset_init(struct hdmi *hdmi,
+ struct drm_device *dev, struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = hdmi->pdev;
+ int ret;
+
+ if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
+ DRM_DEV_ERROR(dev->dev, "too many bridges\n");
+ return -ENOSPC;
+ }
+
+ hdmi->dev = dev;
+ hdmi->encoder = encoder;
+
+ hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
+
+ hdmi->bridge = msm_hdmi_bridge_init(hdmi);
+ if (IS_ERR(hdmi->bridge)) {
+ ret = PTR_ERR(hdmi->bridge);
+ DRM_DEV_ERROR(dev->dev, "failed to create HDMI bridge: %d\n", ret);
+ hdmi->bridge = NULL;
+ goto fail;
+ }
+
+ if (hdmi->next_bridge) {
+ ret = drm_bridge_attach(hdmi->encoder, hdmi->next_bridge, hdmi->bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to attach next HDMI bridge: %d\n", ret);
+ goto fail;
+ }
+ }
+
+ hdmi->connector = drm_bridge_connector_init(hdmi->dev, encoder);
+ if (IS_ERR(hdmi->connector)) {
+ ret = PTR_ERR(hdmi->connector);
+ DRM_DEV_ERROR(dev->dev, "failed to create HDMI connector: %d\n", ret);
+ hdmi->connector = NULL;
+ goto fail;
+ }
+
+ drm_connector_attach_encoder(hdmi->connector, hdmi->encoder);
+
+ hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!hdmi->irq) {
+ ret = -EINVAL;
+ DRM_DEV_ERROR(dev->dev, "failed to get irq\n");
+ goto fail;
+ }
+
+ ret = devm_request_irq(dev->dev, hdmi->irq,
+ msm_hdmi_irq, IRQF_TRIGGER_HIGH,
+ "hdmi_isr", hdmi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
+ hdmi->irq, ret);
+ goto fail;
+ }
+
+ drm_bridge_connector_enable_hpd(hdmi->connector);
+
+ ret = msm_hdmi_hpd_enable(hdmi->bridge);
+ if (ret < 0) {
+ DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
+ goto fail;
+ }
+
+ priv->bridges[priv->num_bridges++] = hdmi->bridge;
+
+ platform_set_drvdata(pdev, hdmi);
+
+ return 0;
+
+fail:
+ /* bridge is normally destroyed by drm: */
+ if (hdmi->bridge) {
+ msm_hdmi_bridge_destroy(hdmi->bridge);
+ hdmi->bridge = NULL;
+ }
+ if (hdmi->connector) {
+ hdmi->connector->funcs->destroy(hdmi->connector);
+ hdmi->connector = NULL;
+ }
+
+ return ret;
+}
+
+/*
+ * The hdmi device:
+ */
+
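+/*
+ * HDMI_CFG(item, entry) token-pastes the item ## _names_ ## entry
+ * array and its ARRAY_SIZE into the matching _names and _cnt fields
+ * of hdmi_platform_config.
+ */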
+#define HDMI_CFG(item, entry) \
+ .item ## _names = item ##_names_ ## entry, \
+ .item ## _cnt = ARRAY_SIZE(item ## _names_ ## entry)
+
+static const char *hpd_reg_names_8960[] = {"core-vdda"};
+static const char *hpd_clk_names_8960[] = {"core", "master_iface", "slave_iface"};
+
+static struct hdmi_platform_config hdmi_tx_8960_config = {
+ HDMI_CFG(hpd_reg, 8960),
+ HDMI_CFG(hpd_clk, 8960),
+};
+
+static const char *pwr_reg_names_8x74[] = {"core-vdda", "core-vcc"};
+static const char *pwr_clk_names_8x74[] = {"extp", "alt_iface"};
+static const char *hpd_clk_names_8x74[] = {"iface", "core", "mdp_core"};
+static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0};
+
+static struct hdmi_platform_config hdmi_tx_8974_config = {
+ HDMI_CFG(pwr_reg, 8x74),
+ HDMI_CFG(pwr_clk, 8x74),
+ HDMI_CFG(hpd_clk, 8x74),
+ .hpd_freq = hpd_clk_freq_8x74,
+};
+
+/*
+ * HDMI audio codec callbacks
+ */
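+/*
+ * The channel_allocation values below are CEA-861 Audio InfoFrame CA
+ * codes for the speaker maps noted on each case, passed through
+ * msm_hdmi_audio_info_setup() (presumably into HDMI_AUDIO_INFO1_CA,
+ * see hdmi.xml.h).
+ */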
+static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct hdmi *hdmi = dev_get_drvdata(dev);
+ unsigned int chan;
+ unsigned int channel_allocation = 0;
+ unsigned int rate;
+ unsigned int level_shift = 0; /* 0dB */
+ bool down_mix = false;
+
+ DRM_DEV_DEBUG(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
+ params->sample_width, params->cea.channels);
+
+ switch (params->cea.channels) {
+ case 2:
+ /* FR and FL speakers */
+ channel_allocation = 0;
+ chan = MSM_HDMI_AUDIO_CHANNEL_2;
+ break;
+ case 4:
+ /* FC, LFE, FR and FL speakers */
+ channel_allocation = 0x3;
+ chan = MSM_HDMI_AUDIO_CHANNEL_4;
+ break;
+ case 6:
+ /* RR, RL, FC, LFE, FR and FL speakers */
+ channel_allocation = 0x0B;
+ chan = MSM_HDMI_AUDIO_CHANNEL_6;
+ break;
+ case 8:
+ /* FRC, FLC, RR, RL, FC, LFE, FR and FL speakers */
+ channel_allocation = 0x1F;
+ chan = MSM_HDMI_AUDIO_CHANNEL_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (params->sample_rate) {
+ case 32000:
+ rate = HDMI_SAMPLE_RATE_32KHZ;
+ break;
+ case 44100:
+ rate = HDMI_SAMPLE_RATE_44_1KHZ;
+ break;
+ case 48000:
+ rate = HDMI_SAMPLE_RATE_48KHZ;
+ break;
+ case 88200:
+ rate = HDMI_SAMPLE_RATE_88_2KHZ;
+ break;
+ case 96000:
+ rate = HDMI_SAMPLE_RATE_96KHZ;
+ break;
+ case 176400:
+ rate = HDMI_SAMPLE_RATE_176_4KHZ;
+ break;
+ case 192000:
+ rate = HDMI_SAMPLE_RATE_192KHZ;
+ break;
+ default:
+ DRM_DEV_ERROR(dev, "rate[%d] not supported!\n",
+ params->sample_rate);
+ return -EINVAL;
+ }
+
+ msm_hdmi_audio_set_sample_rate(hdmi, rate);
+ msm_hdmi_audio_info_setup(hdmi, 1, chan, channel_allocation,
+ level_shift, down_mix);
+
+ return 0;
+}
+
+static void msm_hdmi_audio_shutdown(struct device *dev, void *data)
+{
+ struct hdmi *hdmi = dev_get_drvdata(dev);
+
+ msm_hdmi_audio_info_setup(hdmi, 0, 0, 0, 0, 0);
+}
+
+static const struct hdmi_codec_ops msm_hdmi_audio_codec_ops = {
+ .hw_params = msm_hdmi_audio_hw_params,
+ .audio_shutdown = msm_hdmi_audio_shutdown,
+};
+
+static struct hdmi_codec_pdata codec_data = {
+ .ops = &msm_hdmi_audio_codec_ops,
+ .max_i2s_channels = 8,
+ .i2s = 1,
+};
+
+static int msm_hdmi_register_audio_driver(struct hdmi *hdmi, struct device *dev)
+{
+ hdmi->audio_pdev = platform_device_register_data(dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_data,
+ sizeof(codec_data));
+ return PTR_ERR_OR_ZERO(hdmi->audio_pdev);
+}
+
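+/*
+ * Component bind: pull the hdmi_platform_config from the OF match
+ * data, fill in the mmio resource names and hand the device to
+ * msm_hdmi_init(); failure to register the audio codec child is
+ * logged but not fatal.
+ */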
+static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+ struct hdmi_platform_config *hdmi_cfg;
+ struct hdmi *hdmi;
+ struct device_node *of_node = dev->of_node;
+ int err;
+
+ hdmi_cfg = (struct hdmi_platform_config *)
+ of_device_get_match_data(dev);
+ if (!hdmi_cfg) {
+ DRM_DEV_ERROR(dev, "unknown hdmi_cfg: %pOFn\n", of_node);
+ return -ENXIO;
+ }
+
+ hdmi_cfg->mmio_name = "core_physical";
+ hdmi_cfg->qfprom_mmio_name = "qfprom_physical";
+
+ dev->platform_data = hdmi_cfg;
+
+ hdmi = msm_hdmi_init(to_platform_device(dev));
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
+ priv->hdmi = hdmi;
+
+ err = msm_hdmi_register_audio_driver(hdmi, dev);
+ if (err) {
+ DRM_ERROR("Failed to attach an audio codec %d\n", err);
+ hdmi->audio_pdev = NULL;
+ }
+
+ return 0;
+}
+
+static void msm_hdmi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+
+ if (priv->hdmi) {
+ if (priv->hdmi->audio_pdev)
+ platform_device_unregister(priv->hdmi->audio_pdev);
+
+ msm_hdmi_destroy(priv->hdmi);
+ priv->hdmi = NULL;
+ }
+}
+
+static const struct component_ops msm_hdmi_ops = {
+ .bind = msm_hdmi_bind,
+ .unbind = msm_hdmi_unbind,
+};
+
+static int msm_hdmi_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &msm_hdmi_ops);
+}
+
+static int msm_hdmi_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &msm_hdmi_ops);
+ return 0;
+}
+
+static const struct of_device_id msm_hdmi_dt_match[] = {
+ { .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8974_config },
+ { .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8974_config },
+ { .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8974_config },
+ { .compatible = "qcom,hdmi-tx-8974", .data = &hdmi_tx_8974_config },
+ { .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config },
+ { .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8960_config },
+ {}
+};
+
+static struct platform_driver msm_hdmi_driver = {
+ .probe = msm_hdmi_dev_probe,
+ .remove = msm_hdmi_dev_remove,
+ .driver = {
+ .name = "hdmi_msm",
+ .of_match_table = msm_hdmi_dt_match,
+ },
+};
+
+void __init msm_hdmi_register(void)
+{
+ msm_hdmi_phy_driver_register();
+ platform_driver_register(&msm_hdmi_driver);
+}
+
+void __exit msm_hdmi_unregister(void)
+{
+ platform_driver_unregister(&msm_hdmi_driver);
+ msm_hdmi_phy_driver_unregister();
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
new file mode 100644
index 000000000..04a74381a
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -0,0 +1,267 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __HDMI_CONNECTOR_H__
+#define __HDMI_CONNECTOR_H__
+
+#include <linux/i2c.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
+#include <linux/hdmi.h>
+
+#include <drm/drm_bridge.h>
+
+#include "msm_drv.h"
+#include "hdmi.xml.h"
+
+struct hdmi_phy;
+struct hdmi_platform_config;
+
+struct hdmi_audio {
+ bool enabled;
+ struct hdmi_audio_infoframe infoframe;
+ int rate;
+};
+
+struct hdmi_hdcp_ctrl;
+
+struct hdmi {
+ struct drm_device *dev;
+ struct platform_device *pdev;
+ struct platform_device *audio_pdev;
+
+ const struct hdmi_platform_config *config;
+
+ /* audio state: */
+ struct hdmi_audio audio;
+
+ /* video state: */
+ bool power_on;
+ unsigned long int pixclock;
+
+ void __iomem *mmio;
+ void __iomem *qfprom_mmio;
+ phys_addr_t mmio_phy_addr;
+
+ struct regulator_bulk_data *hpd_regs;
+ struct regulator_bulk_data *pwr_regs;
+ struct clk **hpd_clks;
+ struct clk **pwr_clks;
+
+ struct gpio_desc *hpd_gpiod;
+
+ struct hdmi_phy *phy;
+ struct device *phy_dev;
+
+ struct i2c_adapter *i2c;
+ struct drm_connector *connector;
+ struct drm_bridge *bridge;
+
+ struct drm_bridge *next_bridge;
+
+ /* the encoder we are hooked to (outside of hdmi block) */
+ struct drm_encoder *encoder;
+
+ bool hdmi_mode; /* are we in hdmi mode? */
+
+ int irq;
+ struct workqueue_struct *workq;
+
+ struct hdmi_hdcp_ctrl *hdcp_ctrl;
+
+ /*
+ * spinlock to protect registers shared by different execution paths:
+ * REG_HDMI_CTRL
+ * REG_HDMI_DDC_ARBITRATION
+ * REG_HDMI_HDCP_INT_CTRL
+ * REG_HDMI_HPD_CTRL
+ */
+ spinlock_t reg_lock;
+};
+
+/* platform config data (i.e. from DT or pdata) */
+struct hdmi_platform_config {
+ const char *mmio_name;
+ const char *qfprom_mmio_name;
+
+ /* regulators that need to be on for hpd: */
+ const char **hpd_reg_names;
+ int hpd_reg_cnt;
+
+ /* regulators that need to be on for screen pwr: */
+ const char **pwr_reg_names;
+ int pwr_reg_cnt;
+
+ /* clks that need to be on for hpd: */
+ const char **hpd_clk_names;
+ const unsigned long *hpd_freq;
+ int hpd_clk_cnt;
+
+ /* clks that need to be on for screen pwr (i.e. pixel clk): */
+ const char **pwr_clk_names;
+ int pwr_clk_cnt;
+};
+
+struct hdmi_bridge {
+ struct drm_bridge base;
+ struct hdmi *hdmi;
+ struct work_struct hpd_work;
+};
+#define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base)
+
+void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on);
+
+static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
+{
+ msm_writel(data, hdmi->mmio + reg);
+}
+
+static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
+{
+ return msm_readl(hdmi->mmio + reg);
+}
+
+static inline u32 hdmi_qfprom_read(struct hdmi *hdmi, u32 reg)
+{
+ return msm_readl(hdmi->qfprom_mmio + reg);
+}
+
+/*
+ * hdmi phy:
+ */
+
+enum hdmi_phy_type {
+ MSM_HDMI_PHY_8x60,
+ MSM_HDMI_PHY_8960,
+ MSM_HDMI_PHY_8x74,
+ MSM_HDMI_PHY_8996,
+ MSM_HDMI_PHY_MAX,
+};
+
+struct hdmi_phy_cfg {
+ enum hdmi_phy_type type;
+ void (*powerup)(struct hdmi_phy *phy, unsigned long int pixclock);
+ void (*powerdown)(struct hdmi_phy *phy);
+ const char * const *reg_names;
+ int num_regs;
+ const char * const *clk_names;
+ int num_clks;
+};
+
+extern const struct hdmi_phy_cfg msm_hdmi_phy_8x60_cfg;
+extern const struct hdmi_phy_cfg msm_hdmi_phy_8960_cfg;
+extern const struct hdmi_phy_cfg msm_hdmi_phy_8x74_cfg;
+extern const struct hdmi_phy_cfg msm_hdmi_phy_8996_cfg;
+
+struct hdmi_phy {
+ struct platform_device *pdev;
+ void __iomem *mmio;
+ struct hdmi_phy_cfg *cfg;
+ const struct hdmi_phy_funcs *funcs;
+ struct regulator_bulk_data *regs;
+ struct clk **clks;
+};
+
+static inline void hdmi_phy_write(struct hdmi_phy *phy, u32 reg, u32 data)
+{
+ msm_writel(data, phy->mmio + reg);
+}
+
+static inline u32 hdmi_phy_read(struct hdmi_phy *phy, u32 reg)
+{
+ return msm_readl(phy->mmio + reg);
+}
+
+int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy);
+void msm_hdmi_phy_resource_disable(struct hdmi_phy *phy);
+void msm_hdmi_phy_powerup(struct hdmi_phy *phy, unsigned long int pixclock);
+void msm_hdmi_phy_powerdown(struct hdmi_phy *phy);
+void __init msm_hdmi_phy_driver_register(void);
+void __exit msm_hdmi_phy_driver_unregister(void);
+
+#ifdef CONFIG_COMMON_CLK
+int msm_hdmi_pll_8960_init(struct platform_device *pdev);
+int msm_hdmi_pll_8996_init(struct platform_device *pdev);
+#else
+static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+
+static inline int msm_hdmi_pll_8996_init(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+#endif
+
+/*
+ * audio:
+ */
+/* Supported HDMI Audio channels and rates */
+#define MSM_HDMI_AUDIO_CHANNEL_2 0
+#define MSM_HDMI_AUDIO_CHANNEL_4 1
+#define MSM_HDMI_AUDIO_CHANNEL_6 2
+#define MSM_HDMI_AUDIO_CHANNEL_8 3
+
+#define HDMI_SAMPLE_RATE_32KHZ 0
+#define HDMI_SAMPLE_RATE_44_1KHZ 1
+#define HDMI_SAMPLE_RATE_48KHZ 2
+#define HDMI_SAMPLE_RATE_88_2KHZ 3
+#define HDMI_SAMPLE_RATE_96KHZ 4
+#define HDMI_SAMPLE_RATE_176_4KHZ 5
+#define HDMI_SAMPLE_RATE_192KHZ 6
+
+int msm_hdmi_audio_update(struct hdmi *hdmi);
+int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
+ uint32_t num_of_channels, uint32_t channel_allocation,
+ uint32_t level_shift, bool down_mix);
+void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate);
+
+
+/*
+ * hdmi bridge:
+ */
+
+struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi);
+void msm_hdmi_bridge_destroy(struct drm_bridge *bridge);
+
+void msm_hdmi_hpd_irq(struct drm_bridge *bridge);
+enum drm_connector_status msm_hdmi_bridge_detect(
+ struct drm_bridge *bridge);
+int msm_hdmi_hpd_enable(struct drm_bridge *bridge);
+void msm_hdmi_hpd_disable(struct hdmi_bridge *hdmi_bridge);
+
+/*
+ * i2c adapter for ddc:
+ */
+
+void msm_hdmi_i2c_irq(struct i2c_adapter *i2c);
+void msm_hdmi_i2c_destroy(struct i2c_adapter *i2c);
+struct i2c_adapter *msm_hdmi_i2c_init(struct hdmi *hdmi);
+
+/*
+ * hdcp
+ */
+#ifdef CONFIG_DRM_MSM_HDMI_HDCP
+struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi);
+void msm_hdmi_hdcp_destroy(struct hdmi *hdmi);
+void msm_hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void msm_hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void msm_hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+#else
+static inline struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi)
+{
+ return ERR_PTR(-ENXIO);
+}
+static inline void msm_hdmi_hdcp_destroy(struct hdmi *hdmi) {}
+static inline void msm_hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl) {}
+static inline void msm_hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl) {}
+static inline void msm_hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl) {}
+#endif
+
+#endif /* __HDMI_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
new file mode 100644
index 000000000..c9eb26df6
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -0,0 +1,1377 @@
+#ifndef HDMI_XML
+#define HDMI_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum hdmi_hdcp_key_state {
+ HDCP_KEYS_STATE_NO_KEYS = 0,
+ HDCP_KEYS_STATE_NOT_CHECKED = 1,
+ HDCP_KEYS_STATE_CHECKING = 2,
+ HDCP_KEYS_STATE_VALID = 3,
+ HDCP_KEYS_STATE_AKSV_NOT_VALID = 4,
+ HDCP_KEYS_STATE_CHKSUM_MISMATCH = 5,
+ HDCP_KEYS_STATE_PROD_AKSV = 6,
+ HDCP_KEYS_STATE_RESERVED = 7,
+};
+
+enum hdmi_ddc_read_write {
+ DDC_WRITE = 0,
+ DDC_READ = 1,
+};
+
+enum hdmi_acr_cts {
+ ACR_NONE = 0,
+ ACR_32 = 1,
+ ACR_44 = 2,
+ ACR_48 = 3,
+};
+
+#define REG_HDMI_CTRL 0x00000000
+#define HDMI_CTRL_ENABLE 0x00000001
+#define HDMI_CTRL_HDMI 0x00000002
+#define HDMI_CTRL_ENCRYPTED 0x00000004
+
+#define REG_HDMI_AUDIO_PKT_CTRL1 0x00000020
+#define HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND 0x00000001
+
+#define REG_HDMI_ACR_PKT_CTRL 0x00000024
+#define HDMI_ACR_PKT_CTRL_CONT 0x00000001
+#define HDMI_ACR_PKT_CTRL_SEND 0x00000002
+#define HDMI_ACR_PKT_CTRL_SELECT__MASK 0x00000030
+#define HDMI_ACR_PKT_CTRL_SELECT__SHIFT 4
+static inline uint32_t HDMI_ACR_PKT_CTRL_SELECT(enum hdmi_acr_cts val)
+{
+ return ((val) << HDMI_ACR_PKT_CTRL_SELECT__SHIFT) & HDMI_ACR_PKT_CTRL_SELECT__MASK;
+}
+#define HDMI_ACR_PKT_CTRL_SOURCE 0x00000100
+#define HDMI_ACR_PKT_CTRL_N_MULTIPLIER__MASK 0x00070000
+#define HDMI_ACR_PKT_CTRL_N_MULTIPLIER__SHIFT 16
+static inline uint32_t HDMI_ACR_PKT_CTRL_N_MULTIPLIER(uint32_t val)
+{
+ return ((val) << HDMI_ACR_PKT_CTRL_N_MULTIPLIER__SHIFT) & HDMI_ACR_PKT_CTRL_N_MULTIPLIER__MASK;
+}
+#define HDMI_ACR_PKT_CTRL_AUDIO_PRIORITY 0x80000000
+
+#define REG_HDMI_VBI_PKT_CTRL 0x00000028
+#define HDMI_VBI_PKT_CTRL_GC_ENABLE 0x00000010
+#define HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME 0x00000020
+#define HDMI_VBI_PKT_CTRL_ISRC_SEND 0x00000100
+#define HDMI_VBI_PKT_CTRL_ISRC_CONTINUOUS 0x00000200
+#define HDMI_VBI_PKT_CTRL_ACP_SEND 0x00001000
+#define HDMI_VBI_PKT_CTRL_ACP_SRC_SW 0x00002000
+
+#define REG_HDMI_INFOFRAME_CTRL0 0x0000002c
+#define HDMI_INFOFRAME_CTRL0_AVI_SEND 0x00000001
+#define HDMI_INFOFRAME_CTRL0_AVI_CONT 0x00000002
+#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND 0x00000010
+#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT 0x00000020
+#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE 0x00000040
+#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE 0x00000080
+
+#define REG_HDMI_INFOFRAME_CTRL1 0x00000030
+#define HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK 0x0000003f
+#define HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__SHIFT 0
+static inline uint32_t HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE(uint32_t val)
+{
+ return ((val) << HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__SHIFT) & HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK;
+}
+#define HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE__MASK 0x00003f00
+#define HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE__SHIFT 8
+static inline uint32_t HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE(uint32_t val)
+{
+ return ((val) << HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE__SHIFT) & HDMI_INFOFRAME_CTRL1_AUDIO_INFO_LINE__MASK;
+}
+#define HDMI_INFOFRAME_CTRL1_MPEG_INFO_LINE__MASK 0x003f0000
+#define HDMI_INFOFRAME_CTRL1_MPEG_INFO_LINE__SHIFT 16
+static inline uint32_t HDMI_INFOFRAME_CTRL1_MPEG_INFO_LINE(uint32_t val)
+{
+ return ((val) << HDMI_INFOFRAME_CTRL1_MPEG_INFO_LINE__SHIFT) & HDMI_INFOFRAME_CTRL1_MPEG_INFO_LINE__MASK;
+}
+#define HDMI_INFOFRAME_CTRL1_VENSPEC_INFO_LINE__MASK 0x3f000000
+#define HDMI_INFOFRAME_CTRL1_VENSPEC_INFO_LINE__SHIFT 24
+static inline uint32_t HDMI_INFOFRAME_CTRL1_VENSPEC_INFO_LINE(uint32_t val)
+{
+ return ((val) << HDMI_INFOFRAME_CTRL1_VENSPEC_INFO_LINE__SHIFT) & HDMI_INFOFRAME_CTRL1_VENSPEC_INFO_LINE__MASK;
+}
+
+#define REG_HDMI_GEN_PKT_CTRL 0x00000034
+#define HDMI_GEN_PKT_CTRL_GENERIC0_SEND 0x00000001
+#define HDMI_GEN_PKT_CTRL_GENERIC0_CONT 0x00000002
+#define HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__MASK 0x0000000c
+#define HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__SHIFT 2
+static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE(uint32_t val)
+{
+ return ((val) << HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__MASK;
+}
+#define HDMI_GEN_PKT_CTRL_GENERIC1_SEND 0x00000010
+#define HDMI_GEN_PKT_CTRL_GENERIC1_CONT 0x00000020
+#define HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK 0x003f0000
+#define HDMI_GEN_PKT_CTRL_GENERIC0_LINE__SHIFT 16
+static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC0_LINE(uint32_t val)
+{
+ return ((val) << HDMI_GEN_PKT_CTRL_GENERIC0_LINE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK;
+}
+#define HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK 0x3f000000
+#define HDMI_GEN_PKT_CTRL_GENERIC1_LINE__SHIFT 24
+static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC1_LINE(uint32_t val)
+{
+ return ((val) << HDMI_GEN_PKT_CTRL_GENERIC1_LINE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK;
+}
+
+#define REG_HDMI_GC 0x00000040
+#define HDMI_GC_MUTE 0x00000001
+
+#define REG_HDMI_AUDIO_PKT_CTRL2 0x00000044
+#define HDMI_AUDIO_PKT_CTRL2_OVERRIDE 0x00000001
+#define HDMI_AUDIO_PKT_CTRL2_LAYOUT 0x00000002
+
+static inline uint32_t REG_HDMI_AVI_INFO(uint32_t i0) { return 0x0000006c + 0x4*i0; }
+
+#define REG_HDMI_GENERIC0_HDR 0x00000084
+
+static inline uint32_t REG_HDMI_GENERIC0(uint32_t i0) { return 0x00000088 + 0x4*i0; }
+
+#define REG_HDMI_GENERIC1_HDR 0x000000a4
+
+static inline uint32_t REG_HDMI_GENERIC1(uint32_t i0) { return 0x000000a8 + 0x4*i0; }
+
+static inline uint32_t REG_HDMI_ACR(enum hdmi_acr_cts i0) { return 0x000000c4 + 0x8*i0; }
+
+static inline uint32_t REG_HDMI_ACR_0(enum hdmi_acr_cts i0) { return 0x000000c4 + 0x8*i0; }
+#define HDMI_ACR_0_CTS__MASK 0xfffff000
+#define HDMI_ACR_0_CTS__SHIFT 12
+static inline uint32_t HDMI_ACR_0_CTS(uint32_t val)
+{
+ return ((val) << HDMI_ACR_0_CTS__SHIFT) & HDMI_ACR_0_CTS__MASK;
+}
+
+static inline uint32_t REG_HDMI_ACR_1(enum hdmi_acr_cts i0) { return 0x000000c8 + 0x8*i0; }
+#define HDMI_ACR_1_N__MASK 0xffffffff
+#define HDMI_ACR_1_N__SHIFT 0
+static inline uint32_t HDMI_ACR_1_N(uint32_t val)
+{
+ return ((val) << HDMI_ACR_1_N__SHIFT) & HDMI_ACR_1_N__MASK;
+}
+
+#define REG_HDMI_AUDIO_INFO0 0x000000e4
+#define HDMI_AUDIO_INFO0_CHECKSUM__MASK 0x000000ff
+#define HDMI_AUDIO_INFO0_CHECKSUM__SHIFT 0
+static inline uint32_t HDMI_AUDIO_INFO0_CHECKSUM(uint32_t val)
+{
+ return ((val) << HDMI_AUDIO_INFO0_CHECKSUM__SHIFT) & HDMI_AUDIO_INFO0_CHECKSUM__MASK;
+}
+#define HDMI_AUDIO_INFO0_CC__MASK 0x00000700
+#define HDMI_AUDIO_INFO0_CC__SHIFT 8
+static inline uint32_t HDMI_AUDIO_INFO0_CC(uint32_t val)
+{
+ return ((val) << HDMI_AUDIO_INFO0_CC__SHIFT) & HDMI_AUDIO_INFO0_CC__MASK;
+}
+
+#define REG_HDMI_AUDIO_INFO1 0x000000e8
+#define HDMI_AUDIO_INFO1_CA__MASK 0x000000ff
+#define HDMI_AUDIO_INFO1_CA__SHIFT 0
+static inline uint32_t HDMI_AUDIO_INFO1_CA(uint32_t val)
+{
+ return ((val) << HDMI_AUDIO_INFO1_CA__SHIFT) & HDMI_AUDIO_INFO1_CA__MASK;
+}
+#define HDMI_AUDIO_INFO1_LSV__MASK 0x00007800
+#define HDMI_AUDIO_INFO1_LSV__SHIFT 11
+static inline uint32_t HDMI_AUDIO_INFO1_LSV(uint32_t val)
+{
+ return ((val) << HDMI_AUDIO_INFO1_LSV__SHIFT) & HDMI_AUDIO_INFO1_LSV__MASK;
+}
+#define HDMI_AUDIO_INFO1_DM_INH 0x00008000
+
+#define REG_HDMI_HDCP_CTRL 0x00000110
+#define HDMI_HDCP_CTRL_ENABLE 0x00000001
+#define HDMI_HDCP_CTRL_ENCRYPTION_ENABLE 0x00000100
+
+#define REG_HDMI_HDCP_DEBUG_CTRL 0x00000114
+#define HDMI_HDCP_DEBUG_CTRL_RNG_CIPHER 0x00000004
+
+#define REG_HDMI_HDCP_INT_CTRL 0x00000118
+#define HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT 0x00000001
+#define HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_ACK 0x00000002
+#define HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_MASK 0x00000004
+#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT 0x00000010
+#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_ACK 0x00000020
+#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_MASK 0x00000040
+#define HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK 0x00000080
+#define HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_INT 0x00000100
+#define HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_ACK 0x00000200
+#define HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_MASK 0x00000400
+#define HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_INT 0x00001000
+#define HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_ACK 0x00002000
+#define HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_MASK 0x00004000
+
+#define REG_HDMI_HDCP_LINK0_STATUS 0x0000011c
+#define HDMI_HDCP_LINK0_STATUS_AN_0_READY 0x00000100
+#define HDMI_HDCP_LINK0_STATUS_AN_1_READY 0x00000200
+#define HDMI_HDCP_LINK0_STATUS_RI_MATCHES 0x00001000
+#define HDMI_HDCP_LINK0_STATUS_V_MATCHES 0x00100000
+#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK 0x70000000
+#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT 28
+static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state val)
+{
+ return ((val) << HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT) & HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK;
+}
+
+#define REG_HDMI_HDCP_DDC_CTRL_0 0x00000120
+#define HDMI_HDCP_DDC_CTRL_0_DISABLE 0x00000001
+
+#define REG_HDMI_HDCP_DDC_CTRL_1 0x00000124
+#define HDMI_HDCP_DDC_CTRL_1_FAILED_ACK 0x00000001
+
+#define REG_HDMI_HDCP_DDC_STATUS 0x00000128
+#define HDMI_HDCP_DDC_STATUS_XFER_REQ 0x00000010
+#define HDMI_HDCP_DDC_STATUS_XFER_DONE 0x00000400
+#define HDMI_HDCP_DDC_STATUS_ABORTED 0x00001000
+#define HDMI_HDCP_DDC_STATUS_TIMEOUT 0x00002000
+#define HDMI_HDCP_DDC_STATUS_NACK0 0x00004000
+#define HDMI_HDCP_DDC_STATUS_NACK1 0x00008000
+#define HDMI_HDCP_DDC_STATUS_FAILED 0x00010000
+
+#define REG_HDMI_HDCP_ENTROPY_CTRL0 0x0000012c
+
+#define REG_HDMI_HDCP_ENTROPY_CTRL1 0x0000025c
+
+#define REG_HDMI_HDCP_RESET 0x00000130
+#define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE 0x00000001
+
+#define REG_HDMI_HDCP_RCVPORT_DATA0 0x00000134
+
+#define REG_HDMI_HDCP_RCVPORT_DATA1 0x00000138
+
+#define REG_HDMI_HDCP_RCVPORT_DATA2_0 0x0000013c
+
+#define REG_HDMI_HDCP_RCVPORT_DATA2_1 0x00000140
+
+#define REG_HDMI_HDCP_RCVPORT_DATA3 0x00000144
+
+#define REG_HDMI_HDCP_RCVPORT_DATA4 0x00000148
+
+#define REG_HDMI_HDCP_RCVPORT_DATA5 0x0000014c
+
+#define REG_HDMI_HDCP_RCVPORT_DATA6 0x00000150
+
+#define REG_HDMI_HDCP_RCVPORT_DATA7 0x00000154
+
+#define REG_HDMI_HDCP_RCVPORT_DATA8 0x00000158
+
+#define REG_HDMI_HDCP_RCVPORT_DATA9 0x0000015c
+
+#define REG_HDMI_HDCP_RCVPORT_DATA10 0x00000160
+
+#define REG_HDMI_HDCP_RCVPORT_DATA11 0x00000164
+
+#define REG_HDMI_HDCP_RCVPORT_DATA12 0x00000168
+
+#define REG_HDMI_VENSPEC_INFO0 0x0000016c
+
+#define REG_HDMI_VENSPEC_INFO1 0x00000170
+
+#define REG_HDMI_VENSPEC_INFO2 0x00000174
+
+#define REG_HDMI_VENSPEC_INFO3 0x00000178
+
+#define REG_HDMI_VENSPEC_INFO4 0x0000017c
+
+#define REG_HDMI_VENSPEC_INFO5 0x00000180
+
+#define REG_HDMI_VENSPEC_INFO6 0x00000184
+
+#define REG_HDMI_AUDIO_CFG 0x000001d0
+#define HDMI_AUDIO_CFG_ENGINE_ENABLE 0x00000001
+#define HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK 0x000000f0
+#define HDMI_AUDIO_CFG_FIFO_WATERMARK__SHIFT 4
+static inline uint32_t HDMI_AUDIO_CFG_FIFO_WATERMARK(uint32_t val)
+{
+ return ((val) << HDMI_AUDIO_CFG_FIFO_WATERMARK__SHIFT) & HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK;
+}
+
+#define REG_HDMI_USEC_REFTIMER 0x00000208
+
+#define REG_HDMI_DDC_CTRL 0x0000020c
+#define HDMI_DDC_CTRL_GO 0x00000001
+#define HDMI_DDC_CTRL_SOFT_RESET 0x00000002
+#define HDMI_DDC_CTRL_SEND_RESET 0x00000004
+#define HDMI_DDC_CTRL_SW_STATUS_RESET 0x00000008
+#define HDMI_DDC_CTRL_TRANSACTION_CNT__MASK 0x00300000
+#define HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT 20
+static inline uint32_t HDMI_DDC_CTRL_TRANSACTION_CNT(uint32_t val)
+{
+ return ((val) << HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT) & HDMI_DDC_CTRL_TRANSACTION_CNT__MASK;
+}
+
+#define REG_HDMI_DDC_ARBITRATION 0x00000210
+#define HDMI_DDC_ARBITRATION_HW_ARBITRATION 0x00000010
+
+#define REG_HDMI_DDC_INT_CTRL 0x00000214
+#define HDMI_DDC_INT_CTRL_SW_DONE_INT 0x00000001
+#define HDMI_DDC_INT_CTRL_SW_DONE_ACK 0x00000002
+#define HDMI_DDC_INT_CTRL_SW_DONE_MASK 0x00000004
+
+#define REG_HDMI_DDC_SW_STATUS 0x00000218
+#define HDMI_DDC_SW_STATUS_NACK0 0x00001000
+#define HDMI_DDC_SW_STATUS_NACK1 0x00002000
+#define HDMI_DDC_SW_STATUS_NACK2 0x00004000
+#define HDMI_DDC_SW_STATUS_NACK3 0x00008000
+
+#define REG_HDMI_DDC_HW_STATUS 0x0000021c
+#define HDMI_DDC_HW_STATUS_DONE 0x00000008
+
+#define REG_HDMI_DDC_SPEED 0x00000220
+#define HDMI_DDC_SPEED_THRESHOLD__MASK 0x00000003
+#define HDMI_DDC_SPEED_THRESHOLD__SHIFT 0
+static inline uint32_t HDMI_DDC_SPEED_THRESHOLD(uint32_t val)
+{
+ return ((val) << HDMI_DDC_SPEED_THRESHOLD__SHIFT) & HDMI_DDC_SPEED_THRESHOLD__MASK;
+}
+#define HDMI_DDC_SPEED_PRESCALE__MASK 0xffff0000
+#define HDMI_DDC_SPEED_PRESCALE__SHIFT 16
+static inline uint32_t HDMI_DDC_SPEED_PRESCALE(uint32_t val)
+{
+ return ((val) << HDMI_DDC_SPEED_PRESCALE__SHIFT) & HDMI_DDC_SPEED_PRESCALE__MASK;
+}
+
+#define REG_HDMI_DDC_SETUP 0x00000224
+#define HDMI_DDC_SETUP_TIMEOUT__MASK 0xff000000
+#define HDMI_DDC_SETUP_TIMEOUT__SHIFT 24
+static inline uint32_t HDMI_DDC_SETUP_TIMEOUT(uint32_t val)
+{
+ return ((val) << HDMI_DDC_SETUP_TIMEOUT__SHIFT) & HDMI_DDC_SETUP_TIMEOUT__MASK;
+}
+
+static inline uint32_t REG_HDMI_I2C_TRANSACTION(uint32_t i0) { return 0x00000228 + 0x4*i0; }
+
+static inline uint32_t REG_HDMI_I2C_TRANSACTION_REG(uint32_t i0) { return 0x00000228 + 0x4*i0; }
+#define HDMI_I2C_TRANSACTION_REG_RW__MASK 0x00000001
+#define HDMI_I2C_TRANSACTION_REG_RW__SHIFT 0
+static inline uint32_t HDMI_I2C_TRANSACTION_REG_RW(enum hdmi_ddc_read_write val)
+{
+ return ((val) << HDMI_I2C_TRANSACTION_REG_RW__SHIFT) & HDMI_I2C_TRANSACTION_REG_RW__MASK;
+}
+#define HDMI_I2C_TRANSACTION_REG_STOP_ON_NACK 0x00000100
+#define HDMI_I2C_TRANSACTION_REG_START 0x00001000
+#define HDMI_I2C_TRANSACTION_REG_STOP 0x00002000
+#define HDMI_I2C_TRANSACTION_REG_CNT__MASK 0x00ff0000
+#define HDMI_I2C_TRANSACTION_REG_CNT__SHIFT 16
+static inline uint32_t HDMI_I2C_TRANSACTION_REG_CNT(uint32_t val)
+{
+ return ((val) << HDMI_I2C_TRANSACTION_REG_CNT__SHIFT) & HDMI_I2C_TRANSACTION_REG_CNT__MASK;
+}
+
+#define REG_HDMI_DDC_DATA 0x00000238
+#define HDMI_DDC_DATA_DATA_RW__MASK 0x00000001
+#define HDMI_DDC_DATA_DATA_RW__SHIFT 0
+static inline uint32_t HDMI_DDC_DATA_DATA_RW(enum hdmi_ddc_read_write val)
+{
+ return ((val) << HDMI_DDC_DATA_DATA_RW__SHIFT) & HDMI_DDC_DATA_DATA_RW__MASK;
+}
+#define HDMI_DDC_DATA_DATA__MASK 0x0000ff00
+#define HDMI_DDC_DATA_DATA__SHIFT 8
+static inline uint32_t HDMI_DDC_DATA_DATA(uint32_t val)
+{
+ return ((val) << HDMI_DDC_DATA_DATA__SHIFT) & HDMI_DDC_DATA_DATA__MASK;
+}
+#define HDMI_DDC_DATA_INDEX__MASK 0x00ff0000
+#define HDMI_DDC_DATA_INDEX__SHIFT 16
+static inline uint32_t HDMI_DDC_DATA_INDEX(uint32_t val)
+{
+ return ((val) << HDMI_DDC_DATA_INDEX__SHIFT) & HDMI_DDC_DATA_INDEX__MASK;
+}
+#define HDMI_DDC_DATA_INDEX_WRITE 0x80000000
+
+#define REG_HDMI_HDCP_SHA_CTRL 0x0000023c
+
+#define REG_HDMI_HDCP_SHA_STATUS 0x00000240
+#define HDMI_HDCP_SHA_STATUS_BLOCK_DONE 0x00000001
+#define HDMI_HDCP_SHA_STATUS_COMP_DONE 0x00000010
+
+#define REG_HDMI_HDCP_SHA_DATA 0x00000244
+#define HDMI_HDCP_SHA_DATA_DONE 0x00000001
+
+#define REG_HDMI_HPD_INT_STATUS 0x00000250
+#define HDMI_HPD_INT_STATUS_INT 0x00000001
+#define HDMI_HPD_INT_STATUS_CABLE_DETECTED 0x00000002
+
+#define REG_HDMI_HPD_INT_CTRL 0x00000254
+#define HDMI_HPD_INT_CTRL_INT_ACK 0x00000001
+#define HDMI_HPD_INT_CTRL_INT_CONNECT 0x00000002
+#define HDMI_HPD_INT_CTRL_INT_EN 0x00000004
+#define HDMI_HPD_INT_CTRL_RX_INT_ACK 0x00000010
+#define HDMI_HPD_INT_CTRL_RX_INT_EN 0x00000020
+#define HDMI_HPD_INT_CTRL_RCV_PLUGIN_DET_MASK 0x00000200
+
+#define REG_HDMI_HPD_CTRL 0x00000258
+#define HDMI_HPD_CTRL_TIMEOUT__MASK 0x00001fff
+#define HDMI_HPD_CTRL_TIMEOUT__SHIFT 0
+static inline uint32_t HDMI_HPD_CTRL_TIMEOUT(uint32_t val)
+{
+ return ((val) << HDMI_HPD_CTRL_TIMEOUT__SHIFT) & HDMI_HPD_CTRL_TIMEOUT__MASK;
+}
+#define HDMI_HPD_CTRL_ENABLE 0x10000000
+
+#define REG_HDMI_DDC_REF 0x0000027c
+#define HDMI_DDC_REF_REFTIMER_ENABLE 0x00010000
+#define HDMI_DDC_REF_REFTIMER__MASK 0x0000ffff
+#define HDMI_DDC_REF_REFTIMER__SHIFT 0
+static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val)
+{
+ return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK;
+}
+
+#define REG_HDMI_HDCP_SW_UPPER_AKSV 0x00000284
+
+#define REG_HDMI_HDCP_SW_LOWER_AKSV 0x00000288
+
+#define REG_HDMI_CEC_CTRL 0x0000028c
+
+#define REG_HDMI_CEC_WR_DATA 0x00000290
+
+#define REG_HDMI_CEC_CEC_RETRANSMIT 0x00000294
+
+#define REG_HDMI_CEC_STATUS 0x00000298
+
+#define REG_HDMI_CEC_INT 0x0000029c
+
+#define REG_HDMI_CEC_ADDR 0x000002a0
+
+#define REG_HDMI_CEC_TIME 0x000002a4
+
+#define REG_HDMI_CEC_REFTIMER 0x000002a8
+
+#define REG_HDMI_CEC_RD_DATA 0x000002ac
+
+#define REG_HDMI_CEC_RD_FILTER 0x000002b0
+
+#define REG_HDMI_ACTIVE_HSYNC 0x000002b4
+#define HDMI_ACTIVE_HSYNC_START__MASK 0x00001fff
+#define HDMI_ACTIVE_HSYNC_START__SHIFT 0
+static inline uint32_t HDMI_ACTIVE_HSYNC_START(uint32_t val)
+{
+ return ((val) << HDMI_ACTIVE_HSYNC_START__SHIFT) & HDMI_ACTIVE_HSYNC_START__MASK;
+}
+#define HDMI_ACTIVE_HSYNC_END__MASK 0x0fff0000
+#define HDMI_ACTIVE_HSYNC_END__SHIFT 16
+static inline uint32_t HDMI_ACTIVE_HSYNC_END(uint32_t val)
+{
+ return ((val) << HDMI_ACTIVE_HSYNC_END__SHIFT) & HDMI_ACTIVE_HSYNC_END__MASK;
+}
+
+#define REG_HDMI_ACTIVE_VSYNC 0x000002b8
+#define HDMI_ACTIVE_VSYNC_START__MASK 0x00001fff
+#define HDMI_ACTIVE_VSYNC_START__SHIFT 0
+static inline uint32_t HDMI_ACTIVE_VSYNC_START(uint32_t val)
+{
+ return ((val) << HDMI_ACTIVE_VSYNC_START__SHIFT) & HDMI_ACTIVE_VSYNC_START__MASK;
+}
+#define HDMI_ACTIVE_VSYNC_END__MASK 0x1fff0000
+#define HDMI_ACTIVE_VSYNC_END__SHIFT 16
+static inline uint32_t HDMI_ACTIVE_VSYNC_END(uint32_t val)
+{
+ return ((val) << HDMI_ACTIVE_VSYNC_END__SHIFT) & HDMI_ACTIVE_VSYNC_END__MASK;
+}
+
+#define REG_HDMI_VSYNC_ACTIVE_F2 0x000002bc
+#define HDMI_VSYNC_ACTIVE_F2_START__MASK 0x00001fff
+#define HDMI_VSYNC_ACTIVE_F2_START__SHIFT 0
+static inline uint32_t HDMI_VSYNC_ACTIVE_F2_START(uint32_t val)
+{
+ return ((val) << HDMI_VSYNC_ACTIVE_F2_START__SHIFT) & HDMI_VSYNC_ACTIVE_F2_START__MASK;
+}
+#define HDMI_VSYNC_ACTIVE_F2_END__MASK 0x1fff0000
+#define HDMI_VSYNC_ACTIVE_F2_END__SHIFT 16
+static inline uint32_t HDMI_VSYNC_ACTIVE_F2_END(uint32_t val)
+{
+ return ((val) << HDMI_VSYNC_ACTIVE_F2_END__SHIFT) & HDMI_VSYNC_ACTIVE_F2_END__MASK;
+}
+
+#define REG_HDMI_TOTAL 0x000002c0
+#define HDMI_TOTAL_H_TOTAL__MASK 0x00001fff
+#define HDMI_TOTAL_H_TOTAL__SHIFT 0
+static inline uint32_t HDMI_TOTAL_H_TOTAL(uint32_t val)
+{
+ return ((val) << HDMI_TOTAL_H_TOTAL__SHIFT) & HDMI_TOTAL_H_TOTAL__MASK;
+}
+#define HDMI_TOTAL_V_TOTAL__MASK 0x1fff0000
+#define HDMI_TOTAL_V_TOTAL__SHIFT 16
+static inline uint32_t HDMI_TOTAL_V_TOTAL(uint32_t val)
+{
+ return ((val) << HDMI_TOTAL_V_TOTAL__SHIFT) & HDMI_TOTAL_V_TOTAL__MASK;
+}
+
+#define REG_HDMI_VSYNC_TOTAL_F2 0x000002c4
+#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK 0x00001fff
+#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT 0
+static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
+{
+ return ((val) << HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT) & HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK;
+}
+
+#define REG_HDMI_FRAME_CTRL 0x000002c8
+#define HDMI_FRAME_CTRL_RGB_MUX_SEL_BGR 0x00001000
+#define HDMI_FRAME_CTRL_VSYNC_LOW 0x10000000
+#define HDMI_FRAME_CTRL_HSYNC_LOW 0x20000000
+#define HDMI_FRAME_CTRL_INTERLACED_EN 0x80000000
+
+#define REG_HDMI_AUD_INT 0x000002cc
+#define HDMI_AUD_INT_AUD_FIFO_URUN_INT 0x00000001
+#define HDMI_AUD_INT_AUD_FIFO_URUN_MASK				0x00000002
+#define HDMI_AUD_INT_AUD_SAM_DROP_INT 0x00000004
+#define HDMI_AUD_INT_AUD_SAM_DROP_MASK 0x00000008
+
+#define REG_HDMI_PHY_CTRL 0x000002d4
+#define HDMI_PHY_CTRL_SW_RESET_PLL 0x00000001
+#define HDMI_PHY_CTRL_SW_RESET_PLL_LOW 0x00000002
+#define HDMI_PHY_CTRL_SW_RESET 0x00000004
+#define HDMI_PHY_CTRL_SW_RESET_LOW 0x00000008
+
+#define REG_HDMI_CEC_WR_RANGE 0x000002dc
+
+#define REG_HDMI_CEC_RD_RANGE 0x000002e0
+
+#define REG_HDMI_VERSION 0x000002e4
+
+#define REG_HDMI_CEC_COMPL_CTL 0x00000360
+
+#define REG_HDMI_CEC_RD_START_RANGE 0x00000364
+
+#define REG_HDMI_CEC_RD_TOTAL_RANGE 0x00000368
+
+#define REG_HDMI_CEC_RD_ERR_RESP_LO 0x0000036c
+
+#define REG_HDMI_CEC_WR_CHECK_CONFIG 0x00000370
+
+#define REG_HDMI_8x60_PHY_REG0 0x00000000
+#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c
+#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT 2
+static inline uint32_t HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(uint32_t val)
+{
+ return ((val) << HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT) & HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK;
+}
+
+#define REG_HDMI_8x60_PHY_REG1 0x00000004
+#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK 0x000000f0
+#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT 4
+static inline uint32_t HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(uint32_t val)
+{
+ return ((val) << HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT) & HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK;
+}
+#define HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK 0x0000000f
+#define HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT 0
+static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val)
+{
+ return ((val) << HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT) & HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK;
+}
+
+#define REG_HDMI_8x60_PHY_REG2 0x00000008
+#define HDMI_8x60_PHY_REG2_PD_DESER 0x00000001
+#define HDMI_8x60_PHY_REG2_PD_DRIVE_1 0x00000002
+#define HDMI_8x60_PHY_REG2_PD_DRIVE_2 0x00000004
+#define HDMI_8x60_PHY_REG2_PD_DRIVE_3 0x00000008
+#define HDMI_8x60_PHY_REG2_PD_DRIVE_4 0x00000010
+#define HDMI_8x60_PHY_REG2_PD_PLL 0x00000020
+#define HDMI_8x60_PHY_REG2_PD_PWRGEN 0x00000040
+#define HDMI_8x60_PHY_REG2_RCV_SENSE_EN 0x00000080
+
+#define REG_HDMI_8x60_PHY_REG3 0x0000000c
+#define HDMI_8x60_PHY_REG3_PLL_ENABLE 0x00000001
+
+#define REG_HDMI_8x60_PHY_REG4 0x00000010
+
+#define REG_HDMI_8x60_PHY_REG5 0x00000014
+
+#define REG_HDMI_8x60_PHY_REG6 0x00000018
+
+#define REG_HDMI_8x60_PHY_REG7 0x0000001c
+
+#define REG_HDMI_8x60_PHY_REG8 0x00000020
+
+#define REG_HDMI_8x60_PHY_REG9 0x00000024
+
+#define REG_HDMI_8x60_PHY_REG10 0x00000028
+
+#define REG_HDMI_8x60_PHY_REG11 0x0000002c
+
+#define REG_HDMI_8x60_PHY_REG12 0x00000030
+#define HDMI_8x60_PHY_REG12_RETIMING_EN 0x00000001
+#define HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN 0x00000002
+#define HDMI_8x60_PHY_REG12_FORCE_LOCK 0x00000010
+
+#define REG_HDMI_8960_PHY_REG0 0x00000000
+
+#define REG_HDMI_8960_PHY_REG1 0x00000004
+
+#define REG_HDMI_8960_PHY_REG2 0x00000008
+
+#define REG_HDMI_8960_PHY_REG3 0x0000000c
+
+#define REG_HDMI_8960_PHY_REG4 0x00000010
+
+#define REG_HDMI_8960_PHY_REG5 0x00000014
+
+#define REG_HDMI_8960_PHY_REG6 0x00000018
+
+#define REG_HDMI_8960_PHY_REG7 0x0000001c
+
+#define REG_HDMI_8960_PHY_REG8 0x00000020
+
+#define REG_HDMI_8960_PHY_REG9 0x00000024
+
+#define REG_HDMI_8960_PHY_REG10 0x00000028
+
+#define REG_HDMI_8960_PHY_REG11 0x0000002c
+
+#define REG_HDMI_8960_PHY_REG12 0x00000030
+#define HDMI_8960_PHY_REG12_SW_RESET 0x00000020
+#define HDMI_8960_PHY_REG12_PWRDN_B 0x00000080
+
+#define REG_HDMI_8960_PHY_REG_BIST_CFG 0x00000034
+
+#define REG_HDMI_8960_PHY_DEBUG_BUS_SEL 0x00000038
+
+#define REG_HDMI_8960_PHY_REG_MISC0 0x0000003c
+
+#define REG_HDMI_8960_PHY_REG13 0x00000040
+
+#define REG_HDMI_8960_PHY_REG14 0x00000044
+
+#define REG_HDMI_8960_PHY_REG15 0x00000048
+
+#define REG_HDMI_8960_PHY_PLL_REFCLK_CFG 0x00000000
+
+#define REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG 0x00000004
+
+#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 0x00000008
+
+#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 0x0000000c
+
+#define REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG 0x00000010
+
+#define REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG 0x00000014
+
+#define REG_HDMI_8960_PHY_PLL_PWRDN_B 0x00000018
+#define HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL 0x00000002
+#define HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B 0x00000008
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG0 0x0000001c
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG1 0x00000020
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG2 0x00000024
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG3 0x00000028
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG4 0x0000002c
+
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG0 0x00000030
+
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG1 0x00000034
+
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG2 0x00000038
+
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG3 0x0000003c
+
+#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 0x00000040
+
+#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 0x00000044
+
+#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 0x00000048
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 0x0000004c
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 0x00000050
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 0x00000054
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 0x00000058
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 0x0000005c
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 0x00000060
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 0x00000064
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 0x00000068
+
+#define REG_HDMI_8960_PHY_PLL_DEBUG_SEL 0x0000006c
+
+#define REG_HDMI_8960_PHY_PLL_MISC0 0x00000070
+
+#define REG_HDMI_8960_PHY_PLL_MISC1 0x00000074
+
+#define REG_HDMI_8960_PHY_PLL_MISC2 0x00000078
+
+#define REG_HDMI_8960_PHY_PLL_MISC3 0x0000007c
+
+#define REG_HDMI_8960_PHY_PLL_MISC4 0x00000080
+
+#define REG_HDMI_8960_PHY_PLL_MISC5 0x00000084
+
+#define REG_HDMI_8960_PHY_PLL_MISC6 0x00000088
+
+#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS0 0x0000008c
+
+#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS1 0x00000090
+
+#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS2 0x00000094
+
+#define REG_HDMI_8960_PHY_PLL_STATUS0 0x00000098
+#define HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK 0x00000001
+
+#define REG_HDMI_8960_PHY_PLL_STATUS1 0x0000009c
+
+#define REG_HDMI_8x74_ANA_CFG0 0x00000000
+
+#define REG_HDMI_8x74_ANA_CFG1 0x00000004
+
+#define REG_HDMI_8x74_PD_CTRL0 0x00000010
+
+#define REG_HDMI_8x74_PD_CTRL1 0x00000014
+
+#define REG_HDMI_8x74_BIST_CFG0 0x00000034
+
+#define REG_HDMI_8x74_BIST_PATN0 0x0000003c
+
+#define REG_HDMI_8x74_BIST_PATN1 0x00000040
+
+#define REG_HDMI_8x74_BIST_PATN2 0x00000044
+
+#define REG_HDMI_8x74_BIST_PATN3 0x00000048
+
+#define REG_HDMI_28nm_PHY_PLL_REFCLK_CFG 0x00000000
+
+#define REG_HDMI_28nm_PHY_PLL_POSTDIV1_CFG 0x00000004
+
+#define REG_HDMI_28nm_PHY_PLL_CHGPUMP_CFG 0x00000008
+
+#define REG_HDMI_28nm_PHY_PLL_VCOLPF_CFG 0x0000000c
+
+#define REG_HDMI_28nm_PHY_PLL_VREG_CFG 0x00000010
+
+#define REG_HDMI_28nm_PHY_PLL_PWRGEN_CFG 0x00000014
+
+#define REG_HDMI_28nm_PHY_PLL_DMUX_CFG 0x00000018
+
+#define REG_HDMI_28nm_PHY_PLL_AMUX_CFG 0x0000001c
+
+#define REG_HDMI_28nm_PHY_PLL_GLB_CFG 0x00000020
+#define HDMI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B 0x00000001
+#define HDMI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B 0x00000002
+#define HDMI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B 0x00000004
+#define HDMI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE 0x00000008
+
+#define REG_HDMI_28nm_PHY_PLL_POSTDIV2_CFG 0x00000024
+
+#define REG_HDMI_28nm_PHY_PLL_POSTDIV3_CFG 0x00000028
+
+#define REG_HDMI_28nm_PHY_PLL_LPFR_CFG 0x0000002c
+
+#define REG_HDMI_28nm_PHY_PLL_LPFC1_CFG 0x00000030
+
+#define REG_HDMI_28nm_PHY_PLL_LPFC2_CFG 0x00000034
+
+#define REG_HDMI_28nm_PHY_PLL_SDM_CFG0 0x00000038
+
+#define REG_HDMI_28nm_PHY_PLL_SDM_CFG1 0x0000003c
+
+#define REG_HDMI_28nm_PHY_PLL_SDM_CFG2 0x00000040
+
+#define REG_HDMI_28nm_PHY_PLL_SDM_CFG3 0x00000044
+
+#define REG_HDMI_28nm_PHY_PLL_SDM_CFG4 0x00000048
+
+#define REG_HDMI_28nm_PHY_PLL_SSC_CFG0 0x0000004c
+
+#define REG_HDMI_28nm_PHY_PLL_SSC_CFG1 0x00000050
+
+#define REG_HDMI_28nm_PHY_PLL_SSC_CFG2 0x00000054
+
+#define REG_HDMI_28nm_PHY_PLL_SSC_CFG3 0x00000058
+
+#define REG_HDMI_28nm_PHY_PLL_LKDET_CFG0 0x0000005c
+
+#define REG_HDMI_28nm_PHY_PLL_LKDET_CFG1 0x00000060
+
+#define REG_HDMI_28nm_PHY_PLL_LKDET_CFG2 0x00000064
+
+#define REG_HDMI_28nm_PHY_PLL_TEST_CFG 0x00000068
+#define HDMI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET 0x00000001
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG0 0x0000006c
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG1 0x00000070
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG2 0x00000074
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG3 0x00000078
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG4 0x0000007c
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG5 0x00000080
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG6 0x00000084
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG7 0x00000088
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG8 0x0000008c
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG9 0x00000090
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG10 0x00000094
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG11 0x00000098
+
+#define REG_HDMI_28nm_PHY_PLL_EFUSE_CFG 0x0000009c
+
+#define REG_HDMI_28nm_PHY_PLL_DEBUG_BUS_SEL 0x000000a0
+
+#define REG_HDMI_8996_PHY_CFG 0x00000000
+
+#define REG_HDMI_8996_PHY_PD_CTL 0x00000004
+
+#define REG_HDMI_8996_PHY_MODE 0x00000008
+
+#define REG_HDMI_8996_PHY_MISR_CLEAR 0x0000000c
+
+#define REG_HDMI_8996_PHY_TX0_TX1_BIST_CFG0 0x00000010
+
+#define REG_HDMI_8996_PHY_TX0_TX1_BIST_CFG1 0x00000014
+
+#define REG_HDMI_8996_PHY_TX0_TX1_PRBS_SEED_BYTE0 0x00000018
+
+#define REG_HDMI_8996_PHY_TX0_TX1_PRBS_SEED_BYTE1 0x0000001c
+
+#define REG_HDMI_8996_PHY_TX0_TX1_BIST_PATTERN0 0x00000020
+
+#define REG_HDMI_8996_PHY_TX0_TX1_BIST_PATTERN1 0x00000024
+
+#define REG_HDMI_8996_PHY_TX2_TX3_BIST_CFG0 0x00000028
+
+#define REG_HDMI_8996_PHY_TX2_TX3_BIST_CFG1 0x0000002c
+
+#define REG_HDMI_8996_PHY_TX2_TX3_PRBS_SEED_BYTE0 0x00000030
+
+#define REG_HDMI_8996_PHY_TX2_TX3_PRBS_SEED_BYTE1 0x00000034
+
+#define REG_HDMI_8996_PHY_TX2_TX3_BIST_PATTERN0 0x00000038
+
+#define REG_HDMI_8996_PHY_TX2_TX3_BIST_PATTERN1 0x0000003c
+
+#define REG_HDMI_8996_PHY_DEBUG_BUS_SEL 0x00000040
+
+#define REG_HDMI_8996_PHY_TXCAL_CFG0 0x00000044
+
+#define REG_HDMI_8996_PHY_TXCAL_CFG1 0x00000048
+
+#define REG_HDMI_8996_PHY_TX0_TX1_LANE_CTL 0x0000004c
+
+#define REG_HDMI_8996_PHY_TX2_TX3_LANE_CTL 0x00000050
+
+#define REG_HDMI_8996_PHY_LANE_BIST_CONFIG 0x00000054
+
+#define REG_HDMI_8996_PHY_CLOCK 0x00000058
+
+#define REG_HDMI_8996_PHY_MISC1 0x0000005c
+
+#define REG_HDMI_8996_PHY_MISC2 0x00000060
+
+#define REG_HDMI_8996_PHY_TX0_TX1_BIST_STATUS0 0x00000064
+
+#define REG_HDMI_8996_PHY_TX0_TX1_BIST_STATUS1 0x00000068
+
+#define REG_HDMI_8996_PHY_TX0_TX1_BIST_STATUS2 0x0000006c
+
+#define REG_HDMI_8996_PHY_TX2_TX3_BIST_STATUS0 0x00000070
+
+#define REG_HDMI_8996_PHY_TX2_TX3_BIST_STATUS1 0x00000074
+
+#define REG_HDMI_8996_PHY_TX2_TX3_BIST_STATUS2 0x00000078
+
+#define REG_HDMI_8996_PHY_PRE_MISR_STATUS0 0x0000007c
+
+#define REG_HDMI_8996_PHY_PRE_MISR_STATUS1 0x00000080
+
+#define REG_HDMI_8996_PHY_PRE_MISR_STATUS2 0x00000084
+
+#define REG_HDMI_8996_PHY_PRE_MISR_STATUS3 0x00000088
+
+#define REG_HDMI_8996_PHY_POST_MISR_STATUS0 0x0000008c
+
+#define REG_HDMI_8996_PHY_POST_MISR_STATUS1 0x00000090
+
+#define REG_HDMI_8996_PHY_POST_MISR_STATUS2 0x00000094
+
+#define REG_HDMI_8996_PHY_POST_MISR_STATUS3 0x00000098
+
+#define REG_HDMI_8996_PHY_STATUS 0x0000009c
+
+#define REG_HDMI_8996_PHY_MISC3_STATUS 0x000000a0
+
+#define REG_HDMI_8996_PHY_MISC4_STATUS 0x000000a4
+
+#define REG_HDMI_8996_PHY_DEBUG_BUS0 0x000000a8
+
+#define REG_HDMI_8996_PHY_DEBUG_BUS1 0x000000ac
+
+#define REG_HDMI_8996_PHY_DEBUG_BUS2 0x000000b0
+
+#define REG_HDMI_8996_PHY_DEBUG_BUS3 0x000000b4
+
+#define REG_HDMI_8996_PHY_PHY_REVISION_ID0 0x000000b8
+
+#define REG_HDMI_8996_PHY_PHY_REVISION_ID1 0x000000bc
+
+#define REG_HDMI_8996_PHY_PHY_REVISION_ID2 0x000000c0
+
+#define REG_HDMI_8996_PHY_PHY_REVISION_ID3 0x000000c4
+
+#define REG_HDMI_PHY_QSERDES_COM_ATB_SEL1 0x00000000
+
+#define REG_HDMI_PHY_QSERDES_COM_ATB_SEL2 0x00000004
+
+#define REG_HDMI_PHY_QSERDES_COM_FREQ_UPDATE 0x00000008
+
+#define REG_HDMI_PHY_QSERDES_COM_BG_TIMER 0x0000000c
+
+#define REG_HDMI_PHY_QSERDES_COM_SSC_EN_CENTER 0x00000010
+
+#define REG_HDMI_PHY_QSERDES_COM_SSC_ADJ_PER1 0x00000014
+
+#define REG_HDMI_PHY_QSERDES_COM_SSC_ADJ_PER2 0x00000018
+
+#define REG_HDMI_PHY_QSERDES_COM_SSC_PER1 0x0000001c
+
+#define REG_HDMI_PHY_QSERDES_COM_SSC_PER2 0x00000020
+
+#define REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE1 0x00000024
+
+#define REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE2 0x00000028
+
+#define REG_HDMI_PHY_QSERDES_COM_POST_DIV 0x0000002c
+
+#define REG_HDMI_PHY_QSERDES_COM_POST_DIV_MUX 0x00000030
+
+#define REG_HDMI_PHY_QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x00000034
+
+#define REG_HDMI_PHY_QSERDES_COM_CLK_ENABLE1 0x00000038
+
+#define REG_HDMI_PHY_QSERDES_COM_SYS_CLK_CTRL 0x0000003c
+
+#define REG_HDMI_PHY_QSERDES_COM_SYSCLK_BUF_ENABLE 0x00000040
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_EN 0x00000044
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_IVCO 0x00000048
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE0 0x0000004c
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE0 0x00000050
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE0 0x00000054
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE1 0x00000058
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE1 0x0000005c
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE1 0x00000060
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE2 0x00000064
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD0 0x00000064
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE2 0x00000068
+
+#define REG_HDMI_PHY_QSERDES_COM_EP_CLOCK_DETECT_CTRL 0x00000068
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE2 0x0000006c
+
+#define REG_HDMI_PHY_QSERDES_COM_SYSCLK_DET_COMP_STATUS 0x0000006c
+
+#define REG_HDMI_PHY_QSERDES_COM_BG_TRIM 0x00000070
+
+#define REG_HDMI_PHY_QSERDES_COM_CLK_EP_DIV 0x00000074
+
+#define REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE0 0x00000078
+
+#define REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE1 0x0000007c
+
+#define REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE2 0x00000080
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD1 0x00000080
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE0 0x00000084
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE1 0x00000088
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE2 0x0000008c
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD2 0x0000008c
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE0 0x00000090
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE1 0x00000094
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE2 0x00000098
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD3 0x00000098
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_CNTRL 0x0000009c
+
+#define REG_HDMI_PHY_QSERDES_COM_PHASE_SEL_CTRL 0x000000a0
+
+#define REG_HDMI_PHY_QSERDES_COM_PHASE_SEL_DC 0x000000a4
+
+#define REG_HDMI_PHY_QSERDES_COM_CORE_CLK_IN_SYNC_SEL 0x000000a8
+
+#define REG_HDMI_PHY_QSERDES_COM_BIAS_EN_CTRL_BY_PSM 0x000000a8
+
+#define REG_HDMI_PHY_QSERDES_COM_SYSCLK_EN_SEL 0x000000ac
+
+#define REG_HDMI_PHY_QSERDES_COM_CML_SYSCLK_SEL 0x000000b0
+
+#define REG_HDMI_PHY_QSERDES_COM_RESETSM_CNTRL 0x000000b4
+
+#define REG_HDMI_PHY_QSERDES_COM_RESETSM_CNTRL2 0x000000b8
+
+#define REG_HDMI_PHY_QSERDES_COM_RESTRIM_CTRL 0x000000bc
+
+#define REG_HDMI_PHY_QSERDES_COM_RESTRIM_CTRL2 0x000000c0
+
+#define REG_HDMI_PHY_QSERDES_COM_RESCODE_DIV_NUM 0x000000c4
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP_EN 0x000000c8
+
+#define REG_HDMI_PHY_QSERDES_COM_LOCK_CMP_CFG 0x000000cc
+
+#define REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE0 0x000000d0
+
+#define REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE1 0x000000d4
+
+#define REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE2 0x000000d8
+
+#define REG_HDMI_PHY_QSERDES_COM_VCOCAL_DEADMAN_CTRL 0x000000d8
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE0 0x000000dc
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE0 0x000000e0
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE0 0x000000e4
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE1 0x000000e8
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE1 0x000000ec
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE1 0x000000f0
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE2 0x000000f4
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MINVAL1 0x000000f4
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE2 0x000000f8
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MINVAL2 0x000000f8
+
+#define REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE2 0x000000fc
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD4 0x000000fc
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_INITVAL 0x00000100
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_EN 0x00000104
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x00000108
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x0000010c
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE1 0x00000110
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE1 0x00000114
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE2 0x00000118
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAXVAL1 0x00000118
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE2 0x0000011c
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAXVAL2 0x0000011c
+
+#define REG_HDMI_PHY_QSERDES_COM_RES_TRIM_CONTROL2 0x00000120
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_CTRL 0x00000124
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAP 0x00000128
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE1_MODE0 0x0000012c
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE2_MODE0 0x00000130
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE1_MODE1 0x00000134
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE2_MODE1 0x00000138
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE1_MODE2 0x0000013c
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_INITVAL1 0x0000013c
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE2_MODE2 0x00000140
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_INITVAL2 0x00000140
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_TIMER1 0x00000144
+
+#define REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_TIMER2 0x00000148
+
+#define REG_HDMI_PHY_QSERDES_COM_SAR 0x0000014c
+
+#define REG_HDMI_PHY_QSERDES_COM_SAR_CLK 0x00000150
+
+#define REG_HDMI_PHY_QSERDES_COM_SAR_CODE_OUT_STATUS 0x00000154
+
+#define REG_HDMI_PHY_QSERDES_COM_SAR_CODE_READY_STATUS 0x00000158
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_STATUS 0x0000015c
+
+#define REG_HDMI_PHY_QSERDES_COM_RESET_SM_STATUS 0x00000160
+
+#define REG_HDMI_PHY_QSERDES_COM_RESTRIM_CODE_STATUS 0x00000164
+
+#define REG_HDMI_PHY_QSERDES_COM_PLLCAL_CODE1_STATUS 0x00000168
+
+#define REG_HDMI_PHY_QSERDES_COM_PLLCAL_CODE2_STATUS 0x0000016c
+
+#define REG_HDMI_PHY_QSERDES_COM_BG_CTRL 0x00000170
+
+#define REG_HDMI_PHY_QSERDES_COM_CLK_SELECT 0x00000174
+
+#define REG_HDMI_PHY_QSERDES_COM_HSCLK_SEL 0x00000178
+
+#define REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_BINCODE_STATUS 0x0000017c
+
+#define REG_HDMI_PHY_QSERDES_COM_PLL_ANALOG 0x00000180
+
+#define REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV 0x00000184
+
+#define REG_HDMI_PHY_QSERDES_COM_SW_RESET 0x00000188
+
+#define REG_HDMI_PHY_QSERDES_COM_CORE_CLK_EN 0x0000018c
+
+#define REG_HDMI_PHY_QSERDES_COM_C_READY_STATUS 0x00000190
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_CONFIG 0x00000194
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_RATE_OVERRIDE 0x00000198
+
+#define REG_HDMI_PHY_QSERDES_COM_SVS_MODE_CLK_SEL 0x0000019c
+
+#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS0 0x000001a0
+
+#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS1 0x000001a4
+
+#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS2 0x000001a8
+
+#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS3 0x000001ac
+
+#define REG_HDMI_PHY_QSERDES_COM_DEBUG_BUS_SEL 0x000001b0
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_MISC1 0x000001b4
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_MISC2 0x000001b8
+
+#define REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV_MODE1 0x000001bc
+
+#define REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV_MODE2 0x000001c0
+
+#define REG_HDMI_PHY_QSERDES_COM_CMN_RSVD5 0x000001c4
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_MODE_LANENO 0x00000000
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_INVERT 0x00000004
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_CLKBUF_ENABLE 0x00000008
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_CMN_CONTROL_ONE 0x0000000c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_CMN_CONTROL_TWO 0x00000010
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_CMN_CONTROL_THREE 0x00000014
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_EMP_POST1_LVL 0x00000018
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_POST2_EMPH 0x0000001c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_BOOST_LVL_UP_DN 0x00000020
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_HP_PD_ENABLES 0x00000024
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_IDLE_LVL_LARGE_AMP 0x00000028
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL 0x0000002c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL_OFFSET 0x00000030
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RESET_TSYNC_EN 0x00000034
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PRE_STALL_LDO_BOOST_EN 0x00000038
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_BAND 0x0000003c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_SLEW_CNTL 0x00000040
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_INTERFACE_SELECT 0x00000044
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_LPB_EN 0x00000048
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_TX 0x0000004c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_RX 0x00000050
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_OFFSET 0x00000054
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PERL_LENGTH1 0x00000058
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PERL_LENGTH2 0x0000005c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_SERDES_BYP_EN_OUT 0x00000060
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_DEBUG_BUS_SEL 0x00000064
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x00000068
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_POL_INV 0x0000006c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PARRATE_REC_DETECT_IDLE_EN 0x00000070
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN1 0x00000074
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN2 0x00000078
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN3 0x0000007c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN4 0x00000080
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN5 0x00000084
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN6 0x00000088
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN7 0x0000008c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_PATTERN8 0x00000090
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_LANE_MODE 0x00000094
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_IDAC_CAL_LANE_MODE 0x00000098
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_IDAC_CAL_LANE_MODE_CONFIGURATION 0x0000009c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_ATB_SEL1 0x000000a0
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_ATB_SEL2 0x000000a4
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RCV_DETECT_LVL 0x000000a8
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RCV_DETECT_LVL_2 0x000000ac
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED1 0x000000b0
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED2 0x000000b4
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED3 0x000000b8
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PRBS_SEED4 0x000000bc
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RESET_GEN 0x000000c0
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_RESET_GEN_MUXES 0x000000c4
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TRAN_DRVR_EMP_EN 0x000000c8
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_INTERFACE_MODE 0x000000cc
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_CTRL 0x000000d0
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_ENCODED_OR_DATA 0x000000d4
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_1_DIVIDER_BAND2 0x000000d8
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_2_DIVIDER_BAND2 0x000000dc
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_3_DIVIDER_BAND2 0x000000e0
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_4_DIVIDER_BAND2 0x000000e4
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_1_DIVIDER_BAND0_1 0x000000e8
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_2_DIVIDER_BAND0_1 0x000000ec
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_3_DIVIDER_BAND0_1 0x000000f0
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_PWM_GEAR_4_DIVIDER_BAND0_1 0x000000f4
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL1 0x000000f8
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL2 0x000000fc
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_ALOG_INTF_OBSV_CNTL 0x00000100
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_STATUS 0x00000104
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_ERROR_COUNT1 0x00000108
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_BIST_ERROR_COUNT2 0x0000010c
+
+#define REG_HDMI_PHY_QSERDES_TX_LX_TX_ALOG_INTF_OBSV 0x00000110
+
+
+#endif /* HDMI_XML */
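Every multi-bit field in the header above follows the same generated pattern: a
__MASK/__SHIFT pair plus a static inline packer that shifts and masks a value
into position. A minimal standalone sketch of the usual read-modify-write
idiom, assuming the definitions above are in scope and using hypothetical
reg_read()/reg_write() stand-ins for the driver's hdmi_read()/hdmi_write():

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical shadow-register accessors; the real driver goes
	 * through the ioremap'd HDMI block via hdmi_read()/hdmi_write(). */
	static uint32_t shadow[0x400];
	static uint32_t reg_read(uint32_t reg) { return shadow[reg / 4]; }
	static void reg_write(uint32_t reg, uint32_t val) { shadow[reg / 4] = val; }

	int main(void)
	{
		/* Clear the packed field with __MASK, repack it with the
		 * inline helper, and OR in single-bit flags directly. */
		uint32_t val = reg_read(REG_HDMI_HPD_CTRL);

		val &= ~HDMI_HPD_CTRL_TIMEOUT__MASK;
		val |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);	/* 13-bit field, max value */
		val |= HDMI_HPD_CTRL_ENABLE;
		reg_write(REG_HDMI_HPD_CTRL, val);

		printf("HPD_CTRL = 0x%08x\n", val);	/* 0x10001fff */
		return 0;
	}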
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
new file mode 100644
index 000000000..4c2058c4a
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/hdmi.h>
+#include "hdmi.h"
+
+/* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */
+static int nchannels[] = { 2, 4, 6, 8 };
+
+/* Supported HDMI Audio sample rates */
+#define MSM_HDMI_SAMPLE_RATE_32KHZ 0
+#define MSM_HDMI_SAMPLE_RATE_44_1KHZ 1
+#define MSM_HDMI_SAMPLE_RATE_48KHZ 2
+#define MSM_HDMI_SAMPLE_RATE_88_2KHZ 3
+#define MSM_HDMI_SAMPLE_RATE_96KHZ 4
+#define MSM_HDMI_SAMPLE_RATE_176_4KHZ 5
+#define MSM_HDMI_SAMPLE_RATE_192KHZ 6
+#define MSM_HDMI_SAMPLE_RATE_MAX 7
+
+struct hdmi_msm_audio_acr {
+ uint32_t n; /* N parameter for clock regeneration */
+ uint32_t cts; /* CTS parameter for clock regeneration */
+};
+
+struct hdmi_msm_audio_arcs {
+ unsigned long int pixclock;
+ struct hdmi_msm_audio_acr lut[MSM_HDMI_SAMPLE_RATE_MAX];
+};
+
+#define HDMI_MSM_AUDIO_ARCS(pclk, ...) { (1000 * (pclk)), __VA_ARGS__ }
+
+/* Audio constants lookup table for hdmi_msm_audio_acr_setup */
+/* Valid Pixel-Clock rates: 25.2MHz, 27MHz, 27.03MHz, 74.25MHz, 148.5MHz */
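+/* lut[] is indexed by the MSM_HDMI_SAMPLE_RATE_* constants above, so each
+ * row keeps its seven N/CTS pairs in that order (32k ... 192k) */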
+static const struct hdmi_msm_audio_arcs acr_lut[] = {
+ /* 25.200MHz */
+ HDMI_MSM_AUDIO_ARCS(25200, {
+ {4096, 25200}, {6272, 28000}, {6144, 25200}, {12544, 28000},
+ {12288, 25200}, {25088, 28000}, {24576, 25200} }),
+ /* 27.000MHz */
+ HDMI_MSM_AUDIO_ARCS(27000, {
+ {4096, 27000}, {6272, 30000}, {6144, 27000}, {12544, 30000},
+ {12288, 27000}, {25088, 30000}, {24576, 27000} }),
+ /* 27.027MHz */
+ HDMI_MSM_AUDIO_ARCS(27030, {
+ {4096, 27027}, {6272, 30030}, {6144, 27027}, {12544, 30030},
+ {12288, 27027}, {25088, 30030}, {24576, 27027} }),
+ /* 74.250MHz */
+ HDMI_MSM_AUDIO_ARCS(74250, {
+ {4096, 74250}, {6272, 82500}, {6144, 74250}, {12544, 82500},
+ {12288, 74250}, {25088, 82500}, {24576, 74250} }),
+ /* 148.500MHz */
+ HDMI_MSM_AUDIO_ARCS(148500, {
+ {4096, 148500}, {6272, 165000}, {6144, 148500}, {12544, 165000},
+ {12288, 148500}, {25088, 165000}, {24576, 148500} }),
+};
+
+static const struct hdmi_msm_audio_arcs *get_arcs(unsigned long int pixclock)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(acr_lut); i++) {
+ const struct hdmi_msm_audio_arcs *arcs = &acr_lut[i];
+ if (arcs->pixclock == pixclock)
+ return arcs;
+ }
+
+ return NULL;
+}
+
+int msm_hdmi_audio_update(struct hdmi *hdmi)
+{
+ struct hdmi_audio *audio = &hdmi->audio;
+ struct hdmi_audio_infoframe *info = &audio->infoframe;
+ const struct hdmi_msm_audio_arcs *arcs = NULL;
+ bool enabled = audio->enabled;
+ uint32_t acr_pkt_ctrl, vbi_pkt_ctrl, aud_pkt_ctrl;
+ uint32_t infofrm_ctrl, audio_config;
+
+ DBG("audio: enabled=%d, channels=%d, channel_allocation=0x%x, "
+ "level_shift_value=%d, downmix_inhibit=%d, rate=%d",
+ audio->enabled, info->channels, info->channel_allocation,
+ info->level_shift_value, info->downmix_inhibit, audio->rate);
+ DBG("video: power_on=%d, pixclock=%lu", hdmi->power_on, hdmi->pixclock);
+
+ if (enabled && !(hdmi->power_on && hdmi->pixclock)) {
+ DBG("disabling audio: no video");
+ enabled = false;
+ }
+
+ if (enabled) {
+ arcs = get_arcs(hdmi->pixclock);
+ if (!arcs) {
+ DBG("disabling audio: unsupported pixclock: %lu",
+ hdmi->pixclock);
+ enabled = false;
+ }
+ }
+
+ /* Read first before writing */
+ acr_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_ACR_PKT_CTRL);
+ vbi_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
+ aud_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_AUDIO_PKT_CTRL1);
+ infofrm_ctrl = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
+ audio_config = hdmi_read(hdmi, REG_HDMI_AUDIO_CFG);
+
+ /* Clear N/CTS selection bits */
+ acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_SELECT__MASK;
+
+ if (enabled) {
+ uint32_t n, cts, multiplier;
+ enum hdmi_acr_cts select;
+ uint8_t buf[14];
+
+ n = arcs->lut[audio->rate].n;
+ cts = arcs->lut[audio->rate].cts;
+
+ if ((MSM_HDMI_SAMPLE_RATE_192KHZ == audio->rate) ||
+ (MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio->rate)) {
+ multiplier = 4;
+ n >>= 2; /* divide N by 4 and use multiplier */
+ } else if ((MSM_HDMI_SAMPLE_RATE_96KHZ == audio->rate) ||
+ (MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio->rate)) {
+ multiplier = 2;
+ n >>= 1; /* divide N by 2 and use multiplier */
+ } else {
+ multiplier = 1;
+ }
+
+ DBG("n=%u, cts=%u, multiplier=%u", n, cts, multiplier);
+
+ acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SOURCE;
+ acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_AUDIO_PRIORITY;
+ acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_N_MULTIPLIER(multiplier);
+
+ if ((MSM_HDMI_SAMPLE_RATE_48KHZ == audio->rate) ||
+ (MSM_HDMI_SAMPLE_RATE_96KHZ == audio->rate) ||
+ (MSM_HDMI_SAMPLE_RATE_192KHZ == audio->rate))
+ select = ACR_48;
+ else if ((MSM_HDMI_SAMPLE_RATE_44_1KHZ == audio->rate) ||
+ (MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio->rate) ||
+ (MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio->rate))
+ select = ACR_44;
+ else /* default to 32k */
+ select = ACR_32;
+
+ acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SELECT(select);
+
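+		/* ACR_32/44/48 start at 1 (0 is ACR_NONE), so select - 1
+		 * indexes the per-rate ACR N/CTS register pairs */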
+ hdmi_write(hdmi, REG_HDMI_ACR_0(select - 1),
+ HDMI_ACR_0_CTS(cts));
+ hdmi_write(hdmi, REG_HDMI_ACR_1(select - 1),
+ HDMI_ACR_1_N(n));
+
+ hdmi_write(hdmi, REG_HDMI_AUDIO_PKT_CTRL2,
+ COND(info->channels != 2, HDMI_AUDIO_PKT_CTRL2_LAYOUT) |
+ HDMI_AUDIO_PKT_CTRL2_OVERRIDE);
+
+ acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_CONT;
+ acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SEND;
+
+ /* configure infoframe: */
+ hdmi_audio_infoframe_pack(info, buf, sizeof(buf));
+ hdmi_write(hdmi, REG_HDMI_AUDIO_INFO0,
+ (buf[3] << 0) | (buf[4] << 8) |
+ (buf[5] << 16) | (buf[6] << 24));
+ hdmi_write(hdmi, REG_HDMI_AUDIO_INFO1,
+ (buf[7] << 0) | (buf[8] << 8));
+
+ hdmi_write(hdmi, REG_HDMI_GC, 0);
+
+ vbi_pkt_ctrl |= HDMI_VBI_PKT_CTRL_GC_ENABLE;
+ vbi_pkt_ctrl |= HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME;
+
+ aud_pkt_ctrl |= HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND;
+
+ infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND;
+ infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT;
+ infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE;
+ infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
+
+ audio_config &= ~HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK;
+ audio_config |= HDMI_AUDIO_CFG_FIFO_WATERMARK(4);
+ audio_config |= HDMI_AUDIO_CFG_ENGINE_ENABLE;
+ } else {
+ acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_CONT;
+ acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_SEND;
+ vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_ENABLE;
+ vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME;
+ aud_pkt_ctrl &= ~HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND;
+ infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND;
+ infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT;
+ infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE;
+ infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
+ audio_config &= ~HDMI_AUDIO_CFG_ENGINE_ENABLE;
+ }
+
+ hdmi_write(hdmi, REG_HDMI_ACR_PKT_CTRL, acr_pkt_ctrl);
+ hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_ctrl);
+ hdmi_write(hdmi, REG_HDMI_AUDIO_PKT_CTRL1, aud_pkt_ctrl);
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, infofrm_ctrl);
+
+ hdmi_write(hdmi, REG_HDMI_AUD_INT,
+ COND(enabled, HDMI_AUD_INT_AUD_FIFO_URUN_INT) |
+ COND(enabled, HDMI_AUD_INT_AUD_SAM_DROP_INT));
+
+ hdmi_write(hdmi, REG_HDMI_AUDIO_CFG, audio_config);
+
+ DBG("audio %sabled", enabled ? "en" : "dis");
+
+ return 0;
+}
+
+int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
+ uint32_t num_of_channels, uint32_t channel_allocation,
+ uint32_t level_shift, bool down_mix)
+{
+ struct hdmi_audio *audio;
+
+ if (!hdmi)
+ return -ENXIO;
+
+ audio = &hdmi->audio;
+
+ if (num_of_channels >= ARRAY_SIZE(nchannels))
+ return -EINVAL;
+
+ audio->enabled = enabled;
+ audio->infoframe.channels = nchannels[num_of_channels];
+ audio->infoframe.channel_allocation = channel_allocation;
+ audio->infoframe.level_shift_value = level_shift;
+ audio->infoframe.downmix_inhibit = down_mix;
+
+ return msm_hdmi_audio_update(hdmi);
+}
+
+void msm_hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate)
+{
+ struct hdmi_audio *audio;
+
+ if (!hdmi)
+ return;
+
+ audio = &hdmi->audio;
+
+ if ((rate < 0) || (rate >= MSM_HDMI_SAMPLE_RATE_MAX))
+ return;
+
+ audio->rate = rate;
+ msm_hdmi_audio_update(hdmi);
+}
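The N/CTS pairs in acr_lut above are tied together by the HDMI
clock-regeneration relation f_s = (f_pixel * N) / (128 * CTS). A standalone
check of one table row, with values copied from the 25.2 MHz entry (an
illustration of the relation, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t fpix = 25200000;	/* 25.2 MHz pixel clock */
		uint32_t n = 6144, cts = 25200;	/* MSM_HDMI_SAMPLE_RATE_48KHZ slot */
		uint64_t fs = (fpix * n) / (128ULL * cts);

		printf("fs = %llu Hz\n", (unsigned long long)fs);	/* 48000 */
		return 0;
	}

The same arithmetic recovers 32 kHz from the {4096, 25200} slot and 44.1 kHz
from {6272, 28000}.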
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
new file mode 100644
index 000000000..9b1391d27
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/delay.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_edid.h>
+
+#include "msm_kms.h"
+#include "hdmi.h"
+
+void msm_hdmi_bridge_destroy(struct drm_bridge *bridge)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+
+ msm_hdmi_hpd_disable(hdmi_bridge);
+ drm_bridge_remove(bridge);
+}
+
+static void msm_hdmi_power_on(struct drm_bridge *bridge)
+{
+ struct drm_device *dev = bridge->dev;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ int i, ret;
+
+ pm_runtime_get_sync(&hdmi->pdev->dev);
+
+ ret = regulator_bulk_enable(config->pwr_reg_cnt, hdmi->pwr_regs);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %d\n", ret);
+
+ if (config->pwr_clk_cnt > 0) {
+ DBG("pixclock: %lu", hdmi->pixclock);
+ ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to set pixel clk: %s (%d)\n",
+ config->pwr_clk_names[0], ret);
+ }
+ }
+
+ for (i = 0; i < config->pwr_clk_cnt; i++) {
+ ret = clk_prepare_enable(hdmi->pwr_clks[i]);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to enable pwr clk: %s (%d)\n",
+ config->pwr_clk_names[i], ret);
+ }
+ }
+}
+
+static void power_off(struct drm_bridge *bridge)
+{
+ struct drm_device *dev = bridge->dev;
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ int i, ret;
+
+ /* TODO do we need to wait for final vblank somewhere before
+ * cutting the clocks?
+ */
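+	/* roughly one 60 Hz frame (16 ms) plus margin */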
+ mdelay(16 + 4);
+
+ for (i = 0; i < config->pwr_clk_cnt; i++)
+ clk_disable_unprepare(hdmi->pwr_clks[i]);
+
+ ret = regulator_bulk_disable(config->pwr_reg_cnt, hdmi->pwr_regs);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %d\n", ret);
+
+ pm_runtime_put(&hdmi->pdev->dev);
+}
+
+#define AVI_IFRAME_LINE_NUMBER 1
+
+static void msm_hdmi_config_avi_infoframe(struct hdmi *hdmi)
+{
+ struct drm_crtc *crtc = hdmi->encoder->crtc;
+ const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ union hdmi_infoframe frame;
+ u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
+ u32 val;
+ int len;
+
+ drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ hdmi->connector, mode);
+
+ len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (len < 0) {
+ DRM_DEV_ERROR(&hdmi->pdev->dev,
+ "failed to configure avi infoframe\n");
+ return;
+ }
+
+ /*
+ * the AVI_INFOx registers don't map exactly to how the AVI infoframes
+ * are packed according to the spec. The checksum from the header is
+ * written to the LSB byte of AVI_INFO0 and the version is written to
+ * the third byte from the LSB of AVI_INFO3
+ */
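+	/*
+	 * Concretely: AVI_INFO0 = csum | PB1<<8 | PB2<<16 | PB3<<24,
+	 * AVI_INFO1 = PB4..PB7, AVI_INFO2 = PB8..PB11, and
+	 * AVI_INFO3 = PB12 | PB13<<8 | version<<24, with buffer[3] holding
+	 * the checksum and buffer[4..16] the payload bytes PB1..PB13.
+	 */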
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(0),
+ buffer[3] |
+ buffer[4] << 8 |
+ buffer[5] << 16 |
+ buffer[6] << 24);
+
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(1),
+ buffer[7] |
+ buffer[8] << 8 |
+ buffer[9] << 16 |
+ buffer[10] << 24);
+
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(2),
+ buffer[11] |
+ buffer[12] << 8 |
+ buffer[13] << 16 |
+ buffer[14] << 24);
+
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(3),
+ buffer[15] |
+ buffer[16] << 8 |
+ buffer[1] << 24);
+
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0,
+ HDMI_INFOFRAME_CTRL0_AVI_SEND |
+ HDMI_INFOFRAME_CTRL0_AVI_CONT);
+
+ val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+ val &= ~HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE__MASK;
+ val |= HDMI_INFOFRAME_CTRL1_AVI_INFO_LINE(AVI_IFRAME_LINE_NUMBER);
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, val);
+}
+
+static void msm_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ struct hdmi_phy *phy = hdmi->phy;
+
+ DBG("power up");
+
+ if (!hdmi->power_on) {
+ msm_hdmi_phy_resource_enable(phy);
+ msm_hdmi_power_on(bridge);
+ hdmi->power_on = true;
+ if (hdmi->hdmi_mode) {
+ msm_hdmi_config_avi_infoframe(hdmi);
+ msm_hdmi_audio_update(hdmi);
+ }
+ }
+
+ msm_hdmi_phy_powerup(phy, hdmi->pixclock);
+
+ msm_hdmi_set_mode(hdmi, true);
+
+ if (hdmi->hdcp_ctrl)
+ msm_hdmi_hdcp_on(hdmi->hdcp_ctrl);
+}
+
+static void msm_hdmi_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ struct hdmi_phy *phy = hdmi->phy;
+
+ if (hdmi->hdcp_ctrl)
+ msm_hdmi_hdcp_off(hdmi->hdcp_ctrl);
+
+ DBG("power down");
+ msm_hdmi_set_mode(hdmi, false);
+
+ msm_hdmi_phy_powerdown(phy);
+
+ if (hdmi->power_on) {
+ power_off(bridge);
+ hdmi->power_on = false;
+ if (hdmi->hdmi_mode)
+ msm_hdmi_audio_update(hdmi);
+ msm_hdmi_phy_resource_disable(phy);
+ }
+}
+
+static void msm_hdmi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ int hstart, hend, vstart, vend;
+ uint32_t frame_ctrl;
+
+ mode = adjusted_mode;
+
+ hdmi->pixclock = mode->clock * 1000;
+
+ hstart = mode->htotal - mode->hsync_start;
+ hend = mode->htotal - mode->hsync_start + mode->hdisplay;
+
+ vstart = mode->vtotal - mode->vsync_start - 1;
+ vend = mode->vtotal - mode->vsync_start + mode->vdisplay - 1;
+
+ DBG("htotal=%d, vtotal=%d, hstart=%d, hend=%d, vstart=%d, vend=%d",
+ mode->htotal, mode->vtotal, hstart, hend, vstart, vend);
+
+ hdmi_write(hdmi, REG_HDMI_TOTAL,
+ HDMI_TOTAL_H_TOTAL(mode->htotal - 1) |
+ HDMI_TOTAL_V_TOTAL(mode->vtotal - 1));
+
+ hdmi_write(hdmi, REG_HDMI_ACTIVE_HSYNC,
+ HDMI_ACTIVE_HSYNC_START(hstart) |
+ HDMI_ACTIVE_HSYNC_END(hend));
+ hdmi_write(hdmi, REG_HDMI_ACTIVE_VSYNC,
+ HDMI_ACTIVE_VSYNC_START(vstart) |
+ HDMI_ACTIVE_VSYNC_END(vend));
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
+ HDMI_VSYNC_TOTAL_F2_V_TOTAL(mode->vtotal));
+ hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
+ HDMI_VSYNC_ACTIVE_F2_START(vstart + 1) |
+ HDMI_VSYNC_ACTIVE_F2_END(vend + 1));
+ } else {
+ hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
+ HDMI_VSYNC_TOTAL_F2_V_TOTAL(0));
+ hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
+ HDMI_VSYNC_ACTIVE_F2_START(0) |
+ HDMI_VSYNC_ACTIVE_F2_END(0));
+ }
+
+ frame_ctrl = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ frame_ctrl |= HDMI_FRAME_CTRL_HSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ frame_ctrl |= HDMI_FRAME_CTRL_VSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN;
+ DBG("frame_ctrl=%08x", frame_ctrl);
+ hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
+
+ if (hdmi->hdmi_mode)
+ msm_hdmi_audio_update(hdmi);
+}
+
+static struct edid *msm_hdmi_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ struct edid *edid;
+ uint32_t hdmi_ctrl;
+
+ hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
+
+ edid = drm_get_edid(connector, hdmi->i2c);
+
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
+
+ hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
+
+ return edid;
+}
+
+static enum drm_mode_status msm_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ struct msm_drm_private *priv = bridge->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ long actual, requested;
+
+ requested = 1000 * mode->clock;
+
+ /* for mdp5/apq8074, we manage our own pixel clk (as opposed to
+ * mdp4/dtv stuff where pixel clk is assigned to mdp/encoder
+ * instead):
+ */
+ if (kms->funcs->round_pixclk)
+ actual = kms->funcs->round_pixclk(kms,
+ requested, hdmi_bridge->hdmi->encoder);
+ else if (config->pwr_clk_cnt > 0)
+ actual = clk_round_rate(hdmi->pwr_clks[0], requested);
+ else
+ actual = requested;
+
+ DBG("requested=%ld, actual=%ld", requested, actual);
+
+ if (actual != requested)
+ return MODE_CLOCK_RANGE;
+
+ return 0;
+}
+
+static const struct drm_bridge_funcs msm_hdmi_bridge_funcs = {
+ .pre_enable = msm_hdmi_bridge_pre_enable,
+ .post_disable = msm_hdmi_bridge_post_disable,
+ .mode_set = msm_hdmi_bridge_mode_set,
+ .mode_valid = msm_hdmi_bridge_mode_valid,
+ .get_edid = msm_hdmi_bridge_get_edid,
+ .detect = msm_hdmi_bridge_detect,
+};
+
+static void
+msm_hdmi_hotplug_work(struct work_struct *work)
+{
+ struct hdmi_bridge *hdmi_bridge =
+ container_of(work, struct hdmi_bridge, hpd_work);
+ struct drm_bridge *bridge = &hdmi_bridge->base;
+
+ drm_bridge_hpd_notify(bridge, drm_bridge_detect(bridge));
+}
+
+/* initialize bridge */
+struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
+{
+ struct drm_bridge *bridge = NULL;
+ struct hdmi_bridge *hdmi_bridge;
+ int ret;
+
+ hdmi_bridge = devm_kzalloc(hdmi->dev->dev,
+ sizeof(*hdmi_bridge), GFP_KERNEL);
+ if (!hdmi_bridge) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ hdmi_bridge->hdmi = hdmi;
+ INIT_WORK(&hdmi_bridge->hpd_work, msm_hdmi_hotplug_work);
+
+ bridge = &hdmi_bridge->base;
+ bridge->funcs = &msm_hdmi_bridge_funcs;
+ bridge->ddc = hdmi->i2c;
+ bridge->type = DRM_MODE_CONNECTOR_HDMIA;
+ bridge->ops = DRM_BRIDGE_OP_HPD |
+ DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_EDID;
+
+ drm_bridge_add(bridge);
+
+ ret = drm_bridge_attach(hdmi->encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ goto fail;
+
+ return bridge;
+
+fail:
+ if (bridge)
+ msm_hdmi_bridge_destroy(bridge);
+
+ return ERR_PTR(ret);
+}
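msm_hdmi_bridge_mode_set() above counts pixels and lines from the start of the
sync pulse, so the active window begins htotal - hsync_start ticks in (with an
extra -1 on the vertical side). A standalone check of that arithmetic against
the standard CEA 1920x1080@60 timings, assumed here as sample input:

	#include <stdio.h>

	int main(void)
	{
		/* CEA-861 1080p60 */
		int hdisplay = 1920, hsync_start = 2008, htotal = 2200;
		int vdisplay = 1080, vsync_start = 1084, vtotal = 1125;

		int hstart = htotal - hsync_start;			/* 192 */
		int hend = htotal - hsync_start + hdisplay;		/* 2112 */
		int vstart = vtotal - vsync_start - 1;			/* 40 */
		int vend = vtotal - vsync_start + vdisplay - 1;		/* 1120 */

		printf("h: %d..%d, v: %d..%d\n", hstart, hend, vstart, vend);
		return 0;
	}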
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
new file mode 100644
index 000000000..e7748461c
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
@@ -0,0 +1,1428 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ */
+
+#include "hdmi.h"
+#include <linux/qcom_scm.h>
+
+#define HDCP_REG_ENABLE 0x01
+#define HDCP_REG_DISABLE 0x00
+#define HDCP_PORT_ADDR 0x74
+
+#define HDCP_INT_STATUS_MASK ( \
+ HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT | \
+ HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT | \
+ HDMI_HDCP_INT_CTRL_AUTH_XFER_REQ_INT | \
+ HDMI_HDCP_INT_CTRL_AUTH_XFER_DONE_INT)
+
+#define AUTH_WORK_RETRIES_TIME 100
+#define AUTH_RETRIES_TIME 30
+
+/* QFPROM Registers for HDMI/HDCP */
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_LSB 0x000000F8
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB 0x000000FC
+#define HDCP_KSV_LSB 0x000060D8
+#define HDCP_KSV_MSB 0x000060DC
+
+enum DS_TYPE { /* type of downstream device */
+ DS_UNKNOWN,
+ DS_RECEIVER,
+ DS_REPEATER,
+};
+
+enum hdmi_hdcp_state {
+ HDCP_STATE_NO_AKSV,
+ HDCP_STATE_INACTIVE,
+ HDCP_STATE_AUTHENTICATING,
+ HDCP_STATE_AUTHENTICATED,
+ HDCP_STATE_AUTH_FAILED
+};
+
+struct hdmi_hdcp_reg_data {
+ u32 reg_id;
+ u32 off;
+ char *name;
+ u32 reg_val;
+};
+
+struct hdmi_hdcp_ctrl {
+ struct hdmi *hdmi;
+ u32 auth_retries;
+ bool tz_hdcp;
+ enum hdmi_hdcp_state hdcp_state;
+ struct work_struct hdcp_auth_work;
+ struct work_struct hdcp_reauth_work;
+
+#define AUTH_ABORT_EV 1
+#define AUTH_RESULT_RDY_EV 2
+ unsigned long auth_event;
+ wait_queue_head_t auth_event_queue;
+
+ u32 ksv_fifo_w_index;
+ /*
+ * store aksv from qfprom
+ */
+ u32 aksv_lsb;
+ u32 aksv_msb;
+ bool aksv_valid;
+ u32 ds_type;
+ u32 bksv_lsb;
+ u32 bksv_msb;
+ u8 dev_count;
+ u8 depth;
+ u8 ksv_list[5 * 127];
+ bool max_cascade_exceeded;
+ bool max_dev_exceeded;
+};
+
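+/*
+ * @addr is the 8-bit DDC port address (e.g. HDCP_PORT_ADDR, 0x74); the i2c
+ * core expects the 7-bit form, hence the addr >> 1 in the message setup.
+ */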
+static int msm_hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset,
+ u8 *data, u16 data_len)
+{
+ int rc;
+ int retry = 5;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = addr >> 1,
+ .flags = 0,
+ .len = 1,
+ .buf = &offset,
+ }, {
+ .addr = addr >> 1,
+ .flags = I2C_M_RD,
+ .len = data_len,
+ .buf = data,
+ }
+ };
+
+ DBG("Start DDC read");
+retry:
+ rc = i2c_transfer(hdmi->i2c, msgs, 2);
+
+ retry--;
+ if (rc == 2)
+ rc = 0;
+ else if (retry > 0)
+ goto retry;
+ else
+ rc = -EIO;
+
+ DBG("End DDC read %d", rc);
+
+ return rc;
+}
+
+#define HDCP_DDC_WRITE_MAX_BYTE_NUM 32
+
+static int msm_hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset,
+ u8 *data, u16 data_len)
+{
+ int rc;
+ int retry = 10;
+ u8 buf[HDCP_DDC_WRITE_MAX_BYTE_NUM];
+ struct i2c_msg msgs[] = {
+ {
+ .addr = addr >> 1,
+ .flags = 0,
+ .len = 1,
+ }
+ };
+
+ DBG("Start DDC write");
+ if (data_len > (HDCP_DDC_WRITE_MAX_BYTE_NUM - 1)) {
+ pr_err("%s: write size too big\n", __func__);
+ return -ERANGE;
+ }
+
+ buf[0] = offset;
+ memcpy(&buf[1], data, data_len);
+ msgs[0].buf = buf;
+ msgs[0].len = data_len + 1;
+retry:
+ rc = i2c_transfer(hdmi->i2c, msgs, 1);
+
+ retry--;
+ if (rc == 1)
+ rc = 0;
+ else if (retry > 0)
+ goto retry;
+ else
+ rc = -EIO;
+
+ DBG("End DDC write %d", rc);
+
+ return rc;
+}
+
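+/*
+ * Write a list of register/value pairs either directly or, when the secure
+ * world owns the HDCP block (tz_hdcp), via qcom_scm_hdcp_req() in batches of
+ * up to QCOM_SCM_HDCP_MAX_REQ_CNT physical-address/value pairs.
+ */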
+static int msm_hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg,
+ u32 *pdata, u32 count)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ struct qcom_scm_hdcp_req scm_buf[QCOM_SCM_HDCP_MAX_REQ_CNT];
+ u32 resp, phy_addr, idx = 0;
+ int i, ret = 0;
+
+ WARN_ON(!pdata || !preg || (count == 0));
+
+ if (hdcp_ctrl->tz_hdcp) {
+ phy_addr = (u32)hdmi->mmio_phy_addr;
+
+ while (count) {
+ memset(scm_buf, 0, sizeof(scm_buf));
+ for (i = 0; i < count && i < QCOM_SCM_HDCP_MAX_REQ_CNT;
+ i++) {
+ scm_buf[i].addr = phy_addr + preg[idx];
+ scm_buf[i].val = pdata[idx];
+ idx++;
+ }
+ ret = qcom_scm_hdcp_req(scm_buf, i, &resp);
+
+ if (ret || resp) {
+ pr_err("%s: error: scm_call ret=%d resp=%u\n",
+ __func__, ret, resp);
+ ret = -EINVAL;
+ break;
+ }
+
+ count -= i;
+ }
+ } else {
+ for (i = 0; i < count; i++)
+ hdmi_write(hdmi, preg[i], pdata[i]);
+ }
+
+ return ret;
+}
+
+void msm_hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 reg_val, hdcp_int_status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_INT_CTRL);
+ hdcp_int_status = reg_val & HDCP_INT_STATUS_MASK;
+ if (!hdcp_int_status) {
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+ return;
+ }
+ /* Clear Interrupts */
+ reg_val |= hdcp_int_status << 1;
+ /* Clear AUTH_FAIL_INFO as well */
+ if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT)
+ reg_val |= HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK;
+ hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ DBG("hdcp irq %x", hdcp_int_status);
+
+ if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_INT) {
+		pr_info("%s: AUTH_SUCCESS_INT received\n", __func__);
+ if (HDCP_STATE_AUTHENTICATING == hdcp_ctrl->hdcp_state) {
+ set_bit(AUTH_RESULT_RDY_EV, &hdcp_ctrl->auth_event);
+ wake_up_all(&hdcp_ctrl->auth_event_queue);
+ }
+ }
+
+ if (hdcp_int_status & HDMI_HDCP_INT_CTRL_AUTH_FAIL_INT) {
+ reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+ pr_info("%s: AUTH_FAIL_INT rcvd, LINK0_STATUS=0x%08x\n",
+ __func__, reg_val);
+ if (HDCP_STATE_AUTHENTICATED == hdcp_ctrl->hdcp_state)
+ queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work);
+ else if (HDCP_STATE_AUTHENTICATING ==
+ hdcp_ctrl->hdcp_state) {
+ set_bit(AUTH_RESULT_RDY_EV, &hdcp_ctrl->auth_event);
+ wake_up_all(&hdcp_ctrl->auth_event_queue);
+ }
+ }
+}
+
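+/*
+ * Interruptible delay: returns 0 after sleeping the full @ms, or -ECANCELED
+ * (consuming the event bit) if @ev, e.g. AUTH_ABORT_EV, fires first.
+ */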
+static int msm_hdmi_hdcp_msleep(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 ms, u32 ev)
+{
+ int rc;
+
+ rc = wait_event_timeout(hdcp_ctrl->auth_event_queue,
+ !!test_bit(ev, &hdcp_ctrl->auth_event),
+ msecs_to_jiffies(ms));
+ if (rc) {
+ pr_info("%s: msleep is canceled by event %d\n",
+ __func__, ev);
+ clear_bit(ev, &hdcp_ctrl->auth_event);
+ return -ECANCELED;
+ }
+
+ return 0;
+}
+
+static int msm_hdmi_hdcp_read_validate_aksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+
+ /* Fetch aksv from QFPROM, this info should be public. */
+ hdcp_ctrl->aksv_lsb = hdmi_qfprom_read(hdmi, HDCP_KSV_LSB);
+ hdcp_ctrl->aksv_msb = hdmi_qfprom_read(hdmi, HDCP_KSV_MSB);
+
+ /* check there are 20 ones in AKSV */
+ if ((hweight32(hdcp_ctrl->aksv_lsb) + hweight32(hdcp_ctrl->aksv_msb))
+ != 20) {
+		pr_err("%s: AKSV QFPROM doesn't have 20 1's and 20 0's\n",
+ __func__);
+ pr_err("%s: QFPROM AKSV chk failed (AKSV=%02x%08x)\n",
+ __func__, hdcp_ctrl->aksv_msb,
+ hdcp_ctrl->aksv_lsb);
+ return -EINVAL;
+ }
+ DBG("AKSV=%02x%08x", hdcp_ctrl->aksv_msb, hdcp_ctrl->aksv_lsb);
+
+ return 0;
+}
+
+static int msm_reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 reg_val, failure, nack0;
+ int rc = 0;
+
+ /* Check for any DDC transfer failures */
+ reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
+ failure = reg_val & HDMI_HDCP_DDC_STATUS_FAILED;
+ nack0 = reg_val & HDMI_HDCP_DDC_STATUS_NACK0;
+ DBG("HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d",
+ reg_val, failure, nack0);
+
+ if (failure) {
+ /*
+ * Indicates that the last HDCP HW DDC transfer failed.
+ * This occurs when a transfer is attempted with HDCP DDC
+ * disabled (HDCP_DDC_DISABLE=1) or the number of retries
+ * matches HDCP_DDC_RETRY_CNT.
+ * Failure occurred, let's clear it.
+ */
+ DBG("DDC failure detected");
+
+ /* First, Disable DDC */
+ hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_0,
+ HDMI_HDCP_DDC_CTRL_0_DISABLE);
+
+ /* ACK the Failure to Clear it */
+ reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_CTRL_1);
+ reg_val |= HDMI_HDCP_DDC_CTRL_1_FAILED_ACK;
+ hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_1, reg_val);
+
+ /* Check if the FAILURE got Cleared */
+ reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
+ if (reg_val & HDMI_HDCP_DDC_STATUS_FAILED)
+ pr_info("%s: Unable to clear HDCP DDC Failure\n",
+ __func__);
+
+ /* Re-Enable HDCP DDC */
+ hdmi_write(hdmi, REG_HDMI_HDCP_DDC_CTRL_0, 0);
+ }
+
+ if (nack0) {
+ DBG("Before: HDMI_DDC_SW_STATUS=0x%08x",
+ hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS));
+ /* Reset HDMI DDC software status */
+ reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+ reg_val |= HDMI_DDC_CTRL_SW_STATUS_RESET;
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+
+ rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+
+ reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+ reg_val &= ~HDMI_DDC_CTRL_SW_STATUS_RESET;
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+
+ /* Reset HDMI DDC Controller */
+ reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+ reg_val |= HDMI_DDC_CTRL_SOFT_RESET;
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+
+ /* If previous msleep is aborted, skip this msleep */
+ if (!rc)
+ rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+
+ reg_val = hdmi_read(hdmi, REG_HDMI_DDC_CTRL);
+ reg_val &= ~HDMI_DDC_CTRL_SOFT_RESET;
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL, reg_val);
+ DBG("After: HDMI_DDC_SW_STATUS=0x%08x",
+ hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS));
+ }
+
+ return rc;
+}
+
+static int msm_hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc;
+ u32 hdcp_ddc_status, ddc_hw_status;
+ u32 xfer_done, xfer_req, hw_done;
+ bool hw_not_ready;
+ u32 timeout_count;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+
+ if (hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS) == 0)
+ return 0;
+
+ /* Wait to be clean on DDC HW engine */
+ timeout_count = 100;
+ do {
+ hdcp_ddc_status = hdmi_read(hdmi, REG_HDMI_HDCP_DDC_STATUS);
+ ddc_hw_status = hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS);
+
+ xfer_done = hdcp_ddc_status & HDMI_HDCP_DDC_STATUS_XFER_DONE;
+ xfer_req = hdcp_ddc_status & HDMI_HDCP_DDC_STATUS_XFER_REQ;
+ hw_done = ddc_hw_status & HDMI_DDC_HW_STATUS_DONE;
+ hw_not_ready = !xfer_done || xfer_req || !hw_done;
+
+		if (!hw_not_ready)
+			break;
+
+ timeout_count--;
+ if (!timeout_count) {
+ pr_warn("%s: hw_ddc_clean failed\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+ } while (1);
+
+ return 0;
+}
+
+static void msm_hdmi_hdcp_reauth_work(struct work_struct *work)
+{
+ struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
+ struct hdmi_hdcp_ctrl, hdcp_reauth_work);
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ unsigned long flags;
+ u32 reg_val;
+
+ DBG("HDCP REAUTH WORK");
+ /*
+ * Disable HPD circuitry.
+ * This is needed to reset the HDCP cipher engine so that when we
+ * attempt a re-authentication, HW would clear the AN0_READY and
+ * AN1_READY bits in HDMI_HDCP_LINK0_STATUS register
+ */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+ reg_val &= ~HDMI_HPD_CTRL_ENABLE;
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+
+ /* Disable HDCP interrupts */
+ hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, 0);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ hdmi_write(hdmi, REG_HDMI_HDCP_RESET,
+ HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE);
+
+ /* Wait to be clean on DDC HW engine */
+ if (msm_hdmi_hdcp_hw_ddc_clean(hdcp_ctrl)) {
+ pr_info("%s: reauth work aborted\n", __func__);
+ return;
+ }
+
+ /* Disable encryption and disable the HDCP block */
+ hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, 0);
+
+ /* Enable HPD circuitry */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+ reg_val |= HDMI_HPD_CTRL_ENABLE;
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ /*
+ * Only retry defined times then abort current authenticating process
+ */
+ if (++hdcp_ctrl->auth_retries == AUTH_RETRIES_TIME) {
+ hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+ hdcp_ctrl->auth_retries = 0;
+ pr_info("%s: abort reauthentication!\n", __func__);
+
+ return;
+ }
+
+ DBG("Queue AUTH WORK");
+ hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING;
+ queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
+}
+
+static int msm_hdmi_hdcp_auth_prepare(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 link0_status;
+ u32 reg_val;
+ unsigned long flags;
+ int rc;
+
+ if (!hdcp_ctrl->aksv_valid) {
+ rc = msm_hdmi_hdcp_read_validate_aksv(hdcp_ctrl);
+ if (rc) {
+			pr_err("%s: AKSV validation failed\n", __func__);
+ hdcp_ctrl->hdcp_state = HDCP_STATE_NO_AKSV;
+ return -ENOTSUPP;
+ }
+ hdcp_ctrl->aksv_valid = true;
+ }
+
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ /* disable HDMI Encrypt */
+ reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+ reg_val &= ~HDMI_CTRL_ENCRYPTED;
+ hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+
+ /* Enabling Software DDC */
+ reg_val = hdmi_read(hdmi, REG_HDMI_DDC_ARBITRATION);
+ reg_val &= ~HDMI_DDC_ARBITRATION_HW_ARBITRATION;
+ hdmi_write(hdmi, REG_HDMI_DDC_ARBITRATION, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ /*
+ * Write AKSV read from QFPROM to the HDCP registers.
+ * This step is needed for HDCP authentication and must be
+ * written before enabling HDCP.
+ */
+ hdmi_write(hdmi, REG_HDMI_HDCP_SW_LOWER_AKSV, hdcp_ctrl->aksv_lsb);
+ hdmi_write(hdmi, REG_HDMI_HDCP_SW_UPPER_AKSV, hdcp_ctrl->aksv_msb);
+
+ /*
+ * HDCP setup prior to enabling HDCP_CTRL.
+ * Setup seed values for random number An.
+ */
+ hdmi_write(hdmi, REG_HDMI_HDCP_ENTROPY_CTRL0, 0xB1FFB0FF);
+ hdmi_write(hdmi, REG_HDMI_HDCP_ENTROPY_CTRL1, 0xF00DFACE);
+
+ /* Disable the RngCipher state */
+ reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_DEBUG_CTRL);
+ reg_val &= ~HDMI_HDCP_DEBUG_CTRL_RNG_CIPHER;
+ hdmi_write(hdmi, REG_HDMI_HDCP_DEBUG_CTRL, reg_val);
+ DBG("HDCP_DEBUG_CTRL=0x%08x",
+ hdmi_read(hdmi, REG_HDMI_HDCP_DEBUG_CTRL));
+
+ /*
+ * Ensure that all register writes are completed before
+ * enabling HDCP cipher
+ */
+ wmb();
+
+ /*
+ * Enable HDCP
+ * This needs to be done as early as possible in order for the
+ * hardware to make An available to read
+ */
+ hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, HDMI_HDCP_CTRL_ENABLE);
+
+ /*
+	/*
+	 * If we had stale values for the An ready bits, they should most
+	 * likely be cleared now, after enabling the HDCP cipher
+	 */
+ link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+ DBG("After enabling HDCP Link0_Status=0x%08x", link0_status);
+ if (!(link0_status &
+ (HDMI_HDCP_LINK0_STATUS_AN_0_READY |
+ HDMI_HDCP_LINK0_STATUS_AN_1_READY)))
+ DBG("An not ready after enabling HDCP");
+
+	/* Clear any DDC failures from previous tries before enabling HDCP */
+ rc = msm_reset_hdcp_ddc_failures(hdcp_ctrl);
+
+ return rc;
+}
+
+static void msm_hdmi_hdcp_auth_fail(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 reg_val;
+ unsigned long flags;
+
+ DBG("hdcp auth failed, queue reauth work");
+ /* clear HDMI Encrypt */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+ reg_val &= ~HDMI_CTRL_ENCRYPTED;
+ hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ hdcp_ctrl->hdcp_state = HDCP_STATE_AUTH_FAILED;
+ queue_work(hdmi->workq, &hdcp_ctrl->hdcp_reauth_work);
+}
+
+static void msm_hdmi_hdcp_auth_done(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 reg_val;
+ unsigned long flags;
+
+ /*
+ * Disable software DDC before going into part3 to make sure
+ * there is no Arbitration between software and hardware for DDC
+ */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_DDC_ARBITRATION);
+ reg_val |= HDMI_DDC_ARBITRATION_HW_ARBITRATION;
+ hdmi_write(hdmi, REG_HDMI_DDC_ARBITRATION, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ /* enable HDMI Encrypt */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+ reg_val |= HDMI_CTRL_ENCRYPTED;
+ hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATED;
+ hdcp_ctrl->auth_retries = 0;
+}
+
+/*
+ * HDCP authentication part 1:
+ * Wait for the HDCP key and An to become ready
+ * Read BCAPS from the sink
+ * Write BCAPS and AKSV into the HDCP engine
+ * Write An and AKSV to the sink
+ * Read BKSV from the sink and write it into the HDCP engine
+ */
+static int msm_hdmi_hdcp_wait_key_an_ready(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 link0_status, keys_state;
+ u32 timeout_count;
+ bool an_ready;
+
+ /* Wait for HDCP keys to be checked and validated */
+ timeout_count = 100;
+ do {
+ link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+ keys_state = (link0_status >> 28) & 0x7;
+ if (keys_state == HDCP_KEYS_STATE_VALID)
+ break;
+
+		DBG("Keys not ready(%d). s=%d, l0=0x%08x",
+			timeout_count, keys_state, link0_status);
+
+ timeout_count--;
+ if (!timeout_count) {
+			pr_err("%s: Wait key state timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+ } while (1);
+
+ timeout_count = 100;
+ do {
+ link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+ an_ready = (link0_status & HDMI_HDCP_LINK0_STATUS_AN_0_READY)
+ && (link0_status & HDMI_HDCP_LINK0_STATUS_AN_1_READY);
+ if (an_ready)
+ break;
+
+ DBG("An not ready(%d). l0_status=0x%08x",
+ timeout_count, link0_status);
+
+ timeout_count--;
+ if (!timeout_count) {
+			pr_err("%s: Wait An timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+ } while (1);
+
+ return 0;
+}
+
+static int msm_hdmi_hdcp_send_aksv_an(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc = 0;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 link0_aksv_0, link0_aksv_1;
+ u32 link0_an[2];
+ u8 aksv[5];
+
+ /* Read An0 and An1 */
+ link0_an[0] = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA5);
+ link0_an[1] = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA6);
+
+ /* Read AKSV */
+ link0_aksv_0 = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA3);
+ link0_aksv_1 = hdmi_read(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4);
+
+	DBG("Link AKSV=%08x%08x", link0_aksv_0, link0_aksv_1);
+ /* Copy An and AKSV to byte arrays for transmission */
+ aksv[0] = link0_aksv_0 & 0xFF;
+ aksv[1] = (link0_aksv_0 >> 8) & 0xFF;
+ aksv[2] = (link0_aksv_0 >> 16) & 0xFF;
+ aksv[3] = (link0_aksv_0 >> 24) & 0xFF;
+ aksv[4] = link0_aksv_1 & 0xFF;
+
+ /* Write An to offset 0x18 */
+ rc = msm_hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x18, (u8 *)link0_an,
+ (u16)sizeof(link0_an));
+ if (rc) {
+ pr_err("%s:An write failed\n", __func__);
+ return rc;
+ }
+ DBG("Link0-An=%08x%08x", link0_an[0], link0_an[1]);
+
+ /* Write AKSV to offset 0x10 */
+ rc = msm_hdmi_ddc_write(hdmi, HDCP_PORT_ADDR, 0x10, aksv, 5);
+ if (rc) {
+ pr_err("%s:AKSV write failed\n", __func__);
+ return rc;
+ }
+ DBG("Link0-AKSV=%02x%08x", link0_aksv_1 & 0xFF, link0_aksv_0);
+
+ return 0;
+}
+
+static int msm_hdmi_hdcp_recv_bksv(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc = 0;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u8 bksv[5];
+ u32 reg[2], data[2];
+
+ /* Read BKSV at offset 0x00 */
+ rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x00, bksv, 5);
+ if (rc) {
+ pr_err("%s:BKSV read failed\n", __func__);
+ return rc;
+ }
+
+ hdcp_ctrl->bksv_lsb = bksv[0] | (bksv[1] << 8) |
+ (bksv[2] << 16) | (bksv[3] << 24);
+ hdcp_ctrl->bksv_msb = bksv[4];
+ DBG(":BKSV=%02x%08x", hdcp_ctrl->bksv_msb, hdcp_ctrl->bksv_lsb);
+
+	/* a valid KSV must contain exactly 20 ones (and thus 20 zeros) */
+ if ((hweight32(hdcp_ctrl->bksv_lsb) + hweight32(hdcp_ctrl->bksv_msb))
+ != 20) {
+ pr_err(": BKSV doesn't have 20 1's and 20 0's\n");
+ pr_err(": BKSV chk fail. BKSV=%02x%02x%02x%02x%02x\n",
+ bksv[4], bksv[3], bksv[2], bksv[1], bksv[0]);
+ return -EINVAL;
+ }
+
+ /* Write BKSV read from sink to HDCP registers */
+ reg[0] = REG_HDMI_HDCP_RCVPORT_DATA0;
+ data[0] = hdcp_ctrl->bksv_lsb;
+ reg[1] = REG_HDMI_HDCP_RCVPORT_DATA1;
+ data[1] = hdcp_ctrl->bksv_msb;
+ rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
+
+ return rc;
+}
+
+static int msm_hdmi_hdcp_recv_bcaps(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc = 0;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 reg, data;
+ u8 bcaps;
+
+ rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
+ if (rc) {
+ pr_err("%s:BCAPS read failed\n", __func__);
+ return rc;
+ }
+ DBG("BCAPS=%02x", bcaps);
+
+ /* receiver (0), repeater (1) */
+ hdcp_ctrl->ds_type = (bcaps & BIT(6)) ? DS_REPEATER : DS_RECEIVER;
+
+ /* Write BCAPS to the hardware */
+ reg = REG_HDMI_HDCP_RCVPORT_DATA12;
+ data = (u32)bcaps;
+ rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+
+ return rc;
+}
+
+static int msm_hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ unsigned long flags;
+ int rc;
+
+ /* Wait for AKSV key and An ready */
+ rc = msm_hdmi_hdcp_wait_key_an_ready(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: wait key and an ready failed\n", __func__);
+ return rc;
+ }
+
+ /* Read BCAPS and send to HDCP engine */
+ rc = msm_hdmi_hdcp_recv_bcaps(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: read bcaps error, abort\n", __func__);
+ return rc;
+ }
+
+ /*
+ * 1.1_Features turned off by default.
+ * No need to write AInfo since 1.1_Features is disabled.
+ */
+ hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA4, 0);
+
+ /* Send AKSV and An to sink */
+ rc = msm_hdmi_hdcp_send_aksv_an(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s:An/Aksv write failed\n", __func__);
+ return rc;
+ }
+
+	/* Read BKSV and send to HDCP engine */
+ rc = msm_hdmi_hdcp_recv_bksv(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s:BKSV Process failed\n", __func__);
+ return rc;
+ }
+
+ /* Enable HDCP interrupts and ack/clear any stale interrupts */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL,
+ HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_ACK |
+ HDMI_HDCP_INT_CTRL_AUTH_SUCCESS_MASK |
+ HDMI_HDCP_INT_CTRL_AUTH_FAIL_ACK |
+ HDMI_HDCP_INT_CTRL_AUTH_FAIL_MASK |
+ HDMI_HDCP_INT_CTRL_AUTH_FAIL_INFO_ACK);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ return 0;
+}
+
+/* read R0' from sink and pass it to HDCP engine */
+static int msm_hdmi_hdcp_auth_part1_recv_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ int rc = 0;
+ u8 buf[2];
+
+ /*
+ * HDCP Compliance Test case 1A-01:
+ * Wait here at least 100ms before reading R0'
+ */
+ rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 125, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+
+ /* Read R0' at offset 0x08 */
+ rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x08, buf, 2);
+ if (rc) {
+ pr_err("%s:R0' read failed\n", __func__);
+ return rc;
+ }
+ DBG("R0'=%02x%02x", buf[1], buf[0]);
+
+ /* Write R0' to HDCP registers and check to see if it is a match */
+ hdmi_write(hdmi, REG_HDMI_HDCP_RCVPORT_DATA2_0,
+ (((u32)buf[1]) << 8) | buf[0]);
+
+ return 0;
+}
+
+/* Wait for authenticating result: R0/R0' are matched or not */
+static int msm_hdmi_hdcp_auth_part1_verify_r0(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 link0_status;
+ int rc;
+
+ /* wait for hdcp irq, 10 sec should be long enough */
+ rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 10000, AUTH_RESULT_RDY_EV);
+ if (!rc) {
+ pr_err("%s: Wait Auth IRQ timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+ if (!(link0_status & HDMI_HDCP_LINK0_STATUS_RI_MATCHES)) {
+ pr_err("%s: Authentication Part I failed\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Enable HDCP Encryption */
+ hdmi_write(hdmi, REG_HDMI_HDCP_CTRL,
+ HDMI_HDCP_CTRL_ENABLE |
+ HDMI_HDCP_CTRL_ENCRYPTION_ENABLE);
+
+ return 0;
+}
+
+static int msm_hdmi_hdcp_recv_check_bstatus(struct hdmi_hdcp_ctrl *hdcp_ctrl,
+ u16 *pbstatus)
+{
+ int rc;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ bool max_devs_exceeded = false, max_cascade_exceeded = false;
+ u32 repeater_cascade_depth = 0, down_stream_devices = 0;
+ u16 bstatus;
+ u8 buf[2];
+
+ /* Read BSTATUS at offset 0x41 */
+ rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x41, buf, 2);
+ if (rc) {
+ pr_err("%s: BSTATUS read failed\n", __func__);
+ goto error;
+ }
+ *pbstatus = bstatus = (buf[1] << 8) | buf[0];
+
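+	/*
+	 * BSTATUS layout (HDCP 1.x): bits 6:0 DEVICE_COUNT,
+	 * bit 7 MAX_DEVS_EXCEEDED, bits 10:8 DEPTH,
+	 * bit 11 MAX_CASCADE_EXCEEDED
+	 */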
+ down_stream_devices = bstatus & 0x7F;
+ repeater_cascade_depth = (bstatus >> 8) & 0x7;
+ max_devs_exceeded = (bstatus & BIT(7)) ? true : false;
+ max_cascade_exceeded = (bstatus & BIT(11)) ? true : false;
+
+ if (down_stream_devices == 0) {
+		/*
+		 * If no downstream devices are attached to the repeater,
+		 * part II fails.
+		 * TODO: the other approach would be to continue part II anyway.
+		 */
+ pr_err("%s: No downstream devices\n", __func__);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * HDCP Compliance 1B-05:
+ * Check if no. of devices connected to repeater
+ * exceed max_devices_connected from bit 7 of Bstatus.
+ */
+ if (max_devs_exceeded) {
+ pr_err("%s: no. of devs connected exceeds max allowed",
+ __func__);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * HDCP Compliance 1B-06:
+ * Check if no. of cascade connected to repeater
+ * exceed max_cascade_connected from bit 11 of Bstatus.
+ */
+ if (max_cascade_exceeded) {
+ pr_err("%s: no. of cascade conn exceeds max allowed",
+ __func__);
+ rc = -EINVAL;
+ goto error;
+ }
+
+error:
+ hdcp_ctrl->dev_count = down_stream_devices;
+ hdcp_ctrl->max_cascade_exceeded = max_cascade_exceeded;
+ hdcp_ctrl->max_dev_exceeded = max_devs_exceeded;
+ hdcp_ctrl->depth = repeater_cascade_depth;
+ return rc;
+}
+
+static int msm_hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(
+ struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 reg, data;
+ u32 timeout_count;
+ u16 bstatus;
+ u8 bcaps;
+
+	/*
+	 * Wait until the READY bit is set in BCAPS. Per the HDCP
+	 * specification, the maximum permitted time to check for the
+	 * READY bit is five seconds.
+	 */
+ timeout_count = 100;
+ do {
+ /* Read BCAPS at offset 0x40 */
+ rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x40, &bcaps, 1);
+ if (rc) {
+ pr_err("%s: BCAPS read failed\n", __func__);
+ return rc;
+ }
+
+ if (bcaps & BIT(5))
+ break;
+
+ timeout_count--;
+ if (!timeout_count) {
+			pr_err("%s: Wait KSV fifo ready timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+ } while (1);
+
+ rc = msm_hdmi_hdcp_recv_check_bstatus(hdcp_ctrl, &bstatus);
+ if (rc) {
+ pr_err("%s: bstatus error\n", __func__);
+ return rc;
+ }
+
+ /* Write BSTATUS and BCAPS to HDCP registers */
+ reg = REG_HDMI_HDCP_RCVPORT_DATA12;
+ data = bcaps | (bstatus << 8);
+ rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+ if (rc) {
+ pr_err("%s: BSTATUS write failed\n", __func__);
+ return rc;
+ }
+
+ return 0;
+}
+
+/*
+ * HDCP authentication part 2 (second stage):
+ * read the KSV FIFO from the sink
+ * transfer V' from the sink to the HDCP engine
+ * reset the SHA engine
+ */
+static int msm_hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ int rc = 0;
+ struct hdmi_hdcp_reg_data reg_data[] = {
+ {REG_HDMI_HDCP_RCVPORT_DATA7, 0x20, "V' H0"},
+ {REG_HDMI_HDCP_RCVPORT_DATA8, 0x24, "V' H1"},
+ {REG_HDMI_HDCP_RCVPORT_DATA9, 0x28, "V' H2"},
+ {REG_HDMI_HDCP_RCVPORT_DATA10, 0x2C, "V' H3"},
+ {REG_HDMI_HDCP_RCVPORT_DATA11, 0x30, "V' H4"},
+ };
+ struct hdmi_hdcp_reg_data *rd;
+ u32 size = ARRAY_SIZE(reg_data);
+ u32 reg[ARRAY_SIZE(reg_data)];
+ u32 data[ARRAY_SIZE(reg_data)];
+ int i;
+
+ for (i = 0; i < size; i++) {
+ rd = &reg_data[i];
+ rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR,
+ rd->off, (u8 *)&data[i], (u16)sizeof(data[i]));
+ if (rc) {
+ pr_err("%s: Read %s failed\n", __func__, rd->name);
+ goto error;
+ }
+
+ DBG("%s =%x", rd->name, data[i]);
+ reg[i] = reg_data[i].reg_id;
+ }
+
+ rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, size);
+
+error:
+ return rc;
+}
+
+static int msm_hdmi_hdcp_recv_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 ksv_bytes;
+
+ ksv_bytes = 5 * hdcp_ctrl->dev_count;
+
+ rc = msm_hdmi_ddc_read(hdmi, HDCP_PORT_ADDR, 0x43,
+ hdcp_ctrl->ksv_list, ksv_bytes);
+ if (rc)
+ pr_err("%s: KSV FIFO read failed\n", __func__);
+
+ return rc;
+}
+
+static int msm_hdmi_hdcp_reset_sha_engine(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ u32 reg[2], data[2];
+	int rc;
+
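+	/* toggle SHA_CTRL (enable, then disable) to reset the SHA engine */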
+ reg[0] = REG_HDMI_HDCP_SHA_CTRL;
+ data[0] = HDCP_REG_ENABLE;
+ reg[1] = REG_HDMI_HDCP_SHA_CTRL;
+ data[1] = HDCP_REG_DISABLE;
+
+ rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, reg, data, 2);
+
+ return rc;
+}
+
+static int msm_hdmi_hdcp_auth_part2_recv_ksv_fifo(
+ struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc;
+ u32 timeout_count;
+
+	/*
+	 * Read the KSV FIFO over DDC.
+	 * The Key Selection Vector FIFO is used to pull downstream KSVs
+	 * from HDCP repeaters.
+	 * All bytes (DEVICE_COUNT * 5) must be read in a single,
+	 * auto-incrementing access.
+	 * All bytes read as 0x00 for HDCP receivers that are not
+	 * HDCP repeaters (REPEATER == 0).
+	 */
+ timeout_count = 100;
+ do {
+ rc = msm_hdmi_hdcp_recv_ksv_fifo(hdcp_ctrl);
+ if (!rc)
+ break;
+
+ timeout_count--;
+ if (!timeout_count) {
+			pr_err("%s: Recv ksv fifo timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 25, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+ } while (1);
+
+ rc = msm_hdmi_hdcp_transfer_v_h(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: transfer V failed\n", __func__);
+ return rc;
+ }
+
+	/* reset the SHA engine before writing the KSV FIFO */
+ rc = msm_hdmi_hdcp_reset_sha_engine(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: fail to reset sha engine\n", __func__);
+ return rc;
+ }
+
+ return 0;
+}
+
+/*
+ * Write the KSV FIFO to HDCP_SHA_DATA.
+ * This is done one byte at a time, starting with the LSB.
+ * Once 64 bytes have been written, we need to poll for
+ * HDCP_SHA_BLOCK_DONE before writing any further.
+ * After the last byte is written, we need to poll for
+ * HDCP_SHA_COMP_DONE to wait until the hardware has finished.
+ */
+static int msm_hdmi_hdcp_write_ksv_fifo(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int i;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 ksv_bytes, last_byte = 0;
+ u8 *ksv_fifo = NULL;
+ u32 reg_val, data, reg;
+	int rc = 0;
+
+ ksv_bytes = 5 * hdcp_ctrl->dev_count;
+
+ /* Check if need to wait for HW completion */
+ if (hdcp_ctrl->ksv_fifo_w_index) {
+ reg_val = hdmi_read(hdmi, REG_HDMI_HDCP_SHA_STATUS);
+ DBG("HDCP_SHA_STATUS=%08x", reg_val);
+ if (hdcp_ctrl->ksv_fifo_w_index == ksv_bytes) {
+ /* check COMP_DONE if last write */
+ if (reg_val & HDMI_HDCP_SHA_STATUS_COMP_DONE) {
+ DBG("COMP_DONE");
+ return 0;
+ } else {
+ return -EAGAIN;
+ }
+ } else {
+ /* check BLOCK_DONE if not last write */
+ if (!(reg_val & HDMI_HDCP_SHA_STATUS_BLOCK_DONE))
+ return -EAGAIN;
+
+ DBG("BLOCK_DONE");
+ }
+ }
+
+ ksv_bytes -= hdcp_ctrl->ksv_fifo_w_index;
+ if (ksv_bytes <= 64)
+ last_byte = 1;
+ else
+ ksv_bytes = 64;
+
+ ksv_fifo = hdcp_ctrl->ksv_list;
+ ksv_fifo += hdcp_ctrl->ksv_fifo_w_index;
+
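+	/* the SHA data byte lives in bits 23:16 of HDCP_SHA_DATA */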
+ for (i = 0; i < ksv_bytes; i++) {
+		/* Write KSV byte and set DONE bit[0] for the last byte */
+ reg_val = ksv_fifo[i] << 16;
+ if ((i == (ksv_bytes - 1)) && last_byte)
+ reg_val |= HDMI_HDCP_SHA_DATA_DONE;
+
+ reg = REG_HDMI_HDCP_SHA_DATA;
+ data = reg_val;
+ rc = msm_hdmi_hdcp_scm_wr(hdcp_ctrl, &reg, &data, 1);
+
+ if (rc)
+ return rc;
+ }
+
+ hdcp_ctrl->ksv_fifo_w_index += ksv_bytes;
+
+	/*
+	 * Return -EAGAIN to notify the caller to wait for COMP_DONE or
+	 * BLOCK_DONE and call in again
+	 */
+ return -EAGAIN;
+}
+
+/* write ksv fifo into HDCP engine */
+static int msm_hdmi_hdcp_auth_part2_write_ksv_fifo(
+ struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc;
+ u32 timeout_count;
+
+ hdcp_ctrl->ksv_fifo_w_index = 0;
+ timeout_count = 100;
+ do {
+ rc = msm_hdmi_hdcp_write_ksv_fifo(hdcp_ctrl);
+ if (!rc)
+ break;
+
+ if (rc != -EAGAIN)
+ return rc;
+
+ timeout_count--;
+ if (!timeout_count) {
+			pr_err("%s: Write KSV fifo timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+ } while (1);
+
+ return 0;
+}
+
+static int msm_hdmi_hdcp_auth_part2_check_v_match(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ int rc = 0;
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 link0_status;
+ u32 timeout_count = 100;
+
+ do {
+ link0_status = hdmi_read(hdmi, REG_HDMI_HDCP_LINK0_STATUS);
+ if (link0_status & HDMI_HDCP_LINK0_STATUS_V_MATCHES)
+ break;
+
+ timeout_count--;
+ if (!timeout_count) {
+			pr_err("%s: HDCP V Match timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ rc = msm_hdmi_hdcp_msleep(hdcp_ctrl, 20, AUTH_ABORT_EV);
+ if (rc)
+ return rc;
+ } while (1);
+
+ return 0;
+}
+
+static void msm_hdmi_hdcp_auth_work(struct work_struct *work)
+{
+ struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
+ struct hdmi_hdcp_ctrl, hdcp_auth_work);
+ int rc;
+
+ rc = msm_hdmi_hdcp_auth_prepare(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: auth prepare failed %d\n", __func__, rc);
+ goto end;
+ }
+
+ /* HDCP PartI */
+ rc = msm_hdmi_hdcp_auth_part1_key_exchange(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: key exchange failed %d\n", __func__, rc);
+ goto end;
+ }
+
+ rc = msm_hdmi_hdcp_auth_part1_recv_r0(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: receive r0 failed %d\n", __func__, rc);
+ goto end;
+ }
+
+ rc = msm_hdmi_hdcp_auth_part1_verify_r0(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: verify r0 failed %d\n", __func__, rc);
+ goto end;
+ }
+ pr_info("%s: Authentication Part I successful\n", __func__);
+ if (hdcp_ctrl->ds_type == DS_RECEIVER)
+ goto end;
+
+ /* HDCP PartII */
+ rc = msm_hdmi_hdcp_auth_part2_wait_ksv_fifo_ready(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: wait ksv fifo ready failed %d\n", __func__, rc);
+ goto end;
+ }
+
+ rc = msm_hdmi_hdcp_auth_part2_recv_ksv_fifo(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: recv ksv fifo failed %d\n", __func__, rc);
+ goto end;
+ }
+
+ rc = msm_hdmi_hdcp_auth_part2_write_ksv_fifo(hdcp_ctrl);
+ if (rc) {
+ pr_err("%s: write ksv fifo failed %d\n", __func__, rc);
+ goto end;
+ }
+
+ rc = msm_hdmi_hdcp_auth_part2_check_v_match(hdcp_ctrl);
+ if (rc)
+ pr_err("%s: check v match failed %d\n", __func__, rc);
+
+end:
+ if (rc == -ECANCELED) {
+ pr_info("%s: hdcp authentication canceled\n", __func__);
+ } else if (rc == -ENOTSUPP) {
+ pr_info("%s: hdcp is not supported\n", __func__);
+ } else if (rc) {
+ pr_err("%s: hdcp authentication failed\n", __func__);
+ msm_hdmi_hdcp_auth_fail(hdcp_ctrl);
+ } else {
+ msm_hdmi_hdcp_auth_done(hdcp_ctrl);
+ }
+}
+
+void msm_hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ u32 reg_val;
+ unsigned long flags;
+
+ if ((HDCP_STATE_INACTIVE != hdcp_ctrl->hdcp_state) ||
+ (HDCP_STATE_NO_AKSV == hdcp_ctrl->hdcp_state)) {
+		DBG("still active or activating or no aksv. returning");
+ return;
+ }
+
+ /* clear HDMI Encrypt */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+ reg_val &= ~HDMI_CTRL_ENCRYPTED;
+ hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ hdcp_ctrl->auth_event = 0;
+ hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING;
+ hdcp_ctrl->auth_retries = 0;
+ queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
+}
+
+void msm_hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+ struct hdmi *hdmi = hdcp_ctrl->hdmi;
+ unsigned long flags;
+ u32 reg_val;
+
+ if ((HDCP_STATE_INACTIVE == hdcp_ctrl->hdcp_state) ||
+ (HDCP_STATE_NO_AKSV == hdcp_ctrl->hdcp_state)) {
+ DBG("hdcp inactive or no aksv. returning");
+ return;
+ }
+
+ /*
+ * Disable HPD circuitry.
+ * This is needed to reset the HDCP cipher engine so that when we
+ * attempt a re-authentication, HW would clear the AN0_READY and
+ * AN1_READY bits in HDMI_HDCP_LINK0_STATUS register
+ */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+ reg_val &= ~HDMI_HPD_CTRL_ENABLE;
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+
+ /*
+ * Disable HDCP interrupts.
+ * Also, need to set the state to inactive here so that any ongoing
+	 * reauth work will know that the HDCP session has been turned off.
+ */
+ hdmi_write(hdmi, REG_HDMI_HDCP_INT_CTRL, 0);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ /*
+ * Cancel any pending auth/reauth attempts.
+ * If one is ongoing, this will wait for it to finish.
+ * No more reauthentication attempts will be scheduled since we
+ * set the current state to inactive.
+ */
+ set_bit(AUTH_ABORT_EV, &hdcp_ctrl->auth_event);
+ wake_up_all(&hdcp_ctrl->auth_event_queue);
+ cancel_work_sync(&hdcp_ctrl->hdcp_auth_work);
+ cancel_work_sync(&hdcp_ctrl->hdcp_reauth_work);
+
+ hdmi_write(hdmi, REG_HDMI_HDCP_RESET,
+ HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE);
+
+ /* Disable encryption and disable the HDCP block */
+ hdmi_write(hdmi, REG_HDMI_HDCP_CTRL, 0);
+
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+ reg_val &= ~HDMI_CTRL_ENCRYPTED;
+ hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+
+ /* Enable HPD circuitry */
+ reg_val = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+ reg_val |= HDMI_HPD_CTRL_ENABLE;
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL, reg_val);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+
+ DBG("HDCP: Off");
+}
+
+struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi)
+{
+ struct hdmi_hdcp_ctrl *hdcp_ctrl = NULL;
+
+ if (!hdmi->qfprom_mmio) {
+ pr_err("%s: HDCP is not supported without qfprom\n",
+ __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ hdcp_ctrl = kzalloc(sizeof(*hdcp_ctrl), GFP_KERNEL);
+ if (!hdcp_ctrl)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_WORK(&hdcp_ctrl->hdcp_auth_work, msm_hdmi_hdcp_auth_work);
+ INIT_WORK(&hdcp_ctrl->hdcp_reauth_work, msm_hdmi_hdcp_reauth_work);
+ init_waitqueue_head(&hdcp_ctrl->auth_event_queue);
+ hdcp_ctrl->hdmi = hdmi;
+ hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+ hdcp_ctrl->aksv_valid = false;
+
+ if (qcom_scm_hdcp_available())
+ hdcp_ctrl->tz_hdcp = true;
+ else
+ hdcp_ctrl->tz_hdcp = false;
+
+ return hdcp_ctrl;
+}
+
+void msm_hdmi_hdcp_destroy(struct hdmi *hdmi)
+{
+ if (hdmi) {
+ kfree(hdmi->hdcp_ctrl);
+ hdmi->hdcp_ctrl = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
new file mode 100644
index 000000000..bfa827b47
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "msm_kms.h"
+#include "hdmi.h"
+
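+/*
+ * Toggle the PHY and PLL software resets. The *_LOW bits indicate
+ * inverted (active-low) reset polarity, so the assert/deassert
+ * direction depends on the current register value.
+ */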
+static void msm_hdmi_phy_reset(struct hdmi *hdmi)
+{
+ unsigned int val;
+
+ val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ }
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+ } else {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+ }
+
+ msleep(100);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ }
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+ } else {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+ }
+}
+
+static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
+{
+ const struct hdmi_platform_config *config = hdmi->config;
+ struct device *dev = &hdmi->pdev->dev;
+ int i, ret;
+
+ if (enable) {
+ for (i = 0; i < config->hpd_clk_cnt; i++) {
+ if (config->hpd_freq && config->hpd_freq[i]) {
+ ret = clk_set_rate(hdmi->hpd_clks[i],
+ config->hpd_freq[i]);
+ if (ret)
+ dev_warn(dev,
+ "failed to set clk %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ }
+
+ ret = clk_prepare_enable(hdmi->hpd_clks[i]);
+ if (ret) {
+ DRM_DEV_ERROR(dev,
+ "failed to enable hpd clk: %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ }
+ }
+ } else {
+ for (i = config->hpd_clk_cnt - 1; i >= 0; i--)
+ clk_disable_unprepare(hdmi->hpd_clks[i]);
+ }
+}
+
+int msm_hdmi_hpd_enable(struct drm_bridge *bridge)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ struct device *dev = &hdmi->pdev->dev;
+ uint32_t hpd_ctrl;
+ int ret;
+ unsigned long flags;
+
+ ret = regulator_bulk_enable(config->hpd_reg_cnt, hdmi->hpd_regs);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to enable hpd regulators: %d\n", ret);
+ goto fail;
+ }
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "pinctrl state chg failed: %d\n", ret);
+ goto fail;
+ }
+
+ if (hdmi->hpd_gpiod)
+ gpiod_set_value_cansleep(hdmi->hpd_gpiod, 1);
+
+ pm_runtime_get_sync(dev);
+ enable_hpd_clocks(hdmi, true);
+
+ msm_hdmi_set_mode(hdmi, false);
+ msm_hdmi_phy_reset(hdmi);
+ msm_hdmi_set_mode(hdmi, true);
+
+ hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
+
+ /* enable HPD events: */
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
+ HDMI_HPD_INT_CTRL_INT_CONNECT |
+ HDMI_HPD_INT_CTRL_INT_EN);
+
+ /* set timeout to 4.1ms (max) for hardware debounce */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+ hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);
+
+ /* Toggle HPD circuit to trigger HPD sense */
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+ ~HDMI_HPD_CTRL_ENABLE & hpd_ctrl);
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+ HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+void msm_hdmi_hpd_disable(struct hdmi_bridge *hdmi_bridge)
+{
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ struct device *dev = &hdmi->pdev->dev;
+ int ret;
+
+ /* Disable HPD interrupt */
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
+
+ msm_hdmi_set_mode(hdmi, false);
+
+ enable_hpd_clocks(hdmi, false);
+ pm_runtime_put(dev);
+
+ ret = pinctrl_pm_select_sleep_state(dev);
+ if (ret)
+ dev_warn(dev, "pinctrl state chg failed: %d\n", ret);
+
+ ret = regulator_bulk_disable(config->hpd_reg_cnt, hdmi->hpd_regs);
+ if (ret)
+ dev_warn(dev, "failed to disable hpd regulator: %d\n", ret);
+}
+
+void msm_hdmi_hpd_irq(struct drm_bridge *bridge)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ uint32_t hpd_int_status, hpd_int_ctrl;
+
+ /* Process HPD: */
+ hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+ hpd_int_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_INT_CTRL);
+
+ if ((hpd_int_ctrl & HDMI_HPD_INT_CTRL_INT_EN) &&
+ (hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
+ bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED);
+
+ /* ack & disable (temporarily) HPD events: */
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
+ HDMI_HPD_INT_CTRL_INT_ACK);
+
+ DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
+
+		/* detect disconnect if we are connected or vice versa: */
+ hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
+ if (!detected)
+ hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
+
+ queue_work(hdmi->workq, &hdmi_bridge->hpd_work);
+ }
+}
+
+static enum drm_connector_status detect_reg(struct hdmi *hdmi)
+{
+ uint32_t hpd_int_status;
+
+ pm_runtime_get_sync(&hdmi->pdev->dev);
+ enable_hpd_clocks(hdmi, true);
+
+ hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+
+ enable_hpd_clocks(hdmi, false);
+ pm_runtime_put(&hdmi->pdev->dev);
+
+ return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+#define HPD_GPIO_INDEX 2
+static enum drm_connector_status detect_gpio(struct hdmi *hdmi)
+{
+ return gpiod_get_value(hdmi->hpd_gpiod) ?
+ connector_status_connected :
+ connector_status_disconnected;
+}
+
+enum drm_connector_status msm_hdmi_bridge_detect(
+ struct drm_bridge *bridge)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ enum drm_connector_status stat_gpio, stat_reg;
+ int retry = 20;
+
+ /*
+ * some platforms may not have hpd gpio. Rely only on the status
+ * provided by REG_HDMI_HPD_INT_STATUS in this case.
+ */
+ if (!hdmi->hpd_gpiod)
+ return detect_reg(hdmi);
+
+ do {
+ stat_gpio = detect_gpio(hdmi);
+ stat_reg = detect_reg(hdmi);
+
+ if (stat_gpio == stat_reg)
+ break;
+
+ mdelay(10);
+ } while (--retry);
+
+	/* the status we get from reading gpio seems to be more reliable,
+	 * so trust that one if we didn't manage to get the hdmi and
+	 * gpio status to agree:
+	 */
+ if (stat_gpio != stat_reg) {
+ DBG("HDMI_HPD_INT_STATUS tells us: %d", stat_reg);
+ DBG("hpd gpio tells us: %d", stat_gpio);
+ }
+
+ return stat_gpio;
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
new file mode 100644
index 000000000..de182c004
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include "hdmi.h"
+
+struct hdmi_i2c_adapter {
+ struct i2c_adapter base;
+ struct hdmi *hdmi;
+ bool sw_done;
+ wait_queue_head_t ddc_event;
+};
+#define to_hdmi_i2c_adapter(x) container_of(x, struct hdmi_i2c_adapter, base)
+
+static void init_ddc(struct hdmi_i2c_adapter *hdmi_i2c)
+{
+ struct hdmi *hdmi = hdmi_i2c->hdmi;
+
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
+ HDMI_DDC_CTRL_SW_STATUS_RESET);
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
+ HDMI_DDC_CTRL_SOFT_RESET);
+
+ hdmi_write(hdmi, REG_HDMI_DDC_SPEED,
+ HDMI_DDC_SPEED_THRESHOLD(2) |
+ HDMI_DDC_SPEED_PRESCALE(10));
+
+ hdmi_write(hdmi, REG_HDMI_DDC_SETUP,
+ HDMI_DDC_SETUP_TIMEOUT(0xff));
+
+ /* enable reference timer for 27us */
+ hdmi_write(hdmi, REG_HDMI_DDC_REF,
+ HDMI_DDC_REF_REFTIMER_ENABLE |
+ HDMI_DDC_REF_REFTIMER(27));
+}
+
+static int ddc_clear_irq(struct hdmi_i2c_adapter *hdmi_i2c)
+{
+ struct hdmi *hdmi = hdmi_i2c->hdmi;
+ struct drm_device *dev = hdmi->dev;
+ uint32_t retry = 0xffff;
+ uint32_t ddc_int_ctrl;
+
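+	/* keep acking SW_DONE until the interrupt status bit deasserts */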
+ do {
+ --retry;
+
+ hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL,
+ HDMI_DDC_INT_CTRL_SW_DONE_ACK |
+ HDMI_DDC_INT_CTRL_SW_DONE_MASK);
+
+ ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL);
+
+ } while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry);
+
+ if (!retry) {
+ DRM_DEV_ERROR(dev->dev, "timeout waiting for DDC\n");
+ return -ETIMEDOUT;
+ }
+
+ hdmi_i2c->sw_done = false;
+
+ return 0;
+}
+
+#define MAX_TRANSACTIONS 4
+
+static bool sw_done(struct hdmi_i2c_adapter *hdmi_i2c)
+{
+ struct hdmi *hdmi = hdmi_i2c->hdmi;
+
+ if (!hdmi_i2c->sw_done) {
+ uint32_t ddc_int_ctrl;
+
+ ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL);
+
+ if ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_MASK) &&
+ (ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT)) {
+ hdmi_i2c->sw_done = true;
+ hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL,
+ HDMI_DDC_INT_CTRL_SW_DONE_ACK);
+ }
+ }
+
+ return hdmi_i2c->sw_done;
+}
+
+static int msm_hdmi_i2c_xfer(struct i2c_adapter *i2c,
+ struct i2c_msg *msgs, int num)
+{
+ struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
+ struct hdmi *hdmi = hdmi_i2c->hdmi;
+ struct drm_device *dev = hdmi->dev;
+ static const uint32_t nack[] = {
+ HDMI_DDC_SW_STATUS_NACK0, HDMI_DDC_SW_STATUS_NACK1,
+ HDMI_DDC_SW_STATUS_NACK2, HDMI_DDC_SW_STATUS_NACK3,
+ };
+ int indices[MAX_TRANSACTIONS];
+ int ret, i, j, index = 0;
+ uint32_t ddc_status, ddc_data, i2c_trans;
+
+ num = min(num, MAX_TRANSACTIONS);
+
+ WARN_ON(!(hdmi_read(hdmi, REG_HDMI_CTRL) & HDMI_CTRL_ENABLE));
+
+ if (num == 0)
+ return num;
+
+ init_ddc(hdmi_i2c);
+
+ ret = ddc_clear_irq(hdmi_i2c);
+ if (ret)
+ return ret;
+
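+	/*
+	 * Queue each message into the DDC data FIFO: first the 8-bit
+	 * slave address (with the R/W bit), then the payload bytes for
+	 * writes; reads only reserve FIFO space that is drained below.
+	 */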
+ for (i = 0; i < num; i++) {
+ struct i2c_msg *p = &msgs[i];
+ uint32_t raw_addr = p->addr << 1;
+
+ if (p->flags & I2C_M_RD)
+ raw_addr |= 1;
+
+ ddc_data = HDMI_DDC_DATA_DATA(raw_addr) |
+ HDMI_DDC_DATA_DATA_RW(DDC_WRITE);
+
+ if (i == 0) {
+ ddc_data |= HDMI_DDC_DATA_INDEX(0) |
+ HDMI_DDC_DATA_INDEX_WRITE;
+ }
+
+ hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
+ index++;
+
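+		/*
+		 * remember where this message's data starts in the DDC
+		 * FIFO; reads seek back to it via DDC_DATA_INDEX below
+		 */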
+ indices[i] = index;
+
+ if (p->flags & I2C_M_RD) {
+ index += p->len;
+ } else {
+ for (j = 0; j < p->len; j++) {
+ ddc_data = HDMI_DDC_DATA_DATA(p->buf[j]) |
+ HDMI_DDC_DATA_DATA_RW(DDC_WRITE);
+ hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
+ index++;
+ }
+ }
+
+ i2c_trans = HDMI_I2C_TRANSACTION_REG_CNT(p->len) |
+ HDMI_I2C_TRANSACTION_REG_RW(
+ (p->flags & I2C_M_RD) ? DDC_READ : DDC_WRITE) |
+ HDMI_I2C_TRANSACTION_REG_START;
+
+ if (i == (num - 1))
+ i2c_trans |= HDMI_I2C_TRANSACTION_REG_STOP;
+
+ hdmi_write(hdmi, REG_HDMI_I2C_TRANSACTION(i), i2c_trans);
+ }
+
+ /* trigger the transfer: */
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
+ HDMI_DDC_CTRL_TRANSACTION_CNT(num - 1) |
+ HDMI_DDC_CTRL_GO);
+
+ ret = wait_event_timeout(hdmi_i2c->ddc_event, sw_done(hdmi_i2c), HZ/4);
+ if (ret <= 0) {
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+ dev_warn(dev->dev, "DDC timeout: %d\n", ret);
+ DBG("sw_status=%08x, hw_status=%08x, int_ctrl=%08x",
+ hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS),
+ hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS),
+ hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL));
+ return ret;
+ }
+
+ ddc_status = hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS);
+
+ /* read back results of any read transactions: */
+ for (i = 0; i < num; i++) {
+ struct i2c_msg *p = &msgs[i];
+
+ if (!(p->flags & I2C_M_RD))
+ continue;
+
+ /* check for NACK: */
+ if (ddc_status & nack[i]) {
+ DBG("ddc_status=%08x", ddc_status);
+ break;
+ }
+
+ ddc_data = HDMI_DDC_DATA_DATA_RW(DDC_READ) |
+ HDMI_DDC_DATA_INDEX(indices[i]) |
+ HDMI_DDC_DATA_INDEX_WRITE;
+
+ hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
+
+ /* discard first byte: */
+ hdmi_read(hdmi, REG_HDMI_DDC_DATA);
+
+ for (j = 0; j < p->len; j++) {
+ ddc_data = hdmi_read(hdmi, REG_HDMI_DDC_DATA);
+ p->buf[j] = FIELD(ddc_data, HDMI_DDC_DATA_DATA);
+ }
+ }
+
+ return i;
+}
+
+static u32 msm_hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm msm_hdmi_i2c_algorithm = {
+ .master_xfer = msm_hdmi_i2c_xfer,
+ .functionality = msm_hdmi_i2c_func,
+};
+
+void msm_hdmi_i2c_irq(struct i2c_adapter *i2c)
+{
+ struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
+
+ if (sw_done(hdmi_i2c))
+ wake_up_all(&hdmi_i2c->ddc_event);
+}
+
+void msm_hdmi_i2c_destroy(struct i2c_adapter *i2c)
+{
+ struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
+ i2c_del_adapter(i2c);
+ kfree(hdmi_i2c);
+}
+
+struct i2c_adapter *msm_hdmi_i2c_init(struct hdmi *hdmi)
+{
+ struct hdmi_i2c_adapter *hdmi_i2c;
+ struct i2c_adapter *i2c = NULL;
+ int ret;
+
+ hdmi_i2c = kzalloc(sizeof(*hdmi_i2c), GFP_KERNEL);
+ if (!hdmi_i2c) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ i2c = &hdmi_i2c->base;
+
+ hdmi_i2c->hdmi = hdmi;
+ init_waitqueue_head(&hdmi_i2c->ddc_event);
+
+ i2c->owner = THIS_MODULE;
+ i2c->class = I2C_CLASS_DDC;
+ snprintf(i2c->name, sizeof(i2c->name), "msm hdmi i2c");
+ i2c->dev.parent = &hdmi->pdev->dev;
+ i2c->algo = &msm_hdmi_i2c_algorithm;
+
+ ret = i2c_add_adapter(i2c);
+ if (ret)
+ goto fail;
+
+ return i2c;
+
+fail:
+ if (i2c)
+ msm_hdmi_i2c_destroy(i2c);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
new file mode 100644
index 000000000..9780107e1
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of_device.h>
+
+#include "hdmi.h"
+
+static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_cfg *cfg = phy->cfg;
+ struct device *dev = &phy->pdev->dev;
+ int i, ret;
+
+ phy->regs = devm_kcalloc(dev, cfg->num_regs, sizeof(phy->regs[0]),
+ GFP_KERNEL);
+ if (!phy->regs)
+ return -ENOMEM;
+
+ phy->clks = devm_kcalloc(dev, cfg->num_clks, sizeof(phy->clks[0]),
+ GFP_KERNEL);
+ if (!phy->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < cfg->num_regs; i++)
+ phy->regs[i].supply = cfg->reg_names[i];
+
+ ret = devm_regulator_bulk_get(dev, cfg->num_regs, phy->regs);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to get phy regulators: %d\n", ret);
+
+ return ret;
+ }
+
+ for (i = 0; i < cfg->num_clks; i++) {
+ struct clk *clk;
+
+ clk = msm_clk_get(phy->pdev, cfg->clk_names[i]);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ DRM_DEV_ERROR(dev, "failed to get phy clock: %s (%d)\n",
+ cfg->clk_names[i], ret);
+ return ret;
+ }
+
+ phy->clks[i] = clk;
+ }
+
+ return 0;
+}
+
+int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_cfg *cfg = phy->cfg;
+ struct device *dev = &phy->pdev->dev;
+ int i, ret = 0;
+
+ pm_runtime_get_sync(dev);
+
+ ret = regulator_bulk_enable(cfg->num_regs, phy->regs);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to enable regulators: (%d)\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < cfg->num_clks; i++) {
+ ret = clk_prepare_enable(phy->clks[i]);
+ if (ret)
+ DRM_DEV_ERROR(dev, "failed to enable clock: %s (%d)\n",
+ cfg->clk_names[i], ret);
+ }
+
+ return ret;
+}
+
+void msm_hdmi_phy_resource_disable(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_cfg *cfg = phy->cfg;
+ struct device *dev = &phy->pdev->dev;
+ int i;
+
+ for (i = cfg->num_clks - 1; i >= 0; i--)
+ clk_disable_unprepare(phy->clks[i]);
+
+ regulator_bulk_disable(cfg->num_regs, phy->regs);
+
+ pm_runtime_put_sync(dev);
+}
+
+void msm_hdmi_phy_powerup(struct hdmi_phy *phy, unsigned long int pixclock)
+{
+ if (!phy || !phy->cfg->powerup)
+ return;
+
+ phy->cfg->powerup(phy, pixclock);
+}
+
+void msm_hdmi_phy_powerdown(struct hdmi_phy *phy)
+{
+ if (!phy || !phy->cfg->powerdown)
+ return;
+
+ phy->cfg->powerdown(phy);
+}
+
+static int msm_hdmi_phy_pll_init(struct platform_device *pdev,
+ enum hdmi_phy_type type)
+{
+ int ret;
+
+ switch (type) {
+ case MSM_HDMI_PHY_8960:
+ ret = msm_hdmi_pll_8960_init(pdev);
+ break;
+ case MSM_HDMI_PHY_8996:
+ ret = msm_hdmi_pll_8996_init(pdev);
+ break;
+ /*
+ * we don't have PLL support for these, don't report an error for now
+ */
+ case MSM_HDMI_PHY_8x60:
+ case MSM_HDMI_PHY_8x74:
+ default:
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+static int msm_hdmi_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hdmi_phy *phy;
+ int ret;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+		return -ENOMEM;
+
+ phy->cfg = (struct hdmi_phy_cfg *)of_device_get_match_data(dev);
+ if (!phy->cfg)
+ return -ENODEV;
+
+ phy->mmio = msm_ioremap(pdev, "hdmi_phy");
+ if (IS_ERR(phy->mmio)) {
+ DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
+		return PTR_ERR(phy->mmio);
+ }
+
+ phy->pdev = pdev;
+
+ ret = msm_hdmi_phy_resource_init(phy);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = msm_hdmi_phy_resource_enable(phy);
+ if (ret)
+ return ret;
+
+ ret = msm_hdmi_phy_pll_init(pdev, phy->cfg->type);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "couldn't init PLL\n");
+ msm_hdmi_phy_resource_disable(phy);
+ return ret;
+ }
+
+ msm_hdmi_phy_resource_disable(phy);
+
+ platform_set_drvdata(pdev, phy);
+
+ return 0;
+}
+
+static int msm_hdmi_phy_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id msm_hdmi_phy_dt_match[] = {
+ { .compatible = "qcom,hdmi-phy-8660",
+ .data = &msm_hdmi_phy_8x60_cfg },
+ { .compatible = "qcom,hdmi-phy-8960",
+ .data = &msm_hdmi_phy_8960_cfg },
+ { .compatible = "qcom,hdmi-phy-8974",
+ .data = &msm_hdmi_phy_8x74_cfg },
+ { .compatible = "qcom,hdmi-phy-8084",
+ .data = &msm_hdmi_phy_8x74_cfg },
+ { .compatible = "qcom,hdmi-phy-8996",
+ .data = &msm_hdmi_phy_8996_cfg },
+ {}
+};
+
+static struct platform_driver msm_hdmi_phy_platform_driver = {
+ .probe = msm_hdmi_phy_probe,
+ .remove = msm_hdmi_phy_remove,
+ .driver = {
+ .name = "msm_hdmi_phy",
+ .of_match_table = msm_hdmi_phy_dt_match,
+ },
+};
+
+void __init msm_hdmi_phy_driver_register(void)
+{
+ platform_driver_register(&msm_hdmi_phy_platform_driver);
+}
+
+void __exit msm_hdmi_phy_driver_unregister(void)
+{
+ platform_driver_unregister(&msm_hdmi_phy_platform_driver);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
new file mode 100644
index 000000000..cf90a0c1f
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include "hdmi.h"
+
+static void hdmi_phy_8960_powerup(struct hdmi_phy *phy,
+ unsigned long int pixclock)
+{
+ DBG("pixclock: %lu", pixclock);
+
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x00);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG0, 0x1b);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG1, 0xf2);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG4, 0x00);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG5, 0x00);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG6, 0x00);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG7, 0x00);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG8, 0x00);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG9, 0x00);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG10, 0x00);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG11, 0x00);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG3, 0x20);
+}
+
+static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy)
+{
+ DBG("");
+
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x7f);
+}
+
+static const char * const hdmi_phy_8960_reg_names[] = {
+ "core-vdda",
+};
+
+static const char * const hdmi_phy_8960_clk_names[] = {
+ "slave_iface",
+};
+
+const struct hdmi_phy_cfg msm_hdmi_phy_8960_cfg = {
+ .type = MSM_HDMI_PHY_8960,
+ .powerup = hdmi_phy_8960_powerup,
+ .powerdown = hdmi_phy_8960_powerdown,
+ .reg_names = hdmi_phy_8960_reg_names,
+ .num_regs = ARRAY_SIZE(hdmi_phy_8960_reg_names),
+ .clk_names = hdmi_phy_8960_clk_names,
+ .num_clks = ARRAY_SIZE(hdmi_phy_8960_clk_names),
+};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
new file mode 100644
index 000000000..4dd055416
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -0,0 +1,765 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+
+#include "hdmi.h"
+
+#define HDMI_VCO_MAX_FREQ 12000000000UL
+#define HDMI_VCO_MIN_FREQ 8000000000UL
+
+#define HDMI_PCLK_MAX_FREQ 600000000
+#define HDMI_PCLK_MIN_FREQ 25000000
+
+#define HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD 3400000000UL
+#define HDMI_DIG_FREQ_BIT_CLK_THRESHOLD 1500000000UL
+#define HDMI_MID_FREQ_BIT_CLK_THRESHOLD 750000000UL
+#define HDMI_CORECLK_DIV 5
+#define HDMI_DEFAULT_REF_CLOCK 19200000
+#define HDMI_PLL_CMP_CNT 1024
+
+#define HDMI_PLL_POLL_MAX_READS 100
+#define HDMI_PLL_POLL_TIMEOUT_US 150
+
+#define HDMI_NUM_TX_CHANNEL 4
+
+struct hdmi_pll_8996 {
+ struct platform_device *pdev;
+ struct clk_hw clk_hw;
+
+ /* pll mmio base */
+ void __iomem *mmio_qserdes_com;
+ /* tx channel base */
+ void __iomem *mmio_qserdes_tx[HDMI_NUM_TX_CHANNEL];
+};
+
+#define hw_clk_to_pll(x) container_of(x, struct hdmi_pll_8996, clk_hw)
+
+struct hdmi_8996_phy_pll_reg_cfg {
+ u32 tx_lx_lane_mode[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_tx_band[HDMI_NUM_TX_CHANNEL];
+ u32 com_svs_mode_clk_sel;
+ u32 com_hsclk_sel;
+ u32 com_pll_cctrl_mode0;
+ u32 com_pll_rctrl_mode0;
+ u32 com_cp_ctrl_mode0;
+ u32 com_dec_start_mode0;
+ u32 com_div_frac_start1_mode0;
+ u32 com_div_frac_start2_mode0;
+ u32 com_div_frac_start3_mode0;
+ u32 com_integloop_gain0_mode0;
+ u32 com_integloop_gain1_mode0;
+ u32 com_lock_cmp_en;
+ u32 com_lock_cmp1_mode0;
+ u32 com_lock_cmp2_mode0;
+ u32 com_lock_cmp3_mode0;
+ u32 com_core_clk_en;
+ u32 com_coreclk_div;
+ u32 com_vco_tune_ctrl;
+
+ u32 tx_lx_tx_drv_lvl[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_tx_emp_post1_lvl[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_vmode_ctrl1[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_vmode_ctrl2[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_res_code_lane_tx[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_hp_pd_enables[HDMI_NUM_TX_CHANNEL];
+
+ u32 phy_mode;
+};
+
+struct hdmi_8996_post_divider {
+ u64 vco_freq;
+ int hsclk_divsel;
+ int vco_ratio;
+ int tx_band_sel;
+ int half_rate_mode;
+};
+
+static inline struct hdmi_phy *pll_get_phy(struct hdmi_pll_8996 *pll)
+{
+ return platform_get_drvdata(pll->pdev);
+}
+
+static inline void hdmi_pll_write(struct hdmi_pll_8996 *pll, int offset,
+ u32 data)
+{
+ msm_writel(data, pll->mmio_qserdes_com + offset);
+}
+
+static inline u32 hdmi_pll_read(struct hdmi_pll_8996 *pll, int offset)
+{
+ return msm_readl(pll->mmio_qserdes_com + offset);
+}
+
+static inline void hdmi_tx_chan_write(struct hdmi_pll_8996 *pll, int channel,
+ int offset, int data)
+{
+ msm_writel(data, pll->mmio_qserdes_tx[channel] + offset);
+}
+
+static inline u32 pll_get_cpctrl(u64 frac_start, unsigned long ref_clk,
+ bool gen_ssc)
+{
+ if ((frac_start != 0) || gen_ssc)
+ return (11000000 / (ref_clk / 20));
+
+ return 0x23;
+}
+
+static inline u32 pll_get_rctrl(u64 frac_start, bool gen_ssc)
+{
+ if ((frac_start != 0) || gen_ssc)
+ return 0x16;
+
+ return 0x10;
+}
+
+static inline u32 pll_get_cctrl(u64 frac_start, bool gen_ssc)
+{
+ if ((frac_start != 0) || gen_ssc)
+ return 0x28;
+
+ return 0x1;
+}
+
+static inline u32 pll_get_integloop_gain(u64 frac_start, u64 bclk, u32 ref_clk,
+ bool gen_ssc)
+{
+ int digclk_divsel = bclk >= HDMI_DIG_FREQ_BIT_CLK_THRESHOLD ? 1 : 2;
+ u64 base;
+
+ if ((frac_start != 0) || gen_ssc)
+ base = (64 * ref_clk) / HDMI_DEFAULT_REF_CLOCK;
+ else
+ base = (1022 * ref_clk) / 100;
+
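+	/* scale by the digital clock divider; the gain field caps at 2046 */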
+ base <<= digclk_divsel;
+
+ return (base <= 2046 ? base : 2046);
+}
+
+static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk)
+{
+ u64 dividend = HDMI_PLL_CMP_CNT * fdata;
+ u32 divisor = ref_clk * 10;
+ u32 rem;
+
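+	/* lock-comparator count = round(fdata * 1024 / (ref_clk * 10)) - 1 */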
+ rem = do_div(dividend, divisor);
+ if (rem > (divisor >> 1))
+ dividend++;
+
+ return dividend - 1;
+}
+
+static inline u64 pll_cmp_to_fdata(u32 pll_cmp, unsigned long ref_clk)
+{
+ u64 fdata = ((u64)pll_cmp) * ref_clk * 10;
+
+ do_div(fdata, HDMI_PLL_CMP_CNT);
+
+ return fdata;
+}
+
+static int pll_get_post_div(struct hdmi_8996_post_divider *pd, u64 bclk)
+{
+ int ratio[] = { 2, 3, 4, 5, 6, 9, 10, 12, 14, 15, 20, 21, 25, 28, 35 };
+ int hs_divsel[] = { 0, 4, 8, 12, 1, 5, 2, 9, 3, 13, 10, 7, 14, 11, 15 };
+ int tx_band_sel[] = { 0, 1, 2, 3 };
+ u64 vco_freq[60];
+ u64 vco, vco_optimal;
+ int half_rate_mode = 0;
+ int vco_optimal_index, vco_freq_index;
+ int i, j;
+
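+	/*
+	 * Build all 15 x 4 candidate VCO frequencies (divider ratio times
+	 * TX band multiplier), pick the lowest one inside the VCO range,
+	 * and if none fits retry once in half-rate mode.
+	 */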
+retry:
+ vco_optimal = HDMI_VCO_MAX_FREQ;
+ vco_optimal_index = -1;
+ vco_freq_index = 0;
+ for (i = 0; i < 15; i++) {
+ for (j = 0; j < 4; j++) {
+ u32 ratio_mult = ratio[i] << tx_band_sel[j];
+
+ vco = bclk >> half_rate_mode;
+ vco *= ratio_mult;
+ vco_freq[vco_freq_index++] = vco;
+ }
+ }
+
+ for (i = 0; i < 60; i++) {
+ u64 vco_tmp = vco_freq[i];
+
+ if ((vco_tmp >= HDMI_VCO_MIN_FREQ) &&
+ (vco_tmp <= vco_optimal)) {
+ vco_optimal = vco_tmp;
+ vco_optimal_index = i;
+ }
+ }
+
+ if (vco_optimal_index == -1) {
+ if (!half_rate_mode) {
+ half_rate_mode = 1;
+ goto retry;
+ }
+ } else {
+ pd->vco_freq = vco_optimal;
+ pd->tx_band_sel = tx_band_sel[vco_optimal_index % 4];
+ pd->vco_ratio = ratio[vco_optimal_index / 4];
+ pd->hsclk_divsel = hs_divsel[vco_optimal_index / 4];
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int pll_calculate(unsigned long pix_clk, unsigned long ref_clk,
+ struct hdmi_8996_phy_pll_reg_cfg *cfg)
+{
+ struct hdmi_8996_post_divider pd;
+ u64 bclk;
+ u64 tmds_clk;
+ u64 dec_start;
+ u64 frac_start;
+ u64 fdata;
+ u32 pll_divisor;
+ u32 rem;
+ u32 cpctrl;
+ u32 rctrl;
+ u32 cctrl;
+ u32 integloop_gain;
+ u32 pll_cmp;
+ int i, ret;
+
+ /* bit clk = 10 * pix_clk */
+ bclk = ((u64)pix_clk) * 10;
+
+ if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD)
+ tmds_clk = pix_clk >> 2;
+ else
+ tmds_clk = pix_clk;
+
+ ret = pll_get_post_div(&pd, bclk);
+ if (ret)
+ return ret;
+
+ dec_start = pd.vco_freq;
+ pll_divisor = 4 * ref_clk;
+ do_div(dec_start, pll_divisor);
+
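+	/*
+	 * frac_start is the fractional part of vco_freq / (4 * ref_clk)
+	 * in 20-bit fixed point, rounded to nearest
+	 */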
+ frac_start = pd.vco_freq * (1 << 20);
+
+ rem = do_div(frac_start, pll_divisor);
+ frac_start -= dec_start * (1 << 20);
+ if (rem > (pll_divisor >> 1))
+ frac_start++;
+
+ cpctrl = pll_get_cpctrl(frac_start, ref_clk, false);
+ rctrl = pll_get_rctrl(frac_start, false);
+ cctrl = pll_get_cctrl(frac_start, false);
+ integloop_gain = pll_get_integloop_gain(frac_start, bclk,
+ ref_clk, false);
+
+ fdata = pd.vco_freq;
+ do_div(fdata, pd.vco_ratio);
+
+ pll_cmp = pll_get_pll_cmp(fdata, ref_clk);
+
+ DBG("VCO freq: %llu", pd.vco_freq);
+ DBG("fdata: %llu", fdata);
+ DBG("pix_clk: %lu", pix_clk);
+ DBG("tmds clk: %llu", tmds_clk);
+ DBG("HSCLK_SEL: %d", pd.hsclk_divsel);
+ DBG("DEC_START: %llu", dec_start);
+ DBG("DIV_FRAC_START: %llu", frac_start);
+ DBG("PLL_CPCTRL: %u", cpctrl);
+ DBG("PLL_RCTRL: %u", rctrl);
+ DBG("PLL_CCTRL: %u", cctrl);
+ DBG("INTEGLOOP_GAIN: %u", integloop_gain);
+ DBG("TX_BAND: %d", pd.tx_band_sel);
+ DBG("PLL_CMP: %u", pll_cmp);
+
+ /* Convert these values to register specific values */
+ if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD)
+ cfg->com_svs_mode_clk_sel = 1;
+ else
+ cfg->com_svs_mode_clk_sel = 2;
+
+ cfg->com_hsclk_sel = (0x20 | pd.hsclk_divsel);
+ cfg->com_pll_cctrl_mode0 = cctrl;
+ cfg->com_pll_rctrl_mode0 = rctrl;
+ cfg->com_cp_ctrl_mode0 = cpctrl;
+ cfg->com_dec_start_mode0 = dec_start;
+ cfg->com_div_frac_start1_mode0 = (frac_start & 0xff);
+ cfg->com_div_frac_start2_mode0 = ((frac_start & 0xff00) >> 8);
+ cfg->com_div_frac_start3_mode0 = ((frac_start & 0xf0000) >> 16);
+ cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xff);
+ cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xf00) >> 8);
+ cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xff);
+ cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xff00) >> 8);
+ cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
+ cfg->com_lock_cmp_en = 0x0;
+ cfg->com_core_clk_en = 0x2c;
+ cfg->com_coreclk_div = HDMI_CORECLK_DIV;
+ cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x10 : 0x0;
+ cfg->com_vco_tune_ctrl = 0x0;
+
+ cfg->tx_lx_lane_mode[0] =
+ cfg->tx_lx_lane_mode[2] = 0x43;
+
+ cfg->tx_lx_hp_pd_enables[0] =
+ cfg->tx_lx_hp_pd_enables[1] =
+ cfg->tx_lx_hp_pd_enables[2] = 0x0c;
+ cfg->tx_lx_hp_pd_enables[3] = 0x3;
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++)
+ cfg->tx_lx_tx_band[i] = pd.tx_band_sel + 4;
+
+ if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
+ cfg->tx_lx_tx_drv_lvl[0] =
+ cfg->tx_lx_tx_drv_lvl[1] =
+ cfg->tx_lx_tx_drv_lvl[2] = 0x25;
+ cfg->tx_lx_tx_drv_lvl[3] = 0x22;
+
+ cfg->tx_lx_tx_emp_post1_lvl[0] =
+ cfg->tx_lx_tx_emp_post1_lvl[1] =
+ cfg->tx_lx_tx_emp_post1_lvl[2] = 0x23;
+ cfg->tx_lx_tx_emp_post1_lvl[3] = 0x27;
+
+ cfg->tx_lx_vmode_ctrl1[0] =
+ cfg->tx_lx_vmode_ctrl1[1] =
+ cfg->tx_lx_vmode_ctrl1[2] =
+ cfg->tx_lx_vmode_ctrl1[3] = 0x00;
+
+ cfg->tx_lx_vmode_ctrl2[0] =
+ cfg->tx_lx_vmode_ctrl2[1] =
+ cfg->tx_lx_vmode_ctrl2[2] = 0x0D;
+
+ cfg->tx_lx_vmode_ctrl2[3] = 0x00;
+ } else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ cfg->tx_lx_tx_drv_lvl[i] = 0x25;
+ cfg->tx_lx_tx_emp_post1_lvl[i] = 0x23;
+ cfg->tx_lx_vmode_ctrl1[i] = 0x00;
+ }
+
+ cfg->tx_lx_vmode_ctrl2[0] =
+ cfg->tx_lx_vmode_ctrl2[1] =
+ cfg->tx_lx_vmode_ctrl2[2] = 0x0D;
+ cfg->tx_lx_vmode_ctrl2[3] = 0x00;
+ } else {
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ cfg->tx_lx_tx_drv_lvl[i] = 0x20;
+ cfg->tx_lx_tx_emp_post1_lvl[i] = 0x20;
+ cfg->tx_lx_vmode_ctrl1[i] = 0x00;
+ cfg->tx_lx_vmode_ctrl2[i] = 0x0E;
+ }
+ }
+
+ DBG("com_svs_mode_clk_sel = 0x%x", cfg->com_svs_mode_clk_sel);
+ DBG("com_hsclk_sel = 0x%x", cfg->com_hsclk_sel);
+ DBG("com_lock_cmp_en = 0x%x", cfg->com_lock_cmp_en);
+ DBG("com_pll_cctrl_mode0 = 0x%x", cfg->com_pll_cctrl_mode0);
+ DBG("com_pll_rctrl_mode0 = 0x%x", cfg->com_pll_rctrl_mode0);
+ DBG("com_cp_ctrl_mode0 = 0x%x", cfg->com_cp_ctrl_mode0);
+ DBG("com_dec_start_mode0 = 0x%x", cfg->com_dec_start_mode0);
+ DBG("com_div_frac_start1_mode0 = 0x%x", cfg->com_div_frac_start1_mode0);
+ DBG("com_div_frac_start2_mode0 = 0x%x", cfg->com_div_frac_start2_mode0);
+ DBG("com_div_frac_start3_mode0 = 0x%x", cfg->com_div_frac_start3_mode0);
+ DBG("com_integloop_gain0_mode0 = 0x%x", cfg->com_integloop_gain0_mode0);
+ DBG("com_integloop_gain1_mode0 = 0x%x", cfg->com_integloop_gain1_mode0);
+ DBG("com_lock_cmp1_mode0 = 0x%x", cfg->com_lock_cmp1_mode0);
+ DBG("com_lock_cmp2_mode0 = 0x%x", cfg->com_lock_cmp2_mode0);
+ DBG("com_lock_cmp3_mode0 = 0x%x", cfg->com_lock_cmp3_mode0);
+ DBG("com_core_clk_en = 0x%x", cfg->com_core_clk_en);
+ DBG("com_coreclk_div = 0x%x", cfg->com_coreclk_div);
+ DBG("phy_mode = 0x%x", cfg->phy_mode);
+
+ DBG("tx_l0_lane_mode = 0x%x", cfg->tx_lx_lane_mode[0]);
+ DBG("tx_l2_lane_mode = 0x%x", cfg->tx_lx_lane_mode[2]);
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ DBG("tx_l%d_tx_band = 0x%x", i, cfg->tx_lx_tx_band[i]);
+ DBG("tx_l%d_tx_drv_lvl = 0x%x", i, cfg->tx_lx_tx_drv_lvl[i]);
+ DBG("tx_l%d_tx_emp_post1_lvl = 0x%x", i,
+ cfg->tx_lx_tx_emp_post1_lvl[i]);
+ DBG("tx_l%d_vmode_ctrl1 = 0x%x", i, cfg->tx_lx_vmode_ctrl1[i]);
+ DBG("tx_l%d_vmode_ctrl2 = 0x%x", i, cfg->tx_lx_vmode_ctrl2[i]);
+ }
+
+ return 0;
+}
+
+static int hdmi_8996_pll_set_clk_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw);
+ struct hdmi_phy *phy = pll_get_phy(pll);
+ struct hdmi_8996_phy_pll_reg_cfg cfg;
+ int i, ret;
+
+ memset(&cfg, 0x00, sizeof(cfg));
+
+ ret = pll_calculate(rate, parent_rate, &cfg);
+ if (ret) {
+ DRM_ERROR("PLL calculation failed\n");
+ return ret;
+ }
+
+ /* Initially shut down PHY */
+ DBG("Disabling PHY");
+ hdmi_phy_write(phy, REG_HDMI_8996_PHY_PD_CTL, 0x0);
+ udelay(500);
+
+ /* Power up sequence */
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BG_CTRL, 0x04);
+
+ hdmi_phy_write(phy, REG_HDMI_8996_PHY_PD_CTL, 0x1);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_RESETSM_CNTRL, 0x20);
+ hdmi_phy_write(phy, REG_HDMI_8996_PHY_TX0_TX1_LANE_CTL, 0x0F);
+ hdmi_phy_write(phy, REG_HDMI_8996_PHY_TX2_TX3_LANE_CTL, 0x0F);
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_CLKBUF_ENABLE,
+ 0x03);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_TX_BAND,
+ cfg.tx_lx_tx_band[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_RESET_TSYNC_EN,
+ 0x03);
+ }
+
+ hdmi_tx_chan_write(pll, 0, REG_HDMI_PHY_QSERDES_TX_LX_LANE_MODE,
+ cfg.tx_lx_lane_mode[0]);
+ hdmi_tx_chan_write(pll, 2, REG_HDMI_PHY_QSERDES_TX_LX_LANE_MODE,
+ cfg.tx_lx_lane_mode[2]);
+
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1E);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x07);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SYSCLK_EN_SEL, 0x37);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SYS_CLK_CTRL, 0x02);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CLK_ENABLE1, 0x0E);
+
+ /* Bypass VCO calibration */
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SVS_MODE_CLK_SEL,
+ cfg.com_svs_mode_clk_sel);
+
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BG_TRIM, 0x0F);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_PLL_IVCO, 0x0F);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_CTRL,
+ cfg.com_vco_tune_ctrl);
+
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_BG_CTRL, 0x06);
+
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CLK_SELECT, 0x30);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_HSCLK_SEL,
+ cfg.com_hsclk_sel);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP_EN,
+ cfg.com_lock_cmp_en);
+
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_PLL_CCTRL_MODE0,
+ cfg.com_pll_cctrl_mode0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_PLL_RCTRL_MODE0,
+ cfg.com_pll_rctrl_mode0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CP_CTRL_MODE0,
+ cfg.com_cp_ctrl_mode0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DEC_START_MODE0,
+ cfg.com_dec_start_mode0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START1_MODE0,
+ cfg.com_div_frac_start1_mode0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START2_MODE0,
+ cfg.com_div_frac_start2_mode0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_DIV_FRAC_START3_MODE0,
+ cfg.com_div_frac_start3_mode0);
+
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE0,
+ cfg.com_integloop_gain0_mode0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE0,
+ cfg.com_integloop_gain1_mode0);
+
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE0,
+ cfg.com_lock_cmp1_mode0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE0,
+ cfg.com_lock_cmp2_mode0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE0,
+ cfg.com_lock_cmp3_mode0);
+
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_VCO_TUNE_MAP, 0x00);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CORE_CLK_EN,
+ cfg.com_core_clk_en);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CORECLK_DIV,
+ cfg.com_coreclk_div);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_CMN_CONFIG, 0x02);
+
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_RESCODE_DIV_NUM, 0x15);
+
+ /* TX lanes setup (TX 0/1/2/3) */
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL,
+ cfg.tx_lx_tx_drv_lvl[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_TX_EMP_POST1_LVL,
+ cfg.tx_lx_tx_emp_post1_lvl[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL1,
+ cfg.tx_lx_vmode_ctrl1[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_VMODE_CTRL2,
+ cfg.tx_lx_vmode_ctrl2[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_TX_DRV_LVL_OFFSET,
+ 0x00);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_RES_CODE_LANE_OFFSET,
+ 0x00);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_TRAN_DRVR_EMP_EN,
+ 0x03);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_PARRATE_REC_DETECT_IDLE_EN,
+ 0x40);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_HP_PD_ENABLES,
+ cfg.tx_lx_hp_pd_enables[i]);
+ }
+
+ hdmi_phy_write(phy, REG_HDMI_8996_PHY_MODE, cfg.phy_mode);
+ hdmi_phy_write(phy, REG_HDMI_8996_PHY_PD_CTL, 0x1F);
+
+ /*
+ * Ensure that vco configuration gets flushed to hardware before
+ * enabling the PLL
+ */
+ wmb();
+
+ return 0;
+}
+
+static int hdmi_8996_phy_ready_status(struct hdmi_phy *phy)
+{
+ u32 nb_tries = HDMI_PLL_POLL_MAX_READS;
+ unsigned long timeout = HDMI_PLL_POLL_TIMEOUT_US;
+ u32 status;
+ int phy_ready = 0;
+
+ DBG("Waiting for PHY ready");
+
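+	/*
+	 * Busy-wait poll the PHY status register, with up to
+	 * HDMI_PLL_POLL_MAX_READS reads spaced HDMI_PLL_POLL_TIMEOUT_US
+	 * apart, until the ready bit (BIT(0)) is set:
+	 */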
+ while (nb_tries--) {
+ status = hdmi_phy_read(phy, REG_HDMI_8996_PHY_STATUS);
+ phy_ready = status & BIT(0);
+
+ if (phy_ready)
+ break;
+
+ udelay(timeout);
+ }
+
+ DBG("PHY is %sready", phy_ready ? "" : "*not* ");
+
+ return phy_ready;
+}
+
+static int hdmi_8996_pll_lock_status(struct hdmi_pll_8996 *pll)
+{
+ u32 status;
+ int nb_tries = HDMI_PLL_POLL_MAX_READS;
+ unsigned long timeout = HDMI_PLL_POLL_TIMEOUT_US;
+ int pll_locked = 0;
+
+ DBG("Waiting for PLL lock");
+
+ while (nb_tries--) {
+ status = hdmi_pll_read(pll,
+ REG_HDMI_PHY_QSERDES_COM_C_READY_STATUS);
+ pll_locked = status & BIT(0);
+
+ if (pll_locked)
+ break;
+
+ udelay(timeout);
+ }
+
+ DBG("HDMI PLL is %slocked", pll_locked ? "" : "*not* ");
+
+ return pll_locked;
+}
+
+static int hdmi_8996_pll_prepare(struct clk_hw *hw)
+{
+ struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw);
+ struct hdmi_phy *phy = pll_get_phy(pll);
+ int i, ret = 0;
+
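+	/* Enable the PLL via PHY_CFG and wait for it to lock before
+	 * bringing up the TX lane drivers and disabling SSC: */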
+ hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x1);
+ udelay(100);
+
+ hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x19);
+ udelay(100);
+
+ ret = hdmi_8996_pll_lock_status(pll);
+ if (!ret)
+ return ret;
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++)
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_PHY_QSERDES_TX_LX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
+ 0x6F);
+
+ /* Disable SSC */
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_PER1, 0x0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_PER2, 0x0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE1, 0x0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_STEP_SIZE2, 0x0);
+ hdmi_pll_write(pll, REG_HDMI_PHY_QSERDES_COM_SSC_EN_CENTER, 0x2);
+
+ ret = hdmi_8996_phy_ready_status(phy);
+ if (!ret)
+ return ret;
+
+ /* Restart the retiming buffer */
+ hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x18);
+ udelay(1);
+ hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x19);
+
+ return 0;
+}
+
+static long hdmi_8996_pll_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ if (rate < HDMI_PCLK_MIN_FREQ)
+ return HDMI_PCLK_MIN_FREQ;
+ else if (rate > HDMI_PCLK_MAX_FREQ)
+ return HDMI_PCLK_MAX_FREQ;
+ else
+ return rate;
+}
+
+static unsigned long hdmi_8996_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw);
+ u64 fdata;
+ u32 cmp1, cmp2, cmp3, pll_cmp;
+
+ cmp1 = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP1_MODE0);
+ cmp2 = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP2_MODE0);
+ cmp3 = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_LOCK_CMP3_MODE0);
+
+ pll_cmp = cmp1 | (cmp2 << 8) | (cmp3 << 16);
+
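+	/*
+	 * Reverse the calculation done at set_rate time: the lock-compare
+	 * value was derived from fdata (the per-lane TMDS bit clock), and
+	 * the pixel clock is fdata / 10, since TMDS carries 10 bits per
+	 * pixel clock cycle on each lane.
+	 */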
+ fdata = pll_cmp_to_fdata(pll_cmp + 1, parent_rate);
+
+ do_div(fdata, 10);
+
+ return fdata;
+}
+
+static void hdmi_8996_pll_unprepare(struct clk_hw *hw)
+{
+ struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw);
+ struct hdmi_phy *phy = pll_get_phy(pll);
+
+ hdmi_phy_write(phy, REG_HDMI_8996_PHY_CFG, 0x6);
+ usleep_range(100, 150);
+}
+
+static int hdmi_8996_pll_is_enabled(struct clk_hw *hw)
+{
+ struct hdmi_pll_8996 *pll = hw_clk_to_pll(hw);
+ u32 status;
+ int pll_locked;
+
+ status = hdmi_pll_read(pll, REG_HDMI_PHY_QSERDES_COM_C_READY_STATUS);
+ pll_locked = status & BIT(0);
+
+ return pll_locked;
+}
+
+static const struct clk_ops hdmi_8996_pll_ops = {
+ .set_rate = hdmi_8996_pll_set_clk_rate,
+ .round_rate = hdmi_8996_pll_round_rate,
+ .recalc_rate = hdmi_8996_pll_recalc_rate,
+ .prepare = hdmi_8996_pll_prepare,
+ .unprepare = hdmi_8996_pll_unprepare,
+ .is_enabled = hdmi_8996_pll_is_enabled,
+};
+
+static const struct clk_init_data pll_init = {
+ .name = "hdmipll",
+ .ops = &hdmi_8996_pll_ops,
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
+ .num_parents = 1,
+ .flags = CLK_IGNORE_UNUSED,
+};
+
+int msm_hdmi_pll_8996_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hdmi_pll_8996 *pll;
+ int i, ret;
+
+ pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return -ENOMEM;
+
+ pll->pdev = pdev;
+
+ pll->mmio_qserdes_com = msm_ioremap(pdev, "hdmi_pll");
+ if (IS_ERR(pll->mmio_qserdes_com)) {
+ DRM_DEV_ERROR(dev, "failed to map pll base\n");
+ return -ENOMEM;
+ }
+
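+	/* Each of the four TX lanes has its own register block
+	 * ("hdmi_tx_l0" .. "hdmi_tx_l3"): */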
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ char name[32];
+
+ snprintf(name, sizeof(name), "hdmi_tx_l%d", i);
+
+ pll->mmio_qserdes_tx[i] = msm_ioremap(pdev, name);
+ if (IS_ERR(pll->mmio_qserdes_tx[i])) {
+ DRM_DEV_ERROR(dev, "failed to map pll base\n");
+ return -ENOMEM;
+ }
+ }
+ pll->clk_hw.init = &pll_init;
+
+ ret = devm_clk_hw_register(dev, &pll->clk_hw);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to register pll clock\n");
+ return ret;
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &pll->clk_hw);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "%s: failed to register clk provider: %d\n", __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const char * const hdmi_phy_8996_reg_names[] = {
+ "vddio",
+ "vcca",
+};
+
+static const char * const hdmi_phy_8996_clk_names[] = {
+ "iface", "ref",
+};
+
+const struct hdmi_phy_cfg msm_hdmi_phy_8996_cfg = {
+ .type = MSM_HDMI_PHY_8996,
+ .reg_names = hdmi_phy_8996_reg_names,
+ .num_regs = ARRAY_SIZE(hdmi_phy_8996_reg_names),
+ .clk_names = hdmi_phy_8996_clk_names,
+ .num_clks = ARRAY_SIZE(hdmi_phy_8996_clk_names),
+};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
new file mode 100644
index 000000000..1d97640d8
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/delay.h>
+
+#include "hdmi.h"
+
+static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy,
+		unsigned long pixclock)
+{
+ /* De-serializer delay D/C for non-lbk mode: */
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG0,
+ HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(3));
+
+ if (pixclock == 27000000) {
+ /* video_format == HDMI_VFRMT_720x480p60_16_9 */
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG1,
+ HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
+ HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(3));
+ } else {
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG1,
+ HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
+ HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(4));
+ }
+
+ /* No matter what, start from the power down mode: */
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_PWRGEN |
+ HDMI_8x60_PHY_REG2_PD_PLL |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+
+ /* Turn PowerGen on: */
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_PLL |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+
+ /* Turn PLL power on: */
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+
+	/* Drive PLL enable high after de-asserting PLL power-down: */
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG3,
+ HDMI_8x60_PHY_REG3_PLL_ENABLE);
+
+ /* ASIC power on; PHY REG9 = 0 */
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG9, 0);
+
+	/* Enable PLL lock detect; PLL lock det will go high after lock.
+	 * Also enable the re-time logic.
+	 */
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG12,
+ HDMI_8x60_PHY_REG12_RETIMING_EN |
+ HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN);
+
+ /* Drivers are on: */
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_DESER);
+
+ /* If the RX detector is needed: */
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG4, 0);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG5, 0);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG6, 0);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG7, 0);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG8, 0);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG9, 0);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG10, 0);
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG11, 0);
+
+ /* If we want to use lock enable based on counting: */
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG12,
+ HDMI_8x60_PHY_REG12_RETIMING_EN |
+ HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN |
+ HDMI_8x60_PHY_REG12_FORCE_LOCK);
+}
+
+static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy)
+{
+ /* Assert RESET PHY from controller */
+ hdmi_phy_write(phy, REG_HDMI_PHY_CTRL,
+ HDMI_PHY_CTRL_SW_RESET);
+ udelay(10);
+ /* De-assert RESET PHY from controller */
+ hdmi_phy_write(phy, REG_HDMI_PHY_CTRL, 0);
+ /* Turn off Driver */
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+ udelay(10);
+ /* Disable PLL */
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG3, 0);
+ /* Power down PHY, but keep RX-sense: */
+ hdmi_phy_write(phy, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
+ HDMI_8x60_PHY_REG2_PD_PWRGEN |
+ HDMI_8x60_PHY_REG2_PD_PLL |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+}
+
+static const char * const hdmi_phy_8x60_reg_names[] = {
+ "core-vdda",
+};
+
+static const char * const hdmi_phy_8x60_clk_names[] = {
+ "slave_iface",
+};
+
+const struct hdmi_phy_cfg msm_hdmi_phy_8x60_cfg = {
+ .type = MSM_HDMI_PHY_8x60,
+ .powerup = hdmi_phy_8x60_powerup,
+ .powerdown = hdmi_phy_8x60_powerdown,
+ .reg_names = hdmi_phy_8x60_reg_names,
+ .num_regs = ARRAY_SIZE(hdmi_phy_8x60_reg_names),
+ .clk_names = hdmi_phy_8x60_clk_names,
+ .num_clks = ARRAY_SIZE(hdmi_phy_8x60_clk_names),
+};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
new file mode 100644
index 000000000..a2a6940e1
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include "hdmi.h"
+
+static void hdmi_phy_8x74_powerup(struct hdmi_phy *phy,
+		unsigned long pixclock)
+{
+ hdmi_phy_write(phy, REG_HDMI_8x74_ANA_CFG0, 0x1b);
+ hdmi_phy_write(phy, REG_HDMI_8x74_ANA_CFG1, 0xf2);
+ hdmi_phy_write(phy, REG_HDMI_8x74_BIST_CFG0, 0x0);
+ hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN0, 0x0);
+ hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN1, 0x0);
+ hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN2, 0x0);
+ hdmi_phy_write(phy, REG_HDMI_8x74_BIST_PATN3, 0x0);
+ hdmi_phy_write(phy, REG_HDMI_8x74_PD_CTRL1, 0x20);
+}
+
+static void hdmi_phy_8x74_powerdown(struct hdmi_phy *phy)
+{
+ hdmi_phy_write(phy, REG_HDMI_8x74_PD_CTRL0, 0x7f);
+}
+
+static const char * const hdmi_phy_8x74_reg_names[] = {
+ "core-vdda",
+ "vddio",
+};
+
+static const char * const hdmi_phy_8x74_clk_names[] = {
+ "iface", "alt_iface"
+};
+
+const struct hdmi_phy_cfg msm_hdmi_phy_8x74_cfg = {
+ .type = MSM_HDMI_PHY_8x74,
+ .powerup = hdmi_phy_8x74_powerup,
+ .powerdown = hdmi_phy_8x74_powerdown,
+ .reg_names = hdmi_phy_8x74_reg_names,
+ .num_regs = ARRAY_SIZE(hdmi_phy_8x74_reg_names),
+ .clk_names = hdmi_phy_8x74_clk_names,
+ .num_clks = ARRAY_SIZE(hdmi_phy_8x74_clk_names),
+};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
new file mode 100644
index 000000000..be4b0b67e
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+
+#include "hdmi.h"
+
+struct hdmi_pll_8960 {
+ struct platform_device *pdev;
+ struct clk_hw clk_hw;
+ void __iomem *mmio;
+
+ unsigned long pixclk;
+};
+
+#define hw_clk_to_pll(x) container_of(x, struct hdmi_pll_8960, clk_hw)
+
+/*
+ * HDMI PLL:
+ *
+ * To get the parent clock set up properly, we need to plug the HDMI PLL
+ * configuration into the common clock framework.
+ */
+
+struct pll_rate {
+ unsigned long rate;
+ int num_reg;
+ struct {
+ u32 val;
+ u32 reg;
+ } conf[32];
+};
+
+/* NOTE: keep sorted highest freq to lowest: */
+static const struct pll_rate freqtbl[] = {
+ { 154000000, 14, {
+ { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x0d, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4d, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x5e, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0x42, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ }
+ },
+ /* 1080p60/1080p50 case */
+ { 148500000, 27, {
+ { 0x02, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+ { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
+ { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
+ { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
+ { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
+ { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
+ { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
+ }
+ },
+ { 108000000, 13, {
+ { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x21, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x1c, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ }
+ },
+ /* 720p60/720p50/1080i60/1080i50/1080p24/1080p30/1080p25 */
+ { 74250000, 8, {
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x12, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ }
+ },
+ { 74176000, 14, {
+ { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0xe5, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x0c, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x7d, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0xbc, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ }
+ },
+ { 65000000, 14, {
+ { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x8a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x0b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4b, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0x09, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ }
+ },
+ /* 480p60/480i60 */
+ { 27030000, 18, {
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x38, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+ { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xff, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4e, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0xd7, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0x03, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
+ }
+ },
+ /* 576p50/576i50 */
+ { 27000000, 27, {
+ { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+ { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
+ { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
+ { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
+ { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
+ { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
+ { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
+ }
+ },
+ /* 640x480p60 */
+ { 25200000, 27, {
+ { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+ { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
+ { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x77, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
+ { 0x20, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
+ { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
+ { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
+ { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
+ { 0xf4, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
+ }
+ },
+};
+
+static inline void pll_write(struct hdmi_pll_8960 *pll, u32 reg, u32 data)
+{
+ msm_writel(data, pll->mmio + reg);
+}
+
+static inline u32 pll_read(struct hdmi_pll_8960 *pll, u32 reg)
+{
+ return msm_readl(pll->mmio + reg);
+}
+
+static inline struct hdmi_phy *pll_get_phy(struct hdmi_pll_8960 *pll)
+{
+ return platform_get_drvdata(pll->pdev);
+}
+
+static int hdmi_pll_enable(struct clk_hw *hw)
+{
+ struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw);
+ struct hdmi_phy *phy = pll_get_phy(pll);
+ int timeout_count, pll_lock_retry = 10;
+ unsigned int val;
+
+ DBG("");
+
+ /* Assert PLL S/W reset */
+ pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d);
+ pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0, 0x10);
+ pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1, 0x1a);
+
+	/* Wait for a short time before de-asserting to allow the
+	 * hardware to complete its job. This much delay should be
+	 * fine for the hardware to assert and de-assert.
+	 */
+ udelay(10);
+
+ /* De-assert PLL S/W reset */
+ pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d);
+
+ val = hdmi_phy_read(phy, REG_HDMI_8960_PHY_REG12);
+ val |= HDMI_8960_PHY_REG12_SW_RESET;
+ /* Assert PHY S/W reset */
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val);
+ val &= ~HDMI_8960_PHY_REG12_SW_RESET;
+ /*
+	 * Wait for a short time before de-asserting to allow the hardware to
+	 * complete its job. This much delay should be fine for the hardware
+	 * to assert and de-assert.
+ */
+ udelay(10);
+ /* De-assert PHY S/W reset */
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x3f);
+
+ val = hdmi_phy_read(phy, REG_HDMI_8960_PHY_REG12);
+ val |= HDMI_8960_PHY_REG12_PWRDN_B;
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val);
+ /* Wait 10 us for enabling global power for PHY */
+ mb();
+ udelay(10);
+
+ val = pll_read(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B);
+ val |= HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B;
+ val &= ~HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL;
+ pll_write(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B, val);
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG2, 0x80);
+
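+	/* Poll for PLL lock; on timeout, pulse a PLL software reset and
+	 * retry, up to pll_lock_retry attempts in total: */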
+ timeout_count = 1000;
+ while (--pll_lock_retry > 0) {
+ /* are we there yet? */
+ val = pll_read(pll, REG_HDMI_8960_PHY_PLL_STATUS0);
+ if (val & HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK)
+ break;
+
+ udelay(1);
+
+ if (--timeout_count > 0)
+ continue;
+
+ /*
+ * PLL has still not locked.
+ * Do a software reset and try again
+ * Assert PLL S/W reset first
+ */
+ pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d);
+ udelay(10);
+ pll_write(pll, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d);
+
+ /*
+ * Wait for a short duration for the PLL calibration
+ * before checking if the PLL gets locked
+ */
+ udelay(350);
+
+ timeout_count = 1000;
+ }
+
+ return 0;
+}
+
+static void hdmi_pll_disable(struct clk_hw *hw)
+{
+ struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw);
+ struct hdmi_phy *phy = pll_get_phy(pll);
+ unsigned int val;
+
+ DBG("");
+
+ val = hdmi_phy_read(phy, REG_HDMI_8960_PHY_REG12);
+ val &= ~HDMI_8960_PHY_REG12_PWRDN_B;
+ hdmi_phy_write(phy, REG_HDMI_8960_PHY_REG12, val);
+
+ val = pll_read(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B);
+ val |= HDMI_8960_PHY_REG12_SW_RESET;
+ val &= ~HDMI_8960_PHY_REG12_PWRDN_B;
+ pll_write(pll, REG_HDMI_8960_PHY_PLL_PWRDN_B, val);
+ /* Make sure HDMI PHY/PLL are powered down */
+ mb();
+}
+
+static const struct pll_rate *find_rate(unsigned long rate)
+{
+ int i;
+
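+	/* freqtbl is sorted highest to lowest; pick the smallest tabled
+	 * rate that is still >= the requested rate (or the lowest entry
+	 * if the request is below all of them): */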
+ for (i = 1; i < ARRAY_SIZE(freqtbl); i++)
+ if (rate > freqtbl[i].rate)
+ return &freqtbl[i - 1];
+
+ return &freqtbl[i - 1];
+}
+
+static unsigned long hdmi_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw);
+
+ return pll->pixclk;
+}
+
+static long hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ const struct pll_rate *pll_rate = find_rate(rate);
+
+ return pll_rate->rate;
+}
+
+static int hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct hdmi_pll_8960 *pll = hw_clk_to_pll(hw);
+ const struct pll_rate *pll_rate = find_rate(rate);
+ int i;
+
+ DBG("rate=%lu", rate);
+
+ for (i = 0; i < pll_rate->num_reg; i++)
+ pll_write(pll, pll_rate->conf[i].reg, pll_rate->conf[i].val);
+
+ pll->pixclk = rate;
+
+ return 0;
+}
+
+static const struct clk_ops hdmi_pll_ops = {
+ .enable = hdmi_pll_enable,
+ .disable = hdmi_pll_disable,
+ .recalc_rate = hdmi_pll_recalc_rate,
+ .round_rate = hdmi_pll_round_rate,
+ .set_rate = hdmi_pll_set_rate,
+};
+
+static const char * const hdmi_pll_parents[] = {
+ "pxo",
+};
+
+static struct clk_init_data pll_init = {
+ .name = "hdmi_pll",
+ .ops = &hdmi_pll_ops,
+ .parent_names = hdmi_pll_parents,
+ .num_parents = ARRAY_SIZE(hdmi_pll_parents),
+ .flags = CLK_IGNORE_UNUSED,
+};
+
+int msm_hdmi_pll_8960_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hdmi_pll_8960 *pll;
+ struct clk *clk;
+ int i;
+
+ /* sanity check: */
+ for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++)
+ if (WARN_ON(freqtbl[i].rate < freqtbl[i + 1].rate))
+ return -EINVAL;
+
+ pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return -ENOMEM;
+
+ pll->mmio = msm_ioremap(pdev, "hdmi_pll");
+ if (IS_ERR(pll->mmio)) {
+ DRM_DEV_ERROR(dev, "failed to map pll base\n");
+ return -ENOMEM;
+ }
+
+ pll->pdev = pdev;
+ pll->clk_hw.init = &pll_init;
+
+ clk = devm_clk_register(dev, &pll->clk_hw);
+ if (IS_ERR(clk)) {
+ DRM_DEV_ERROR(dev, "failed to register pll clock\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
new file mode 100644
index 000000000..3edc698c4
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -0,0 +1,61 @@
+#ifndef QFPROM_XML
+#define QFPROM_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define REG_QFPROM_CONFIG_ROW0_LSB 0x00000238
+#define QFPROM_CONFIG_ROW0_LSB_HDMI_DISABLE 0x00200000
+#define QFPROM_CONFIG_ROW0_LSB_HDCP_DISABLE 0x00400000
+
+
+#endif /* QFPROM_XML */
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
new file mode 100644
index 000000000..1686fbb61
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_vblank.h>
+
+#include "msm_atomic_trace.h"
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_kms.h"
+
+/*
+ * Helpers to control vblanks while we flush, basically just to ensure
+ * that vblank accounting is switched on, so we get a valid sequence
+ * number/timestamp on pageflip events (if requested).
+ */
+
+static void vblank_get(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct drm_crtc *crtc;
+
+ for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
+ if (!crtc->state->active)
+ continue;
+ drm_crtc_vblank_get(crtc);
+ }
+}
+
+static void vblank_put(struct msm_kms *kms, unsigned crtc_mask)
+{
+ struct drm_crtc *crtc;
+
+ for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
+ if (!crtc->state->active)
+ continue;
+ drm_crtc_vblank_put(crtc);
+ }
+}
+
+static void lock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
+{
+ int crtc_index;
+ struct drm_crtc *crtc;
+
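+	/* Take the per-crtc commit locks in crtc-index order; the index
+	 * doubles as the lockdep subclass so that holding several crtc
+	 * locks at once does not trip lockdep: */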
+ for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
+ crtc_index = drm_crtc_index(crtc);
+ mutex_lock_nested(&kms->commit_lock[crtc_index], crtc_index);
+ }
+}
+
+static void unlock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
+{
+ struct drm_crtc *crtc;
+
+ for_each_crtc_mask_reverse(kms->dev, crtc, crtc_mask)
+ mutex_unlock(&kms->commit_lock[drm_crtc_index(crtc)]);
+}
+
+static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
+{
+ unsigned crtc_mask = BIT(crtc_idx);
+
+ trace_msm_atomic_async_commit_start(crtc_mask);
+
+ lock_crtcs(kms, crtc_mask);
+
+ if (!(kms->pending_crtc_mask & crtc_mask)) {
+ unlock_crtcs(kms, crtc_mask);
+ goto out;
+ }
+
+ kms->pending_crtc_mask &= ~crtc_mask;
+
+ kms->funcs->enable_commit(kms);
+
+ vblank_get(kms, crtc_mask);
+
+ /*
+ * Flush hardware updates:
+ */
+ trace_msm_atomic_flush_commit(crtc_mask);
+ kms->funcs->flush_commit(kms, crtc_mask);
+
+ /*
+ * Wait for flush to complete:
+ */
+ trace_msm_atomic_wait_flush_start(crtc_mask);
+ kms->funcs->wait_flush(kms, crtc_mask);
+ trace_msm_atomic_wait_flush_finish(crtc_mask);
+
+ vblank_put(kms, crtc_mask);
+
+ kms->funcs->complete_commit(kms, crtc_mask);
+ unlock_crtcs(kms, crtc_mask);
+ kms->funcs->disable_commit(kms);
+
+out:
+ trace_msm_atomic_async_commit_finish(crtc_mask);
+}
+
+static void msm_atomic_pending_work(struct kthread_work *work)
+{
+ struct msm_pending_timer *timer = container_of(work,
+ struct msm_pending_timer, work.work);
+
+ msm_atomic_async_commit(timer->kms, timer->crtc_idx);
+}
+
+int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
+ struct msm_kms *kms, int crtc_idx)
+{
+ timer->kms = kms;
+ timer->crtc_idx = crtc_idx;
+
+ timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx);
+ if (IS_ERR(timer->worker)) {
+ int ret = PTR_ERR(timer->worker);
+ timer->worker = NULL;
+ return ret;
+ }
+ sched_set_fifo(timer->worker->task);
+
+ msm_hrtimer_work_init(&timer->work, timer->worker,
+ msm_atomic_pending_work,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+
+ return 0;
+}
+
+void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer)
+{
+ if (timer->worker)
+ kthread_destroy_worker(timer->worker);
+}
+
+static bool can_do_async(struct drm_atomic_state *state,
+ struct drm_crtc **async_crtc)
+{
+ struct drm_connector_state *connector_state;
+ struct drm_connector *connector;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ int i, num_crtcs = 0;
+
+ if (!(state->legacy_cursor_update || state->async_update))
+ return false;
+
+ /* any connector change, means slow path: */
+ for_each_new_connector_in_state(state, connector, connector_state, i)
+ return false;
+
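+	/* the async path only supports a single crtc, with no modeset: */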
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ return false;
+ if (++num_crtcs > 1)
+ return false;
+ *async_crtc = crtc;
+ }
+
+ return true;
+}
+
+/* Get bitmask of crtcs that will need to be flushed. The bitmask
+ * can be used with the for_each_crtc_mask() iterator to iterate over
+ * the affected crtcs without needing to preserve the atomic state.
+ */
+static unsigned get_crtc_mask(struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ unsigned i, mask = 0;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i)
+ mask |= drm_crtc_mask(crtc);
+
+ return mask;
+}
+
+void msm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ struct drm_crtc *async_crtc = NULL;
+ unsigned crtc_mask = get_crtc_mask(state);
+ bool async = kms->funcs->vsync_time &&
+ can_do_async(state, &async_crtc);
+
+ trace_msm_atomic_commit_tail_start(async, crtc_mask);
+
+ kms->funcs->enable_commit(kms);
+
+ /*
+ * Ensure any previous (potentially async) commit has
+ * completed:
+ */
+ lock_crtcs(kms, crtc_mask);
+ trace_msm_atomic_wait_flush_start(crtc_mask);
+ kms->funcs->wait_flush(kms, crtc_mask);
+ trace_msm_atomic_wait_flush_finish(crtc_mask);
+
+ /*
+ * Now that there is no in-progress flush, prepare the
+ * current update:
+ */
+ kms->funcs->prepare_commit(kms, state);
+
+ /*
+ * Push atomic updates down to hardware:
+ */
+ drm_atomic_helper_commit_modeset_disables(dev, state);
+ drm_atomic_helper_commit_planes(dev, state, 0);
+ drm_atomic_helper_commit_modeset_enables(dev, state);
+
+ if (async) {
+ struct msm_pending_timer *timer =
+ &kms->pending_timers[drm_crtc_index(async_crtc)];
+
+ /* async updates are limited to single-crtc updates: */
+ WARN_ON(crtc_mask != drm_crtc_mask(async_crtc));
+
+ /*
+ * Start timer if we don't already have an update pending
+ * on this crtc:
+ */
+ if (!(kms->pending_crtc_mask & crtc_mask)) {
+ ktime_t vsync_time, wakeup_time;
+
+ kms->pending_crtc_mask |= crtc_mask;
+
+ vsync_time = kms->funcs->vsync_time(kms, async_crtc);
+ wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1));
+
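+			/* Wake up ~1ms before the expected vsync so the
+			 * deferred flush lands in the upcoming frame: */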
+ msm_hrtimer_queue_work(&timer->work, wakeup_time,
+ HRTIMER_MODE_ABS);
+ }
+
+ kms->funcs->disable_commit(kms);
+ unlock_crtcs(kms, crtc_mask);
+ /*
+ * At this point, from drm core's perspective, we
+ * are done with the atomic update, so we can just
+ * go ahead and signal that it is done:
+ */
+ drm_atomic_helper_commit_hw_done(state);
+ drm_atomic_helper_cleanup_planes(dev, state);
+
+ trace_msm_atomic_commit_tail_finish(async, crtc_mask);
+
+ return;
+ }
+
+ /*
+ * If there is any async flush pending on updated crtcs, fold
+ * them into the current flush.
+ */
+ kms->pending_crtc_mask &= ~crtc_mask;
+
+ vblank_get(kms, crtc_mask);
+
+ /*
+ * Flush hardware updates:
+ */
+ trace_msm_atomic_flush_commit(crtc_mask);
+ kms->funcs->flush_commit(kms, crtc_mask);
+ unlock_crtcs(kms, crtc_mask);
+ /*
+ * Wait for flush to complete:
+ */
+ trace_msm_atomic_wait_flush_start(crtc_mask);
+ kms->funcs->wait_flush(kms, crtc_mask);
+ trace_msm_atomic_wait_flush_finish(crtc_mask);
+
+ vblank_put(kms, crtc_mask);
+
+ lock_crtcs(kms, crtc_mask);
+ kms->funcs->complete_commit(kms, crtc_mask);
+ unlock_crtcs(kms, crtc_mask);
+ kms->funcs->disable_commit(kms);
+
+ drm_atomic_helper_commit_hw_done(state);
+ drm_atomic_helper_cleanup_planes(dev, state);
+
+ trace_msm_atomic_commit_tail_finish(async, crtc_mask);
+}
diff --git a/drivers/gpu/drm/msm/msm_atomic_trace.h b/drivers/gpu/drm/msm/msm_atomic_trace.h
new file mode 100644
index 000000000..b4ca0ed3b
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_atomic_trace.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if !defined(_MSM_GPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MSM_GPU_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM drm_msm_atomic
+#define TRACE_INCLUDE_FILE msm_atomic_trace
+
+TRACE_EVENT(msm_atomic_commit_tail_start,
+ TP_PROTO(bool async, unsigned crtc_mask),
+ TP_ARGS(async, crtc_mask),
+ TP_STRUCT__entry(
+ __field(bool, async)
+ __field(u32, crtc_mask)
+ ),
+ TP_fast_assign(
+ __entry->async = async;
+ __entry->crtc_mask = crtc_mask;
+ ),
+ TP_printk("async=%d crtc_mask=%x",
+ __entry->async, __entry->crtc_mask)
+);
+
+TRACE_EVENT(msm_atomic_commit_tail_finish,
+ TP_PROTO(bool async, unsigned crtc_mask),
+ TP_ARGS(async, crtc_mask),
+ TP_STRUCT__entry(
+ __field(bool, async)
+ __field(u32, crtc_mask)
+ ),
+ TP_fast_assign(
+ __entry->async = async;
+ __entry->crtc_mask = crtc_mask;
+ ),
+ TP_printk("async=%d crtc_mask=%x",
+ __entry->async, __entry->crtc_mask)
+);
+
+TRACE_EVENT(msm_atomic_async_commit_start,
+ TP_PROTO(unsigned crtc_mask),
+ TP_ARGS(crtc_mask),
+ TP_STRUCT__entry(
+ __field(u32, crtc_mask)
+ ),
+ TP_fast_assign(
+ __entry->crtc_mask = crtc_mask;
+ ),
+ TP_printk("crtc_mask=%x",
+ __entry->crtc_mask)
+);
+
+TRACE_EVENT(msm_atomic_async_commit_finish,
+ TP_PROTO(unsigned crtc_mask),
+ TP_ARGS(crtc_mask),
+ TP_STRUCT__entry(
+ __field(u32, crtc_mask)
+ ),
+ TP_fast_assign(
+ __entry->crtc_mask = crtc_mask;
+ ),
+ TP_printk("crtc_mask=%x",
+ __entry->crtc_mask)
+);
+
+TRACE_EVENT(msm_atomic_wait_flush_start,
+ TP_PROTO(unsigned crtc_mask),
+ TP_ARGS(crtc_mask),
+ TP_STRUCT__entry(
+ __field(u32, crtc_mask)
+ ),
+ TP_fast_assign(
+ __entry->crtc_mask = crtc_mask;
+ ),
+ TP_printk("crtc_mask=%x",
+ __entry->crtc_mask)
+);
+
+TRACE_EVENT(msm_atomic_wait_flush_finish,
+ TP_PROTO(unsigned crtc_mask),
+ TP_ARGS(crtc_mask),
+ TP_STRUCT__entry(
+ __field(u32, crtc_mask)
+ ),
+ TP_fast_assign(
+ __entry->crtc_mask = crtc_mask;
+ ),
+ TP_printk("crtc_mask=%x",
+ __entry->crtc_mask)
+);
+
+TRACE_EVENT(msm_atomic_flush_commit,
+ TP_PROTO(unsigned crtc_mask),
+ TP_ARGS(crtc_mask),
+ TP_STRUCT__entry(
+ __field(u32, crtc_mask)
+ ),
+ TP_fast_assign(
+ __entry->crtc_mask = crtc_mask;
+ ),
+ TP_printk("crtc_mask=%x",
+ __entry->crtc_mask)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/msm
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/msm/msm_atomic_tracepoints.c b/drivers/gpu/drm/msm/msm_atomic_tracepoints.c
new file mode 100644
index 000000000..011dc881f
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_atomic_tracepoints.c
@@ -0,0 +1,3 @@
+// SPDX-License-Identifier: GPL-2.0
+#define CREATE_TRACE_POINTS
+#include "msm_atomic_trace.h"
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
new file mode 100644
index 000000000..95f4374ae
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013-2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/fault-inject.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
+#include <drm/drm_framebuffer.h>
+
+#include "msm_drv.h"
+#include "msm_gpu.h"
+#include "msm_kms.h"
+#include "msm_debugfs.h"
+#include "disp/msm_disp_snapshot.h"
+
+/*
+ * GPU Snapshot:
+ */
+
+struct msm_gpu_show_priv {
+ struct msm_gpu_state *state;
+ struct drm_device *dev;
+};
+
+static int msm_gpu_show(struct seq_file *m, void *arg)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct msm_gpu_show_priv *show_priv = m->private;
+ struct msm_drm_private *priv = show_priv->dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ int ret;
+
+ ret = mutex_lock_interruptible(&gpu->lock);
+ if (ret)
+ return ret;
+
+ drm_printf(&p, "%s Status:\n", gpu->name);
+ gpu->funcs->show(gpu, show_priv->state, &p);
+
+ mutex_unlock(&gpu->lock);
+
+ return 0;
+}
+
+static int msm_gpu_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *m = file->private_data;
+ struct msm_gpu_show_priv *show_priv = m->private;
+ struct msm_drm_private *priv = show_priv->dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+
+ mutex_lock(&gpu->lock);
+ gpu->funcs->gpu_state_put(show_priv->state);
+ mutex_unlock(&gpu->lock);
+
+ kfree(show_priv);
+
+ return single_release(inode, file);
+}
+
+static int msm_gpu_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ struct msm_gpu_show_priv *show_priv;
+ int ret;
+
+ if (!gpu || !gpu->funcs->gpu_state_get)
+ return -ENODEV;
+
+ show_priv = kmalloc(sizeof(*show_priv), GFP_KERNEL);
+ if (!show_priv)
+ return -ENOMEM;
+
+ ret = mutex_lock_interruptible(&gpu->lock);
+ if (ret)
+ goto free_priv;
+
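+	/* Snapshot the GPU state with the device powered up and the hw
+	 * (re)initialized: */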
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ msm_gpu_hw_init(gpu);
+ show_priv->state = gpu->funcs->gpu_state_get(gpu);
+ pm_runtime_put_sync(&gpu->pdev->dev);
+
+ mutex_unlock(&gpu->lock);
+
+ if (IS_ERR(show_priv->state)) {
+ ret = PTR_ERR(show_priv->state);
+ goto free_priv;
+ }
+
+ show_priv->dev = dev;
+
+ ret = single_open(file, msm_gpu_show, show_priv);
+ if (ret)
+ goto free_priv;
+
+ return 0;
+
+free_priv:
+ kfree(show_priv);
+ return ret;
+}
+
+static const struct file_operations msm_gpu_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_gpu_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = msm_gpu_release,
+};
+
+/*
+ * Display Snapshot:
+ */
+
+static int msm_kms_show(struct seq_file *m, void *arg)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct msm_disp_state *state = m->private;
+
+ msm_disp_state_print(state, &p);
+
+ return 0;
+}
+
+static int msm_kms_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *m = file->private_data;
+ struct msm_disp_state *state = m->private;
+
+ msm_disp_state_free(state);
+
+ return single_release(inode, file);
+}
+
+static int msm_kms_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_disp_state *state;
+ int ret;
+
+ if (!priv->kms)
+ return -ENODEV;
+
+ ret = mutex_lock_interruptible(&priv->kms->dump_mutex);
+ if (ret)
+ return ret;
+
+ state = msm_disp_snapshot_state_sync(priv->kms);
+
+ mutex_unlock(&priv->kms->dump_mutex);
+
+	if (IS_ERR(state))
+		return PTR_ERR(state);
+
+ ret = single_open(file, msm_kms_show, state);
+ if (ret) {
+ msm_disp_state_free(state);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct file_operations msm_kms_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_kms_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = msm_kms_release,
+};
+
+/*
+ * Other debugfs:
+ */
+
+static unsigned long last_shrink_freed;
+
+static int
+shrink_get(void *data, u64 *val)
+{
+ *val = last_shrink_freed;
+
+ return 0;
+}
+
+static int
+shrink_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+
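+	/* Manually trigger the GEM shrinker; the amount reclaimed is
+	 * stashed so it can be read back via shrink_get(): */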
+ last_shrink_freed = msm_gem_shrinker_shrink(dev, val);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(shrink_fops,
+ shrink_get, shrink_set,
+ "0x%08llx\n");
+
+
+static int msm_gem_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ int ret;
+
+ ret = mutex_lock_interruptible(&priv->obj_lock);
+ if (ret)
+ return ret;
+
+ msm_gem_describe_objects(&priv->objects, m);
+
+ mutex_unlock(&priv->obj_lock);
+
+ return 0;
+}
+
+static int msm_mm_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
+
+ return 0;
+}
+
+static int msm_fb_show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_framebuffer *fb, *fbdev_fb = NULL;
+
+ if (priv->fbdev) {
+ seq_printf(m, "fbcon ");
+ fbdev_fb = priv->fbdev->fb;
+ msm_framebuffer_describe(fbdev_fb, m);
+ }
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+ if (fb == fbdev_fb)
+ continue;
+
+ seq_printf(m, "user ");
+ msm_framebuffer_describe(fb, m);
+ }
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return 0;
+}
+
+static struct drm_info_list msm_debugfs_list[] = {
+ {"gem", msm_gem_show},
+ { "mm", msm_mm_show },
+ { "fb", msm_fb_show },
+};
+
+static int late_init_minor(struct drm_minor *minor)
+{
+ int ret;
+
+ if (!minor)
+ return 0;
+
+ ret = msm_rd_debugfs_init(minor);
+ if (ret) {
+ DRM_DEV_ERROR(minor->dev->dev, "could not install rd debugfs\n");
+ return ret;
+ }
+
+ ret = msm_perf_debugfs_init(minor);
+ if (ret) {
+ DRM_DEV_ERROR(minor->dev->dev, "could not install perf debugfs\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int msm_debugfs_late_init(struct drm_device *dev)
+{
+	int ret;
+
+	ret = late_init_minor(dev->primary);
+ if (ret)
+ return ret;
+ ret = late_init_minor(dev->render);
+ return ret;
+}
+
+void msm_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ drm_debugfs_create_files(msm_debugfs_list,
+ ARRAY_SIZE(msm_debugfs_list),
+ minor->debugfs_root, minor);
+
+ debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
+ dev, &msm_gpu_fops);
+
+ debugfs_create_file("kms", S_IRUSR, minor->debugfs_root,
+ dev, &msm_kms_fops);
+
+ debugfs_create_u32("hangcheck_period_ms", 0600, minor->debugfs_root,
+ &priv->hangcheck_period);
+
+ debugfs_create_bool("disable_err_irq", 0600, minor->debugfs_root,
+ &priv->disable_err_irq);
+
+ debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root,
+ dev, &shrink_fops);
+
+ if (priv->kms && priv->kms->funcs->debugfs_init)
+ priv->kms->funcs->debugfs_init(priv->kms, minor);
+
+#ifdef CONFIG_FAULT_INJECTION
+ fault_create_debugfs_attr("fail_gem_alloc", minor->debugfs_root,
+ &fail_gem_alloc);
+ fault_create_debugfs_attr("fail_gem_iova", minor->debugfs_root,
+ &fail_gem_iova);
+#endif
+}
+#endif
+
diff --git a/drivers/gpu/drm/msm/msm_debugfs.h b/drivers/gpu/drm/msm/msm_debugfs.h
new file mode 100644
index 000000000..ef58f66ab
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __MSM_DEBUGFS_H__
+#define __MSM_DEBUGFS_H__
+
+#ifdef CONFIG_DEBUG_FS
+void msm_debugfs_init(struct drm_minor *minor);
+#endif
+
+#endif /* __MSM_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
new file mode 100644
index 000000000..f982a827b
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -0,0 +1,1349 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/fault-inject.h>
+#include <linux/kthread.h>
+#include <linux/sched/mm.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/sched/types.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_prime.h>
+#include <drm/drm_of.h>
+#include <drm/drm_vblank.h>
+
+#include "disp/msm_disp_snapshot.h"
+#include "msm_drv.h"
+#include "msm_debugfs.h"
+#include "msm_fence.h"
+#include "msm_gem.h"
+#include "msm_gpu.h"
+#include "msm_kms.h"
+#include "msm_mmu.h"
+#include "adreno/adreno_gpu.h"
+
+/*
+ * MSM driver version:
+ * - 1.0.0 - initial interface
+ * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
+ * - 1.2.0 - adds explicit fence support for submit ioctl
+ * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
+ * SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
+ * MSM_GEM_INFO ioctl.
+ * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
+ * GEM object's debug name
+ * - 1.5.0 - Add SUBMITQUERY_QUERY ioctl
+ * - 1.6.0 - Syncobj support
+ * - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
+ * - 1.8.0 - Add MSM_BO_CACHED_COHERENT for supported GPUs (a6xx)
+ * - 1.9.0 - Add MSM_SUBMIT_FENCE_SN_IN
+ */
+#define MSM_VERSION_MAJOR 1
+#define MSM_VERSION_MINOR 9
+#define MSM_VERSION_PATCHLEVEL 0
+
+static void msm_deinit_vram(struct drm_device *ddev);
+
+static const struct drm_mode_config_funcs mode_config_funcs = {
+ .fb_create = msm_framebuffer_create,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
+ .atomic_commit_tail = msm_atomic_commit_tail,
+};
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+static bool fbdev = true;
+MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
+module_param(fbdev, bool, 0600);
+#endif
+
+static char *vram = "16m";
+MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
+module_param(vram, charp, 0);
+
+bool dumpstate;
+MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
+module_param(dumpstate, bool, 0600);
+
+static bool modeset = true;
+MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
+module_param(modeset, bool, 0600);
+
+#ifdef CONFIG_FAULT_INJECTION
+DECLARE_FAULT_ATTR(fail_gem_alloc);
+DECLARE_FAULT_ATTR(fail_gem_iova);
+#endif
+
+static irqreturn_t msm_irq(int irq, void *arg)
+{
+ struct drm_device *dev = arg;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ BUG_ON(!kms);
+
+ return kms->funcs->irq(kms);
+}
+
+static void msm_irq_preinstall(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ BUG_ON(!kms);
+
+ kms->funcs->irq_preinstall(kms);
+}
+
+static int msm_irq_postinstall(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ BUG_ON(!kms);
+
+ if (kms->funcs->irq_postinstall)
+ return kms->funcs->irq_postinstall(kms);
+
+ return 0;
+}
+
+static int msm_irq_install(struct drm_device *dev, unsigned int irq)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ int ret;
+
+ if (irq == IRQ_NOTCONNECTED)
+ return -ENOTCONN;
+
+ msm_irq_preinstall(dev);
+
+ ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev);
+ if (ret)
+ return ret;
+
+ kms->irq_requested = true;
+
+ ret = msm_irq_postinstall(dev);
+ if (ret) {
+ free_irq(irq, dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void msm_irq_uninstall(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ kms->funcs->irq_uninstall(kms);
+ if (kms->irq_requested)
+ free_irq(kms->irq, dev);
+}
+
+struct msm_vblank_work {
+ struct work_struct work;
+ int crtc_id;
+ bool enable;
+ struct msm_drm_private *priv;
+};
+
+static void vblank_ctrl_worker(struct work_struct *work)
+{
+ struct msm_vblank_work *vbl_work = container_of(work,
+ struct msm_vblank_work, work);
+ struct msm_drm_private *priv = vbl_work->priv;
+ struct msm_kms *kms = priv->kms;
+
+ if (vbl_work->enable)
+ kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
+ else
+ kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
+
+ kfree(vbl_work);
+}
+
+static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
+ int crtc_id, bool enable)
+{
+ struct msm_vblank_work *vbl_work;
+
+ vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
+ if (!vbl_work)
+ return -ENOMEM;
+
+ INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
+
+ vbl_work->crtc_id = crtc_id;
+ vbl_work->enable = enable;
+ vbl_work->priv = priv;
+
+ queue_work(priv->wq, &vbl_work->work);
+
+ return 0;
+}
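+
+/*
+ * Why the indirection above: the drm core can ask to enable/disable vblank
+ * from atomic context, while kms->funcs->{enable,disable}_vblank() may need
+ * to sleep, so the request is bounced to process context:
+ *
+ *   msm_crtc_enable_vblank()
+ *     -> vblank_ctrl_queue_work()      (GFP_ATOMIC alloc, queued on priv->wq)
+ *        -> vblank_ctrl_worker()       (process context)
+ *           -> kms->funcs->enable_vblank()
+ */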
+
+static int msm_drm_uninit(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct drm_device *ddev = priv->dev;
+ struct msm_kms *kms = priv->kms;
+ int i;
+
+ /*
+	 * Shut down the hw if we're far enough along that things might be on.
+	 * If we run this too early, we'll end up panicking in a variety of
+ * places. Since we don't register the drm device until late in
+ * msm_drm_init, drm_dev->registered is used as an indicator that the
+ * shutdown will be successful.
+ */
+ if (ddev->registered) {
+ drm_dev_unregister(ddev);
+ drm_atomic_helper_shutdown(ddev);
+ }
+
+	/* We must cancel and clean up any pending vblank enable/disable
+ * work before msm_irq_uninstall() to avoid work re-enabling an
+ * irq after uninstall has disabled it.
+ */
+
+ flush_workqueue(priv->wq);
+
+ /* clean up event worker threads */
+ for (i = 0; i < priv->num_crtcs; i++) {
+ if (priv->event_thread[i].worker)
+ kthread_destroy_worker(priv->event_thread[i].worker);
+ }
+
+ msm_gem_shrinker_cleanup(ddev);
+
+ drm_kms_helper_poll_fini(ddev);
+
+ msm_perf_debugfs_cleanup(priv);
+ msm_rd_debugfs_cleanup(priv);
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ if (fbdev && priv->fbdev)
+ msm_fbdev_free(ddev);
+#endif
+
+ if (kms)
+ msm_disp_snapshot_destroy(ddev);
+
+ drm_mode_config_cleanup(ddev);
+
+ for (i = 0; i < priv->num_bridges; i++)
+ drm_bridge_remove(priv->bridges[i]);
+ priv->num_bridges = 0;
+
+ if (kms) {
+ pm_runtime_get_sync(dev);
+ msm_irq_uninstall(ddev);
+ pm_runtime_put_sync(dev);
+ }
+
+ if (kms && kms->funcs)
+ kms->funcs->destroy(kms);
+
+ msm_deinit_vram(ddev);
+
+ component_unbind_all(dev, ddev);
+
+ ddev->dev_private = NULL;
+ drm_dev_put(ddev);
+
+ destroy_workqueue(priv->wq);
+
+ return 0;
+}
+
+#include <linux/of_address.h>
+
+struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
+{
+ struct iommu_domain *domain;
+ struct msm_gem_address_space *aspace;
+ struct msm_mmu *mmu;
+ struct device *mdp_dev = dev->dev;
+ struct device *mdss_dev = mdp_dev->parent;
+ struct device *iommu_dev;
+
+ /*
+	 * IOMMUs can be a part of the MDSS device tree binding, or of
+	 * the MDP/DPU device.
+ */
+ if (device_iommu_mapped(mdp_dev))
+ iommu_dev = mdp_dev;
+ else
+ iommu_dev = mdss_dev;
+
+ domain = iommu_domain_alloc(iommu_dev->bus);
+ if (!domain) {
+ drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
+ return NULL;
+ }
+
+ mmu = msm_iommu_new(iommu_dev, domain);
+ if (IS_ERR(mmu)) {
+ iommu_domain_free(domain);
+ return ERR_CAST(mmu);
+ }
+
+ aspace = msm_gem_address_space_create(mmu, "mdp_kms",
+ 0x1000, 0x100000000 - 0x1000);
+ if (IS_ERR(aspace))
+ mmu->funcs->destroy(mmu);
+
+ return aspace;
+}
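+
+/*
+ * Illustrative (hypothetical phandle/SID values) of the two DT shapes
+ * msm_kms_init_aspace() handles, with the iommus property either on the
+ * MDSS parent or on the MDP/DPU node itself:
+ *
+ *   mdss { iommus = <&apps_smmu 0x400 0x2>; mdp { ... }; };
+ *   mdss { mdp { iommus = <&apps_smmu 0x400 0x2>; ... }; };
+ */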
+
+bool msm_use_mmu(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+
+ /*
+	 * a2xx comes with its own MMU.
+	 * On other platforms an IOMMU can be specified either for the
+	 * MDP/DPU device or for its parent, the MDSS device.
+ */
+ return priv->is_a2xx ||
+ device_iommu_mapped(dev->dev) ||
+ device_iommu_mapped(dev->dev->parent);
+}
+
+static int msm_init_vram(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct device_node *node;
+ unsigned long size = 0;
+ int ret = 0;
+
+ /* In the device-tree world, we could have a 'memory-region'
+ * phandle, which gives us a link to our "vram". Allocating
+ * is all nicely abstracted behind the dma api, but we need
+ * to know the entire size to allocate it all in one go. There
+ * are two cases:
+ * 1) device with no IOMMU, in which case we need exclusive
+ * access to a VRAM carveout big enough for all gpu
+ * buffers
+ * 2) device with IOMMU, but where the bootloader puts up
+ * a splash screen. In this case, the VRAM carveout
+ * need only be large enough for fbdev fb. But we need
+ * exclusive access to the buffer to avoid the kernel
+ * using those pages for other purposes (which appears
+ * as corruption on screen before we have a chance to
+ * load and do initial modeset)
+ */
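+	/*
+	 * A hypothetical reserved-memory example of such a carveout:
+	 *
+	 *   reserved-memory {
+	 *     gpu_mem: gpu-region@8f200000 {
+	 *       reg = <0x8f200000 0x1000000>;
+	 *       no-map;
+	 *     };
+	 *   };
+	 *   ...
+	 *   memory-region = <&gpu_mem>;
+	 */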
+
+ node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
+ if (node) {
+ struct resource r;
+ ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
+ if (ret)
+ return ret;
+ size = r.end - r.start + 1;
+ DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
+
+	/* if we have no IOMMU, then we need to use the carveout allocator.
+ * Grab the entire DMA chunk carved out in early startup in
+ * mach-msm:
+ */
+ } else if (!msm_use_mmu(dev)) {
+ DRM_INFO("using %s VRAM carveout\n", vram);
+ size = memparse(vram, NULL);
+ }
+
+ if (size) {
+ unsigned long attrs = 0;
+ void *p;
+
+ priv->vram.size = size;
+
+ drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
+ spin_lock_init(&priv->vram.lock);
+
+ attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
+ attrs |= DMA_ATTR_WRITE_COMBINE;
+
+ /* note that for no-kernel-mapping, the vaddr returned
+ * is bogus, but non-null if allocation succeeded:
+ */
+ p = dma_alloc_attrs(dev->dev, size,
+ &priv->vram.paddr, GFP_KERNEL, attrs);
+ if (!p) {
+ DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
+ priv->vram.paddr = 0;
+ return -ENOMEM;
+ }
+
+ DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
+ (uint32_t)priv->vram.paddr,
+ (uint32_t)(priv->vram.paddr + size));
+ }
+
+ return ret;
+}
+
+static void msm_deinit_vram(struct drm_device *ddev)
+{
+ struct msm_drm_private *priv = ddev->dev_private;
+ unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
+
+ if (!priv->vram.paddr)
+ return;
+
+ drm_mm_takedown(&priv->vram.mm);
+ dma_free_attrs(ddev->dev, priv->vram.size, NULL, priv->vram.paddr,
+ attrs);
+}
+
+static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
+{
+ struct msm_drm_private *priv = dev_get_drvdata(dev);
+ struct drm_device *ddev;
+ struct msm_kms *kms;
+ int ret, i;
+
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
+ ddev = drm_dev_alloc(drv, dev);
+ if (IS_ERR(ddev)) {
+ DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
+ return PTR_ERR(ddev);
+ }
+ ddev->dev_private = priv;
+ priv->dev = ddev;
+
+ priv->wq = alloc_ordered_workqueue("msm", 0);
+ if (!priv->wq) {
+ ret = -ENOMEM;
+ goto err_put_dev;
+ }
+
+ INIT_LIST_HEAD(&priv->objects);
+ mutex_init(&priv->obj_lock);
+
+ /*
+ * Initialize the LRUs:
+ */
+ mutex_init(&priv->lru.lock);
+ drm_gem_lru_init(&priv->lru.unbacked, &priv->lru.lock);
+ drm_gem_lru_init(&priv->lru.pinned, &priv->lru.lock);
+ drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock);
+ drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock);
+
+ /* Teach lockdep about lock ordering wrt. shrinker: */
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&priv->lru.lock);
+ fs_reclaim_release(GFP_KERNEL);
+
+ drm_mode_config_init(ddev);
+
+ ret = msm_init_vram(ddev);
+ if (ret)
+ goto err_cleanup_mode_config;
+
+ /* Bind all our sub-components: */
+ ret = component_bind_all(dev, ddev);
+ if (ret)
+ goto err_deinit_vram;
+
+ dma_set_max_seg_size(dev, UINT_MAX);
+
+ msm_gem_shrinker_init(ddev);
+
+ if (priv->kms_init) {
+ ret = priv->kms_init(ddev);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to load kms\n");
+ priv->kms = NULL;
+ goto err_msm_uninit;
+ }
+ kms = priv->kms;
+ } else {
+ /* valid only for the dummy headless case, where of_node=NULL */
+ WARN_ON(dev->of_node);
+ kms = NULL;
+ }
+
+ /* Enable normalization of plane zpos */
+ ddev->mode_config.normalize_zpos = true;
+
+ if (kms) {
+ kms->dev = ddev;
+ ret = kms->funcs->hw_init(kms);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
+ goto err_msm_uninit;
+ }
+ }
+
+ drm_helper_move_panel_connectors_to_head(ddev);
+
+ ddev->mode_config.funcs = &mode_config_funcs;
+ ddev->mode_config.helper_private = &mode_config_helper_funcs;
+
+ for (i = 0; i < priv->num_crtcs; i++) {
+ /* initialize event thread */
+ priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
+ priv->event_thread[i].dev = ddev;
+ priv->event_thread[i].worker = kthread_create_worker(0,
+ "crtc_event:%d", priv->event_thread[i].crtc_id);
+ if (IS_ERR(priv->event_thread[i].worker)) {
+ ret = PTR_ERR(priv->event_thread[i].worker);
+ DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
+ priv->event_thread[i].worker = NULL;
+ goto err_msm_uninit;
+ }
+
+ sched_set_fifo(priv->event_thread[i].worker->task);
+ }
+
+ ret = drm_vblank_init(ddev, priv->num_crtcs);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
+ goto err_msm_uninit;
+ }
+
+ if (kms) {
+ pm_runtime_get_sync(dev);
+ ret = msm_irq_install(ddev, kms->irq);
+ pm_runtime_put_sync(dev);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
+ goto err_msm_uninit;
+ }
+ }
+
+ ret = drm_dev_register(ddev, 0);
+ if (ret)
+ goto err_msm_uninit;
+
+ if (kms) {
+ ret = msm_disp_snapshot_init(ddev);
+ if (ret)
+ DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
+ }
+ drm_mode_config_reset(ddev);
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ if (kms && fbdev)
+ priv->fbdev = msm_fbdev_init(ddev);
+#endif
+
+ ret = msm_debugfs_late_init(ddev);
+ if (ret)
+ goto err_msm_uninit;
+
+ drm_kms_helper_poll_init(ddev);
+
+ return 0;
+
+err_msm_uninit:
+ msm_drm_uninit(dev);
+
+ return ret;
+
+err_deinit_vram:
+ msm_deinit_vram(ddev);
+err_cleanup_mode_config:
+ drm_mode_config_cleanup(ddev);
+ destroy_workqueue(priv->wq);
+err_put_dev:
+ drm_dev_put(ddev);
+
+ return ret;
+}
+
+/*
+ * DRM operations:
+ */
+
+static void load_gpu(struct drm_device *dev)
+{
+ static DEFINE_MUTEX(init_lock);
+ struct msm_drm_private *priv = dev->dev_private;
+
+ mutex_lock(&init_lock);
+
+ if (!priv->gpu)
+ priv->gpu = adreno_load_gpu(dev);
+
+ mutex_unlock(&init_lock);
+}
+
+static int context_init(struct drm_device *dev, struct drm_file *file)
+{
+ static atomic_t ident = ATOMIC_INIT(0);
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_file_private *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ctx->submitqueues);
+ rwlock_init(&ctx->queuelock);
+
+ kref_init(&ctx->ref);
+ msm_submitqueue_init(dev, ctx);
+
+ ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
+ file->driver_priv = ctx;
+
+ ctx->seqno = atomic_inc_return(&ident);
+
+ return 0;
+}
+
+static int msm_open(struct drm_device *dev, struct drm_file *file)
+{
+ /* For now, load gpu on open.. to avoid the requirement of having
+ * firmware in the initrd.
+ */
+ load_gpu(dev);
+
+ return context_init(dev, file);
+}
+
+static void context_close(struct msm_file_private *ctx)
+{
+ msm_submitqueue_close(ctx);
+ msm_file_private_put(ctx);
+}
+
+static void msm_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_file_private *ctx = file->driver_priv;
+
+ /*
+ * It is not possible to set sysprof param to non-zero if gpu
+ * is not initialized:
+ */
+ if (priv->gpu)
+ msm_file_private_set_sysprof(ctx, priv->gpu, 0);
+
+ context_close(ctx);
+}
+
+int msm_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ if (!kms)
+ return -ENXIO;
+ drm_dbg_vbl(dev, "crtc=%u", pipe);
+ return vblank_ctrl_queue_work(priv, pipe, true);
+}
+
+void msm_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ if (!kms)
+ return;
+ drm_dbg_vbl(dev, "crtc=%u", pipe);
+ vblank_ctrl_queue_work(priv, pipe, false);
+}
+
+/*
+ * DRM ioctls:
+ */
+
+static int msm_ioctl_get_param(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_msm_param *args = data;
+ struct msm_gpu *gpu;
+
+	/* for now, we just have the 3d pipe.. eventually this would need to
+	 * be more clever to dispatch to the appropriate gpu module:
+ */
+ if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
+ return -EINVAL;
+
+ gpu = priv->gpu;
+
+ if (!gpu)
+ return -ENXIO;
+
+ return gpu->funcs->get_param(gpu, file->driver_priv,
+ args->param, &args->value, &args->len);
+}
+
+static int msm_ioctl_set_param(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_msm_param *args = data;
+ struct msm_gpu *gpu;
+
+ if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
+ return -EINVAL;
+
+ gpu = priv->gpu;
+
+ if (!gpu)
+ return -ENXIO;
+
+ return gpu->funcs->set_param(gpu, file->driver_priv,
+ args->param, args->value, args->len);
+}
+
+static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_new *args = data;
+ uint32_t flags = args->flags;
+
+ if (args->flags & ~MSM_BO_FLAGS) {
+ DRM_ERROR("invalid flags: %08x\n", args->flags);
+ return -EINVAL;
+ }
+
+ /*
+ * Uncached CPU mappings are deprecated, as of:
+ *
+ * 9ef364432db4 ("drm/msm: deprecate MSM_BO_UNCACHED (map as writecombine instead)")
+ *
+ * So promote them to WC.
+ */
+ if (flags & MSM_BO_UNCACHED) {
+ flags &= ~MSM_BO_CACHED;
+ flags |= MSM_BO_WC;
+ }
+
+ if (should_fail(&fail_gem_alloc, args->size))
+ return -ENOMEM;
+
+	return msm_gem_new_handle(dev, file, args->size,
+			flags, &args->handle, NULL);
+}
+
+static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
+{
+ return ktime_set(timeout.tv_sec, timeout.tv_nsec);
+}
+
+static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_cpu_prep *args = data;
+ struct drm_gem_object *obj;
+ ktime_t timeout = to_ktime(args->timeout);
+ int ret;
+
+ if (args->op & ~MSM_PREP_FLAGS) {
+ DRM_ERROR("invalid op: %08x\n", args->op);
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = msm_gem_cpu_prep(obj, args->op, &timeout);
+
+ drm_gem_object_put(obj);
+
+ return ret;
+}
+
+static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_cpu_fini *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = msm_gem_cpu_fini(obj);
+
+ drm_gem_object_put(obj);
+
+ return ret;
+}
+
+static int msm_ioctl_gem_info_iova(struct drm_device *dev,
+ struct drm_file *file, struct drm_gem_object *obj,
+ uint64_t *iova)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_file_private *ctx = file->driver_priv;
+
+ if (!priv->gpu)
+ return -EINVAL;
+
+ if (should_fail(&fail_gem_iova, obj->size))
+ return -ENOMEM;
+
+ /*
+ * Don't pin the memory here - just get an address so that userspace can
+ * be productive
+ */
+ return msm_gem_get_iova(obj, ctx->aspace, iova);
+}
+
+static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
+ struct drm_file *file, struct drm_gem_object *obj,
+ uint64_t iova)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_file_private *ctx = file->driver_priv;
+
+ if (!priv->gpu)
+ return -EINVAL;
+
+ /* Only supported if per-process address space is supported: */
+ if (priv->gpu->aspace == ctx->aspace)
+ return -EOPNOTSUPP;
+
+ if (should_fail(&fail_gem_iova, obj->size))
+ return -ENOMEM;
+
+ return msm_gem_set_iova(obj, ctx->aspace, iova);
+}
+
+static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_info *args = data;
+ struct drm_gem_object *obj;
+ struct msm_gem_object *msm_obj;
+ int i, ret = 0;
+
+ if (args->pad)
+ return -EINVAL;
+
+ switch (args->info) {
+ case MSM_INFO_GET_OFFSET:
+ case MSM_INFO_GET_IOVA:
+ case MSM_INFO_SET_IOVA:
+ /* value returned as immediate, not pointer, so len==0: */
+ if (args->len)
+ return -EINVAL;
+ break;
+ case MSM_INFO_SET_NAME:
+ case MSM_INFO_GET_NAME:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ msm_obj = to_msm_bo(obj);
+
+ switch (args->info) {
+ case MSM_INFO_GET_OFFSET:
+ args->value = msm_gem_mmap_offset(obj);
+ break;
+ case MSM_INFO_GET_IOVA:
+ ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
+ break;
+ case MSM_INFO_SET_IOVA:
+ ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value);
+ break;
+ case MSM_INFO_SET_NAME:
+ /* length check should leave room for terminating null: */
+ if (args->len >= sizeof(msm_obj->name)) {
+ ret = -EINVAL;
+ break;
+ }
+ if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
+ args->len)) {
+ msm_obj->name[0] = '\0';
+ ret = -EFAULT;
+ break;
+ }
+ msm_obj->name[args->len] = '\0';
+ for (i = 0; i < args->len; i++) {
+ if (!isprint(msm_obj->name[i])) {
+ msm_obj->name[i] = '\0';
+ break;
+ }
+ }
+ break;
+ case MSM_INFO_GET_NAME:
+ if (args->value && (args->len < strlen(msm_obj->name))) {
+ ret = -EINVAL;
+ break;
+ }
+ args->len = strlen(msm_obj->name);
+ if (args->value) {
+ if (copy_to_user(u64_to_user_ptr(args->value),
+ msm_obj->name, args->len))
+ ret = -EFAULT;
+ }
+ break;
+ }
+
+ drm_gem_object_put(obj);
+
+ return ret;
+}
+
+static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
+ ktime_t timeout)
+{
+ struct dma_fence *fence;
+ int ret;
+
+ if (fence_after(fence_id, queue->last_fence)) {
+ DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
+ fence_id, queue->last_fence);
+ return -EINVAL;
+ }
+
+ /*
+ * Map submitqueue scoped "seqno" (which is actually an idr key)
+ * back to underlying dma-fence
+ *
+ * The fence is removed from the fence_idr when the submit is
+ * retired, so if the fence is not found it means there is nothing
+ * to wait for
+ */
+ spin_lock(&queue->idr_lock);
+ fence = idr_find(&queue->fence_idr, fence_id);
+ if (fence)
+ fence = dma_fence_get_rcu(fence);
+ spin_unlock(&queue->idr_lock);
+
+ if (!fence)
+ return 0;
+
+ ret = dma_fence_wait_timeout(fence, true, timeout_to_jiffies(&timeout));
+ if (ret == 0) {
+ ret = -ETIMEDOUT;
+ } else if (ret != -ERESTARTSYS) {
+ ret = 0;
+ }
+
+ dma_fence_put(fence);
+
+ return ret;
+}
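+
+/*
+ * Sketch of the seqno lifecycle consumed above (no new behavior, just the
+ * existing flow in idr terms):
+ *
+ *   id = idr_alloc(&queue->fence_idr, fence, ...);  // at submit time
+ *   ...                                             // id returned to userspace
+ *   fence = idr_find(&queue->fence_idr, id);        // wait_fence(), above
+ *   idr_remove(&queue->fence_idr, id);              // when submit is retired
+ */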
+
+static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_msm_wait_fence *args = data;
+ struct msm_gpu_submitqueue *queue;
+ int ret;
+
+ if (args->pad) {
+ DRM_ERROR("invalid pad: %08x\n", args->pad);
+ return -EINVAL;
+ }
+
+ if (!priv->gpu)
+ return 0;
+
+ queue = msm_submitqueue_get(file->driver_priv, args->queueid);
+ if (!queue)
+ return -ENOENT;
+
+ ret = wait_fence(queue, args->fence, to_ktime(args->timeout));
+
+ msm_submitqueue_put(queue);
+
+ return ret;
+}
+
+static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_madvise *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ switch (args->madv) {
+ case MSM_MADV_DONTNEED:
+ case MSM_MADV_WILLNEED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj) {
+ return -ENOENT;
+ }
+
+ ret = msm_gem_madvise(obj, args->madv);
+ if (ret >= 0) {
+ args->retained = ret;
+ ret = 0;
+ }
+
+ drm_gem_object_put(obj);
+
+ return ret;
+}
+
+
+static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_submitqueue *args = data;
+
+ if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
+ return -EINVAL;
+
+ return msm_submitqueue_create(dev, file->driver_priv, args->prio,
+ args->flags, &args->id);
+}
+
+static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ return msm_submitqueue_query(dev, file->driver_priv, data);
+}
+
+static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ u32 id = *(u32 *) data;
+
+ return msm_submitqueue_remove(file->driver_priv, id);
+}
+
+static const struct drm_ioctl_desc msm_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SET_PARAM, msm_ioctl_set_param, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
+};
+
+static void msm_fop_show_fdinfo(struct seq_file *m, struct file *f)
+{
+ struct drm_file *file = f->private_data;
+ struct drm_device *dev = file->minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ if (!priv->gpu)
+ return;
+
+ msm_gpu_show_fdinfo(priv->gpu, file->driver_priv, &p);
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ DRM_GEM_FOPS,
+ .show_fdinfo = msm_fop_show_fdinfo,
+};
+
+static const struct drm_driver msm_driver = {
+ .driver_features = DRIVER_GEM |
+ DRIVER_RENDER |
+ DRIVER_ATOMIC |
+ DRIVER_MODESET |
+ DRIVER_SYNCOBJ,
+ .open = msm_open,
+ .postclose = msm_postclose,
+ .lastclose = drm_fb_helper_lastclose,
+ .dumb_create = msm_gem_dumb_create,
+ .dumb_map_offset = msm_gem_dumb_map_offset,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
+ .gem_prime_mmap = msm_gem_prime_mmap,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = msm_debugfs_init,
+#endif
+ .ioctls = msm_ioctls,
+ .num_ioctls = ARRAY_SIZE(msm_ioctls),
+ .fops = &fops,
+ .name = "msm",
+ .desc = "MSM Snapdragon DRM",
+ .date = "20130625",
+ .major = MSM_VERSION_MAJOR,
+ .minor = MSM_VERSION_MINOR,
+ .patchlevel = MSM_VERSION_PATCHLEVEL,
+};
+
+int msm_pm_prepare(struct device *dev)
+{
+ struct msm_drm_private *priv = dev_get_drvdata(dev);
+ struct drm_device *ddev = priv ? priv->dev : NULL;
+
+ if (!priv || !priv->kms)
+ return 0;
+
+ return drm_mode_config_helper_suspend(ddev);
+}
+
+void msm_pm_complete(struct device *dev)
+{
+ struct msm_drm_private *priv = dev_get_drvdata(dev);
+ struct drm_device *ddev = priv ? priv->dev : NULL;
+
+ if (!priv || !priv->kms)
+ return;
+
+ drm_mode_config_helper_resume(ddev);
+}
+
+static const struct dev_pm_ops msm_pm_ops = {
+ .prepare = msm_pm_prepare,
+ .complete = msm_pm_complete,
+};
+
+/*
+ * Componentized driver support:
+ */
+
+/*
+ * Identify what components need to be added by parsing what remote-endpoints
+ * our MDP output ports are connected to. In the case of LVDS on MDP4, there
+ * is no external component that we need to add since LVDS is within MDP4
+ * itself.
+ */
+static int add_components_mdp(struct device *master_dev,
+ struct component_match **matchptr)
+{
+ struct device_node *np = master_dev->of_node;
+ struct device_node *ep_node;
+
+ for_each_endpoint_of_node(np, ep_node) {
+ struct device_node *intf;
+ struct of_endpoint ep;
+ int ret;
+
+ ret = of_graph_parse_endpoint(ep_node, &ep);
+ if (ret) {
+ DRM_DEV_ERROR(master_dev, "unable to parse port endpoint\n");
+ of_node_put(ep_node);
+ return ret;
+ }
+
+ /*
+		 * The LCDC/LVDS port on MDP4 is a special case where the
+ * remote-endpoint isn't a component that we need to add
+ */
+ if (of_device_is_compatible(np, "qcom,mdp4") &&
+ ep.port == 0)
+ continue;
+
+ /*
+ * It's okay if some of the ports don't have a remote endpoint
+ * specified. It just means that the port isn't connected to
+ * any external interface.
+ */
+ intf = of_graph_get_remote_port_parent(ep_node);
+ if (!intf)
+ continue;
+
+ if (of_device_is_available(intf))
+ drm_of_component_match_add(master_dev, matchptr,
+ component_compare_of, intf);
+
+ of_node_put(intf);
+ }
+
+ return 0;
+}
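+
+/*
+ * A hypothetical OF-graph fragment of the kind walked above; the dsi node
+ * is what would end up in the component match list:
+ *
+ *   mdp: display-controller@... {
+ *     port { mdp_out: endpoint { remote-endpoint = <&dsi_in>; }; };
+ *   };
+ *   dsi: dsi@... {
+ *     port { dsi_in: endpoint { remote-endpoint = <&mdp_out>; }; };
+ *   };
+ */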
+
+/*
+ * We don't know what the best binding to link the gpu with the drm device is.
+ * For now, we just hunt for all the possible gpus that we support, and add them
+ * as components.
+ */
+static const struct of_device_id msm_gpu_match[] = {
+ { .compatible = "qcom,adreno" },
+ { .compatible = "qcom,adreno-3xx" },
+ { .compatible = "amd,imageon" },
+ { .compatible = "qcom,kgsl-3d0" },
+ { },
+};
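+
+/*
+ * e.g. a node matched by the table above (address/compatible hypothetical):
+ *
+ *   gpu: gpu@5000000 {
+ *     compatible = "qcom,adreno-630.2", "qcom,adreno";
+ *     ...
+ *   };
+ */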
+
+static int add_gpu_components(struct device *dev,
+ struct component_match **matchptr)
+{
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, msm_gpu_match);
+ if (!np)
+ return 0;
+
+ if (of_device_is_available(np))
+ drm_of_component_match_add(dev, matchptr, component_compare_of, np);
+
+ of_node_put(np);
+
+ return 0;
+}
+
+static int msm_drm_bind(struct device *dev)
+{
+ return msm_drm_init(dev, &msm_driver);
+}
+
+static void msm_drm_unbind(struct device *dev)
+{
+ msm_drm_uninit(dev);
+}
+
+const struct component_master_ops msm_drm_ops = {
+ .bind = msm_drm_bind,
+ .unbind = msm_drm_unbind,
+};
+
+int msm_drv_probe(struct device *master_dev,
+ int (*kms_init)(struct drm_device *dev))
+{
+ struct msm_drm_private *priv;
+ struct component_match *match = NULL;
+ int ret;
+
+ priv = devm_kzalloc(master_dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->kms_init = kms_init;
+ dev_set_drvdata(master_dev, priv);
+
+ /* Add mdp components if we have KMS. */
+ if (kms_init) {
+ ret = add_components_mdp(master_dev, &match);
+ if (ret)
+ return ret;
+ }
+
+ ret = add_gpu_components(master_dev, &match);
+ if (ret)
+ return ret;
+
+	/* on all devices that I am aware of, IOMMUs which can map
+ * any address the cpu can see are used:
+ */
+ ret = dma_set_mask_and_coherent(master_dev, ~0);
+ if (ret)
+ return ret;
+
+ ret = component_master_add_with_match(master_dev, &msm_drm_ops, match);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Platform driver:
+ * Used only for headless GPU instances
+ */
+
+static int msm_pdev_probe(struct platform_device *pdev)
+{
+ return msm_drv_probe(&pdev->dev, NULL);
+}
+
+static int msm_pdev_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &msm_drm_ops);
+
+ return 0;
+}
+
+void msm_drv_shutdown(struct platform_device *pdev)
+{
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct drm_device *drm = priv ? priv->dev : NULL;
+
+ /*
+	 * Shut down the hw if we're far enough along that things might be on.
+	 * If we run this too early, we'll end up panicking in a variety of
+ * places. Since we don't register the drm device until late in
+ * msm_drm_init, drm_dev->registered is used as an indicator that the
+ * shutdown will be successful.
+ */
+ if (drm && drm->registered && priv->kms)
+ drm_atomic_helper_shutdown(drm);
+}
+
+static struct platform_driver msm_platform_driver = {
+ .probe = msm_pdev_probe,
+ .remove = msm_pdev_remove,
+ .shutdown = msm_drv_shutdown,
+ .driver = {
+ .name = "msm",
+ .pm = &msm_pm_ops,
+ },
+};
+
+static int __init msm_drm_register(void)
+{
+ if (!modeset)
+ return -EINVAL;
+
+ DBG("init");
+ msm_mdp_register();
+ msm_dpu_register();
+ msm_dsi_register();
+ msm_hdmi_register();
+ msm_dp_register();
+ adreno_register();
+ msm_mdp4_register();
+ msm_mdss_register();
+ return platform_driver_register(&msm_platform_driver);
+}
+
+static void __exit msm_drm_unregister(void)
+{
+ DBG("fini");
+ platform_driver_unregister(&msm_platform_driver);
+ msm_mdss_unregister();
+ msm_mdp4_unregister();
+ msm_dp_unregister();
+ msm_hdmi_unregister();
+ adreno_unregister();
+ msm_dsi_unregister();
+ msm_mdp_unregister();
+ msm_dpu_unregister();
+}
+
+module_init(msm_drm_register);
+module_exit(msm_drm_unregister);
+
+MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
+MODULE_DESCRIPTION("MSM DRM Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
new file mode 100644
index 000000000..d4e0ef608
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -0,0 +1,558 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __MSM_DRV_H__
+#define __MSM_DRV_H__
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/component.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/iommu.h>
+#include <linux/types.h>
+#include <linux/of_graph.h>
+#include <linux/of_device.h>
+#include <linux/sizes.h>
+#include <linux/kthread.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/display/drm_dsc.h>
+#include <drm/msm_drm.h>
+#include <drm/drm_gem.h>
+
+#ifdef CONFIG_FAULT_INJECTION
+extern struct fault_attr fail_gem_alloc;
+extern struct fault_attr fail_gem_iova;
+#else
+# define should_fail(attr, size) 0
+#endif
+
+struct msm_kms;
+struct msm_gpu;
+struct msm_mmu;
+struct msm_mdss;
+struct msm_rd_state;
+struct msm_perf_state;
+struct msm_gem_submit;
+struct msm_fence_context;
+struct msm_gem_address_space;
+struct msm_gem_vma;
+struct msm_disp_state;
+
+#define MAX_CRTCS 8
+#define MAX_BRIDGES 8
+
+#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
+
+enum msm_dp_controller {
+ MSM_DP_CONTROLLER_0,
+ MSM_DP_CONTROLLER_1,
+ MSM_DP_CONTROLLER_2,
+ MSM_DP_CONTROLLER_COUNT,
+};
+
+#define MSM_GPU_MAX_RINGS 4
+#define MAX_H_TILES_PER_DISPLAY 2
+
+/**
+ * enum msm_event_wait - type of HW events to wait for
+ * @MSM_ENC_COMMIT_DONE - wait for the driver to flush the registers to HW
+ * @MSM_ENC_TX_COMPLETE - wait for the HW to transfer the frame to panel
+ * @MSM_ENC_VBLANK - wait for the HW VBLANK event (for driver-internal waiters)
+ */
+enum msm_event_wait {
+ MSM_ENC_COMMIT_DONE = 0,
+ MSM_ENC_TX_COMPLETE,
+ MSM_ENC_VBLANK,
+};
+
+/**
+ * struct msm_display_topology - defines a display topology pipeline
+ * @num_lm: number of layer mixers used
+ * @num_enc: number of compression encoder blocks used
+ * @num_intf: number of interfaces the panel is mounted on
+ * @num_dspp: number of dspp blocks used
+ * @num_dsc: number of Display Stream Compression (DSC) blocks used
+ */
+struct msm_display_topology {
+ u32 num_lm;
+ u32 num_enc;
+ u32 num_intf;
+ u32 num_dspp;
+ u32 num_dsc;
+};
+
+/* Commit/Event thread specific structure */
+struct msm_drm_thread {
+ struct drm_device *dev;
+ unsigned int crtc_id;
+ struct kthread_worker *worker;
+};
+
+struct msm_drm_private {
+
+ struct drm_device *dev;
+
+ struct msm_kms *kms;
+ int (*kms_init)(struct drm_device *dev);
+
+ /* subordinate devices, if present: */
+ struct platform_device *gpu_pdev;
+
+ /* possibly this should be in the kms component, but it is
+ * shared by both mdp4 and mdp5..
+ */
+ struct hdmi *hdmi;
+
+ /* DSI is shared by mdp4 and mdp5 */
+ struct msm_dsi *dsi[2];
+
+ struct msm_dp *dp[MSM_DP_CONTROLLER_COUNT];
+
+ /* when we have more than one 'msm_gpu' these need to be an array: */
+ struct msm_gpu *gpu;
+
+ /* gpu is only set on open(), but we need this info earlier */
+ bool is_a2xx;
+ bool has_cached_coherent;
+
+ struct drm_fb_helper *fbdev;
+
+ struct msm_rd_state *rd; /* debugfs to dump all submits */
+ struct msm_rd_state *hangrd; /* debugfs to dump hanging submits */
+ struct msm_perf_state *perf;
+
+ /**
+	 * List of all GEM objects (mainly for debugfs), protected by
+	 * obj_lock (acquire before per-GEM-object lock)
+ */
+ struct list_head objects;
+ struct mutex obj_lock;
+
+ /**
+ * lru:
+ *
+	 * The various LRUs that a GEM object is in at various stages of
+	 * its lifetime. Objects start out in the unbacked LRU. When
+	 * pinned (for scanout or permanently mapped GPU buffers, like
+ * ringbuffer, memptr, fw, etc) it moves to the pinned LRU. When
+ * unpinned, it moves into willneed or dontneed LRU depending on
+ * madvise state. When backing pages are evicted (willneed) or
+ * purged (dontneed) it moves back into the unbacked LRU.
+ *
+ * The dontneed LRU is considered by the shrinker for objects
+ * that are candidate for purging, and the willneed LRU is
+ * considered for objects that could be evicted.
+ */
+ struct {
+ /**
+ * unbacked:
+ *
+ * The LRU for GEM objects without backing pages allocated.
+	 * This mostly exists so that objects are always in one
+	 * LRU.
+ */
+ struct drm_gem_lru unbacked;
+
+ /**
+ * pinned:
+ *
+ * The LRU for pinned GEM objects
+ */
+ struct drm_gem_lru pinned;
+
+ /**
+ * willneed:
+ *
+ * The LRU for unpinned GEM objects which are in madvise
+ * WILLNEED state (ie. can be evicted)
+ */
+ struct drm_gem_lru willneed;
+
+ /**
+ * dontneed:
+ *
+ * The LRU for unpinned GEM objects which are in madvise
+ * DONTNEED state (ie. can be purged)
+ */
+ struct drm_gem_lru dontneed;
+
+ /**
+ * lock:
+ *
+ * Protects manipulation of all of the LRUs.
+ */
+ struct mutex lock;
+ } lru;
+
+ struct workqueue_struct *wq;
+
+ unsigned int num_crtcs;
+ struct drm_crtc *crtcs[MAX_CRTCS];
+
+ struct msm_drm_thread event_thread[MAX_CRTCS];
+
+ unsigned int num_bridges;
+ struct drm_bridge *bridges[MAX_BRIDGES];
+
+ /* VRAM carveout, used when no IOMMU: */
+ struct {
+ unsigned long size;
+ dma_addr_t paddr;
+ /* NOTE: mm managed at the page level, size is in # of pages
+ * and position mm_node->start is in # of pages:
+ */
+ struct drm_mm mm;
+ spinlock_t lock; /* Protects drm_mm node allocation/removal */
+ } vram;
+
+ struct notifier_block vmap_notifier;
+ struct shrinker shrinker;
+
+ struct drm_atomic_state *pm_state;
+
+ /**
+ * hangcheck_period: For hang detection, in ms
+ *
+ * Note that in practice, a submit/job will get at least two hangcheck
+ * periods, due to checking for progress being implemented as simply
+ * "have the CP position registers changed since last time?"
+ */
+ unsigned int hangcheck_period;
+
+ /**
+ * disable_err_irq:
+ *
+ * Disable handling of GPU hw error interrupts, to force fallback to
+ * sw hangcheck timer. Written (via debugfs) by igt tests to test
+ * the sw hangcheck mechanism.
+ */
+ bool disable_err_irq;
+};
+
+struct msm_format {
+ uint32_t pixel_format;
+};
+
+struct msm_pending_timer;
+
+int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
+ struct msm_kms *kms, int crtc_idx);
+void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer);
+void msm_atomic_commit_tail(struct drm_atomic_state *state);
+struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
+void msm_atomic_state_clear(struct drm_atomic_state *state);
+void msm_atomic_state_free(struct drm_atomic_state *state);
+
+int msm_crtc_enable_vblank(struct drm_crtc *crtc);
+void msm_crtc_disable_vblank(struct drm_crtc *crtc);
+
+int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+
+struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev);
+bool msm_use_mmu(struct drm_device *dev);
+
+int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+#ifdef CONFIG_DEBUG_FS
+unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan);
+#endif
+
+void msm_gem_shrinker_init(struct drm_device *dev);
+void msm_gem_shrinker_cleanup(struct drm_device *dev);
+
+int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
+int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
+void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
+struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach, struct sg_table *sg);
+int msm_gem_prime_pin(struct drm_gem_object *obj);
+void msm_gem_prime_unpin(struct drm_gem_object *obj);
+
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace, bool needs_dirtyfb);
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace, bool needed_dirtyfb);
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace, int plane);
+struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
+const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
+struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
+struct drm_framebuffer *msm_alloc_stolen_fb(struct drm_device *dev,
+ int w, int h, int p, uint32_t format);
+
+struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
+void msm_fbdev_free(struct drm_device *dev);
+
+struct hdmi;
+#ifdef CONFIG_DRM_MSM_HDMI
+int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
+ struct drm_encoder *encoder);
+void __init msm_hdmi_register(void);
+void __exit msm_hdmi_unregister(void);
+#else
+static inline int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
+ struct drm_encoder *encoder)
+{
+ return -EINVAL;
+}
+static inline void __init msm_hdmi_register(void) {}
+static inline void __exit msm_hdmi_unregister(void) {}
+#endif
+
+struct msm_dsi;
+#ifdef CONFIG_DRM_MSM_DSI
+int dsi_dev_attach(struct platform_device *pdev);
+void dsi_dev_detach(struct platform_device *pdev);
+void __init msm_dsi_register(void);
+void __exit msm_dsi_unregister(void);
+int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
+ struct drm_encoder *encoder);
+void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi);
+bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi);
+bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi);
+bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi);
+struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi);
+#else
+static inline void __init msm_dsi_register(void)
+{
+}
+static inline void __exit msm_dsi_unregister(void)
+{
+}
+static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
+ struct drm_device *dev,
+ struct drm_encoder *encoder)
+{
+ return -EINVAL;
+}
+static inline void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi)
+{
+}
+static inline bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
+{
+ return false;
+}
+static inline bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi)
+{
+ return false;
+}
+static inline bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi)
+{
+ return false;
+}
+
+static inline struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
+{
+ return NULL;
+}
+#endif
+
+#ifdef CONFIG_DRM_MSM_DP
+int __init msm_dp_register(void);
+void __exit msm_dp_unregister(void);
+int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
+ struct drm_encoder *encoder);
+void msm_dp_irq_postinstall(struct msm_dp *dp_display);
+void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display);
+
+void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor);
+bool msm_dp_wide_bus_available(const struct msm_dp *dp_display);
+
+#else
+static inline int __init msm_dp_register(void)
+{
+ return -EINVAL;
+}
+static inline void __exit msm_dp_unregister(void)
+{
+}
+static inline int msm_dp_modeset_init(struct msm_dp *dp_display,
+ struct drm_device *dev,
+ struct drm_encoder *encoder)
+{
+ return -EINVAL;
+}
+
+static inline void msm_dp_irq_postinstall(struct msm_dp *dp_display)
+{
+}
+
+static inline void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display)
+{
+}
+
+static inline void msm_dp_debugfs_init(struct msm_dp *dp_display,
+ struct drm_minor *minor)
+{
+}
+
+static inline bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
+{
+ return false;
+}
+
+#endif
+
+#ifdef CONFIG_DRM_MSM_MDP4
+void msm_mdp4_register(void);
+void msm_mdp4_unregister(void);
+#else
+static inline void msm_mdp4_register(void) {}
+static inline void msm_mdp4_unregister(void) {}
+#endif
+
+#ifdef CONFIG_DRM_MSM_MDP5
+void msm_mdp_register(void);
+void msm_mdp_unregister(void);
+#else
+static inline void msm_mdp_register(void) {}
+static inline void msm_mdp_unregister(void) {}
+#endif
+
+#ifdef CONFIG_DRM_MSM_DPU
+void msm_dpu_register(void);
+void msm_dpu_unregister(void);
+#else
+static inline void msm_dpu_register(void) {}
+static inline void msm_dpu_unregister(void) {}
+#endif
+
+#ifdef CONFIG_DRM_MSM_MDSS
+void msm_mdss_register(void);
+void msm_mdss_unregister(void);
+#else
+static inline void msm_mdss_register(void) {}
+static inline void msm_mdss_unregister(void) {}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
+int msm_debugfs_late_init(struct drm_device *dev);
+int msm_rd_debugfs_init(struct drm_minor *minor);
+void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
+__printf(3, 4)
+void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
+ const char *fmt, ...);
+int msm_perf_debugfs_init(struct drm_minor *minor);
+void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
+#else
+static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
+__printf(3, 4)
+static inline void msm_rd_dump_submit(struct msm_rd_state *rd,
+ struct msm_gem_submit *submit,
+ const char *fmt, ...) {}
+static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
+static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
+#endif
+
+struct clk *msm_clk_get(struct platform_device *pdev, const char *name);
+
+struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
+ const char *name);
+void __iomem *msm_ioremap(struct platform_device *pdev, const char *name);
+void __iomem *msm_ioremap_size(struct platform_device *pdev, const char *name,
+ phys_addr_t *size);
+void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name);
+
+struct icc_path *msm_icc_get(struct device *dev, const char *name);
+
+#define msm_writel(data, addr) writel((data), (addr))
+#define msm_readl(addr) readl((addr))
+
+static inline void msm_rmw(void __iomem *addr, u32 mask, u32 or)
+{
+ u32 val = msm_readl(addr);
+
+ val &= ~mask;
+ msm_writel(val | or, addr);
+}
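+
+/*
+ * e.g. updating one field of a register without disturbing its neighbors;
+ * FOO_MODE__MASK/__SHIFT are hypothetical generated-header names following
+ * the FIELD() convention below:
+ *
+ *   msm_rmw(base + REG_FOO, FOO_MODE__MASK, 2 << FOO_MODE__SHIFT);
+ */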
+
+/**
+ * struct msm_hrtimer_work - a helper to combine an hrtimer with kthread_work
+ *
+ * @timer: hrtimer to control when the kthread work is triggered
+ * @work: the kthread work
+ * @worker: the kthread worker the work will be scheduled on
+ */
+struct msm_hrtimer_work {
+ struct hrtimer timer;
+ struct kthread_work work;
+ struct kthread_worker *worker;
+};
+
+void msm_hrtimer_queue_work(struct msm_hrtimer_work *work,
+ ktime_t wakeup_time,
+ enum hrtimer_mode mode);
+void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
+ struct kthread_worker *worker,
+ kthread_work_func_t fn,
+ clockid_t clock_id,
+ enum hrtimer_mode mode);
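+
+/*
+ * Typical usage sketch (names hypothetical): init once, then arm from any
+ * context; the work function runs on the given kthread_worker rather than
+ * in hrtimer (irq) context:
+ *
+ *   msm_hrtimer_work_init(&x->work, worker, x_work_fn,
+ *			   CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ *   msm_hrtimer_queue_work(&x->work, wakeup_time, HRTIMER_MODE_ABS);
+ */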
+
+#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
+#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
+
+static inline int align_pitch(int width, int bpp)
+{
+ int bytespp = (bpp + 7) / 8;
+ /* adreno needs pitch aligned to 32 pixels: */
+ return bytespp * ALIGN(width, 32);
+}
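+
+/*
+ * e.g. align_pitch(1080, 32): bytespp = 4, ALIGN(1080, 32) = 1088,
+ * giving a pitch of 4352 bytes.
+ */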
+
+/* for the generated headers: */
+#define INVALID_IDX(idx) ({BUG(); 0;})
+#define fui(x) ({BUG(); 0;})
+#define _mesa_float_to_half(x) ({BUG(); 0;})
+
+
+#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)
+
+/* for conditionally setting boolean flag(s): */
+#define COND(bool, val) ((bool) ? (val) : 0)
+
+static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
+{
+ ktime_t now = ktime_get();
+ s64 remaining_jiffies;
+
+ if (ktime_compare(*timeout, now) < 0) {
+ remaining_jiffies = 0;
+ } else {
+ ktime_t rem = ktime_sub(*timeout, now);
+ remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
+ }
+
+ return clamp(remaining_jiffies, 0LL, (s64)INT_MAX);
+}
+
+/* Driver helpers */
+
+extern const struct component_master_ops msm_drm_ops;
+
+int msm_pm_prepare(struct device *dev);
+void msm_pm_complete(struct device *dev);
+
+int msm_drv_probe(struct device *dev,
+ int (*kms_init)(struct drm_device *dev));
+void msm_drv_shutdown(struct platform_device *pdev);
+
+
+#endif /* __MSM_DRV_H__ */
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
new file mode 100644
index 000000000..e3f61c39d
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_gem.h"
+
+struct msm_framebuffer {
+ struct drm_framebuffer base;
+ const struct msm_format *format;
+
+	/* Count of attached planes which need dirtyfb: */
+ refcount_t dirtyfb;
+
+ /* Framebuffer per-plane address, if pinned, else zero: */
+ uint64_t iova[DRM_FORMAT_MAX_PLANES];
+ atomic_t prepare_count;
+};
+#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
+
+static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
+
+static int msm_framebuffer_dirtyfb(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned int flags,
+ unsigned int color, struct drm_clip_rect *clips,
+ unsigned int num_clips)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+
+ /* If this fb is not used on any display requiring pixel data to be
+ * flushed, then skip dirtyfb
+ */
+ if (refcount_read(&msm_fb->dirtyfb) == 1)
+ return 0;
+
+ return drm_atomic_helper_dirtyfb(fb, file_priv, flags, color,
+ clips, num_clips);
+}
+
+static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
+ .create_handle = drm_gem_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
+ .dirty = msm_framebuffer_dirtyfb,
+};
+
+#ifdef CONFIG_DEBUG_FS
+void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
+{
+ struct msm_gem_stats stats = {};
+ int i, n = fb->format->num_planes;
+
+ seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
+ fb->width, fb->height, (char *)&fb->format->format,
+ drm_framebuffer_read_refcount(fb), fb->base.id);
+
+ for (i = 0; i < n; i++) {
+ seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
+ i, fb->offsets[i], fb->pitches[i]);
+ msm_gem_describe(fb->obj[i], m, &stats);
+ }
+}
+#endif
+
+/* prepare/pin all the fb's bo's for scanout.
+ */
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace,
+ bool needs_dirtyfb)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ int ret, i, n = fb->format->num_planes;
+
+ if (needs_dirtyfb)
+ refcount_inc(&msm_fb->dirtyfb);
+
+ atomic_inc(&msm_fb->prepare_count);
+
+ for (i = 0; i < n; i++) {
+ ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &msm_fb->iova[i]);
+ drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)",
+ fb->base.id, i, msm_fb->iova[i], ret);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
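+
+/*
+ * Sketch of the expected pairing (as done from the kms plane code): every
+ * successful prepare is balanced by a cleanup with the same arguments, and
+ * the cached iovas are cleared once prepare_count drops back to zero:
+ *
+ *   msm_framebuffer_prepare(fb, aspace, needs_dirtyfb);  // .prepare_fb
+ *   ... scanout; msm_framebuffer_iova() is valid here ...
+ *   msm_framebuffer_cleanup(fb, aspace, needs_dirtyfb);  // .cleanup_fb
+ */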
+
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace,
+ bool needed_dirtyfb)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ int i, n = fb->format->num_planes;
+
+ if (needed_dirtyfb)
+ refcount_dec(&msm_fb->dirtyfb);
+
+ for (i = 0; i < n; i++)
+ msm_gem_unpin_iova(fb->obj[i], aspace);
+
+ if (!atomic_dec_return(&msm_fb->prepare_count))
+ memset(msm_fb->iova, 0, sizeof(msm_fb->iova));
+}
+
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace, int plane)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ return msm_fb->iova[plane] + fb->offsets[plane];
+}
+
+struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
+{
+ return drm_gem_fb_get_obj(fb, plane);
+}
+
+const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ return msm_fb->format;
+}
+
+struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ const struct drm_format_info *info = drm_get_format_info(dev,
+ mode_cmd);
+ struct drm_gem_object *bos[4] = {0};
+ struct drm_framebuffer *fb;
+ int ret, i, n = info->num_planes;
+
+ for (i = 0; i < n; i++) {
+ bos[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]);
+ if (!bos[i]) {
+ ret = -ENXIO;
+ goto out_unref;
+ }
+ }
+
+ fb = msm_framebuffer_init(dev, mode_cmd, bos);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
+ goto out_unref;
+ }
+
+ return fb;
+
+out_unref:
+ for (i = 0; i < n; i++)
+ drm_gem_object_put(bos[i]);
+ return ERR_PTR(ret);
+}
+
+static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
+{
+ const struct drm_format_info *info = drm_get_format_info(dev,
+ mode_cmd);
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ struct msm_framebuffer *msm_fb = NULL;
+ struct drm_framebuffer *fb;
+ const struct msm_format *format;
+ int ret, i, n;
+
+ drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%4.4s)",
+ mode_cmd, mode_cmd->width, mode_cmd->height,
+ (char *)&mode_cmd->pixel_format);
+
+ n = info->num_planes;
+ format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
+ mode_cmd->modifier[0]);
+ if (!format) {
+ DRM_DEV_ERROR(dev->dev, "unsupported pixel format: %4.4s\n",
+ (char *)&mode_cmd->pixel_format);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ msm_fb = kzalloc(sizeof(*msm_fb), GFP_KERNEL);
+ if (!msm_fb) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ fb = &msm_fb->base;
+
+ msm_fb->format = format;
+
+ if (n > ARRAY_SIZE(fb->obj)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ for (i = 0; i < n; i++) {
+ unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
+ unsigned int min_size;
+
+ min_size = (height - 1) * mode_cmd->pitches[i]
+ + width * info->cpp[i]
+ + mode_cmd->offsets[i];
+
+ if (bos[i]->size < min_size) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ msm_fb->base.obj[i] = bos[i];
+ }
+
+ drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+
+ ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "framebuffer init failed: %d\n", ret);
+ goto fail;
+ }
+
+ refcount_set(&msm_fb->dirtyfb, 1);
+
+ drm_dbg_state(dev, "create: FB ID: %d (%p)", fb->base.id, fb);
+
+ return fb;
+
+fail:
+ kfree(msm_fb);
+
+ return ERR_PTR(ret);
+}
+
+struct drm_framebuffer *
+msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format)
+{
+ struct drm_mode_fb_cmd2 mode_cmd = {
+ .pixel_format = format,
+ .width = w,
+ .height = h,
+ .pitches = { p },
+ };
+ struct drm_gem_object *bo;
+ struct drm_framebuffer *fb;
+ int size;
+
+ /* allocate backing bo */
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ DBG("allocating %d bytes for fb %d", size, dev->primary->index);
+ bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC | MSM_BO_STOLEN);
+ if (IS_ERR(bo)) {
+ dev_warn(dev->dev, "could not allocate stolen bo\n");
+ /* try regular bo: */
+ bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
+ }
+ if (IS_ERR(bo)) {
+ DRM_DEV_ERROR(dev->dev, "failed to allocate buffer object\n");
+ return ERR_CAST(bo);
+ }
+
+ msm_gem_object_set_name(bo, "stolenfb");
+
+ fb = msm_framebuffer_init(dev, &mode_cmd, &bo);
+ if (IS_ERR(fb)) {
+ DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
+ /* note: if fb creation failed, we can't rely on fb destroy
+ * to unref the bo:
+ */
+ drm_gem_object_put(bo);
+ return ERR_CAST(fb);
+ }
+
+ return fb;
+}
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
new file mode 100644
index 000000000..d4a9b501e
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_aperture.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_prime.h>
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_kms.h"
+
+static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma);
+
+/*
+ * fbdev funcs, to implement legacy fbdev interface on top of drm driver
+ */
+
+#define to_msm_fbdev(x) container_of(x, struct msm_fbdev, base)
+
+struct msm_fbdev {
+ struct drm_fb_helper base;
+ struct drm_framebuffer *fb;
+};
+
+static const struct fb_ops msm_fb_ops = {
+ .owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
+
+ /* Note: to properly handle manual update displays, we wrap the
+ * basic fbdev ops which write to the framebuffer
+ */
+ .fb_read = drm_fb_helper_sys_read,
+ .fb_write = drm_fb_helper_sys_write,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
+ .fb_mmap = msm_fbdev_mmap,
+};
+
+static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
+ struct msm_fbdev *fbdev = to_msm_fbdev(helper);
+ struct drm_gem_object *bo = msm_framebuffer_bo(fbdev->fb, 0);
+
+ return drm_gem_prime_mmap(bo, vma);
+}
+
+static int msm_fbdev_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct msm_fbdev *fbdev = to_msm_fbdev(helper);
+ struct drm_device *dev = helper->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_gem_object *bo;
+ struct fb_info *fbi = NULL;
+ uint64_t paddr;
+ uint32_t format;
+ int ret, pitch;
+
+ format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
+
+ DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
+ sizes->surface_height, sizes->surface_bpp,
+ sizes->fb_width, sizes->fb_height);
+
+ pitch = align_pitch(sizes->surface_width, sizes->surface_bpp);
+ fb = msm_alloc_stolen_fb(dev, sizes->surface_width,
+ sizes->surface_height, pitch, format);
+
+ if (IS_ERR(fb)) {
+ DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
+ return PTR_ERR(fb);
+ }
+
+ bo = msm_framebuffer_bo(fb, 0);
+
+ /*
+ * NOTE: if we can be guaranteed to be able to map buffer
+ * in panic (ie. lock-safe, etc) we could avoid pinning the
+ * buffer now:
+ */
+ ret = msm_gem_get_and_pin_iova(bo, priv->kms->aspace, &paddr);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret);
+ goto fail;
+ }
+
+ fbi = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(fbi)) {
+ DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n");
+ ret = PTR_ERR(fbi);
+ goto fail;
+ }
+
+ DBG("fbi=%p, dev=%p", fbi, dev);
+
+ fbdev->fb = fb;
+ helper->fb = fb;
+
+ fbi->fbops = &msm_fb_ops;
+
+ drm_fb_helper_fill_info(fbi, helper, sizes);
+
+ dev->mode_config.fb_base = paddr;
+
+ fbi->screen_base = msm_gem_get_vaddr(bo);
+ if (IS_ERR(fbi->screen_base)) {
+ ret = PTR_ERR(fbi->screen_base);
+ goto fail;
+ }
+ fbi->screen_size = bo->size;
+ fbi->fix.smem_start = paddr;
+ fbi->fix.smem_len = bo->size;
+
+ DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
+ DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
+
+ return 0;
+
+fail:
+ drm_framebuffer_remove(fb);
+ return ret;
+}
+
+static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
+ .fb_probe = msm_fbdev_create,
+};
+
+/* initialize fbdev helper */
+struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_fbdev *fbdev = NULL;
+ struct drm_fb_helper *helper;
+ int ret;
+
+ fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev)
+ goto fail;
+
+ helper = &fbdev->base;
+
+ drm_fb_helper_prepare(dev, helper, &msm_fb_helper_funcs);
+
+ ret = drm_fb_helper_init(dev, helper);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "could not init fbdev: ret=%d\n", ret);
+ goto fail;
+ }
+
+ /* the fw fb could be anywhere in memory */
+ ret = drm_aperture_remove_framebuffers(dev->driver);
+ if (ret)
+ goto fini;
+
+ ret = drm_fb_helper_initial_config(helper, 32);
+ if (ret)
+ goto fini;
+
+ priv->fbdev = helper;
+
+ return helper;
+
+fini:
+ drm_fb_helper_fini(helper);
+fail:
+ kfree(fbdev);
+ return NULL;
+}
+
+void msm_fbdev_free(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_fb_helper *helper = priv->fbdev;
+ struct msm_fbdev *fbdev;
+
+ DBG();
+
+ drm_fb_helper_unregister_fbi(helper);
+
+ drm_fb_helper_fini(helper);
+
+ fbdev = to_msm_fbdev(priv->fbdev);
+
+ /* this will free the backing object */
+ if (fbdev->fb) {
+ struct drm_gem_object *bo =
+ msm_framebuffer_bo(fbdev->fb, 0);
+ msm_gem_put_vaddr(bo);
+ drm_framebuffer_remove(fbdev->fb);
+ }
+
+ kfree(fbdev);
+
+ priv->fbdev = NULL;
+}
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
new file mode 100644
index 000000000..56641408e
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013-2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/dma-fence.h>
+
+#include "msm_drv.h"
+#include "msm_fence.h"
+
+
+struct msm_fence_context *
+msm_fence_context_alloc(struct drm_device *dev, volatile uint32_t *fenceptr,
+ const char *name)
+{
+ struct msm_fence_context *fctx;
+ static int index = 0;
+
+ fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return ERR_PTR(-ENOMEM);
+
+ fctx->dev = dev;
+ strscpy(fctx->name, name, sizeof(fctx->name));
+ fctx->context = dma_fence_context_alloc(1);
+ fctx->index = index++;
+ fctx->fenceptr = fenceptr;
+ spin_lock_init(&fctx->spinlock);
+
+ /*
+ * Start out close to the 32b fence rollover point, so we can
+ * catch bugs with fence comparisons.
+ */
+ fctx->last_fence = 0xffffff00;
+ fctx->completed_fence = fctx->last_fence;
+ *fctx->fenceptr = fctx->last_fence;
+
+ return fctx;
+}
+
+void msm_fence_context_free(struct msm_fence_context *fctx)
+{
+ kfree(fctx);
+}
+
+bool msm_fence_completed(struct msm_fence_context *fctx, uint32_t fence)
+{
+ /*
+ * Note: Check completed_fence first, as fenceptr is in a write-combine
+ * mapping, so it will be more expensive to read.
+ */
+ return (int32_t)(fctx->completed_fence - fence) >= 0 ||
+ (int32_t)(*fctx->fenceptr - fence) >= 0;
+}
+
+/* called from irq handler and workqueue (in recover path) */
+void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fctx->spinlock, flags);
+ if (fence_after(fence, fctx->completed_fence))
+ fctx->completed_fence = fence;
+ spin_unlock_irqrestore(&fctx->spinlock, flags);
+}
+
+struct msm_fence {
+ struct dma_fence base;
+ struct msm_fence_context *fctx;
+};
+
+static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
+{
+ return container_of(fence, struct msm_fence, base);
+}
+
+static const char *msm_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "msm";
+}
+
+static const char *msm_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct msm_fence *f = to_msm_fence(fence);
+ return f->fctx->name;
+}
+
+static bool msm_fence_signaled(struct dma_fence *fence)
+{
+ struct msm_fence *f = to_msm_fence(fence);
+ return msm_fence_completed(f->fctx, f->base.seqno);
+}
+
+static const struct dma_fence_ops msm_fence_ops = {
+ .get_driver_name = msm_fence_get_driver_name,
+ .get_timeline_name = msm_fence_get_timeline_name,
+ .signaled = msm_fence_signaled,
+};
+
+struct dma_fence *
+msm_fence_alloc(struct msm_fence_context *fctx)
+{
+ struct msm_fence *f;
+
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return ERR_PTR(-ENOMEM);
+
+ f->fctx = fctx;
+
+ dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
+ fctx->context, ++fctx->last_fence);
+
+ return &f->base;
+}
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
new file mode 100644
index 000000000..7f1798c54
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013-2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __MSM_FENCE_H__
+#define __MSM_FENCE_H__
+
+#include "msm_drv.h"
+
+/**
+ * struct msm_fence_context - fence context for gpu
+ *
+ * Each ringbuffer has a single fence context, with the GPU writing an
+ * incrementing fence seqno at the end of each submit
+ */
+struct msm_fence_context {
+ struct drm_device *dev;
+ /** name: human readable name for fence timeline */
+ char name[32];
+ /** context: see dma_fence_context_alloc() */
+ unsigned context;
+	/** index: like context, but local to this driver's msm_fence_contexts */
+ unsigned index;
+
+ /**
+ * last_fence:
+ *
+ * Last assigned fence, incremented each time a fence is created
+ * on this fence context. If last_fence == completed_fence,
+ * there is no remaining pending work
+ */
+ uint32_t last_fence;
+
+ /**
+ * completed_fence:
+ *
+ * The last completed fence, updated from the CPU after interrupt
+ * from GPU
+ */
+ uint32_t completed_fence;
+
+ /**
+ * fenceptr:
+ *
+ * The address that the GPU directly writes with completed fence
+ * seqno. This can be ahead of completed_fence. We can peek at
+ * this to see if a fence has already signaled but the CPU hasn't
+ * gotten around to handling the irq and updating completed_fence
+ */
+ volatile uint32_t *fenceptr;
+
+ spinlock_t spinlock;
+};
+
+struct msm_fence_context * msm_fence_context_alloc(struct drm_device *dev,
+ volatile uint32_t *fenceptr, const char *name);
+void msm_fence_context_free(struct msm_fence_context *fctx);
+
+bool msm_fence_completed(struct msm_fence_context *fctx, uint32_t fence);
+void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
+
+struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx);
+
+static inline bool
+fence_before(uint32_t a, uint32_t b)
+{
+ return (int32_t)(a - b) < 0;
+}
+
+static inline bool
+fence_after(uint32_t a, uint32_t b)
+{
+ return (int32_t)(a - b) > 0;
+}
+
+#endif
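
A note on the two comparison helpers above: fence_before() and
fence_after() reinterpret the unsigned 32-bit difference as signed, so
the ordering stays correct across seqno rollover as long as the two
values are less than 2^31 apart. This is exactly the wrap that
msm_fence_context_alloc() deliberately seeds last_fence near
(0xffffff00) in order to flush out comparison bugs. A minimal
standalone sketch (plain C, nothing driver-specific assumed)
demonstrating the property:

    #include <assert.h>
    #include <stdint.h>

    /* Same wrap-safe comparison as fence_after() above. */
    static int fence_after(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) > 0;
    }

    int main(void)
    {
            uint32_t last = 0xffffff00;   /* near the 32b rollover */

            /* 0xffffff00 + 0x200 wraps to 0x00000100, yet still
             * compares as "after" the pre-wrap seqno: */
            assert(fence_after(last + 0x200, last));
            assert(!fence_after(last, last + 0x200));
            return 0;
    }
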
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
new file mode 100644
index 000000000..1dee0d18a
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -0,0 +1,1294 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/dma-map-ops.h>
+#include <linux/vmalloc.h>
+#include <linux/spinlock.h>
+#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
+#include <linux/pfn_t.h>
+
+#include <drm/drm_prime.h>
+
+#include "msm_drv.h"
+#include "msm_fence.h"
+#include "msm_gem.h"
+#include "msm_gpu.h"
+#include "msm_mmu.h"
+
+static void update_lru(struct drm_gem_object *obj);
+
+static dma_addr_t physaddr(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
+ priv->vram.paddr;
+}
+
+static bool use_pages(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ return !msm_obj->vram_node;
+}
+
+/*
+ * Cache sync: this is a bit over-complicated, in order to fit the
+ * dma-mapping API. Really, the GPU cache is out of scope here (it is
+ * handled on the cmdstream)
+ * and all we need to do is invalidate newly allocated pages before
+ * mapping to CPU as uncached/writecombine.
+ *
+ * On top of this, we have the added headache, that depending on
+ * display generation, the display's iommu may be wired up to either
+ * the toplevel drm device (mdss), or to the mdp sub-node, meaning
+ * that here we either have dma-direct or iommu ops.
+ *
+ * Let this be a cautionary tale of abstraction gone wrong.
+ */
+
+static void sync_for_device(struct msm_gem_object *msm_obj)
+{
+ struct device *dev = msm_obj->base.dev->dev;
+
+ dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
+}
+
+static void sync_for_cpu(struct msm_gem_object *msm_obj)
+{
+ struct device *dev = msm_obj->base.dev->dev;
+
+ dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
+}
+
+/* allocate pages from VRAM carveout, used when no IOMMU: */
+static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ dma_addr_t paddr;
+ struct page **p;
+ int ret, i;
+
+ p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock(&priv->vram.lock);
+ ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
+ spin_unlock(&priv->vram.lock);
+ if (ret) {
+ kvfree(p);
+ return ERR_PTR(ret);
+ }
+
+ paddr = physaddr(obj);
+ for (i = 0; i < npages; i++) {
+ p[i] = pfn_to_page(__phys_to_pfn(paddr));
+ paddr += PAGE_SIZE;
+ }
+
+ return p;
+}
+
+static struct page **get_pages(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ msm_gem_assert_locked(obj);
+
+ if (!msm_obj->pages) {
+ struct drm_device *dev = obj->dev;
+ struct page **p;
+ int npages = obj->size >> PAGE_SHIFT;
+
+ if (use_pages(obj))
+ p = drm_gem_get_pages(obj);
+ else
+ p = get_pages_vram(obj, npages);
+
+ if (IS_ERR(p)) {
+ DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
+ PTR_ERR(p));
+ return p;
+ }
+
+ msm_obj->pages = p;
+
+ msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
+ if (IS_ERR(msm_obj->sgt)) {
+ void *ptr = ERR_CAST(msm_obj->sgt);
+
+ DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
+ msm_obj->sgt = NULL;
+ return ptr;
+ }
+
+ /* For non-cached buffers, ensure the new pages are clean
+ * because display controller, GPU, etc. are not coherent:
+ */
+ if (msm_obj->flags & MSM_BO_WC)
+ sync_for_device(msm_obj);
+
+ update_lru(obj);
+ }
+
+ return msm_obj->pages;
+}
+
+static void put_pages_vram(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_drm_private *priv = obj->dev->dev_private;
+
+ spin_lock(&priv->vram.lock);
+ drm_mm_remove_node(msm_obj->vram_node);
+ spin_unlock(&priv->vram.lock);
+
+ kvfree(msm_obj->pages);
+}
+
+static void put_pages(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ if (msm_obj->pages) {
+ if (msm_obj->sgt) {
+ /* For non-cached buffers, ensure the new
+ * pages are clean because display controller,
+ * GPU, etc. are not coherent:
+ */
+ if (msm_obj->flags & MSM_BO_WC)
+ sync_for_cpu(msm_obj);
+
+ sg_free_table(msm_obj->sgt);
+ kfree(msm_obj->sgt);
+ msm_obj->sgt = NULL;
+ }
+
+ if (use_pages(obj))
+ drm_gem_put_pages(obj, msm_obj->pages, true, false);
+ else
+ put_pages_vram(obj);
+
+ msm_obj->pages = NULL;
+ update_lru(obj);
+ }
+}
+
+static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **p;
+
+ msm_gem_assert_locked(obj);
+
+ if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+ return ERR_PTR(-EBUSY);
+ }
+
+ p = get_pages(obj);
+ if (!IS_ERR(p)) {
+ to_msm_bo(obj)->pin_count++;
+ update_lru(obj);
+ }
+
+ return p;
+}
+
+struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
+{
+ struct page **p;
+
+ msm_gem_lock(obj);
+ p = msm_gem_pin_pages_locked(obj);
+ msm_gem_unlock(obj);
+
+ return p;
+}
+
+void msm_gem_unpin_pages(struct drm_gem_object *obj)
+{
+ msm_gem_lock(obj);
+ msm_gem_unpin_locked(obj);
+ msm_gem_unlock(obj);
+}
+
+static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
+{
+ if (msm_obj->flags & MSM_BO_WC)
+ return pgprot_writecombine(prot);
+ return prot;
+}
+
+static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **pages;
+ unsigned long pfn;
+ pgoff_t pgoff;
+ int err;
+ vm_fault_t ret;
+
+ /*
+ * vm_ops.open/drm_gem_mmap_obj and close get and put
+	 * a reference on obj. So, we don't need to hold one here.
+ */
+ err = msm_gem_lock_interruptible(obj);
+ if (err) {
+ ret = VM_FAULT_NOPAGE;
+ goto out;
+ }
+
+ if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+ msm_gem_unlock(obj);
+ return VM_FAULT_SIGBUS;
+ }
+
+ /* make sure we have pages attached now */
+ pages = get_pages(obj);
+ if (IS_ERR(pages)) {
+ ret = vmf_error(PTR_ERR(pages));
+ goto out_unlock;
+ }
+
+ /* We don't use vmf->pgoff since that has the fake offset: */
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+
+ pfn = page_to_pfn(pages[pgoff]);
+
+ VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
+ pfn, pfn << PAGE_SHIFT);
+
+ ret = vmf_insert_pfn(vma, vmf->address, pfn);
+
+out_unlock:
+ msm_gem_unlock(obj);
+out:
+ return ret;
+}
+
+/** get mmap offset */
+static uint64_t mmap_offset(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ int ret;
+
+ msm_gem_assert_locked(obj);
+
+ /* Make it mmapable */
+ ret = drm_gem_create_mmap_offset(obj);
+
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
+ return 0;
+ }
+
+ return drm_vma_node_offset_addr(&obj->vma_node);
+}
+
+uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
+{
+ uint64_t offset;
+
+ msm_gem_lock(obj);
+ offset = mmap_offset(obj);
+ msm_gem_unlock(obj);
+ return offset;
+}
+
+static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *vma;
+
+ msm_gem_assert_locked(obj);
+
+ vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ if (!vma)
+ return ERR_PTR(-ENOMEM);
+
+ vma->aspace = aspace;
+
+ list_add_tail(&vma->list, &msm_obj->vmas);
+
+ return vma;
+}
+
+static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *vma;
+
+ msm_gem_assert_locked(obj);
+
+ list_for_each_entry(vma, &msm_obj->vmas, list) {
+ if (vma->aspace == aspace)
+ return vma;
+ }
+
+ return NULL;
+}
+
+static void del_vma(struct msm_gem_vma *vma)
+{
+ if (!vma)
+ return;
+
+ list_del(&vma->list);
+ kfree(vma);
+}
+
+/*
+ * If close is true, this also closes the VMA (releasing the allocated
+ * iova range) in addition to removing the iommu mapping. In the eviction
+ * case (!close), we keep the iova allocated, but only remove the iommu
+ * mapping.
+ */
+static void
+put_iova_spaces(struct drm_gem_object *obj, bool close)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *vma;
+
+ msm_gem_assert_locked(obj);
+
+ list_for_each_entry(vma, &msm_obj->vmas, list) {
+ if (vma->aspace) {
+ msm_gem_purge_vma(vma->aspace, vma);
+ if (close)
+ msm_gem_close_vma(vma->aspace, vma);
+ }
+ }
+}
+
+/* Called with msm_obj locked */
+static void
+put_iova_vmas(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *vma, *tmp;
+
+ msm_gem_assert_locked(obj);
+
+ list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
+ del_vma(vma);
+ }
+}
+
+static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace,
+ u64 range_start, u64 range_end)
+{
+ struct msm_gem_vma *vma;
+
+ msm_gem_assert_locked(obj);
+
+ vma = lookup_vma(obj, aspace);
+
+ if (!vma) {
+ int ret;
+
+ vma = add_vma(obj, aspace);
+ if (IS_ERR(vma))
+ return vma;
+
+ ret = msm_gem_init_vma(aspace, vma, obj->size,
+ range_start, range_end);
+ if (ret) {
+ del_vma(vma);
+ return ERR_PTR(ret);
+ }
+ } else {
+ GEM_WARN_ON(vma->iova < range_start);
+ GEM_WARN_ON((vma->iova + obj->size) > range_end);
+ }
+
+ return vma;
+}
+
+int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **pages;
+ int ret, prot = IOMMU_READ;
+
+ if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
+ prot |= IOMMU_WRITE;
+
+ if (msm_obj->flags & MSM_BO_MAP_PRIV)
+ prot |= IOMMU_PRIV;
+
+ if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
+ prot |= IOMMU_CACHE;
+
+ msm_gem_assert_locked(obj);
+
+ if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
+ return -EBUSY;
+
+ pages = msm_gem_pin_pages_locked(obj);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);
+ if (ret)
+ msm_gem_unpin_locked(obj);
+
+ return ret;
+}
+
+void msm_gem_unpin_locked(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ msm_gem_assert_locked(obj);
+
+ msm_obj->pin_count--;
+ GEM_WARN_ON(msm_obj->pin_count < 0);
+
+ update_lru(obj);
+}
+
+struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ return get_vma_locked(obj, aspace, 0, U64_MAX);
+}
+
+static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova,
+ u64 range_start, u64 range_end)
+{
+ struct msm_gem_vma *vma;
+ int ret;
+
+ msm_gem_assert_locked(obj);
+
+ vma = get_vma_locked(obj, aspace, range_start, range_end);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ ret = msm_gem_pin_vma_locked(obj, vma);
+ if (!ret)
+ *iova = vma->iova;
+
+ return ret;
+}
+
+/*
+ * Get the iova and pin it. Should have a matching put.
+ * Limits the iova to the specified range (in pages).
+ */
+int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova,
+ u64 range_start, u64 range_end)
+{
+ int ret;
+
+ msm_gem_lock(obj);
+ ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
+ msm_gem_unlock(obj);
+
+ return ret;
+}
+
+/* get iova and pin it. Should have a matching put */
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+ return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
+}
+
+/*
+ * Get an iova but don't pin it. Doesn't need a put because iovas are currently
+ * valid for the life of the object
+ */
+int msm_gem_get_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+ struct msm_gem_vma *vma;
+ int ret = 0;
+
+ msm_gem_lock(obj);
+ vma = get_vma_locked(obj, aspace, 0, U64_MAX);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ } else {
+ *iova = vma->iova;
+ }
+ msm_gem_unlock(obj);
+
+ return ret;
+}
+
+static int clear_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_vma *vma = lookup_vma(obj, aspace);
+
+ if (!vma)
+ return 0;
+
+ if (msm_gem_vma_inuse(vma))
+ return -EBUSY;
+
+ msm_gem_purge_vma(vma->aspace, vma);
+ msm_gem_close_vma(vma->aspace, vma);
+ del_vma(vma);
+
+ return 0;
+}
+
+/*
+ * Get the requested iova but don't pin it. Fails if the requested iova is
+ * not available. Doesn't need a put because iovas are currently valid for
+ * the life of the object.
+ *
+ * Setting an iova of zero will clear the vma.
+ */
+int msm_gem_set_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t iova)
+{
+ int ret = 0;
+
+ msm_gem_lock(obj);
+ if (!iova) {
+ ret = clear_iova(obj, aspace);
+ } else {
+ struct msm_gem_vma *vma;
+ vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ } else if (GEM_WARN_ON(vma->iova != iova)) {
+ clear_iova(obj, aspace);
+ ret = -EBUSY;
+ }
+ }
+ msm_gem_unlock(obj);
+
+ return ret;
+}
+
+/*
+ * Unpin an iova by updating the reference counts. The memory isn't actually
+ * purged until something else (shrinker, mm_notifier, destroy, etc.) decides
+ * to get rid of it.
+ */
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_vma *vma;
+
+ msm_gem_lock(obj);
+ vma = lookup_vma(obj, aspace);
+ if (!GEM_WARN_ON(!vma)) {
+ msm_gem_unpin_vma(vma);
+ msm_gem_unpin_locked(obj);
+ }
+ msm_gem_unlock(obj);
+}
+
+int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ args->pitch = align_pitch(args->width, args->bpp);
+ args->size = PAGE_ALIGN(args->pitch * args->height);
+ return msm_gem_new_handle(dev, file, args->size,
+ MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
+}
+
+int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ /* GEM does all our handle to object mapping */
+ obj = drm_gem_object_lookup(file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto fail;
+ }
+
+ *offset = msm_gem_mmap_offset(obj);
+
+ drm_gem_object_put(obj);
+
+fail:
+ return ret;
+}
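
msm_gem_dumb_create() and msm_gem_dumb_map_offset() above implement the
generic DRM dumb-buffer UAPI, so a KMS client needs nothing
driver-specific to get a CPU-mapped scanout buffer. A hedged userspace
sketch of the usual create / map-offset / mmap sequence (error handling
trimmed; fd is assumed to be an open msm DRM node):

    #include <stdint.h>
    #include <sys/mman.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    /* Sketch: allocate a 32bpp dumb buffer and CPU-map it. */
    static void *map_dumb_buffer(int fd, uint32_t w, uint32_t h)
    {
            struct drm_mode_create_dumb create = {
                    .width = w, .height = h, .bpp = 32,
            };
            struct drm_mode_map_dumb mreq = { 0 };
            void *map;

            if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
                    return NULL;

            mreq.handle = create.handle;
            if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq))
                    return NULL;

            /* mreq.offset is the fake offset handed out by
             * msm_gem_dumb_map_offset() via drm_gem_create_mmap_offset(). */
            map = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, mreq.offset);
            return (map == MAP_FAILED) ? NULL : map;
    }
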
+
+static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int ret = 0;
+
+ msm_gem_assert_locked(obj);
+
+ if (obj->import_attach)
+ return ERR_PTR(-ENODEV);
+
+ if (GEM_WARN_ON(msm_obj->madv > madv)) {
+ DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
+ msm_obj->madv, madv);
+ return ERR_PTR(-EBUSY);
+ }
+
+ /* increment vmap_count *before* vmap() call, so shrinker can
+ * check vmap_count (is_vunmapable()) outside of msm_obj lock.
+ * This guarantees that we won't try to msm_gem_vunmap() this
+ * same object from within the vmap() call (while we already
+ * hold msm_obj lock)
+ */
+ msm_obj->vmap_count++;
+
+ if (!msm_obj->vaddr) {
+ struct page **pages = get_pages(obj);
+ if (IS_ERR(pages)) {
+ ret = PTR_ERR(pages);
+ goto fail;
+ }
+ msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+ VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
+ if (msm_obj->vaddr == NULL) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ update_lru(obj);
+ }
+
+ return msm_obj->vaddr;
+
+fail:
+ msm_obj->vmap_count--;
+ return ERR_PTR(ret);
+}
+
+void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
+{
+ return get_vaddr(obj, MSM_MADV_WILLNEED);
+}
+
+void *msm_gem_get_vaddr(struct drm_gem_object *obj)
+{
+ void *ret;
+
+ msm_gem_lock(obj);
+ ret = msm_gem_get_vaddr_locked(obj);
+ msm_gem_unlock(obj);
+
+ return ret;
+}
+
+/*
+ * Don't use this! It is for the very special case of dumping
+ * submits from GPU hangs or faults, where the bo may already
+ * be MSM_MADV_DONTNEED, but we know the buffer is still on the
+ * active list.
+ */
+void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
+{
+ return get_vaddr(obj, __MSM_MADV_PURGED);
+}
+
+void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ msm_gem_assert_locked(obj);
+ GEM_WARN_ON(msm_obj->vmap_count < 1);
+
+ msm_obj->vmap_count--;
+}
+
+void msm_gem_put_vaddr(struct drm_gem_object *obj)
+{
+ msm_gem_lock(obj);
+ msm_gem_put_vaddr_locked(obj);
+ msm_gem_unlock(obj);
+}
+
+/* Update madvise status; returns true if the object has not been
+ * purged, else false.
+ */
+int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ msm_gem_lock(obj);
+
+ if (msm_obj->madv != __MSM_MADV_PURGED)
+ msm_obj->madv = madv;
+
+ madv = msm_obj->madv;
+
+ /* If the obj is inactive, we might need to move it
+ * between inactive lists
+ */
+ update_lru(obj);
+
+ msm_gem_unlock(obj);
+
+ return (madv != __MSM_MADV_PURGED);
+}
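
msm_gem_madvise() is driven from userspace via the MSM_GEM_MADVISE
ioctl (see include/uapi/drm/msm_drm.h); the ioctl's retained field
reports this function's return value, i.e. whether the pages survived.
A sketch of the typical flip back to WILLNEED on reuse, assuming an
open msm DRM fd and a valid GEM handle (the uapi header path may vary
with your libdrm install):

    #include <stdint.h>
    #include <xf86drm.h>
    #include <drm/msm_drm.h>

    /* Sketch: re-claim a buffer previously marked DONTNEED. If the
     * shrinker purged it in the meantime, retained comes back 0 and
     * the contents must be regenerated. */
    static int reclaim_buffer(int fd, uint32_t handle, int *retained)
    {
            struct drm_msm_gem_madvise req = {
                    .handle = handle,
                    .madv = MSM_MADV_WILLNEED,
            };
            int ret = drmIoctl(fd, DRM_IOCTL_MSM_GEM_MADVISE, &req);

            if (ret)
                    return ret;
            *retained = req.retained;
            return 0;
    }
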
+
+void msm_gem_purge(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ msm_gem_assert_locked(obj);
+ GEM_WARN_ON(!is_purgeable(msm_obj));
+
+ /* Get rid of any iommu mapping(s): */
+ put_iova_spaces(obj, true);
+
+ msm_gem_vunmap(obj);
+
+ drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
+
+ put_pages(obj);
+
+ put_iova_vmas(obj);
+
+ msm_obj->madv = __MSM_MADV_PURGED;
+
+ drm_gem_free_mmap_offset(obj);
+
+	/* Our goal here is to return as much of the memory back to the
+	 * system as possible, since we are called from the OOM path.
+ * To do this we must instruct the shmfs to drop all of its
+ * backing pages, *now*.
+ */
+ shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
+
+ invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
+ 0, (loff_t)-1);
+}
+
+/*
+ * Unpin the backing pages and make them available to be swapped out.
+ */
+void msm_gem_evict(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ msm_gem_assert_locked(obj);
+ GEM_WARN_ON(is_unevictable(msm_obj));
+
+ /* Get rid of any iommu mapping(s): */
+ put_iova_spaces(obj, false);
+
+ drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
+
+ put_pages(obj);
+}
+
+void msm_gem_vunmap(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ msm_gem_assert_locked(obj);
+
+ if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
+ return;
+
+ vunmap(msm_obj->vaddr);
+ msm_obj->vaddr = NULL;
+}
+
+static void update_lru(struct drm_gem_object *obj)
+{
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ msm_gem_assert_locked(&msm_obj->base);
+
+ if (!msm_obj->pages) {
+ GEM_WARN_ON(msm_obj->pin_count);
+ GEM_WARN_ON(msm_obj->vmap_count);
+
+ drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
+ } else if (msm_obj->pin_count || msm_obj->vmap_count) {
+ drm_gem_lru_move_tail(&priv->lru.pinned, obj);
+ } else if (msm_obj->madv == MSM_MADV_WILLNEED) {
+ drm_gem_lru_move_tail(&priv->lru.willneed, obj);
+ } else {
+ GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
+
+ drm_gem_lru_move_tail(&priv->lru.dontneed, obj);
+ }
+}
+
+bool msm_gem_active(struct drm_gem_object *obj)
+{
+ msm_gem_assert_locked(obj);
+
+ if (to_msm_bo(obj)->pin_count)
+ return true;
+
+ return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
+}
+
+int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
+{
+ bool write = !!(op & MSM_PREP_WRITE);
+ unsigned long remain =
+ op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
+ long ret;
+
+ ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
+ true, remain);
+ if (ret == 0)
+ return remain == 0 ? -EBUSY : -ETIMEDOUT;
+ else if (ret < 0)
+ return ret;
+
+ /* TODO cache maintenance */
+
+ return 0;
+}
+
+int msm_gem_cpu_fini(struct drm_gem_object *obj)
+{
+ /* TODO cache maintenance */
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
+ struct msm_gem_stats *stats)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct dma_resv *robj = obj->resv;
+ struct msm_gem_vma *vma;
+ uint64_t off = drm_vma_node_start(&obj->vma_node);
+ const char *madv;
+
+ msm_gem_lock(obj);
+
+ stats->all.count++;
+ stats->all.size += obj->size;
+
+ if (msm_gem_active(obj)) {
+ stats->active.count++;
+ stats->active.size += obj->size;
+ }
+
+ if (msm_obj->pages) {
+ stats->resident.count++;
+ stats->resident.size += obj->size;
+ }
+
+ switch (msm_obj->madv) {
+ case __MSM_MADV_PURGED:
+ stats->purged.count++;
+ stats->purged.size += obj->size;
+ madv = " purged";
+ break;
+ case MSM_MADV_DONTNEED:
+ stats->purgeable.count++;
+ stats->purgeable.size += obj->size;
+ madv = " purgeable";
+ break;
+ case MSM_MADV_WILLNEED:
+ default:
+ madv = "";
+ break;
+ }
+
+ seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
+ msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
+ obj->name, kref_read(&obj->refcount),
+ off, msm_obj->vaddr);
+
+ seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
+
+ if (!list_empty(&msm_obj->vmas)) {
+
+ seq_puts(m, " vmas:");
+
+ list_for_each_entry(vma, &msm_obj->vmas, list) {
+ const char *name, *comm;
+ if (vma->aspace) {
+ struct msm_gem_address_space *aspace = vma->aspace;
+ struct task_struct *task =
+ get_pid_task(aspace->pid, PIDTYPE_PID);
+ if (task) {
+ comm = kstrdup(task->comm, GFP_KERNEL);
+ put_task_struct(task);
+ } else {
+ comm = NULL;
+ }
+ name = aspace->name;
+ } else {
+ name = comm = NULL;
+ }
+ seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
+ name, comm ? ":" : "", comm ? comm : "",
+ vma->aspace, vma->iova,
+ vma->mapped ? "mapped" : "unmapped",
+ msm_gem_vma_inuse(vma));
+ kfree(comm);
+ }
+
+ seq_puts(m, "\n");
+ }
+
+ dma_resv_describe(robj, m);
+ msm_gem_unlock(obj);
+}
+
+void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
+{
+ struct msm_gem_stats stats = {};
+ struct msm_gem_object *msm_obj;
+
+ seq_puts(m, " flags id ref offset kaddr size madv name\n");
+ list_for_each_entry(msm_obj, list, node) {
+ struct drm_gem_object *obj = &msm_obj->base;
+ seq_puts(m, " ");
+ msm_gem_describe(obj, m, &stats);
+ }
+
+ seq_printf(m, "Total: %4d objects, %9zu bytes\n",
+ stats.all.count, stats.all.size);
+ seq_printf(m, "Active: %4d objects, %9zu bytes\n",
+ stats.active.count, stats.active.size);
+ seq_printf(m, "Resident: %4d objects, %9zu bytes\n",
+ stats.resident.count, stats.resident.size);
+ seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
+ stats.purgeable.count, stats.purgeable.size);
+ seq_printf(m, "Purged: %4d objects, %9zu bytes\n",
+ stats.purged.count, stats.purged.size);
+}
+#endif
+
+/* don't call directly! Use drm_gem_object_put() */
+static void msm_gem_free_object(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ mutex_lock(&priv->obj_lock);
+ list_del(&msm_obj->node);
+ mutex_unlock(&priv->obj_lock);
+
+ put_iova_spaces(obj, true);
+
+ if (obj->import_attach) {
+ GEM_WARN_ON(msm_obj->vaddr);
+
+ /* Don't drop the pages for imported dmabuf, as they are not
+ * ours, just free the array we allocated:
+ */
+ kvfree(msm_obj->pages);
+
+ put_iova_vmas(obj);
+
+ drm_prime_gem_destroy(obj, msm_obj->sgt);
+ } else {
+ msm_gem_vunmap(obj);
+ put_pages(obj);
+ put_iova_vmas(obj);
+ }
+
+ drm_gem_object_release(obj);
+
+ kfree(msm_obj);
+}
+
+static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
+
+ return 0;
+}
+
+/* convenience method to construct a GEM buffer object, and userspace handle */
+int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ uint32_t size, uint32_t flags, uint32_t *handle,
+ char *name)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = msm_gem_new(dev, size, flags);
+
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (name)
+ msm_gem_object_set_name(obj, "%s", name);
+
+ ret = drm_gem_handle_create(file, obj, handle);
+
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_put(obj);
+
+ return ret;
+}
+
+static const struct vm_operations_struct vm_ops = {
+ .fault = msm_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs msm_gem_object_funcs = {
+ .free = msm_gem_free_object,
+ .pin = msm_gem_prime_pin,
+ .unpin = msm_gem_prime_unpin,
+ .get_sg_table = msm_gem_prime_get_sg_table,
+ .vmap = msm_gem_prime_vmap,
+ .vunmap = msm_gem_prime_vunmap,
+ .mmap = msm_gem_object_mmap,
+ .vm_ops = &vm_ops,
+};
+
+static int msm_gem_new_impl(struct drm_device *dev,
+ uint32_t size, uint32_t flags,
+ struct drm_gem_object **obj)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gem_object *msm_obj;
+
+ switch (flags & MSM_BO_CACHE_MASK) {
+ case MSM_BO_CACHED:
+ case MSM_BO_WC:
+ break;
+ case MSM_BO_CACHED_COHERENT:
+ if (priv->has_cached_coherent)
+ break;
+ fallthrough;
+ default:
+ DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
+ (flags & MSM_BO_CACHE_MASK));
+ return -EINVAL;
+ }
+
+ msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
+ if (!msm_obj)
+ return -ENOMEM;
+
+ msm_obj->flags = flags;
+ msm_obj->madv = MSM_MADV_WILLNEED;
+
+ INIT_LIST_HEAD(&msm_obj->node);
+ INIT_LIST_HEAD(&msm_obj->vmas);
+
+ *obj = &msm_obj->base;
+ (*obj)->funcs = &msm_gem_object_funcs;
+
+ return 0;
+}
+
+struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gem_object *msm_obj;
+ struct drm_gem_object *obj = NULL;
+ bool use_vram = false;
+ int ret;
+
+ size = PAGE_ALIGN(size);
+
+ if (!msm_use_mmu(dev))
+ use_vram = true;
+ else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
+ use_vram = true;
+
+ if (GEM_WARN_ON(use_vram && !priv->vram.size))
+ return ERR_PTR(-EINVAL);
+
+	/* Disallow zero-sized objects as they make the underlying
+ * infrastructure grumpy
+ */
+ if (size == 0)
+ return ERR_PTR(-EINVAL);
+
+ ret = msm_gem_new_impl(dev, size, flags, &obj);
+ if (ret)
+ return ERR_PTR(ret);
+
+ msm_obj = to_msm_bo(obj);
+
+ if (use_vram) {
+ struct msm_gem_vma *vma;
+ struct page **pages;
+
+ drm_gem_private_object_init(dev, obj, size);
+
+ msm_gem_lock(obj);
+
+ vma = add_vma(obj, NULL);
+ msm_gem_unlock(obj);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto fail;
+ }
+
+ to_msm_bo(obj)->vram_node = &vma->node;
+
+ msm_gem_lock(obj);
+ pages = get_pages(obj);
+ msm_gem_unlock(obj);
+ if (IS_ERR(pages)) {
+ ret = PTR_ERR(pages);
+ goto fail;
+ }
+
+ vma->iova = physaddr(obj);
+ } else {
+ ret = drm_gem_object_init(dev, obj, size);
+ if (ret)
+ goto fail;
+ /*
+ * Our buffers are kept pinned, so allocating them from the
+ * MOVABLE zone is a really bad idea, and conflicts with CMA.
+		 * See the comments above new_inode() for why this is required _and_
+ * expected if you're going to pin these pages.
+ */
+ mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
+ }
+
+ drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
+
+ mutex_lock(&priv->obj_lock);
+ list_add_tail(&msm_obj->node, &priv->objects);
+ mutex_unlock(&priv->obj_lock);
+
+ return obj;
+
+fail:
+ drm_gem_object_put(obj);
+ return ERR_PTR(ret);
+}
+
+struct drm_gem_object *msm_gem_import(struct drm_device *dev,
+ struct dma_buf *dmabuf, struct sg_table *sgt)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gem_object *msm_obj;
+ struct drm_gem_object *obj;
+ uint32_t size;
+ int ret, npages;
+
+ /* if we don't have IOMMU, don't bother pretending we can import: */
+ if (!msm_use_mmu(dev)) {
+ DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ size = PAGE_ALIGN(dmabuf->size);
+
+ ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_gem_private_object_init(dev, obj, size);
+
+ npages = size / PAGE_SIZE;
+
+ msm_obj = to_msm_bo(obj);
+ msm_gem_lock(obj);
+ msm_obj->sgt = sgt;
+ msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!msm_obj->pages) {
+ msm_gem_unlock(obj);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
+ if (ret) {
+ msm_gem_unlock(obj);
+ goto fail;
+ }
+
+ msm_gem_unlock(obj);
+
+ drm_gem_lru_move_tail(&priv->lru.pinned, obj);
+
+ mutex_lock(&priv->obj_lock);
+ list_add_tail(&msm_obj->node, &priv->objects);
+ mutex_unlock(&priv->obj_lock);
+
+ return obj;
+
+fail:
+ drm_gem_object_put(obj);
+ return ERR_PTR(ret);
+}
+
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova)
+{
+ void *vaddr;
+ struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
+ int ret;
+
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ if (iova) {
+ ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
+ if (ret)
+ goto err;
+ }
+
+ vaddr = msm_gem_get_vaddr(obj);
+ if (IS_ERR(vaddr)) {
+ msm_gem_unpin_iova(obj, aspace);
+ ret = PTR_ERR(vaddr);
+ goto err;
+ }
+
+ if (bo)
+ *bo = obj;
+
+ return vaddr;
+err:
+ drm_gem_object_put(obj);
+
+ return ERR_PTR(ret);
+
+}
+
+void msm_gem_kernel_put(struct drm_gem_object *bo,
+ struct msm_gem_address_space *aspace)
+{
+ if (IS_ERR_OR_NULL(bo))
+ return;
+
+ msm_gem_put_vaddr(bo);
+ msm_gem_unpin_iova(bo, aspace);
+ drm_gem_object_put(bo);
+}
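
msm_gem_kernel_new() and msm_gem_kernel_put() above bundle the
allocate, pin-iova and vmap steps used for driver-internal buffers
(ringbuffers, GMU memory, crashdump state and the like). A reduced
in-kernel sketch of the pairing; my_aspace is a placeholder for
whichever address space the caller already owns:

    /* Sketch: one page of GPU-visible, CPU-mapped scratch memory. */
    static int example_scratch_buf(struct drm_device *dev,
                                   struct msm_gem_address_space *my_aspace)
    {
            struct drm_gem_object *bo;
            uint64_t iova;
            uint32_t *vaddr;

            vaddr = msm_gem_kernel_new(dev, PAGE_SIZE, MSM_BO_WC,
                                       my_aspace, &bo, &iova);
            if (IS_ERR(vaddr))
                    return PTR_ERR(vaddr);

            msm_gem_object_set_name(bo, "scratch");
            vaddr[0] = 0xdeadbeef;          /* GPU sees this at 'iova' */

            /* One call drops the vmap, the iova pin and the reference: */
            msm_gem_kernel_put(bo, my_aspace);
            return 0;
    }
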
+
+void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(bo);
+ va_list ap;
+
+ if (!fmt)
+ return;
+
+ va_start(ap, fmt);
+ vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
+ va_end(ap);
+}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
new file mode 100644
index 000000000..c4844cf3a
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -0,0 +1,336 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __MSM_GEM_H__
+#define __MSM_GEM_H__
+
+#include <linux/kref.h>
+#include <linux/dma-resv.h>
+#include "drm/gpu_scheduler.h"
+#include "msm_drv.h"
+
+/* Make all GEM-related WARN_ON()s ratelimited; when things go wrong they
+ * tend to go wrong thousands of times in a short timespan.
+ */
+#define GEM_WARN_ON(x) WARN_RATELIMIT(x, "%s", __stringify(x))
+
+/* Additional internal-use only BO flags: */
+#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
+#define MSM_BO_MAP_PRIV 0x20000000 /* use IOMMU_PRIV when mapping */
+
+struct msm_gem_address_space {
+ const char *name;
+	/* NOTE: mm is managed at the page level; the size and the position
+	 * (mm_node->start) are both in units of pages:
+ */
+ struct drm_mm mm;
+ spinlock_t lock; /* Protects drm_mm node allocation/removal */
+ struct msm_mmu *mmu;
+ struct kref kref;
+
+ /* For address spaces associated with a specific process, this
+ * will be non-NULL:
+ */
+ struct pid *pid;
+
+ /* @faults: the number of GPU hangs associated with this address space */
+ int faults;
+
+ /** @va_start: lowest possible address to allocate */
+ uint64_t va_start;
+
+ /** @va_size: the size of the address space (in bytes) */
+ uint64_t va_size;
+};
+
+struct msm_gem_address_space *
+msm_gem_address_space_get(struct msm_gem_address_space *aspace);
+
+void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
+
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
+ u64 va_start, u64 size);
+
+struct msm_fence_context;
+
+struct msm_gem_vma {
+ struct drm_mm_node node;
+ uint64_t iova;
+ struct msm_gem_address_space *aspace;
+ struct list_head list; /* node in msm_gem_object::vmas */
+ bool mapped;
+ int inuse;
+ uint32_t fence_mask;
+ uint32_t fence[MSM_GPU_MAX_RINGS];
+ struct msm_fence_context *fctx[MSM_GPU_MAX_RINGS];
+};
+
+int msm_gem_init_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, int size,
+ u64 range_start, u64 range_end);
+bool msm_gem_vma_inuse(struct msm_gem_vma *vma);
+void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma);
+void msm_gem_unpin_vma(struct msm_gem_vma *vma);
+void msm_gem_unpin_vma_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx);
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, int prot,
+ struct sg_table *sgt, int size);
+void msm_gem_close_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma);
+
+struct msm_gem_object {
+ struct drm_gem_object base;
+
+ uint32_t flags;
+
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ uint8_t madv;
+
+ /**
+ * count of active vmap'ing
+ */
+ uint8_t vmap_count;
+
+ /**
+ * Node in list of all objects (mainly for debugfs, protected by
+ * priv->obj_lock
+ */
+ struct list_head node;
+
+ struct page **pages;
+ struct sg_table *sgt;
+ void *vaddr;
+
+ struct list_head vmas; /* list of msm_gem_vma */
+
+ /* For physically contiguous buffers. Used when we don't have
+ * an IOMMU. Also used for stolen/splashscreen buffer.
+ */
+ struct drm_mm_node *vram_node;
+
+ char name[32]; /* Identifier to print for the debugfs files */
+
+ int pin_count;
+};
+#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
+
+uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
+int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
+void msm_gem_unpin_locked(struct drm_gem_object *obj);
+struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
+int msm_gem_get_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova);
+int msm_gem_set_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t iova);
+int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova,
+ u64 range_start, u64 range_end);
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova);
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
+struct page **msm_gem_pin_pages(struct drm_gem_object *obj);
+void msm_gem_unpin_pages(struct drm_gem_object *obj);
+int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
+void *msm_gem_get_vaddr(struct drm_gem_object *obj);
+void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
+void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
+void msm_gem_put_vaddr(struct drm_gem_object *obj);
+int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
+bool msm_gem_active(struct drm_gem_object *obj);
+int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
+int msm_gem_cpu_fini(struct drm_gem_object *obj);
+int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ uint32_t size, uint32_t flags, uint32_t *handle, char *name);
+struct drm_gem_object *msm_gem_new(struct drm_device *dev,
+ uint32_t size, uint32_t flags);
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova);
+void msm_gem_kernel_put(struct drm_gem_object *bo,
+ struct msm_gem_address_space *aspace);
+struct drm_gem_object *msm_gem_import(struct drm_device *dev,
+ struct dma_buf *dmabuf, struct sg_table *sgt);
+__printf(2, 3)
+void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
+
+#ifdef CONFIG_DEBUG_FS
+struct msm_gem_stats {
+ struct {
+ unsigned count;
+ size_t size;
+ } all, active, resident, purgeable, purged;
+};
+
+void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
+ struct msm_gem_stats *stats);
+void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
+#endif
+
+static inline void
+msm_gem_lock(struct drm_gem_object *obj)
+{
+ dma_resv_lock(obj->resv, NULL);
+}
+
+static inline int
+msm_gem_lock_interruptible(struct drm_gem_object *obj)
+{
+ return dma_resv_lock_interruptible(obj->resv, NULL);
+}
+
+static inline void
+msm_gem_unlock(struct drm_gem_object *obj)
+{
+ dma_resv_unlock(obj->resv);
+}
+
+static inline void
+msm_gem_assert_locked(struct drm_gem_object *obj)
+{
+ /*
+	 * Destroying the object is a special case: msm_gem_free_object()
+ * calls many things that WARN_ON if the obj lock is not held. But
+ * acquiring the obj lock in msm_gem_free_object() can cause a
+ * locking order inversion between reservation_ww_class_mutex and
+ * fs_reclaim.
+ *
+ * This deadlock is not actually possible, because no one should
+ * be already holding the lock when msm_gem_free_object() is called.
+ * Unfortunately lockdep is not aware of this detail. So when the
+ * refcount drops to zero, we pretend it is already locked.
+ */
+ lockdep_assert_once(
+ (kref_read(&obj->refcount) == 0) ||
+ (lockdep_is_held(&obj->resv->lock.base) != LOCK_STATE_NOT_HELD)
+ );
+}
+
+/* imported/exported objects are not purgeable: */
+static inline bool is_unpurgeable(struct msm_gem_object *msm_obj)
+{
+ return msm_obj->base.import_attach || msm_obj->pin_count;
+}
+
+static inline bool is_purgeable(struct msm_gem_object *msm_obj)
+{
+ return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
+ !is_unpurgeable(msm_obj);
+}
+
+static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
+{
+ msm_gem_assert_locked(&msm_obj->base);
+ return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
+}
+
+static inline bool is_unevictable(struct msm_gem_object *msm_obj)
+{
+ return is_unpurgeable(msm_obj) || msm_obj->vaddr;
+}
+
+void msm_gem_purge(struct drm_gem_object *obj);
+void msm_gem_evict(struct drm_gem_object *obj);
+void msm_gem_vunmap(struct drm_gem_object *obj);
+
+/* Created per submit ioctl, to track bos, cmdstream buffers, etc.,
+ * associated with the cmdstream submission for synchronization (and
+ * to make it easier to unwind when things go wrong, etc.).
+ */
+struct msm_gem_submit {
+ struct drm_sched_job base;
+ struct kref ref;
+ struct drm_device *dev;
+ struct msm_gpu *gpu;
+ struct msm_gem_address_space *aspace;
+ struct list_head node; /* node in ring submit list */
+ struct ww_acquire_ctx ticket;
+ uint32_t seqno; /* Sequence number of the submit on the ring */
+
+ /* Hw fence, which is created when the scheduler executes the job, and
+ * is signaled when the hw finishes (via seqno write from cmdstream)
+ */
+ struct dma_fence *hw_fence;
+
+ /* Userspace visible fence, which is signaled by the scheduler after
+ * the hw_fence is signaled.
+ */
+ struct dma_fence *user_fence;
+
+ int fence_id; /* key into queue->fence_idr */
+ struct msm_gpu_submitqueue *queue;
+ struct pid *pid; /* submitting process */
+ bool fault_dumped; /* Limit devcoredump dumping to one per submit */
+ bool valid; /* true if no cmdstream patching needed */
+ bool in_rb; /* "sudo" mode, copy cmds into RB */
+ struct msm_ringbuffer *ring;
+ unsigned int nr_cmds;
+ unsigned int nr_bos;
+	u32 ident;	   /* An "identifier" for the submit, for logging */
+ struct {
+ uint32_t type;
+ uint32_t size; /* in dwords */
+ uint64_t iova;
+ uint32_t offset;/* in dwords */
+ uint32_t idx; /* cmdstream buffer idx in bos[] */
+ uint32_t nr_relocs;
+ struct drm_msm_gem_submit_reloc *relocs;
+ } *cmd; /* array of size nr_cmds */
+ struct {
+/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
+#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */
+#define BO_LOCKED 0x4000 /* obj lock is held */
+#define BO_OBJ_PINNED 0x2000 /* obj (pages) is pinned and on active list */
+#define BO_VMA_PINNED 0x1000 /* vma (virtual address) is pinned */
+ uint32_t flags;
+ union {
+ struct msm_gem_object *obj;
+ uint32_t handle;
+ };
+ uint64_t iova;
+ struct msm_gem_vma *vma;
+ } bos[];
+};
+
+static inline struct msm_gem_submit *to_msm_submit(struct drm_sched_job *job)
+{
+ return container_of(job, struct msm_gem_submit, base);
+}
+
+void __msm_gem_submit_destroy(struct kref *kref);
+
+static inline void msm_gem_submit_get(struct msm_gem_submit *submit)
+{
+ kref_get(&submit->ref);
+}
+
+static inline void msm_gem_submit_put(struct msm_gem_submit *submit)
+{
+ kref_put(&submit->ref, __msm_gem_submit_destroy);
+}
+
+void msm_submit_retire(struct msm_gem_submit *submit);
+
+/* helper to determine if a buffer in a submit should be dumped, used for both
+ * devcoredump and debugfs cmdstream dumping:
+ */
+static inline bool
+should_dump(struct msm_gem_submit *submit, int idx)
+{
+ extern bool rd_full;
+ return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
+}
+
+#endif /* __MSM_GEM_H__ */
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
new file mode 100644
index 000000000..c1d91863d
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/dma-buf.h>
+
+#include <drm/drm_prime.h>
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+
+int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ int ret;
+
+ /* Ensure the mmap offset is initialized. We lazily initialize it,
+	 * so if it has not first been mmap'd directly as a GEM object, the
+	 * mmap offset will not already be initialized.
+ */
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ return ret;
+
+ return drm_gem_prime_mmap(obj, vma);
+}
+
+struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int npages = obj->size >> PAGE_SHIFT;
+
+ if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
+ return ERR_PTR(-ENOMEM);
+
+ return drm_prime_pages_to_sg(obj->dev, msm_obj->pages, npages);
+}
+
+int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+ void *vaddr;
+
+ vaddr = msm_gem_get_vaddr(obj);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+ iosys_map_set_vaddr(map, vaddr);
+
+ return 0;
+}
+
+void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+ msm_gem_put_vaddr(obj);
+}
+
+struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach, struct sg_table *sg)
+{
+ return msm_gem_import(dev, attach->dmabuf, sg);
+}
+
+int msm_gem_prime_pin(struct drm_gem_object *obj)
+{
+ if (!obj->import_attach)
+ msm_gem_pin_pages(obj);
+ return 0;
+}
+
+void msm_gem_prime_unpin(struct drm_gem_object *obj)
+{
+ if (!obj->import_attach)
+ msm_gem_unpin_pages(obj);
+}
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
new file mode 100644
index 000000000..31f054c90
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/sched/mm.h>
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_gpu.h"
+#include "msm_gpu_trace.h"
+
+/* Disabled by default for now, until it has had more testing on the different
+ * iommu combinations that can be paired with the driver:
+ */
+static bool enable_eviction = false;
+MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
+module_param(enable_eviction, bool, 0600);
+
+static bool can_swap(void)
+{
+ return enable_eviction && get_nr_swap_pages() > 0;
+}
+
+static bool can_block(struct shrink_control *sc)
+{
+ if (!(sc->gfp_mask & __GFP_DIRECT_RECLAIM))
+ return false;
+ return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM);
+}
+
+static unsigned long
+msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct msm_drm_private *priv =
+ container_of(shrinker, struct msm_drm_private, shrinker);
+ unsigned count = priv->lru.dontneed.count;
+
+ if (can_swap())
+ count += priv->lru.willneed.count;
+
+ return count;
+}
+
+static bool
+purge(struct drm_gem_object *obj)
+{
+ if (!is_purgeable(to_msm_bo(obj)))
+ return false;
+
+ if (msm_gem_active(obj))
+ return false;
+
+ msm_gem_purge(obj);
+
+ return true;
+}
+
+static bool
+evict(struct drm_gem_object *obj)
+{
+ if (is_unevictable(to_msm_bo(obj)))
+ return false;
+
+ if (msm_gem_active(obj))
+ return false;
+
+ msm_gem_evict(obj);
+
+ return true;
+}
+
+static bool
+wait_for_idle(struct drm_gem_object *obj)
+{
+ enum dma_resv_usage usage = dma_resv_usage_rw(true);
+ return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0;
+}
+
+static bool
+active_purge(struct drm_gem_object *obj)
+{
+ if (!wait_for_idle(obj))
+ return false;
+
+ return purge(obj);
+}
+
+static bool
+active_evict(struct drm_gem_object *obj)
+{
+ if (!wait_for_idle(obj))
+ return false;
+
+ return evict(obj);
+}
+
+static unsigned long
+msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct msm_drm_private *priv =
+ container_of(shrinker, struct msm_drm_private, shrinker);
+ struct {
+ struct drm_gem_lru *lru;
+ bool (*shrink)(struct drm_gem_object *obj);
+ bool cond;
+ unsigned long freed;
+ unsigned long remaining;
+ } stages[] = {
+ /* Stages of progressively more aggressive/expensive reclaim: */
+ { &priv->lru.dontneed, purge, true },
+ { &priv->lru.willneed, evict, can_swap() },
+ { &priv->lru.dontneed, active_purge, can_block(sc) },
+ { &priv->lru.willneed, active_evict, can_swap() && can_block(sc) },
+ };
+ long nr = sc->nr_to_scan;
+ unsigned long freed = 0;
+ unsigned long remaining = 0;
+
+ for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
+ if (!stages[i].cond)
+ continue;
+ stages[i].freed =
+ drm_gem_lru_scan(stages[i].lru, nr,
+ &stages[i].remaining,
+ stages[i].shrink);
+ nr -= stages[i].freed;
+ freed += stages[i].freed;
+ remaining += stages[i].remaining;
+ }
+
+ if (freed) {
+ trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,
+ stages[1].freed, stages[2].freed,
+ stages[3].freed);
+ }
+
+ return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
+}
+
+#ifdef CONFIG_DEBUG_FS
+unsigned long
+msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct shrink_control sc = {
+ .nr_to_scan = nr_to_scan,
+ };
+ int ret;
+
+ fs_reclaim_acquire(GFP_KERNEL);
+ ret = msm_gem_shrinker_scan(&priv->shrinker, &sc);
+ fs_reclaim_release(GFP_KERNEL);
+
+ return ret;
+}
+#endif
+
+/* Since we don't know any better, let's bail after a few, and if
+ * necessary the shrinker will be invoked again. This seems better
+ * than unmapping *everything*.
+ */
+static const int vmap_shrink_limit = 15;
+
+static bool
+vmap_shrink(struct drm_gem_object *obj)
+{
+ if (!is_vunmapable(to_msm_bo(obj)))
+ return false;
+
+ msm_gem_vunmap(obj);
+
+ return true;
+}
+
+static int
+msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct msm_drm_private *priv =
+ container_of(nb, struct msm_drm_private, vmap_notifier);
+ struct drm_gem_lru *lrus[] = {
+ &priv->lru.dontneed,
+ &priv->lru.willneed,
+ &priv->lru.pinned,
+ NULL,
+ };
+ unsigned idx, unmapped = 0;
+ unsigned long remaining = 0;
+
+ for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
+ unmapped += drm_gem_lru_scan(lrus[idx],
+ vmap_shrink_limit - unmapped,
+ &remaining,
+ vmap_shrink);
+ }
+
+ *(unsigned long *)ptr += unmapped;
+
+ if (unmapped > 0)
+ trace_msm_gem_purge_vmaps(unmapped);
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * msm_gem_shrinker_init - Initialize msm shrinker
+ * @dev: drm device
+ *
+ * This function registers and sets up the msm shrinker.
+ */
+void msm_gem_shrinker_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ priv->shrinker.count_objects = msm_gem_shrinker_count;
+ priv->shrinker.scan_objects = msm_gem_shrinker_scan;
+ priv->shrinker.seeks = DEFAULT_SEEKS;
+ WARN_ON(register_shrinker(&priv->shrinker, "drm-msm_gem"));
+
+ priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
+ WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
+}
+
+/**
+ * msm_gem_shrinker_cleanup - Clean up msm shrinker
+ * @dev: drm device
+ *
+ * This function unregisters the msm shrinker.
+ */
+void msm_gem_shrinker_cleanup(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+
+ if (priv->shrinker.nr_deferred) {
+ WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
+ unregister_shrinker(&priv->shrinker);
+ }
+}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
new file mode 100644
index 000000000..c12a6ac2d
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -0,0 +1,981 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/file.h>
+#include <linux/sync_file.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_syncobj.h>
+
+#include "msm_drv.h"
+#include "msm_gpu.h"
+#include "msm_gem.h"
+#include "msm_gpu_trace.h"
+
+/*
+ * Cmdstream submission:
+ */
+
+static struct msm_gem_submit *submit_create(struct drm_device *dev,
+ struct msm_gpu *gpu,
+ struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
+ uint32_t nr_cmds)
+{
+ static atomic_t ident = ATOMIC_INIT(0);
+ struct msm_gem_submit *submit;
+ uint64_t sz;
+ int ret;
+
+ sz = struct_size(submit, bos, nr_bos) +
+ ((u64)nr_cmds * sizeof(submit->cmd[0]));
+
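+	/* sz is u64 so that, on 32-bit, a sum exceeding SIZE_MAX is still
+	 * caught by the check below (struct_size() itself saturates):
+	 */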
+ if (sz > SIZE_MAX)
+ return ERR_PTR(-ENOMEM);
+
+ submit = kzalloc(sz, GFP_KERNEL);
+ if (!submit)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_sched_job_init(&submit->base, queue->entity, queue);
+ if (ret) {
+ kfree(submit);
+ return ERR_PTR(ret);
+ }
+
+ kref_init(&submit->ref);
+ submit->dev = dev;
+ submit->aspace = queue->ctx->aspace;
+ submit->gpu = gpu;
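+	/* the cmd table lives in the same allocation, right after the bos array: */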
+ submit->cmd = (void *)&submit->bos[nr_bos];
+ submit->queue = queue;
+ submit->pid = get_pid(task_pid(current));
+ submit->ring = gpu->rb[queue->ring_nr];
+ submit->fault_dumped = false;
+
+ /* Get a unique identifier for the submission for logging purposes */
+ submit->ident = atomic_inc_return(&ident) - 1;
+
+ INIT_LIST_HEAD(&submit->node);
+
+ return submit;
+}
+
+void __msm_gem_submit_destroy(struct kref *kref)
+{
+ struct msm_gem_submit *submit =
+ container_of(kref, struct msm_gem_submit, ref);
+ unsigned i;
+
+ if (submit->fence_id) {
+ spin_lock(&submit->queue->idr_lock);
+ idr_remove(&submit->queue->fence_idr, submit->fence_id);
+ spin_unlock(&submit->queue->idr_lock);
+ }
+
+ dma_fence_put(submit->user_fence);
+ dma_fence_put(submit->hw_fence);
+
+ put_pid(submit->pid);
+ msm_submitqueue_put(submit->queue);
+
+ for (i = 0; i < submit->nr_cmds; i++)
+ kfree(submit->cmd[i].relocs);
+
+ kfree(submit);
+}
+
+static int submit_lookup_objects(struct msm_gem_submit *submit,
+ struct drm_msm_gem_submit *args, struct drm_file *file)
+{
+ unsigned i;
+ int ret = 0;
+
+ for (i = 0; i < args->nr_bos; i++) {
+ struct drm_msm_gem_submit_bo submit_bo;
+ void __user *userptr =
+ u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
+
+		/* make sure we don't have garbage flags, in case we hit
+		 * the error path before the flags are initialized:
+		 */
+ submit->bos[i].flags = 0;
+
+ if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
+ ret = -EFAULT;
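+			/* no object references have been taken yet, so reset
+			 * i to keep the cleanup path from putting any:
+			 */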
+ i = 0;
+ goto out;
+ }
+
+/* at least one of the READ or WRITE flags must be set: */
+#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
+
+ if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
+ !(submit_bo.flags & MANDATORY_FLAGS)) {
+ DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
+ ret = -EINVAL;
+ i = 0;
+ goto out;
+ }
+
+ submit->bos[i].handle = submit_bo.handle;
+ submit->bos[i].flags = submit_bo.flags;
+		/* in submit_pin_objects() we figure out if this is true: */
+ submit->bos[i].iova = submit_bo.presumed;
+ }
+
+ spin_lock(&file->table_lock);
+
+ for (i = 0; i < args->nr_bos; i++) {
+ struct drm_gem_object *obj;
+
+ /* normally use drm_gem_object_lookup(), but for bulk lookup
+ * all under single table_lock just hit object_idr directly:
+ */
+ obj = idr_find(&file->object_idr, submit->bos[i].handle);
+ if (!obj) {
+ DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ drm_gem_object_get(obj);
+
+ submit->bos[i].obj = to_msm_bo(obj);
+ }
+
+out_unlock:
+ spin_unlock(&file->table_lock);
+
+out:
+ submit->nr_bos = i;
+
+ return ret;
+}
+
+static int submit_lookup_cmds(struct msm_gem_submit *submit,
+ struct drm_msm_gem_submit *args, struct drm_file *file)
+{
+ unsigned i;
+ size_t sz;
+ int ret = 0;
+
+ for (i = 0; i < args->nr_cmds; i++) {
+ struct drm_msm_gem_submit_cmd submit_cmd;
+ void __user *userptr =
+ u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
+
+ ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* validate input from userspace: */
+ switch (submit_cmd.type) {
+ case MSM_SUBMIT_CMD_BUF:
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ break;
+ default:
+ DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
+ return -EINVAL;
+ }
+
+ if (submit_cmd.size % 4) {
+ DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
+ submit_cmd.size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ submit->cmd[i].type = submit_cmd.type;
+ submit->cmd[i].size = submit_cmd.size / 4;
+ submit->cmd[i].offset = submit_cmd.submit_offset / 4;
+ submit->cmd[i].idx = submit_cmd.submit_idx;
+ submit->cmd[i].nr_relocs = submit_cmd.nr_relocs;
+
+ userptr = u64_to_user_ptr(submit_cmd.relocs);
+
+ sz = array_size(submit_cmd.nr_relocs,
+ sizeof(struct drm_msm_gem_submit_reloc));
+ /* check for overflow: */
+ if (sz == SIZE_MAX) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL);
+ if (!submit->cmd[i].relocs) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = copy_from_user(submit->cmd[i].relocs, userptr, sz);
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
+/* Unwind bo state, according to cleanup_flags. In the success case, only
+ * the lock is dropped at the end of the submit (and active/pin ref is dropped
+ * later when the submit is retired).
+ */
+static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
+ unsigned cleanup_flags)
+{
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
+ unsigned flags = submit->bos[i].flags & cleanup_flags;
+
+ /*
+ * Clear flags bit before dropping lock, so that the msm_job_run()
+ * path isn't racing with submit_cleanup() (ie. the read/modify/
+ * write is protected by the obj lock in all paths)
+ */
+ submit->bos[i].flags &= ~cleanup_flags;
+
+ if (flags & BO_VMA_PINNED)
+ msm_gem_unpin_vma(submit->bos[i].vma);
+
+ if (flags & BO_OBJ_PINNED)
+ msm_gem_unpin_locked(obj);
+
+ if (flags & BO_LOCKED)
+ dma_resv_unlock(obj->resv);
+}
+
+static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
+{
+ unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED | BO_LOCKED;
+ submit_cleanup_bo(submit, i, cleanup_flags);
+
+ if (!(submit->bos[i].flags & BO_VALID))
+ submit->bos[i].iova = 0;
+}
+
+/* This is where we make sure all the bo's are reserved and pin'd: */
+static int submit_lock_objects(struct msm_gem_submit *submit)
+{
+ int contended, slow_locked = -1, i, ret = 0;
+
+retry:
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct msm_gem_object *msm_obj = submit->bos[i].obj;
+
+ if (slow_locked == i)
+ slow_locked = -1;
+
+ contended = i;
+
+ if (!(submit->bos[i].flags & BO_LOCKED)) {
+ ret = dma_resv_lock_interruptible(msm_obj->base.resv,
+ &submit->ticket);
+ if (ret)
+ goto fail;
+ submit->bos[i].flags |= BO_LOCKED;
+ }
+ }
+
+ ww_acquire_done(&submit->ticket);
+
+ return 0;
+
+fail:
+ if (ret == -EALREADY) {
+ DRM_ERROR("handle %u at index %u already on submit list\n",
+ submit->bos[i].handle, i);
+ ret = -EINVAL;
+ }
+
+ for (; i >= 0; i--)
+ submit_unlock_unpin_bo(submit, i);
+
+ if (slow_locked > 0)
+ submit_unlock_unpin_bo(submit, slow_locked);
+
+ if (ret == -EDEADLK) {
+ struct msm_gem_object *msm_obj = submit->bos[contended].obj;
+ /* we lost out in a seqno race, lock and retry.. */
+ ret = dma_resv_lock_slow_interruptible(msm_obj->base.resv,
+ &submit->ticket);
+ if (!ret) {
+ submit->bos[contended].flags |= BO_LOCKED;
+ slow_locked = contended;
+ goto retry;
+ }
+
+		/* Not expecting -EALREADY here; if the bo was already
+		 * locked, we should have already gotten -EALREADY from
+		 * the dma_resv_lock_interruptible() call.
+		 */
+ WARN_ON_ONCE(ret == -EALREADY);
+ }
+
+ return ret;
+}
+
+static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
+ bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
+
+ /* NOTE: _reserve_shared() must happen before
+ * _add_shared_fence(), which makes this a slightly
+ * strange place to call it. OTOH this is a
+ * convenient can-fail point to hook it in.
+ */
+ ret = dma_resv_reserve_fences(obj->resv, 1);
+ if (ret)
+ return ret;
+
+		/* exclusive fences must be ordered, so even with no_implicit
+		 * we still add dependencies for writes:
+		 */
+ if (no_implicit && !write)
+ continue;
+
+ ret = drm_sched_job_add_implicit_dependencies(&submit->base,
+ obj,
+ write);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int submit_pin_objects(struct msm_gem_submit *submit)
+{
+ int i, ret = 0;
+
+ submit->valid = true;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
+ struct msm_gem_vma *vma;
+
+ /* if locking succeeded, pin bo: */
+ vma = msm_gem_get_vma_locked(obj, submit->aspace);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ break;
+ }
+
+ ret = msm_gem_pin_vma_locked(obj, vma);
+ if (ret)
+ break;
+
+ submit->bos[i].flags |= BO_OBJ_PINNED | BO_VMA_PINNED;
+ submit->bos[i].vma = vma;
+
+ if (vma->iova == submit->bos[i].iova) {
+ submit->bos[i].flags |= BO_VALID;
+ } else {
+ submit->bos[i].iova = vma->iova;
+ /* iova changed, so address in cmdstream is not valid: */
+ submit->bos[i].flags &= ~BO_VALID;
+ submit->valid = false;
+ }
+ }
+
+ return ret;
+}
+
+static void submit_attach_object_fences(struct msm_gem_submit *submit)
+{
+ int i;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
+
+ if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
+ dma_resv_add_fence(obj->resv, submit->user_fence,
+ DMA_RESV_USAGE_WRITE);
+ else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
+ dma_resv_add_fence(obj->resv, submit->user_fence,
+ DMA_RESV_USAGE_READ);
+ }
+}
+
+static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
+ struct msm_gem_object **obj, uint64_t *iova, bool *valid)
+{
+ if (idx >= submit->nr_bos) {
+ DRM_ERROR("invalid buffer index: %u (out of %u)\n",
+ idx, submit->nr_bos);
+ return -EINVAL;
+ }
+
+ if (obj)
+ *obj = submit->bos[idx].obj;
+ if (iova)
+ *iova = submit->bos[idx].iova;
+ if (valid)
+ *valid = !!(submit->bos[idx].flags & BO_VALID);
+
+ return 0;
+}
+
+/* process the reloc's and patch up the cmdstream as needed: */
+static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
+ uint32_t offset, uint32_t nr_relocs, struct drm_msm_gem_submit_reloc *relocs)
+{
+ uint32_t i, last_offset = 0;
+ uint32_t *ptr;
+ int ret = 0;
+
+ if (!nr_relocs)
+ return 0;
+
+ if (offset % 4) {
+ DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
+ return -EINVAL;
+ }
+
+	/* For now, just map the entire thing.  Eventually we probably want
+	 * to do it page-by-page, w/ kmap() if not vmap()d..
+	 */
+ ptr = msm_gem_get_vaddr_locked(&obj->base);
+
+ if (IS_ERR(ptr)) {
+ ret = PTR_ERR(ptr);
+ DBG("failed to map: %d", ret);
+ return ret;
+ }
+
+ for (i = 0; i < nr_relocs; i++) {
+ struct drm_msm_gem_submit_reloc submit_reloc = relocs[i];
+ uint32_t off;
+ uint64_t iova;
+ bool valid;
+
+ if (submit_reloc.submit_offset % 4) {
+ DRM_ERROR("non-aligned reloc offset: %u\n",
+ submit_reloc.submit_offset);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* offset in dwords: */
+ off = submit_reloc.submit_offset / 4;
+
+ if ((off >= (obj->base.size / 4)) ||
+ (off < last_offset)) {
+ DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
+ if (ret)
+ goto out;
+
+ if (valid)
+ continue;
+
+ iova += submit_reloc.reloc_offset;
+
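+		/* negative shift values mean shift right, positive shift left: */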
+ if (submit_reloc.shift < 0)
+ iova >>= -submit_reloc.shift;
+ else
+ iova <<= submit_reloc.shift;
+
+ ptr[off] = iova | submit_reloc.or;
+
+ last_offset = off;
+ }
+
+out:
+ msm_gem_put_vaddr_locked(&obj->base);
+
+ return ret;
+}
+
+/* Cleanup submit at end of ioctl. In the error case, this also drops
+ * references, unpins, and drops active refcnt. In the non-error case,
+ * this is done when the submit is retired.
+ */
+static void submit_cleanup(struct msm_gem_submit *submit, bool error)
+{
+ unsigned cleanup_flags = BO_LOCKED;
+ unsigned i;
+
+ if (error)
+ cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ submit_cleanup_bo(submit, i, cleanup_flags);
+ if (error)
+ drm_gem_object_put(&msm_obj->base);
+ }
+}
+
+void msm_submit_retire(struct msm_gem_submit *submit)
+{
+ int i;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
+
+ drm_gem_object_put(obj);
+ }
+}
+
+struct msm_submit_post_dep {
+ struct drm_syncobj *syncobj;
+ uint64_t point;
+ struct dma_fence_chain *chain;
+};
+
+static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
+ struct drm_file *file,
+ uint64_t in_syncobjs_addr,
+ uint32_t nr_in_syncobjs,
+ size_t syncobj_stride,
+ struct msm_ringbuffer *ring)
+{
+ struct drm_syncobj **syncobjs = NULL;
+ struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
+ int ret = 0;
+ uint32_t i, j;
+
+ syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs),
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!syncobjs)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nr_in_syncobjs; ++i) {
+ uint64_t address = in_syncobjs_addr + i * syncobj_stride;
+ struct dma_fence *fence;
+
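+		/* copy at most the struct size we know about, so larger
+		 * strides from newer userspace are tolerated:
+		 */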
+ if (copy_from_user(&syncobj_desc,
+ u64_to_user_ptr(address),
+ min(syncobj_stride, sizeof(syncobj_desc)))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ if (syncobj_desc.point &&
+ !drm_core_check_feature(submit->dev, DRIVER_SYNCOBJ_TIMELINE)) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = drm_syncobj_find_fence(file, syncobj_desc.handle,
+ syncobj_desc.point, 0, &fence);
+ if (ret)
+ break;
+
+ ret = drm_sched_job_add_dependency(&submit->base, fence);
+ if (ret)
+ break;
+
+ if (syncobj_desc.flags & MSM_SUBMIT_SYNCOBJ_RESET) {
+ syncobjs[i] =
+ drm_syncobj_find(file, syncobj_desc.handle);
+ if (!syncobjs[i]) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+ }
+
+ if (ret) {
+ for (j = 0; j <= i; ++j) {
+ if (syncobjs[j])
+ drm_syncobj_put(syncobjs[j]);
+ }
+ kfree(syncobjs);
+ return ERR_PTR(ret);
+ }
+ return syncobjs;
+}
+
+static void msm_reset_syncobjs(struct drm_syncobj **syncobjs,
+ uint32_t nr_syncobjs)
+{
+ uint32_t i;
+
+ for (i = 0; syncobjs && i < nr_syncobjs; ++i) {
+ if (syncobjs[i])
+ drm_syncobj_replace_fence(syncobjs[i], NULL);
+ }
+}
+
+static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
+ struct drm_file *file,
+ uint64_t syncobjs_addr,
+ uint32_t nr_syncobjs,
+ size_t syncobj_stride)
+{
+ struct msm_submit_post_dep *post_deps;
+ struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
+ int ret = 0;
+ uint32_t i, j;
+
+ post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps),
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!post_deps)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nr_syncobjs; ++i) {
+ uint64_t address = syncobjs_addr + i * syncobj_stride;
+
+ if (copy_from_user(&syncobj_desc,
+ u64_to_user_ptr(address),
+ min(syncobj_stride, sizeof(syncobj_desc)))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ post_deps[i].point = syncobj_desc.point;
+
+ if (syncobj_desc.flags) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (syncobj_desc.point) {
+ if (!drm_core_check_feature(dev,
+ DRIVER_SYNCOBJ_TIMELINE)) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ post_deps[i].chain = dma_fence_chain_alloc();
+ if (!post_deps[i].chain) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ post_deps[i].syncobj =
+ drm_syncobj_find(file, syncobj_desc.handle);
+ if (!post_deps[i].syncobj) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ if (ret) {
+ for (j = 0; j <= i; ++j) {
+ dma_fence_chain_free(post_deps[j].chain);
+ if (post_deps[j].syncobj)
+ drm_syncobj_put(post_deps[j].syncobj);
+ }
+
+ kfree(post_deps);
+ return ERR_PTR(ret);
+ }
+
+ return post_deps;
+}
+
+static void msm_process_post_deps(struct msm_submit_post_dep *post_deps,
+ uint32_t count, struct dma_fence *fence)
+{
+ uint32_t i;
+
+ for (i = 0; post_deps && i < count; ++i) {
+ if (post_deps[i].chain) {
+ drm_syncobj_add_point(post_deps[i].syncobj,
+ post_deps[i].chain,
+ fence, post_deps[i].point);
+ post_deps[i].chain = NULL;
+ } else {
+ drm_syncobj_replace_fence(post_deps[i].syncobj,
+ fence);
+ }
+ }
+}
+
+int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_msm_gem_submit *args = data;
+ struct msm_file_private *ctx = file->driver_priv;
+ struct msm_gem_submit *submit = NULL;
+ struct msm_gpu *gpu = priv->gpu;
+ struct msm_gpu_submitqueue *queue;
+ struct msm_ringbuffer *ring;
+ struct msm_submit_post_dep *post_deps = NULL;
+ struct drm_syncobj **syncobjs_to_reset = NULL;
+ int out_fence_fd = -1;
+ bool has_ww_ticket = false;
+ unsigned i;
+ int ret;
+
+ if (!gpu)
+ return -ENXIO;
+
+ if (args->pad)
+ return -EINVAL;
+
+ if (unlikely(!ctx->aspace) && !capable(CAP_SYS_RAWIO)) {
+ DRM_ERROR_RATELIMITED("IOMMU support or CAP_SYS_RAWIO required!\n");
+ return -EPERM;
+ }
+
+	/* for now, we just have the 3d pipe.. eventually this would need to
+	 * be more clever to dispatch to the appropriate gpu module:
+	 */
+ if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
+ return -EINVAL;
+
+ if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
+ return -EINVAL;
+
+ if (args->flags & MSM_SUBMIT_SUDO) {
+ if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
+ !capable(CAP_SYS_RAWIO))
+ return -EINVAL;
+ }
+
+ queue = msm_submitqueue_get(ctx, args->queueid);
+ if (!queue)
+ return -ENOENT;
+
+ ring = gpu->rb[queue->ring_nr];
+
+ if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
+ out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+ if (out_fence_fd < 0) {
+ ret = out_fence_fd;
+ goto out_post_unlock;
+ }
+ }
+
+ submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
+ if (IS_ERR(submit)) {
+ ret = PTR_ERR(submit);
+ goto out_post_unlock;
+ }
+
+ trace_msm_gpu_submit(pid_nr(submit->pid), ring->id, submit->ident,
+ args->nr_bos, args->nr_cmds);
+
+ ret = mutex_lock_interruptible(&queue->lock);
+ if (ret)
+ goto out_post_unlock;
+
+ if (args->flags & MSM_SUBMIT_SUDO)
+ submit->in_rb = true;
+
+ if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
+ struct dma_fence *in_fence;
+
+ in_fence = sync_file_get_fence(args->fence_fd);
+
+ if (!in_fence) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = drm_sched_job_add_dependency(&submit->base, in_fence);
+ if (ret)
+ goto out_unlock;
+ }
+
+ if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) {
+ syncobjs_to_reset = msm_parse_deps(submit, file,
+ args->in_syncobjs,
+ args->nr_in_syncobjs,
+ args->syncobj_stride, ring);
+ if (IS_ERR(syncobjs_to_reset)) {
+ ret = PTR_ERR(syncobjs_to_reset);
+ goto out_unlock;
+ }
+ }
+
+ if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) {
+ post_deps = msm_parse_post_deps(dev, file,
+ args->out_syncobjs,
+ args->nr_out_syncobjs,
+ args->syncobj_stride);
+ if (IS_ERR(post_deps)) {
+ ret = PTR_ERR(post_deps);
+ goto out_unlock;
+ }
+ }
+
+ ret = submit_lookup_objects(submit, args, file);
+ if (ret)
+ goto out;
+
+ ret = submit_lookup_cmds(submit, args, file);
+ if (ret)
+ goto out;
+
+ /* copy_*_user while holding a ww ticket upsets lockdep */
+ ww_acquire_init(&submit->ticket, &reservation_ww_class);
+ has_ww_ticket = true;
+ ret = submit_lock_objects(submit);
+ if (ret)
+ goto out;
+
+ ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
+ if (ret)
+ goto out;
+
+ ret = submit_pin_objects(submit);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < args->nr_cmds; i++) {
+ struct msm_gem_object *msm_obj;
+ uint64_t iova;
+
+ ret = submit_bo(submit, submit->cmd[i].idx,
+ &msm_obj, &iova, NULL);
+ if (ret)
+ goto out;
+
+ if (!submit->cmd[i].size ||
+ ((submit->cmd[i].size + submit->cmd[i].offset) >
+ msm_obj->base.size / 4)) {
+ DRM_ERROR("invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4);
+
+ if (submit->valid)
+ continue;
+
+ ret = submit_reloc(submit, msm_obj, submit->cmd[i].offset * 4,
+ submit->cmd[i].nr_relocs, submit->cmd[i].relocs);
+ if (ret)
+ goto out;
+ }
+
+ submit->nr_cmds = i;
+
+ spin_lock(&queue->idr_lock);
+
+	/*
+	 * If using a userspace-provided seqno fence, validate that the id
+	 * is available before arming the sched job.  Since access to
+	 * fence_idr is serialized on the queue lock, the slot should still
+	 * be available after the job is armed.
+	 */
+ if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) &&
+ (!args->fence || idr_find(&queue->fence_idr, args->fence))) {
+ spin_unlock(&queue->idr_lock);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ drm_sched_job_arm(&submit->base);
+
+ submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);
+
+ if (args->flags & MSM_SUBMIT_FENCE_SN_IN) {
+ /*
+ * Userspace has assigned the seqno fence that it wants
+ * us to use. It is an error to pick a fence sequence
+ * number that is not available.
+ */
+ submit->fence_id = args->fence;
+ ret = idr_alloc_u32(&queue->fence_idr, submit->user_fence,
+ &submit->fence_id, submit->fence_id,
+ GFP_KERNEL);
+		/*
+		 * We've already validated that the fence_id slot is valid,
+		 * so if idr_alloc_u32() fails it is a kernel bug:
+		 */
+ WARN_ON(ret);
+ } else {
+ /*
+ * Allocate an id which can be used by WAIT_FENCE ioctl to map
+ * back to the underlying fence.
+ */
+ submit->fence_id = idr_alloc_cyclic(&queue->fence_idr,
+ submit->user_fence, 1,
+ INT_MAX, GFP_KERNEL);
+ }
+
+ spin_unlock(&queue->idr_lock);
+
+ if (submit->fence_id < 0) {
+ ret = submit->fence_id;
+ submit->fence_id = 0;
+ }
+
+ if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
+ struct sync_file *sync_file = sync_file_create(submit->user_fence);
+ if (!sync_file) {
+ ret = -ENOMEM;
+ } else {
+ fd_install(out_fence_fd, sync_file->file);
+ args->fence_fd = out_fence_fd;
+ }
+ }
+
+ submit_attach_object_fences(submit);
+
+ /* The scheduler owns a ref now: */
+ msm_gem_submit_get(submit);
+
+ drm_sched_entity_push_job(&submit->base);
+
+ args->fence = submit->fence_id;
+ queue->last_fence = submit->fence_id;
+
+ msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
+ msm_process_post_deps(post_deps, args->nr_out_syncobjs,
+ submit->user_fence);
+
+out:
+ submit_cleanup(submit, !!ret);
+ if (has_ww_ticket)
+ ww_acquire_fini(&submit->ticket);
+out_unlock:
+ mutex_unlock(&queue->lock);
+out_post_unlock:
+ if (ret && (out_fence_fd >= 0))
+ put_unused_fd(out_fence_fd);
+
+ if (!IS_ERR_OR_NULL(submit)) {
+ msm_gem_submit_put(submit);
+ } else {
+		/*
+		 * If the submit hasn't yet taken ownership of the queue
+		 * then we need to drop the reference ourselves:
+		 */
+ msm_submitqueue_put(queue);
+ }
+ if (!IS_ERR_OR_NULL(post_deps)) {
+ for (i = 0; i < args->nr_out_syncobjs; ++i) {
+ kfree(post_deps[i].chain);
+ drm_syncobj_put(post_deps[i].syncobj);
+ }
+ kfree(post_deps);
+ }
+
+ if (!IS_ERR_OR_NULL(syncobjs_to_reset)) {
+ for (i = 0; i < args->nr_in_syncobjs; ++i) {
+ if (syncobjs_to_reset[i])
+ drm_syncobj_put(syncobjs_to_reset[i]);
+ }
+ kfree(syncobjs_to_reset);
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
new file mode 100644
index 000000000..c471aebcd
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include "msm_drv.h"
+#include "msm_fence.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+static void
+msm_gem_address_space_destroy(struct kref *kref)
+{
+ struct msm_gem_address_space *aspace = container_of(kref,
+ struct msm_gem_address_space, kref);
+
+ drm_mm_takedown(&aspace->mm);
+ if (aspace->mmu)
+ aspace->mmu->funcs->destroy(aspace->mmu);
+ put_pid(aspace->pid);
+ kfree(aspace);
+}
+
+void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
+{
+ if (aspace)
+ kref_put(&aspace->kref, msm_gem_address_space_destroy);
+}
+
+struct msm_gem_address_space *
+msm_gem_address_space_get(struct msm_gem_address_space *aspace)
+{
+ if (!IS_ERR_OR_NULL(aspace))
+ kref_get(&aspace->kref);
+
+ return aspace;
+}
+
+bool msm_gem_vma_inuse(struct msm_gem_vma *vma)
+{
+ if (vma->inuse > 0)
+ return true;
+
+ while (vma->fence_mask) {
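+		/* ffs() is 1-based, so subtract one to get the bit index: */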
+ unsigned idx = ffs(vma->fence_mask) - 1;
+
+ if (!msm_fence_completed(vma->fctx[idx], vma->fence[idx]))
+ return true;
+
+ vma->fence_mask &= ~BIT(idx);
+ }
+
+ return false;
+}
+
+/* Actually unmap memory for the vma */
+void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma)
+{
+ unsigned size = vma->node.size;
+
+ /* Print a message if we try to purge a vma in use */
+ GEM_WARN_ON(msm_gem_vma_inuse(vma));
+
+ /* Don't do anything if the memory isn't mapped */
+ if (!vma->mapped)
+ return;
+
+ if (aspace->mmu)
+ aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
+
+ vma->mapped = false;
+}
+
+/* Remove reference counts for the mapping */
+void msm_gem_unpin_vma(struct msm_gem_vma *vma)
+{
+ if (GEM_WARN_ON(!vma->inuse))
+ return;
+ if (!GEM_WARN_ON(!vma->iova))
+ vma->inuse--;
+}
+
+/* Replace pin reference with fence: */
+void msm_gem_unpin_vma_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx)
+{
+ vma->fctx[fctx->index] = fctx;
+ vma->fence[fctx->index] = fctx->last_fence;
+ vma->fence_mask |= BIT(fctx->index);
+ msm_gem_unpin_vma(vma);
+}
+
+/* Map and pin vma: */
+int
+msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, int prot,
+ struct sg_table *sgt, int size)
+{
+ int ret = 0;
+
+ if (GEM_WARN_ON(!vma->iova))
+ return -EINVAL;
+
+ /* Increase the usage counter */
+ vma->inuse++;
+
+ if (vma->mapped)
+ return 0;
+
+ vma->mapped = true;
+
+ if (aspace && aspace->mmu)
+ ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
+ size, prot);
+
+ if (ret) {
+ vma->mapped = false;
+ vma->inuse--;
+ }
+
+ return ret;
+}
+
+/* Close an iova. Warn if it is still in use */
+void msm_gem_close_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma)
+{
+ GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped);
+
+ spin_lock(&aspace->lock);
+ if (vma->iova)
+ drm_mm_remove_node(&vma->node);
+ spin_unlock(&aspace->lock);
+
+ vma->iova = 0;
+
+ msm_gem_address_space_put(aspace);
+}
+
+/* Initialize a new vma and allocate an iova for it */
+int msm_gem_init_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, int size,
+ u64 range_start, u64 range_end)
+{
+ int ret;
+
+ if (GEM_WARN_ON(vma->iova))
+ return -EBUSY;
+
+ spin_lock(&aspace->lock);
+ ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node,
+ size, PAGE_SIZE, 0,
+ range_start, range_end, 0);
+ spin_unlock(&aspace->lock);
+
+ if (ret)
+ return ret;
+
+ vma->iova = vma->node.start;
+ vma->mapped = false;
+
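+	/* the vma holds a reference on the aspace, dropped in msm_gem_close_vma(): */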
+ kref_get(&aspace->kref);
+
+ return 0;
+}
+
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
+ u64 va_start, u64 size)
+{
+ struct msm_gem_address_space *aspace;
+
+ if (IS_ERR(mmu))
+ return ERR_CAST(mmu);
+
+ aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
+ if (!aspace)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&aspace->lock);
+ aspace->name = name;
+ aspace->mmu = mmu;
+ aspace->va_start = va_start;
+ aspace->va_size = size;
+
+ drm_mm_init(&aspace->mm, va_start, size);
+
+ kref_init(&aspace->kref);
+
+ return aspace;
+}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
new file mode 100644
index 000000000..380249500
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -0,0 +1,1033 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include "drm/drm_drv.h"
+
+#include "msm_gpu.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+#include "msm_fence.h"
+#include "msm_gpu_trace.h"
+#include "adreno/adreno_gpu.h"
+
+#include <generated/utsrelease.h>
+#include <linux/string_helpers.h>
+#include <linux/devcoredump.h>
+#include <linux/reset.h>
+#include <linux/sched/task.h>
+
+/*
+ * Power Management:
+ */
+
+static int enable_pwrrail(struct msm_gpu *gpu)
+{
+ struct drm_device *dev = gpu->dev;
+ int ret = 0;
+
+ if (gpu->gpu_reg) {
+ ret = regulator_enable(gpu->gpu_reg);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (gpu->gpu_cx) {
+ ret = regulator_enable(gpu->gpu_cx);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int disable_pwrrail(struct msm_gpu *gpu)
+{
+ if (gpu->gpu_cx)
+ regulator_disable(gpu->gpu_cx);
+ if (gpu->gpu_reg)
+ regulator_disable(gpu->gpu_reg);
+ return 0;
+}
+
+static int enable_clk(struct msm_gpu *gpu)
+{
+ if (gpu->core_clk && gpu->fast_rate)
+ clk_set_rate(gpu->core_clk, gpu->fast_rate);
+
+	/* Set the RBBM timer rate to 19.2MHz */
+ if (gpu->rbbmtimer_clk)
+ clk_set_rate(gpu->rbbmtimer_clk, 19200000);
+
+ return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
+}
+
+static int disable_clk(struct msm_gpu *gpu)
+{
+ clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
+
+ /*
+ * Set the clock to a deliberately low rate. On older targets the clock
+ * speed had to be non zero to avoid problems. On newer targets this
+ * will be rounded down to zero anyway so it all works out.
+ */
+ if (gpu->core_clk)
+ clk_set_rate(gpu->core_clk, 27000000);
+
+ if (gpu->rbbmtimer_clk)
+ clk_set_rate(gpu->rbbmtimer_clk, 0);
+
+ return 0;
+}
+
+static int enable_axi(struct msm_gpu *gpu)
+{
+ return clk_prepare_enable(gpu->ebi1_clk);
+}
+
+static int disable_axi(struct msm_gpu *gpu)
+{
+ clk_disable_unprepare(gpu->ebi1_clk);
+ return 0;
+}
+
+int msm_gpu_pm_resume(struct msm_gpu *gpu)
+{
+ int ret;
+
+ DBG("%s", gpu->name);
+ trace_msm_gpu_resume(0);
+
+ ret = enable_pwrrail(gpu);
+ if (ret)
+ return ret;
+
+ ret = enable_clk(gpu);
+ if (ret)
+ return ret;
+
+ ret = enable_axi(gpu);
+ if (ret)
+ return ret;
+
+ msm_devfreq_resume(gpu);
+
+ gpu->needs_hw_init = true;
+
+ return 0;
+}
+
+int msm_gpu_pm_suspend(struct msm_gpu *gpu)
+{
+ int ret;
+
+ DBG("%s", gpu->name);
+ trace_msm_gpu_suspend(0);
+
+ msm_devfreq_suspend(gpu);
+
+ ret = disable_axi(gpu);
+ if (ret)
+ return ret;
+
+ ret = disable_clk(gpu);
+ if (ret)
+ return ret;
+
+ ret = disable_pwrrail(gpu);
+ if (ret)
+ return ret;
+
+ gpu->suspend_count++;
+
+ return 0;
+}
+
+void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ struct drm_printer *p)
+{
+ drm_printf(p, "drm-driver:\t%s\n", gpu->dev->driver->name);
+ drm_printf(p, "drm-client-id:\t%u\n", ctx->seqno);
+ drm_printf(p, "drm-engine-gpu:\t%llu ns\n", ctx->elapsed_ns);
+ drm_printf(p, "drm-cycles-gpu:\t%llu\n", ctx->cycles);
+ drm_printf(p, "drm-maxfreq-gpu:\t%u Hz\n", gpu->fast_rate);
+}
+
+int msm_gpu_hw_init(struct msm_gpu *gpu)
+{
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&gpu->lock));
+
+ if (!gpu->needs_hw_init)
+ return 0;
+
+ disable_irq(gpu->irq);
+ ret = gpu->funcs->hw_init(gpu);
+ if (!ret)
+ gpu->needs_hw_init = false;
+ enable_irq(gpu->irq);
+
+ return ret;
+}
+
+#ifdef CONFIG_DEV_COREDUMP
+static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
+ size_t count, void *data, size_t datalen)
+{
+ struct msm_gpu *gpu = data;
+ struct drm_print_iterator iter;
+ struct drm_printer p;
+ struct msm_gpu_state *state;
+
+ state = msm_gpu_crashstate_get(gpu);
+ if (!state)
+ return 0;
+
+ iter.data = buffer;
+ iter.offset = 0;
+ iter.start = offset;
+ iter.remain = count;
+
+ p = drm_coredump_printer(&iter);
+
+ drm_printf(&p, "---\n");
+ drm_printf(&p, "kernel: " UTS_RELEASE "\n");
+ drm_printf(&p, "module: " KBUILD_MODNAME "\n");
+ drm_printf(&p, "time: %lld.%09ld\n",
+ state->time.tv_sec, state->time.tv_nsec);
+ if (state->comm)
+ drm_printf(&p, "comm: %s\n", state->comm);
+ if (state->cmd)
+ drm_printf(&p, "cmdline: %s\n", state->cmd);
+
+ gpu->funcs->show(gpu, state, &p);
+
+ msm_gpu_crashstate_put(gpu);
+
+ return count - iter.remain;
+}
+
+static void msm_gpu_devcoredump_free(void *data)
+{
+ struct msm_gpu *gpu = data;
+
+ msm_gpu_crashstate_put(gpu);
+}
+
+static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
+ struct msm_gem_object *obj, u64 iova, bool full)
+{
+ struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
+
+ /* Don't record write only objects */
+ state_bo->size = obj->base.size;
+ state_bo->iova = iova;
+
+ BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(obj->name));
+
+ memcpy(state_bo->name, obj->name, sizeof(state_bo->name));
+
+ if (full) {
+ void *ptr;
+
+ state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
+ if (!state_bo->data)
+ goto out;
+
+ msm_gem_lock(&obj->base);
+ ptr = msm_gem_get_vaddr_active(&obj->base);
+ msm_gem_unlock(&obj->base);
+ if (IS_ERR(ptr)) {
+ kvfree(state_bo->data);
+ state_bo->data = NULL;
+ goto out;
+ }
+
+ memcpy(state_bo->data, ptr, obj->base.size);
+ msm_gem_put_vaddr(&obj->base);
+ }
+out:
+ state->nr_bos++;
+}
+
+static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit, char *comm, char *cmd)
+{
+ struct msm_gpu_state *state;
+
+ /* Check if the target supports capturing crash state */
+ if (!gpu->funcs->gpu_state_get)
+ return;
+
+ /* Only save one crash state at a time */
+ if (gpu->crashstate)
+ return;
+
+ state = gpu->funcs->gpu_state_get(gpu);
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ /* Fill in the additional crash state information */
+ state->comm = kstrdup(comm, GFP_KERNEL);
+ state->cmd = kstrdup(cmd, GFP_KERNEL);
+ state->fault_info = gpu->fault_info;
+
+ if (submit) {
+ int i;
+
+ state->bos = kcalloc(submit->nr_bos,
+ sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+
+ for (i = 0; state->bos && i < submit->nr_bos; i++) {
+ msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
+ submit->bos[i].iova,
+ should_dump(submit, i));
+ }
+ }
+
+ /* Set the active crash state to be dumped on failure */
+ gpu->crashstate = state;
+
+ /* FIXME: Release the crashstate if this errors out? */
+ dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
+ msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
+}
+#else
+static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit, char *comm, char *cmd)
+{
+}
+#endif
+
+/*
+ * Hangcheck detection for locked gpu:
+ */
+
+static struct msm_gem_submit *
+find_submit(struct msm_ringbuffer *ring, uint32_t fence)
+{
+ struct msm_gem_submit *submit;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->submit_lock, flags);
+ list_for_each_entry(submit, &ring->submits, node) {
+ if (submit->seqno == fence) {
+ spin_unlock_irqrestore(&ring->submit_lock, flags);
+ return submit;
+ }
+ }
+ spin_unlock_irqrestore(&ring->submit_lock, flags);
+
+ return NULL;
+}
+
+static void retire_submits(struct msm_gpu *gpu);
+
+static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd)
+{
+ struct msm_file_private *ctx = submit->queue->ctx;
+ struct task_struct *task;
+
+ WARN_ON(!mutex_is_locked(&submit->gpu->lock));
+
+ /* Note that kstrdup will return NULL if argument is NULL: */
+ *comm = kstrdup(ctx->comm, GFP_KERNEL);
+ *cmd = kstrdup(ctx->cmdline, GFP_KERNEL);
+
+ task = get_pid_task(submit->pid, PIDTYPE_PID);
+ if (!task)
+ return;
+
+ if (!*comm)
+ *comm = kstrdup(task->comm, GFP_KERNEL);
+
+ if (!*cmd)
+ *cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+
+ put_task_struct(task);
+}
+
+static void recover_worker(struct kthread_work *work)
+{
+ struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gem_submit *submit;
+ struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
+ char *comm = NULL, *cmd = NULL;
+ int i;
+
+ mutex_lock(&gpu->lock);
+
+ DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
+
+ submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
+ if (submit) {
+ /* Increment the fault counts */
+ submit->queue->faults++;
+ if (submit->aspace)
+ submit->aspace->faults++;
+
+ get_comm_cmdline(submit, &comm, &cmd);
+
+ if (comm && cmd) {
+ DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
+ gpu->name, comm, cmd);
+
+ msm_rd_dump_submit(priv->hangrd, submit,
+ "offending task: %s (%s)", comm, cmd);
+ } else {
+ msm_rd_dump_submit(priv->hangrd, submit, NULL);
+ }
+ } else {
+ /*
+ * We couldn't attribute this fault to any particular context,
+ * so increment the global fault count instead.
+ */
+ gpu->global_faults++;
+ }
+
+ /* Record the crash state */
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
+
+ kfree(cmd);
+ kfree(comm);
+
+ /*
+ * Update all the rings with the latest and greatest fence.. this
+ * needs to happen after msm_rd_dump_submit() to ensure that the
+ * bo's referenced by the offending submit are still around.
+ */
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ uint32_t fence = ring->memptrs->fence;
+
+ /*
+ * For the current (faulting?) ring/submit advance the fence by
+ * one more to clear the faulting submit
+ */
+ if (ring == cur_ring)
+ ring->memptrs->fence = ++fence;
+
+ msm_update_fence(ring->fctx, fence);
+ }
+
+ if (msm_gpu_active(gpu)) {
+ /* retire completed submits, plus the one that hung: */
+ retire_submits(gpu);
+
+ gpu->funcs->recover(gpu);
+
+ /*
+ * Replay all remaining submits starting with highest priority
+ * ring
+ */
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->submit_lock, flags);
+ list_for_each_entry(submit, &ring->submits, node)
+ gpu->funcs->submit(gpu, submit);
+ spin_unlock_irqrestore(&ring->submit_lock, flags);
+ }
+ }
+
+ pm_runtime_put(&gpu->pdev->dev);
+
+ mutex_unlock(&gpu->lock);
+
+ msm_gpu_retire(gpu);
+}
+
+static void fault_worker(struct kthread_work *work)
+{
+ struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
+ struct msm_gem_submit *submit;
+ struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
+ char *comm = NULL, *cmd = NULL;
+
+ mutex_lock(&gpu->lock);
+
+ submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
+ if (submit && submit->fault_dumped)
+ goto resume_smmu;
+
+ if (submit) {
+ get_comm_cmdline(submit, &comm, &cmd);
+
+ /*
+ * When we get GPU iova faults, we can get 1000s of them,
+ * but we really only want to log the first one.
+ */
+ submit->fault_dumped = true;
+ }
+
+ /* Record the crash state */
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
+ pm_runtime_put_sync(&gpu->pdev->dev);
+
+ kfree(cmd);
+ kfree(comm);
+
+resume_smmu:
+ memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
+ gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
+
+ mutex_unlock(&gpu->lock);
+}
+
+static void hangcheck_timer_reset(struct msm_gpu *gpu)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ mod_timer(&gpu->hangcheck_timer,
+ round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period)));
+}
+
+static bool made_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ if (ring->hangcheck_progress_retries >= DRM_MSM_HANGCHECK_PROGRESS_RETRIES)
+ return false;
+
+ if (!gpu->funcs->progress)
+ return false;
+
+ if (!gpu->funcs->progress(gpu, ring))
+ return false;
+
+ ring->hangcheck_progress_retries++;
+ return true;
+}
+
+static void hangcheck_handler(struct timer_list *t)
+{
+ struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
+ struct drm_device *dev = gpu->dev;
+ struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+ uint32_t fence = ring->memptrs->fence;
+
+ if (fence != ring->hangcheck_fence) {
+ /* some progress has been made.. ya! */
+ ring->hangcheck_fence = fence;
+ ring->hangcheck_progress_retries = 0;
+ } else if (fence_before(fence, ring->fctx->last_fence) &&
+ !made_progress(gpu, ring)) {
+ /* no progress and not done.. hung! */
+ ring->hangcheck_fence = fence;
+ ring->hangcheck_progress_retries = 0;
+ DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
+ gpu->name, ring->id);
+ DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
+ gpu->name, fence);
+ DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
+ gpu->name, ring->fctx->last_fence);
+
+ kthread_queue_work(gpu->worker, &gpu->recover_work);
+ }
+
+ /* if still more pending work, reset the hangcheck timer: */
+ if (fence_after(ring->fctx->last_fence, ring->hangcheck_fence))
+ hangcheck_timer_reset(gpu);
+
+ /* workaround for missing irq: */
+ msm_gpu_retire(gpu);
+}
+
+/*
+ * Performance Counters:
+ */
+
+/* called under perf_lock */
+static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
+{
+ uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
+ int i, n = min(ncntrs, gpu->num_perfcntrs);
+
+ /* read current values: */
+ for (i = 0; i < gpu->num_perfcntrs; i++)
+ current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);
+
+ /* update cntrs: */
+ for (i = 0; i < n; i++)
+ cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];
+
+ /* save current values: */
+ for (i = 0; i < gpu->num_perfcntrs; i++)
+ gpu->last_cntrs[i] = current_cntrs[i];
+
+ return n;
+}
+
+static void update_sw_cntrs(struct msm_gpu *gpu)
+{
+ ktime_t time;
+ uint32_t elapsed;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpu->perf_lock, flags);
+ if (!gpu->perfcntr_active)
+ goto out;
+
+ time = ktime_get();
+ elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));
+
+ gpu->totaltime += elapsed;
+ if (gpu->last_sample.active)
+ gpu->activetime += elapsed;
+
+ gpu->last_sample.active = msm_gpu_active(gpu);
+ gpu->last_sample.time = time;
+
+out:
+ spin_unlock_irqrestore(&gpu->perf_lock, flags);
+}
+
+void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
+{
+ unsigned long flags;
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+
+ spin_lock_irqsave(&gpu->perf_lock, flags);
+ /* we could dynamically enable/disable perfcntr registers too.. */
+ gpu->last_sample.active = msm_gpu_active(gpu);
+ gpu->last_sample.time = ktime_get();
+ gpu->activetime = gpu->totaltime = 0;
+ gpu->perfcntr_active = true;
+ update_hw_cntrs(gpu, 0, NULL);
+ spin_unlock_irqrestore(&gpu->perf_lock, flags);
+}
+
+void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
+{
+ gpu->perfcntr_active = false;
+ pm_runtime_put_sync(&gpu->pdev->dev);
+}
+
+/* returns -errno or # of cntrs sampled */
+int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
+ uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&gpu->perf_lock, flags);
+
+ if (!gpu->perfcntr_active) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *activetime = gpu->activetime;
+ *totaltime = gpu->totaltime;
+
+ gpu->activetime = gpu->totaltime = 0;
+
+ ret = update_hw_cntrs(gpu, ncntrs, cntrs);
+
+out:
+ spin_unlock_irqrestore(&gpu->perf_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Cmdstream submission/retirement:
+ */
+
+static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+ struct msm_gem_submit *submit)
+{
+ int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
+ volatile struct msm_gpu_submit_stats *stats;
+ u64 elapsed, clock = 0, cycles;
+ unsigned long flags;
+
+ stats = &ring->memptrs->stats[index];
+	/* Convert 19.2MHz alwayson ticks to nanoseconds for elapsed time
+	 * (one tick is 10000/192 ns, i.e. ~52.08ns):
+	 */
+ elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
+ do_div(elapsed, 192);
+
+ cycles = stats->cpcycles_end - stats->cpcycles_start;
+
+ /* Calculate the clock frequency from the number of CP cycles */
+ if (elapsed) {
+ clock = cycles * 1000;
+ do_div(clock, elapsed);
+ }
+
+ submit->queue->ctx->elapsed_ns += elapsed;
+ submit->queue->ctx->cycles += cycles;
+
+ trace_msm_gpu_submit_retired(submit, elapsed, clock,
+ stats->alwayson_start, stats->alwayson_end);
+
+ msm_submit_retire(submit);
+
+ pm_runtime_mark_last_busy(&gpu->pdev->dev);
+
+ spin_lock_irqsave(&ring->submit_lock, flags);
+ list_del(&submit->node);
+ spin_unlock_irqrestore(&ring->submit_lock, flags);
+
+ /* Update devfreq on transition from active->idle: */
+ mutex_lock(&gpu->active_lock);
+ gpu->active_submits--;
+ WARN_ON(gpu->active_submits < 0);
+ if (!gpu->active_submits) {
+ msm_devfreq_idle(gpu);
+ pm_runtime_put_autosuspend(&gpu->pdev->dev);
+ }
+
+ mutex_unlock(&gpu->active_lock);
+
+ msm_gem_submit_put(submit);
+}
+
+static void retire_submits(struct msm_gpu *gpu)
+{
+ int i;
+
+	/* Retire the submits starting with the highest priority ring */
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ while (true) {
+ struct msm_gem_submit *submit = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->submit_lock, flags);
+ submit = list_first_entry_or_null(&ring->submits,
+ struct msm_gem_submit, node);
+ spin_unlock_irqrestore(&ring->submit_lock, flags);
+
+			/*
+			 * If there is no submit, we are done.  If submit->hw_fence
+			 * hasn't been signalled, then later submits are not
+			 * signalled either, so we are also done.
+			 */
+ if (submit && dma_fence_is_signaled(submit->hw_fence)) {
+ retire_submit(gpu, ring, submit);
+ } else {
+ break;
+ }
+ }
+ }
+
+ wake_up_all(&gpu->retire_event);
+}
+
+static void retire_worker(struct kthread_work *work)
+{
+ struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
+
+ retire_submits(gpu);
+}
+
+/* call from irq handler to schedule work to retire bo's */
+void msm_gpu_retire(struct msm_gpu *gpu)
+{
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++)
+ msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);
+
+ kthread_queue_work(gpu->worker, &gpu->retire_work);
+ update_sw_cntrs(gpu);
+}
+
+/* add bo's to gpu's ring, and kick gpu: */
+void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_ringbuffer *ring = submit->ring;
+ unsigned long flags;
+
+ WARN_ON(!mutex_is_locked(&gpu->lock));
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+
+ msm_gpu_hw_init(gpu);
+
+ submit->seqno = submit->hw_fence->seqno;
+
+ msm_rd_dump_submit(priv->rd, submit, NULL);
+
+ update_sw_cntrs(gpu);
+
+ /*
+ * ring->submits holds a ref to the submit, to deal with the case
+ * that a submit completes before msm_ioctl_gem_submit() returns.
+ */
+ msm_gem_submit_get(submit);
+
+ spin_lock_irqsave(&ring->submit_lock, flags);
+ list_add_tail(&submit->node, &ring->submits);
+ spin_unlock_irqrestore(&ring->submit_lock, flags);
+
+ /* Update devfreq on transition from idle->active: */
+ mutex_lock(&gpu->active_lock);
+ if (!gpu->active_submits) {
+ pm_runtime_get(&gpu->pdev->dev);
+ msm_devfreq_active(gpu);
+ }
+ gpu->active_submits++;
+ mutex_unlock(&gpu->active_lock);
+
+ gpu->funcs->submit(gpu, submit);
+ gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
+
+ pm_runtime_put(&gpu->pdev->dev);
+ hangcheck_timer_reset(gpu);
+}
+
+/*
+ * Init/Cleanup:
+ */
+
+static irqreturn_t irq_handler(int irq, void *data)
+{
+ struct msm_gpu *gpu = data;
+ return gpu->funcs->irq(gpu);
+}
+
+static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
+{
+ int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);
+
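+	/* devm_clk_bulk_get_all() returns the number of clocks found, or a
+	 * negative errno; with no clocks there is nothing more to set up:
+	 */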
+ if (ret < 1) {
+ gpu->nr_clocks = 0;
+ return ret;
+ }
+
+ gpu->nr_clocks = ret;
+
+ gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
+ gpu->nr_clocks, "core");
+
+ gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
+ gpu->nr_clocks, "rbbmtimer");
+
+ return 0;
+}
+
+/* Return a new address space for a msm_file_private instance */
+struct msm_gem_address_space *
+msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
+{
+ struct msm_gem_address_space *aspace = NULL;
+ if (!gpu)
+ return NULL;
+
+ /*
+ * If the target doesn't support private address spaces then return
+ * the global one
+ */
+ if (gpu->funcs->create_private_address_space) {
+ aspace = gpu->funcs->create_private_address_space(gpu);
+ if (!IS_ERR(aspace))
+ aspace->pid = get_pid(task_pid(task));
+ }
+
+ if (IS_ERR_OR_NULL(aspace))
+ aspace = msm_gem_address_space_get(gpu->aspace);
+
+ return aspace;
+}
+
+int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
+ const char *name, struct msm_gpu_config *config)
+{
+ struct msm_drm_private *priv = drm->dev_private;
+ int i, ret, nr_rings = config->nr_rings;
+ void *memptrs;
+ uint64_t memptrs_iova;
+
+ if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
+ gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
+
+ gpu->dev = drm;
+ gpu->funcs = funcs;
+ gpu->name = name;
+
+ gpu->worker = kthread_create_worker(0, "gpu-worker");
+ if (IS_ERR(gpu->worker)) {
+ ret = PTR_ERR(gpu->worker);
+ gpu->worker = NULL;
+ goto fail;
+ }
+
+ sched_set_fifo_low(gpu->worker->task);
+
+ mutex_init(&gpu->active_lock);
+ mutex_init(&gpu->lock);
+ init_waitqueue_head(&gpu->retire_event);
+ kthread_init_work(&gpu->retire_work, retire_worker);
+ kthread_init_work(&gpu->recover_work, recover_worker);
+ kthread_init_work(&gpu->fault_work, fault_worker);
+
+ priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;
+
+ /*
+ * If progress detection is supported, halve the hangcheck timer
+ * duration, as it takes two iterations of the hangcheck handler
+ * to detect a hang.
+ */
+ if (funcs->progress)
+ priv->hangcheck_period /= 2;
+
+ timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
+
+ spin_lock_init(&gpu->perf_lock);
+
+ /* Map registers: */
+ gpu->mmio = msm_ioremap(pdev, config->ioname);
+ if (IS_ERR(gpu->mmio)) {
+ ret = PTR_ERR(gpu->mmio);
+ goto fail;
+ }
+
+ /* Get Interrupt: */
+ gpu->irq = platform_get_irq(pdev, 0);
+ if (gpu->irq < 0) {
+ ret = gpu->irq;
+ DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
+ goto fail;
+ }
+
+ ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
+ IRQF_TRIGGER_HIGH, "gpu-irq", gpu);
+ if (ret) {
+ DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
+ goto fail;
+ }
+
+ ret = get_clocks(pdev, gpu);
+ if (ret)
+ goto fail;
+
+ gpu->ebi1_clk = msm_clk_get(pdev, "bus");
+ DBG("ebi1_clk: %p", gpu->ebi1_clk);
+ if (IS_ERR(gpu->ebi1_clk))
+ gpu->ebi1_clk = NULL;
+
+ /* Acquire regulators: */
+ gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
+ DBG("gpu_reg: %p", gpu->gpu_reg);
+ if (IS_ERR(gpu->gpu_reg))
+ gpu->gpu_reg = NULL;
+
+ gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
+ DBG("gpu_cx: %p", gpu->gpu_cx);
+ if (IS_ERR(gpu->gpu_cx))
+ gpu->gpu_cx = NULL;
+
+ gpu->cx_collapse = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ "cx_collapse");
+
+ gpu->pdev = pdev;
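+	/* drvdata points at the adreno_smmu priv, shared with the SMMU driver: */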
+ platform_set_drvdata(pdev, &gpu->adreno_smmu);
+
+ msm_devfreq_init(gpu);
+
+ gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);
+
+ if (gpu->aspace == NULL)
+ DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
+ else if (IS_ERR(gpu->aspace)) {
+ ret = PTR_ERR(gpu->aspace);
+ goto fail;
+ }
+
+ memptrs = msm_gem_kernel_new(drm,
+ sizeof(struct msm_rbmemptrs) * nr_rings,
+ check_apriv(gpu, MSM_BO_WC), gpu->aspace, &gpu->memptrs_bo,
+ &memptrs_iova);
+
+ if (IS_ERR(memptrs)) {
+ ret = PTR_ERR(memptrs);
+ DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
+ goto fail;
+ }
+
+ msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");
+
+ if (nr_rings > ARRAY_SIZE(gpu->rb)) {
+ DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
+ ARRAY_SIZE(gpu->rb));
+ nr_rings = ARRAY_SIZE(gpu->rb);
+ }
+
+ /* Create ringbuffer(s): */
+ for (i = 0; i < nr_rings; i++) {
+ gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
+
+ if (IS_ERR(gpu->rb[i])) {
+ ret = PTR_ERR(gpu->rb[i]);
+ DRM_DEV_ERROR(drm->dev,
+ "could not create ringbuffer %d: %d\n", i, ret);
+ goto fail;
+ }
+
+ memptrs += sizeof(struct msm_rbmemptrs);
+ memptrs_iova += sizeof(struct msm_rbmemptrs);
+ }
+
+ gpu->nr_rings = nr_rings;
+
+ refcount_set(&gpu->sysprof_active, 1);
+
+ return 0;
+
+fail:
+ for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
+ msm_ringbuffer_destroy(gpu->rb[i]);
+ gpu->rb[i] = NULL;
+ }
+
+ msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);
+
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+void msm_gpu_cleanup(struct msm_gpu *gpu)
+{
+ int i;
+
+ DBG("%s", gpu->name);
+
+ for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
+ msm_ringbuffer_destroy(gpu->rb[i]);
+ gpu->rb[i] = NULL;
+ }
+
+ msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);
+
+ if (!IS_ERR_OR_NULL(gpu->aspace)) {
+ gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
+ msm_gem_address_space_put(gpu->aspace);
+ }
+
+ if (gpu->worker) {
+ kthread_destroy_worker(gpu->worker);
+ }
+
+ msm_devfreq_cleanup(gpu);
+
+ platform_set_drvdata(gpu->pdev, NULL);
+}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
new file mode 100644
index 000000000..b39cd3327
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -0,0 +1,700 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __MSM_GPU_H__
+#define __MSM_GPU_H__
+
+#include <linux/adreno-smmu-priv.h>
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/interconnect.h>
+#include <linux/pm_opp.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+
+#include "msm_drv.h"
+#include "msm_fence.h"
+#include "msm_ringbuffer.h"
+#include "msm_gem.h"
+
+struct msm_gem_submit;
+struct msm_gpu_perfcntr;
+struct msm_gpu_state;
+struct msm_file_private;
+
+struct msm_gpu_config {
+ const char *ioname;
+ unsigned int nr_rings;
+};
+
+/* With the hardware that I've seen to date, we can have:
+ * + zero, one, or two z180 2d cores
+ * + a3xx or a2xx 3d core, which share a common CP (the firmware
+ * for the CP seems to implement some different PM4 packet types
+ * but the basics of cmdstream submission are the same)
+ *
+ * Which means that the eventual complete "class" hierarchy, once
+ * support for all past and present hw is in place, becomes:
+ * + msm_gpu
+ * + adreno_gpu
+ * + a3xx_gpu
+ * + a2xx_gpu
+ * + z180_gpu
+ */
+struct msm_gpu_funcs {
+ int (*get_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ uint32_t param, uint64_t *value, uint32_t *len);
+ int (*set_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ uint32_t param, uint64_t value, uint32_t len);
+ int (*hw_init)(struct msm_gpu *gpu);
+ int (*pm_suspend)(struct msm_gpu *gpu);
+ int (*pm_resume)(struct msm_gpu *gpu);
+ void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
+ void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+	irqreturn_t (*irq)(struct msm_gpu *gpu);
+ struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
+ void (*recover)(struct msm_gpu *gpu);
+ void (*destroy)(struct msm_gpu *gpu);
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ /* show GPU status in debugfs: */
+ void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p);
+ /* for generation specific debugfs: */
+ void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
+#endif
+ /* note: gpu_busy() can assume that we have been pm_resumed */
+ u64 (*gpu_busy)(struct msm_gpu *gpu, unsigned long *out_sample_rate);
+ struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
+ int (*gpu_state_put)(struct msm_gpu_state *state);
+ unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
+ /* note: gpu_set_freq() can assume that we have been pm_resumed */
+ void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp,
+ bool suspended);
+ struct msm_gem_address_space *(*create_address_space)
+ (struct msm_gpu *gpu, struct platform_device *pdev);
+ struct msm_gem_address_space *(*create_private_address_space)
+ (struct msm_gpu *gpu);
+ uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+
+ /**
+ * progress: Has the GPU made progress?
+ *
+ * Return true if GPU position in cmdstream has advanced (or changed)
+ * since the last call. To avoid false negatives, this should account
+ * for cmdstream that is buffered in this FIFO upstream of the CP fw.
+ */
+ bool (*progress)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+};
+
+/* Additional state for iommu faults: */
+struct msm_gpu_fault_info {
+ u64 ttbr0;
+ unsigned long iova;
+ int flags;
+ const char *type;
+ const char *block;
+};
+
+/**
+ * struct msm_gpu_devfreq - devfreq related state
+ */
+struct msm_gpu_devfreq {
+ /** devfreq: devfreq instance */
+ struct devfreq *devfreq;
+
+ /** lock: lock for "suspended", "busy_cycles", and "time" */
+ struct mutex lock;
+
+ /**
+	 * idle_freq:
+ *
+ * A PM QoS constraint to limit max freq while the GPU is idle.
+ */
+ struct dev_pm_qos_request idle_freq;
+
+ /**
+	 * boost_freq:
+ *
+ * A PM QoS constraint to boost min freq for a period of time
+ * until the boost expires.
+ */
+ struct dev_pm_qos_request boost_freq;
+
+ /**
+ * busy_cycles: Last busy counter value, for calculating elapsed busy
+ * cycles since last sampling period.
+ */
+ u64 busy_cycles;
+
+ /** time: Time of last sampling period. */
+ ktime_t time;
+
+ /** idle_time: Time of last transition to idle: */
+ ktime_t idle_time;
+
+	/**
+	 * average_status:
+	 *
+	 * The dev_status averaged over the polling window, returned from
+	 * get_dev_status() (see update_average_dev_status()).
+	 */
+	struct devfreq_dev_status average_status;
+
+ /**
+ * idle_work:
+ *
+ * Used to delay clamping to idle freq on active->idle transition.
+ */
+ struct msm_hrtimer_work idle_work;
+
+ /**
+ * boost_work:
+ *
+	 * Used to reset the boost_freq request after the boost period has
+	 * elapsed.
+ */
+ struct msm_hrtimer_work boost_work;
+
+ /** suspended: tracks if we're suspended */
+ bool suspended;
+};
+
+struct msm_gpu {
+ const char *name;
+ struct drm_device *dev;
+ struct platform_device *pdev;
+ const struct msm_gpu_funcs *funcs;
+
+ struct adreno_smmu_priv adreno_smmu;
+
+ /* performance counters (hw & sw): */
+ spinlock_t perf_lock;
+ bool perfcntr_active;
+ struct {
+ bool active;
+ ktime_t time;
+ } last_sample;
+ uint32_t totaltime, activetime; /* sw counters */
+ uint32_t last_cntrs[5]; /* hw counters */
+ const struct msm_gpu_perfcntr *perfcntrs;
+ uint32_t num_perfcntrs;
+
+ struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
+ int nr_rings;
+
+ /**
+ * sysprof_active:
+ *
+ * The count of contexts that have enabled system profiling.
+ */
+ refcount_t sysprof_active;
+
+ /**
+ * cur_ctx_seqno:
+ *
+ * The ctx->seqno value of the last context to submit rendering,
+ * and the one with current pgtables installed (for generations
+ * that support per-context pgtables). Tracked by seqno rather
+ * than pointer value to avoid dangling pointers, and cases where
+ * a ctx can be freed and a new one created with the same address.
+ */
+ int cur_ctx_seqno;
+
+ /**
+ * lock:
+ *
+ * General lock for serializing all the gpu things.
+ *
+ * TODO move to per-ring locking where feasible (ie. submit/retire
+ * path, etc)
+ */
+ struct mutex lock;
+
+ /**
+ * active_submits:
+ *
+ * The number of submitted but not yet retired submits, used to
+ * determine transitions between active and idle.
+ *
+ * Protected by active_lock
+ */
+ int active_submits;
+
+	/** active_lock: protects active_submits and idle/active transitions */
+ struct mutex active_lock;
+
+ /* does gpu need hw_init? */
+ bool needs_hw_init;
+
+ /**
+ * global_faults: number of GPU hangs not attributed to a particular
+ * address space
+ */
+ int global_faults;
+
+ void __iomem *mmio;
+ int irq;
+
+ struct msm_gem_address_space *aspace;
+
+ /* Power Control: */
+ struct regulator *gpu_reg, *gpu_cx;
+ struct clk_bulk_data *grp_clks;
+ int nr_clocks;
+ struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
+ uint32_t fast_rate;
+
+ /* Hang and Inactivity Detection:
+ */
+#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
+
+#define DRM_MSM_HANGCHECK_DEFAULT_PERIOD 500 /* in ms */
+#define DRM_MSM_HANGCHECK_PROGRESS_RETRIES 3
+ struct timer_list hangcheck_timer;
+
+ /* Fault info for most recent iova fault: */
+ struct msm_gpu_fault_info fault_info;
+
+	/* work for handling GPU iova faults: */
+ struct kthread_work fault_work;
+
+ /* work for handling GPU recovery: */
+ struct kthread_work recover_work;
+
+ /** retire_event: notified when submits are retired: */
+ wait_queue_head_t retire_event;
+
+ /* work for handling active-list retiring: */
+ struct kthread_work retire_work;
+
+ /* worker for retire/recover: */
+ struct kthread_worker *worker;
+
+ struct drm_gem_object *memptrs_bo;
+
+ struct msm_gpu_devfreq devfreq;
+
+ uint32_t suspend_count;
+
+ struct msm_gpu_state *crashstate;
+
+ /* Enable clamping to idle freq when inactive: */
+ bool clamp_to_idle;
+
+ /* True if the hardware supports expanded apriv (a650 and newer) */
+ bool hw_apriv;
+
+ struct thermal_cooling_device *cooling;
+
+ /* To poll for cx gdsc collapse during gpu recovery */
+ struct reset_control *cx_collapse;
+};
+
+static inline struct msm_gpu *dev_to_gpu(struct device *dev)
+{
+ struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
+
+ if (!adreno_smmu)
+ return NULL;
+
+ return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
+}
+
+/* It turns out that all targets use the same ringbuffer size */
+#define MSM_GPU_RINGBUFFER_SZ SZ_32K
+#define MSM_GPU_RINGBUFFER_BLKSIZE 32
+
+#define MSM_GPU_RB_CNTL_DEFAULT \
+ (AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
+ AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
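+
+/*
+ * For reference, both fields encode log2 of a size in 8-byte units:
+ * with the values above, BUFSZ gets ilog2(SZ_32K / 8) == ilog2(4096)
+ * == 12 and BLKSZ gets ilog2(32 / 8) == ilog2(4) == 2.
+ */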
+
+static inline bool msm_gpu_active(struct msm_gpu *gpu)
+{
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ if (fence_after(ring->fctx->last_fence, ring->memptrs->fence))
+ return true;
+ }
+
+ return false;
+}
+
+/* Perf-Counters:
+ * The select_reg and select_val are just there for the benefit of the child
+ * class that actually enables the perf counter; the msm_gpu base class
+ * will handle sampling/displaying the counters.
+ */
+
+struct msm_gpu_perfcntr {
+ uint32_t select_reg;
+ uint32_t sample_reg;
+ uint32_t select_val;
+ const char *name;
+};
+
+/*
+ * The number of priority levels provided by the drm gpu scheduler. The
+ * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
+ * cases, so we don't use it (no need for kernel generated jobs).
+ */
+#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)
+
+/**
+ * struct msm_file_private - per-drm_file context
+ *
+ * @queuelock: synchronizes access to submitqueues list
+ * @submitqueues: list of &msm_gpu_submitqueue created by userspace
+ * @queueid: counter incremented each time a submitqueue is created,
+ * used to assign &msm_gpu_submitqueue.id
+ * @aspace: the per-process GPU address-space
+ * @ref: reference count
+ * @seqno: unique per process seqno
+ */
+struct msm_file_private {
+ rwlock_t queuelock;
+ struct list_head submitqueues;
+ int queueid;
+ struct msm_gem_address_space *aspace;
+ struct kref ref;
+ int seqno;
+
+ /**
+ * sysprof:
+ *
+ * The value of MSM_PARAM_SYSPROF set by userspace. This is
+ * intended to be used by system profiling tools like Mesa's
+ * pps-producer (perfetto), and restricted to CAP_SYS_ADMIN.
+ *
+ * Setting a value of 1 will preserve performance counters across
+ * context switches. Setting a value of 2 will in addition
+ * suppress suspend. (Performance counters lose state across
+ * power collapse, which is undesirable for profiling in some
+ * cases.)
+ *
+ * The value automatically reverts to zero when the drm device
+ * file is closed.
+ */
+ int sysprof;
+
+ /**
+ * comm: Overridden task comm, see MSM_PARAM_COMM
+ *
+ * Accessed under msm_gpu::lock
+ */
+ char *comm;
+
+ /**
+ * cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE
+ *
+ * Accessed under msm_gpu::lock
+ */
+ char *cmdline;
+
+ /**
+	 * elapsed_ns:
+ *
+ * The total (cumulative) elapsed time GPU was busy with rendering
+ * from this context in ns.
+ */
+ uint64_t elapsed_ns;
+
+ /**
+ * cycles:
+ *
+ * The total (cumulative) GPU cycles elapsed attributed to this
+ * context.
+ */
+ uint64_t cycles;
+
+ /**
+ * entities:
+ *
+ * Table of per-priority-level sched entities used by submitqueues
+ * associated with this &drm_file. Because some userspace apps
+ * make assumptions about rendering from multiple gl contexts
+ * (of the same priority) within the process happening in FIFO
+ * order without requiring any fencing beyond MakeCurrent(), we
+ * create at most one &drm_sched_entity per-process per-priority-
+ * level.
+ */
+ struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
+};
+
+/**
+ * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
+ *
+ * @gpu: the gpu instance
+ * @prio: the userspace priority level
+ * @ring_nr: [out] the ringbuffer the userspace priority maps to
+ * @sched_prio: [out] the gpu scheduler priority level which the userspace
+ * priority maps to
+ *
+ * With drm/scheduler providing its own level of prioritization, our total
+ * number of available priority levels is (nr_rings * NR_SCHED_PRIORITIES).
+ * Each ring is associated with its own scheduler instance. However, our
+ * UABI is that lower numerical values are higher priority. So mapping the
+ * single userspace priority level into ring_nr and sched_prio takes some
+ * care. The userspace provided priority (when a submitqueue is created)
+ * is mapped to ring nr and scheduler priority as such:
+ *
+ * ring_nr = userspace_prio / NR_SCHED_PRIORITIES
+ * sched_prio = NR_SCHED_PRIORITIES -
+ * (userspace_prio % NR_SCHED_PRIORITIES) - 1
+ *
+ * This allows generations without preemption (nr_rings==1) to have some
+ * amount of prioritization, and provides more priority levels for gens
+ * that do have preemption.
+ */
+static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
+ unsigned *ring_nr, enum drm_sched_priority *sched_prio)
+{
+ unsigned rn, sp;
+
+ rn = div_u64_rem(prio, NR_SCHED_PRIORITIES, &sp);
+
+ /* invert sched priority to map to higher-numeric-is-higher-
+ * priority convention
+ */
+ sp = NR_SCHED_PRIORITIES - sp - 1;
+
+ if (rn >= gpu->nr_rings)
+ return -EINVAL;
+
+ *ring_nr = rn;
+ *sched_prio = sp;
+
+ return 0;
+}
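+
+/*
+ * Worked example (illustrative; assumes NR_SCHED_PRIORITIES evaluates
+ * to 3 and nr_rings == 4): userspace prio 0 maps to ring 0 at the
+ * highest sched prio (sp == 2), prio 2 maps to ring 0 at the lowest
+ * (sp == 0), prio 7 maps to ring 2 with sp == 1, and prio 12 or above
+ * fails with -EINVAL.
+ */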
+
+/**
+ * struct msm_gpu_submitqueue - Userspace created context.
+ *
+ * A submitqueue is associated with a gl context or vk queue (or equiv)
+ * in userspace.
+ *
+ * @id: userspace id for the submitqueue, unique within the drm_file
+ * @flags: userspace flags for the submitqueue, specified at creation
+ *    (currently unused)
+ * @ring_nr: the ringbuffer used by this submitqueue, which is determined
+ * by the submitqueue's priority
+ * @faults: the number of GPU hangs associated with this submitqueue
+ * @last_fence: the sequence number of the last allocated fence (for error
+ * checking)
+ * @ctx: the per-drm_file context associated with the submitqueue (ie.
+ *    which set of pgtables the jobs submitted to the submitqueue
+ *    use)
+ * @node: node in the context's list of submitqueues
+ * @fence_idr: maps fence-id to dma_fence for userspace visible fence
+ * seqno, protected by submitqueue lock
+ * @idr_lock: for serializing access to fence_idr
+ * @lock: submitqueue lock for serializing submits on a queue
+ * @ref: reference count
+ * @entity: the submit job-queue
+ */
+struct msm_gpu_submitqueue {
+ int id;
+ u32 flags;
+ u32 ring_nr;
+ int faults;
+ uint32_t last_fence;
+ struct msm_file_private *ctx;
+ struct list_head node;
+ struct idr fence_idr;
+ struct spinlock idr_lock;
+ struct mutex lock;
+ struct kref ref;
+ struct drm_sched_entity *entity;
+};
+
+struct msm_gpu_state_bo {
+ u64 iova;
+ size_t size;
+ void *data;
+ bool encoded;
+ char name[32];
+};
+
+struct msm_gpu_state {
+ struct kref ref;
+ struct timespec64 time;
+
+ struct {
+ u64 iova;
+ u32 fence;
+ u32 seqno;
+ u32 rptr;
+ u32 wptr;
+ void *data;
+ int data_size;
+ bool encoded;
+ } ring[MSM_GPU_MAX_RINGS];
+
+ int nr_registers;
+ u32 *registers;
+
+ u32 rbbm_status;
+
+ char *comm;
+ char *cmd;
+
+ struct msm_gpu_fault_info fault_info;
+
+ int nr_bos;
+ struct msm_gpu_state_bo *bos;
+};
+
+static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
+{
+ msm_writel(data, gpu->mmio + (reg << 2));
+}
+
+static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
+{
+ return msm_readl(gpu->mmio + (reg << 2));
+}
+
+static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
+{
+ msm_rmw(gpu->mmio + (reg << 2), mask, or);
+}
+
+static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg)
+{
+ u64 val;
+
+ /*
+ * Why not a readq here? Two reasons: 1) many of the LO registers are
+ * not quad word aligned and 2) the GPU hardware designers have a bit
+ * of a history of putting registers where they fit, especially in
+ * spins. The longer a GPU family goes the higher the chance that
+ * we'll get burned. We could do a series of validity checks if we
+	 * wanted to, but really, is a readq() that much better? Nah.
+ */
+
+ /*
+ * For some lo/hi registers (like perfcounters), the hi value is latched
+ * when the lo is read, so make sure to read the lo first to trigger
+ * that
+ */
+ val = (u64) msm_readl(gpu->mmio + (reg << 2));
+ val |= ((u64) msm_readl(gpu->mmio + ((reg + 1) << 2)) << 32);
+
+ return val;
+}
+
+static inline void gpu_write64(struct msm_gpu *gpu, u32 reg, u64 val)
+{
+ /* Why not a writeq here? Read the screed above */
+ msm_writel(lower_32_bits(val), gpu->mmio + (reg << 2));
+ msm_writel(upper_32_bits(val), gpu->mmio + ((reg + 1) << 2));
+}
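+
+/*
+ * Usage sketch (register name hypothetical): a 64-bit counter exposed
+ * as an adjacent lo/hi pair can be sampled in one call, with the lo
+ * half read first to trigger the hi-half latching described above:
+ *
+ *	u64 cycles = gpu_read64(gpu, REG_EXAMPLE_PERFCTR_LO);
+ */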
+
+int msm_gpu_pm_suspend(struct msm_gpu *gpu);
+int msm_gpu_pm_resume(struct msm_gpu *gpu);
+
+void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ struct drm_printer *p);
+
+int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+ u32 id);
+int msm_submitqueue_create(struct drm_device *drm,
+ struct msm_file_private *ctx,
+ u32 prio, u32 flags, u32 *id);
+int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+ struct drm_msm_submitqueue_query *args);
+int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
+void msm_submitqueue_close(struct msm_file_private *ctx);
+
+void msm_submitqueue_destroy(struct kref *kref);
+
+int msm_file_private_set_sysprof(struct msm_file_private *ctx,
+ struct msm_gpu *gpu, int sysprof);
+void __msm_file_private_destroy(struct kref *kref);
+
+static inline void msm_file_private_put(struct msm_file_private *ctx)
+{
+ kref_put(&ctx->ref, __msm_file_private_destroy);
+}
+
+static inline struct msm_file_private *msm_file_private_get(
+ struct msm_file_private *ctx)
+{
+ kref_get(&ctx->ref);
+ return ctx;
+}
+
+void msm_devfreq_init(struct msm_gpu *gpu);
+void msm_devfreq_cleanup(struct msm_gpu *gpu);
+void msm_devfreq_resume(struct msm_gpu *gpu);
+void msm_devfreq_suspend(struct msm_gpu *gpu);
+void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor);
+void msm_devfreq_active(struct msm_gpu *gpu);
+void msm_devfreq_idle(struct msm_gpu *gpu);
+
+int msm_gpu_hw_init(struct msm_gpu *gpu);
+
+void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
+void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
+int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
+ uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
+
+void msm_gpu_retire(struct msm_gpu *gpu);
+void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
+
+int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
+ const char *name, struct msm_gpu_config *config);
+
+struct msm_gem_address_space *
+msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);
+
+void msm_gpu_cleanup(struct msm_gpu *gpu);
+
+struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
+void __init adreno_register(void);
+void __exit adreno_unregister(void);
+
+static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
+{
+ if (queue)
+ kref_put(&queue->ref, msm_submitqueue_destroy);
+}
+
+static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = NULL;
+
+ mutex_lock(&gpu->lock);
+
+ if (gpu->crashstate) {
+ kref_get(&gpu->crashstate->ref);
+ state = gpu->crashstate;
+ }
+
+ mutex_unlock(&gpu->lock);
+
+ return state;
+}
+
+static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
+{
+ mutex_lock(&gpu->lock);
+
+ if (gpu->crashstate) {
+ if (gpu->funcs->gpu_state_put(gpu->crashstate))
+ gpu->crashstate = NULL;
+ }
+
+ mutex_unlock(&gpu->lock);
+}
+
+/*
+ * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
+ * support expanded privileges
+ */
+#define check_apriv(gpu, flags) \
+ (((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
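+
+/*
+ * Usage sketch (flags and variables illustrative): a kernel-allocated
+ * buffer that should be mapped privileged on capable targets might be
+ * created as:
+ *
+ *	ptr = msm_gem_kernel_new(drm, size,
+ *			check_apriv(gpu, MSM_BO_WC), gpu->aspace,
+ *			&bo, &iova);
+ */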
+
+
+#endif /* __MSM_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
new file mode 100644
index 000000000..85c443a37
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -0,0 +1,377 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include "msm_gpu.h"
+#include "msm_gpu_trace.h"
+
+#include <linux/devfreq.h>
+#include <linux/devfreq_cooling.h>
+#include <linux/math64.h>
+#include <linux/units.h>
+
+/*
+ * Power Management:
+ */
+
+static int msm_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct msm_gpu *gpu = dev_to_gpu(dev);
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+ struct dev_pm_opp *opp;
+
+ /*
+ * Note that devfreq_recommended_opp() can modify the freq
+ * to something that actually is in the opp table:
+ */
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp));
+
+ if (gpu->funcs->gpu_set_freq) {
+ mutex_lock(&df->lock);
+ gpu->funcs->gpu_set_freq(gpu, opp, df->suspended);
+ mutex_unlock(&df->lock);
+ } else {
+ clk_set_rate(gpu->core_clk, *freq);
+ }
+
+ dev_pm_opp_put(opp);
+
+ return 0;
+}
+
+static unsigned long get_freq(struct msm_gpu *gpu)
+{
+ if (gpu->funcs->gpu_get_freq)
+ return gpu->funcs->gpu_get_freq(gpu);
+
+ return clk_get_rate(gpu->core_clk);
+}
+
+static void get_raw_dev_status(struct msm_gpu *gpu,
+ struct devfreq_dev_status *status)
+{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+ u64 busy_cycles, busy_time;
+ unsigned long sample_rate;
+ ktime_t time;
+
+ mutex_lock(&df->lock);
+
+ status->current_frequency = get_freq(gpu);
+ time = ktime_get();
+ status->total_time = ktime_us_delta(time, df->time);
+ df->time = time;
+
+ if (df->suspended) {
+ mutex_unlock(&df->lock);
+ status->busy_time = 0;
+ return;
+ }
+
+ busy_cycles = gpu->funcs->gpu_busy(gpu, &sample_rate);
+ busy_time = busy_cycles - df->busy_cycles;
+ df->busy_cycles = busy_cycles;
+
+ mutex_unlock(&df->lock);
+
+ busy_time *= USEC_PER_SEC;
+ busy_time = div64_ul(busy_time, sample_rate);
+ if (WARN_ON(busy_time > ~0LU))
+ busy_time = ~0LU;
+
+ status->busy_time = busy_time;
+}
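+
+/*
+ * For reference (numbers illustrative): a busy-cycle delta of
+ * 10,000,000 against a 19,200,000 Hz sample rate yields
+ * 10000000 * USEC_PER_SEC / 19200000 ~= 520833 us of busy_time.
+ */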
+
+static void update_average_dev_status(struct msm_gpu *gpu,
+ const struct devfreq_dev_status *raw)
+{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+ const u32 polling_ms = df->devfreq->profile->polling_ms;
+ const u32 max_history_ms = polling_ms * 11 / 10;
+ struct devfreq_dev_status *avg = &df->average_status;
+ u64 avg_freq;
+
+ /* simple_ondemand governor interacts poorly with gpu->clamp_to_idle.
+ * When we enforce the constraint on idle, it calls get_dev_status
+ * which would normally reset the stats. When we remove the
+ * constraint on active, it calls get_dev_status again where busy_time
+ * would be 0.
+ *
+ * To remedy this, we always return the average load over the past
+ * polling_ms.
+ */
+
+ /* raw is longer than polling_ms or avg has no history */
+ if (div_u64(raw->total_time, USEC_PER_MSEC) >= polling_ms ||
+ !avg->total_time) {
+ *avg = *raw;
+ return;
+ }
+
+ /* Truncate the oldest history first.
+ *
+ * Because we keep the history with a single devfreq_dev_status,
+ * rather than a list of devfreq_dev_status, we have to assume freq
+ * and load are the same over avg->total_time. We can scale down
+ * avg->busy_time and avg->total_time by the same factor to drop
+ * history.
+ */
+ if (div_u64(avg->total_time + raw->total_time, USEC_PER_MSEC) >=
+ max_history_ms) {
+ const u32 new_total_time = polling_ms * USEC_PER_MSEC -
+ raw->total_time;
+ avg->busy_time = div_u64(
+ mul_u32_u32(avg->busy_time, new_total_time),
+ avg->total_time);
+ avg->total_time = new_total_time;
+ }
+
+ /* compute the average freq over avg->total_time + raw->total_time */
+ avg_freq = mul_u32_u32(avg->current_frequency, avg->total_time);
+ avg_freq += mul_u32_u32(raw->current_frequency, raw->total_time);
+ do_div(avg_freq, avg->total_time + raw->total_time);
+
+ avg->current_frequency = avg_freq;
+ avg->busy_time += raw->busy_time;
+ avg->total_time += raw->total_time;
+}
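+
+/*
+ * Worked example (illustrative, polling_ms == 50 per the profile
+ * below): with avg->total_time == 48000 us and raw->total_time ==
+ * 10000 us, the 58 ms sum exceeds max_history_ms (55 ms), so avg is
+ * first rescaled to new_total_time == 50000 - 10000 == 40000 us
+ * (busy_time scaled by 40000/48000) before the raw sample is folded in.
+ */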
+
+static int msm_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *status)
+{
+ struct msm_gpu *gpu = dev_to_gpu(dev);
+ struct devfreq_dev_status raw;
+
+ get_raw_dev_status(gpu, &raw);
+ update_average_dev_status(gpu, &raw);
+ *status = gpu->devfreq.average_status;
+
+ return 0;
+}
+
+static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ *freq = get_freq(dev_to_gpu(dev));
+
+ return 0;
+}
+
+static struct devfreq_dev_profile msm_devfreq_profile = {
+ .timer = DEVFREQ_TIMER_DELAYED,
+ .polling_ms = 50,
+ .target = msm_devfreq_target,
+ .get_dev_status = msm_devfreq_get_dev_status,
+ .get_cur_freq = msm_devfreq_get_cur_freq,
+};
+
+static void msm_devfreq_boost_work(struct kthread_work *work);
+static void msm_devfreq_idle_work(struct kthread_work *work);
+
+static bool has_devfreq(struct msm_gpu *gpu)
+{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+ return !!df->devfreq;
+}
+
+void msm_devfreq_init(struct msm_gpu *gpu)
+{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+
+	/* We need gpu_busy() sampling support to do devfreq */
+ if (!gpu->funcs->gpu_busy)
+ return;
+
+ mutex_init(&df->lock);
+
+ dev_pm_qos_add_request(&gpu->pdev->dev, &df->idle_freq,
+ DEV_PM_QOS_MAX_FREQUENCY,
+ PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
+ dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq,
+ DEV_PM_QOS_MIN_FREQUENCY, 0);
+
+ msm_devfreq_profile.initial_freq = gpu->fast_rate;
+
+ /*
+ * Don't set the freq_table or max_state and let devfreq build the table
+	 * from the OPP table.
+	 * After a deferred probe, these may have been left at non-zero values,
+	 * so set them back to zero before creating the devfreq device.
+ */
+ msm_devfreq_profile.freq_table = NULL;
+ msm_devfreq_profile.max_state = 0;
+
+ df->devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
+ &msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ NULL);
+
+ if (IS_ERR(df->devfreq)) {
+ DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
+ dev_pm_qos_remove_request(&df->idle_freq);
+ dev_pm_qos_remove_request(&df->boost_freq);
+ df->devfreq = NULL;
+ return;
+ }
+
+ devfreq_suspend_device(df->devfreq);
+
+ gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node, df->devfreq);
+ if (IS_ERR(gpu->cooling)) {
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "Couldn't register GPU cooling device\n");
+ gpu->cooling = NULL;
+ }
+
+ msm_hrtimer_work_init(&df->boost_work, gpu->worker, msm_devfreq_boost_work,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ msm_hrtimer_work_init(&df->idle_work, gpu->worker, msm_devfreq_idle_work,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+}
+
+static void cancel_idle_work(struct msm_gpu_devfreq *df)
+{
+ hrtimer_cancel(&df->idle_work.timer);
+ kthread_cancel_work_sync(&df->idle_work.work);
+}
+
+static void cancel_boost_work(struct msm_gpu_devfreq *df)
+{
+ hrtimer_cancel(&df->boost_work.timer);
+ kthread_cancel_work_sync(&df->boost_work.work);
+}
+
+void msm_devfreq_cleanup(struct msm_gpu *gpu)
+{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+
+ if (!has_devfreq(gpu))
+ return;
+
+ devfreq_cooling_unregister(gpu->cooling);
+ dev_pm_qos_remove_request(&df->boost_freq);
+ dev_pm_qos_remove_request(&df->idle_freq);
+}
+
+void msm_devfreq_resume(struct msm_gpu *gpu)
+{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+ unsigned long sample_rate;
+
+ if (!has_devfreq(gpu))
+ return;
+
+ mutex_lock(&df->lock);
+ df->busy_cycles = gpu->funcs->gpu_busy(gpu, &sample_rate);
+ df->time = ktime_get();
+ df->suspended = false;
+ mutex_unlock(&df->lock);
+
+ devfreq_resume_device(df->devfreq);
+}
+
+void msm_devfreq_suspend(struct msm_gpu *gpu)
+{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+
+ if (!has_devfreq(gpu))
+ return;
+
+ mutex_lock(&df->lock);
+ df->suspended = true;
+ mutex_unlock(&df->lock);
+
+ devfreq_suspend_device(df->devfreq);
+
+ cancel_idle_work(df);
+ cancel_boost_work(df);
+}
+
+static void msm_devfreq_boost_work(struct kthread_work *work)
+{
+ struct msm_gpu_devfreq *df = container_of(work,
+ struct msm_gpu_devfreq, boost_work.work);
+
+ dev_pm_qos_update_request(&df->boost_freq, 0);
+}
+
+void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor)
+{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+ uint64_t freq;
+
+ if (!has_devfreq(gpu))
+ return;
+
+ freq = get_freq(gpu);
+ freq *= factor;
+
+ /*
+	 * A nice little trap is that PM QoS operates in terms of kHz,
+ * while devfreq operates in terms of Hz:
+ */
+ do_div(freq, HZ_PER_KHZ);
+
+ dev_pm_qos_update_request(&df->boost_freq, freq);
+
+ msm_hrtimer_queue_work(&df->boost_work,
+ ms_to_ktime(msm_devfreq_profile.polling_ms),
+ HRTIMER_MODE_REL);
+}
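+
+/*
+ * For reference (numbers illustrative): boosting by a factor of 2 from
+ * a current freq of 300000000 Hz requests a 600000000 Hz min freq,
+ * passed to PM QoS as 600000 kHz after the HZ_PER_KHZ division.
+ */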
+
+void msm_devfreq_active(struct msm_gpu *gpu)
+{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+ unsigned int idle_time;
+
+ if (!has_devfreq(gpu))
+ return;
+
+ /*
+ * Cancel any pending transition to idle frequency:
+ */
+ cancel_idle_work(df);
+
+ idle_time = ktime_to_ms(ktime_sub(ktime_get(), df->idle_time));
+
+ /*
+ * If we've been idle for a significant fraction of a polling
+ * interval, then we won't meet the threshold of busyness for
+	 * the governor to ramp up the freq, so give it some boost.
+	 */
+	if (idle_time > msm_devfreq_profile.polling_ms)
+		msm_devfreq_boost(gpu, 2);
+
+ dev_pm_qos_update_request(&df->idle_freq,
+ PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
+}
+
+static void msm_devfreq_idle_work(struct kthread_work *work)
+{
+ struct msm_gpu_devfreq *df = container_of(work,
+ struct msm_gpu_devfreq, idle_work.work);
+ struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq);
+
+ df->idle_time = ktime_get();
+
+ if (gpu->clamp_to_idle)
+ dev_pm_qos_update_request(&df->idle_freq, 0);
+}
+
+void msm_devfreq_idle(struct msm_gpu *gpu)
+{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+
+ if (!has_devfreq(gpu))
+ return;
+
+ msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1),
+ HRTIMER_MODE_REL);
+}
diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
new file mode 100644
index 000000000..ac40d857b
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if !defined(_MSM_GPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MSM_GPU_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM drm_msm_gpu
+#define TRACE_INCLUDE_FILE msm_gpu_trace
+
+TRACE_EVENT(msm_gpu_submit,
+ TP_PROTO(pid_t pid, u32 ringid, u32 id, u32 nr_bos, u32 nr_cmds),
+ TP_ARGS(pid, ringid, id, nr_bos, nr_cmds),
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(u32, id)
+ __field(u32, ringid)
+ __field(u32, nr_cmds)
+ __field(u32, nr_bos)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __entry->id = id;
+ __entry->ringid = ringid;
+ __entry->nr_bos = nr_bos;
+			__entry->nr_cmds = nr_cmds;
+ ),
+ TP_printk("id=%d pid=%d ring=%d bos=%d cmds=%d",
+ __entry->id, __entry->pid, __entry->ringid,
+ __entry->nr_bos, __entry->nr_cmds)
+);
+
+TRACE_EVENT(msm_gpu_submit_flush,
+ TP_PROTO(struct msm_gem_submit *submit, u64 ticks),
+ TP_ARGS(submit, ticks),
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(u32, id)
+ __field(u32, ringid)
+ __field(u32, seqno)
+ __field(u64, ticks)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid_nr(submit->pid);
+ __entry->id = submit->ident;
+ __entry->ringid = submit->ring->id;
+ __entry->seqno = submit->seqno;
+ __entry->ticks = ticks;
+ ),
+ TP_printk("id=%d pid=%d ring=%d:%d ticks=%lld",
+ __entry->id, __entry->pid, __entry->ringid, __entry->seqno,
+ __entry->ticks)
+);
+
+
+TRACE_EVENT(msm_gpu_submit_retired,
+ TP_PROTO(struct msm_gem_submit *submit, u64 elapsed, u64 clock,
+ u64 start, u64 end),
+ TP_ARGS(submit, elapsed, clock, start, end),
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(u32, id)
+ __field(u32, ringid)
+ __field(u32, seqno)
+ __field(u64, elapsed)
+ __field(u64, clock)
+ __field(u64, start_ticks)
+ __field(u64, end_ticks)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid_nr(submit->pid);
+ __entry->id = submit->ident;
+ __entry->ringid = submit->ring->id;
+ __entry->seqno = submit->seqno;
+ __entry->elapsed = elapsed;
+ __entry->clock = clock;
+ __entry->start_ticks = start;
+ __entry->end_ticks = end;
+ ),
+ TP_printk("id=%d pid=%d ring=%d:%d elapsed=%lld ns mhz=%lld start=%lld end=%lld",
+ __entry->id, __entry->pid, __entry->ringid, __entry->seqno,
+ __entry->elapsed, __entry->clock,
+ __entry->start_ticks, __entry->end_ticks)
+);
+
+
+TRACE_EVENT(msm_gpu_freq_change,
+ TP_PROTO(u32 freq),
+ TP_ARGS(freq),
+ TP_STRUCT__entry(
+ __field(u32, freq)
+ ),
+ TP_fast_assign(
+ /* trace freq in MHz to match intel_gpu_freq_change, to make life easier
+ * for userspace
+ */
+ __entry->freq = DIV_ROUND_UP(freq, 1000000);
+ ),
+ TP_printk("new_freq=%u", __entry->freq)
+);
+
+
+TRACE_EVENT(msm_gmu_freq_change,
+ TP_PROTO(u32 freq, u32 perf_index),
+ TP_ARGS(freq, perf_index),
+ TP_STRUCT__entry(
+ __field(u32, freq)
+ __field(u32, perf_index)
+ ),
+ TP_fast_assign(
+ __entry->freq = freq;
+ __entry->perf_index = perf_index;
+ ),
+ TP_printk("freq=%u, perf_index=%u", __entry->freq, __entry->perf_index)
+);
+
+
+TRACE_EVENT(msm_gem_shrink,
+ TP_PROTO(u32 nr_to_scan, u32 purged, u32 evicted,
+ u32 active_purged, u32 active_evicted),
+ TP_ARGS(nr_to_scan, purged, evicted, active_purged, active_evicted),
+ TP_STRUCT__entry(
+ __field(u32, nr_to_scan)
+ __field(u32, purged)
+ __field(u32, evicted)
+ __field(u32, active_purged)
+ __field(u32, active_evicted)
+ ),
+ TP_fast_assign(
+ __entry->nr_to_scan = nr_to_scan;
+ __entry->purged = purged;
+ __entry->evicted = evicted;
+ __entry->active_purged = active_purged;
+ __entry->active_evicted = active_evicted;
+ ),
+ TP_printk("nr_to_scan=%u pg, purged=%u pg, evicted=%u pg, active_purged=%u pg, active_evicted=%u pg",
+ __entry->nr_to_scan, __entry->purged, __entry->evicted,
+ __entry->active_purged, __entry->active_evicted)
+);
+
+
+TRACE_EVENT(msm_gem_purge_vmaps,
+ TP_PROTO(u32 unmapped),
+ TP_ARGS(unmapped),
+ TP_STRUCT__entry(
+ __field(u32, unmapped)
+ ),
+ TP_fast_assign(
+ __entry->unmapped = unmapped;
+ ),
+ TP_printk("Purging %u vmaps", __entry->unmapped)
+);
+
+
+TRACE_EVENT(msm_gpu_suspend,
+ TP_PROTO(int dummy),
+ TP_ARGS(dummy),
+ TP_STRUCT__entry(
+ __field(u32, dummy)
+ ),
+ TP_fast_assign(
+ __entry->dummy = dummy;
+ ),
+ TP_printk("%u", __entry->dummy)
+);
+
+
+TRACE_EVENT(msm_gpu_resume,
+ TP_PROTO(int dummy),
+ TP_ARGS(dummy),
+ TP_STRUCT__entry(
+ __field(u32, dummy)
+ ),
+ TP_fast_assign(
+ __entry->dummy = dummy;
+ ),
+ TP_printk("%u", __entry->dummy)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/msm
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/msm/msm_gpu_tracepoints.c b/drivers/gpu/drm/msm/msm_gpu_tracepoints.c
new file mode 100644
index 000000000..72c074f8c
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu_tracepoints.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "msm_gem.h"
+#include "msm_ringbuffer.h"
+
+#define CREATE_TRACE_POINTS
+#include "msm_gpu_trace.h"
diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c
new file mode 100644
index 000000000..f7d1945e0
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpummu.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include <linux/dma-mapping.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "adreno/adreno_gpu.h"
+#include "adreno/a2xx.xml.h"
+
+struct msm_gpummu {
+ struct msm_mmu base;
+ struct msm_gpu *gpu;
+ dma_addr_t pt_base;
+ uint32_t *table;
+};
+#define to_msm_gpummu(x) container_of(x, struct msm_gpummu, base)
+
+#define GPUMMU_VA_START SZ_16M
+#define GPUMMU_VA_RANGE (0xfff * SZ_64K)
+#define GPUMMU_PAGE_SIZE SZ_4K
+#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)
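+
+/*
+ * For reference: the VA range is 0xfff * SZ_64K (just under 256M),
+ * i.e. 0xfff * 16 == 65520 4K pages, so the (contiguous) pagetable
+ * is 4 * 65520 == 262080 bytes, slightly under 256K.
+ */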
+
+static void msm_gpummu_detach(struct msm_mmu *mmu)
+{
+}
+
+static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt, size_t len, int prot)
+{
+ struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+ unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
+ struct sg_dma_page_iter dma_iter;
+ unsigned prot_bits = 0;
+
+ if (prot & IOMMU_WRITE)
+ prot_bits |= 1;
+ if (prot & IOMMU_READ)
+ prot_bits |= 2;
+
+ for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
+ dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);
+ int i;
+
+ for (i = 0; i < PAGE_SIZE; i += GPUMMU_PAGE_SIZE)
+ gpummu->table[idx++] = (addr + i) | prot_bits;
+ }
+
+	/* we could improve this by deferring the flush across multiple map() calls */
+ gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+ return 0;
+}
+
+static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
+{
+ struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+ unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
+ unsigned i;
+
+ for (i = 0; i < len / GPUMMU_PAGE_SIZE; i++, idx++)
+ gpummu->table[idx] = 0;
+
+ gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+ return 0;
+}
+
+static void msm_gpummu_resume_translation(struct msm_mmu *mmu)
+{
+}
+
+static void msm_gpummu_destroy(struct msm_mmu *mmu)
+{
+ struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+
+ dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+
+ kfree(gpummu);
+}
+
+static const struct msm_mmu_funcs funcs = {
+ .detach = msm_gpummu_detach,
+ .map = msm_gpummu_map,
+ .unmap = msm_gpummu_unmap,
+ .destroy = msm_gpummu_destroy,
+ .resume_translation = msm_gpummu_resume_translation,
+};
+
+struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu)
+{
+ struct msm_gpummu *gpummu;
+
+ gpummu = kzalloc(sizeof(*gpummu), GFP_KERNEL);
+ if (!gpummu)
+ return ERR_PTR(-ENOMEM);
+
+ gpummu->table = dma_alloc_attrs(dev, TABLE_SIZE + 32, &gpummu->pt_base,
+ GFP_KERNEL | __GFP_ZERO, DMA_ATTR_FORCE_CONTIGUOUS);
+ if (!gpummu->table) {
+ kfree(gpummu);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ gpummu->gpu = gpu;
+ msm_mmu_init(&gpummu->base, dev, &funcs, MSM_MMU_GPUMMU);
+
+ return &gpummu->base;
+}
+
+void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
+ dma_addr_t *tran_error)
+{
+ dma_addr_t base = to_msm_gpummu(mmu)->pt_base;
+
+ *pt_base = base;
+ *tran_error = base + TABLE_SIZE; /* 32-byte aligned */
+}
diff --git a/drivers/gpu/drm/msm/msm_io_utils.c b/drivers/gpu/drm/msm/msm_io_utils.c
new file mode 100644
index 000000000..d02cd29ce
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_io_utils.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/interconnect.h>
+
+#include "msm_drv.h"
+
+/*
+ * Util/helpers:
+ */
+
+struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
+ const char *name)
+{
+ int i;
+ char n[32];
+
+ snprintf(n, sizeof(n), "%s_clk", name);
+
+ for (i = 0; bulk && i < count; i++) {
+ if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n))
+ return bulk[i].clk;
+ }
+
+ return NULL;
+}
+
+struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
+{
+ struct clk *clk;
+ char name2[32];
+
+ clk = devm_clk_get(&pdev->dev, name);
+ if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
+ return clk;
+
+ snprintf(name2, sizeof(name2), "%s_clk", name);
+
+ clk = devm_clk_get(&pdev->dev, name2);
+ if (!IS_ERR(clk))
+ dev_warn(&pdev->dev, "Using legacy clk name binding. Use "
+ "\"%s\" instead of \"%s\"\n", name, name2);
+
+ return clk;
+}
+
+static void __iomem *_msm_ioremap(struct platform_device *pdev, const char *name,
+ bool quiet, phys_addr_t *psize)
+{
+ struct resource *res;
+ unsigned long size;
+ void __iomem *ptr;
+
+ if (name)
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ else
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!res) {
+ if (!quiet)
+ DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ size = resource_size(res);
+
+ ptr = devm_ioremap(&pdev->dev, res->start, size);
+ if (!ptr) {
+ if (!quiet)
+ DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (psize)
+ *psize = size;
+
+ return ptr;
+}
+
+void __iomem *msm_ioremap(struct platform_device *pdev, const char *name)
+{
+ return _msm_ioremap(pdev, name, false, NULL);
+}
+
+void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name)
+{
+ return _msm_ioremap(pdev, name, true, NULL);
+}
+
+void __iomem *msm_ioremap_size(struct platform_device *pdev, const char *name,
+ phys_addr_t *psize)
+{
+ return _msm_ioremap(pdev, name, false, psize);
+}
+
+static enum hrtimer_restart msm_hrtimer_worktimer(struct hrtimer *t)
+{
+ struct msm_hrtimer_work *work = container_of(t,
+ struct msm_hrtimer_work, timer);
+
+ kthread_queue_work(work->worker, &work->work);
+
+ return HRTIMER_NORESTART;
+}
+
+void msm_hrtimer_queue_work(struct msm_hrtimer_work *work,
+ ktime_t wakeup_time,
+ enum hrtimer_mode mode)
+{
+ hrtimer_start(&work->timer, wakeup_time, mode);
+}
+
+void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
+ struct kthread_worker *worker,
+ kthread_work_func_t fn,
+ clockid_t clock_id,
+ enum hrtimer_mode mode)
+{
+ hrtimer_init(&work->timer, clock_id, mode);
+ work->timer.function = msm_hrtimer_worktimer;
+ work->worker = worker;
+ kthread_init_work(&work->work, fn);
+}
+
+struct icc_path *msm_icc_get(struct device *dev, const char *name)
+{
+ struct device *mdss_dev = dev->parent;
+ struct icc_path *path;
+
+ path = of_icc_get(dev, name);
+ if (path)
+ return path;
+
+ /*
+ * If there are no interconnects attached to the corresponding device
+ * node, of_icc_get() will return NULL.
+ *
+ * If the MDP5/DPU device node doesn't have interconnects, lookup the
+ * path in the parent (MDSS) device.
+ */
+ return of_icc_get(mdss_dev, name);
+}
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
new file mode 100644
index 000000000..d12ba47b3
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/adreno-smmu-priv.h>
+#include <linux/io-pgtable.h>
+#include "msm_drv.h"
+#include "msm_mmu.h"
+
+struct msm_iommu {
+ struct msm_mmu base;
+ struct iommu_domain *domain;
+ atomic_t pagetables;
+};
+
+#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
+
+struct msm_iommu_pagetable {
+ struct msm_mmu base;
+ struct msm_mmu *parent;
+ struct io_pgtable_ops *pgtbl_ops;
+ unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
+ phys_addr_t ttbr;
+ u32 asid;
+};
+static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
+{
+ return container_of(mmu, struct msm_iommu_pagetable, base);
+}
+
+/* based on iommu_pgsize() in iommu.c: */
+static size_t calc_pgsize(struct msm_iommu_pagetable *pagetable,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, size_t *count)
+{
+ unsigned int pgsize_idx, pgsize_idx_next;
+ unsigned long pgsizes;
+ size_t offset, pgsize, pgsize_next;
+ unsigned long addr_merge = paddr | iova;
+
+ /* Page sizes supported by the hardware and small enough for @size */
+ pgsizes = pagetable->pgsize_bitmap & GENMASK(__fls(size), 0);
+
+ /* Constrain the page sizes further based on the maximum alignment */
+ if (likely(addr_merge))
+ pgsizes &= GENMASK(__ffs(addr_merge), 0);
+
+ /* Make sure we have at least one suitable page size */
+ BUG_ON(!pgsizes);
+
+ /* Pick the biggest page size remaining */
+ pgsize_idx = __fls(pgsizes);
+ pgsize = BIT(pgsize_idx);
+ if (!count)
+ return pgsize;
+
+	/* Find the next biggest supported page size, if it exists */
+ pgsizes = pagetable->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
+ if (!pgsizes)
+ goto out_set_count;
+
+ pgsize_idx_next = __ffs(pgsizes);
+ pgsize_next = BIT(pgsize_idx_next);
+
+ /*
+ * There's no point trying a bigger page size unless the virtual
+ * and physical addresses are similarly offset within the larger page.
+ */
+ if ((iova ^ paddr) & (pgsize_next - 1))
+ goto out_set_count;
+
+ /* Calculate the offset to the next page size alignment boundary */
+ offset = pgsize_next - (addr_merge & (pgsize_next - 1));
+
+ /*
+ * If size is big enough to accommodate the larger page, reduce
+ * the number of smaller pages.
+ */
+ if (offset + pgsize_next <= size)
+ size = offset;
+
+out_set_count:
+ *count = size >> pgsize_idx;
+ return pgsize;
+}
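+
+/*
+ * Worked example (illustrative, assuming a pgsize_bitmap of
+ * SZ_4K | SZ_2M): mapping 4M at a 2M-aligned iova/paddr pair yields
+ * pgsize == 2M with count == 2.  With only 4K alignment, pgsize falls
+ * back to 4K; and if iova and paddr share the same offset within 2M,
+ * count is clipped at the next 2M boundary so the remainder can be
+ * mapped with 2M pages.
+ */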
+
+static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
+ size_t size)
+{
+ struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+ struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+
+ while (size) {
+ size_t unmapped, pgsize, count;
+
+ pgsize = calc_pgsize(pagetable, iova, iova, size, &count);
+
+ unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);
+ if (!unmapped)
+ break;
+
+ iova += unmapped;
+ size -= unmapped;
+ }
+
+ iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);
+
+ return (size == 0) ? 0 : -EINVAL;
+}
+
+static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
+ struct sg_table *sgt, size_t len, int prot)
+{
+ struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+ struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+ struct scatterlist *sg;
+ u64 addr = iova;
+ unsigned int i;
+
+ for_each_sgtable_sg(sgt, sg, i) {
+ size_t size = sg->length;
+ phys_addr_t phys = sg_phys(sg);
+
+ while (size) {
+ size_t pgsize, count, mapped = 0;
+ int ret;
+
+ pgsize = calc_pgsize(pagetable, addr, phys, size, &count);
+
+ ret = ops->map_pages(ops, addr, phys, pgsize, count,
+ prot, GFP_KERNEL, &mapped);
+
+ /* map_pages could fail after mapping some of the pages,
+ * so update the counters before error handling.
+ */
+ phys += mapped;
+ addr += mapped;
+ size -= mapped;
+
+ if (ret) {
+ msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
+{
+ struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+ struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
+ struct adreno_smmu_priv *adreno_smmu =
+ dev_get_drvdata(pagetable->parent->dev);
+
+ /*
+ * If this is the last attached pagetable for the parent,
+ * disable TTBR0 in the arm-smmu driver
+ */
+ if (atomic_dec_return(&iommu->pagetables) == 0)
+ adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);
+
+ free_io_pgtable_ops(pagetable->pgtbl_ops);
+ kfree(pagetable);
+}
+
+int msm_iommu_pagetable_params(struct msm_mmu *mmu,
+ phys_addr_t *ttbr, int *asid)
+{
+ struct msm_iommu_pagetable *pagetable;
+
+ if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
+ return -EINVAL;
+
+ pagetable = to_pagetable(mmu);
+
+ if (ttbr)
+ *ttbr = pagetable->ttbr;
+
+ if (asid)
+ *asid = pagetable->asid;
+
+ return 0;
+}
+
+static const struct msm_mmu_funcs pagetable_funcs = {
+ .map = msm_iommu_pagetable_map,
+ .unmap = msm_iommu_pagetable_unmap,
+ .destroy = msm_iommu_pagetable_destroy,
+};
+
+static void msm_iommu_tlb_flush_all(void *cookie)
+{
+}
+
+static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+}
+
+static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule, void *cookie)
+{
+}
+
+static const struct iommu_flush_ops null_tlb_ops = {
+ .tlb_flush_all = msm_iommu_tlb_flush_all,
+ .tlb_flush_walk = msm_iommu_tlb_flush_walk,
+ .tlb_add_page = msm_iommu_tlb_add_page,
+};
+
+static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
+ unsigned long iova, int flags, void *arg);
+
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
+{
+ struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
+ struct msm_iommu *iommu = to_msm_iommu(parent);
+ struct msm_iommu_pagetable *pagetable;
+ const struct io_pgtable_cfg *ttbr1_cfg = NULL;
+ struct io_pgtable_cfg ttbr0_cfg;
+ int ret;
+
+ /* Get the pagetable configuration from the domain */
+ if (adreno_smmu->cookie)
+ ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
+
+ /*
+ * If you hit this WARN_ONCE() you are probably missing an entry in
+ * qcom_smmu_impl_of_match[] in arm-smmu-qcom.c
+ */
+ if (WARN_ONCE(!ttbr1_cfg, "No per-process page tables"))
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * Defer setting the fault handler until we have a valid adreno_smmu
+	 * to avoid accidentally installing a GPU-specific fault handler for
+ * the display's iommu
+ */
+ iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu);
+
+ pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
+ if (!pagetable)
+ return ERR_PTR(-ENOMEM);
+
+ msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs,
+ MSM_MMU_IOMMU_PAGETABLE);
+
+ /* Clone the TTBR1 cfg as starting point for TTBR0 cfg: */
+ ttbr0_cfg = *ttbr1_cfg;
+
+ /* The incoming cfg will have the TTBR1 quirk enabled */
+ ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
+ ttbr0_cfg.tlb = &null_tlb_ops;
+
+ pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
+ &ttbr0_cfg, iommu->domain);
+
+ if (!pagetable->pgtbl_ops) {
+ kfree(pagetable);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * If this is the first pagetable that we've allocated, send it back to
+ * the arm-smmu driver as a trigger to set up TTBR0
+ */
+ if (atomic_inc_return(&iommu->pagetables) == 1) {
+ /* Enable stall on iommu fault: */
+ adreno_smmu->set_stall(adreno_smmu->cookie, true);
+
+ ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
+ if (ret) {
+ free_io_pgtable_ops(pagetable->pgtbl_ops);
+ kfree(pagetable);
+ return ERR_PTR(ret);
+ }
+ }
+
+ /* Needed later for TLB flush */
+ pagetable->parent = parent;
+ pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
+ pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
+
+ /*
+ * TODO we would like each set of page tables to have a unique ASID
+ * to optimize TLB invalidation. But iommu_flush_iotlb_all() will
+ * end up flushing the ASID used for TTBR1 pagetables, which is not
+ * what we want. So for now just use the same ASID as TTBR1.
+ */
+ pagetable->asid = 0;
+
+ return &pagetable->base;
+}
+
+static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
+ unsigned long iova, int flags, void *arg)
+{
+ struct msm_iommu *iommu = arg;
+ struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(iommu->base.dev);
+ struct adreno_smmu_fault_info info, *ptr = NULL;
+
+ if (adreno_smmu->get_fault_info) {
+ adreno_smmu->get_fault_info(adreno_smmu->cookie, &info);
+ ptr = &info;
+ }
+
+ if (iommu->base.handler)
+ return iommu->base.handler(iommu->base.arg, iova, flags, ptr);
+
+ pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);
+ return 0;
+}
+
+static void msm_iommu_resume_translation(struct msm_mmu *mmu)
+{
+ struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev);
+
+ adreno_smmu->resume_translation(adreno_smmu->cookie, true);
+}
+
+static void msm_iommu_detach(struct msm_mmu *mmu)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+ iommu_detach_device(iommu->domain, mmu->dev);
+}
+
+static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt, size_t len, int prot)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ size_t ret;
+
+ /* The arm-smmu driver expects the addresses to be sign extended */
+ if (iova & BIT_ULL(48))
+ iova |= GENMASK_ULL(63, 49);
+
+ ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
+ WARN_ON(!ret);
+
+ return (ret == len) ? 0 : -EINVAL;
+}
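+
+/*
+ * For reference (address illustrative): an iova of 0x0001000000100000
+ * has bit 48 set and is sign-extended to 0xffff000000100000 before
+ * being handed to the iommu layer.
+ */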
+
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+ if (iova & BIT_ULL(48))
+ iova |= GENMASK_ULL(63, 49);
+
+ iommu_unmap(iommu->domain, iova, len);
+
+ return 0;
+}
+
+static void msm_iommu_destroy(struct msm_mmu *mmu)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ iommu_domain_free(iommu->domain);
+ kfree(iommu);
+}
+
+static const struct msm_mmu_funcs funcs = {
+ .detach = msm_iommu_detach,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+ .destroy = msm_iommu_destroy,
+ .resume_translation = msm_iommu_resume_translation,
+};
+
+struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
+{
+ struct msm_iommu *iommu;
+ int ret;
+
+ if (!domain)
+ return ERR_PTR(-ENODEV);
+
+ iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
+ if (!iommu)
+ return ERR_PTR(-ENOMEM);
+
+ iommu->domain = domain;
+ msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
+
+ atomic_set(&iommu->pagetables, 0);
+
+ ret = iommu_attach_device(iommu->domain, dev);
+ if (ret) {
+ kfree(iommu);
+ return ERR_PTR(ret);
+ }
+
+ return &iommu->base;
+}
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
new file mode 100644
index 000000000..f8ed75889
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __MSM_KMS_H__
+#define __MSM_KMS_H__
+
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include "msm_drv.h"
+
+#define MAX_PLANE 4
+
+/* As there are different display controller blocks depending on the
+ * snapdragon version, the kms support is split out and the appropriate
+ * implementation is loaded at runtime. The kms module is responsible
+ * for constructing the appropriate planes/crtcs/encoders/connectors.
+ */
+struct msm_kms_funcs {
+ /* hw initialization: */
+ int (*hw_init)(struct msm_kms *kms);
+ /* irq handling: */
+ void (*irq_preinstall)(struct msm_kms *kms);
+ int (*irq_postinstall)(struct msm_kms *kms);
+ void (*irq_uninstall)(struct msm_kms *kms);
+ irqreturn_t (*irq)(struct msm_kms *kms);
+ int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+ void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+
+ /*
+ * Atomic commit handling:
+ *
+ * Note that in the case of async commits, the funcs which take
+ * a crtc_mask (ie. ->flush_commit(), and ->complete_commit())
+ * might not be evenly balanced with ->prepare_commit(), however
+	 * might not be evenly balanced with ->prepare_commit(); however,
+	 * each crtc affected by a ->prepare_commit() (potentially
+ * flushed and completed.
+ *
+ * This has some implications about tracking of cleanup state,
+ * for example SMP blocks to release after commit completes. Ie.
+	 * cleanup state should also be duplicated in the various
+ * duplicate_state() methods, as the current cleanup state at
+ * ->complete_commit() time may have accumulated cleanup work
+ * from multiple commits.
+ */
+
+ /**
+ * Enable/disable power/clks needed for hw access done in other
+ * commit related methods.
+ *
+ * If mdp4 is migrated to runpm, we could probably drop these
+ * and use runpm directly.
+ */
+ void (*enable_commit)(struct msm_kms *kms);
+ void (*disable_commit)(struct msm_kms *kms);
+
+ /**
+ * If the kms backend supports async commit, it should implement
+ * this method to return the time of the next vsync. This is
+ * used to determine a time slightly before vsync, for the async
+ * commit timer to run and complete an async commit.
+ */
+ ktime_t (*vsync_time)(struct msm_kms *kms, struct drm_crtc *crtc);
+
+ /**
+ * Prepare for atomic commit. This is called after any previous
+ * (async or otherwise) commit has completed.
+ */
+ void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
+
+ /**
+ * Flush an atomic commit. This is called after the hardware
+	 * updates have already been pushed down to affected planes/
+ * crtcs/encoders/connectors.
+ */
+ void (*flush_commit)(struct msm_kms *kms, unsigned crtc_mask);
+
+ /**
+ * Wait for any in-progress flush to complete on the specified
+ * crtcs. This should not block if there is no in-progress
+ * commit (ie. don't just wait for a vblank), as it will also
+ * be called before ->prepare_commit() to ensure any potential
+ * "async" commit has completed.
+ */
+ void (*wait_flush)(struct msm_kms *kms, unsigned crtc_mask);
+
+ /**
+ * Clean up after commit is completed. This is called after
+ * ->wait_flush(), to give the backend a chance to do any
+ * post-commit cleanup.
+ */
+ void (*complete_commit)(struct msm_kms *kms, unsigned crtc_mask);
+
+ /*
+ * Format handling:
+ */
+
+ /* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */
+ const struct msm_format *(*get_format)(struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t modifiers);
+ /* do format checking on format modified through fb_cmd2 modifiers */
+ int (*check_modified_format)(const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos);
+
+ /* misc: */
+ long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
+ struct drm_encoder *encoder);
+ int (*set_split_display)(struct msm_kms *kms,
+ struct drm_encoder *encoder,
+ struct drm_encoder *slave_encoder,
+ bool is_cmd_mode);
+ /* cleanup: */
+ void (*destroy)(struct msm_kms *kms);
+
+ /* snapshot: */
+ void (*snapshot)(struct msm_disp_state *disp_state, struct msm_kms *kms);
+
+#ifdef CONFIG_DEBUG_FS
+ /* debugfs: */
+ int (*debugfs_init)(struct msm_kms *kms, struct drm_minor *minor);
+#endif
+};
+
+struct msm_kms;
+
+/*
+ * A per-crtc timer for pending async atomic flushes. Scheduled to expire
+ * shortly before vblank to flush pending async updates.
+ */
+struct msm_pending_timer {
+ struct msm_hrtimer_work work;
+ struct kthread_worker *worker;
+ struct msm_kms *kms;
+ unsigned crtc_idx;
+};
+
+struct msm_kms {
+ const struct msm_kms_funcs *funcs;
+ struct drm_device *dev;
+
+ /* irq number to be passed on to msm_irq_install */
+ int irq;
+ bool irq_requested;
+
+ /* mapper-id used to request GEM buffer mapped for scanout: */
+ struct msm_gem_address_space *aspace;
+
+ /* disp snapshot support */
+ struct kthread_worker *dump_worker;
+ struct kthread_work dump_work;
+ struct mutex dump_mutex;
+
+ /*
+ * For async commit, where ->flush_commit() and later happens
+ * from the crtc's pending_timer close to end of the frame:
+ */
+ struct mutex commit_lock[MAX_CRTCS];
+ unsigned pending_crtc_mask;
+ struct msm_pending_timer pending_timers[MAX_CRTCS];
+};
+
+static inline int msm_kms_init(struct msm_kms *kms,
+ const struct msm_kms_funcs *funcs)
+{
+	unsigned i;
+	int ret;
+
+ for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++)
+ mutex_init(&kms->commit_lock[i]);
+
+ kms->funcs = funcs;
+
+ for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++) {
+ ret = msm_atomic_init_pending_timer(&kms->pending_timers[i], kms, i);
+		if (ret)
+			return ret;
+ }
+
+ return 0;
+}
+
+static inline void msm_kms_destroy(struct msm_kms *kms)
+{
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++)
+ msm_atomic_destroy_pending_timer(&kms->pending_timers[i]);
+}
+
+#define for_each_crtc_mask(dev, crtc, crtc_mask) \
+ drm_for_each_crtc(crtc, dev) \
+ for_each_if (drm_crtc_mask(crtc) & (crtc_mask))
+
+#define for_each_crtc_mask_reverse(dev, crtc, crtc_mask) \
+ drm_for_each_crtc_reverse(crtc, dev) \
+ for_each_if (drm_crtc_mask(crtc) & (crtc_mask))
+
+#endif /* __MSM_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
new file mode 100644
index 000000000..3b8d6991b
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -0,0 +1,474 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interconnect.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdesc.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+
+/* for DPU_HW_* defines */
+#include "disp/dpu1/dpu_hw_catalog.h"
+
+#define HW_REV 0x0
+#define HW_INTR_STATUS 0x0010
+
+#define UBWC_STATIC 0x144
+#define UBWC_CTRL_2 0x150
+#define UBWC_PREDICTION_MODE 0x154
+
+#define MIN_IB_BW	400000000UL /* Min ib vote 400 MB/s */
+
+struct msm_mdss {
+ struct device *dev;
+
+ void __iomem *mmio;
+ struct clk_bulk_data *clocks;
+ size_t num_clocks;
+ bool is_mdp5;
+ struct {
+ unsigned long enabled_mask;
+ struct irq_domain *domain;
+ } irq_controller;
+ struct icc_path *path[2];
+ u32 num_paths;
+};
+
+static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
+ struct msm_mdss *msm_mdss)
+{
+ struct icc_path *path0;
+ struct icc_path *path1;
+
+ path0 = of_icc_get(dev, "mdp0-mem");
+ if (IS_ERR_OR_NULL(path0))
+ return PTR_ERR_OR_ZERO(path0);
+
+ msm_mdss->path[0] = path0;
+ msm_mdss->num_paths = 1;
+
+ path1 = of_icc_get(dev, "mdp1-mem");
+ if (!IS_ERR_OR_NULL(path1)) {
+ msm_mdss->path[1] = path1;
+ msm_mdss->num_paths++;
+ }
+
+ return 0;
+}
+
+static void msm_mdss_put_icc_path(void *data)
+{
+ struct msm_mdss *msm_mdss = data;
+ int i;
+
+ for (i = 0; i < msm_mdss->num_paths; i++)
+ icc_put(msm_mdss->path[i]);
+}
+
+static void msm_mdss_icc_request_bw(struct msm_mdss *msm_mdss, unsigned long bw)
+{
+ int i;
+
+ for (i = 0; i < msm_mdss->num_paths; i++)
+ icc_set_bw(msm_mdss->path[i], 0, Bps_to_icc(bw));
+}
+
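+/*
+ * Worked example of the dispatch loop below: if HW_INTR_STATUS reads
+ * back 0x9 (bits 0 and 3 set), fls() selects the highest set bit first,
+ * so hwirq 3 is handed to the child irq_domain, then hwirq 0. A failed
+ * dispatch aborts the loop instead of spinning on a bit that can't be
+ * cleared.
+ */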
+static void msm_mdss_irq(struct irq_desc *desc)
+{
+ struct msm_mdss *msm_mdss = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 interrupts;
+
+ chained_irq_enter(chip, desc);
+
+ interrupts = readl_relaxed(msm_mdss->mmio + HW_INTR_STATUS);
+
+ while (interrupts) {
+ irq_hw_number_t hwirq = fls(interrupts) - 1;
+ int rc;
+
+ rc = generic_handle_domain_irq(msm_mdss->irq_controller.domain,
+ hwirq);
+ if (rc < 0) {
+ dev_err(msm_mdss->dev, "handle irq fail: irq=%lu rc=%d\n",
+ hwirq, rc);
+ break;
+ }
+
+ interrupts &= ~(1 << hwirq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void msm_mdss_irq_mask(struct irq_data *irqd)
+{
+ struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);
+
+ /* memory barrier */
+ smp_mb__before_atomic();
+ clear_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
+ /* memory barrier */
+ smp_mb__after_atomic();
+}
+
+static void msm_mdss_irq_unmask(struct irq_data *irqd)
+{
+ struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);
+
+ /* memory barrier */
+ smp_mb__before_atomic();
+ set_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
+ /* memory barrier */
+ smp_mb__after_atomic();
+}
+
+static struct irq_chip msm_mdss_irq_chip = {
+ .name = "msm_mdss",
+ .irq_mask = msm_mdss_irq_mask,
+ .irq_unmask = msm_mdss_irq_unmask,
+};
+
+static struct lock_class_key msm_mdss_lock_key, msm_mdss_request_key;
+
+static int msm_mdss_irqdomain_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ struct msm_mdss *msm_mdss = domain->host_data;
+
+ irq_set_lockdep_class(irq, &msm_mdss_lock_key, &msm_mdss_request_key);
+ irq_set_chip_and_handler(irq, &msm_mdss_irq_chip, handle_level_irq);
+
+ return irq_set_chip_data(irq, msm_mdss);
+}
+
+static const struct irq_domain_ops msm_mdss_irqdomain_ops = {
+ .map = msm_mdss_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
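+/*
+ * With the onecell xlate above, child nodes reference an MDSS interrupt
+ * with a single cell, e.g. (illustrative DT fragment, not from a real
+ * board):
+ *
+ * interrupt-parent = <&mdss>;
+ * interrupts = <0>;
+ */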
+static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
+{
+ struct device *dev;
+ struct irq_domain *domain;
+
+ dev = msm_mdss->dev;
+
+ domain = irq_domain_add_linear(dev->of_node, 32,
+ &msm_mdss_irqdomain_ops, msm_mdss);
+ if (!domain) {
+ dev_err(dev, "failed to add irq_domain\n");
+ return -EINVAL;
+ }
+
+ msm_mdss->irq_controller.enabled_mask = 0;
+ msm_mdss->irq_controller.domain = domain;
+
+ return 0;
+}
+
+static int msm_mdss_enable(struct msm_mdss *msm_mdss)
+{
+ int ret;
+
+ /*
+ * Several components have AXI clocks that can only be turned on if
+ * the interconnect is enabled (non-zero bandwidth). Make sure the
+ * interconnects request at least a minimum amount of bandwidth.
+ */
+ msm_mdss_icc_request_bw(msm_mdss, MIN_IB_BW);
+
+ ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
+ if (ret) {
+ dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret);
+ return ret;
+ }
+
+ /*
+ * HW_REV requires MDSS_MDP_CLK, which is not enabled by the mdss on
+ * mdp5 hardware. Skip reading it for now.
+ */
+ if (msm_mdss->is_mdp5)
+ return 0;
+
+ /*
+ * UBWC config is part of the "mdss" register region, which is not
+ * accessible from the rest of the driver. Hardcode known
+ * configurations here.
+ */
+ switch (readl_relaxed(msm_mdss->mmio + HW_REV)) {
+ case DPU_HW_VER_500:
+ case DPU_HW_VER_501:
+ writel_relaxed(0x420, msm_mdss->mmio + UBWC_STATIC);
+ break;
+ case DPU_HW_VER_600:
+ /* TODO: 0x102e for LP_DDR4 */
+ writel_relaxed(0x103e, msm_mdss->mmio + UBWC_STATIC);
+ writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2);
+ writel_relaxed(1, msm_mdss->mmio + UBWC_PREDICTION_MODE);
+ break;
+ case DPU_HW_VER_620:
+ writel_relaxed(0x1e, msm_mdss->mmio + UBWC_STATIC);
+ break;
+ case DPU_HW_VER_720:
+ writel_relaxed(0x101e, msm_mdss->mmio + UBWC_STATIC);
+ break;
+ }
+
+ return 0;
+}
+
+static int msm_mdss_disable(struct msm_mdss *msm_mdss)
+{
+ clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);
+ msm_mdss_icc_request_bw(msm_mdss, 0);
+
+ return 0;
+}
+
+static void msm_mdss_destroy(struct msm_mdss *msm_mdss)
+{
+ struct platform_device *pdev = to_platform_device(msm_mdss->dev);
+ int irq;
+
+ pm_runtime_suspend(msm_mdss->dev);
+ pm_runtime_disable(msm_mdss->dev);
+ irq_domain_remove(msm_mdss->irq_controller.domain);
+ msm_mdss->irq_controller.domain = NULL;
+ irq = platform_get_irq(pdev, 0);
+ irq_set_chained_handler_and_data(irq, NULL, NULL);
+}
+
+static int msm_mdss_reset(struct device *dev)
+{
+ struct reset_control *reset;
+
+ reset = reset_control_get_optional_exclusive(dev, NULL);
+ if (!reset) {
+ /* Optional reset not specified */
+ return 0;
+ } else if (IS_ERR(reset)) {
+ return dev_err_probe(dev, PTR_ERR(reset),
+ "failed to acquire mdss reset\n");
+ }
+
+ reset_control_assert(reset);
+ /*
+ * Tests indicate that the reset has to be held for some period of
+ * time; hold it for roughly one frame (20ms) to be safe.
+ */
+ msleep(20);
+ reset_control_deassert(reset);
+
+ reset_control_put(reset);
+
+ return 0;
+}
+
+/*
+ * MDP5 MDSS uses up to three named clocks (iface, bus, vsync), all of
+ * which are fetched as optional.
+ */
+#define MDP5_MDSS_NUM_CLOCKS 3
+static int mdp5_mdss_parse_clock(struct platform_device *pdev, struct clk_bulk_data **clocks)
+{
+ struct clk_bulk_data *bulk;
+ int num_clocks = 0;
+ int ret;
+
+ if (!pdev)
+ return -EINVAL;
+
+ bulk = devm_kcalloc(&pdev->dev, MDP5_MDSS_NUM_CLOCKS, sizeof(struct clk_bulk_data), GFP_KERNEL);
+ if (!bulk)
+ return -ENOMEM;
+
+ bulk[num_clocks++].id = "iface";
+ bulk[num_clocks++].id = "bus";
+ bulk[num_clocks++].id = "vsync";
+
+ ret = devm_clk_bulk_get_optional(&pdev->dev, num_clocks, bulk);
+ if (ret)
+ return ret;
+
+ *clocks = bulk;
+
+ return num_clocks;
+}
+
+static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5)
+{
+ struct msm_mdss *msm_mdss;
+ int ret;
+ int irq;
+
+ ret = msm_mdss_reset(&pdev->dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ msm_mdss = devm_kzalloc(&pdev->dev, sizeof(*msm_mdss), GFP_KERNEL);
+ if (!msm_mdss)
+ return ERR_PTR(-ENOMEM);
+
+ msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss");
+ if (IS_ERR(msm_mdss->mmio))
+ return ERR_CAST(msm_mdss->mmio);
+
+ dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);
+
+ ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
+ if (ret)
+ return ERR_PTR(ret);
+ ret = devm_add_action_or_reset(&pdev->dev, msm_mdss_put_icc_path, msm_mdss);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (is_mdp5)
+ ret = mdp5_mdss_parse_clock(pdev, &msm_mdss->clocks);
+ else
+ ret = devm_clk_bulk_get_all(&pdev->dev, &msm_mdss->clocks);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse clocks, ret=%d\n", ret);
+ return ERR_PTR(ret);
+ }
+ msm_mdss->num_clocks = ret;
+ msm_mdss->is_mdp5 = is_mdp5;
+
+ msm_mdss->dev = &pdev->dev;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return ERR_PTR(irq);
+
+ ret = _msm_mdss_irq_domain_add(msm_mdss);
+ if (ret)
+ return ERR_PTR(ret);
+
+ irq_set_chained_handler_and_data(irq, msm_mdss_irq,
+ msm_mdss);
+
+ pm_runtime_enable(&pdev->dev);
+
+ return msm_mdss;
+}
+
+static int __maybe_unused mdss_runtime_suspend(struct device *dev)
+{
+ struct msm_mdss *mdss = dev_get_drvdata(dev);
+
+ DBG("");
+
+ return msm_mdss_disable(mdss);
+}
+
+static int __maybe_unused mdss_runtime_resume(struct device *dev)
+{
+ struct msm_mdss *mdss = dev_get_drvdata(dev);
+
+ DBG("");
+
+ return msm_mdss_enable(mdss);
+}
+
+static int __maybe_unused mdss_pm_suspend(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return mdss_runtime_suspend(dev);
+}
+
+static int __maybe_unused mdss_pm_resume(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return mdss_runtime_resume(dev);
+}
+
+static const struct dev_pm_ops mdss_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mdss_pm_suspend, mdss_pm_resume)
+ SET_RUNTIME_PM_OPS(mdss_runtime_suspend, mdss_runtime_resume, NULL)
+};
+
+static int mdss_probe(struct platform_device *pdev)
+{
+ struct msm_mdss *mdss;
+ bool is_mdp5 = of_device_is_compatible(pdev->dev.of_node, "qcom,mdss");
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ mdss = msm_mdss_init(pdev, is_mdp5);
+ if (IS_ERR(mdss))
+ return PTR_ERR(mdss);
+
+ platform_set_drvdata(pdev, mdss);
+
+ /*
+ * MDP5/DPU based devices don't have a flat hierarchy. There is a top
+ * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
+ * Populate the children devices, find the MDP5/DPU node, and then add
+ * the interfaces to our components list.
+ */
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to populate children devices\n");
+ msm_mdss_destroy(mdss);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mdss_remove(struct platform_device *pdev)
+{
+ struct msm_mdss *mdss = platform_get_drvdata(pdev);
+
+ of_platform_depopulate(&pdev->dev);
+
+ msm_mdss_destroy(mdss);
+
+ return 0;
+}
+
+static const struct of_device_id mdss_dt_match[] = {
+ { .compatible = "qcom,mdss" },
+ { .compatible = "qcom,msm8998-mdss" },
+ { .compatible = "qcom,qcm2290-mdss" },
+ { .compatible = "qcom,sdm845-mdss" },
+ { .compatible = "qcom,sc7180-mdss" },
+ { .compatible = "qcom,sc7280-mdss" },
+ { .compatible = "qcom,sc8180x-mdss" },
+ { .compatible = "qcom,sm8150-mdss" },
+ { .compatible = "qcom,sm8250-mdss" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mdss_dt_match);
+
+static struct platform_driver mdss_platform_driver = {
+ .probe = mdss_probe,
+ .remove = mdss_remove,
+ .driver = {
+ .name = "msm-mdss",
+ .of_match_table = mdss_dt_match,
+ .pm = &mdss_pm_ops,
+ },
+};
+
+void __init msm_mdss_register(void)
+{
+ platform_driver_register(&mdss_platform_driver);
+}
+
+void __exit msm_mdss_unregister(void)
+{
+ platform_driver_unregister(&mdss_platform_driver);
+}
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
new file mode 100644
index 000000000..de158e1bf
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __MSM_MMU_H__
+#define __MSM_MMU_H__
+
+#include <linux/iommu.h>
+
+struct msm_mmu_funcs {
+ void (*detach)(struct msm_mmu *mmu);
+ int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
+ size_t len, int prot);
+ int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
+ void (*destroy)(struct msm_mmu *mmu);
+ void (*resume_translation)(struct msm_mmu *mmu);
+};
+
+enum msm_mmu_type {
+ MSM_MMU_GPUMMU,
+ MSM_MMU_IOMMU,
+ MSM_MMU_IOMMU_PAGETABLE,
+};
+
+struct msm_mmu {
+ const struct msm_mmu_funcs *funcs;
+ struct device *dev;
+ int (*handler)(void *arg, unsigned long iova, int flags, void *data);
+ void *arg;
+ enum msm_mmu_type type;
+};
+
+static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
+ const struct msm_mmu_funcs *funcs, enum msm_mmu_type type)
+{
+ mmu->dev = dev;
+ mmu->funcs = funcs;
+ mmu->type = type;
+}
+
+struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
+struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
+
+static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
+ int (*handler)(void *arg, unsigned long iova, int flags, void *data))
+{
+ mmu->arg = arg;
+ mmu->handler = handler;
+}
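+
+/*
+ * Typical wiring (sketch; faultcb is a hypothetical handler): the GPU
+ * registers a handler so iommu faults can be attributed and logged:
+ *
+ * static int faultcb(void *arg, unsigned long iova, int flags, void *data)
+ * {
+ * struct msm_gpu *gpu = arg;
+ * // decode iova/flags, capture gpu state, etc.
+ * return 0;
+ * }
+ *
+ * msm_mmu_set_fault_handler(mmu, gpu, faultcb);
+ */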
+
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent);
+
+void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
+ dma_addr_t *tran_error);
+
+int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
+ int *asid);
+
+#endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c
new file mode 100644
index 000000000..3d3da79fe
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_perf.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+/* For profiling, userspace can:
+ *
+ * tail -f /sys/kernel/debug/dri/<minor>/perf
+ *
+ * This will enable performance counters/profiling to track the busy time
+ * and any gpu specific performance counters that are supported.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_file.h>
+
+#include "msm_drv.h"
+#include "msm_gpu.h"
+
+struct msm_perf_state {
+ struct drm_device *dev;
+
+ bool open;
+ int cnt;
+ struct mutex read_lock;
+
+ char buf[256];
+ int buftot, bufpos;
+
+ unsigned long next_jiffies;
+};
+
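+/* HZ/4, i.e. a 250ms sample period regardless of the configured HZ. */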
+#define SAMPLE_TIME (HZ/4)
+
+/* wait for next sample time: */
+static int wait_sample(struct msm_perf_state *perf)
+{
+ unsigned long start_jiffies = jiffies;
+
+ if (time_after(perf->next_jiffies, start_jiffies)) {
+ unsigned long remaining_jiffies =
+ perf->next_jiffies - start_jiffies;
+ int ret = schedule_timeout_interruptible(remaining_jiffies);
+ if (ret > 0) {
+ /* interrupted */
+ return -ERESTARTSYS;
+ }
+ }
+ perf->next_jiffies += SAMPLE_TIME;
+ return 0;
+}
+
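+/*
+ * Illustrative output (values made up): a header line is emitted every
+ * 32 samples, followed by tab-separated sample lines:
+ *
+ * %BUSY PERF_CP_ALWAYS_COUNT ...
+ * 42.3% 123.45
+ */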
+static int refill_buf(struct msm_perf_state *perf)
+{
+ struct msm_drm_private *priv = perf->dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ char *ptr = perf->buf;
+ int rem = sizeof(perf->buf);
+ int i, n;
+
+ if ((perf->cnt++ % 32) == 0) {
+ /* Header line: */
+ n = snprintf(ptr, rem, "%%BUSY");
+ ptr += n;
+ rem -= n;
+
+ for (i = 0; i < gpu->num_perfcntrs; i++) {
+ const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i];
+ n = snprintf(ptr, rem, "\t%s", perfcntr->name);
+ ptr += n;
+ rem -= n;
+ }
+ } else {
+ /* Sample line: */
+ uint32_t activetime = 0, totaltime = 0;
+ uint32_t cntrs[5];
+ uint32_t val;
+ int ret;
+
+ /* sleep until next sample time: */
+ ret = wait_sample(perf);
+ if (ret)
+ return ret;
+
+ ret = msm_gpu_perfcntr_sample(gpu, &activetime, &totaltime,
+ ARRAY_SIZE(cntrs), cntrs);
+ if (ret < 0)
+ return ret;
+
+ val = totaltime ? 1000 * activetime / totaltime : 0;
+ n = snprintf(ptr, rem, "%3d.%d%%", val / 10, val % 10);
+ ptr += n;
+ rem -= n;
+
+ for (i = 0; i < ret; i++) {
+ /* cycle counters (I think).. convert to MHz.. */
+ val = cntrs[i] / 10000;
+ n = snprintf(ptr, rem, "\t%5d.%02d",
+ val / 100, val % 100);
+ ptr += n;
+ rem -= n;
+ }
+ }
+
+ n = snprintf(ptr, rem, "\n");
+ ptr += n;
+ rem -= n;
+
+ perf->bufpos = 0;
+ perf->buftot = ptr - perf->buf;
+
+ return 0;
+}
+
+static ssize_t perf_read(struct file *file, char __user *buf,
+ size_t sz, loff_t *ppos)
+{
+ struct msm_perf_state *perf = file->private_data;
+ int n = 0, ret = 0;
+
+ mutex_lock(&perf->read_lock);
+
+ if (perf->bufpos >= perf->buftot) {
+ ret = refill_buf(perf);
+ if (ret)
+ goto out;
+ }
+
+ n = min((int)sz, perf->buftot - perf->bufpos);
+ if (copy_to_user(buf, &perf->buf[perf->bufpos], n)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ perf->bufpos += n;
+ *ppos += n;
+
+out:
+ mutex_unlock(&perf->read_lock);
+ if (ret)
+ return ret;
+ return n;
+}
+
+static int perf_open(struct inode *inode, struct file *file)
+{
+ struct msm_perf_state *perf = inode->i_private;
+ struct drm_device *dev = perf->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ int ret = 0;
+
+ if (!gpu)
+ return -ENODEV;
+
+ mutex_lock(&gpu->lock);
+
+ if (perf->open) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ file->private_data = perf;
+ perf->open = true;
+ perf->cnt = 0;
+ perf->buftot = 0;
+ perf->bufpos = 0;
+ msm_gpu_perfcntr_start(gpu);
+ perf->next_jiffies = jiffies + SAMPLE_TIME;
+
+out:
+ mutex_unlock(&gpu->lock);
+ return ret;
+}
+
+static int perf_release(struct inode *inode, struct file *file)
+{
+ struct msm_perf_state *perf = inode->i_private;
+ struct msm_drm_private *priv = perf->dev->dev_private;
+ msm_gpu_perfcntr_stop(priv->gpu);
+ perf->open = false;
+ return 0;
+}
+
+static const struct file_operations perf_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = perf_open,
+ .read = perf_read,
+ .llseek = no_llseek,
+ .release = perf_release,
+};
+
+int msm_perf_debugfs_init(struct drm_minor *minor)
+{
+ struct msm_drm_private *priv = minor->dev->dev_private;
+ struct msm_perf_state *perf;
+
+ /* only create on first minor: */
+ if (priv->perf)
+ return 0;
+
+ perf = kzalloc(sizeof(*perf), GFP_KERNEL);
+ if (!perf)
+ return -ENOMEM;
+
+ perf->dev = minor->dev;
+
+ mutex_init(&perf->read_lock);
+ priv->perf = perf;
+
+ debugfs_create_file("perf", S_IFREG | S_IRUGO, minor->debugfs_root,
+ perf, &perf_debugfs_fops);
+ return 0;
+}
+
+void msm_perf_debugfs_cleanup(struct msm_drm_private *priv)
+{
+ struct msm_perf_state *perf = priv->perf;
+
+ if (!perf)
+ return;
+
+ priv->perf = NULL;
+
+ mutex_destroy(&perf->read_lock);
+
+ kfree(perf);
+}
+
+#endif
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
new file mode 100644
index 000000000..db2f847c8
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+/* For debugging crashes, userspace can:
+ *
+ * tail -f /sys/kernel/debug/dri/<minor>/rd > logfile.rd
+ *
+ * to log the cmdstream in a format that is understood by freedreno/cffdump
+ * utility. By comparing the last successfully completed fence #, to the
+ * cmdstream for the next fence, you can narrow down which process and submit
+ * caused the gpu crash/lockup.
+ *
+ * Additionally:
+ *
+ * tail -f /sys/kernel/debug/dri/<minor>/hangrd > logfile.rd
+ *
+ * will capture just the cmdstream from submits which triggered a GPU hang.
+ *
+ * This bypasses drm_debugfs_create_files() mainly because we need to use
+ * our own fops for a bit more control. In particular, we don't want to
+ * do anything if userspace doesn't have the debugfs file open.
+ *
+ * The module-param "rd_full", which defaults to false, enables snapshotting
+ * all (non-written) buffers in the submit, rather than just cmdstream bo's.
+ * This is useful to capture the contents of (for example) vbo's or textures,
+ * or shader programs (if not emitted inline in cmdstream).
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/debugfs.h>
+#include <linux/kfifo.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
+#include <drm/drm_file.h>
+
+#include "msm_drv.h"
+#include "msm_gpu.h"
+#include "msm_gem.h"
+
+bool rd_full = false;
+MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
+module_param_named(rd_full, rd_full, bool, 0600);
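+
+/*
+ * rd_full can also be flipped at runtime, e.g. (path assumes the driver
+ * is built as the "msm" module):
+ *
+ * echo Y > /sys/module/msm/parameters/rd_full
+ */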
+
+#ifdef CONFIG_DEBUG_FS
+
+enum rd_sect_type {
+ RD_NONE,
+ RD_TEST, /* ascii text */
+ RD_CMD, /* ascii text */
+ RD_GPUADDR, /* u32 gpuaddr, u32 size */
+ RD_CONTEXT, /* raw dump */
+ RD_CMDSTREAM, /* raw dump */
+ RD_CMDSTREAM_ADDR, /* gpu addr of cmdstream */
+ RD_PARAM, /* u32 param_type, u32 param_val, u32 bitlen */
+ RD_FLUSH, /* empty, clear previous params */
+ RD_PROGRAM, /* shader program, raw dump */
+ RD_VERT_SHADER,
+ RD_FRAG_SHADER,
+ RD_BUFFER_CONTENTS,
+ RD_GPU_ID,
+ RD_CHIP_ID,
+};
+
+#define BUF_SZ 512 /* should be power of 2 */
+
+/* space used: */
+#define circ_count(circ) \
+ (CIRC_CNT((circ)->head, (circ)->tail, BUF_SZ))
+#define circ_count_to_end(circ) \
+ (CIRC_CNT_TO_END((circ)->head, (circ)->tail, BUF_SZ))
+/* space available: */
+#define circ_space(circ) \
+ (CIRC_SPACE((circ)->head, (circ)->tail, BUF_SZ))
+#define circ_space_to_end(circ) \
+ (CIRC_SPACE_TO_END((circ)->head, (circ)->tail, BUF_SZ))
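+
+/*
+ * Worked example with BUF_SZ == 512: for head == 10, tail == 500,
+ * circ_count() == (10 - 500) & 511 == 22 bytes ready for the reader,
+ * and circ_space() == (500 - 11) & 511 == 489 bytes free for the
+ * writer.
+ */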
+
+struct msm_rd_state {
+ struct drm_device *dev;
+
+ bool open;
+
+ /* current submit to read out: */
+ struct msm_gem_submit *submit;
+
+ /* FIFO access is synchronized on the producer side by
+ * gpu->lock, held by the submit code (otherwise we could
+ * end up with cmds logged in a different order than they
+ * were executed). read_lock synchronizes the reads.
+ */
+ struct mutex read_lock;
+
+ wait_queue_head_t fifo_event;
+ struct circ_buf fifo;
+
+ char buf[BUF_SZ];
+};
+
+static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
+{
+ struct circ_buf *fifo = &rd->fifo;
+ const char *ptr = buf;
+
+ while (sz > 0) {
+ char *fptr = &fifo->buf[fifo->head];
+ int n;
+
+ wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open);
+ if (!rd->open)
+ return;
+
+ /* Note that smp_load_acquire() is not strictly required
+ * as CIRC_SPACE_TO_END() does not access the tail more
+ * than once.
+ */
+ n = min(sz, circ_space_to_end(&rd->fifo));
+ memcpy(fptr, ptr, n);
+
+ smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1));
+ sz -= n;
+ ptr += n;
+
+ wake_up_all(&rd->fifo_event);
+ }
+}
+
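+/*
+ * Each section in the stream is a small type/size header followed by
+ * the payload, which is the layout the freedreno cffdump tooling
+ * parses:
+ *
+ * u32 type; // enum rd_sect_type
+ * u32 size; // payload size in bytes
+ * u8 payload[size];
+ */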
+static void rd_write_section(struct msm_rd_state *rd,
+ enum rd_sect_type type, const void *buf, int sz)
+{
+ rd_write(rd, &type, 4);
+ rd_write(rd, &sz, 4);
+ rd_write(rd, buf, sz);
+}
+
+static ssize_t rd_read(struct file *file, char __user *buf,
+ size_t sz, loff_t *ppos)
+{
+ struct msm_rd_state *rd = file->private_data;
+ struct circ_buf *fifo = &rd->fifo;
+ const char *fptr = &fifo->buf[fifo->tail];
+ int n = 0, ret = 0;
+
+ mutex_lock(&rd->read_lock);
+
+ ret = wait_event_interruptible(rd->fifo_event,
+ circ_count(&rd->fifo) > 0);
+ if (ret)
+ goto out;
+
+ /* Note that smp_load_acquire() is not strictly required
+ * as CIRC_CNT_TO_END() does not access the head more than
+ * once.
+ */
+ n = min_t(int, sz, circ_count_to_end(&rd->fifo));
+ if (copy_to_user(buf, fptr, n)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1));
+ *ppos += n;
+
+ wake_up_all(&rd->fifo_event);
+
+out:
+ mutex_unlock(&rd->read_lock);
+ if (ret)
+ return ret;
+ return n;
+}
+
+static int rd_open(struct inode *inode, struct file *file)
+{
+ struct msm_rd_state *rd = inode->i_private;
+ struct drm_device *dev = rd->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ uint64_t val;
+ uint32_t gpu_id;
+ uint32_t zero = 0;
+ int ret = 0;
+
+ if (!gpu)
+ return -ENODEV;
+
+ mutex_lock(&gpu->lock);
+
+ if (rd->open) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ file->private_data = rd;
+ rd->open = true;
+
+ /* Reset fifo to clear any previously unread data: */
+ rd->fifo.head = rd->fifo.tail = 0;
+
+ /* The parsing tools need to know the gpu-id to know which
+ * register database to load.
+ *
+ * Note: these particular params do not require a context.
+ */
+ gpu->funcs->get_param(gpu, NULL, MSM_PARAM_GPU_ID, &val, &zero);
+ gpu_id = val;
+
+ rd_write_section(rd, RD_GPU_ID, &gpu_id, sizeof(gpu_id));
+
+ gpu->funcs->get_param(gpu, NULL, MSM_PARAM_CHIP_ID, &val, &zero);
+ rd_write_section(rd, RD_CHIP_ID, &val, sizeof(val));
+
+out:
+ mutex_unlock(&gpu->lock);
+ return ret;
+}
+
+static int rd_release(struct inode *inode, struct file *file)
+{
+ struct msm_rd_state *rd = inode->i_private;
+
+ rd->open = false;
+ wake_up_all(&rd->fifo_event);
+
+ return 0;
+}
+
+static const struct file_operations rd_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = rd_open,
+ .read = rd_read,
+ .llseek = no_llseek,
+ .release = rd_release,
+};
+
+static void rd_cleanup(struct msm_rd_state *rd)
+{
+ if (!rd)
+ return;
+
+ mutex_destroy(&rd->read_lock);
+ kfree(rd);
+}
+
+static struct msm_rd_state *rd_init(struct drm_minor *minor, const char *name)
+{
+ struct msm_rd_state *rd;
+
+ rd = kzalloc(sizeof(*rd), GFP_KERNEL);
+ if (!rd)
+ return ERR_PTR(-ENOMEM);
+
+ rd->dev = minor->dev;
+ rd->fifo.buf = rd->buf;
+
+ mutex_init(&rd->read_lock);
+
+ init_waitqueue_head(&rd->fifo_event);
+
+ debugfs_create_file(name, S_IFREG | S_IRUGO, minor->debugfs_root, rd,
+ &rd_debugfs_fops);
+
+ return rd;
+}
+
+int msm_rd_debugfs_init(struct drm_minor *minor)
+{
+ struct msm_drm_private *priv = minor->dev->dev_private;
+ struct msm_rd_state *rd;
+ int ret;
+
+ /* only create on first minor: */
+ if (priv->rd)
+ return 0;
+
+ rd = rd_init(minor, "rd");
+ if (IS_ERR(rd)) {
+ ret = PTR_ERR(rd);
+ goto fail;
+ }
+
+ priv->rd = rd;
+
+ rd = rd_init(minor, "hangrd");
+ if (IS_ERR(rd)) {
+ ret = PTR_ERR(rd);
+ goto fail;
+ }
+
+ priv->hangrd = rd;
+
+ return 0;
+
+fail:
+ msm_rd_debugfs_cleanup(priv);
+ return ret;
+}
+
+void msm_rd_debugfs_cleanup(struct msm_drm_private *priv)
+{
+ rd_cleanup(priv->rd);
+ priv->rd = NULL;
+
+ rd_cleanup(priv->hangrd);
+ priv->hangrd = NULL;
+}
+
+static void snapshot_buf(struct msm_rd_state *rd,
+ struct msm_gem_submit *submit, int idx,
+ uint64_t iova, uint32_t size, bool full)
+{
+ struct msm_gem_object *obj = submit->bos[idx].obj;
+ unsigned offset = 0;
+ const char *buf;
+
+ if (iova) {
+ offset = iova - submit->bos[idx].iova;
+ } else {
+ iova = submit->bos[idx].iova;
+ size = obj->base.size;
+ }
+
+ /*
+ * Always write the GPUADDR header so we can get a complete list of
+ * all the buffers in the cmdstream.
+ */
+ rd_write_section(rd, RD_GPUADDR,
+ (uint32_t[3]){ iova, size, iova >> 32 }, 12);
+
+ if (!full)
+ return;
+
+ /* But only dump the contents of buffers marked READ */
+ if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))
+ return;
+
+ msm_gem_lock(&obj->base);
+ buf = msm_gem_get_vaddr_active(&obj->base);
+ if (IS_ERR(buf))
+ goto out_unlock;
+
+ buf += offset;
+
+ rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
+
+ msm_gem_put_vaddr_locked(&obj->base);
+
+out_unlock:
+ msm_gem_unlock(&obj->base);
+}
+
+/* called under gpu->lock */
+void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
+ const char *fmt, ...)
+{
+ struct task_struct *task;
+ char msg[256];
+ int i, n;
+
+ if (!rd->open)
+ return;
+
+ /* writing into fifo is serialized by caller, and
+ * rd->read_lock is used to serialize the reads
+ */
+ WARN_ON(!mutex_is_locked(&submit->gpu->lock));
+
+ if (fmt) {
+ va_list args;
+
+ va_start(args, fmt);
+ n = vscnprintf(msg, sizeof(msg), fmt, args);
+ va_end(args);
+
+ rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
+ }
+
+ rcu_read_lock();
+ task = pid_task(submit->pid, PIDTYPE_PID);
+ if (task) {
+ n = scnprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
+ TASK_COMM_LEN, task->comm,
+ pid_nr(submit->pid), submit->seqno);
+ } else {
+ n = scnprintf(msg, sizeof(msg), "???/%d: fence=%u",
+ pid_nr(submit->pid), submit->seqno);
+ }
+ rcu_read_unlock();
+
+ rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
+
+ for (i = 0; i < submit->nr_bos; i++)
+ snapshot_buf(rd, submit, i, 0, 0, should_dump(submit, i));
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ uint32_t szd = submit->cmd[i].size; /* in dwords */
+
+ /* snapshot cmdstream bo's (if we haven't already): */
+ if (!should_dump(submit, i)) {
+ snapshot_buf(rd, submit, submit->cmd[i].idx,
+ submit->cmd[i].iova, szd * 4, true);
+ }
+ }
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ uint64_t iova = submit->cmd[i].iova;
+ uint32_t szd = submit->cmd[i].size; /* in dwords */
+
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ /* ignore IB-targets, we've logged the buffer, the
+ * parser tool will follow the IB based on the logged
+ * buffer/gpuaddr, so nothing more to do.
+ */
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ case MSM_SUBMIT_CMD_BUF:
+ rd_write_section(rd, RD_CMDSTREAM_ADDR,
+ (uint32_t[3]){ iova, szd, iova >> 32 }, 12);
+ break;
+ }
+ }
+}
+#endif
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
new file mode 100644
index 000000000..57a8e9564
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include "msm_ringbuffer.h"
+#include "msm_gpu.h"
+
+static uint num_hw_submissions = 8;
+MODULE_PARM_DESC(num_hw_submissions, "The max # of jobs to write into ringbuffer (default 8)");
+module_param(num_hw_submissions, uint, 0600);
+
+static struct dma_fence *msm_job_run(struct drm_sched_job *job)
+{
+ struct msm_gem_submit *submit = to_msm_submit(job);
+ struct msm_fence_context *fctx = submit->ring->fctx;
+ struct msm_gpu *gpu = submit->gpu;
+ int i;
+
+ submit->hw_fence = msm_fence_alloc(fctx);
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
+
+ msm_gem_lock(obj);
+ msm_gem_unpin_vma_fenced(submit->bos[i].vma, fctx);
+ msm_gem_unpin_locked(obj);
+ submit->bos[i].flags &= ~(BO_VMA_PINNED | BO_OBJ_PINNED);
+ msm_gem_unlock(obj);
+ }
+
+ /* TODO move submit path over to using a per-ring lock.. */
+ mutex_lock(&gpu->lock);
+
+ msm_gpu_submit(gpu, submit);
+
+ mutex_unlock(&gpu->lock);
+
+ return dma_fence_get(submit->hw_fence);
+}
+
+static void msm_job_free(struct drm_sched_job *job)
+{
+ struct msm_gem_submit *submit = to_msm_submit(job);
+
+ drm_sched_job_cleanup(job);
+ msm_gem_submit_put(submit);
+}
+
+static const struct drm_sched_backend_ops msm_sched_ops = {
+ .run_job = msm_job_run,
+ .free_job = msm_job_free
+};
+
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
+ void *memptrs, uint64_t memptrs_iova)
+{
+ struct msm_ringbuffer *ring;
+ long sched_timeout;
+ char name[32];
+ int ret;
+
+ /* We assume everywhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */
+ BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ));
+
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ring->gpu = gpu;
+ ring->id = id;
+
+ ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
+ check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY),
+ gpu->aspace, &ring->bo, &ring->iova);
+
+ if (IS_ERR(ring->start)) {
+ ret = PTR_ERR(ring->start);
+ ring->start = NULL;
+ goto fail;
+ }
+
+ msm_gem_object_set_name(ring->bo, "ring%d", id);
+
+ ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
+ ring->next = ring->start;
+ ring->cur = ring->start;
+
+ ring->memptrs = memptrs;
+ ring->memptrs_iova = memptrs_iova;
+
+ /* currently managing hangcheck ourselves: */
+ sched_timeout = MAX_SCHEDULE_TIMEOUT;
+
+ ret = drm_sched_init(&ring->sched, &msm_sched_ops,
+ num_hw_submissions, 0, sched_timeout,
+ NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev);
+ if (ret)
+ goto fail;
+
+ INIT_LIST_HEAD(&ring->submits);
+ spin_lock_init(&ring->submit_lock);
+ spin_lock_init(&ring->preempt_lock);
+
+ snprintf(name, sizeof(name), "gpu-ring-%d", ring->id);
+
+ ring->fctx = msm_fence_context_alloc(gpu->dev, &ring->memptrs->fence, name);
+
+ return ring;
+
+fail:
+ msm_ringbuffer_destroy(ring);
+ return ERR_PTR(ret);
+}
+
+void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
+{
+ if (IS_ERR_OR_NULL(ring))
+ return;
+
+ drm_sched_fini(&ring->sched);
+
+ msm_fence_context_free(ring->fctx);
+
+ msm_gem_kernel_put(ring->bo, ring->gpu->aspace);
+
+ kfree(ring);
+}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
new file mode 100644
index 000000000..698b333ab
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __MSM_RINGBUFFER_H__
+#define __MSM_RINGBUFFER_H__
+
+#include "drm/gpu_scheduler.h"
+#include "msm_drv.h"
+
+#define rbmemptr(ring, member) \
+ ((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))
+
+#define rbmemptr_stats(ring, index, member) \
+ (rbmemptr((ring), stats) + \
+ ((index) * sizeof(struct msm_gpu_submit_stats)) + \
+ offsetof(struct msm_gpu_submit_stats, member))
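+
+/*
+ * E.g. rbmemptr(ring, fence) yields the GPU address of
+ * ring->memptrs->fence; with the layout below that is
+ * memptrs_iova + 4, since rptr occupies the first four bytes.
+ */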
+
+struct msm_gpu_submit_stats {
+ u64 cpcycles_start;
+ u64 cpcycles_end;
+ u64 alwayson_start;
+ u64 alwayson_end;
+};
+
+#define MSM_GPU_SUBMIT_STATS_COUNT 64
+
+struct msm_rbmemptrs {
+ volatile uint32_t rptr;
+ volatile uint32_t fence;
+
+ volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
+ volatile u64 ttbr0;
+};
+
+struct msm_cp_state {
+ uint64_t ib1_base, ib2_base;
+ uint32_t ib1_rem, ib2_rem;
+};
+
+struct msm_ringbuffer {
+ struct msm_gpu *gpu;
+ int id;
+ struct drm_gem_object *bo;
+ uint32_t *start, *end, *cur, *next;
+
+ /*
+ * The job scheduler for this ring.
+ */
+ struct drm_gpu_scheduler sched;
+
+ /*
+ * List of in-flight submits on this ring. Protected by submit_lock.
+ *
+ * Currently just submits that are already written into the ring, not
+ * submits that are still in drm_gpu_scheduler's queues. At a later
+ * step we could probably move to letting drm_gpu_scheduler manage
+ * hangcheck detection and keep track of submit jobs that are in-
+ * flight.
+ */
+ struct list_head submits;
+ spinlock_t submit_lock;
+
+ uint64_t iova;
+ uint32_t hangcheck_fence;
+ struct msm_rbmemptrs *memptrs;
+ uint64_t memptrs_iova;
+ struct msm_fence_context *fctx;
+
+ /**
+ * hangcheck_progress_retries:
+ *
+ * The number of extra hangcheck duration cycles that we have given
+ * due to it appearing that the GPU is making forward progress.
+ *
+ * For GPU generations which support progress detection (see
+ * msm_gpu_funcs::progress()), if the GPU appears to be making progress
+ * (i.e. the CP has advanced in the command stream), we'll allow up to
+ * DRM_MSM_HANGCHECK_PROGRESS_RETRIES expirations of the hangcheck timer
+ * before killing the job. But to detect progress we need two sample
+ * points, so the duration of the hangcheck timer is halved. In other
+ * words we'll let the submit run for up to:
+ *
+ * (DRM_MSM_HANGCHECK_DEFAULT_PERIOD / 2) * (DRM_MSM_HANGCHECK_PROGRESS_RETRIES + 1)
+ */
+ int hangcheck_progress_retries;
+
+ /**
+ * last_cp_state: The state of the CP at the last call to gpu->progress()
+ */
+ struct msm_cp_state last_cp_state;
+
+ /*
+ * preempt_lock protects preemption and serializes wptr updates against
+ * preemption. Can be acquired from irq context.
+ */
+ spinlock_t preempt_lock;
+};
+
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
+ void *memptrs, uint64_t memptrs_iova);
+void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
+
+/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
+
+static inline void
+OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
+{
+ /*
+ * ring->next points to the current command being written - it won't be
+ * committed as ring->cur until the flush
+ */
+ if (ring->next == ring->end)
+ ring->next = ring->start;
+ *(ring->next++) = data;
+}
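+
+/*
+ * Minimal usage sketch (the header value is made up; real packets are
+ * built with the adreno PM4 helpers):
+ *
+ * OUT_RING(ring, 0x70000000); // packet header
+ * OUT_RING(ring, payload);
+ *
+ * ring->next only becomes visible as ring->cur once the gpu's flush
+ * hook publishes it and writes the new wptr to the hardware.
+ */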
+
+#endif /* __MSM_RINGBUFFER_H__ */
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
new file mode 100644
index 000000000..0e803125a
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kref.h>
+#include <linux/uaccess.h>
+
+#include "msm_gpu.h"
+
+int msm_file_private_set_sysprof(struct msm_file_private *ctx,
+ struct msm_gpu *gpu, int sysprof)
+{
+ /*
+ * Since pm_runtime and sysprof_active are both refcounts, we
+ * apply the new value first, and then unwind the previous
+ * value.
+ */
+
+ switch (sysprof) {
+ default:
+ return -EINVAL;
+ case 2:
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ fallthrough;
+ case 1:
+ refcount_inc(&gpu->sysprof_active);
+ fallthrough;
+ case 0:
+ break;
+ }
+
+ /* unwind old value: */
+ switch (ctx->sysprof) {
+ case 2:
+ pm_runtime_put_autosuspend(&gpu->pdev->dev);
+ fallthrough;
+ case 1:
+ refcount_dec(&gpu->sysprof_active);
+ fallthrough;
+ case 0:
+ break;
+ }
+
+ ctx->sysprof = sysprof;
+
+ return 0;
+}
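+
+/*
+ * Worked example: moving a context from sysprof == 2 to sysprof == 1
+ * first takes a fresh sysprof_active reference (case 1), then the
+ * unwind of the old value drops the pm_runtime reference and the old
+ * sysprof_active reference (case 2 falling through to 1). Net effect:
+ * only the runtime-pm vote is released; sysprof_active stays balanced.
+ */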
+
+void __msm_file_private_destroy(struct kref *kref)
+{
+ struct msm_file_private *ctx = container_of(kref,
+ struct msm_file_private, ref);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
+ if (!ctx->entities[i])
+ continue;
+
+ drm_sched_entity_destroy(ctx->entities[i]);
+ kfree(ctx->entities[i]);
+ }
+
+ msm_gem_address_space_put(ctx->aspace);
+ kfree(ctx->comm);
+ kfree(ctx->cmdline);
+ kfree(ctx);
+}
+
+void msm_submitqueue_destroy(struct kref *kref)
+{
+ struct msm_gpu_submitqueue *queue = container_of(kref,
+ struct msm_gpu_submitqueue, ref);
+
+ idr_destroy(&queue->fence_idr);
+
+ msm_file_private_put(queue->ctx);
+
+ kfree(queue);
+}
+
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+ u32 id)
+{
+ struct msm_gpu_submitqueue *entry;
+
+ if (!ctx)
+ return NULL;
+
+ read_lock(&ctx->queuelock);
+
+ list_for_each_entry(entry, &ctx->submitqueues, node) {
+ if (entry->id == id) {
+ kref_get(&entry->ref);
+ read_unlock(&ctx->queuelock);
+
+ return entry;
+ }
+ }
+
+ read_unlock(&ctx->queuelock);
+ return NULL;
+}
+
+void msm_submitqueue_close(struct msm_file_private *ctx)
+{
+ struct msm_gpu_submitqueue *entry, *tmp;
+
+ if (!ctx)
+ return;
+
+ /*
+ * No lock needed in close and there won't
+ * be any more user ioctls coming our way
+ */
+ list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
+ list_del(&entry->node);
+ msm_submitqueue_put(entry);
+ }
+}
+
+static struct drm_sched_entity *
+get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
+ unsigned ring_nr, enum drm_sched_priority sched_prio)
+{
+ static DEFINE_MUTEX(entity_lock);
+ unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;
+
+ /* We should have already validated that the requested priority is
+ * valid by the time we get here.
+ */
+ if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&entity_lock);
+
+ if (!ctx->entities[idx]) {
+ struct drm_sched_entity *entity;
+ struct drm_gpu_scheduler *sched = &ring->sched;
+ int ret;
+
+ entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL);
+ if (!entity) {
+ mutex_unlock(&entity_lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
+ if (ret) {
+ mutex_unlock(&entity_lock);
+ kfree(entity);
+ return ERR_PTR(ret);
+ }
+
+ ctx->entities[idx] = entity;
+ }
+
+ mutex_unlock(&entity_lock);
+
+ return ctx->entities[idx];
+}
+
+int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
+ u32 prio, u32 flags, u32 *id)
+{
+ struct msm_drm_private *priv = drm->dev_private;
+ struct msm_gpu_submitqueue *queue;
+ enum drm_sched_priority sched_prio;
+ unsigned ring_nr;
+ int ret;
+
+ if (!ctx)
+ return -ENODEV;
+
+ if (!priv->gpu)
+ return -ENODEV;
+
+ ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
+ if (ret)
+ return ret;
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+
+ if (!queue)
+ return -ENOMEM;
+
+ kref_init(&queue->ref);
+ queue->flags = flags;
+ queue->ring_nr = ring_nr;
+
+ queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
+ ring_nr, sched_prio);
+ if (IS_ERR(queue->entity)) {
+ ret = PTR_ERR(queue->entity);
+ kfree(queue);
+ return ret;
+ }
+
+ write_lock(&ctx->queuelock);
+
+ queue->ctx = msm_file_private_get(ctx);
+ queue->id = ctx->queueid++;
+
+ if (id)
+ *id = queue->id;
+
+ idr_init(&queue->fence_idr);
+ spin_lock_init(&queue->idr_lock);
+ mutex_init(&queue->lock);
+
+ list_add_tail(&queue->node, &ctx->submitqueues);
+
+ write_unlock(&ctx->queuelock);
+
+ return 0;
+}
+
+/*
+ * Create the default submit-queue (id==0), used for backwards compatibility
+ * for userspace that pre-dates the introduction of submitqueues.
+ */
+int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
+{
+ struct msm_drm_private *priv = drm->dev_private;
+ int default_prio, max_priority;
+
+ if (!priv->gpu)
+ return -ENODEV;
+
+ max_priority = (priv->gpu->nr_rings * NR_SCHED_PRIORITIES) - 1;
+
+ /*
+ * Pick a medium priority level as default. Lower numeric value is
+ * higher priority, so round-up to pick a priority that is not higher
+ * than the middle priority level.
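+ *
+ * Worked example (assuming NR_SCHED_PRIORITIES == 3): with one ring,
+ * max_priority == 2 and default_prio == DIV_ROUND_UP(2, 2) == 1; with
+ * four rings, max_priority == 11 and default_prio == 6.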
+ */
+ default_prio = DIV_ROUND_UP(max_priority, 2);
+
+ return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
+}
+
+static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
+ struct drm_msm_submitqueue_query *args)
+{
+ size_t size = min_t(size_t, args->len, sizeof(queue->faults));
+ int ret;
+
+ /* If a zero length was passed in, return the data size we expect */
+ if (!args->len) {
+ args->len = sizeof(queue->faults);
+ return 0;
+ }
+
+ /* Set the length to the actual size of the data */
+ args->len = size;
+
+ ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);
+
+ return ret ? -EFAULT : 0;
+}
+
+int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+ struct drm_msm_submitqueue_query *args)
+{
+ struct msm_gpu_submitqueue *queue;
+ int ret = -EINVAL;
+
+ if (args->pad)
+ return -EINVAL;
+
+ queue = msm_submitqueue_get(ctx, args->id);
+ if (!queue)
+ return -ENOENT;
+
+ if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
+ ret = msm_submitqueue_query_faults(queue, args);
+
+ msm_submitqueue_put(queue);
+
+ return ret;
+}
+
+int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
+{
+ struct msm_gpu_submitqueue *entry;
+
+ if (!ctx)
+ return 0;
+
+ /*
+ * id 0 is the "default" queue and can't be destroyed
+ * by the user
+ */
+ if (!id)
+ return -ENOENT;
+
+ write_lock(&ctx->queuelock);
+
+ list_for_each_entry(entry, &ctx->submitqueues, node) {
+ if (entry->id == id) {
+ list_del(&entry->node);
+ write_unlock(&ctx->queuelock);
+
+ msm_submitqueue_put(entry);
+ return 0;
+ }
+ }
+
+ write_unlock(&ctx->queuelock);
+ return -ENOENT;
+}
+