From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 7 Apr 2024 20:49:45 +0200 Subject: Adding upstream version 6.1.76. Signed-off-by: Daniel Baumann --- drivers/gpu/drm/msm/Kconfig | 165 + drivers/gpu/drm/msm/Makefile | 149 + drivers/gpu/drm/msm/NOTES | 87 + drivers/gpu/drm/msm/adreno/a2xx.xml.h | 3212 ++++++++ drivers/gpu/drm/msm/adreno/a2xx_gpu.c | 550 ++ drivers/gpu/drm/msm/adreno/a2xx_gpu.h | 21 + drivers/gpu/drm/msm/adreno/a3xx.xml.h | 3247 ++++++++ drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 604 ++ drivers/gpu/drm/msm/adreno/a3xx_gpu.h | 26 + drivers/gpu/drm/msm/adreno/a4xx.xml.h | 4349 +++++++++++ drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 730 ++ drivers/gpu/drm/msm/adreno/a4xx_gpu.h | 23 + drivers/gpu/drm/msm/adreno/a5xx.xml.h | 5492 ++++++++++++++ drivers/gpu/drm/msm/adreno/a5xx_debugfs.c | 159 + drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 1786 +++++ drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 174 + drivers/gpu/drm/msm/adreno/a5xx_power.c | 390 + drivers/gpu/drm/msm/adreno/a5xx_preempt.c | 302 + drivers/gpu/drm/msm/adreno/a6xx.xml.h | 7780 ++++++++++++++++++++ drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 1658 +++++ drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 190 + drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h | 483 ++ drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 2078 ++++++ drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 91 + drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 1342 ++++ drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h | 446 ++ drivers/gpu/drm/msm/adreno/a6xx_hfi.c | 734 ++ drivers/gpu/drm/msm/adreno/a6xx_hfi.h | 184 + drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 685 ++ drivers/gpu/drm/msm/adreno/adreno_device.c | 746 ++ drivers/gpu/drm/msm/adreno/adreno_gpu.c | 1095 +++ drivers/gpu/drm/msm/adreno/adreno_gpu.h | 443 ++ drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 2365 ++++++ drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h | 80 + drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c | 534 ++ drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h | 121 + drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 1626 ++++ drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h | 303 + drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 2556 +++++++ drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h | 227 + drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h | 403 + .../gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 803 ++ .../gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 716 ++ .../gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c | 752 ++ drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c | 1043 +++ drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h | 88 + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 1959 +++++ drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 895 +++ drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 713 ++ drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h | 277 + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c | 206 + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h | 80 + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c | 125 + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h | 98 + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c | 574 ++ drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h | 77 + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c | 382 + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h | 116 + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c | 206 + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h | 113 + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h | 459 ++ drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c | 85 + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h | 67 + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c | 320 + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h 
| 186 + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c | 815 ++ drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h | 395 + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c | 331 + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h | 159 + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c | 492 ++ drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h | 360 + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c | 264 + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h | 120 + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c | 277 + drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h | 115 + drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h | 45 + drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 1337 ++++ drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 207 + drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 1539 ++++ drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h | 120 + drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 674 ++ drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 112 + drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h | 978 +++ drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c | 358 + drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h | 75 + drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c | 89 + drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h | 31 + drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h | 1155 +++ drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h | 1181 +++ drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c | 666 ++ drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c | 175 + drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c | 213 + drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c | 111 + drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 600 ++ drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h | 219 + drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c | 445 ++ .../gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c | 121 + drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c | 161 + drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c | 419 ++ drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h | 1979 +++++ drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c | 1333 ++++ drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h | 126 + drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c | 203 + drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 1360 ++++ drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c | 764 ++ drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h | 78 + drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c | 370 + drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c | 126 + drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 1009 +++ drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h | 327 + drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c | 168 + drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h | 36 + drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c | 175 + drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h | 46 + drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | 1048 +++ drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c | 408 + drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h | 87 + drivers/gpu/drm/msm/disp/mdp_common.xml.h | 111 + drivers/gpu/drm/msm/disp/mdp_format.c | 183 + drivers/gpu/drm/msm/disp/mdp_kms.c | 138 + drivers/gpu/drm/msm/disp/mdp_kms.h | 142 + drivers/gpu/drm/msm/disp/msm_disp_snapshot.c | 138 + drivers/gpu/drm/msm/disp/msm_disp_snapshot.h | 144 + drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c | 196 + drivers/gpu/drm/msm/dp/dp_audio.c | 667 ++ drivers/gpu/drm/msm/dp/dp_audio.h | 74 + drivers/gpu/drm/msm/dp/dp_aux.c | 541 ++ drivers/gpu/drm/msm/dp/dp_aux.h | 23 + drivers/gpu/drm/msm/dp/dp_catalog.c | 1096 +++ drivers/gpu/drm/msm/dp/dp_catalog.h | 138 + drivers/gpu/drm/msm/dp/dp_ctrl.c | 2049 ++++++ drivers/gpu/drm/msm/dp/dp_ctrl.h | 40 + drivers/gpu/drm/msm/dp/dp_debug.c | 300 + drivers/gpu/drm/msm/dp/dp_debug.h | 74 + drivers/gpu/drm/msm/dp/dp_display.c | 1784 +++++ drivers/gpu/drm/msm/dp/dp_display.h | 42 + 
drivers/gpu/drm/msm/dp/dp_drm.c | 178 + drivers/gpu/drm/msm/dp/dp_drm.h | 36 + drivers/gpu/drm/msm/dp/dp_hpd.c | 67 + drivers/gpu/drm/msm/dp/dp_hpd.h | 78 + drivers/gpu/drm/msm/dp/dp_link.c | 1223 +++ drivers/gpu/drm/msm/dp/dp_link.h | 156 + drivers/gpu/drm/msm/dp/dp_panel.c | 464 ++ drivers/gpu/drm/msm/dp/dp_panel.h | 99 + drivers/gpu/drm/msm/dp/dp_parser.c | 293 + drivers/gpu/drm/msm/dp/dp_parser.h | 153 + drivers/gpu/drm/msm/dp/dp_power.c | 257 + drivers/gpu/drm/msm/dp/dp_power.h | 107 + drivers/gpu/drm/msm/dp/dp_reg.h | 308 + drivers/gpu/drm/msm/dsi/dsi.c | 280 + drivers/gpu/drm/msm/dsi/dsi.h | 164 + drivers/gpu/drm/msm/dsi/dsi.xml.h | 788 ++ drivers/gpu/drm/msm/dsi/dsi_cfg.c | 325 + drivers/gpu/drm/msm/dsi/dsi_cfg.h | 68 + drivers/gpu/drm/msm/dsi/dsi_host.c | 2630 +++++++ drivers/gpu/drm/msm/dsi/dsi_manager.c | 681 ++ drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h | 227 + drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h | 309 + drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h | 237 + drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h | 384 + drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h | 286 + drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h | 483 ++ drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | 131 + drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 855 +++ drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 134 + drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c | 1061 +++ drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c | 1086 +++ drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c | 148 + drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c | 822 +++ drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c | 660 ++ drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c | 1104 +++ drivers/gpu/drm/msm/dsi/sfpb.xml.h | 70 + drivers/gpu/drm/msm/hdmi/hdmi.c | 614 ++ drivers/gpu/drm/msm/hdmi/hdmi.h | 267 + drivers/gpu/drm/msm/hdmi/hdmi.xml.h | 1377 ++++ drivers/gpu/drm/msm/hdmi/hdmi_audio.c | 254 + drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 357 + drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c | 1428 ++++ drivers/gpu/drm/msm/hdmi/hdmi_hpd.c | 263 + drivers/gpu/drm/msm/hdmi/hdmi_i2c.c | 267 + drivers/gpu/drm/msm/hdmi/hdmi_phy.c | 217 + drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c | 51 + drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c | 765 ++ drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c | 141 + drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c | 44 + drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c | 453 ++ drivers/gpu/drm/msm/hdmi/qfprom.xml.h | 61 + drivers/gpu/drm/msm/msm_atomic.c | 288 + drivers/gpu/drm/msm/msm_atomic_trace.h | 110 + drivers/gpu/drm/msm/msm_atomic_tracepoints.c | 3 + drivers/gpu/drm/msm/msm_debugfs.c | 339 + drivers/gpu/drm/msm/msm_debugfs.h | 14 + drivers/gpu/drm/msm/msm_drv.c | 1349 ++++ drivers/gpu/drm/msm/msm_drv.h | 558 ++ drivers/gpu/drm/msm/msm_fb.c | 285 + drivers/gpu/drm/msm/msm_fbdev.c | 204 + drivers/gpu/drm/msm/msm_fence.c | 116 + drivers/gpu/drm/msm/msm_fence.h | 78 + drivers/gpu/drm/msm/msm_gem.c | 1294 ++++ drivers/gpu/drm/msm/msm_gem.h | 336 + drivers/gpu/drm/msm/msm_gem_prime.c | 74 + drivers/gpu/drm/msm/msm_gem_shrinker.c | 239 + drivers/gpu/drm/msm/msm_gem_submit.c | 981 +++ drivers/gpu/drm/msm/msm_gem_vma.c | 193 + drivers/gpu/drm/msm/msm_gpu.c | 1033 +++ drivers/gpu/drm/msm/msm_gpu.h | 700 ++ drivers/gpu/drm/msm/msm_gpu_devfreq.c | 377 + drivers/gpu/drm/msm/msm_gpu_trace.h | 184 + drivers/gpu/drm/msm/msm_gpu_tracepoints.c | 6 + drivers/gpu/drm/msm/msm_gpummu.c | 121 + drivers/gpu/drm/msm/msm_io_utils.c | 148 + drivers/gpu/drm/msm/msm_iommu.c | 399 + drivers/gpu/drm/msm/msm_kms.h | 206 + drivers/gpu/drm/msm/msm_mdss.c | 474 ++ drivers/gpu/drm/msm/msm_mmu.h | 62 + drivers/gpu/drm/msm/msm_perf.c | 236 
+ drivers/gpu/drm/msm/msm_rd.c | 428 ++ drivers/gpu/drm/msm/msm_ringbuffer.c | 132 + drivers/gpu/drm/msm/msm_ringbuffer.h | 120 + drivers/gpu/drm/msm/msm_submitqueue.c | 307 + 220 files changed, 124163 insertions(+) create mode 100644 drivers/gpu/drm/msm/Kconfig create mode 100644 drivers/gpu/drm/msm/Makefile create mode 100644 drivers/gpu/drm/msm/NOTES create mode 100644 drivers/gpu/drm/msm/adreno/a2xx.xml.h create mode 100644 drivers/gpu/drm/msm/adreno/a2xx_gpu.c create mode 100644 drivers/gpu/drm/msm/adreno/a2xx_gpu.h create mode 100644 drivers/gpu/drm/msm/adreno/a3xx.xml.h create mode 100644 drivers/gpu/drm/msm/adreno/a3xx_gpu.c create mode 100644 drivers/gpu/drm/msm/adreno/a3xx_gpu.h create mode 100644 drivers/gpu/drm/msm/adreno/a4xx.xml.h create mode 100644 drivers/gpu/drm/msm/adreno/a4xx_gpu.c create mode 100644 drivers/gpu/drm/msm/adreno/a4xx_gpu.h create mode 100644 drivers/gpu/drm/msm/adreno/a5xx.xml.h create mode 100644 drivers/gpu/drm/msm/adreno/a5xx_debugfs.c create mode 100644 drivers/gpu/drm/msm/adreno/a5xx_gpu.c create mode 100644 drivers/gpu/drm/msm/adreno/a5xx_gpu.h create mode 100644 drivers/gpu/drm/msm/adreno/a5xx_power.c create mode 100644 drivers/gpu/drm/msm/adreno/a5xx_preempt.c create mode 100644 drivers/gpu/drm/msm/adreno/a6xx.xml.h create mode 100644 drivers/gpu/drm/msm/adreno/a6xx_gmu.c create mode 100644 drivers/gpu/drm/msm/adreno/a6xx_gmu.h create mode 100644 drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h create mode 100644 drivers/gpu/drm/msm/adreno/a6xx_gpu.c create mode 100644 drivers/gpu/drm/msm/adreno/a6xx_gpu.h create mode 100644 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c create mode 100644 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h create mode 100644 drivers/gpu/drm/msm/adreno/a6xx_hfi.c create mode 100644 drivers/gpu/drm/msm/adreno/a6xx_hfi.h create mode 100644 drivers/gpu/drm/msm/adreno/adreno_common.xml.h create mode 100644 drivers/gpu/drm/msm/adreno/adreno_device.c create mode 100644 drivers/gpu/drm/msm/adreno/adreno_gpu.c create mode 100644 drivers/gpu/drm/msm/adreno/adreno_gpu.h create mode 100644 drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c create mode 
100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h create mode 100644 drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h create mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h create mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c create mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c create mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c create mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c create mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c create mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h create mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c create mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c create mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c create mode 100644 drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h 
create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c create mode 100644 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h create mode 100644 drivers/gpu/drm/msm/disp/mdp_common.xml.h create mode 100644 drivers/gpu/drm/msm/disp/mdp_format.c create mode 100644 drivers/gpu/drm/msm/disp/mdp_kms.c create mode 100644 drivers/gpu/drm/msm/disp/mdp_kms.h create mode 100644 drivers/gpu/drm/msm/disp/msm_disp_snapshot.c create mode 100644 drivers/gpu/drm/msm/disp/msm_disp_snapshot.h create mode 100644 drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c create mode 100644 drivers/gpu/drm/msm/dp/dp_audio.c create mode 100644 drivers/gpu/drm/msm/dp/dp_audio.h create mode 100644 drivers/gpu/drm/msm/dp/dp_aux.c create mode 100644 drivers/gpu/drm/msm/dp/dp_aux.h create mode 100644 drivers/gpu/drm/msm/dp/dp_catalog.c create mode 100644 drivers/gpu/drm/msm/dp/dp_catalog.h create mode 100644 drivers/gpu/drm/msm/dp/dp_ctrl.c create mode 100644 drivers/gpu/drm/msm/dp/dp_ctrl.h create mode 100644 drivers/gpu/drm/msm/dp/dp_debug.c create mode 100644 drivers/gpu/drm/msm/dp/dp_debug.h create mode 100644 drivers/gpu/drm/msm/dp/dp_display.c create mode 100644 drivers/gpu/drm/msm/dp/dp_display.h create mode 100644 drivers/gpu/drm/msm/dp/dp_drm.c create mode 100644 drivers/gpu/drm/msm/dp/dp_drm.h create mode 100644 drivers/gpu/drm/msm/dp/dp_hpd.c create mode 100644 drivers/gpu/drm/msm/dp/dp_hpd.h create mode 100644 drivers/gpu/drm/msm/dp/dp_link.c create mode 100644 drivers/gpu/drm/msm/dp/dp_link.h create mode 100644 drivers/gpu/drm/msm/dp/dp_panel.c create mode 100644 drivers/gpu/drm/msm/dp/dp_panel.h create mode 100644 drivers/gpu/drm/msm/dp/dp_parser.c create mode 100644 drivers/gpu/drm/msm/dp/dp_parser.h create mode 100644 drivers/gpu/drm/msm/dp/dp_power.c create mode 100644 drivers/gpu/drm/msm/dp/dp_power.h create mode 100644 drivers/gpu/drm/msm/dp/dp_reg.h create mode 100644 drivers/gpu/drm/msm/dsi/dsi.c create mode 100644 drivers/gpu/drm/msm/dsi/dsi.h create mode 100644 drivers/gpu/drm/msm/dsi/dsi.xml.h create mode 100644 drivers/gpu/drm/msm/dsi/dsi_cfg.c create mode 100644 drivers/gpu/drm/msm/dsi/dsi_cfg.h create mode 100644 drivers/gpu/drm/msm/dsi/dsi_host.c create mode 100644 drivers/gpu/drm/msm/dsi/dsi_manager.c create mode 100644 drivers/gpu/drm/msm/dsi/dsi_phy_10nm.xml.h create mode 100644 drivers/gpu/drm/msm/dsi/dsi_phy_14nm.xml.h create mode 100644 drivers/gpu/drm/msm/dsi/dsi_phy_20nm.xml.h create mode 100644 drivers/gpu/drm/msm/dsi/dsi_phy_28nm.xml.h create mode 100644 drivers/gpu/drm/msm/dsi/dsi_phy_28nm_8960.xml.h create mode 100644 drivers/gpu/drm/msm/dsi/dsi_phy_7nm.xml.h create mode 100644 drivers/gpu/drm/msm/dsi/mmss_cc.xml.h create mode 100644 drivers/gpu/drm/msm/dsi/phy/dsi_phy.c create mode 100644 drivers/gpu/drm/msm/dsi/phy/dsi_phy.h create mode 100644 drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c create mode 100644 drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c create mode 100644 drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c create mode 100644 drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c create mode 100644 drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c create mode 100644 drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c create mode 100644 drivers/gpu/drm/msm/dsi/sfpb.xml.h create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi.c create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi.h create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi.xml.h create 
mode 100644 drivers/gpu/drm/msm/hdmi/hdmi_audio.c create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi_bridge.c create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi_hpd.c create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi_i2c.c create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi_phy.c create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c create mode 100644 drivers/gpu/drm/msm/hdmi/qfprom.xml.h create mode 100644 drivers/gpu/drm/msm/msm_atomic.c create mode 100644 drivers/gpu/drm/msm/msm_atomic_trace.h create mode 100644 drivers/gpu/drm/msm/msm_atomic_tracepoints.c create mode 100644 drivers/gpu/drm/msm/msm_debugfs.c create mode 100644 drivers/gpu/drm/msm/msm_debugfs.h create mode 100644 drivers/gpu/drm/msm/msm_drv.c create mode 100644 drivers/gpu/drm/msm/msm_drv.h create mode 100644 drivers/gpu/drm/msm/msm_fb.c create mode 100644 drivers/gpu/drm/msm/msm_fbdev.c create mode 100644 drivers/gpu/drm/msm/msm_fence.c create mode 100644 drivers/gpu/drm/msm/msm_fence.h create mode 100644 drivers/gpu/drm/msm/msm_gem.c create mode 100644 drivers/gpu/drm/msm/msm_gem.h create mode 100644 drivers/gpu/drm/msm/msm_gem_prime.c create mode 100644 drivers/gpu/drm/msm/msm_gem_shrinker.c create mode 100644 drivers/gpu/drm/msm/msm_gem_submit.c create mode 100644 drivers/gpu/drm/msm/msm_gem_vma.c create mode 100644 drivers/gpu/drm/msm/msm_gpu.c create mode 100644 drivers/gpu/drm/msm/msm_gpu.h create mode 100644 drivers/gpu/drm/msm/msm_gpu_devfreq.c create mode 100644 drivers/gpu/drm/msm/msm_gpu_trace.h create mode 100644 drivers/gpu/drm/msm/msm_gpu_tracepoints.c create mode 100644 drivers/gpu/drm/msm/msm_gpummu.c create mode 100644 drivers/gpu/drm/msm/msm_io_utils.c create mode 100644 drivers/gpu/drm/msm/msm_iommu.c create mode 100644 drivers/gpu/drm/msm/msm_kms.h create mode 100644 drivers/gpu/drm/msm/msm_mdss.c create mode 100644 drivers/gpu/drm/msm/msm_mmu.h create mode 100644 drivers/gpu/drm/msm/msm_perf.c create mode 100644 drivers/gpu/drm/msm/msm_rd.c create mode 100644 drivers/gpu/drm/msm/msm_ringbuffer.c create mode 100644 drivers/gpu/drm/msm/msm_ringbuffer.h create mode 100644 drivers/gpu/drm/msm/msm_submitqueue.c (limited to 'drivers/gpu/drm/msm')

diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
new file mode 100644
index 000000000..3c9dfdb0b
--- /dev/null
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -0,0 +1,165 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_MSM
+	tristate "MSM DRM"
+	depends on DRM
+	depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST
+	depends on COMMON_CLK
+	depends on IOMMU_SUPPORT
+	depends on QCOM_OCMEM || QCOM_OCMEM=n
+	depends on QCOM_LLCC || QCOM_LLCC=n
+	depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n
+	select IOMMU_IO_PGTABLE
+	select QCOM_MDT_LOADER if ARCH_QCOM
+	select REGULATOR
+	select DRM_DP_AUX_BUS
+	select DRM_DISPLAY_DP_HELPER
+	select DRM_DISPLAY_HELPER
+	select DRM_KMS_HELPER
+	select DRM_PANEL
+	select DRM_BRIDGE
+	select DRM_PANEL_BRIDGE
+	select DRM_SCHED
+	select SHMEM
+	select TMPFS
+	select QCOM_SCM
+	select WANT_DEV_COREDUMP
+	select SND_SOC_HDMI_CODEC if SND_SOC
+	select SYNC_FILE
+	select PM_OPP
+	select NVMEM
+	help
+	  DRM/KMS driver for MSM/snapdragon.
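+# (Editor's note, not part of the upstream file: the "depends on FOO || FOO=n"
+# lines above are the standard Kconfig idiom for an optional dependency. They
+# allow DRM_MSM to be built-in (y) only while FOO is built-in or disabled; when
+# FOO is a module, DRM_MSM is limited to m as well, so built-in code can never
+# call into a modular FOO. For example:
+#
+#	depends on QCOM_OCMEM || QCOM_OCMEM=n
+#
+# evaluates to y when QCOM_OCMEM is y or n, and to m when QCOM_OCMEM=m.)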
+
+config DRM_MSM_GPU_STATE
+	bool
+	depends on DRM_MSM && (DEBUG_FS || DEV_COREDUMP)
+	default y
+
+config DRM_MSM_GPU_SUDO
+	bool "Enable SUDO flag on submits"
+	depends on DRM_MSM && EXPERT
+	default n
+	help
+	  Enable userspace that has CAP_SYS_RAWIO to submit GPU commands
+	  that are run from RB instead of IB1. This essentially gives
+	  userspace kernel-level access, but is useful for firmware
+	  debugging.
+
+	  Only use this if you are a driver developer. This should *not*
+	  be enabled for production kernels. If unsure, say N.
+
+config DRM_MSM_MDSS
+	bool
+	depends on DRM_MSM
+	default n
+
+config DRM_MSM_MDP4
+	bool "Enable MDP4 support in MSM DRM driver"
+	depends on DRM_MSM
+	default y
+	help
+	  Compile in support for the Mobile Display Processor v4 (MDP4) in
+	  the MSM DRM driver. It is the older display controller found in
+	  devices using APQ8064/MSM8960/MSM8x60 platforms.
+
+config DRM_MSM_MDP5
+	bool "Enable MDP5 support in MSM DRM driver"
+	depends on DRM_MSM
+	select DRM_MSM_MDSS
+	default y
+	help
+	  Compile in support for the Mobile Display Processor v5 (MDP5) in
+	  the MSM DRM driver. It is the display controller found in devices
+	  using e.g. APQ8016/MSM8916/APQ8096/MSM8996/MSM8974/SDM6x0 platforms.
+
+config DRM_MSM_DPU
+	bool "Enable DPU support in MSM DRM driver"
+	depends on DRM_MSM
+	select DRM_MSM_MDSS
+	default y
+	help
+	  Compile in support for the Display Processing Unit in the MSM
+	  DRM driver. It is the display controller found in devices using
+	  e.g. SDM845 and newer platforms.
+
+config DRM_MSM_DP
+	bool "Enable DisplayPort support in MSM DRM driver"
+	depends on DRM_MSM
+	select RATIONAL
+	default y
+	help
+	  Compile in support for the DisplayPort driver in the MSM DRM
+	  driver. External DisplayPort display support is enabled through
+	  this config option. It can be a primary or a secondary display
+	  on the device.
+
+config DRM_MSM_DSI
+	bool "Enable DSI support in MSM DRM driver"
+	depends on DRM_MSM
+	select DRM_PANEL
+	select DRM_MIPI_DSI
+	default y
+	help
+	  Choose this option if you need MIPI DSI connector support.
+
+config DRM_MSM_DSI_28NM_PHY
+	bool "Enable DSI 28nm PHY driver in MSM DRM"
+	depends on DRM_MSM_DSI
+	default y
+	help
+	  Choose this option if the 28nm DSI PHY is used on the platform.
+
+config DRM_MSM_DSI_20NM_PHY
+	bool "Enable DSI 20nm PHY driver in MSM DRM"
+	depends on DRM_MSM_DSI
+	default y
+	help
+	  Choose this option if the 20nm DSI PHY is used on the platform.
+
+config DRM_MSM_DSI_28NM_8960_PHY
+	bool "Enable DSI 28nm 8960 PHY driver in MSM DRM"
+	depends on DRM_MSM_DSI
+	default y
+	help
+	  Choose this option if the 28nm DSI PHY 8960 variant is used on
+	  the platform.
+
+config DRM_MSM_DSI_14NM_PHY
+	bool "Enable DSI 14nm PHY driver in MSM DRM (used by MSM8996/APQ8096)"
+	depends on DRM_MSM_DSI
+	default y
+	help
+	  Choose this option if the DSI PHY on MSM8996/APQ8096 is used on
+	  the platform.
+
+config DRM_MSM_DSI_10NM_PHY
+	bool "Enable DSI 10nm PHY driver in MSM DRM (used by SDM845)"
+	depends on DRM_MSM_DSI
+	default y
+	help
+	  Choose this option if the DSI PHY on SDM845 is used on the platform.
+
+config DRM_MSM_DSI_7NM_PHY
+	bool "Enable DSI 7nm PHY driver in MSM DRM"
+	depends on DRM_MSM_DSI
+	default y
+	help
+	  Choose this option if the DSI PHY on SM8150/SM8250/SC7280 is used
+	  on the platform.
+
+config DRM_MSM_HDMI
+	bool "Enable HDMI support in MSM DRM driver"
+	depends on DRM_MSM
+	default y
+	help
+	  Compile in support for the HDMI output in the MSM DRM driver. It
+	  can be a primary or a secondary display on the device. Note that
+	  this is used only for the direct HDMI output.
+	  If the device outputs HDMI data through some kind of DSI-to-HDMI
+	  bridge, this option can be disabled.
+
+config DRM_MSM_HDMI_HDCP
+	bool "Enable HDMI HDCP support in MSM DRM driver"
+	depends on DRM_MSM && DRM_MSM_HDMI
+	default y
+	help
+	  Choose this option to enable the HDCP state machine.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
new file mode 100644
index 000000000..7274c4122
--- /dev/null
+++ b/drivers/gpu/drm/msm/Makefile
@@ -0,0 +1,149 @@
+# SPDX-License-Identifier: GPL-2.0
+ccflags-y := -I $(srctree)/$(src)
+ccflags-y += -I $(srctree)/$(src)/disp/dpu1
+ccflags-$(CONFIG_DRM_MSM_DSI) += -I $(srctree)/$(src)/dsi
+ccflags-$(CONFIG_DRM_MSM_DP) += -I $(srctree)/$(src)/dp
+
+msm-y := \
+	adreno/adreno_device.o \
+	adreno/adreno_gpu.o \
+	adreno/a2xx_gpu.o \
+	adreno/a3xx_gpu.o \
+	adreno/a4xx_gpu.o \
+	adreno/a5xx_gpu.o \
+	adreno/a5xx_power.o \
+	adreno/a5xx_preempt.o \
+	adreno/a6xx_gpu.o \
+	adreno/a6xx_gmu.o \
+	adreno/a6xx_hfi.o \
+
+msm-$(CONFIG_DRM_MSM_HDMI) += \
+	hdmi/hdmi.o \
+	hdmi/hdmi_audio.o \
+	hdmi/hdmi_bridge.o \
+	hdmi/hdmi_hpd.o \
+	hdmi/hdmi_i2c.o \
+	hdmi/hdmi_phy.o \
+	hdmi/hdmi_phy_8960.o \
+	hdmi/hdmi_phy_8996.o \
+	hdmi/hdmi_phy_8x60.o \
+	hdmi/hdmi_phy_8x74.o \
+	hdmi/hdmi_pll_8960.o \
+
+msm-$(CONFIG_DRM_MSM_MDP4) += \
+	disp/mdp4/mdp4_crtc.o \
+	disp/mdp4/mdp4_dsi_encoder.o \
+	disp/mdp4/mdp4_dtv_encoder.o \
+	disp/mdp4/mdp4_lcdc_encoder.o \
+	disp/mdp4/mdp4_lvds_connector.o \
+	disp/mdp4/mdp4_lvds_pll.o \
+	disp/mdp4/mdp4_irq.o \
+	disp/mdp4/mdp4_kms.o \
+	disp/mdp4/mdp4_plane.o \
+
+msm-$(CONFIG_DRM_MSM_MDP5) += \
+	disp/mdp5/mdp5_cfg.o \
+	disp/mdp5/mdp5_cmd_encoder.o \
+	disp/mdp5/mdp5_ctl.o \
+	disp/mdp5/mdp5_crtc.o \
+	disp/mdp5/mdp5_encoder.o \
+	disp/mdp5/mdp5_irq.o \
+	disp/mdp5/mdp5_kms.o \
+	disp/mdp5/mdp5_pipe.o \
+	disp/mdp5/mdp5_mixer.o \
+	disp/mdp5/mdp5_plane.o \
+	disp/mdp5/mdp5_smp.o \
+
+msm-$(CONFIG_DRM_MSM_DPU) += \
+	disp/dpu1/dpu_core_perf.o \
+	disp/dpu1/dpu_crtc.o \
+	disp/dpu1/dpu_encoder.o \
+	disp/dpu1/dpu_encoder_phys_cmd.o \
+	disp/dpu1/dpu_encoder_phys_vid.o \
+	disp/dpu1/dpu_encoder_phys_wb.o \
+	disp/dpu1/dpu_formats.o \
+	disp/dpu1/dpu_hw_catalog.o \
+	disp/dpu1/dpu_hw_ctl.o \
+	disp/dpu1/dpu_hw_dsc.o \
+	disp/dpu1/dpu_hw_interrupts.o \
+	disp/dpu1/dpu_hw_intf.o \
+	disp/dpu1/dpu_hw_lm.o \
+	disp/dpu1/dpu_hw_pingpong.o \
+	disp/dpu1/dpu_hw_sspp.o \
+	disp/dpu1/dpu_hw_dspp.o \
+	disp/dpu1/dpu_hw_merge3d.o \
+	disp/dpu1/dpu_hw_top.o \
+	disp/dpu1/dpu_hw_util.o \
+	disp/dpu1/dpu_hw_vbif.o \
+	disp/dpu1/dpu_hw_wb.o \
+	disp/dpu1/dpu_kms.o \
+	disp/dpu1/dpu_plane.o \
+	disp/dpu1/dpu_rm.o \
+	disp/dpu1/dpu_vbif.o \
+	disp/dpu1/dpu_writeback.o
+
+msm-$(CONFIG_DRM_MSM_MDSS) += \
+	msm_mdss.o \
+
+msm-y += \
+	disp/mdp_format.o \
+	disp/mdp_kms.o \
+	disp/msm_disp_snapshot.o \
+	disp/msm_disp_snapshot_util.o \
+	msm_atomic.o \
+	msm_atomic_tracepoints.o \
+	msm_debugfs.o \
+	msm_drv.o \
+	msm_fb.o \
+	msm_fence.o \
+	msm_gem.o \
+	msm_gem_prime.o \
+	msm_gem_shrinker.o \
+	msm_gem_submit.o \
+	msm_gem_vma.o \
+	msm_gpu.o \
+	msm_gpu_devfreq.o \
+	msm_io_utils.o \
+	msm_iommu.o \
+	msm_perf.o \
+	msm_rd.o \
+	msm_ringbuffer.o \
+	msm_submitqueue.o \
+	msm_gpu_tracepoints.o \
+	msm_gpummu.o
+
+msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
+	dp/dp_debug.o
+
+msm-$(CONFIG_DRM_MSM_GPU_STATE) += adreno/a6xx_gpu_state.o
+
+msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
+	dp/dp_catalog.o \
+	dp/dp_ctrl.o \
+	dp/dp_display.o \
+	dp/dp_drm.o \
+	dp/dp_hpd.o \
+	dp/dp_link.o \
+	dp/dp_panel.o \
+	dp/dp_parser.o \
+	dp/dp_power.o \
+	dp/dp_audio.o
+
+msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
+
+msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
+
+msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
+	dsi/dsi_cfg.o \
+	dsi/dsi_host.o \
+	dsi/dsi_manager.o \
+	dsi/phy/dsi_phy.o
+
+msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
+msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
+msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
+msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
+msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o
+msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/phy/dsi_phy_7nm.o
+
+obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/NOTES b/drivers/gpu/drm/msm/NOTES
new file mode 100644
index 000000000..9c4255b98
--- /dev/null
+++ b/drivers/gpu/drm/msm/NOTES
@@ -0,0 +1,87 @@
+NOTES about msm drm/kms driver:
+
+In the current snapdragon SoCs, we have (at least) 3 different
+display controller blocks at play:
+ + MDP3 - ?? seems to be what is on geeksphone peak device
+ + MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410)
+ + MDP5 - snapdragon 800
+
+(I don't have a completely clear picture on which display controller
+maps to which part #)
+
+Plus a handful of blocks around them for HDMI/DSI/etc output.
+
+And on the gpu side of things:
+ + zero, one, or two 2d cores (z180)
+ + and either an a2xx or a3xx 3d core.
+
+But HDMI/DSI/etc blocks seem like they can be shared across multiple
+display controller blocks. And I for sure don't want to have to deal
+with N different kms devices from xf86-video-freedreno. Plus, it
+seems like we can do some clever tricks, like using the GPU to trigger
+a pageflip after rendering completes (ie. have the kms/crtc code build
+up a gpu cmdstream to update scanout and write the FLUSH register after).
+
+So, the approach is one drm driver, with some modularity. Different
+'struct msm_kms' implementations, depending on display controller.
+And one or more 'struct msm_gpu' for the various different gpu sub-
+modules.
+
+(Second part is not implemented yet. So far this is just a basic KMS
+driver, not exposing any custom ioctls to userspace for now.)
+
+The kms module provides the plane, crtc, and encoder objects, and
+loads whatever connectors are appropriate.
+
+For MDP4, the mapping is:
+
+   plane   -> PIPE{RGBn,VGn} \
+   crtc    -> OVLP{n} + DMA{P,S,E} (??)  |-> MDP "device"
+   encoder -> DTV/LCDC/DSI (within MDP4) /
+   connector -> HDMI/DSI/etc --> other device(s)
+
+Since the irqs that the drm core mostly cares about are vblank/framedone,
+we'll let msm_mdp4_kms provide the irq install/uninstall/etc functions
+and treat the MDP4 block's irq as "the" irq, even though the connectors
+may have their own irqs which they install themselves. For this reason
+the display controller is the "master" device.
+
+For MDP5, the mapping is:
+
+   plane   -> PIPE{RGBn,VIGn} \
+   crtc    -> LM (layer mixer) |-> MDP "device"
+   encoder -> INTF             /
+   connector -> HDMI/DSI/eDP/etc --> other device(s)
+
+Unlike MDP4, it appears we can get by with a single encoder, rather
+than needing a different implementation for DTV, DSI, etc. (Ie. the
+register interface is the same, just at different bases.)
+
+Also unlike MDP4, with MDP5 all the IRQs for other blocks (HDMI, DSI,
+etc) are routed through MDP.
+
+And finally, MDP5 has this "Shared Memory Pool" (called "SMP"), from
+which blocks need to be allocated to the active pipes based on fetch
+stride.
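+
+(Editor's sketch, not from any hw docs: the per-pipe request is
+basically a ceiling division of the fetch stride by the SMP block
+size, something like
+
+    /* blk_size is hypothetical here; the real value comes from the
+       per-SoC config (see mdp5_smp.c in this series) */
+    nblks = DIV_ROUND_UP(fetch_stride_bytes, blk_size);
+
+with the allocator then handing out that many blocks from the shared
+pool, and failing the request if the pool is exhausted.)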
+
+Each connector probably ends up being a separate device, just for the
+logistics of finding/mapping io region, irq, etc. Ideally we would
+have a better way than just stashing the platform device in a global
+(ie. like DT super-node.. but I don't have any snapdragon hw yet that
+is using DT).
+
+Note that so far I've not been able to get any docs on the hw, and it
+seems that access to such docs would prevent me from working on the
+freedreno gallium driver. So there may be some mistakes in register
+names (I had to invent a few, since no sufficient hint was given in
+the downstream android fbdev driver), bitfield sizes, etc. My current
+state of understanding the registers is given in the envytools rnndb
+files at:
+
+  https://github.com/freedreno/envytools/tree/master/rnndb
+  (the mdp4/hdmi/dsi directories)
+
+These files are used both for a parser tool (in the same tree) to
+parse logged register reads/writes (both from the downstream android
+fbdev driver, and from this driver with register logging enabled), as
+well as to generate the register-level headers.
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
new file mode 100644
index 000000000..afa602334
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -0,0 +1,3212 @@
+#ifndef A2XX_XML
+#define A2XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56)
+- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56)
+
+Copyright (C) 2013-2021 by the following authors:
+- Rob Clark (robclark)
+- Ilia Mirkin (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software
is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ + + +enum a2xx_rb_dither_type { + DITHER_PIXEL = 0, + DITHER_SUBPIXEL = 1, +}; + +enum a2xx_colorformatx { + COLORX_4_4_4_4 = 0, + COLORX_1_5_5_5 = 1, + COLORX_5_6_5 = 2, + COLORX_8 = 3, + COLORX_8_8 = 4, + COLORX_8_8_8_8 = 5, + COLORX_S8_8_8_8 = 6, + COLORX_16_FLOAT = 7, + COLORX_16_16_FLOAT = 8, + COLORX_16_16_16_16_FLOAT = 9, + COLORX_32_FLOAT = 10, + COLORX_32_32_FLOAT = 11, + COLORX_32_32_32_32_FLOAT = 12, + COLORX_2_3_3 = 13, + COLORX_8_8_8 = 14, +}; + +enum a2xx_sq_surfaceformat { + FMT_1_REVERSE = 0, + FMT_1 = 1, + FMT_8 = 2, + FMT_1_5_5_5 = 3, + FMT_5_6_5 = 4, + FMT_6_5_5 = 5, + FMT_8_8_8_8 = 6, + FMT_2_10_10_10 = 7, + FMT_8_A = 8, + FMT_8_B = 9, + FMT_8_8 = 10, + FMT_Cr_Y1_Cb_Y0 = 11, + FMT_Y1_Cr_Y0_Cb = 12, + FMT_5_5_5_1 = 13, + FMT_8_8_8_8_A = 14, + FMT_4_4_4_4 = 15, + FMT_8_8_8 = 16, + FMT_DXT1 = 18, + FMT_DXT2_3 = 19, + FMT_DXT4_5 = 20, + FMT_10_10_10_2 = 21, + FMT_24_8 = 22, + FMT_16 = 24, + FMT_16_16 = 25, + FMT_16_16_16_16 = 26, + FMT_16_EXPAND = 27, + FMT_16_16_EXPAND = 28, + FMT_16_16_16_16_EXPAND = 29, + FMT_16_FLOAT = 30, + FMT_16_16_FLOAT = 31, + FMT_16_16_16_16_FLOAT = 32, + FMT_32 = 33, + FMT_32_32 = 34, + FMT_32_32_32_32 = 35, + FMT_32_FLOAT = 36, + FMT_32_32_FLOAT = 37, + FMT_32_32_32_32_FLOAT = 38, + FMT_ATI_TC_RGB = 39, + FMT_ATI_TC_RGBA = 40, + FMT_ATI_TC_555_565_RGB = 41, + FMT_ATI_TC_555_565_RGBA = 42, + FMT_ATI_TC_RGBA_INTERP = 43, + FMT_ATI_TC_555_565_RGBA_INTERP = 44, + FMT_ETC1_RGBA_INTERP = 46, + FMT_ETC1_RGB = 47, + FMT_ETC1_RGBA = 48, + FMT_DXN = 49, + FMT_2_3_3 = 51, + FMT_2_10_10_10_AS_16_16_16_16 = 54, + FMT_10_10_10_2_AS_16_16_16_16 = 55, + FMT_32_32_32_FLOAT = 57, + FMT_DXT3A = 58, + FMT_DXT5A = 59, + FMT_CTX1 = 60, +}; + +enum a2xx_sq_ps_vtx_mode { + POSITION_1_VECTOR = 0, + POSITION_2_VECTORS_UNUSED = 1, + POSITION_2_VECTORS_SPRITE = 2, + POSITION_2_VECTORS_EDGE = 3, + POSITION_2_VECTORS_KILL = 4, + POSITION_2_VECTORS_SPRITE_KILL = 5, + POSITION_2_VECTORS_EDGE_KILL = 6, + MULTIPASS = 7, +}; + +enum a2xx_sq_sample_cntl { + CENTROIDS_ONLY = 0, + CENTERS_ONLY = 1, + CENTROIDS_AND_CENTERS = 2, +}; + +enum a2xx_dx_clip_space { + DXCLIP_OPENGL = 0, + DXCLIP_DIRECTX = 1, +}; + +enum a2xx_pa_su_sc_polymode { + POLY_DISABLED = 0, + POLY_DUALMODE = 1, +}; + +enum a2xx_rb_edram_mode { + EDRAM_NOP = 0, + COLOR_DEPTH = 4, + DEPTH_ONLY = 5, + EDRAM_COPY = 6, +}; + +enum a2xx_pa_sc_pattern_bit_order { + LITTLE = 0, + BIG = 1, +}; + +enum a2xx_pa_sc_auto_reset_cntl { + NEVER = 0, + EACH_PRIMITIVE = 1, + EACH_PACKET = 2, +}; + +enum a2xx_pa_pixcenter { + PIXCENTER_D3D = 0, + PIXCENTER_OGL = 1, +}; + +enum a2xx_pa_roundmode { + TRUNCATE = 0, + ROUND = 1, + ROUNDTOEVEN = 2, + ROUNDTOODD = 3, +}; + +enum a2xx_pa_quantmode { + ONE_SIXTEENTH = 0, + ONE_EIGTH = 1, + ONE_QUARTER = 2, + ONE_HALF = 3, + ONE = 4, +}; + +enum a2xx_rb_copy_sample_select { + SAMPLE_0 = 0, + 
SAMPLE_1 = 1, + SAMPLE_2 = 2, + SAMPLE_3 = 3, + SAMPLE_01 = 4, + SAMPLE_23 = 5, + SAMPLE_0123 = 6, +}; + +enum a2xx_rb_blend_opcode { + BLEND2_DST_PLUS_SRC = 0, + BLEND2_SRC_MINUS_DST = 1, + BLEND2_MIN_DST_SRC = 2, + BLEND2_MAX_DST_SRC = 3, + BLEND2_DST_MINUS_SRC = 4, + BLEND2_DST_PLUS_SRC_BIAS = 5, +}; + +enum a2xx_su_perfcnt_select { + PERF_PAPC_PASX_REQ = 0, + PERF_PAPC_PASX_FIRST_VECTOR = 2, + PERF_PAPC_PASX_SECOND_VECTOR = 3, + PERF_PAPC_PASX_FIRST_DEAD = 4, + PERF_PAPC_PASX_SECOND_DEAD = 5, + PERF_PAPC_PASX_VTX_KILL_DISCARD = 6, + PERF_PAPC_PASX_VTX_NAN_DISCARD = 7, + PERF_PAPC_PA_INPUT_PRIM = 8, + PERF_PAPC_PA_INPUT_NULL_PRIM = 9, + PERF_PAPC_PA_INPUT_EVENT_FLAG = 10, + PERF_PAPC_PA_INPUT_FIRST_PRIM_SLOT = 11, + PERF_PAPC_PA_INPUT_END_OF_PACKET = 12, + PERF_PAPC_CLPR_CULL_PRIM = 13, + PERF_PAPC_CLPR_VV_CULL_PRIM = 15, + PERF_PAPC_CLPR_VTX_KILL_CULL_PRIM = 17, + PERF_PAPC_CLPR_VTX_NAN_CULL_PRIM = 18, + PERF_PAPC_CLPR_CULL_TO_NULL_PRIM = 19, + PERF_PAPC_CLPR_VV_CLIP_PRIM = 21, + PERF_PAPC_CLPR_POINT_CLIP_CANDIDATE = 23, + PERF_PAPC_CLPR_CLIP_PLANE_CNT_1 = 24, + PERF_PAPC_CLPR_CLIP_PLANE_CNT_2 = 25, + PERF_PAPC_CLPR_CLIP_PLANE_CNT_3 = 26, + PERF_PAPC_CLPR_CLIP_PLANE_CNT_4 = 27, + PERF_PAPC_CLPR_CLIP_PLANE_CNT_5 = 28, + PERF_PAPC_CLPR_CLIP_PLANE_CNT_6 = 29, + PERF_PAPC_CLPR_CLIP_PLANE_NEAR = 30, + PERF_PAPC_CLPR_CLIP_PLANE_FAR = 31, + PERF_PAPC_CLPR_CLIP_PLANE_LEFT = 32, + PERF_PAPC_CLPR_CLIP_PLANE_RIGHT = 33, + PERF_PAPC_CLPR_CLIP_PLANE_TOP = 34, + PERF_PAPC_CLPR_CLIP_PLANE_BOTTOM = 35, + PERF_PAPC_CLSM_NULL_PRIM = 36, + PERF_PAPC_CLSM_TOTALLY_VISIBLE_PRIM = 37, + PERF_PAPC_CLSM_CLIP_PRIM = 38, + PERF_PAPC_CLSM_CULL_TO_NULL_PRIM = 39, + PERF_PAPC_CLSM_OUT_PRIM_CNT_1 = 40, + PERF_PAPC_CLSM_OUT_PRIM_CNT_2 = 41, + PERF_PAPC_CLSM_OUT_PRIM_CNT_3 = 42, + PERF_PAPC_CLSM_OUT_PRIM_CNT_4 = 43, + PERF_PAPC_CLSM_OUT_PRIM_CNT_5 = 44, + PERF_PAPC_CLSM_OUT_PRIM_CNT_6_7 = 45, + PERF_PAPC_CLSM_NON_TRIVIAL_CULL = 46, + PERF_PAPC_SU_INPUT_PRIM = 47, + PERF_PAPC_SU_INPUT_CLIP_PRIM = 48, + PERF_PAPC_SU_INPUT_NULL_PRIM = 49, + PERF_PAPC_SU_ZERO_AREA_CULL_PRIM = 50, + PERF_PAPC_SU_BACK_FACE_CULL_PRIM = 51, + PERF_PAPC_SU_FRONT_FACE_CULL_PRIM = 52, + PERF_PAPC_SU_POLYMODE_FACE_CULL = 53, + PERF_PAPC_SU_POLYMODE_BACK_CULL = 54, + PERF_PAPC_SU_POLYMODE_FRONT_CULL = 55, + PERF_PAPC_SU_POLYMODE_INVALID_FILL = 56, + PERF_PAPC_SU_OUTPUT_PRIM = 57, + PERF_PAPC_SU_OUTPUT_CLIP_PRIM = 58, + PERF_PAPC_SU_OUTPUT_NULL_PRIM = 59, + PERF_PAPC_SU_OUTPUT_EVENT_FLAG = 60, + PERF_PAPC_SU_OUTPUT_FIRST_PRIM_SLOT = 61, + PERF_PAPC_SU_OUTPUT_END_OF_PACKET = 62, + PERF_PAPC_SU_OUTPUT_POLYMODE_FACE = 63, + PERF_PAPC_SU_OUTPUT_POLYMODE_BACK = 64, + PERF_PAPC_SU_OUTPUT_POLYMODE_FRONT = 65, + PERF_PAPC_SU_OUT_CLIP_POLYMODE_FACE = 66, + PERF_PAPC_SU_OUT_CLIP_POLYMODE_BACK = 67, + PERF_PAPC_SU_OUT_CLIP_POLYMODE_FRONT = 68, + PERF_PAPC_PASX_REQ_IDLE = 69, + PERF_PAPC_PASX_REQ_BUSY = 70, + PERF_PAPC_PASX_REQ_STALLED = 71, + PERF_PAPC_PASX_REC_IDLE = 72, + PERF_PAPC_PASX_REC_BUSY = 73, + PERF_PAPC_PASX_REC_STARVED_SX = 74, + PERF_PAPC_PASX_REC_STALLED = 75, + PERF_PAPC_PASX_REC_STALLED_POS_MEM = 76, + PERF_PAPC_PASX_REC_STALLED_CCGSM_IN = 77, + PERF_PAPC_CCGSM_IDLE = 78, + PERF_PAPC_CCGSM_BUSY = 79, + PERF_PAPC_CCGSM_STALLED = 80, + PERF_PAPC_CLPRIM_IDLE = 81, + PERF_PAPC_CLPRIM_BUSY = 82, + PERF_PAPC_CLPRIM_STALLED = 83, + PERF_PAPC_CLPRIM_STARVED_CCGSM = 84, + PERF_PAPC_CLIPSM_IDLE = 85, + PERF_PAPC_CLIPSM_BUSY = 86, + PERF_PAPC_CLIPSM_WAIT_CLIP_VERT_ENGH = 87, + PERF_PAPC_CLIPSM_WAIT_HIGH_PRI_SEQ = 88, + PERF_PAPC_CLIPSM_WAIT_CLIPGA = 89, 
+ PERF_PAPC_CLIPSM_WAIT_AVAIL_VTE_CLIP = 90, + PERF_PAPC_CLIPSM_WAIT_CLIP_OUTSM = 91, + PERF_PAPC_CLIPGA_IDLE = 92, + PERF_PAPC_CLIPGA_BUSY = 93, + PERF_PAPC_CLIPGA_STARVED_VTE_CLIP = 94, + PERF_PAPC_CLIPGA_STALLED = 95, + PERF_PAPC_CLIP_IDLE = 96, + PERF_PAPC_CLIP_BUSY = 97, + PERF_PAPC_SU_IDLE = 98, + PERF_PAPC_SU_BUSY = 99, + PERF_PAPC_SU_STARVED_CLIP = 100, + PERF_PAPC_SU_STALLED_SC = 101, + PERF_PAPC_SU_FACENESS_CULL = 102, +}; + +enum a2xx_sc_perfcnt_select { + SC_SR_WINDOW_VALID = 0, + SC_CW_WINDOW_VALID = 1, + SC_QM_WINDOW_VALID = 2, + SC_FW_WINDOW_VALID = 3, + SC_EZ_WINDOW_VALID = 4, + SC_IT_WINDOW_VALID = 5, + SC_STARVED_BY_PA = 6, + SC_STALLED_BY_RB_TILE = 7, + SC_STALLED_BY_RB_SAMP = 8, + SC_STARVED_BY_RB_EZ = 9, + SC_STALLED_BY_SAMPLE_FF = 10, + SC_STALLED_BY_SQ = 11, + SC_STALLED_BY_SP = 12, + SC_TOTAL_NO_PRIMS = 13, + SC_NON_EMPTY_PRIMS = 14, + SC_NO_TILES_PASSING_QM = 15, + SC_NO_PIXELS_PRE_EZ = 16, + SC_NO_PIXELS_POST_EZ = 17, +}; + +enum a2xx_vgt_perfcount_select { + VGT_SQ_EVENT_WINDOW_ACTIVE = 0, + VGT_SQ_SEND = 1, + VGT_SQ_STALLED = 2, + VGT_SQ_STARVED_BUSY = 3, + VGT_SQ_STARVED_IDLE = 4, + VGT_SQ_STATIC = 5, + VGT_PA_EVENT_WINDOW_ACTIVE = 6, + VGT_PA_CLIP_V_SEND = 7, + VGT_PA_CLIP_V_STALLED = 8, + VGT_PA_CLIP_V_STARVED_BUSY = 9, + VGT_PA_CLIP_V_STARVED_IDLE = 10, + VGT_PA_CLIP_V_STATIC = 11, + VGT_PA_CLIP_P_SEND = 12, + VGT_PA_CLIP_P_STALLED = 13, + VGT_PA_CLIP_P_STARVED_BUSY = 14, + VGT_PA_CLIP_P_STARVED_IDLE = 15, + VGT_PA_CLIP_P_STATIC = 16, + VGT_PA_CLIP_S_SEND = 17, + VGT_PA_CLIP_S_STALLED = 18, + VGT_PA_CLIP_S_STARVED_BUSY = 19, + VGT_PA_CLIP_S_STARVED_IDLE = 20, + VGT_PA_CLIP_S_STATIC = 21, + RBIU_FIFOS_EVENT_WINDOW_ACTIVE = 22, + RBIU_IMMED_DATA_FIFO_STARVED = 23, + RBIU_IMMED_DATA_FIFO_STALLED = 24, + RBIU_DMA_REQUEST_FIFO_STARVED = 25, + RBIU_DMA_REQUEST_FIFO_STALLED = 26, + RBIU_DRAW_INITIATOR_FIFO_STARVED = 27, + RBIU_DRAW_INITIATOR_FIFO_STALLED = 28, + BIN_PRIM_NEAR_CULL = 29, + BIN_PRIM_ZERO_CULL = 30, + BIN_PRIM_FAR_CULL = 31, + BIN_PRIM_BIN_CULL = 32, + BIN_PRIM_FACE_CULL = 33, + SPARE34 = 34, + SPARE35 = 35, + SPARE36 = 36, + SPARE37 = 37, + SPARE38 = 38, + SPARE39 = 39, + TE_SU_IN_VALID = 40, + TE_SU_IN_READ = 41, + TE_SU_IN_PRIM = 42, + TE_SU_IN_EOP = 43, + TE_SU_IN_NULL_PRIM = 44, + TE_WK_IN_VALID = 45, + TE_WK_IN_READ = 46, + TE_OUT_PRIM_VALID = 47, + TE_OUT_PRIM_READ = 48, +}; + +enum a2xx_tcr_perfcount_select { + DGMMPD_IPMUX0_STALL = 0, + DGMMPD_IPMUX_ALL_STALL = 4, + OPMUX0_L2_WRITES = 5, +}; + +enum a2xx_tp_perfcount_select { + POINT_QUADS = 0, + BILIN_QUADS = 1, + ANISO_QUADS = 2, + MIP_QUADS = 3, + VOL_QUADS = 4, + MIP_VOL_QUADS = 5, + MIP_ANISO_QUADS = 6, + VOL_ANISO_QUADS = 7, + ANISO_2_1_QUADS = 8, + ANISO_4_1_QUADS = 9, + ANISO_6_1_QUADS = 10, + ANISO_8_1_QUADS = 11, + ANISO_10_1_QUADS = 12, + ANISO_12_1_QUADS = 13, + ANISO_14_1_QUADS = 14, + ANISO_16_1_QUADS = 15, + MIP_VOL_ANISO_QUADS = 16, + ALIGN_2_QUADS = 17, + ALIGN_4_QUADS = 18, + PIX_0_QUAD = 19, + PIX_1_QUAD = 20, + PIX_2_QUAD = 21, + PIX_3_QUAD = 22, + PIX_4_QUAD = 23, + TP_MIPMAP_LOD0 = 24, + TP_MIPMAP_LOD1 = 25, + TP_MIPMAP_LOD2 = 26, + TP_MIPMAP_LOD3 = 27, + TP_MIPMAP_LOD4 = 28, + TP_MIPMAP_LOD5 = 29, + TP_MIPMAP_LOD6 = 30, + TP_MIPMAP_LOD7 = 31, + TP_MIPMAP_LOD8 = 32, + TP_MIPMAP_LOD9 = 33, + TP_MIPMAP_LOD10 = 34, + TP_MIPMAP_LOD11 = 35, + TP_MIPMAP_LOD12 = 36, + TP_MIPMAP_LOD13 = 37, + TP_MIPMAP_LOD14 = 38, +}; + +enum a2xx_tcm_perfcount_select { + QUAD0_RD_LAT_FIFO_EMPTY = 0, + QUAD0_RD_LAT_FIFO_4TH_FULL = 3, + QUAD0_RD_LAT_FIFO_HALF_FULL = 4, + QUAD0_RD_LAT_FIFO_FULL = 
5, + QUAD0_RD_LAT_FIFO_LT_4TH_FULL = 6, + READ_STARVED_QUAD0 = 28, + READ_STARVED = 32, + READ_STALLED_QUAD0 = 33, + READ_STALLED = 37, + VALID_READ_QUAD0 = 38, + TC_TP_STARVED_QUAD0 = 42, + TC_TP_STARVED = 46, +}; + +enum a2xx_tcf_perfcount_select { + VALID_CYCLES = 0, + SINGLE_PHASES = 1, + ANISO_PHASES = 2, + MIP_PHASES = 3, + VOL_PHASES = 4, + MIP_VOL_PHASES = 5, + MIP_ANISO_PHASES = 6, + VOL_ANISO_PHASES = 7, + ANISO_2_1_PHASES = 8, + ANISO_4_1_PHASES = 9, + ANISO_6_1_PHASES = 10, + ANISO_8_1_PHASES = 11, + ANISO_10_1_PHASES = 12, + ANISO_12_1_PHASES = 13, + ANISO_14_1_PHASES = 14, + ANISO_16_1_PHASES = 15, + MIP_VOL_ANISO_PHASES = 16, + ALIGN_2_PHASES = 17, + ALIGN_4_PHASES = 18, + TPC_BUSY = 19, + TPC_STALLED = 20, + TPC_STARVED = 21, + TPC_WORKING = 22, + TPC_WALKER_BUSY = 23, + TPC_WALKER_STALLED = 24, + TPC_WALKER_WORKING = 25, + TPC_ALIGNER_BUSY = 26, + TPC_ALIGNER_STALLED = 27, + TPC_ALIGNER_STALLED_BY_BLEND = 28, + TPC_ALIGNER_STALLED_BY_CACHE = 29, + TPC_ALIGNER_WORKING = 30, + TPC_BLEND_BUSY = 31, + TPC_BLEND_SYNC = 32, + TPC_BLEND_STARVED = 33, + TPC_BLEND_WORKING = 34, + OPCODE_0x00 = 35, + OPCODE_0x01 = 36, + OPCODE_0x04 = 37, + OPCODE_0x10 = 38, + OPCODE_0x11 = 39, + OPCODE_0x12 = 40, + OPCODE_0x13 = 41, + OPCODE_0x18 = 42, + OPCODE_0x19 = 43, + OPCODE_0x1A = 44, + OPCODE_OTHER = 45, + IN_FIFO_0_EMPTY = 56, + IN_FIFO_0_LT_HALF_FULL = 57, + IN_FIFO_0_HALF_FULL = 58, + IN_FIFO_0_FULL = 59, + IN_FIFO_TPC_EMPTY = 72, + IN_FIFO_TPC_LT_HALF_FULL = 73, + IN_FIFO_TPC_HALF_FULL = 74, + IN_FIFO_TPC_FULL = 75, + TPC_TC_XFC = 76, + TPC_TC_STATE = 77, + TC_STALL = 78, + QUAD0_TAPS = 79, + QUADS = 83, + TCA_SYNC_STALL = 84, + TAG_STALL = 85, + TCB_SYNC_STALL = 88, + TCA_VALID = 89, + PROBES_VALID = 90, + MISS_STALL = 91, + FETCH_FIFO_STALL = 92, + TCO_STALL = 93, + ANY_STALL = 94, + TAG_MISSES = 95, + TAG_HITS = 96, + SUB_TAG_MISSES = 97, + SET0_INVALIDATES = 98, + SET1_INVALIDATES = 99, + SET2_INVALIDATES = 100, + SET3_INVALIDATES = 101, + SET0_TAG_MISSES = 102, + SET1_TAG_MISSES = 103, + SET2_TAG_MISSES = 104, + SET3_TAG_MISSES = 105, + SET0_TAG_HITS = 106, + SET1_TAG_HITS = 107, + SET2_TAG_HITS = 108, + SET3_TAG_HITS = 109, + SET0_SUB_TAG_MISSES = 110, + SET1_SUB_TAG_MISSES = 111, + SET2_SUB_TAG_MISSES = 112, + SET3_SUB_TAG_MISSES = 113, + SET0_EVICT1 = 114, + SET0_EVICT2 = 115, + SET0_EVICT3 = 116, + SET0_EVICT4 = 117, + SET0_EVICT5 = 118, + SET0_EVICT6 = 119, + SET0_EVICT7 = 120, + SET0_EVICT8 = 121, + SET1_EVICT1 = 130, + SET1_EVICT2 = 131, + SET1_EVICT3 = 132, + SET1_EVICT4 = 133, + SET1_EVICT5 = 134, + SET1_EVICT6 = 135, + SET1_EVICT7 = 136, + SET1_EVICT8 = 137, + SET2_EVICT1 = 146, + SET2_EVICT2 = 147, + SET2_EVICT3 = 148, + SET2_EVICT4 = 149, + SET2_EVICT5 = 150, + SET2_EVICT6 = 151, + SET2_EVICT7 = 152, + SET2_EVICT8 = 153, + SET3_EVICT1 = 162, + SET3_EVICT2 = 163, + SET3_EVICT3 = 164, + SET3_EVICT4 = 165, + SET3_EVICT5 = 166, + SET3_EVICT6 = 167, + SET3_EVICT7 = 168, + SET3_EVICT8 = 169, + FF_EMPTY = 178, + FF_LT_HALF_FULL = 179, + FF_HALF_FULL = 180, + FF_FULL = 181, + FF_XFC = 182, + FF_STALLED = 183, + FG_MASKS = 184, + FG_LEFT_MASKS = 185, + FG_LEFT_MASK_STALLED = 186, + FG_LEFT_NOT_DONE_STALL = 187, + FG_LEFT_FG_STALL = 188, + FG_LEFT_SECTORS = 189, + FG0_REQUESTS = 195, + FG0_STALLED = 196, + MEM_REQ512 = 199, + MEM_REQ_SENT = 200, + MEM_LOCAL_READ_REQ = 202, + TC0_MH_STALLED = 203, +}; + +enum a2xx_sq_perfcnt_select { + SQ_PIXEL_VECTORS_SUB = 0, + SQ_VERTEX_VECTORS_SUB = 1, + SQ_ALU0_ACTIVE_VTX_SIMD0 = 2, + SQ_ALU1_ACTIVE_VTX_SIMD0 = 3, + SQ_ALU0_ACTIVE_PIX_SIMD0 = 
4, + SQ_ALU1_ACTIVE_PIX_SIMD0 = 5, + SQ_ALU0_ACTIVE_VTX_SIMD1 = 6, + SQ_ALU1_ACTIVE_VTX_SIMD1 = 7, + SQ_ALU0_ACTIVE_PIX_SIMD1 = 8, + SQ_ALU1_ACTIVE_PIX_SIMD1 = 9, + SQ_EXPORT_CYCLES = 10, + SQ_ALU_CST_WRITTEN = 11, + SQ_TEX_CST_WRITTEN = 12, + SQ_ALU_CST_STALL = 13, + SQ_ALU_TEX_STALL = 14, + SQ_INST_WRITTEN = 15, + SQ_BOOLEAN_WRITTEN = 16, + SQ_LOOPS_WRITTEN = 17, + SQ_PIXEL_SWAP_IN = 18, + SQ_PIXEL_SWAP_OUT = 19, + SQ_VERTEX_SWAP_IN = 20, + SQ_VERTEX_SWAP_OUT = 21, + SQ_ALU_VTX_INST_ISSUED = 22, + SQ_TEX_VTX_INST_ISSUED = 23, + SQ_VC_VTX_INST_ISSUED = 24, + SQ_CF_VTX_INST_ISSUED = 25, + SQ_ALU_PIX_INST_ISSUED = 26, + SQ_TEX_PIX_INST_ISSUED = 27, + SQ_VC_PIX_INST_ISSUED = 28, + SQ_CF_PIX_INST_ISSUED = 29, + SQ_ALU0_FIFO_EMPTY_SIMD0 = 30, + SQ_ALU1_FIFO_EMPTY_SIMD0 = 31, + SQ_ALU0_FIFO_EMPTY_SIMD1 = 32, + SQ_ALU1_FIFO_EMPTY_SIMD1 = 33, + SQ_ALU_NOPS = 34, + SQ_PRED_SKIP = 35, + SQ_SYNC_ALU_STALL_SIMD0_VTX = 36, + SQ_SYNC_ALU_STALL_SIMD1_VTX = 37, + SQ_SYNC_TEX_STALL_VTX = 38, + SQ_SYNC_VC_STALL_VTX = 39, + SQ_CONSTANTS_USED_SIMD0 = 40, + SQ_CONSTANTS_SENT_SP_SIMD0 = 41, + SQ_GPR_STALL_VTX = 42, + SQ_GPR_STALL_PIX = 43, + SQ_VTX_RS_STALL = 44, + SQ_PIX_RS_STALL = 45, + SQ_SX_PC_FULL = 46, + SQ_SX_EXP_BUFF_FULL = 47, + SQ_SX_POS_BUFF_FULL = 48, + SQ_INTERP_QUADS = 49, + SQ_INTERP_ACTIVE = 50, + SQ_IN_PIXEL_STALL = 51, + SQ_IN_VTX_STALL = 52, + SQ_VTX_CNT = 53, + SQ_VTX_VECTOR2 = 54, + SQ_VTX_VECTOR3 = 55, + SQ_VTX_VECTOR4 = 56, + SQ_PIXEL_VECTOR1 = 57, + SQ_PIXEL_VECTOR23 = 58, + SQ_PIXEL_VECTOR4 = 59, + SQ_CONSTANTS_USED_SIMD1 = 60, + SQ_CONSTANTS_SENT_SP_SIMD1 = 61, + SQ_SX_MEM_EXP_FULL = 62, + SQ_ALU0_ACTIVE_VTX_SIMD2 = 63, + SQ_ALU1_ACTIVE_VTX_SIMD2 = 64, + SQ_ALU0_ACTIVE_PIX_SIMD2 = 65, + SQ_ALU1_ACTIVE_PIX_SIMD2 = 66, + SQ_ALU0_ACTIVE_VTX_SIMD3 = 67, + SQ_PERFCOUNT_VTX_QUAL_TP_DONE = 68, + SQ_ALU0_ACTIVE_PIX_SIMD3 = 69, + SQ_PERFCOUNT_PIX_QUAL_TP_DONE = 70, + SQ_ALU0_FIFO_EMPTY_SIMD2 = 71, + SQ_ALU1_FIFO_EMPTY_SIMD2 = 72, + SQ_ALU0_FIFO_EMPTY_SIMD3 = 73, + SQ_ALU1_FIFO_EMPTY_SIMD3 = 74, + SQ_SYNC_ALU_STALL_SIMD2_VTX = 75, + SQ_PERFCOUNT_VTX_POP_THREAD = 76, + SQ_SYNC_ALU_STALL_SIMD0_PIX = 77, + SQ_SYNC_ALU_STALL_SIMD1_PIX = 78, + SQ_SYNC_ALU_STALL_SIMD2_PIX = 79, + SQ_PERFCOUNT_PIX_POP_THREAD = 80, + SQ_SYNC_TEX_STALL_PIX = 81, + SQ_SYNC_VC_STALL_PIX = 82, + SQ_CONSTANTS_USED_SIMD2 = 83, + SQ_CONSTANTS_SENT_SP_SIMD2 = 84, + SQ_PERFCOUNT_VTX_DEALLOC_ACK = 85, + SQ_PERFCOUNT_PIX_DEALLOC_ACK = 86, + SQ_ALU0_FIFO_FULL_SIMD0 = 87, + SQ_ALU1_FIFO_FULL_SIMD0 = 88, + SQ_ALU0_FIFO_FULL_SIMD1 = 89, + SQ_ALU1_FIFO_FULL_SIMD1 = 90, + SQ_ALU0_FIFO_FULL_SIMD2 = 91, + SQ_ALU1_FIFO_FULL_SIMD2 = 92, + SQ_ALU0_FIFO_FULL_SIMD3 = 93, + SQ_ALU1_FIFO_FULL_SIMD3 = 94, + VC_PERF_STATIC = 95, + VC_PERF_STALLED = 96, + VC_PERF_STARVED = 97, + VC_PERF_SEND = 98, + VC_PERF_ACTUAL_STARVED = 99, + PIXEL_THREAD_0_ACTIVE = 100, + VERTEX_THREAD_0_ACTIVE = 101, + PIXEL_THREAD_0_NUMBER = 102, + VERTEX_THREAD_0_NUMBER = 103, + VERTEX_EVENT_NUMBER = 104, + PIXEL_EVENT_NUMBER = 105, + PTRBUFF_EF_PUSH = 106, + PTRBUFF_EF_POP_EVENT = 107, + PTRBUFF_EF_POP_NEW_VTX = 108, + PTRBUFF_EF_POP_DEALLOC = 109, + PTRBUFF_EF_POP_PVECTOR = 110, + PTRBUFF_EF_POP_PVECTOR_X = 111, + PTRBUFF_EF_POP_PVECTOR_VNZ = 112, + PTRBUFF_PB_DEALLOC = 113, + PTRBUFF_PI_STATE_PPB_POP = 114, + PTRBUFF_PI_RTR = 115, + PTRBUFF_PI_READ_EN = 116, + PTRBUFF_PI_BUFF_SWAP = 117, + PTRBUFF_SQ_FREE_BUFF = 118, + PTRBUFF_SQ_DEC = 119, + PTRBUFF_SC_VALID_CNTL_EVENT = 120, + PTRBUFF_SC_VALID_IJ_XFER = 121, + PTRBUFF_SC_NEW_VECTOR_1_Q = 122, + PTRBUFF_QUAL_NEW_VECTOR 
= 123, + PTRBUFF_QUAL_EVENT = 124, + PTRBUFF_END_BUFFER = 125, + PTRBUFF_FILL_QUAD = 126, + VERTS_WRITTEN_SPI = 127, + TP_FETCH_INSTR_EXEC = 128, + TP_FETCH_INSTR_REQ = 129, + TP_DATA_RETURN = 130, + SPI_WRITE_CYCLES_SP = 131, + SPI_WRITES_SP = 132, + SP_ALU_INSTR_EXEC = 133, + SP_CONST_ADDR_TO_SQ = 134, + SP_PRED_KILLS_TO_SQ = 135, + SP_EXPORT_CYCLES_TO_SX = 136, + SP_EXPORTS_TO_SX = 137, + SQ_CYCLES_ELAPSED = 138, + SQ_TCFS_OPT_ALLOC_EXEC = 139, + SQ_TCFS_NO_OPT_ALLOC = 140, + SQ_ALU0_NO_OPT_ALLOC = 141, + SQ_ALU1_NO_OPT_ALLOC = 142, + SQ_TCFS_ARB_XFC_CNT = 143, + SQ_ALU0_ARB_XFC_CNT = 144, + SQ_ALU1_ARB_XFC_CNT = 145, + SQ_TCFS_CFS_UPDATE_CNT = 146, + SQ_ALU0_CFS_UPDATE_CNT = 147, + SQ_ALU1_CFS_UPDATE_CNT = 148, + SQ_VTX_PUSH_THREAD_CNT = 149, + SQ_VTX_POP_THREAD_CNT = 150, + SQ_PIX_PUSH_THREAD_CNT = 151, + SQ_PIX_POP_THREAD_CNT = 152, + SQ_PIX_TOTAL = 153, + SQ_PIX_KILLED = 154, +}; + +enum a2xx_sx_perfcnt_select { + SX_EXPORT_VECTORS = 0, + SX_DUMMY_QUADS = 1, + SX_ALPHA_FAIL = 2, + SX_RB_QUAD_BUSY = 3, + SX_RB_COLOR_BUSY = 4, + SX_RB_QUAD_STALL = 5, + SX_RB_COLOR_STALL = 6, +}; + +enum a2xx_rbbm_perfcount1_sel { + RBBM1_COUNT = 0, + RBBM1_NRT_BUSY = 1, + RBBM1_RB_BUSY = 2, + RBBM1_SQ_CNTX0_BUSY = 3, + RBBM1_SQ_CNTX17_BUSY = 4, + RBBM1_VGT_BUSY = 5, + RBBM1_VGT_NODMA_BUSY = 6, + RBBM1_PA_BUSY = 7, + RBBM1_SC_CNTX_BUSY = 8, + RBBM1_TPC_BUSY = 9, + RBBM1_TC_BUSY = 10, + RBBM1_SX_BUSY = 11, + RBBM1_CP_COHER_BUSY = 12, + RBBM1_CP_NRT_BUSY = 13, + RBBM1_GFX_IDLE_STALL = 14, + RBBM1_INTERRUPT = 15, +}; + +enum a2xx_cp_perfcount_sel { + ALWAYS_COUNT = 0, + TRANS_FIFO_FULL = 1, + TRANS_FIFO_AF = 2, + RCIU_PFPTRANS_WAIT = 3, + RCIU_NRTTRANS_WAIT = 6, + CSF_NRT_READ_WAIT = 8, + CSF_I1_FIFO_FULL = 9, + CSF_I2_FIFO_FULL = 10, + CSF_ST_FIFO_FULL = 11, + CSF_RING_ROQ_FULL = 13, + CSF_I1_ROQ_FULL = 14, + CSF_I2_ROQ_FULL = 15, + CSF_ST_ROQ_FULL = 16, + MIU_TAG_MEM_FULL = 18, + MIU_WRITECLEAN = 19, + MIU_NRT_WRITE_STALLED = 22, + MIU_NRT_READ_STALLED = 23, + ME_WRITE_CONFIRM_FIFO_FULL = 24, + ME_VS_DEALLOC_FIFO_FULL = 25, + ME_PS_DEALLOC_FIFO_FULL = 26, + ME_REGS_VS_EVENT_FIFO_FULL = 27, + ME_REGS_PS_EVENT_FIFO_FULL = 28, + ME_REGS_CF_EVENT_FIFO_FULL = 29, + ME_MICRO_RB_STARVED = 30, + ME_MICRO_I1_STARVED = 31, + ME_MICRO_I2_STARVED = 32, + ME_MICRO_ST_STARVED = 33, + RCIU_RBBM_DWORD_SENT = 40, + ME_BUSY_CLOCKS = 41, + ME_WAIT_CONTEXT_AVAIL = 42, + PFP_TYPE0_PACKET = 43, + PFP_TYPE3_PACKET = 44, + CSF_RB_WPTR_NEQ_RPTR = 45, + CSF_I1_SIZE_NEQ_ZERO = 46, + CSF_I2_SIZE_NEQ_ZERO = 47, + CSF_RBI1I2_FETCHING = 48, +}; + +enum a2xx_rb_perfcnt_select { + RBPERF_CNTX_BUSY = 0, + RBPERF_CNTX_BUSY_MAX = 1, + RBPERF_SX_QUAD_STARVED = 2, + RBPERF_SX_QUAD_STARVED_MAX = 3, + RBPERF_GA_GC_CH0_SYS_REQ = 4, + RBPERF_GA_GC_CH0_SYS_REQ_MAX = 5, + RBPERF_GA_GC_CH1_SYS_REQ = 6, + RBPERF_GA_GC_CH1_SYS_REQ_MAX = 7, + RBPERF_MH_STARVED = 8, + RBPERF_MH_STARVED_MAX = 9, + RBPERF_AZ_BC_COLOR_BUSY = 10, + RBPERF_AZ_BC_COLOR_BUSY_MAX = 11, + RBPERF_AZ_BC_Z_BUSY = 12, + RBPERF_AZ_BC_Z_BUSY_MAX = 13, + RBPERF_RB_SC_TILE_RTR_N = 14, + RBPERF_RB_SC_TILE_RTR_N_MAX = 15, + RBPERF_RB_SC_SAMP_RTR_N = 16, + RBPERF_RB_SC_SAMP_RTR_N_MAX = 17, + RBPERF_RB_SX_QUAD_RTR_N = 18, + RBPERF_RB_SX_QUAD_RTR_N_MAX = 19, + RBPERF_RB_SX_COLOR_RTR_N = 20, + RBPERF_RB_SX_COLOR_RTR_N_MAX = 21, + RBPERF_RB_SC_SAMP_LZ_BUSY = 22, + RBPERF_RB_SC_SAMP_LZ_BUSY_MAX = 23, + RBPERF_ZXP_STALL = 24, + RBPERF_ZXP_STALL_MAX = 25, + RBPERF_EVENT_PENDING = 26, + RBPERF_EVENT_PENDING_MAX = 27, + RBPERF_RB_MH_VALID = 28, + RBPERF_RB_MH_VALID_MAX = 29, + 
RBPERF_SX_RB_QUAD_SEND = 30, + RBPERF_SX_RB_COLOR_SEND = 31, + RBPERF_SC_RB_TILE_SEND = 32, + RBPERF_SC_RB_SAMPLE_SEND = 33, + RBPERF_SX_RB_MEM_EXPORT = 34, + RBPERF_SX_RB_QUAD_EVENT = 35, + RBPERF_SC_RB_TILE_EVENT_FILTERED = 36, + RBPERF_SC_RB_TILE_EVENT_ALL = 37, + RBPERF_RB_SC_EZ_SEND = 38, + RBPERF_RB_SX_INDEX_SEND = 39, + RBPERF_GMEM_INTFO_RD = 40, + RBPERF_GMEM_INTF1_RD = 41, + RBPERF_GMEM_INTFO_WR = 42, + RBPERF_GMEM_INTF1_WR = 43, + RBPERF_RB_CP_CONTEXT_DONE = 44, + RBPERF_RB_CP_CACHE_FLUSH = 45, + RBPERF_ZPASS_DONE = 46, + RBPERF_ZCMD_VALID = 47, + RBPERF_CCMD_VALID = 48, + RBPERF_ACCUM_GRANT = 49, + RBPERF_ACCUM_C0_GRANT = 50, + RBPERF_ACCUM_C1_GRANT = 51, + RBPERF_ACCUM_FULL_BE_WR = 52, + RBPERF_ACCUM_REQUEST_NO_GRANT = 53, + RBPERF_ACCUM_TIMEOUT_PULSE = 54, + RBPERF_ACCUM_LIN_TIMEOUT_PULSE = 55, + RBPERF_ACCUM_CAM_HIT_FLUSHING = 56, +}; + +enum a2xx_mh_perfcnt_select { + CP_R0_REQUESTS = 0, + CP_R1_REQUESTS = 1, + CP_R2_REQUESTS = 2, + CP_R3_REQUESTS = 3, + CP_R4_REQUESTS = 4, + CP_TOTAL_READ_REQUESTS = 5, + CP_TOTAL_WRITE_REQUESTS = 6, + CP_TOTAL_REQUESTS = 7, + CP_DATA_BYTES_WRITTEN = 8, + CP_WRITE_CLEAN_RESPONSES = 9, + CP_R0_READ_BURSTS_RECEIVED = 10, + CP_R1_READ_BURSTS_RECEIVED = 11, + CP_R2_READ_BURSTS_RECEIVED = 12, + CP_R3_READ_BURSTS_RECEIVED = 13, + CP_R4_READ_BURSTS_RECEIVED = 14, + CP_TOTAL_READ_BURSTS_RECEIVED = 15, + CP_R0_DATA_BEATS_READ = 16, + CP_R1_DATA_BEATS_READ = 17, + CP_R2_DATA_BEATS_READ = 18, + CP_R3_DATA_BEATS_READ = 19, + CP_R4_DATA_BEATS_READ = 20, + CP_TOTAL_DATA_BEATS_READ = 21, + VGT_R0_REQUESTS = 22, + VGT_R1_REQUESTS = 23, + VGT_TOTAL_REQUESTS = 24, + VGT_R0_READ_BURSTS_RECEIVED = 25, + VGT_R1_READ_BURSTS_RECEIVED = 26, + VGT_TOTAL_READ_BURSTS_RECEIVED = 27, + VGT_R0_DATA_BEATS_READ = 28, + VGT_R1_DATA_BEATS_READ = 29, + VGT_TOTAL_DATA_BEATS_READ = 30, + TC_TOTAL_REQUESTS = 31, + TC_ROQ_REQUESTS = 32, + TC_INFO_SENT = 33, + TC_READ_BURSTS_RECEIVED = 34, + TC_DATA_BEATS_READ = 35, + TCD_BURSTS_READ = 36, + RB_REQUESTS = 37, + RB_DATA_BYTES_WRITTEN = 38, + RB_WRITE_CLEAN_RESPONSES = 39, + AXI_READ_REQUESTS_ID_0 = 40, + AXI_READ_REQUESTS_ID_1 = 41, + AXI_READ_REQUESTS_ID_2 = 42, + AXI_READ_REQUESTS_ID_3 = 43, + AXI_READ_REQUESTS_ID_4 = 44, + AXI_READ_REQUESTS_ID_5 = 45, + AXI_READ_REQUESTS_ID_6 = 46, + AXI_READ_REQUESTS_ID_7 = 47, + AXI_TOTAL_READ_REQUESTS = 48, + AXI_WRITE_REQUESTS_ID_0 = 49, + AXI_WRITE_REQUESTS_ID_1 = 50, + AXI_WRITE_REQUESTS_ID_2 = 51, + AXI_WRITE_REQUESTS_ID_3 = 52, + AXI_WRITE_REQUESTS_ID_4 = 53, + AXI_WRITE_REQUESTS_ID_5 = 54, + AXI_WRITE_REQUESTS_ID_6 = 55, + AXI_WRITE_REQUESTS_ID_7 = 56, + AXI_TOTAL_WRITE_REQUESTS = 57, + AXI_TOTAL_REQUESTS_ID_0 = 58, + AXI_TOTAL_REQUESTS_ID_1 = 59, + AXI_TOTAL_REQUESTS_ID_2 = 60, + AXI_TOTAL_REQUESTS_ID_3 = 61, + AXI_TOTAL_REQUESTS_ID_4 = 62, + AXI_TOTAL_REQUESTS_ID_5 = 63, + AXI_TOTAL_REQUESTS_ID_6 = 64, + AXI_TOTAL_REQUESTS_ID_7 = 65, + AXI_TOTAL_REQUESTS = 66, + AXI_READ_CHANNEL_BURSTS_ID_0 = 67, + AXI_READ_CHANNEL_BURSTS_ID_1 = 68, + AXI_READ_CHANNEL_BURSTS_ID_2 = 69, + AXI_READ_CHANNEL_BURSTS_ID_3 = 70, + AXI_READ_CHANNEL_BURSTS_ID_4 = 71, + AXI_READ_CHANNEL_BURSTS_ID_5 = 72, + AXI_READ_CHANNEL_BURSTS_ID_6 = 73, + AXI_READ_CHANNEL_BURSTS_ID_7 = 74, + AXI_READ_CHANNEL_TOTAL_BURSTS = 75, + AXI_READ_CHANNEL_DATA_BEATS_READ_ID_0 = 76, + AXI_READ_CHANNEL_DATA_BEATS_READ_ID_1 = 77, + AXI_READ_CHANNEL_DATA_BEATS_READ_ID_2 = 78, + AXI_READ_CHANNEL_DATA_BEATS_READ_ID_3 = 79, + AXI_READ_CHANNEL_DATA_BEATS_READ_ID_4 = 80, + AXI_READ_CHANNEL_DATA_BEATS_READ_ID_5 = 81, + 
AXI_READ_CHANNEL_DATA_BEATS_READ_ID_6 = 82, + AXI_READ_CHANNEL_DATA_BEATS_READ_ID_7 = 83, + AXI_READ_CHANNEL_TOTAL_DATA_BEATS_READ = 84, + AXI_WRITE_CHANNEL_BURSTS_ID_0 = 85, + AXI_WRITE_CHANNEL_BURSTS_ID_1 = 86, + AXI_WRITE_CHANNEL_BURSTS_ID_2 = 87, + AXI_WRITE_CHANNEL_BURSTS_ID_3 = 88, + AXI_WRITE_CHANNEL_BURSTS_ID_4 = 89, + AXI_WRITE_CHANNEL_BURSTS_ID_5 = 90, + AXI_WRITE_CHANNEL_BURSTS_ID_6 = 91, + AXI_WRITE_CHANNEL_BURSTS_ID_7 = 92, + AXI_WRITE_CHANNEL_TOTAL_BURSTS = 93, + AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_0 = 94, + AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_1 = 95, + AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_2 = 96, + AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_3 = 97, + AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_4 = 98, + AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_5 = 99, + AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_6 = 100, + AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_7 = 101, + AXI_WRITE_CHANNEL_TOTAL_DATA_BYTES_WRITTEN = 102, + AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_0 = 103, + AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_1 = 104, + AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_2 = 105, + AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_3 = 106, + AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_4 = 107, + AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_5 = 108, + AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_6 = 109, + AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_7 = 110, + AXI_WRITE_RESPONSE_CHANNEL_TOTAL_RESPONSES = 111, + TOTAL_MMU_MISSES = 112, + MMU_READ_MISSES = 113, + MMU_WRITE_MISSES = 114, + TOTAL_MMU_HITS = 115, + MMU_READ_HITS = 116, + MMU_WRITE_HITS = 117, + SPLIT_MODE_TC_HITS = 118, + SPLIT_MODE_TC_MISSES = 119, + SPLIT_MODE_NON_TC_HITS = 120, + SPLIT_MODE_NON_TC_MISSES = 121, + STALL_AWAITING_TLB_MISS_FETCH = 122, + MMU_TLB_MISS_READ_BURSTS_RECEIVED = 123, + MMU_TLB_MISS_DATA_BEATS_READ = 124, + CP_CYCLES_HELD_OFF = 125, + VGT_CYCLES_HELD_OFF = 126, + TC_CYCLES_HELD_OFF = 127, + TC_ROQ_CYCLES_HELD_OFF = 128, + TC_CYCLES_HELD_OFF_TCD_FULL = 129, + RB_CYCLES_HELD_OFF = 130, + TOTAL_CYCLES_ANY_CLNT_HELD_OFF = 131, + TLB_MISS_CYCLES_HELD_OFF = 132, + AXI_READ_REQUEST_HELD_OFF = 133, + AXI_WRITE_REQUEST_HELD_OFF = 134, + AXI_REQUEST_HELD_OFF = 135, + AXI_REQUEST_HELD_OFF_INFLIGHT_LIMIT = 136, + AXI_WRITE_DATA_HELD_OFF = 137, + CP_SAME_PAGE_BANK_REQUESTS = 138, + VGT_SAME_PAGE_BANK_REQUESTS = 139, + TC_SAME_PAGE_BANK_REQUESTS = 140, + TC_ARB_HOLD_SAME_PAGE_BANK_REQUESTS = 141, + RB_SAME_PAGE_BANK_REQUESTS = 142, + TOTAL_SAME_PAGE_BANK_REQUESTS = 143, + CP_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 144, + VGT_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 145, + TC_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 146, + RB_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 147, + TOTAL_SAME_PAGE_BANK_KILLED_FAIRNESS_LIMIT = 148, + TOTAL_MH_READ_REQUESTS = 149, + TOTAL_MH_WRITE_REQUESTS = 150, + TOTAL_MH_REQUESTS = 151, + MH_BUSY = 152, + CP_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 153, + VGT_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 154, + TC_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 155, + RB_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 156, + TC_ROQ_N_VALID_ENTRIES = 157, + ARQ_N_ENTRIES = 158, + WDB_N_ENTRIES = 159, + MH_READ_LATENCY_OUTST_REQ_SUM = 160, + MC_READ_LATENCY_OUTST_REQ_SUM = 161, + MC_TOTAL_READ_REQUESTS = 162, + ELAPSED_CYCLES_MH_GATED_CLK = 163, + ELAPSED_CLK_CYCLES = 164, + CP_W_16B_REQUESTS = 165, + CP_W_32B_REQUESTS = 166, + TC_16B_REQUESTS = 167, + TC_32B_REQUESTS = 168, + PA_REQUESTS = 169, + PA_DATA_BYTES_WRITTEN = 170, + PA_WRITE_CLEAN_RESPONSES = 171, + PA_CYCLES_HELD_OFF = 172, + AXI_READ_REQUEST_DATA_BEATS_ID_0 = 173, + 
AXI_READ_REQUEST_DATA_BEATS_ID_1 = 174, + AXI_READ_REQUEST_DATA_BEATS_ID_2 = 175, + AXI_READ_REQUEST_DATA_BEATS_ID_3 = 176, + AXI_READ_REQUEST_DATA_BEATS_ID_4 = 177, + AXI_READ_REQUEST_DATA_BEATS_ID_5 = 178, + AXI_READ_REQUEST_DATA_BEATS_ID_6 = 179, + AXI_READ_REQUEST_DATA_BEATS_ID_7 = 180, + AXI_TOTAL_READ_REQUEST_DATA_BEATS = 181, +}; + +enum adreno_mmu_clnt_beh { + BEH_NEVR = 0, + BEH_TRAN_RNG = 1, + BEH_TRAN_FLT = 2, +}; + +enum sq_tex_clamp { + SQ_TEX_WRAP = 0, + SQ_TEX_MIRROR = 1, + SQ_TEX_CLAMP_LAST_TEXEL = 2, + SQ_TEX_MIRROR_ONCE_LAST_TEXEL = 3, + SQ_TEX_CLAMP_HALF_BORDER = 4, + SQ_TEX_MIRROR_ONCE_HALF_BORDER = 5, + SQ_TEX_CLAMP_BORDER = 6, + SQ_TEX_MIRROR_ONCE_BORDER = 7, +}; + +enum sq_tex_swiz { + SQ_TEX_X = 0, + SQ_TEX_Y = 1, + SQ_TEX_Z = 2, + SQ_TEX_W = 3, + SQ_TEX_ZERO = 4, + SQ_TEX_ONE = 5, +}; + +enum sq_tex_filter { + SQ_TEX_FILTER_POINT = 0, + SQ_TEX_FILTER_BILINEAR = 1, + SQ_TEX_FILTER_BASEMAP = 2, + SQ_TEX_FILTER_USE_FETCH_CONST = 3, +}; + +enum sq_tex_aniso_filter { + SQ_TEX_ANISO_FILTER_DISABLED = 0, + SQ_TEX_ANISO_FILTER_MAX_1_1 = 1, + SQ_TEX_ANISO_FILTER_MAX_2_1 = 2, + SQ_TEX_ANISO_FILTER_MAX_4_1 = 3, + SQ_TEX_ANISO_FILTER_MAX_8_1 = 4, + SQ_TEX_ANISO_FILTER_MAX_16_1 = 5, + SQ_TEX_ANISO_FILTER_USE_FETCH_CONST = 7, +}; + +enum sq_tex_dimension { + SQ_TEX_DIMENSION_1D = 0, + SQ_TEX_DIMENSION_2D = 1, + SQ_TEX_DIMENSION_3D = 2, + SQ_TEX_DIMENSION_CUBE = 3, +}; + +enum sq_tex_border_color { + SQ_TEX_BORDER_COLOR_BLACK = 0, + SQ_TEX_BORDER_COLOR_WHITE = 1, + SQ_TEX_BORDER_COLOR_ACBYCR_BLACK = 2, + SQ_TEX_BORDER_COLOR_ACBCRY_BLACK = 3, +}; + +enum sq_tex_sign { + SQ_TEX_SIGN_UNSIGNED = 0, + SQ_TEX_SIGN_SIGNED = 1, + SQ_TEX_SIGN_UNSIGNED_BIASED = 2, + SQ_TEX_SIGN_GAMMA = 3, +}; + +enum sq_tex_endian { + SQ_TEX_ENDIAN_NONE = 0, + SQ_TEX_ENDIAN_8IN16 = 1, + SQ_TEX_ENDIAN_8IN32 = 2, + SQ_TEX_ENDIAN_16IN32 = 3, +}; + +enum sq_tex_clamp_policy { + SQ_TEX_CLAMP_POLICY_D3D = 0, + SQ_TEX_CLAMP_POLICY_OGL = 1, +}; + +enum sq_tex_num_format { + SQ_TEX_NUM_FORMAT_FRAC = 0, + SQ_TEX_NUM_FORMAT_INT = 1, +}; + +enum sq_tex_type { + SQ_TEX_TYPE_0 = 0, + SQ_TEX_TYPE_1 = 1, + SQ_TEX_TYPE_2 = 2, + SQ_TEX_TYPE_3 = 3, +}; + +#define REG_A2XX_RBBM_PATCH_RELEASE 0x00000001 + +#define REG_A2XX_RBBM_CNTL 0x0000003b + +#define REG_A2XX_RBBM_SOFT_RESET 0x0000003c + +#define REG_A2XX_CP_PFP_UCODE_ADDR 0x000000c0 + +#define REG_A2XX_CP_PFP_UCODE_DATA 0x000000c1 + +#define REG_A2XX_MH_MMU_CONFIG 0x00000040 +#define A2XX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001 +#define A2XX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002 +#define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030 +#define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4 +static inline uint32_t A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) +{ + return ((val) << A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK; +} +#define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0 +#define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6 +static inline uint32_t A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) +{ + return ((val) << A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK; +} +#define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300 +#define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8 +static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) +{ + return ((val) << A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK; +} +#define 
A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00 +#define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10 +static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) +{ + return ((val) << A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK; +} +#define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000 +#define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12 +static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) +{ + return ((val) << A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK; +} +#define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000 +#define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14 +static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) +{ + return ((val) << A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK; +} +#define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000 +#define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16 +static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) +{ + return ((val) << A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK; +} +#define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000 +#define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18 +static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) +{ + return ((val) << A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK; +} +#define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000 +#define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20 +static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) +{ + return ((val) << A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK; +} +#define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000 +#define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22 +static inline uint32_t A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) +{ + return ((val) << A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK; +} +#define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000 +#define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24 +static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val) +{ + return ((val) << A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK; +} + +#define REG_A2XX_MH_MMU_VA_RANGE 0x00000041 +#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK 0x00000fff +#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT 0 +static inline uint32_t A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(uint32_t val) +{ + return ((val) << A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT) & A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK; +} +#define A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK 0xfffff000 +#define A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT 12 +static inline uint32_t A2XX_MH_MMU_VA_RANGE_VA_BASE(uint32_t val) +{ + return ((val) << A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT) & A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK; +} + +#define REG_A2XX_MH_MMU_PT_BASE 0x00000042 + +#define REG_A2XX_MH_MMU_PAGE_FAULT 0x00000043 + +#define REG_A2XX_MH_MMU_TRAN_ERROR 0x00000044 + +#define REG_A2XX_MH_MMU_INVALIDATE 0x00000045 +#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL 
0x00000001 +#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC 0x00000002 + +#define REG_A2XX_MH_MMU_MPU_BASE 0x00000046 + +#define REG_A2XX_MH_MMU_MPU_END 0x00000047 + +#define REG_A2XX_NQWAIT_UNTIL 0x00000394 + +#define REG_A2XX_RBBM_PERFCOUNTER0_SELECT 0x00000395 + +#define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000396 + +#define REG_A2XX_RBBM_PERFCOUNTER0_LO 0x00000397 + +#define REG_A2XX_RBBM_PERFCOUNTER0_HI 0x00000398 + +#define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000399 + +#define REG_A2XX_RBBM_PERFCOUNTER1_HI 0x0000039a + +#define REG_A2XX_RBBM_DEBUG 0x0000039b + +#define REG_A2XX_RBBM_PM_OVERRIDE1 0x0000039c +#define A2XX_RBBM_PM_OVERRIDE1_RBBM_AHBCLK_PM_OVERRIDE 0x00000001 +#define A2XX_RBBM_PM_OVERRIDE1_SC_REG_SCLK_PM_OVERRIDE 0x00000002 +#define A2XX_RBBM_PM_OVERRIDE1_SC_SCLK_PM_OVERRIDE 0x00000004 +#define A2XX_RBBM_PM_OVERRIDE1_SP_TOP_SCLK_PM_OVERRIDE 0x00000008 +#define A2XX_RBBM_PM_OVERRIDE1_SP_V0_SCLK_PM_OVERRIDE 0x00000010 +#define A2XX_RBBM_PM_OVERRIDE1_SQ_REG_SCLK_PM_OVERRIDE 0x00000020 +#define A2XX_RBBM_PM_OVERRIDE1_SQ_REG_FIFOS_SCLK_PM_OVERRIDE 0x00000040 +#define A2XX_RBBM_PM_OVERRIDE1_SQ_CONST_MEM_SCLK_PM_OVERRIDE 0x00000080 +#define A2XX_RBBM_PM_OVERRIDE1_SQ_SQ_SCLK_PM_OVERRIDE 0x00000100 +#define A2XX_RBBM_PM_OVERRIDE1_SX_SCLK_PM_OVERRIDE 0x00000200 +#define A2XX_RBBM_PM_OVERRIDE1_SX_REG_SCLK_PM_OVERRIDE 0x00000400 +#define A2XX_RBBM_PM_OVERRIDE1_TCM_TCO_SCLK_PM_OVERRIDE 0x00000800 +#define A2XX_RBBM_PM_OVERRIDE1_TCM_TCM_SCLK_PM_OVERRIDE 0x00001000 +#define A2XX_RBBM_PM_OVERRIDE1_TCM_TCD_SCLK_PM_OVERRIDE 0x00002000 +#define A2XX_RBBM_PM_OVERRIDE1_TCM_REG_SCLK_PM_OVERRIDE 0x00004000 +#define A2XX_RBBM_PM_OVERRIDE1_TPC_TPC_SCLK_PM_OVERRIDE 0x00008000 +#define A2XX_RBBM_PM_OVERRIDE1_TPC_REG_SCLK_PM_OVERRIDE 0x00010000 +#define A2XX_RBBM_PM_OVERRIDE1_TCF_TCA_SCLK_PM_OVERRIDE 0x00020000 +#define A2XX_RBBM_PM_OVERRIDE1_TCF_TCB_SCLK_PM_OVERRIDE 0x00040000 +#define A2XX_RBBM_PM_OVERRIDE1_TCF_TCB_READ_SCLK_PM_OVERRIDE 0x00080000 +#define A2XX_RBBM_PM_OVERRIDE1_TP_TP_SCLK_PM_OVERRIDE 0x00100000 +#define A2XX_RBBM_PM_OVERRIDE1_TP_REG_SCLK_PM_OVERRIDE 0x00200000 +#define A2XX_RBBM_PM_OVERRIDE1_CP_G_SCLK_PM_OVERRIDE 0x00400000 +#define A2XX_RBBM_PM_OVERRIDE1_CP_REG_SCLK_PM_OVERRIDE 0x00800000 +#define A2XX_RBBM_PM_OVERRIDE1_CP_G_REG_SCLK_PM_OVERRIDE 0x01000000 +#define A2XX_RBBM_PM_OVERRIDE1_SPI_SCLK_PM_OVERRIDE 0x02000000 +#define A2XX_RBBM_PM_OVERRIDE1_RB_REG_SCLK_PM_OVERRIDE 0x04000000 +#define A2XX_RBBM_PM_OVERRIDE1_RB_SCLK_PM_OVERRIDE 0x08000000 +#define A2XX_RBBM_PM_OVERRIDE1_MH_MH_SCLK_PM_OVERRIDE 0x10000000 +#define A2XX_RBBM_PM_OVERRIDE1_MH_REG_SCLK_PM_OVERRIDE 0x20000000 +#define A2XX_RBBM_PM_OVERRIDE1_MH_MMU_SCLK_PM_OVERRIDE 0x40000000 +#define A2XX_RBBM_PM_OVERRIDE1_MH_TCROQ_SCLK_PM_OVERRIDE 0x80000000 + +#define REG_A2XX_RBBM_PM_OVERRIDE2 0x0000039d + +#define REG_A2XX_RBBM_DEBUG_OUT 0x000003a0 + +#define REG_A2XX_RBBM_DEBUG_CNTL 0x000003a1 + +#define REG_A2XX_RBBM_READ_ERROR 0x000003b3 + +#define REG_A2XX_RBBM_INT_CNTL 0x000003b4 +#define A2XX_RBBM_INT_CNTL_RDERR_INT_MASK 0x00000001 +#define A2XX_RBBM_INT_CNTL_DISPLAY_UPDATE_INT_MASK 0x00000002 +#define A2XX_RBBM_INT_CNTL_GUI_IDLE_INT_MASK 0x00080000 + +#define REG_A2XX_RBBM_INT_STATUS 0x000003b5 + +#define REG_A2XX_RBBM_INT_ACK 0x000003b6 + +#define REG_A2XX_MASTER_INT_SIGNAL 0x000003b7 +#define A2XX_MASTER_INT_SIGNAL_MH_INT_STAT 0x00000020 +#define A2XX_MASTER_INT_SIGNAL_SQ_INT_STAT 0x04000000 +#define A2XX_MASTER_INT_SIGNAL_CP_INT_STAT 0x40000000 +#define A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT 0x80000000 + +#define 
REG_A2XX_RBBM_PERIPHID1 0x000003f9 + +#define REG_A2XX_RBBM_PERIPHID2 0x000003fa + +#define REG_A2XX_CP_PERFMON_CNTL 0x00000444 + +#define REG_A2XX_CP_PERFCOUNTER_SELECT 0x00000445 + +#define REG_A2XX_CP_PERFCOUNTER_LO 0x00000446 + +#define REG_A2XX_CP_PERFCOUNTER_HI 0x00000447 + +#define REG_A2XX_RBBM_STATUS 0x000005d0 +#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK 0x0000001f +#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT 0 +static inline uint32_t A2XX_RBBM_STATUS_CMDFIFO_AVAIL(uint32_t val) +{ + return ((val) << A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT) & A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK; +} +#define A2XX_RBBM_STATUS_TC_BUSY 0x00000020 +#define A2XX_RBBM_STATUS_HIRQ_PENDING 0x00000100 +#define A2XX_RBBM_STATUS_CPRQ_PENDING 0x00000200 +#define A2XX_RBBM_STATUS_CFRQ_PENDING 0x00000400 +#define A2XX_RBBM_STATUS_PFRQ_PENDING 0x00000800 +#define A2XX_RBBM_STATUS_VGT_BUSY_NO_DMA 0x00001000 +#define A2XX_RBBM_STATUS_RBBM_WU_BUSY 0x00004000 +#define A2XX_RBBM_STATUS_CP_NRT_BUSY 0x00010000 +#define A2XX_RBBM_STATUS_MH_BUSY 0x00040000 +#define A2XX_RBBM_STATUS_MH_COHERENCY_BUSY 0x00080000 +#define A2XX_RBBM_STATUS_SX_BUSY 0x00200000 +#define A2XX_RBBM_STATUS_TPC_BUSY 0x00400000 +#define A2XX_RBBM_STATUS_SC_CNTX_BUSY 0x01000000 +#define A2XX_RBBM_STATUS_PA_BUSY 0x02000000 +#define A2XX_RBBM_STATUS_VGT_BUSY 0x04000000 +#define A2XX_RBBM_STATUS_SQ_CNTX17_BUSY 0x08000000 +#define A2XX_RBBM_STATUS_SQ_CNTX0_BUSY 0x10000000 +#define A2XX_RBBM_STATUS_RB_CNTX_BUSY 0x40000000 +#define A2XX_RBBM_STATUS_GUI_ACTIVE 0x80000000 + +#define REG_A2XX_MH_ARBITER_CONFIG 0x00000a40 +#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK 0x0000003f +#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT 0 +static inline uint32_t A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(uint32_t val) +{ + return ((val) << A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT) & A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK; +} +#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_GRANULARITY 0x00000040 +#define A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE 0x00000080 +#define A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE 0x00000100 +#define A2XX_MH_ARBITER_CONFIG_L2_ARB_CONTROL 0x00000200 +#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK 0x00001c00 +#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT 10 +static inline uint32_t A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(uint32_t val) +{ + return ((val) << A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT) & A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK; +} +#define A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE 0x00002000 +#define A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE 0x00004000 +#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE 0x00008000 +#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK 0x003f0000 +#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT 16 +static inline uint32_t A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(uint32_t val) +{ + return ((val) << A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT) & A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK; +} +#define A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE 0x00400000 +#define A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE 0x00800000 +#define A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE 0x01000000 +#define A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE 0x02000000 +#define A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE 0x04000000 + +#define REG_A2XX_MH_INTERRUPT_MASK 0x00000a42 +#define A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR 0x00000001 +#define A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR 0x00000002 +#define A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT 0x00000004 + +#define REG_A2XX_MH_INTERRUPT_STATUS 0x00000a43 + +#define REG_A2XX_MH_INTERRUPT_CLEAR 
0x00000a44 + +#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1 0x00000a54 + +#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG2 0x00000a55 + +#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01 +#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f +#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0 +static inline uint32_t A2XX_A220_VSC_BIN_SIZE_WIDTH(uint32_t val) +{ + return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT) & A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK; +} +#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0 +#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT 5 +static inline uint32_t A2XX_A220_VSC_BIN_SIZE_HEIGHT(uint32_t val) +{ + return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT) & A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK; +} + +static inline uint32_t REG_A2XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; } + +static inline uint32_t REG_A2XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; } + +static inline uint32_t REG_A2XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; } + +static inline uint32_t REG_A2XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; } + +#define REG_A2XX_PC_DEBUG_CNTL 0x00000c38 + +#define REG_A2XX_PC_DEBUG_DATA 0x00000c39 + +#define REG_A2XX_PA_SC_VIZ_QUERY_STATUS 0x00000c44 + +#define REG_A2XX_GRAS_DEBUG_CNTL 0x00000c80 + +#define REG_A2XX_PA_SU_DEBUG_CNTL 0x00000c80 + +#define REG_A2XX_GRAS_DEBUG_DATA 0x00000c81 + +#define REG_A2XX_PA_SU_DEBUG_DATA 0x00000c81 + +#define REG_A2XX_PA_SU_FACE_DATA 0x00000c86 +#define A2XX_PA_SU_FACE_DATA_BASE_ADDR__MASK 0xffffffe0 +#define A2XX_PA_SU_FACE_DATA_BASE_ADDR__SHIFT 5 +static inline uint32_t A2XX_PA_SU_FACE_DATA_BASE_ADDR(uint32_t val) +{ + return ((val) << A2XX_PA_SU_FACE_DATA_BASE_ADDR__SHIFT) & A2XX_PA_SU_FACE_DATA_BASE_ADDR__MASK; +} + +#define REG_A2XX_SQ_GPR_MANAGEMENT 0x00000d00 +#define A2XX_SQ_GPR_MANAGEMENT_REG_DYNAMIC 0x00000001 +#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__MASK 0x00000ff0 +#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__SHIFT 4 +static inline uint32_t A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX(uint32_t val) +{ + return ((val) << A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__SHIFT) & A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__MASK; +} +#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__MASK 0x000ff000 +#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__SHIFT 12 +static inline uint32_t A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX(uint32_t val) +{ + return ((val) << A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__SHIFT) & A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__MASK; +} + +#define REG_A2XX_SQ_FLOW_CONTROL 0x00000d01 + +#define REG_A2XX_SQ_INST_STORE_MANAGMENT 0x00000d02 +#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__MASK 0x00000fff +#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__SHIFT 0 +static inline uint32_t A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX(uint32_t val) +{ + return ((val) << A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__SHIFT) & A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__MASK; +} +#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__MASK 0x0fff0000 +#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__SHIFT 16 +static inline uint32_t A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX(uint32_t val) +{ + return ((val) << A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__SHIFT) & A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__MASK; +} + +#define REG_A2XX_SQ_DEBUG_MISC 0x00000d05 + +#define REG_A2XX_SQ_INT_CNTL 0x00000d34 + +#define REG_A2XX_SQ_INT_STATUS 0x00000d35 + +#define REG_A2XX_SQ_INT_ACK 0x00000d36 + +#define REG_A2XX_SQ_DEBUG_INPUT_FSM 0x00000dae + +#define REG_A2XX_SQ_DEBUG_CONST_MGR_FSM 0x00000daf + 
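Aside: every multi-bit field in this generated header follows one pattern, a FIELD__MASK/FIELD__SHIFT pair plus a static inline builder that shifts the value into position and masks off anything out of range, while single-bit flags are bare #defines; a full register word is composed by OR-ing builders and flags together. A minimal sketch of that composition, using the SQ_GPR_MANAGEMENT helpers defined a few registers above (the partition sizes passed in are illustrative values only, not anything the driver mandates):

	#include <stdint.h>
	/* assumes this header (a2xx.xml.h) is included for the A2XX_* helpers */

	static uint32_t example_sq_gpr_management(void)
	{
		/* illustrative field values: enable dynamic GPR allocation and
		 * pick made-up pixel/vertex register-file partition sizes */
		return A2XX_SQ_GPR_MANAGEMENT_REG_DYNAMIC |
		       A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX(0x20) |
		       A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX(0x40);
	}

Because each builder masks its result, an oversized value is truncated to the field width rather than spilling into neighbouring bits, which is why register words can be assembled with plain bitwise OR.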
+#define REG_A2XX_SQ_DEBUG_TP_FSM 0x00000db0
+
+#define REG_A2XX_SQ_DEBUG_FSM_ALU_0 0x00000db1
+
+#define REG_A2XX_SQ_DEBUG_FSM_ALU_1 0x00000db2
+
+#define REG_A2XX_SQ_DEBUG_EXP_ALLOC 0x00000db3
+
+#define REG_A2XX_SQ_DEBUG_PTR_BUFF 0x00000db4
+
+#define REG_A2XX_SQ_DEBUG_GPR_VTX 0x00000db5
+
+#define REG_A2XX_SQ_DEBUG_GPR_PIX 0x00000db6
+
+#define REG_A2XX_SQ_DEBUG_TB_STATUS_SEL 0x00000db7
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_0 0x00000db8
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_1 0x00000db9
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_STATUS_REG 0x00000dba
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_STATE_MEM 0x00000dbb
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_0 0x00000dbc
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_0 0x00000dbd
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_1 0x00000dbe
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_2 0x00000dbf
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_3 0x00000dc0
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATE_MEM 0x00000dc1
+
+#define REG_A2XX_TC_CNTL_STATUS 0x00000e00
+#define A2XX_TC_CNTL_STATUS_L2_INVALIDATE 0x00000001
+
+#define REG_A2XX_TP0_CHICKEN 0x00000e1e
+
+#define REG_A2XX_RB_BC_CONTROL 0x00000f01
+#define A2XX_RB_BC_CONTROL_ACCUM_LINEAR_MODE_ENABLE 0x00000001
+#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK 0x00000006
+#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT 1
+static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT(uint32_t val)
+{
+	return ((val) << A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_DISABLE_EDRAM_CAM 0x00000008
+#define A2XX_RB_BC_CONTROL_DISABLE_EZ_FAST_CONTEXT_SWITCH 0x00000010
+#define A2XX_RB_BC_CONTROL_DISABLE_EZ_NULL_ZCMD_DROP 0x00000020
+#define A2XX_RB_BC_CONTROL_DISABLE_LZ_NULL_ZCMD_DROP 0x00000040
+#define A2XX_RB_BC_CONTROL_ENABLE_AZ_THROTTLE 0x00000080
+#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK 0x00001f00
+#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT 8
+static inline uint32_t A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT(uint32_t val)
+{
+	return ((val) << A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT) & A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_ENABLE_CRC_UPDATE 0x00004000
+#define A2XX_RB_BC_CONTROL_CRC_MODE 0x00008000
+#define A2XX_RB_BC_CONTROL_DISABLE_SAMPLE_COUNTERS 0x00010000
+#define A2XX_RB_BC_CONTROL_DISABLE_ACCUM 0x00020000
+#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK 0x003c0000
+#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT 18
+static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK(uint32_t val)
+{
+	return ((val) << A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK;
+}
+#define A2XX_RB_BC_CONTROL_LINEAR_PERFORMANCE_ENABLE 0x00400000
+#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK 0x07800000
+#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT 23
+static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT(uint32_t val)
+{
+	return ((val) << A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK 0x18000000
+#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT 27
+static inline uint32_t A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT(uint32_t val)
+{
+	return ((val) << A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_MEM_EXPORT_LINEAR_MODE_ENABLE 0x20000000
+#define A2XX_RB_BC_CONTROL_CRC_SYSTEM 0x40000000
+#define
A2XX_RB_BC_CONTROL_RESERVED6 0x80000000 + +#define REG_A2XX_RB_EDRAM_INFO 0x00000f02 + +#define REG_A2XX_RB_DEBUG_CNTL 0x00000f26 + +#define REG_A2XX_RB_DEBUG_DATA 0x00000f27 + +#define REG_A2XX_RB_SURFACE_INFO 0x00002000 +#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK 0x00003fff +#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT 0 +static inline uint32_t A2XX_RB_SURFACE_INFO_SURFACE_PITCH(uint32_t val) +{ + return ((val) << A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT) & A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK; +} +#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK 0x0000c000 +#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT 14 +static inline uint32_t A2XX_RB_SURFACE_INFO_MSAA_SAMPLES(uint32_t val) +{ + return ((val) << A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT) & A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK; +} + +#define REG_A2XX_RB_COLOR_INFO 0x00002001 +#define A2XX_RB_COLOR_INFO_FORMAT__MASK 0x0000000f +#define A2XX_RB_COLOR_INFO_FORMAT__SHIFT 0 +static inline uint32_t A2XX_RB_COLOR_INFO_FORMAT(enum a2xx_colorformatx val) +{ + return ((val) << A2XX_RB_COLOR_INFO_FORMAT__SHIFT) & A2XX_RB_COLOR_INFO_FORMAT__MASK; +} +#define A2XX_RB_COLOR_INFO_ROUND_MODE__MASK 0x00000030 +#define A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT 4 +static inline uint32_t A2XX_RB_COLOR_INFO_ROUND_MODE(uint32_t val) +{ + return ((val) << A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT) & A2XX_RB_COLOR_INFO_ROUND_MODE__MASK; +} +#define A2XX_RB_COLOR_INFO_LINEAR 0x00000040 +#define A2XX_RB_COLOR_INFO_ENDIAN__MASK 0x00000180 +#define A2XX_RB_COLOR_INFO_ENDIAN__SHIFT 7 +static inline uint32_t A2XX_RB_COLOR_INFO_ENDIAN(uint32_t val) +{ + return ((val) << A2XX_RB_COLOR_INFO_ENDIAN__SHIFT) & A2XX_RB_COLOR_INFO_ENDIAN__MASK; +} +#define A2XX_RB_COLOR_INFO_SWAP__MASK 0x00000600 +#define A2XX_RB_COLOR_INFO_SWAP__SHIFT 9 +static inline uint32_t A2XX_RB_COLOR_INFO_SWAP(uint32_t val) +{ + return ((val) << A2XX_RB_COLOR_INFO_SWAP__SHIFT) & A2XX_RB_COLOR_INFO_SWAP__MASK; +} +#define A2XX_RB_COLOR_INFO_BASE__MASK 0xfffff000 +#define A2XX_RB_COLOR_INFO_BASE__SHIFT 12 +static inline uint32_t A2XX_RB_COLOR_INFO_BASE(uint32_t val) +{ + return ((val >> 12) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK; +} + +#define REG_A2XX_RB_DEPTH_INFO 0x00002002 +#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001 +#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0 +static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val) +{ + return ((val) << A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK; +} +#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff000 +#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12 +static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val) +{ + return ((val >> 12) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK; +} + +#define REG_A2XX_A225_RB_COLOR_INFO3 0x00002005 + +#define REG_A2XX_COHER_DEST_BASE_0 0x00002006 + +#define REG_A2XX_PA_SC_SCREEN_SCISSOR_TL 0x0000200e +#define A2XX_PA_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000 +#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff +#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT 0 +static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_X(uint32_t val) +{ + return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK; +} +#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000 +#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16 +static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_Y(uint32_t val) +{ + return ((val) << 
A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK; +} + +#define REG_A2XX_PA_SC_SCREEN_SCISSOR_BR 0x0000200f +#define A2XX_PA_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000 +#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff +#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT 0 +static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_X(uint32_t val) +{ + return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK; +} +#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000 +#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16 +static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_Y(uint32_t val) +{ + return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK; +} + +#define REG_A2XX_PA_SC_WINDOW_OFFSET 0x00002080 +#define A2XX_PA_SC_WINDOW_OFFSET_X__MASK 0x00007fff +#define A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0 +static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_X(int32_t val) +{ + return ((val) << A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_X__MASK; +} +#define A2XX_PA_SC_WINDOW_OFFSET_Y__MASK 0x7fff0000 +#define A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16 +static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_Y(int32_t val) +{ + return ((val) << A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_Y__MASK; +} +#define A2XX_PA_SC_WINDOW_OFFSET_DISABLE 0x80000000 + +#define REG_A2XX_PA_SC_WINDOW_SCISSOR_TL 0x00002081 +#define A2XX_PA_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000 +#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff +#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT 0 +static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_X(uint32_t val) +{ + return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK; +} +#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000 +#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16 +static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_Y(uint32_t val) +{ + return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK; +} + +#define REG_A2XX_PA_SC_WINDOW_SCISSOR_BR 0x00002082 +#define A2XX_PA_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000 +#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff +#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT 0 +static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_X(uint32_t val) +{ + return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK; +} +#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000 +#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16 +static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_Y(uint32_t val) +{ + return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK; +} + +#define REG_A2XX_UNKNOWN_2010 0x00002010 + +#define REG_A2XX_VGT_MAX_VTX_INDX 0x00002100 + +#define REG_A2XX_VGT_MIN_VTX_INDX 0x00002101 + +#define REG_A2XX_VGT_INDX_OFFSET 0x00002102 + +#define REG_A2XX_A225_PC_MULTI_PRIM_IB_RESET_INDX 0x00002103 + +#define REG_A2XX_RB_COLOR_MASK 0x00002104 +#define A2XX_RB_COLOR_MASK_WRITE_RED 0x00000001 +#define A2XX_RB_COLOR_MASK_WRITE_GREEN 0x00000002 +#define A2XX_RB_COLOR_MASK_WRITE_BLUE 0x00000004 +#define A2XX_RB_COLOR_MASK_WRITE_ALPHA 0x00000008 + +#define REG_A2XX_RB_BLEND_RED 0x00002105 + +#define REG_A2XX_RB_BLEND_GREEN 0x00002106 + +#define REG_A2XX_RB_BLEND_BLUE 0x00002107 + +#define REG_A2XX_RB_BLEND_ALPHA 0x00002108 + +#define REG_A2XX_RB_FOG_COLOR 0x00002109 +#define A2XX_RB_FOG_COLOR_FOG_RED__MASK 0x000000ff +#define 
A2XX_RB_FOG_COLOR_FOG_RED__SHIFT 0 +static inline uint32_t A2XX_RB_FOG_COLOR_FOG_RED(uint32_t val) +{ + return ((val) << A2XX_RB_FOG_COLOR_FOG_RED__SHIFT) & A2XX_RB_FOG_COLOR_FOG_RED__MASK; +} +#define A2XX_RB_FOG_COLOR_FOG_GREEN__MASK 0x0000ff00 +#define A2XX_RB_FOG_COLOR_FOG_GREEN__SHIFT 8 +static inline uint32_t A2XX_RB_FOG_COLOR_FOG_GREEN(uint32_t val) +{ + return ((val) << A2XX_RB_FOG_COLOR_FOG_GREEN__SHIFT) & A2XX_RB_FOG_COLOR_FOG_GREEN__MASK; +} +#define A2XX_RB_FOG_COLOR_FOG_BLUE__MASK 0x00ff0000 +#define A2XX_RB_FOG_COLOR_FOG_BLUE__SHIFT 16 +static inline uint32_t A2XX_RB_FOG_COLOR_FOG_BLUE(uint32_t val) +{ + return ((val) << A2XX_RB_FOG_COLOR_FOG_BLUE__SHIFT) & A2XX_RB_FOG_COLOR_FOG_BLUE__MASK; +} + +#define REG_A2XX_RB_STENCILREFMASK_BF 0x0000210c +#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff +#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0 +static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val) +{ + return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK; +} +#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00 +#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8 +static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val) +{ + return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK; +} +#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000 +#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16 +static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val) +{ + return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK; +} + +#define REG_A2XX_RB_STENCILREFMASK 0x0000210d +#define A2XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff +#define A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0 +static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILREF(uint32_t val) +{ + return ((val) << A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILREF__MASK; +} +#define A2XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00 +#define A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8 +static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val) +{ + return ((val) << A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILMASK__MASK; +} +#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000 +#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16 +static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val) +{ + return ((val) << A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK; +} + +#define REG_A2XX_RB_ALPHA_REF 0x0000210e + +#define REG_A2XX_PA_CL_VPORT_XSCALE 0x0000210f +#define A2XX_PA_CL_VPORT_XSCALE__MASK 0xffffffff +#define A2XX_PA_CL_VPORT_XSCALE__SHIFT 0 +static inline uint32_t A2XX_PA_CL_VPORT_XSCALE(float val) +{ + return ((fui(val)) << A2XX_PA_CL_VPORT_XSCALE__SHIFT) & A2XX_PA_CL_VPORT_XSCALE__MASK; +} + +#define REG_A2XX_PA_CL_VPORT_XOFFSET 0x00002110 +#define A2XX_PA_CL_VPORT_XOFFSET__MASK 0xffffffff +#define A2XX_PA_CL_VPORT_XOFFSET__SHIFT 0 +static inline uint32_t A2XX_PA_CL_VPORT_XOFFSET(float val) +{ + return ((fui(val)) << A2XX_PA_CL_VPORT_XOFFSET__SHIFT) & A2XX_PA_CL_VPORT_XOFFSET__MASK; +} + +#define REG_A2XX_PA_CL_VPORT_YSCALE 0x00002111 +#define A2XX_PA_CL_VPORT_YSCALE__MASK 0xffffffff +#define A2XX_PA_CL_VPORT_YSCALE__SHIFT 0 +static inline uint32_t A2XX_PA_CL_VPORT_YSCALE(float val) +{ + return 
((fui(val)) << A2XX_PA_CL_VPORT_YSCALE__SHIFT) & A2XX_PA_CL_VPORT_YSCALE__MASK; +} + +#define REG_A2XX_PA_CL_VPORT_YOFFSET 0x00002112 +#define A2XX_PA_CL_VPORT_YOFFSET__MASK 0xffffffff +#define A2XX_PA_CL_VPORT_YOFFSET__SHIFT 0 +static inline uint32_t A2XX_PA_CL_VPORT_YOFFSET(float val) +{ + return ((fui(val)) << A2XX_PA_CL_VPORT_YOFFSET__SHIFT) & A2XX_PA_CL_VPORT_YOFFSET__MASK; +} + +#define REG_A2XX_PA_CL_VPORT_ZSCALE 0x00002113 +#define A2XX_PA_CL_VPORT_ZSCALE__MASK 0xffffffff +#define A2XX_PA_CL_VPORT_ZSCALE__SHIFT 0 +static inline uint32_t A2XX_PA_CL_VPORT_ZSCALE(float val) +{ + return ((fui(val)) << A2XX_PA_CL_VPORT_ZSCALE__SHIFT) & A2XX_PA_CL_VPORT_ZSCALE__MASK; +} + +#define REG_A2XX_PA_CL_VPORT_ZOFFSET 0x00002114 +#define A2XX_PA_CL_VPORT_ZOFFSET__MASK 0xffffffff +#define A2XX_PA_CL_VPORT_ZOFFSET__SHIFT 0 +static inline uint32_t A2XX_PA_CL_VPORT_ZOFFSET(float val) +{ + return ((fui(val)) << A2XX_PA_CL_VPORT_ZOFFSET__SHIFT) & A2XX_PA_CL_VPORT_ZOFFSET__MASK; +} + +#define REG_A2XX_SQ_PROGRAM_CNTL 0x00002180 +#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK 0x000000ff +#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT 0 +static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_REGS(uint32_t val) +{ + return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK; +} +#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK 0x0000ff00 +#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT 8 +static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_REGS(uint32_t val) +{ + return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK; +} +#define A2XX_SQ_PROGRAM_CNTL_VS_RESOURCE 0x00010000 +#define A2XX_SQ_PROGRAM_CNTL_PS_RESOURCE 0x00020000 +#define A2XX_SQ_PROGRAM_CNTL_PARAM_GEN 0x00040000 +#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_PIX 0x00080000 +#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK 0x00f00000 +#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT 20 +static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT(uint32_t val) +{ + return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK; +} +#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK 0x07000000 +#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT 24 +static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE(enum a2xx_sq_ps_vtx_mode val) +{ + return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK; +} +#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK 0x78000000 +#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT 27 +static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE(uint32_t val) +{ + return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK; +} +#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_VTX 0x80000000 + +#define REG_A2XX_SQ_CONTEXT_MISC 0x00002181 +#define A2XX_SQ_CONTEXT_MISC_INST_PRED_OPTIMIZE 0x00000001 +#define A2XX_SQ_CONTEXT_MISC_SC_OUTPUT_SCREEN_XY 0x00000002 +#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK 0x0000000c +#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT 2 +static inline uint32_t A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL(enum a2xx_sq_sample_cntl val) +{ + return ((val) << A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT) & A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK; +} +#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK 0x0000ff00 +#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT 8 +static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val) +{ + return ((val) << A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT) & A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK; 
+} +#define A2XX_SQ_CONTEXT_MISC_PERFCOUNTER_REF 0x00010000 +#define A2XX_SQ_CONTEXT_MISC_YEILD_OPTIMIZE 0x00020000 +#define A2XX_SQ_CONTEXT_MISC_TX_CACHE_SEL 0x00040000 + +#define REG_A2XX_SQ_INTERPOLATOR_CNTL 0x00002182 +#define A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__MASK 0x0000ffff +#define A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__SHIFT 0 +static inline uint32_t A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE(uint32_t val) +{ + return ((val) << A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__SHIFT) & A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__MASK; +} +#define A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__MASK 0xffff0000 +#define A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__SHIFT 16 +static inline uint32_t A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN(uint32_t val) +{ + return ((val) << A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__SHIFT) & A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__MASK; +} + +#define REG_A2XX_SQ_WRAPPING_0 0x00002183 +#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__MASK 0x0000000f +#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__SHIFT 0 +static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_0(uint32_t val) +{ + return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__MASK; +} +#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__MASK 0x000000f0 +#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__SHIFT 4 +static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_1(uint32_t val) +{ + return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__MASK; +} +#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__MASK 0x00000f00 +#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__SHIFT 8 +static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_2(uint32_t val) +{ + return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__MASK; +} +#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__MASK 0x0000f000 +#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__SHIFT 12 +static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_3(uint32_t val) +{ + return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__MASK; +} +#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__MASK 0x000f0000 +#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__SHIFT 16 +static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_4(uint32_t val) +{ + return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__MASK; +} +#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__MASK 0x00f00000 +#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__SHIFT 20 +static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_5(uint32_t val) +{ + return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__MASK; +} +#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__MASK 0x0f000000 +#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__SHIFT 24 +static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_6(uint32_t val) +{ + return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__MASK; +} +#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__MASK 0xf0000000 +#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__SHIFT 28 +static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_7(uint32_t val) +{ + return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__MASK; +} + +#define REG_A2XX_SQ_WRAPPING_1 0x00002184 +#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__MASK 0x0000000f +#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__SHIFT 0 +static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_8(uint32_t val) +{ + return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__MASK; +} +#define 
A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__MASK 0x000000f0 +#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__SHIFT 4 +static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_9(uint32_t val) +{ + return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__MASK; +} +#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__MASK 0x00000f00 +#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__SHIFT 8 +static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_10(uint32_t val) +{ + return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__MASK; +} +#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__MASK 0x0000f000 +#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__SHIFT 12 +static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_11(uint32_t val) +{ + return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__MASK; +} +#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__MASK 0x000f0000 +#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__SHIFT 16 +static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_12(uint32_t val) +{ + return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__MASK; +} +#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__MASK 0x00f00000 +#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__SHIFT 20 +static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_13(uint32_t val) +{ + return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__MASK; +} +#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__MASK 0x0f000000 +#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__SHIFT 24 +static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_14(uint32_t val) +{ + return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__MASK; +} +#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__MASK 0xf0000000 +#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__SHIFT 28 +static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_15(uint32_t val) +{ + return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__MASK; +} + +#define REG_A2XX_SQ_PS_PROGRAM 0x000021f6 +#define A2XX_SQ_PS_PROGRAM_BASE__MASK 0x00000fff +#define A2XX_SQ_PS_PROGRAM_BASE__SHIFT 0 +static inline uint32_t A2XX_SQ_PS_PROGRAM_BASE(uint32_t val) +{ + return ((val) << A2XX_SQ_PS_PROGRAM_BASE__SHIFT) & A2XX_SQ_PS_PROGRAM_BASE__MASK; +} +#define A2XX_SQ_PS_PROGRAM_SIZE__MASK 0x00fff000 +#define A2XX_SQ_PS_PROGRAM_SIZE__SHIFT 12 +static inline uint32_t A2XX_SQ_PS_PROGRAM_SIZE(uint32_t val) +{ + return ((val) << A2XX_SQ_PS_PROGRAM_SIZE__SHIFT) & A2XX_SQ_PS_PROGRAM_SIZE__MASK; +} + +#define REG_A2XX_SQ_VS_PROGRAM 0x000021f7 +#define A2XX_SQ_VS_PROGRAM_BASE__MASK 0x00000fff +#define A2XX_SQ_VS_PROGRAM_BASE__SHIFT 0 +static inline uint32_t A2XX_SQ_VS_PROGRAM_BASE(uint32_t val) +{ + return ((val) << A2XX_SQ_VS_PROGRAM_BASE__SHIFT) & A2XX_SQ_VS_PROGRAM_BASE__MASK; +} +#define A2XX_SQ_VS_PROGRAM_SIZE__MASK 0x00fff000 +#define A2XX_SQ_VS_PROGRAM_SIZE__SHIFT 12 +static inline uint32_t A2XX_SQ_VS_PROGRAM_SIZE(uint32_t val) +{ + return ((val) << A2XX_SQ_VS_PROGRAM_SIZE__SHIFT) & A2XX_SQ_VS_PROGRAM_SIZE__MASK; +} + +#define REG_A2XX_VGT_EVENT_INITIATOR 0x000021f9 + +#define REG_A2XX_VGT_DRAW_INITIATOR 0x000021fc +#define A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f +#define A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0 +static inline uint32_t A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val) +{ + return ((val) << A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK; +} +#define A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0 
+#define A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6 +static inline uint32_t A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val) +{ + return ((val) << A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK; +} +#define A2XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK 0x00000600 +#define A2XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT 9 +static inline uint32_t A2XX_VGT_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val) +{ + return ((val) << A2XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT) & A2XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK; +} +#define A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000800 +#define A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT 11 +static inline uint32_t A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size val) +{ + return ((val) << A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK; +} +#define A2XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000 +#define A2XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000 +#define A2XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000 +#define A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK 0xff000000 +#define A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT 24 +static inline uint32_t A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val) +{ + return ((val) << A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT) & A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK; +} + +#define REG_A2XX_VGT_IMMED_DATA 0x000021fd + +#define REG_A2XX_RB_DEPTHCONTROL 0x00002200 +#define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE 0x00000001 +#define A2XX_RB_DEPTHCONTROL_Z_ENABLE 0x00000002 +#define A2XX_RB_DEPTHCONTROL_Z_WRITE_ENABLE 0x00000004 +#define A2XX_RB_DEPTHCONTROL_EARLY_Z_ENABLE 0x00000008 +#define A2XX_RB_DEPTHCONTROL_ZFUNC__MASK 0x00000070 +#define A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT 4 +static inline uint32_t A2XX_RB_DEPTHCONTROL_ZFUNC(enum adreno_compare_func val) +{ + return ((val) << A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_ZFUNC__MASK; +} +#define A2XX_RB_DEPTHCONTROL_BACKFACE_ENABLE 0x00000080 +#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK 0x00000700 +#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT 8 +static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC(enum adreno_compare_func val) +{ + return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK; +} +#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK 0x00003800 +#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT 11 +static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL(enum adreno_stencil_op val) +{ + return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK; +} +#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK 0x0001c000 +#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT 14 +static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS(enum adreno_stencil_op val) +{ + return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK; +} +#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK 0x000e0000 +#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT 17 +static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL(enum adreno_stencil_op val) +{ + return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK; +} +#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK 0x00700000 +#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT 20 +static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF(enum adreno_compare_func val) +{ + return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT) & 
A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK; +} +#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK 0x03800000 +#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT 23 +static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF(enum adreno_stencil_op val) +{ + return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK; +} +#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK 0x1c000000 +#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT 26 +static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF(enum adreno_stencil_op val) +{ + return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK; +} +#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK 0xe0000000 +#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT 29 +static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF(enum adreno_stencil_op val) +{ + return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK; +} + +#define REG_A2XX_RB_BLEND_CONTROL 0x00002201 +#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK 0x0000001f +#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT 0 +static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND(enum adreno_rb_blend_factor val) +{ + return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK; +} +#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK 0x000000e0 +#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT 5 +static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum a2xx_rb_blend_opcode val) +{ + return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK; +} +#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK 0x00001f00 +#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT 8 +static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND(enum adreno_rb_blend_factor val) +{ + return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK; +} +#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK 0x001f0000 +#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT 16 +static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND(enum adreno_rb_blend_factor val) +{ + return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK; +} +#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK 0x00e00000 +#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT 21 +static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum a2xx_rb_blend_opcode val) +{ + return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK; +} +#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK 0x1f000000 +#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT 24 +static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND(enum adreno_rb_blend_factor val) +{ + return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK; +} +#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE_ENABLE 0x20000000 +#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE 0x40000000 + +#define REG_A2XX_RB_COLORCONTROL 0x00002202 +#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK 0x00000007 +#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT 0 +static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_FUNC(enum adreno_compare_func val) +{ + return ((val) << A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK; +} +#define A2XX_RB_COLORCONTROL_ALPHA_TEST_ENABLE 
0x00000008 +#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_ENABLE 0x00000010 +#define A2XX_RB_COLORCONTROL_BLEND_DISABLE 0x00000020 +#define A2XX_RB_COLORCONTROL_VOB_ENABLE 0x00000040 +#define A2XX_RB_COLORCONTROL_VS_EXPORTS_FOG 0x00000080 +#define A2XX_RB_COLORCONTROL_ROP_CODE__MASK 0x00000f00 +#define A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT 8 +static inline uint32_t A2XX_RB_COLORCONTROL_ROP_CODE(uint32_t val) +{ + return ((val) << A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT) & A2XX_RB_COLORCONTROL_ROP_CODE__MASK; +} +#define A2XX_RB_COLORCONTROL_DITHER_MODE__MASK 0x00003000 +#define A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT 12 +static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_MODE(enum adreno_rb_dither_mode val) +{ + return ((val) << A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_MODE__MASK; +} +#define A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK 0x0000c000 +#define A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT 14 +static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_TYPE(enum a2xx_rb_dither_type val) +{ + return ((val) << A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK; +} +#define A2XX_RB_COLORCONTROL_PIXEL_FOG 0x00010000 +#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK 0x03000000 +#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT 24 +static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0(uint32_t val) +{ + return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK; +} +#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK 0x0c000000 +#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT 26 +static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1(uint32_t val) +{ + return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK; +} +#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK 0x30000000 +#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT 28 +static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2(uint32_t val) +{ + return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK; +} +#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK 0xc0000000 +#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT 30 +static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3(uint32_t val) +{ + return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK; +} + +#define REG_A2XX_VGT_CURRENT_BIN_ID_MAX 0x00002203 +#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK 0x00000007 +#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT 0 +static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN(uint32_t val) +{ + return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK; +} +#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK 0x00000038 +#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT 3 +static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_ROW(uint32_t val) +{ + return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK; +} +#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK 0x000001c0 +#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT 6 +static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK(uint32_t val) +{ + return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK; +} + +#define REG_A2XX_PA_CL_CLIP_CNTL 
0x00002204 +#define A2XX_PA_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000 +#define A2XX_PA_CL_CLIP_CNTL_BOUNDARY_EDGE_FLAG_ENA 0x00040000 +#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK 0x00080000 +#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT 19 +static inline uint32_t A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF(enum a2xx_dx_clip_space val) +{ + return ((val) << A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT) & A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK; +} +#define A2XX_PA_CL_CLIP_CNTL_DIS_CLIP_ERR_DETECT 0x00100000 +#define A2XX_PA_CL_CLIP_CNTL_VTX_KILL_OR 0x00200000 +#define A2XX_PA_CL_CLIP_CNTL_XY_NAN_RETAIN 0x00400000 +#define A2XX_PA_CL_CLIP_CNTL_Z_NAN_RETAIN 0x00800000 +#define A2XX_PA_CL_CLIP_CNTL_W_NAN_RETAIN 0x01000000 + +#define REG_A2XX_PA_SU_SC_MODE_CNTL 0x00002205 +#define A2XX_PA_SU_SC_MODE_CNTL_CULL_FRONT 0x00000001 +#define A2XX_PA_SU_SC_MODE_CNTL_CULL_BACK 0x00000002 +#define A2XX_PA_SU_SC_MODE_CNTL_FACE 0x00000004 +#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK 0x00000018 +#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT 3 +static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_POLYMODE(enum a2xx_pa_su_sc_polymode val) +{ + return ((val) << A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK; +} +#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK 0x000000e0 +#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT 5 +static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE(enum adreno_pa_su_sc_draw val) +{ + return ((val) << A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK; +} +#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK 0x00000700 +#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT 8 +static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE(enum adreno_pa_su_sc_draw val) +{ + return ((val) << A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK; +} +#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_FRONT_ENABLE 0x00000800 +#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_BACK_ENABLE 0x00001000 +#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_PARA_ENABLE 0x00002000 +#define A2XX_PA_SU_SC_MODE_CNTL_MSAA_ENABLE 0x00008000 +#define A2XX_PA_SU_SC_MODE_CNTL_VTX_WINDOW_OFFSET_ENABLE 0x00010000 +#define A2XX_PA_SU_SC_MODE_CNTL_LINE_STIPPLE_ENABLE 0x00040000 +#define A2XX_PA_SU_SC_MODE_CNTL_PROVOKING_VTX_LAST 0x00080000 +#define A2XX_PA_SU_SC_MODE_CNTL_PERSP_CORR_DIS 0x00100000 +#define A2XX_PA_SU_SC_MODE_CNTL_MULTI_PRIM_IB_ENA 0x00200000 +#define A2XX_PA_SU_SC_MODE_CNTL_QUAD_ORDER_ENABLE 0x00800000 +#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_ALL_TRI 0x02000000 +#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_FIRST_TRI_NEW_STATE 0x04000000 +#define A2XX_PA_SU_SC_MODE_CNTL_CLAMPED_FACENESS 0x10000000 +#define A2XX_PA_SU_SC_MODE_CNTL_ZERO_AREA_FACENESS 0x20000000 +#define A2XX_PA_SU_SC_MODE_CNTL_FACE_KILL_ENABLE 0x40000000 +#define A2XX_PA_SU_SC_MODE_CNTL_FACE_WRITE_ENABLE 0x80000000 + +#define REG_A2XX_PA_CL_VTE_CNTL 0x00002206 +#define A2XX_PA_CL_VTE_CNTL_VPORT_X_SCALE_ENA 0x00000001 +#define A2XX_PA_CL_VTE_CNTL_VPORT_X_OFFSET_ENA 0x00000002 +#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_SCALE_ENA 0x00000004 +#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_OFFSET_ENA 0x00000008 +#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_SCALE_ENA 0x00000010 +#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_OFFSET_ENA 0x00000020 +#define A2XX_PA_CL_VTE_CNTL_VTX_XY_FMT 0x00000100 +#define A2XX_PA_CL_VTE_CNTL_VTX_Z_FMT 0x00000200 +#define A2XX_PA_CL_VTE_CNTL_VTX_W0_FMT 0x00000400 +#define A2XX_PA_CL_VTE_CNTL_PERFCOUNTER_REF 0x00000800 + +#define 
REG_A2XX_VGT_CURRENT_BIN_ID_MIN 0x00002207 +#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK 0x00000007 +#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT 0 +static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN(uint32_t val) +{ + return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK; +} +#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK 0x00000038 +#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT 3 +static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_ROW(uint32_t val) +{ + return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK; +} +#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK 0x000001c0 +#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT 6 +static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK(uint32_t val) +{ + return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK; +} + +#define REG_A2XX_RB_MODECONTROL 0x00002208 +#define A2XX_RB_MODECONTROL_EDRAM_MODE__MASK 0x00000007 +#define A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT 0 +static inline uint32_t A2XX_RB_MODECONTROL_EDRAM_MODE(enum a2xx_rb_edram_mode val) +{ + return ((val) << A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT) & A2XX_RB_MODECONTROL_EDRAM_MODE__MASK; +} + +#define REG_A2XX_A220_RB_LRZ_VSC_CONTROL 0x00002209 + +#define REG_A2XX_RB_SAMPLE_POS 0x0000220a + +#define REG_A2XX_CLEAR_COLOR 0x0000220b +#define A2XX_CLEAR_COLOR_RED__MASK 0x000000ff +#define A2XX_CLEAR_COLOR_RED__SHIFT 0 +static inline uint32_t A2XX_CLEAR_COLOR_RED(uint32_t val) +{ + return ((val) << A2XX_CLEAR_COLOR_RED__SHIFT) & A2XX_CLEAR_COLOR_RED__MASK; +} +#define A2XX_CLEAR_COLOR_GREEN__MASK 0x0000ff00 +#define A2XX_CLEAR_COLOR_GREEN__SHIFT 8 +static inline uint32_t A2XX_CLEAR_COLOR_GREEN(uint32_t val) +{ + return ((val) << A2XX_CLEAR_COLOR_GREEN__SHIFT) & A2XX_CLEAR_COLOR_GREEN__MASK; +} +#define A2XX_CLEAR_COLOR_BLUE__MASK 0x00ff0000 +#define A2XX_CLEAR_COLOR_BLUE__SHIFT 16 +static inline uint32_t A2XX_CLEAR_COLOR_BLUE(uint32_t val) +{ + return ((val) << A2XX_CLEAR_COLOR_BLUE__SHIFT) & A2XX_CLEAR_COLOR_BLUE__MASK; +} +#define A2XX_CLEAR_COLOR_ALPHA__MASK 0xff000000 +#define A2XX_CLEAR_COLOR_ALPHA__SHIFT 24 +static inline uint32_t A2XX_CLEAR_COLOR_ALPHA(uint32_t val) +{ + return ((val) << A2XX_CLEAR_COLOR_ALPHA__SHIFT) & A2XX_CLEAR_COLOR_ALPHA__MASK; +} + +#define REG_A2XX_A220_GRAS_CONTROL 0x00002210 + +#define REG_A2XX_PA_SU_POINT_SIZE 0x00002280 +#define A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK 0x0000ffff +#define A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT 0 +static inline uint32_t A2XX_PA_SU_POINT_SIZE_HEIGHT(float val) +{ + return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK; +} +#define A2XX_PA_SU_POINT_SIZE_WIDTH__MASK 0xffff0000 +#define A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT 16 +static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val) +{ + return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK; +} + +#define REG_A2XX_PA_SU_POINT_MINMAX 0x00002281 +#define A2XX_PA_SU_POINT_MINMAX_MIN__MASK 0x0000ffff +#define A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT 0 +static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MIN(float val) +{ + return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK; +} +#define A2XX_PA_SU_POINT_MINMAX_MAX__MASK 0xffff0000 +#define A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT 16 +static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float 
val) +{ + return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK; +} + +#define REG_A2XX_PA_SU_LINE_CNTL 0x00002282 +#define A2XX_PA_SU_LINE_CNTL_WIDTH__MASK 0x0000ffff +#define A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT 0 +static inline uint32_t A2XX_PA_SU_LINE_CNTL_WIDTH(float val) +{ + return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK; +} + +#define REG_A2XX_PA_SC_LINE_STIPPLE 0x00002283 +#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK 0x0000ffff +#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT 0 +static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN(uint32_t val) +{ + return ((val) << A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK; +} +#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK 0x00ff0000 +#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT 16 +static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT(uint32_t val) +{ + return ((val) << A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK; +} +#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK 0x10000000 +#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT 28 +static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER(enum a2xx_pa_sc_pattern_bit_order val) +{ + return ((val) << A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK; +} +#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK 0x60000000 +#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT 29 +static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL(enum a2xx_pa_sc_auto_reset_cntl val) +{ + return ((val) << A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK; +} + +#define REG_A2XX_PA_SC_VIZ_QUERY 0x00002293 +#define A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ENA 0x00000001 +#define A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__MASK 0x0000007e +#define A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__SHIFT 1 +static inline uint32_t A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID(uint32_t val) +{ + return ((val) << A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__SHIFT) & A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__MASK; +} +#define A2XX_PA_SC_VIZ_QUERY_KILL_PIX_POST_EARLY_Z 0x00000100 + +#define REG_A2XX_VGT_ENHANCE 0x00002294 + +#define REG_A2XX_PA_SC_LINE_CNTL 0x00002300 +#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK 0x0000ffff +#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT 0 +static inline uint32_t A2XX_PA_SC_LINE_CNTL_BRES_CNTL(uint32_t val) +{ + return ((val) << A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT) & A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK; +} +#define A2XX_PA_SC_LINE_CNTL_USE_BRES_CNTL 0x00000100 +#define A2XX_PA_SC_LINE_CNTL_EXPAND_LINE_WIDTH 0x00000200 +#define A2XX_PA_SC_LINE_CNTL_LAST_PIXEL 0x00000400 + +#define REG_A2XX_PA_SC_AA_CONFIG 0x00002301 +#define A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__MASK 0x00000007 +#define A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__SHIFT 0 +static inline uint32_t A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES(uint32_t val) +{ + return ((val) << A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__SHIFT) & A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__MASK; +} +#define A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__MASK 0x0001e000 +#define A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__SHIFT 13 +static inline uint32_t A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST(uint32_t val) +{ + return ((val) << A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__SHIFT) & A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__MASK; +} + +#define REG_A2XX_PA_SU_VTX_CNTL 0x00002302 +#define 
A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK 0x00000001 +#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT 0 +static inline uint32_t A2XX_PA_SU_VTX_CNTL_PIX_CENTER(enum a2xx_pa_pixcenter val) +{ + return ((val) << A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT) & A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK; +} +#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK 0x00000006 +#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT 1 +static inline uint32_t A2XX_PA_SU_VTX_CNTL_ROUND_MODE(enum a2xx_pa_roundmode val) +{ + return ((val) << A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK; +} +#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK 0x00000380 +#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT 7 +static inline uint32_t A2XX_PA_SU_VTX_CNTL_QUANT_MODE(enum a2xx_pa_quantmode val) +{ + return ((val) << A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK; +} + +#define REG_A2XX_PA_CL_GB_VERT_CLIP_ADJ 0x00002303 +#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK 0xffffffff +#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT 0 +static inline uint32_t A2XX_PA_CL_GB_VERT_CLIP_ADJ(float val) +{ + return ((fui(val)) << A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK; +} + +#define REG_A2XX_PA_CL_GB_VERT_DISC_ADJ 0x00002304 +#define A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK 0xffffffff +#define A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT 0 +static inline uint32_t A2XX_PA_CL_GB_VERT_DISC_ADJ(float val) +{ + return ((fui(val)) << A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK; +} + +#define REG_A2XX_PA_CL_GB_HORZ_CLIP_ADJ 0x00002305 +#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK 0xffffffff +#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT 0 +static inline uint32_t A2XX_PA_CL_GB_HORZ_CLIP_ADJ(float val) +{ + return ((fui(val)) << A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK; +} + +#define REG_A2XX_PA_CL_GB_HORZ_DISC_ADJ 0x00002306 +#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK 0xffffffff +#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT 0 +static inline uint32_t A2XX_PA_CL_GB_HORZ_DISC_ADJ(float val) +{ + return ((fui(val)) << A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK; +} + +#define REG_A2XX_SQ_VS_CONST 0x00002307 +#define A2XX_SQ_VS_CONST_BASE__MASK 0x000001ff +#define A2XX_SQ_VS_CONST_BASE__SHIFT 0 +static inline uint32_t A2XX_SQ_VS_CONST_BASE(uint32_t val) +{ + return ((val) << A2XX_SQ_VS_CONST_BASE__SHIFT) & A2XX_SQ_VS_CONST_BASE__MASK; +} +#define A2XX_SQ_VS_CONST_SIZE__MASK 0x001ff000 +#define A2XX_SQ_VS_CONST_SIZE__SHIFT 12 +static inline uint32_t A2XX_SQ_VS_CONST_SIZE(uint32_t val) +{ + return ((val) << A2XX_SQ_VS_CONST_SIZE__SHIFT) & A2XX_SQ_VS_CONST_SIZE__MASK; +} + +#define REG_A2XX_SQ_PS_CONST 0x00002308 +#define A2XX_SQ_PS_CONST_BASE__MASK 0x000001ff +#define A2XX_SQ_PS_CONST_BASE__SHIFT 0 +static inline uint32_t A2XX_SQ_PS_CONST_BASE(uint32_t val) +{ + return ((val) << A2XX_SQ_PS_CONST_BASE__SHIFT) & A2XX_SQ_PS_CONST_BASE__MASK; +} +#define A2XX_SQ_PS_CONST_SIZE__MASK 0x001ff000 +#define A2XX_SQ_PS_CONST_SIZE__SHIFT 12 +static inline uint32_t A2XX_SQ_PS_CONST_SIZE(uint32_t val) +{ + return ((val) << A2XX_SQ_PS_CONST_SIZE__SHIFT) & A2XX_SQ_PS_CONST_SIZE__MASK; +} + +#define REG_A2XX_SQ_DEBUG_MISC_0 0x00002309 + +#define REG_A2XX_SQ_DEBUG_MISC_1 0x0000230a + +#define REG_A2XX_PA_SC_AA_MASK 0x00002312 + +#define REG_A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL 0x00002316 +#define A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__MASK 0x00000007 +#define A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__SHIFT 0 +static inline uint32_t 
A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH(uint32_t val) +{ + return ((val) << A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__SHIFT) & A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__MASK; +} + +#define REG_A2XX_VGT_OUT_DEALLOC_CNTL 0x00002317 +#define A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__MASK 0x00000003 +#define A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__SHIFT 0 +static inline uint32_t A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST(uint32_t val) +{ + return ((val) << A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__SHIFT) & A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__MASK; +} + +#define REG_A2XX_RB_COPY_CONTROL 0x00002318 +#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK 0x00000007 +#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT 0 +static inline uint32_t A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT(enum a2xx_rb_copy_sample_select val) +{ + return ((val) << A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT) & A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK; +} +#define A2XX_RB_COPY_CONTROL_DEPTH_CLEAR_ENABLE 0x00000008 +#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK 0x000000f0 +#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT 4 +static inline uint32_t A2XX_RB_COPY_CONTROL_CLEAR_MASK(uint32_t val) +{ + return ((val) << A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT) & A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK; +} + +#define REG_A2XX_RB_COPY_DEST_BASE 0x00002319 + +#define REG_A2XX_RB_COPY_DEST_PITCH 0x0000231a +#define A2XX_RB_COPY_DEST_PITCH__MASK 0xffffffff +#define A2XX_RB_COPY_DEST_PITCH__SHIFT 0 +static inline uint32_t A2XX_RB_COPY_DEST_PITCH(uint32_t val) +{ + return ((val >> 5) << A2XX_RB_COPY_DEST_PITCH__SHIFT) & A2XX_RB_COPY_DEST_PITCH__MASK; +} + +#define REG_A2XX_RB_COPY_DEST_INFO 0x0000231b +#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK 0x00000007 +#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT 0 +static inline uint32_t A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN(enum adreno_rb_surface_endian val) +{ + return ((val) << A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT) & A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK; +} +#define A2XX_RB_COPY_DEST_INFO_LINEAR 0x00000008 +#define A2XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000f0 +#define A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 4 +static inline uint32_t A2XX_RB_COPY_DEST_INFO_FORMAT(enum a2xx_colorformatx val) +{ + return ((val) << A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A2XX_RB_COPY_DEST_INFO_FORMAT__MASK; +} +#define A2XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300 +#define A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8 +static inline uint32_t A2XX_RB_COPY_DEST_INFO_SWAP(uint32_t val) +{ + return ((val) << A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A2XX_RB_COPY_DEST_INFO_SWAP__MASK; +} +#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00 +#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10 +static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val) +{ + return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK; +} +#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK 0x00003000 +#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT 12 +static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_TYPE(enum a2xx_rb_dither_type val) +{ + return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK; +} +#define A2XX_RB_COPY_DEST_INFO_WRITE_RED 0x00004000 +#define A2XX_RB_COPY_DEST_INFO_WRITE_GREEN 0x00008000 +#define A2XX_RB_COPY_DEST_INFO_WRITE_BLUE 0x00010000 +#define A2XX_RB_COPY_DEST_INFO_WRITE_ALPHA 0x00020000 + +#define REG_A2XX_RB_COPY_DEST_OFFSET 0x0000231c +#define 
A2XX_RB_COPY_DEST_OFFSET_X__MASK 0x00001fff +#define A2XX_RB_COPY_DEST_OFFSET_X__SHIFT 0 +static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_X(uint32_t val) +{ + return ((val) << A2XX_RB_COPY_DEST_OFFSET_X__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_X__MASK; +} +#define A2XX_RB_COPY_DEST_OFFSET_Y__MASK 0x03ffe000 +#define A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT 13 +static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val) +{ + return ((val) << A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_Y__MASK; +} + +#define REG_A2XX_RB_DEPTH_CLEAR 0x0000231d + +#define REG_A2XX_RB_SAMPLE_COUNT_CTL 0x00002324 + +#define REG_A2XX_RB_COLOR_DEST_MASK 0x00002326 + +#define REG_A2XX_A225_GRAS_UCP0X 0x00002340 + +#define REG_A2XX_A225_GRAS_UCP5W 0x00002357 + +#define REG_A2XX_A225_GRAS_UCP_ENABLED 0x00002360 + +#define REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE 0x00002380 + +#define REG_A2XX_PA_SU_POLY_OFFSET_FRONT_OFFSET 0x00002381 + +#define REG_A2XX_PA_SU_POLY_OFFSET_BACK_SCALE 0x00002382 + +#define REG_A2XX_PA_SU_POLY_OFFSET_BACK_OFFSET 0x00002383 + +#define REG_A2XX_SQ_CONSTANT_0 0x00004000 + +#define REG_A2XX_SQ_FETCH_0 0x00004800 + +#define REG_A2XX_SQ_CF_BOOLEANS 0x00004900 + +#define REG_A2XX_SQ_CF_LOOP 0x00004908 + +#define REG_A2XX_COHER_SIZE_PM4 0x00000a29 + +#define REG_A2XX_COHER_BASE_PM4 0x00000a2a + +#define REG_A2XX_COHER_STATUS_PM4 0x00000a2b + +#define REG_A2XX_PA_SU_PERFCOUNTER0_SELECT 0x00000c88 + +#define REG_A2XX_PA_SU_PERFCOUNTER1_SELECT 0x00000c89 + +#define REG_A2XX_PA_SU_PERFCOUNTER2_SELECT 0x00000c8a + +#define REG_A2XX_PA_SU_PERFCOUNTER3_SELECT 0x00000c8b + +#define REG_A2XX_PA_SU_PERFCOUNTER0_LOW 0x00000c8c + +#define REG_A2XX_PA_SU_PERFCOUNTER0_HI 0x00000c8d + +#define REG_A2XX_PA_SU_PERFCOUNTER1_LOW 0x00000c8e + +#define REG_A2XX_PA_SU_PERFCOUNTER1_HI 0x00000c8f + +#define REG_A2XX_PA_SU_PERFCOUNTER2_LOW 0x00000c90 + +#define REG_A2XX_PA_SU_PERFCOUNTER2_HI 0x00000c91 + +#define REG_A2XX_PA_SU_PERFCOUNTER3_LOW 0x00000c92 + +#define REG_A2XX_PA_SU_PERFCOUNTER3_HI 0x00000c93 + +#define REG_A2XX_PA_SC_PERFCOUNTER0_SELECT 0x00000c98 + +#define REG_A2XX_PA_SC_PERFCOUNTER0_LOW 0x00000c99 + +#define REG_A2XX_PA_SC_PERFCOUNTER0_HI 0x00000c9a + +#define REG_A2XX_VGT_PERFCOUNTER0_SELECT 0x00000c48 + +#define REG_A2XX_VGT_PERFCOUNTER1_SELECT 0x00000c49 + +#define REG_A2XX_VGT_PERFCOUNTER2_SELECT 0x00000c4a + +#define REG_A2XX_VGT_PERFCOUNTER3_SELECT 0x00000c4b + +#define REG_A2XX_VGT_PERFCOUNTER0_LOW 0x00000c4c + +#define REG_A2XX_VGT_PERFCOUNTER1_LOW 0x00000c4e + +#define REG_A2XX_VGT_PERFCOUNTER2_LOW 0x00000c50 + +#define REG_A2XX_VGT_PERFCOUNTER3_LOW 0x00000c52 + +#define REG_A2XX_VGT_PERFCOUNTER0_HI 0x00000c4d + +#define REG_A2XX_VGT_PERFCOUNTER1_HI 0x00000c4f + +#define REG_A2XX_VGT_PERFCOUNTER2_HI 0x00000c51 + +#define REG_A2XX_VGT_PERFCOUNTER3_HI 0x00000c53 + +#define REG_A2XX_TCR_PERFCOUNTER0_SELECT 0x00000e05 + +#define REG_A2XX_TCR_PERFCOUNTER1_SELECT 0x00000e08 + +#define REG_A2XX_TCR_PERFCOUNTER0_HI 0x00000e06 + +#define REG_A2XX_TCR_PERFCOUNTER1_HI 0x00000e09 + +#define REG_A2XX_TCR_PERFCOUNTER0_LOW 0x00000e07 + +#define REG_A2XX_TCR_PERFCOUNTER1_LOW 0x00000e0a + +#define REG_A2XX_TP0_PERFCOUNTER0_SELECT 0x00000e1f + +#define REG_A2XX_TP0_PERFCOUNTER0_HI 0x00000e20 + +#define REG_A2XX_TP0_PERFCOUNTER0_LOW 0x00000e21 + +#define REG_A2XX_TP0_PERFCOUNTER1_SELECT 0x00000e22 + +#define REG_A2XX_TP0_PERFCOUNTER1_HI 0x00000e23 + +#define REG_A2XX_TP0_PERFCOUNTER1_LOW 0x00000e24 + +#define REG_A2XX_TCM_PERFCOUNTER0_SELECT 0x00000e54 + +#define REG_A2XX_TCM_PERFCOUNTER1_SELECT 
0x00000e57 + +#define REG_A2XX_TCM_PERFCOUNTER0_HI 0x00000e55 + +#define REG_A2XX_TCM_PERFCOUNTER1_HI 0x00000e58 + +#define REG_A2XX_TCM_PERFCOUNTER0_LOW 0x00000e56 + +#define REG_A2XX_TCM_PERFCOUNTER1_LOW 0x00000e59 + +#define REG_A2XX_TCF_PERFCOUNTER0_SELECT 0x00000e5a + +#define REG_A2XX_TCF_PERFCOUNTER1_SELECT 0x00000e5d + +#define REG_A2XX_TCF_PERFCOUNTER2_SELECT 0x00000e60 + +#define REG_A2XX_TCF_PERFCOUNTER3_SELECT 0x00000e63 + +#define REG_A2XX_TCF_PERFCOUNTER4_SELECT 0x00000e66 + +#define REG_A2XX_TCF_PERFCOUNTER5_SELECT 0x00000e69 + +#define REG_A2XX_TCF_PERFCOUNTER6_SELECT 0x00000e6c + +#define REG_A2XX_TCF_PERFCOUNTER7_SELECT 0x00000e6f + +#define REG_A2XX_TCF_PERFCOUNTER8_SELECT 0x00000e72 + +#define REG_A2XX_TCF_PERFCOUNTER9_SELECT 0x00000e75 + +#define REG_A2XX_TCF_PERFCOUNTER10_SELECT 0x00000e78 + +#define REG_A2XX_TCF_PERFCOUNTER11_SELECT 0x00000e7b + +#define REG_A2XX_TCF_PERFCOUNTER0_HI 0x00000e5b + +#define REG_A2XX_TCF_PERFCOUNTER1_HI 0x00000e5e + +#define REG_A2XX_TCF_PERFCOUNTER2_HI 0x00000e61 + +#define REG_A2XX_TCF_PERFCOUNTER3_HI 0x00000e64 + +#define REG_A2XX_TCF_PERFCOUNTER4_HI 0x00000e67 + +#define REG_A2XX_TCF_PERFCOUNTER5_HI 0x00000e6a + +#define REG_A2XX_TCF_PERFCOUNTER6_HI 0x00000e6d + +#define REG_A2XX_TCF_PERFCOUNTER7_HI 0x00000e70 + +#define REG_A2XX_TCF_PERFCOUNTER8_HI 0x00000e73 + +#define REG_A2XX_TCF_PERFCOUNTER9_HI 0x00000e76 + +#define REG_A2XX_TCF_PERFCOUNTER10_HI 0x00000e79 + +#define REG_A2XX_TCF_PERFCOUNTER11_HI 0x00000e7c + +#define REG_A2XX_TCF_PERFCOUNTER0_LOW 0x00000e5c + +#define REG_A2XX_TCF_PERFCOUNTER1_LOW 0x00000e5f + +#define REG_A2XX_TCF_PERFCOUNTER2_LOW 0x00000e62 + +#define REG_A2XX_TCF_PERFCOUNTER3_LOW 0x00000e65 + +#define REG_A2XX_TCF_PERFCOUNTER4_LOW 0x00000e68 + +#define REG_A2XX_TCF_PERFCOUNTER5_LOW 0x00000e6b + +#define REG_A2XX_TCF_PERFCOUNTER6_LOW 0x00000e6e + +#define REG_A2XX_TCF_PERFCOUNTER7_LOW 0x00000e71 + +#define REG_A2XX_TCF_PERFCOUNTER8_LOW 0x00000e74 + +#define REG_A2XX_TCF_PERFCOUNTER9_LOW 0x00000e77 + +#define REG_A2XX_TCF_PERFCOUNTER10_LOW 0x00000e7a + +#define REG_A2XX_TCF_PERFCOUNTER11_LOW 0x00000e7d + +#define REG_A2XX_SQ_PERFCOUNTER0_SELECT 0x00000dc8 + +#define REG_A2XX_SQ_PERFCOUNTER1_SELECT 0x00000dc9 + +#define REG_A2XX_SQ_PERFCOUNTER2_SELECT 0x00000dca + +#define REG_A2XX_SQ_PERFCOUNTER3_SELECT 0x00000dcb + +#define REG_A2XX_SQ_PERFCOUNTER0_LOW 0x00000dcc + +#define REG_A2XX_SQ_PERFCOUNTER0_HI 0x00000dcd + +#define REG_A2XX_SQ_PERFCOUNTER1_LOW 0x00000dce + +#define REG_A2XX_SQ_PERFCOUNTER1_HI 0x00000dcf + +#define REG_A2XX_SQ_PERFCOUNTER2_LOW 0x00000dd0 + +#define REG_A2XX_SQ_PERFCOUNTER2_HI 0x00000dd1 + +#define REG_A2XX_SQ_PERFCOUNTER3_LOW 0x00000dd2 + +#define REG_A2XX_SQ_PERFCOUNTER3_HI 0x00000dd3 + +#define REG_A2XX_SX_PERFCOUNTER0_SELECT 0x00000dd4 + +#define REG_A2XX_SX_PERFCOUNTER0_LOW 0x00000dd8 + +#define REG_A2XX_SX_PERFCOUNTER0_HI 0x00000dd9 + +#define REG_A2XX_MH_PERFCOUNTER0_SELECT 0x00000a46 + +#define REG_A2XX_MH_PERFCOUNTER1_SELECT 0x00000a4a + +#define REG_A2XX_MH_PERFCOUNTER0_CONFIG 0x00000a47 + +#define REG_A2XX_MH_PERFCOUNTER1_CONFIG 0x00000a4b + +#define REG_A2XX_MH_PERFCOUNTER0_LOW 0x00000a48 + +#define REG_A2XX_MH_PERFCOUNTER1_LOW 0x00000a4c + +#define REG_A2XX_MH_PERFCOUNTER0_HI 0x00000a49 + +#define REG_A2XX_MH_PERFCOUNTER1_HI 0x00000a4d + +#define REG_A2XX_RB_PERFCOUNTER0_SELECT 0x00000f04 + +#define REG_A2XX_RB_PERFCOUNTER1_SELECT 0x00000f05 + +#define REG_A2XX_RB_PERFCOUNTER2_SELECT 0x00000f06 + +#define REG_A2XX_RB_PERFCOUNTER3_SELECT 0x00000f07 + +#define 
REG_A2XX_RB_PERFCOUNTER0_LOW 0x00000f08 + +#define REG_A2XX_RB_PERFCOUNTER0_HI 0x00000f09 + +#define REG_A2XX_RB_PERFCOUNTER1_LOW 0x00000f0a + +#define REG_A2XX_RB_PERFCOUNTER1_HI 0x00000f0b + +#define REG_A2XX_RB_PERFCOUNTER2_LOW 0x00000f0c + +#define REG_A2XX_RB_PERFCOUNTER2_HI 0x00000f0d + +#define REG_A2XX_RB_PERFCOUNTER3_LOW 0x00000f0e + +#define REG_A2XX_RB_PERFCOUNTER3_HI 0x00000f0f + +#define REG_A2XX_SQ_TEX_0 0x00000000 +#define A2XX_SQ_TEX_0_TYPE__MASK 0x00000003 +#define A2XX_SQ_TEX_0_TYPE__SHIFT 0 +static inline uint32_t A2XX_SQ_TEX_0_TYPE(enum sq_tex_type val) +{ + return ((val) << A2XX_SQ_TEX_0_TYPE__SHIFT) & A2XX_SQ_TEX_0_TYPE__MASK; +} +#define A2XX_SQ_TEX_0_SIGN_X__MASK 0x0000000c +#define A2XX_SQ_TEX_0_SIGN_X__SHIFT 2 +static inline uint32_t A2XX_SQ_TEX_0_SIGN_X(enum sq_tex_sign val) +{ + return ((val) << A2XX_SQ_TEX_0_SIGN_X__SHIFT) & A2XX_SQ_TEX_0_SIGN_X__MASK; +} +#define A2XX_SQ_TEX_0_SIGN_Y__MASK 0x00000030 +#define A2XX_SQ_TEX_0_SIGN_Y__SHIFT 4 +static inline uint32_t A2XX_SQ_TEX_0_SIGN_Y(enum sq_tex_sign val) +{ + return ((val) << A2XX_SQ_TEX_0_SIGN_Y__SHIFT) & A2XX_SQ_TEX_0_SIGN_Y__MASK; +} +#define A2XX_SQ_TEX_0_SIGN_Z__MASK 0x000000c0 +#define A2XX_SQ_TEX_0_SIGN_Z__SHIFT 6 +static inline uint32_t A2XX_SQ_TEX_0_SIGN_Z(enum sq_tex_sign val) +{ + return ((val) << A2XX_SQ_TEX_0_SIGN_Z__SHIFT) & A2XX_SQ_TEX_0_SIGN_Z__MASK; +} +#define A2XX_SQ_TEX_0_SIGN_W__MASK 0x00000300 +#define A2XX_SQ_TEX_0_SIGN_W__SHIFT 8 +static inline uint32_t A2XX_SQ_TEX_0_SIGN_W(enum sq_tex_sign val) +{ + return ((val) << A2XX_SQ_TEX_0_SIGN_W__SHIFT) & A2XX_SQ_TEX_0_SIGN_W__MASK; +} +#define A2XX_SQ_TEX_0_CLAMP_X__MASK 0x00001c00 +#define A2XX_SQ_TEX_0_CLAMP_X__SHIFT 10 +static inline uint32_t A2XX_SQ_TEX_0_CLAMP_X(enum sq_tex_clamp val) +{ + return ((val) << A2XX_SQ_TEX_0_CLAMP_X__SHIFT) & A2XX_SQ_TEX_0_CLAMP_X__MASK; +} +#define A2XX_SQ_TEX_0_CLAMP_Y__MASK 0x0000e000 +#define A2XX_SQ_TEX_0_CLAMP_Y__SHIFT 13 +static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Y(enum sq_tex_clamp val) +{ + return ((val) << A2XX_SQ_TEX_0_CLAMP_Y__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Y__MASK; +} +#define A2XX_SQ_TEX_0_CLAMP_Z__MASK 0x00070000 +#define A2XX_SQ_TEX_0_CLAMP_Z__SHIFT 16 +static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Z(enum sq_tex_clamp val) +{ + return ((val) << A2XX_SQ_TEX_0_CLAMP_Z__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Z__MASK; +} +#define A2XX_SQ_TEX_0_PITCH__MASK 0x7fc00000 +#define A2XX_SQ_TEX_0_PITCH__SHIFT 22 +static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val) +{ + return ((val >> 5) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK; +} +#define A2XX_SQ_TEX_0_TILED 0x80000000 + +#define REG_A2XX_SQ_TEX_1 0x00000001 +#define A2XX_SQ_TEX_1_FORMAT__MASK 0x0000003f +#define A2XX_SQ_TEX_1_FORMAT__SHIFT 0 +static inline uint32_t A2XX_SQ_TEX_1_FORMAT(enum a2xx_sq_surfaceformat val) +{ + return ((val) << A2XX_SQ_TEX_1_FORMAT__SHIFT) & A2XX_SQ_TEX_1_FORMAT__MASK; +} +#define A2XX_SQ_TEX_1_ENDIANNESS__MASK 0x000000c0 +#define A2XX_SQ_TEX_1_ENDIANNESS__SHIFT 6 +static inline uint32_t A2XX_SQ_TEX_1_ENDIANNESS(enum sq_tex_endian val) +{ + return ((val) << A2XX_SQ_TEX_1_ENDIANNESS__SHIFT) & A2XX_SQ_TEX_1_ENDIANNESS__MASK; +} +#define A2XX_SQ_TEX_1_REQUEST_SIZE__MASK 0x00000300 +#define A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT 8 +static inline uint32_t A2XX_SQ_TEX_1_REQUEST_SIZE(uint32_t val) +{ + return ((val) << A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT) & A2XX_SQ_TEX_1_REQUEST_SIZE__MASK; +} +#define A2XX_SQ_TEX_1_STACKED 0x00000400 +#define A2XX_SQ_TEX_1_CLAMP_POLICY__MASK 0x00000800 +#define A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT 
11 +static inline uint32_t A2XX_SQ_TEX_1_CLAMP_POLICY(enum sq_tex_clamp_policy val) +{ + return ((val) << A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT) & A2XX_SQ_TEX_1_CLAMP_POLICY__MASK; +} +#define A2XX_SQ_TEX_1_BASE_ADDRESS__MASK 0xfffff000 +#define A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT 12 +static inline uint32_t A2XX_SQ_TEX_1_BASE_ADDRESS(uint32_t val) +{ + return ((val >> 12) << A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT) & A2XX_SQ_TEX_1_BASE_ADDRESS__MASK; +} + +#define REG_A2XX_SQ_TEX_2 0x00000002 +#define A2XX_SQ_TEX_2_WIDTH__MASK 0x00001fff +#define A2XX_SQ_TEX_2_WIDTH__SHIFT 0 +static inline uint32_t A2XX_SQ_TEX_2_WIDTH(uint32_t val) +{ + return ((val) << A2XX_SQ_TEX_2_WIDTH__SHIFT) & A2XX_SQ_TEX_2_WIDTH__MASK; +} +#define A2XX_SQ_TEX_2_HEIGHT__MASK 0x03ffe000 +#define A2XX_SQ_TEX_2_HEIGHT__SHIFT 13 +static inline uint32_t A2XX_SQ_TEX_2_HEIGHT(uint32_t val) +{ + return ((val) << A2XX_SQ_TEX_2_HEIGHT__SHIFT) & A2XX_SQ_TEX_2_HEIGHT__MASK; +} +#define A2XX_SQ_TEX_2_DEPTH__MASK 0xfc000000 +#define A2XX_SQ_TEX_2_DEPTH__SHIFT 26 +static inline uint32_t A2XX_SQ_TEX_2_DEPTH(uint32_t val) +{ + return ((val) << A2XX_SQ_TEX_2_DEPTH__SHIFT) & A2XX_SQ_TEX_2_DEPTH__MASK; +} + +#define REG_A2XX_SQ_TEX_3 0x00000003 +#define A2XX_SQ_TEX_3_NUM_FORMAT__MASK 0x00000001 +#define A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT 0 +static inline uint32_t A2XX_SQ_TEX_3_NUM_FORMAT(enum sq_tex_num_format val) +{ + return ((val) << A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT) & A2XX_SQ_TEX_3_NUM_FORMAT__MASK; +} +#define A2XX_SQ_TEX_3_SWIZ_X__MASK 0x0000000e +#define A2XX_SQ_TEX_3_SWIZ_X__SHIFT 1 +static inline uint32_t A2XX_SQ_TEX_3_SWIZ_X(enum sq_tex_swiz val) +{ + return ((val) << A2XX_SQ_TEX_3_SWIZ_X__SHIFT) & A2XX_SQ_TEX_3_SWIZ_X__MASK; +} +#define A2XX_SQ_TEX_3_SWIZ_Y__MASK 0x00000070 +#define A2XX_SQ_TEX_3_SWIZ_Y__SHIFT 4 +static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Y(enum sq_tex_swiz val) +{ + return ((val) << A2XX_SQ_TEX_3_SWIZ_Y__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Y__MASK; +} +#define A2XX_SQ_TEX_3_SWIZ_Z__MASK 0x00000380 +#define A2XX_SQ_TEX_3_SWIZ_Z__SHIFT 7 +static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Z(enum sq_tex_swiz val) +{ + return ((val) << A2XX_SQ_TEX_3_SWIZ_Z__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Z__MASK; +} +#define A2XX_SQ_TEX_3_SWIZ_W__MASK 0x00001c00 +#define A2XX_SQ_TEX_3_SWIZ_W__SHIFT 10 +static inline uint32_t A2XX_SQ_TEX_3_SWIZ_W(enum sq_tex_swiz val) +{ + return ((val) << A2XX_SQ_TEX_3_SWIZ_W__SHIFT) & A2XX_SQ_TEX_3_SWIZ_W__MASK; +} +#define A2XX_SQ_TEX_3_EXP_ADJUST__MASK 0x0007e000 +#define A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT 13 +static inline uint32_t A2XX_SQ_TEX_3_EXP_ADJUST(int32_t val) +{ + return ((val) << A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT) & A2XX_SQ_TEX_3_EXP_ADJUST__MASK; +} +#define A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK 0x00180000 +#define A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT 19 +static inline uint32_t A2XX_SQ_TEX_3_XY_MAG_FILTER(enum sq_tex_filter val) +{ + return ((val) << A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK; +} +#define A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK 0x00600000 +#define A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT 21 +static inline uint32_t A2XX_SQ_TEX_3_XY_MIN_FILTER(enum sq_tex_filter val) +{ + return ((val) << A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK; +} +#define A2XX_SQ_TEX_3_MIP_FILTER__MASK 0x01800000 +#define A2XX_SQ_TEX_3_MIP_FILTER__SHIFT 23 +static inline uint32_t A2XX_SQ_TEX_3_MIP_FILTER(enum sq_tex_filter val) +{ + return ((val) << A2XX_SQ_TEX_3_MIP_FILTER__SHIFT) & A2XX_SQ_TEX_3_MIP_FILTER__MASK; +} +#define A2XX_SQ_TEX_3_ANISO_FILTER__MASK 0x0e000000 +#define 
A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT 25 +static inline uint32_t A2XX_SQ_TEX_3_ANISO_FILTER(enum sq_tex_aniso_filter val) +{ + return ((val) << A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT) & A2XX_SQ_TEX_3_ANISO_FILTER__MASK; +} +#define A2XX_SQ_TEX_3_BORDER_SIZE__MASK 0x80000000 +#define A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT 31 +static inline uint32_t A2XX_SQ_TEX_3_BORDER_SIZE(uint32_t val) +{ + return ((val) << A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT) & A2XX_SQ_TEX_3_BORDER_SIZE__MASK; +} + +#define REG_A2XX_SQ_TEX_4 0x00000004 +#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK 0x00000001 +#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT 0 +static inline uint32_t A2XX_SQ_TEX_4_VOL_MAG_FILTER(enum sq_tex_filter val) +{ + return ((val) << A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK; +} +#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK 0x00000002 +#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT 1 +static inline uint32_t A2XX_SQ_TEX_4_VOL_MIN_FILTER(enum sq_tex_filter val) +{ + return ((val) << A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK; +} +#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK 0x0000003c +#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT 2 +static inline uint32_t A2XX_SQ_TEX_4_MIP_MIN_LEVEL(uint32_t val) +{ + return ((val) << A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK; +} +#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK 0x000003c0 +#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT 6 +static inline uint32_t A2XX_SQ_TEX_4_MIP_MAX_LEVEL(uint32_t val) +{ + return ((val) << A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK; +} +#define A2XX_SQ_TEX_4_MAX_ANISO_WALK 0x00000400 +#define A2XX_SQ_TEX_4_MIN_ANISO_WALK 0x00000800 +#define A2XX_SQ_TEX_4_LOD_BIAS__MASK 0x003ff000 +#define A2XX_SQ_TEX_4_LOD_BIAS__SHIFT 12 +static inline uint32_t A2XX_SQ_TEX_4_LOD_BIAS(float val) +{ + return ((((int32_t)(val * 32.0))) << A2XX_SQ_TEX_4_LOD_BIAS__SHIFT) & A2XX_SQ_TEX_4_LOD_BIAS__MASK; +} +#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK 0x07c00000 +#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT 22 +static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H(uint32_t val) +{ + return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK; +} +#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK 0xf8000000 +#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT 27 +static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V(uint32_t val) +{ + return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK; +} + +#define REG_A2XX_SQ_TEX_5 0x00000005 +#define A2XX_SQ_TEX_5_BORDER_COLOR__MASK 0x00000003 +#define A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT 0 +static inline uint32_t A2XX_SQ_TEX_5_BORDER_COLOR(enum sq_tex_border_color val) +{ + return ((val) << A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT) & A2XX_SQ_TEX_5_BORDER_COLOR__MASK; +} +#define A2XX_SQ_TEX_5_FORCE_BCW_MAX 0x00000004 +#define A2XX_SQ_TEX_5_TRI_CLAMP__MASK 0x00000018 +#define A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT 3 +static inline uint32_t A2XX_SQ_TEX_5_TRI_CLAMP(uint32_t val) +{ + return ((val) << A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT) & A2XX_SQ_TEX_5_TRI_CLAMP__MASK; +} +#define A2XX_SQ_TEX_5_ANISO_BIAS__MASK 0x000001e0 +#define A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT 5 +static inline uint32_t A2XX_SQ_TEX_5_ANISO_BIAS(float val) +{ + return ((((int32_t)(val * 1.0))) << A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT) & A2XX_SQ_TEX_5_ANISO_BIAS__MASK; +} +#define A2XX_SQ_TEX_5_DIMENSION__MASK 0x00000600 +#define A2XX_SQ_TEX_5_DIMENSION__SHIFT 9 +static inline uint32_t A2XX_SQ_TEX_5_DIMENSION(enum sq_tex_dimension 
val)
+{
+	return ((val) << A2XX_SQ_TEX_5_DIMENSION__SHIFT) & A2XX_SQ_TEX_5_DIMENSION__MASK;
+}
+#define A2XX_SQ_TEX_5_PACKED_MIPS	0x00000800
+#define A2XX_SQ_TEX_5_MIP_ADDRESS__MASK	0xfffff000
+#define A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT	12
+static inline uint32_t A2XX_SQ_TEX_5_MIP_ADDRESS(uint32_t val)
+{
+	return ((val >> 12) << A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT) & A2XX_SQ_TEX_5_MIP_ADDRESS__MASK;
+}
+
+#endif /* A2XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
new file mode 100644
index 000000000..2428d6ac5
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include "a2xx_gpu.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+extern bool hang_debug;
+
+static void a2xx_dump(struct msm_gpu *gpu);
+static bool a2xx_idle(struct msm_gpu *gpu);
+
+static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+	struct msm_ringbuffer *ring = submit->ring;
+	unsigned int i;
+
+	for (i = 0; i < submit->nr_cmds; i++) {
+		switch (submit->cmd[i].type) {
+		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+			/* ignore IB-targets */
+			break;
+		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+			/* ignore if there has not been a ctx switch: */
+			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+				break;
+			fallthrough;
+		case MSM_SUBMIT_CMD_BUF:
+			OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
+			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+			OUT_RING(ring, submit->cmd[i].size);
+			OUT_PKT2(ring);
+			break;
+		}
+	}
+
+	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
+	OUT_RING(ring, submit->seqno);
+
+	/* wait for idle before cache flush/interrupt */
+	OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+	OUT_RING(ring, 0x00000000);
+
+	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+	OUT_RING(ring, CACHE_FLUSH_TS);
+	OUT_RING(ring, rbmemptr(ring, fence));
+	OUT_RING(ring, submit->seqno);
+	OUT_PKT3(ring, CP_INTERRUPT, 1);
+	OUT_RING(ring, 0x80000000);
+
+	adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
+}
+
+static bool a2xx_me_init(struct msm_gpu *gpu)
+{
+	struct msm_ringbuffer *ring = gpu->rb[0];
+
+	OUT_PKT3(ring, CP_ME_INIT, 18);
+
+	/* All fields present (bits 9:0) */
+	OUT_RING(ring, 0x000003ff);
+	/* Disable/Enable Real-Time Stream processing (present but ignored) */
+	OUT_RING(ring, 0x00000000);
+	/* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
+	OUT_RING(ring, 0x00000000);
+
+	OUT_RING(ring, REG_A2XX_RB_SURFACE_INFO - 0x2000);
+	OUT_RING(ring, REG_A2XX_PA_SC_WINDOW_OFFSET - 0x2000);
+	OUT_RING(ring, REG_A2XX_VGT_MAX_VTX_INDX - 0x2000);
+	OUT_RING(ring, REG_A2XX_SQ_PROGRAM_CNTL - 0x2000);
+	OUT_RING(ring, REG_A2XX_RB_DEPTHCONTROL - 0x2000);
+	OUT_RING(ring, REG_A2XX_PA_SU_POINT_SIZE - 0x2000);
+	OUT_RING(ring, REG_A2XX_PA_SC_LINE_CNTL - 0x2000);
+	OUT_RING(ring, REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE - 0x2000);
+
+	/* Vertex and Pixel Shader Start Addresses in instructions
+	 * (3 DWORDS per instruction) */
+	OUT_RING(ring, 0x80000180);
+	/* Maximum Contexts */
+	OUT_RING(ring, 0x00000001);
+	/* Write Confirm Interval: the CP waits
+	 * wait_interval * 16 clocks between polls */
+	OUT_RING(ring, 0x00000000);
+	/* NQ and External Memory Swap */
+	OUT_RING(ring, 0x00000000);
+	/* protected mode error checking (0x1f2 is REG_AXXX_CP_INT_CNTL) */
+	OUT_RING(ring, 0x200001f2);
+	/* Disable header dumping and Header dump address */
+	OUT_RING(ring, 0x00000000);
+	/* Header dump size */
+	OUT_RING(ring, 0x00000000);
+
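+	/*
+	 * Note on the 0x200001f2 payload word above: it points protected
+	 * mode error checking at REG_AXXX_CP_INT_CNTL, so once protected
+	 * mode is enabled below, ring writes to registers outside the
+	 * allowed range should raise the PROTECTED_MODE_ERROR irq that
+	 * a2xx_hw_init() unmasks.
+	 */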
+	/* enable protected mode */
+	OUT_PKT3(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 1);
+
+	adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
+	return a2xx_idle(gpu);
+}
+
+static int a2xx_hw_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	dma_addr_t pt_base, tran_error;
+	uint32_t *ptr, len;
+	int i, ret;
+
+	msm_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
+
+	DBG("%s", gpu->name);
+
+	/* halt ME to avoid ucode upload issues on a20x */
+	gpu_write(gpu, REG_AXXX_CP_ME_CNTL, AXXX_CP_ME_CNTL_HALT);
+
+	gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0xfffffffe);
+	gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0xffffffff);
+
+	/* note: kgsl uses 0x00000001 after first reset on a22x */
+	gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0xffffffff);
+	msleep(30);
+	gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0x00000000);
+
+	if (adreno_is_a225(adreno_gpu))
+		gpu_write(gpu, REG_A2XX_SQ_FLOW_CONTROL, 0x18000000);
+
+	/* note: kgsl uses 0x0000ffff for a20x */
+	gpu_write(gpu, REG_A2XX_RBBM_CNTL, 0x00004442);
+
+	/* MPU: physical range */
+	gpu_write(gpu, REG_A2XX_MH_MMU_MPU_BASE, 0x00000000);
+	gpu_write(gpu, REG_A2XX_MH_MMU_MPU_END, 0xfffff000);
+
+	gpu_write(gpu, REG_A2XX_MH_MMU_CONFIG, A2XX_MH_MMU_CONFIG_MMU_ENABLE |
+		A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+		A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+		A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+		A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+		A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+		A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+		A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+		A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+		A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+		A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+		A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(BEH_TRAN_RNG));
+
+	/* same as parameters in adreno_gpu */
+	gpu_write(gpu, REG_A2XX_MH_MMU_VA_RANGE, SZ_16M |
+		A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(0xfff));
+
+	gpu_write(gpu, REG_A2XX_MH_MMU_PT_BASE, pt_base);
+	gpu_write(gpu, REG_A2XX_MH_MMU_TRAN_ERROR, tran_error);
+
+	gpu_write(gpu, REG_A2XX_MH_MMU_INVALIDATE,
+		A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+		A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+
+	gpu_write(gpu, REG_A2XX_MH_ARBITER_CONFIG,
+		A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(16) |
+		A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE |
+		A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE |
+		A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(1) |
+		A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE |
+		A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE |
+		A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE |
+		A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(8) |
+		A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE |
+		A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE |
+		A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE |
+		A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE |
+		A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE);
+	if (!adreno_is_a20x(adreno_gpu))
+		gpu_write(gpu, REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1, 0x00032f07);
+
+	gpu_write(gpu, REG_A2XX_SQ_VS_PROGRAM, 0x00000000);
+	gpu_write(gpu, REG_A2XX_SQ_PS_PROGRAM, 0x00000000);
+
+	gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0); /* 0x200 for msm8960? */
+	gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0); /* 0x80/0x1a0 for a22x? */
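+	/*
+	 * The all-ones PM_OVERRIDE writes earlier in this function appear
+	 * to force clocks on across the soft reset; clearing them here
+	 * hands power management back to the hardware.
+	 */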
+
+	/* note: gsl doesn't set this */
+	gpu_write(gpu, REG_A2XX_RBBM_DEBUG, 0x00080000);
+
+	gpu_write(gpu, REG_A2XX_RBBM_INT_CNTL,
+		A2XX_RBBM_INT_CNTL_RDERR_INT_MASK);
+	gpu_write(gpu, REG_AXXX_CP_INT_CNTL,
+		AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK |
+		AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK |
+		AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK |
+		AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK |
+		AXXX_CP_INT_CNTL_IB_ERROR_MASK |
+		AXXX_CP_INT_CNTL_IB1_INT_MASK |
+		AXXX_CP_INT_CNTL_RB_INT_MASK);
+	gpu_write(gpu, REG_A2XX_SQ_INT_CNTL, 0);
+	gpu_write(gpu, REG_A2XX_MH_INTERRUPT_MASK,
+		A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR |
+		A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR |
+		A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT);
+
+	/* GMEM size code: i such that SZ_16K << i == gmem (3/4/5 => 128K/256K/512K) */
+	for (i = 3; i <= 5; i++)
+		if ((SZ_16K << i) == adreno_gpu->gmem)
+			break;
+	gpu_write(gpu, REG_A2XX_RB_EDRAM_INFO, i);
+
+	ret = adreno_hw_init(gpu);
+	if (ret)
+		return ret;
+
+	gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+	gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
+	/* NOTE: PM4/micro-engine firmware registers look to be the same
+	 * for a2xx and a3xx.. we could possibly push that part down to
+	 * adreno_gpu base class. Or push both PM4 and PFP but
+	 * parameterize the pfp ucode addr/data registers..
+	 */
+
+	/* Load PM4: */
+	ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
+	len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
+	DBG("loading PM4 ucode version: %x", ptr[1]);
+
+	gpu_write(gpu, REG_AXXX_CP_DEBUG,
+		AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
+	gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
+	for (i = 1; i < len; i++)
+		gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
+
+	/* Load PFP: */
+	ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
+	len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
+	DBG("loading PFP ucode version: %x", ptr[5]);
+
+	gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_ADDR, 0);
+	for (i = 1; i < len; i++)
+		gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_DATA, ptr[i]);
+
+	gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x000C0804);
+
+	/* clear ME_HALT to start micro engine */
+	gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
+
+	return a2xx_me_init(gpu) ? 0 : -EINVAL;
+}
+
+static void a2xx_recover(struct msm_gpu *gpu)
+{
+	int i;
+
+	adreno_dump_info(gpu);
+
+	for (i = 0; i < 8; i++) {
+		printk("CP_SCRATCH_REG%d: %u\n", i,
+			gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
+	}
+
+	/* dump registers before resetting gpu, if enabled: */
+	if (hang_debug)
+		a2xx_dump(gpu);
+
+	gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 1);
+	gpu_read(gpu, REG_A2XX_RBBM_SOFT_RESET);
+	gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0);
+	adreno_recover(gpu);
+}
+
+static void a2xx_destroy(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a2xx_gpu *a2xx_gpu = to_a2xx_gpu(adreno_gpu);
+
+	DBG("%s", gpu->name);
+
+	adreno_gpu_cleanup(adreno_gpu);
+
+	kfree(a2xx_gpu);
+}
+
+static bool a2xx_idle(struct msm_gpu *gpu)
+{
+	/* wait for ringbuffer to drain: */
+	if (!adreno_idle(gpu, gpu->rb[0]))
+		return false;
+
+	/* then wait for GPU to finish: */
+	if (spin_until(!(gpu_read(gpu, REG_A2XX_RBBM_STATUS) &
+			A2XX_RBBM_STATUS_GUI_ACTIVE))) {
+		DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+
+		/* TODO maybe we need to reset GPU here to recover from hang? 
*/ + return false; + } + + return true; +} + +static irqreturn_t a2xx_irq(struct msm_gpu *gpu) +{ + uint32_t mstatus, status; + + mstatus = gpu_read(gpu, REG_A2XX_MASTER_INT_SIGNAL); + + if (mstatus & A2XX_MASTER_INT_SIGNAL_MH_INT_STAT) { + status = gpu_read(gpu, REG_A2XX_MH_INTERRUPT_STATUS); + + dev_warn(gpu->dev->dev, "MH_INT: %08X\n", status); + dev_warn(gpu->dev->dev, "MMU_PAGE_FAULT: %08X\n", + gpu_read(gpu, REG_A2XX_MH_MMU_PAGE_FAULT)); + + gpu_write(gpu, REG_A2XX_MH_INTERRUPT_CLEAR, status); + } + + if (mstatus & A2XX_MASTER_INT_SIGNAL_CP_INT_STAT) { + status = gpu_read(gpu, REG_AXXX_CP_INT_STATUS); + + /* only RB_INT is expected */ + if (status & ~AXXX_CP_INT_CNTL_RB_INT_MASK) + dev_warn(gpu->dev->dev, "CP_INT: %08X\n", status); + + gpu_write(gpu, REG_AXXX_CP_INT_ACK, status); + } + + if (mstatus & A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT) { + status = gpu_read(gpu, REG_A2XX_RBBM_INT_STATUS); + + dev_warn(gpu->dev->dev, "RBBM_INT: %08X\n", status); + + gpu_write(gpu, REG_A2XX_RBBM_INT_ACK, status); + } + + msm_gpu_retire(gpu); + + return IRQ_HANDLED; +} + +static const unsigned int a200_registers[] = { + 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044, + 0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9, + 0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7, + 0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5, + 0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444, + 0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B, + 0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0, + 0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614, + 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A43, 0x0A45, 0x0A45, + 0x0A4E, 0x0A4F, 0x0C2C, 0x0C2C, 0x0C30, 0x0C30, 0x0C38, 0x0C3C, + 0x0C40, 0x0C40, 0x0C44, 0x0C44, 0x0C80, 0x0C86, 0x0C88, 0x0C94, + 0x0C99, 0x0C9A, 0x0CA4, 0x0CA5, 0x0D00, 0x0D03, 0x0D06, 0x0D06, + 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4, + 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E, + 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7, + 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x0F0C, 0x0F0C, 0x0F0E, 0x0F12, + 0x0F26, 0x0F2A, 0x0F2C, 0x0F2C, 0x2000, 0x2002, 0x2006, 0x200F, + 0x2080, 0x2082, 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184, + 0x21F5, 0x21F7, 0x2200, 0x2208, 0x2280, 0x2283, 0x2293, 0x2294, + 0x2300, 0x2308, 0x2312, 0x2312, 0x2316, 0x231D, 0x2324, 0x2326, + 0x2380, 0x2383, 0x2400, 0x2402, 0x2406, 0x240F, 0x2480, 0x2482, + 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7, + 0x2600, 0x2608, 0x2680, 0x2683, 0x2693, 0x2694, 0x2700, 0x2708, + 0x2712, 0x2712, 0x2716, 0x271D, 0x2724, 0x2726, 0x2780, 0x2783, + 0x4000, 0x4003, 0x4800, 0x4805, 0x4900, 0x4900, 0x4908, 0x4908, + ~0 /* sentinel */ +}; + +static const unsigned int a220_registers[] = { + 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044, + 0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9, + 0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7, + 0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5, + 0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444, + 0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B, + 0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0, + 0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614, + 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A40, 0x0A42, 0x0A43, + 0x0A45, 0x0A45, 0x0A4E, 0x0A4F, 0x0C30, 0x0C30, 0x0C38, 0x0C39, + 0x0C3C, 0x0C3C, 0x0C80, 0x0C81, 0x0C88, 
0x0C93, 0x0D00, 0x0D03, + 0x0D05, 0x0D06, 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, + 0x0DC8, 0x0DD4, 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, + 0x0E17, 0x0E1E, 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, + 0x0ED4, 0x0ED7, 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x2002, + 0x2006, 0x200F, 0x2080, 0x2082, 0x2100, 0x2102, 0x2104, 0x2109, + 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7, 0x2200, 0x2202, + 0x2204, 0x2204, 0x2208, 0x2208, 0x2280, 0x2282, 0x2294, 0x2294, + 0x2300, 0x2308, 0x2309, 0x230A, 0x2312, 0x2312, 0x2316, 0x2316, + 0x2318, 0x231D, 0x2324, 0x2326, 0x2380, 0x2383, 0x2400, 0x2402, + 0x2406, 0x240F, 0x2480, 0x2482, 0x2500, 0x2502, 0x2504, 0x2509, + 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7, 0x2600, 0x2602, + 0x2604, 0x2606, 0x2608, 0x2608, 0x2680, 0x2682, 0x2694, 0x2694, + 0x2700, 0x2708, 0x2712, 0x2712, 0x2716, 0x2716, 0x2718, 0x271D, + 0x2724, 0x2726, 0x2780, 0x2783, 0x4000, 0x4003, 0x4800, 0x4805, + 0x4900, 0x4900, 0x4908, 0x4908, + ~0 /* sentinel */ +}; + +static const unsigned int a225_registers[] = { + 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044, + 0x0046, 0x0047, 0x013C, 0x013C, 0x0140, 0x014F, 0x01C0, 0x01C1, + 0x01C3, 0x01C8, 0x01D5, 0x01D9, 0x01DC, 0x01DD, 0x01EA, 0x01EA, + 0x01EE, 0x01F3, 0x01F6, 0x01F7, 0x01FC, 0x01FF, 0x0391, 0x0392, + 0x039B, 0x039E, 0x03B2, 0x03B5, 0x03B7, 0x03B7, 0x03F8, 0x03FB, + 0x0440, 0x0440, 0x0443, 0x0444, 0x044B, 0x044B, 0x044D, 0x044F, + 0x0452, 0x0452, 0x0454, 0x045B, 0x047F, 0x047F, 0x0578, 0x0587, + 0x05C9, 0x05C9, 0x05D0, 0x05D0, 0x0601, 0x0604, 0x0606, 0x0609, + 0x060B, 0x060E, 0x0613, 0x0614, 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, + 0x0A40, 0x0A40, 0x0A42, 0x0A43, 0x0A45, 0x0A45, 0x0A4E, 0x0A4F, + 0x0C01, 0x0C1D, 0x0C30, 0x0C30, 0x0C38, 0x0C39, 0x0C3C, 0x0C3C, + 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03, 0x0D05, 0x0D06, + 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4, + 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E, + 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7, + 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x200F, 0x2080, 0x2082, + 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7, + 0x2200, 0x2202, 0x2204, 0x2206, 0x2208, 0x2210, 0x2220, 0x2222, + 0x2280, 0x2282, 0x2294, 0x2294, 0x2297, 0x2297, 0x2300, 0x230A, + 0x2312, 0x2312, 0x2315, 0x2316, 0x2318, 0x231D, 0x2324, 0x2326, + 0x2340, 0x2357, 0x2360, 0x2360, 0x2380, 0x2383, 0x2400, 0x240F, + 0x2480, 0x2482, 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584, + 0x25F5, 0x25F7, 0x2600, 0x2602, 0x2604, 0x2606, 0x2608, 0x2610, + 0x2620, 0x2622, 0x2680, 0x2682, 0x2694, 0x2694, 0x2697, 0x2697, + 0x2700, 0x270A, 0x2712, 0x2712, 0x2715, 0x2716, 0x2718, 0x271D, + 0x2724, 0x2726, 0x2740, 0x2757, 0x2760, 0x2760, 0x2780, 0x2783, + 0x4000, 0x4003, 0x4800, 0x4806, 0x4808, 0x4808, 0x4900, 0x4900, + 0x4908, 0x4908, + ~0 /* sentinel */ +}; + +/* would be nice to not have to duplicate the _show() stuff with printk(): */ +static void a2xx_dump(struct msm_gpu *gpu) +{ + printk("status: %08x\n", + gpu_read(gpu, REG_A2XX_RBBM_STATUS)); + adreno_dump(gpu); +} + +static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu) +{ + struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL); + + if (!state) + return ERR_PTR(-ENOMEM); + + adreno_gpu_state_get(gpu, state); + + state->rbbm_status = gpu_read(gpu, REG_A2XX_RBBM_STATUS); + + return state; +} + +static struct msm_gem_address_space * +a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) +{ + struct msm_mmu *mmu = 
msm_gpummu_new(&pdev->dev, gpu); + struct msm_gem_address_space *aspace; + + aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M, + 0xfff * SZ_64K); + + if (IS_ERR(aspace) && !IS_ERR(mmu)) + mmu->funcs->destroy(mmu); + + return aspace; +} + +static u32 a2xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR); + return ring->memptrs->rptr; +} + +static const struct adreno_gpu_funcs funcs = { + .base = { + .get_param = adreno_get_param, + .set_param = adreno_set_param, + .hw_init = a2xx_hw_init, + .pm_suspend = msm_gpu_pm_suspend, + .pm_resume = msm_gpu_pm_resume, + .recover = a2xx_recover, + .submit = a2xx_submit, + .active_ring = adreno_active_ring, + .irq = a2xx_irq, + .destroy = a2xx_destroy, +#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) + .show = adreno_show, +#endif + .gpu_state_get = a2xx_gpu_state_get, + .gpu_state_put = adreno_gpu_state_put, + .create_address_space = a2xx_create_address_space, + .get_rptr = a2xx_get_rptr, + }, +}; + +static const struct msm_gpu_perfcntr perfcntrs[] = { +/* TODO */ +}; + +struct msm_gpu *a2xx_gpu_init(struct drm_device *dev) +{ + struct a2xx_gpu *a2xx_gpu = NULL; + struct adreno_gpu *adreno_gpu; + struct msm_gpu *gpu; + struct msm_drm_private *priv = dev->dev_private; + struct platform_device *pdev = priv->gpu_pdev; + int ret; + + if (!pdev) { + dev_err(dev->dev, "no a2xx device\n"); + ret = -ENXIO; + goto fail; + } + + a2xx_gpu = kzalloc(sizeof(*a2xx_gpu), GFP_KERNEL); + if (!a2xx_gpu) { + ret = -ENOMEM; + goto fail; + } + + adreno_gpu = &a2xx_gpu->base; + gpu = &adreno_gpu->base; + + gpu->perfcntrs = perfcntrs; + gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs); + + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); + if (ret) + goto fail; + + if (adreno_is_a20x(adreno_gpu)) + adreno_gpu->registers = a200_registers; + else if (adreno_is_a225(adreno_gpu)) + adreno_gpu->registers = a225_registers; + else + adreno_gpu->registers = a220_registers; + + if (!gpu->aspace) { + dev_err(dev->dev, "No memory protection without MMU\n"); + if (!allow_vram_carveout) { + ret = -ENXIO; + goto fail; + } + } + + return gpu; + +fail: + if (a2xx_gpu) + a2xx_destroy(&a2xx_gpu->base.base); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.h b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h new file mode 100644 index 000000000..02fba2cb8 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */ + +#ifndef __A2XX_GPU_H__ +#define __A2XX_GPU_H__ + +#include "adreno_gpu.h" + +/* arrg, somehow fb.h is getting pulled in: */ +#undef ROP_COPY +#undef ROP_XOR + +#include "a2xx.xml.h" + +struct a2xx_gpu { + struct adreno_gpu base; + bool pm_enabled; +}; +#define to_a2xx_gpu(x) container_of(x, struct a2xx_gpu, base) + +#endif /* __A2XX_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h new file mode 100644 index 000000000..520ae3f37 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h @@ -0,0 +1,3247 @@ +#ifndef A3XX_XML +#define A3XX_XML + +/* Autogenerated file, DO NOT EDIT manually! 
+ +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://github.com/freedreno/envytools/ +git clone https://github.com/freedreno/envytools.git + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56) + +Copyright (C) 2013-2021 by the following authors: +- Rob Clark (robclark) +- Ilia Mirkin (imirkin) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + + +enum a3xx_tile_mode { + LINEAR = 0, + TILE_4X4 = 1, + TILE_32X32 = 2, + TILE_4X2 = 3, +}; + +enum a3xx_state_block_id { + HLSQ_BLOCK_ID_TP_TEX = 2, + HLSQ_BLOCK_ID_TP_MIPMAP = 3, + HLSQ_BLOCK_ID_SP_VS = 4, + HLSQ_BLOCK_ID_SP_FS = 6, +}; + +enum a3xx_cache_opcode { + INVALIDATE = 1, +}; + +enum a3xx_vtx_fmt { + VFMT_32_FLOAT = 0, + VFMT_32_32_FLOAT = 1, + VFMT_32_32_32_FLOAT = 2, + VFMT_32_32_32_32_FLOAT = 3, + VFMT_16_FLOAT = 4, + VFMT_16_16_FLOAT = 5, + VFMT_16_16_16_FLOAT = 6, + VFMT_16_16_16_16_FLOAT = 7, + VFMT_32_FIXED = 8, + VFMT_32_32_FIXED = 9, + VFMT_32_32_32_FIXED = 10, + VFMT_32_32_32_32_FIXED = 11, + VFMT_16_SINT = 16, + VFMT_16_16_SINT = 17, + VFMT_16_16_16_SINT = 18, + VFMT_16_16_16_16_SINT = 19, + VFMT_16_UINT = 20, + VFMT_16_16_UINT = 21, + VFMT_16_16_16_UINT = 22, + VFMT_16_16_16_16_UINT = 23, + VFMT_16_SNORM = 24, + VFMT_16_16_SNORM = 25, + VFMT_16_16_16_SNORM = 26, + VFMT_16_16_16_16_SNORM = 27, + VFMT_16_UNORM = 28, + VFMT_16_16_UNORM = 29, + VFMT_16_16_16_UNORM = 30, + VFMT_16_16_16_16_UNORM = 31, + VFMT_32_UINT = 32, + VFMT_32_32_UINT = 33, + VFMT_32_32_32_UINT = 34, + VFMT_32_32_32_32_UINT = 35, + VFMT_32_SINT = 36, + VFMT_32_32_SINT = 37, + VFMT_32_32_32_SINT = 38, + VFMT_32_32_32_32_SINT = 39, + VFMT_8_UINT = 40, + VFMT_8_8_UINT = 41, + VFMT_8_8_8_UINT = 42, + VFMT_8_8_8_8_UINT = 43, + VFMT_8_UNORM = 44, + VFMT_8_8_UNORM = 45, + VFMT_8_8_8_UNORM = 46, + VFMT_8_8_8_8_UNORM = 47, + VFMT_8_SINT = 48, + VFMT_8_8_SINT = 49, + VFMT_8_8_8_SINT = 50, + VFMT_8_8_8_8_SINT = 51, + VFMT_8_SNORM = 52, + VFMT_8_8_SNORM = 53, + VFMT_8_8_8_SNORM = 54, + VFMT_8_8_8_8_SNORM = 55, + VFMT_10_10_10_2_UINT = 56, + VFMT_10_10_10_2_UNORM = 57, + VFMT_10_10_10_2_SINT = 58, + VFMT_10_10_10_2_SNORM = 59, + VFMT_2_10_10_10_UINT = 60, + VFMT_2_10_10_10_UNORM = 61, + VFMT_2_10_10_10_SINT = 62, + VFMT_2_10_10_10_SNORM = 63, + VFMT_NONE = 255, +}; + +enum a3xx_tex_fmt { + TFMT_5_6_5_UNORM = 4, + TFMT_5_5_5_1_UNORM = 5, + TFMT_4_4_4_4_UNORM = 7, + TFMT_Z16_UNORM = 9, + TFMT_X8Z24_UNORM = 10, + TFMT_Z32_FLOAT = 11, + TFMT_UV_64X32 = 16, + TFMT_VU_64X32 = 17, + TFMT_Y_64X32 = 18, + TFMT_NV12_64X32 = 19, + TFMT_UV_LINEAR = 20, + TFMT_VU_LINEAR = 21, + TFMT_Y_LINEAR = 22, + TFMT_NV12_LINEAR = 23, + TFMT_I420_Y = 24, + TFMT_I420_U = 26, + TFMT_I420_V = 27, + TFMT_ATC_RGB = 32, + TFMT_ATC_RGBA_EXPLICIT = 33, + TFMT_ETC1 = 34, + TFMT_ATC_RGBA_INTERPOLATED = 35, + TFMT_DXT1 = 36, + TFMT_DXT3 = 37, + TFMT_DXT5 = 38, + TFMT_2_10_10_10_UNORM = 40, + TFMT_10_10_10_2_UNORM = 41, + TFMT_9_9_9_E5_FLOAT = 42, + TFMT_11_11_10_FLOAT = 43, + TFMT_A8_UNORM = 44, + TFMT_L8_UNORM = 45, + TFMT_L8_A8_UNORM = 47, + TFMT_8_UNORM = 48, + TFMT_8_8_UNORM = 49, + TFMT_8_8_8_UNORM = 50, + TFMT_8_8_8_8_UNORM = 51, + TFMT_8_SNORM = 52, + TFMT_8_8_SNORM = 53, + TFMT_8_8_8_SNORM = 54, + TFMT_8_8_8_8_SNORM = 55, + TFMT_8_UINT = 56, + TFMT_8_8_UINT = 57, + TFMT_8_8_8_UINT = 58, + TFMT_8_8_8_8_UINT = 59, + TFMT_8_SINT = 60, + TFMT_8_8_SINT = 61, + TFMT_8_8_8_SINT = 62, + TFMT_8_8_8_8_SINT = 63, + TFMT_16_FLOAT = 64, + TFMT_16_16_FLOAT = 65, + TFMT_16_16_16_16_FLOAT = 67, + TFMT_16_UINT = 68, + TFMT_16_16_UINT = 69, + TFMT_16_16_16_16_UINT = 71, + TFMT_16_SINT = 72, + TFMT_16_16_SINT = 73, + TFMT_16_16_16_16_SINT = 75, + TFMT_16_UNORM = 76, + TFMT_16_16_UNORM = 77, + TFMT_16_16_16_16_UNORM = 79, + TFMT_16_SNORM = 80, + TFMT_16_16_SNORM = 81, + TFMT_16_16_16_16_SNORM = 83, + TFMT_32_FLOAT = 84, + TFMT_32_32_FLOAT = 85, + TFMT_32_32_32_32_FLOAT = 87, + TFMT_32_UINT = 88, + TFMT_32_32_UINT = 89, + TFMT_32_32_32_32_UINT = 91, + 
TFMT_32_SINT = 92, + TFMT_32_32_SINT = 93, + TFMT_32_32_32_32_SINT = 95, + TFMT_2_10_10_10_UINT = 96, + TFMT_10_10_10_2_UINT = 97, + TFMT_ETC2_RG11_SNORM = 112, + TFMT_ETC2_RG11_UNORM = 113, + TFMT_ETC2_R11_SNORM = 114, + TFMT_ETC2_R11_UNORM = 115, + TFMT_ETC2_RGBA8 = 116, + TFMT_ETC2_RGB8A1 = 117, + TFMT_ETC2_RGB8 = 118, + TFMT_NONE = 255, +}; + +enum a3xx_color_fmt { + RB_R5G6B5_UNORM = 0, + RB_R5G5B5A1_UNORM = 1, + RB_R4G4B4A4_UNORM = 3, + RB_R8G8B8_UNORM = 4, + RB_R8G8B8A8_UNORM = 8, + RB_R8G8B8A8_SNORM = 9, + RB_R8G8B8A8_UINT = 10, + RB_R8G8B8A8_SINT = 11, + RB_R8G8_UNORM = 12, + RB_R8G8_SNORM = 13, + RB_R8G8_UINT = 14, + RB_R8G8_SINT = 15, + RB_R10G10B10A2_UNORM = 16, + RB_A2R10G10B10_UNORM = 17, + RB_R10G10B10A2_UINT = 18, + RB_A2R10G10B10_UINT = 19, + RB_A8_UNORM = 20, + RB_R8_UNORM = 21, + RB_R16_FLOAT = 24, + RB_R16G16_FLOAT = 25, + RB_R16G16B16A16_FLOAT = 27, + RB_R11G11B10_FLOAT = 28, + RB_R16_SNORM = 32, + RB_R16G16_SNORM = 33, + RB_R16G16B16A16_SNORM = 35, + RB_R16_UNORM = 36, + RB_R16G16_UNORM = 37, + RB_R16G16B16A16_UNORM = 39, + RB_R16_SINT = 40, + RB_R16G16_SINT = 41, + RB_R16G16B16A16_SINT = 43, + RB_R16_UINT = 44, + RB_R16G16_UINT = 45, + RB_R16G16B16A16_UINT = 47, + RB_R32_FLOAT = 48, + RB_R32G32_FLOAT = 49, + RB_R32G32B32A32_FLOAT = 51, + RB_R32_SINT = 52, + RB_R32G32_SINT = 53, + RB_R32G32B32A32_SINT = 55, + RB_R32_UINT = 56, + RB_R32G32_UINT = 57, + RB_R32G32B32A32_UINT = 59, + RB_NONE = 255, +}; + +enum a3xx_cp_perfcounter_select { + CP_ALWAYS_COUNT = 0, + CP_AHB_PFPTRANS_WAIT = 3, + CP_AHB_NRTTRANS_WAIT = 6, + CP_CSF_NRT_READ_WAIT = 8, + CP_CSF_I1_FIFO_FULL = 9, + CP_CSF_I2_FIFO_FULL = 10, + CP_CSF_ST_FIFO_FULL = 11, + CP_RESERVED_12 = 12, + CP_CSF_RING_ROQ_FULL = 13, + CP_CSF_I1_ROQ_FULL = 14, + CP_CSF_I2_ROQ_FULL = 15, + CP_CSF_ST_ROQ_FULL = 16, + CP_RESERVED_17 = 17, + CP_MIU_TAG_MEM_FULL = 18, + CP_MIU_NRT_WRITE_STALLED = 22, + CP_MIU_NRT_READ_STALLED = 23, + CP_ME_REGS_RB_DONE_FIFO_FULL = 26, + CP_ME_REGS_VS_EVENT_FIFO_FULL = 27, + CP_ME_REGS_PS_EVENT_FIFO_FULL = 28, + CP_ME_REGS_CF_EVENT_FIFO_FULL = 29, + CP_ME_MICRO_RB_STARVED = 30, + CP_AHB_RBBM_DWORD_SENT = 40, + CP_ME_BUSY_CLOCKS = 41, + CP_ME_WAIT_CONTEXT_AVAIL = 42, + CP_PFP_TYPE0_PACKET = 43, + CP_PFP_TYPE3_PACKET = 44, + CP_CSF_RB_WPTR_NEQ_RPTR = 45, + CP_CSF_I1_SIZE_NEQ_ZERO = 46, + CP_CSF_I2_SIZE_NEQ_ZERO = 47, + CP_CSF_RBI1I2_FETCHING = 48, +}; + +enum a3xx_gras_tse_perfcounter_select { + GRAS_TSEPERF_INPUT_PRIM = 0, + GRAS_TSEPERF_INPUT_NULL_PRIM = 1, + GRAS_TSEPERF_TRIVAL_REJ_PRIM = 2, + GRAS_TSEPERF_CLIPPED_PRIM = 3, + GRAS_TSEPERF_NEW_PRIM = 4, + GRAS_TSEPERF_ZERO_AREA_PRIM = 5, + GRAS_TSEPERF_FACENESS_CULLED_PRIM = 6, + GRAS_TSEPERF_ZERO_PIXEL_PRIM = 7, + GRAS_TSEPERF_OUTPUT_NULL_PRIM = 8, + GRAS_TSEPERF_OUTPUT_VISIBLE_PRIM = 9, + GRAS_TSEPERF_PRE_CLIP_PRIM = 10, + GRAS_TSEPERF_POST_CLIP_PRIM = 11, + GRAS_TSEPERF_WORKING_CYCLES = 12, + GRAS_TSEPERF_PC_STARVE = 13, + GRAS_TSERASPERF_STALL = 14, +}; + +enum a3xx_gras_ras_perfcounter_select { + GRAS_RASPERF_16X16_TILES = 0, + GRAS_RASPERF_8X8_TILES = 1, + GRAS_RASPERF_4X4_TILES = 2, + GRAS_RASPERF_WORKING_CYCLES = 3, + GRAS_RASPERF_STALL_CYCLES_BY_RB = 4, + GRAS_RASPERF_STALL_CYCLES_BY_VSC = 5, + GRAS_RASPERF_STARVE_CYCLES_BY_TSE = 6, +}; + +enum a3xx_hlsq_perfcounter_select { + HLSQ_PERF_SP_VS_CONSTANT = 0, + HLSQ_PERF_SP_VS_INSTRUCTIONS = 1, + HLSQ_PERF_SP_FS_CONSTANT = 2, + HLSQ_PERF_SP_FS_INSTRUCTIONS = 3, + HLSQ_PERF_TP_STATE = 4, + HLSQ_PERF_QUADS = 5, + HLSQ_PERF_PIXELS = 6, + HLSQ_PERF_VERTICES = 7, + HLSQ_PERF_FS8_THREADS = 8, + 
HLSQ_PERF_FS16_THREADS = 9, + HLSQ_PERF_FS32_THREADS = 10, + HLSQ_PERF_VS8_THREADS = 11, + HLSQ_PERF_VS16_THREADS = 12, + HLSQ_PERF_SP_VS_DATA_BYTES = 13, + HLSQ_PERF_SP_FS_DATA_BYTES = 14, + HLSQ_PERF_ACTIVE_CYCLES = 15, + HLSQ_PERF_STALL_CYCLES_SP_STATE = 16, + HLSQ_PERF_STALL_CYCLES_SP_VS = 17, + HLSQ_PERF_STALL_CYCLES_SP_FS = 18, + HLSQ_PERF_STALL_CYCLES_UCHE = 19, + HLSQ_PERF_RBBM_LOAD_CYCLES = 20, + HLSQ_PERF_DI_TO_VS_START_SP0 = 21, + HLSQ_PERF_DI_TO_FS_START_SP0 = 22, + HLSQ_PERF_VS_START_TO_DONE_SP0 = 23, + HLSQ_PERF_FS_START_TO_DONE_SP0 = 24, + HLSQ_PERF_SP_STATE_COPY_CYCLES_VS = 25, + HLSQ_PERF_SP_STATE_COPY_CYCLES_FS = 26, + HLSQ_PERF_UCHE_LATENCY_CYCLES = 27, + HLSQ_PERF_UCHE_LATENCY_COUNT = 28, +}; + +enum a3xx_pc_perfcounter_select { + PC_PCPERF_VISIBILITY_STREAMS = 0, + PC_PCPERF_TOTAL_INSTANCES = 1, + PC_PCPERF_PRIMITIVES_PC_VPC = 2, + PC_PCPERF_PRIMITIVES_KILLED_BY_VS = 3, + PC_PCPERF_PRIMITIVES_VISIBLE_BY_VS = 4, + PC_PCPERF_DRAWCALLS_KILLED_BY_VS = 5, + PC_PCPERF_DRAWCALLS_VISIBLE_BY_VS = 6, + PC_PCPERF_VERTICES_TO_VFD = 7, + PC_PCPERF_REUSED_VERTICES = 8, + PC_PCPERF_CYCLES_STALLED_BY_VFD = 9, + PC_PCPERF_CYCLES_STALLED_BY_TSE = 10, + PC_PCPERF_CYCLES_STALLED_BY_VBIF = 11, + PC_PCPERF_CYCLES_IS_WORKING = 12, +}; + +enum a3xx_rb_perfcounter_select { + RB_RBPERF_ACTIVE_CYCLES_ANY = 0, + RB_RBPERF_ACTIVE_CYCLES_ALL = 1, + RB_RBPERF_STARVE_CYCLES_BY_SP = 2, + RB_RBPERF_STARVE_CYCLES_BY_RAS = 3, + RB_RBPERF_STARVE_CYCLES_BY_MARB = 4, + RB_RBPERF_STALL_CYCLES_BY_MARB = 5, + RB_RBPERF_STALL_CYCLES_BY_HLSQ = 6, + RB_RBPERF_RB_MARB_DATA = 7, + RB_RBPERF_SP_RB_QUAD = 8, + RB_RBPERF_RAS_EARLY_Z_QUADS = 9, + RB_RBPERF_GMEM_CH0_READ = 10, + RB_RBPERF_GMEM_CH1_READ = 11, + RB_RBPERF_GMEM_CH0_WRITE = 12, + RB_RBPERF_GMEM_CH1_WRITE = 13, + RB_RBPERF_CP_CONTEXT_DONE = 14, + RB_RBPERF_CP_CACHE_FLUSH = 15, + RB_RBPERF_CP_ZPASS_DONE = 16, +}; + +enum a3xx_rbbm_perfcounter_select { + RBBM_ALAWYS_ON = 0, + RBBM_VBIF_BUSY = 1, + RBBM_TSE_BUSY = 2, + RBBM_RAS_BUSY = 3, + RBBM_PC_DCALL_BUSY = 4, + RBBM_PC_VSD_BUSY = 5, + RBBM_VFD_BUSY = 6, + RBBM_VPC_BUSY = 7, + RBBM_UCHE_BUSY = 8, + RBBM_VSC_BUSY = 9, + RBBM_HLSQ_BUSY = 10, + RBBM_ANY_RB_BUSY = 11, + RBBM_ANY_TEX_BUSY = 12, + RBBM_ANY_USP_BUSY = 13, + RBBM_ANY_MARB_BUSY = 14, + RBBM_ANY_ARB_BUSY = 15, + RBBM_AHB_STATUS_BUSY = 16, + RBBM_AHB_STATUS_STALLED = 17, + RBBM_AHB_STATUS_TXFR = 18, + RBBM_AHB_STATUS_TXFR_SPLIT = 19, + RBBM_AHB_STATUS_TXFR_ERROR = 20, + RBBM_AHB_STATUS_LONG_STALL = 21, + RBBM_RBBM_STATUS_MASKED = 22, +}; + +enum a3xx_sp_perfcounter_select { + SP_LM_LOAD_INSTRUCTIONS = 0, + SP_LM_STORE_INSTRUCTIONS = 1, + SP_LM_ATOMICS = 2, + SP_UCHE_LOAD_INSTRUCTIONS = 3, + SP_UCHE_STORE_INSTRUCTIONS = 4, + SP_UCHE_ATOMICS = 5, + SP_VS_TEX_INSTRUCTIONS = 6, + SP_VS_CFLOW_INSTRUCTIONS = 7, + SP_VS_EFU_INSTRUCTIONS = 8, + SP_VS_FULL_ALU_INSTRUCTIONS = 9, + SP_VS_HALF_ALU_INSTRUCTIONS = 10, + SP_FS_TEX_INSTRUCTIONS = 11, + SP_FS_CFLOW_INSTRUCTIONS = 12, + SP_FS_EFU_INSTRUCTIONS = 13, + SP_FS_FULL_ALU_INSTRUCTIONS = 14, + SP_FS_HALF_ALU_INSTRUCTIONS = 15, + SP_FS_BARY_INSTRUCTIONS = 16, + SP_VS_INSTRUCTIONS = 17, + SP_FS_INSTRUCTIONS = 18, + SP_ADDR_LOCK_COUNT = 19, + SP_UCHE_READ_TRANS = 20, + SP_UCHE_WRITE_TRANS = 21, + SP_EXPORT_VPC_TRANS = 22, + SP_EXPORT_RB_TRANS = 23, + SP_PIXELS_KILLED = 24, + SP_ICL1_REQUESTS = 25, + SP_ICL1_MISSES = 26, + SP_ICL0_REQUESTS = 27, + SP_ICL0_MISSES = 28, + SP_ALU_ACTIVE_CYCLES = 29, + SP_EFU_ACTIVE_CYCLES = 30, + SP_STALL_CYCLES_BY_VPC = 31, + SP_STALL_CYCLES_BY_TP = 32, + SP_STALL_CYCLES_BY_UCHE = 
33, + SP_STALL_CYCLES_BY_RB = 34, + SP_ACTIVE_CYCLES_ANY = 35, + SP_ACTIVE_CYCLES_ALL = 36, +}; + +enum a3xx_tp_perfcounter_select { + TPL1_TPPERF_L1_REQUESTS = 0, + TPL1_TPPERF_TP0_L1_REQUESTS = 1, + TPL1_TPPERF_TP0_L1_MISSES = 2, + TPL1_TPPERF_TP1_L1_REQUESTS = 3, + TPL1_TPPERF_TP1_L1_MISSES = 4, + TPL1_TPPERF_TP2_L1_REQUESTS = 5, + TPL1_TPPERF_TP2_L1_MISSES = 6, + TPL1_TPPERF_TP3_L1_REQUESTS = 7, + TPL1_TPPERF_TP3_L1_MISSES = 8, + TPL1_TPPERF_OUTPUT_TEXELS_POINT = 9, + TPL1_TPPERF_OUTPUT_TEXELS_BILINEAR = 10, + TPL1_TPPERF_OUTPUT_TEXELS_MIP = 11, + TPL1_TPPERF_OUTPUT_TEXELS_ANISO = 12, + TPL1_TPPERF_BILINEAR_OPS = 13, + TPL1_TPPERF_QUADSQUADS_OFFSET = 14, + TPL1_TPPERF_QUADQUADS_SHADOW = 15, + TPL1_TPPERF_QUADS_ARRAY = 16, + TPL1_TPPERF_QUADS_PROJECTION = 17, + TPL1_TPPERF_QUADS_GRADIENT = 18, + TPL1_TPPERF_QUADS_1D2D = 19, + TPL1_TPPERF_QUADS_3DCUBE = 20, + TPL1_TPPERF_ZERO_LOD = 21, + TPL1_TPPERF_OUTPUT_TEXELS = 22, + TPL1_TPPERF_ACTIVE_CYCLES_ANY = 23, + TPL1_TPPERF_ACTIVE_CYCLES_ALL = 24, + TPL1_TPPERF_STALL_CYCLES_BY_ARB = 25, + TPL1_TPPERF_LATENCY = 26, + TPL1_TPPERF_LATENCY_TRANS = 27, +}; + +enum a3xx_vfd_perfcounter_select { + VFD_PERF_UCHE_BYTE_FETCHED = 0, + VFD_PERF_UCHE_TRANS = 1, + VFD_PERF_VPC_BYPASS_COMPONENTS = 2, + VFD_PERF_FETCH_INSTRUCTIONS = 3, + VFD_PERF_DECODE_INSTRUCTIONS = 4, + VFD_PERF_ACTIVE_CYCLES = 5, + VFD_PERF_STALL_CYCLES_UCHE = 6, + VFD_PERF_STALL_CYCLES_HLSQ = 7, + VFD_PERF_STALL_CYCLES_VPC_BYPASS = 8, + VFD_PERF_STALL_CYCLES_VPC_ALLOC = 9, +}; + +enum a3xx_vpc_perfcounter_select { + VPC_PERF_SP_LM_PRIMITIVES = 0, + VPC_PERF_COMPONENTS_FROM_SP = 1, + VPC_PERF_SP_LM_COMPONENTS = 2, + VPC_PERF_ACTIVE_CYCLES = 3, + VPC_PERF_STALL_CYCLES_LM = 4, + VPC_PERF_STALL_CYCLES_RAS = 5, +}; + +enum a3xx_uche_perfcounter_select { + UCHE_UCHEPERF_VBIF_READ_BEATS_TP = 0, + UCHE_UCHEPERF_VBIF_READ_BEATS_VFD = 1, + UCHE_UCHEPERF_VBIF_READ_BEATS_HLSQ = 2, + UCHE_UCHEPERF_VBIF_READ_BEATS_MARB = 3, + UCHE_UCHEPERF_VBIF_READ_BEATS_SP = 4, + UCHE_UCHEPERF_READ_REQUESTS_TP = 8, + UCHE_UCHEPERF_READ_REQUESTS_VFD = 9, + UCHE_UCHEPERF_READ_REQUESTS_HLSQ = 10, + UCHE_UCHEPERF_READ_REQUESTS_MARB = 11, + UCHE_UCHEPERF_READ_REQUESTS_SP = 12, + UCHE_UCHEPERF_WRITE_REQUESTS_MARB = 13, + UCHE_UCHEPERF_WRITE_REQUESTS_SP = 14, + UCHE_UCHEPERF_TAG_CHECK_FAILS = 15, + UCHE_UCHEPERF_EVICTS = 16, + UCHE_UCHEPERF_FLUSHES = 17, + UCHE_UCHEPERF_VBIF_LATENCY_CYCLES = 18, + UCHE_UCHEPERF_VBIF_LATENCY_SAMPLES = 19, + UCHE_UCHEPERF_ACTIVE_CYCLES = 20, +}; + +enum a3xx_intp_mode { + SMOOTH = 0, + FLAT = 1, + ZERO = 2, + ONE = 3, +}; + +enum a3xx_repl_mode { + S = 1, + T = 2, + ONE_T = 3, +}; + +enum a3xx_tex_filter { + A3XX_TEX_NEAREST = 0, + A3XX_TEX_LINEAR = 1, + A3XX_TEX_ANISO = 2, +}; + +enum a3xx_tex_clamp { + A3XX_TEX_REPEAT = 0, + A3XX_TEX_CLAMP_TO_EDGE = 1, + A3XX_TEX_MIRROR_REPEAT = 2, + A3XX_TEX_CLAMP_TO_BORDER = 3, + A3XX_TEX_MIRROR_CLAMP = 4, +}; + +enum a3xx_tex_aniso { + A3XX_TEX_ANISO_1 = 0, + A3XX_TEX_ANISO_2 = 1, + A3XX_TEX_ANISO_4 = 2, + A3XX_TEX_ANISO_8 = 3, + A3XX_TEX_ANISO_16 = 4, +}; + +enum a3xx_tex_swiz { + A3XX_TEX_X = 0, + A3XX_TEX_Y = 1, + A3XX_TEX_Z = 2, + A3XX_TEX_W = 3, + A3XX_TEX_ZERO = 4, + A3XX_TEX_ONE = 5, +}; + +enum a3xx_tex_type { + A3XX_TEX_1D = 0, + A3XX_TEX_2D = 1, + A3XX_TEX_CUBE = 2, + A3XX_TEX_3D = 3, +}; + +enum a3xx_tex_msaa { + A3XX_TPL1_MSAA1X = 0, + A3XX_TPL1_MSAA2X = 1, + A3XX_TPL1_MSAA4X = 2, + A3XX_TPL1_MSAA8X = 3, +}; + +#define A3XX_INT0_RBBM_GPU_IDLE 0x00000001 +#define A3XX_INT0_RBBM_AHB_ERROR 0x00000002 +#define A3XX_INT0_RBBM_REG_TIMEOUT 
0x00000004 +#define A3XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008 +#define A3XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010 +#define A3XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00000020 +#define A3XX_INT0_VFD_ERROR 0x00000040 +#define A3XX_INT0_CP_SW_INT 0x00000080 +#define A3XX_INT0_CP_T0_PACKET_IN_IB 0x00000100 +#define A3XX_INT0_CP_OPCODE_ERROR 0x00000200 +#define A3XX_INT0_CP_RESERVED_BIT_ERROR 0x00000400 +#define A3XX_INT0_CP_HW_FAULT 0x00000800 +#define A3XX_INT0_CP_DMA 0x00001000 +#define A3XX_INT0_CP_IB2_INT 0x00002000 +#define A3XX_INT0_CP_IB1_INT 0x00004000 +#define A3XX_INT0_CP_RB_INT 0x00008000 +#define A3XX_INT0_CP_REG_PROTECT_FAULT 0x00010000 +#define A3XX_INT0_CP_RB_DONE_TS 0x00020000 +#define A3XX_INT0_CP_VS_DONE_TS 0x00040000 +#define A3XX_INT0_CP_PS_DONE_TS 0x00080000 +#define A3XX_INT0_CACHE_FLUSH_TS 0x00100000 +#define A3XX_INT0_CP_AHB_ERROR_HALT 0x00200000 +#define A3XX_INT0_MISC_HANG_DETECT 0x01000000 +#define A3XX_INT0_UCHE_OOB_ACCESS 0x02000000 +#define REG_A3XX_RBBM_HW_VERSION 0x00000000 + +#define REG_A3XX_RBBM_HW_RELEASE 0x00000001 + +#define REG_A3XX_RBBM_HW_CONFIGURATION 0x00000002 + +#define REG_A3XX_RBBM_CLOCK_CTL 0x00000010 + +#define REG_A3XX_RBBM_SP_HYST_CNT 0x00000012 + +#define REG_A3XX_RBBM_SW_RESET_CMD 0x00000018 + +#define REG_A3XX_RBBM_AHB_CTL0 0x00000020 + +#define REG_A3XX_RBBM_AHB_CTL1 0x00000021 + +#define REG_A3XX_RBBM_AHB_CMD 0x00000022 + +#define REG_A3XX_RBBM_AHB_ERROR_STATUS 0x00000027 + +#define REG_A3XX_RBBM_GPR0_CTL 0x0000002e + +#define REG_A3XX_RBBM_STATUS 0x00000030 +#define A3XX_RBBM_STATUS_HI_BUSY 0x00000001 +#define A3XX_RBBM_STATUS_CP_ME_BUSY 0x00000002 +#define A3XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004 +#define A3XX_RBBM_STATUS_CP_NRT_BUSY 0x00004000 +#define A3XX_RBBM_STATUS_VBIF_BUSY 0x00008000 +#define A3XX_RBBM_STATUS_TSE_BUSY 0x00010000 +#define A3XX_RBBM_STATUS_RAS_BUSY 0x00020000 +#define A3XX_RBBM_STATUS_RB_BUSY 0x00040000 +#define A3XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000 +#define A3XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000 +#define A3XX_RBBM_STATUS_VFD_BUSY 0x00200000 +#define A3XX_RBBM_STATUS_VPC_BUSY 0x00400000 +#define A3XX_RBBM_STATUS_UCHE_BUSY 0x00800000 +#define A3XX_RBBM_STATUS_SP_BUSY 0x01000000 +#define A3XX_RBBM_STATUS_TPL1_BUSY 0x02000000 +#define A3XX_RBBM_STATUS_MARB_BUSY 0x04000000 +#define A3XX_RBBM_STATUS_VSC_BUSY 0x08000000 +#define A3XX_RBBM_STATUS_ARB_BUSY 0x10000000 +#define A3XX_RBBM_STATUS_HLSQ_BUSY 0x20000000 +#define A3XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000 +#define A3XX_RBBM_STATUS_GPU_BUSY 0x80000000 + +#define REG_A3XX_RBBM_NQWAIT_UNTIL 0x00000040 + +#define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x00000033 + +#define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x00000050 + +#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL0 0x00000051 + +#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL1 0x00000054 + +#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL2 0x00000057 + +#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a + +#define REG_A3XX_RBBM_INT_SET_CMD 0x00000060 + +#define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061 + +#define REG_A3XX_RBBM_INT_0_MASK 0x00000063 + +#define REG_A3XX_RBBM_INT_0_STATUS 0x00000064 + +#define REG_A3XX_RBBM_PERFCTR_CTL 0x00000080 +#define A3XX_RBBM_PERFCTR_CTL_ENABLE 0x00000001 + +#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD0 0x00000081 + +#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD1 0x00000082 + +#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000084 + +#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000085 + +#define REG_A3XX_RBBM_PERFCOUNTER0_SELECT 0x00000086 + +#define REG_A3XX_RBBM_PERFCOUNTER1_SELECT 0x00000087 + 
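+/* Usage sketch, not part of the generated header: a counter is typically
+ * programmed by picking an event from one of the *_perfcounter_select
+ * enums above and then setting the global enable bit, e.g. (assuming the
+ * gpu_write() helper from msm_gpu.h):
+ *
+ *	gpu_write(gpu, REG_A3XX_RBBM_PERFCOUNTER0_SELECT, RBBM_TSE_BUSY);
+ *	gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL,
+ *		  A3XX_RBBM_PERFCTR_CTL_ENABLE);
+ */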
+#define REG_A3XX_RBBM_GPU_BUSY_MASKED 0x00000088 + +#define REG_A3XX_RBBM_PERFCTR_CP_0_LO 0x00000090 + +#define REG_A3XX_RBBM_PERFCTR_CP_0_HI 0x00000091 + +#define REG_A3XX_RBBM_PERFCTR_RBBM_0_LO 0x00000092 + +#define REG_A3XX_RBBM_PERFCTR_RBBM_0_HI 0x00000093 + +#define REG_A3XX_RBBM_PERFCTR_RBBM_1_LO 0x00000094 + +#define REG_A3XX_RBBM_PERFCTR_RBBM_1_HI 0x00000095 + +#define REG_A3XX_RBBM_PERFCTR_PC_0_LO 0x00000096 + +#define REG_A3XX_RBBM_PERFCTR_PC_0_HI 0x00000097 + +#define REG_A3XX_RBBM_PERFCTR_PC_1_LO 0x00000098 + +#define REG_A3XX_RBBM_PERFCTR_PC_1_HI 0x00000099 + +#define REG_A3XX_RBBM_PERFCTR_PC_2_LO 0x0000009a + +#define REG_A3XX_RBBM_PERFCTR_PC_2_HI 0x0000009b + +#define REG_A3XX_RBBM_PERFCTR_PC_3_LO 0x0000009c + +#define REG_A3XX_RBBM_PERFCTR_PC_3_HI 0x0000009d + +#define REG_A3XX_RBBM_PERFCTR_VFD_0_LO 0x0000009e + +#define REG_A3XX_RBBM_PERFCTR_VFD_0_HI 0x0000009f + +#define REG_A3XX_RBBM_PERFCTR_VFD_1_LO 0x000000a0 + +#define REG_A3XX_RBBM_PERFCTR_VFD_1_HI 0x000000a1 + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000a2 + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000a3 + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000a4 + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000a5 + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000a6 + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000a7 + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000a8 + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000a9 + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000aa + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000ab + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000ac + +#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000ad + +#define REG_A3XX_RBBM_PERFCTR_VPC_0_LO 0x000000ae + +#define REG_A3XX_RBBM_PERFCTR_VPC_0_HI 0x000000af + +#define REG_A3XX_RBBM_PERFCTR_VPC_1_LO 0x000000b0 + +#define REG_A3XX_RBBM_PERFCTR_VPC_1_HI 0x000000b1 + +#define REG_A3XX_RBBM_PERFCTR_TSE_0_LO 0x000000b2 + +#define REG_A3XX_RBBM_PERFCTR_TSE_0_HI 0x000000b3 + +#define REG_A3XX_RBBM_PERFCTR_TSE_1_LO 0x000000b4 + +#define REG_A3XX_RBBM_PERFCTR_TSE_1_HI 0x000000b5 + +#define REG_A3XX_RBBM_PERFCTR_RAS_0_LO 0x000000b6 + +#define REG_A3XX_RBBM_PERFCTR_RAS_0_HI 0x000000b7 + +#define REG_A3XX_RBBM_PERFCTR_RAS_1_LO 0x000000b8 + +#define REG_A3XX_RBBM_PERFCTR_RAS_1_HI 0x000000b9 + +#define REG_A3XX_RBBM_PERFCTR_UCHE_0_LO 0x000000ba + +#define REG_A3XX_RBBM_PERFCTR_UCHE_0_HI 0x000000bb + +#define REG_A3XX_RBBM_PERFCTR_UCHE_1_LO 0x000000bc + +#define REG_A3XX_RBBM_PERFCTR_UCHE_1_HI 0x000000bd + +#define REG_A3XX_RBBM_PERFCTR_UCHE_2_LO 0x000000be + +#define REG_A3XX_RBBM_PERFCTR_UCHE_2_HI 0x000000bf + +#define REG_A3XX_RBBM_PERFCTR_UCHE_3_LO 0x000000c0 + +#define REG_A3XX_RBBM_PERFCTR_UCHE_3_HI 0x000000c1 + +#define REG_A3XX_RBBM_PERFCTR_UCHE_4_LO 0x000000c2 + +#define REG_A3XX_RBBM_PERFCTR_UCHE_4_HI 0x000000c3 + +#define REG_A3XX_RBBM_PERFCTR_UCHE_5_LO 0x000000c4 + +#define REG_A3XX_RBBM_PERFCTR_UCHE_5_HI 0x000000c5 + +#define REG_A3XX_RBBM_PERFCTR_TP_0_LO 0x000000c6 + +#define REG_A3XX_RBBM_PERFCTR_TP_0_HI 0x000000c7 + +#define REG_A3XX_RBBM_PERFCTR_TP_1_LO 0x000000c8 + +#define REG_A3XX_RBBM_PERFCTR_TP_1_HI 0x000000c9 + +#define REG_A3XX_RBBM_PERFCTR_TP_2_LO 0x000000ca + +#define REG_A3XX_RBBM_PERFCTR_TP_2_HI 0x000000cb + +#define REG_A3XX_RBBM_PERFCTR_TP_3_LO 0x000000cc + +#define REG_A3XX_RBBM_PERFCTR_TP_3_HI 0x000000cd + +#define REG_A3XX_RBBM_PERFCTR_TP_4_LO 0x000000ce + +#define REG_A3XX_RBBM_PERFCTR_TP_4_HI 0x000000cf + +#define REG_A3XX_RBBM_PERFCTR_TP_5_LO 0x000000d0 + +#define REG_A3XX_RBBM_PERFCTR_TP_5_HI 0x000000d1 
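+/* Usage sketch, not part of the generated header: each result above is a
+ * 64-bit counter split across a _LO/_HI register pair, so a sample is
+ * assembled from two 32-bit reads with the gpu_read() helper used in the
+ * a2xx code earlier in this patch (hi/lo read tearing ignored for brevity):
+ *
+ *	u64 val = (u64)gpu_read(gpu, REG_A3XX_RBBM_PERFCTR_RBBM_0_HI) << 32;
+ *	val |= gpu_read(gpu, REG_A3XX_RBBM_PERFCTR_RBBM_0_LO);
+ */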
+ +#define REG_A3XX_RBBM_PERFCTR_SP_0_LO 0x000000d2 + +#define REG_A3XX_RBBM_PERFCTR_SP_0_HI 0x000000d3 + +#define REG_A3XX_RBBM_PERFCTR_SP_1_LO 0x000000d4 + +#define REG_A3XX_RBBM_PERFCTR_SP_1_HI 0x000000d5 + +#define REG_A3XX_RBBM_PERFCTR_SP_2_LO 0x000000d6 + +#define REG_A3XX_RBBM_PERFCTR_SP_2_HI 0x000000d7 + +#define REG_A3XX_RBBM_PERFCTR_SP_3_LO 0x000000d8 + +#define REG_A3XX_RBBM_PERFCTR_SP_3_HI 0x000000d9 + +#define REG_A3XX_RBBM_PERFCTR_SP_4_LO 0x000000da + +#define REG_A3XX_RBBM_PERFCTR_SP_4_HI 0x000000db + +#define REG_A3XX_RBBM_PERFCTR_SP_5_LO 0x000000dc + +#define REG_A3XX_RBBM_PERFCTR_SP_5_HI 0x000000dd + +#define REG_A3XX_RBBM_PERFCTR_SP_6_LO 0x000000de + +#define REG_A3XX_RBBM_PERFCTR_SP_6_HI 0x000000df + +#define REG_A3XX_RBBM_PERFCTR_SP_7_LO 0x000000e0 + +#define REG_A3XX_RBBM_PERFCTR_SP_7_HI 0x000000e1 + +#define REG_A3XX_RBBM_PERFCTR_RB_0_LO 0x000000e2 + +#define REG_A3XX_RBBM_PERFCTR_RB_0_HI 0x000000e3 + +#define REG_A3XX_RBBM_PERFCTR_RB_1_LO 0x000000e4 + +#define REG_A3XX_RBBM_PERFCTR_RB_1_HI 0x000000e5 + +#define REG_A3XX_RBBM_PERFCTR_PWR_0_LO 0x000000ea + +#define REG_A3XX_RBBM_PERFCTR_PWR_0_HI 0x000000eb + +#define REG_A3XX_RBBM_PERFCTR_PWR_1_LO 0x000000ec + +#define REG_A3XX_RBBM_PERFCTR_PWR_1_HI 0x000000ed + +#define REG_A3XX_RBBM_RBBM_CTL 0x00000100 + +#define REG_A3XX_RBBM_DEBUG_BUS_CTL 0x00000111 + +#define REG_A3XX_RBBM_DEBUG_BUS_DATA_STATUS 0x00000112 + +#define REG_A3XX_CP_PFP_UCODE_ADDR 0x000001c9 + +#define REG_A3XX_CP_PFP_UCODE_DATA 0x000001ca + +#define REG_A3XX_CP_ROQ_ADDR 0x000001cc + +#define REG_A3XX_CP_ROQ_DATA 0x000001cd + +#define REG_A3XX_CP_MERCIU_ADDR 0x000001d1 + +#define REG_A3XX_CP_MERCIU_DATA 0x000001d2 + +#define REG_A3XX_CP_MERCIU_DATA2 0x000001d3 + +#define REG_A3XX_CP_MEQ_ADDR 0x000001da + +#define REG_A3XX_CP_MEQ_DATA 0x000001db + +#define REG_A3XX_CP_WFI_PEND_CTR 0x000001f5 + +#define REG_A3XX_RBBM_PM_OVERRIDE2 0x0000039d + +#define REG_A3XX_CP_PERFCOUNTER_SELECT 0x00000445 + +#define REG_A3XX_CP_HW_FAULT 0x0000045c + +#define REG_A3XX_CP_PROTECT_CTRL 0x0000045e + +#define REG_A3XX_CP_PROTECT_STATUS 0x0000045f + +static inline uint32_t REG_A3XX_CP_PROTECT(uint32_t i0) { return 0x00000460 + 0x1*i0; } + +static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460 + 0x1*i0; } + +#define REG_A3XX_CP_AHB_FAULT 0x0000054d + +#define REG_A3XX_SQ_GPR_MANAGEMENT 0x00000d00 + +#define REG_A3XX_SQ_INST_STORE_MANAGMENT 0x00000d02 + +#define REG_A3XX_TP0_CHICKEN 0x00000e1e + +#define REG_A3XX_SP_GLOBAL_MEM_SIZE 0x00000e22 + +#define REG_A3XX_SP_GLOBAL_MEM_ADDR 0x00000e23 + +#define REG_A3XX_GRAS_CL_CLIP_CNTL 0x00002040 +#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 0x00001000 +#define A3XX_GRAS_CL_CLIP_CNTL_IJ_NON_PERSP_CENTER 0x00002000 +#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTROID 0x00004000 +#define A3XX_GRAS_CL_CLIP_CNTL_IJ_NON_PERSP_CENTROID 0x00008000 +#define A3XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000 +#define A3XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000 +#define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000 +#define A3XX_GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 0x00100000 +#define A3XX_GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 0x00200000 +#define A3XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000 +#define A3XX_GRAS_CL_CLIP_CNTL_ZCOORD 0x00800000 +#define A3XX_GRAS_CL_CLIP_CNTL_WCOORD 0x01000000 +#define A3XX_GRAS_CL_CLIP_CNTL_ZCLIP_DISABLE 0x02000000 +#define A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__MASK 0x1c000000 +#define A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__SHIFT 26 +static inline uint32_t 
A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES(uint32_t val) +{ + return ((val) << A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__SHIFT) & A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__MASK; +} + +#define REG_A3XX_GRAS_CL_GB_CLIP_ADJ 0x00002044 +#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff +#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT 0 +static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val) +{ + return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK; +} +#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK 0x000ffc00 +#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT 10 +static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val) +{ + return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK; +} + +#define REG_A3XX_GRAS_CL_VPORT_XOFFSET 0x00002048 +#define A3XX_GRAS_CL_VPORT_XOFFSET__MASK 0xffffffff +#define A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT 0 +static inline uint32_t A3XX_GRAS_CL_VPORT_XOFFSET(float val) +{ + return ((fui(val)) << A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_XOFFSET__MASK; +} + +#define REG_A3XX_GRAS_CL_VPORT_XSCALE 0x00002049 +#define A3XX_GRAS_CL_VPORT_XSCALE__MASK 0xffffffff +#define A3XX_GRAS_CL_VPORT_XSCALE__SHIFT 0 +static inline uint32_t A3XX_GRAS_CL_VPORT_XSCALE(float val) +{ + return ((fui(val)) << A3XX_GRAS_CL_VPORT_XSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_XSCALE__MASK; +} + +#define REG_A3XX_GRAS_CL_VPORT_YOFFSET 0x0000204a +#define A3XX_GRAS_CL_VPORT_YOFFSET__MASK 0xffffffff +#define A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT 0 +static inline uint32_t A3XX_GRAS_CL_VPORT_YOFFSET(float val) +{ + return ((fui(val)) << A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_YOFFSET__MASK; +} + +#define REG_A3XX_GRAS_CL_VPORT_YSCALE 0x0000204b +#define A3XX_GRAS_CL_VPORT_YSCALE__MASK 0xffffffff +#define A3XX_GRAS_CL_VPORT_YSCALE__SHIFT 0 +static inline uint32_t A3XX_GRAS_CL_VPORT_YSCALE(float val) +{ + return ((fui(val)) << A3XX_GRAS_CL_VPORT_YSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_YSCALE__MASK; +} + +#define REG_A3XX_GRAS_CL_VPORT_ZOFFSET 0x0000204c +#define A3XX_GRAS_CL_VPORT_ZOFFSET__MASK 0xffffffff +#define A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT 0 +static inline uint32_t A3XX_GRAS_CL_VPORT_ZOFFSET(float val) +{ + return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_ZOFFSET__MASK; +} + +#define REG_A3XX_GRAS_CL_VPORT_ZSCALE 0x0000204d +#define A3XX_GRAS_CL_VPORT_ZSCALE__MASK 0xffffffff +#define A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT 0 +static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val) +{ + return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_ZSCALE__MASK; +} + +#define REG_A3XX_GRAS_SU_POINT_MINMAX 0x00002068 +#define A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff +#define A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0 +static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MIN(float val) +{ + return ((((uint32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK; +} +#define A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000 +#define A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16 +static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val) +{ + return ((((uint32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK; +} + +#define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069 +#define A3XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff +#define A3XX_GRAS_SU_POINT_SIZE__SHIFT 0 +static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val) +{ + return ((((int32_t)(val * 16.0))) << 
A3XX_GRAS_SU_POINT_SIZE__SHIFT) & A3XX_GRAS_SU_POINT_SIZE__MASK; +} + +#define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c +#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK 0x00ffffff +#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT 0 +static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val) +{ + return ((((int32_t)(val * 1048576.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK; +} + +#define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000206d +#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff +#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0 +static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val) +{ + return ((((int32_t)(val * 64.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK; +} + +#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070 +#define A3XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001 +#define A3XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002 +#define A3XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004 +#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8 +#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3 +static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val) +{ + return ((((int32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK; +} +#define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800 + +#define REG_A3XX_GRAS_SC_CONTROL 0x00002072 +#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x000000f0 +#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 4 +static inline uint32_t A3XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val) +{ + return ((val) << A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK; +} +#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000f00 +#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 8 +static inline uint32_t A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES(enum a3xx_msaa_samples val) +{ + return ((val) << A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK; +} +#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000 +#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12 +static inline uint32_t A3XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val) +{ + return ((val) << A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK; +} + +#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_TL 0x00002074 +#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000 +#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff +#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0 +static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val) +{ + return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK; +} +#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000 +#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16 +static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val) +{ + return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK; +} + +#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_BR 0x00002075 +#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000 +#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff +#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0 +static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val) +{ + return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK; +} +#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 
0x7fff0000 +#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16 +static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val) +{ + return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK; +} + +#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_TL 0x00002079 +#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000 +#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff +#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0 +static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val) +{ + return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK; +} +#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000 +#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16 +static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val) +{ + return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK; +} + +#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000207a +#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000 +#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff +#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0 +static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val) +{ + return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK; +} +#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000 +#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16 +static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val) +{ + return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK; +} + +#define REG_A3XX_RB_MODE_CONTROL 0x000020c0 +#define A3XX_RB_MODE_CONTROL_GMEM_BYPASS 0x00000080 +#define A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK 0x00000700 +#define A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT 8 +static inline uint32_t A3XX_RB_MODE_CONTROL_RENDER_MODE(enum a3xx_render_mode val) +{ + return ((val) << A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT) & A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK; +} +#define A3XX_RB_MODE_CONTROL_MRT__MASK 0x00003000 +#define A3XX_RB_MODE_CONTROL_MRT__SHIFT 12 +static inline uint32_t A3XX_RB_MODE_CONTROL_MRT(uint32_t val) +{ + return ((val) << A3XX_RB_MODE_CONTROL_MRT__SHIFT) & A3XX_RB_MODE_CONTROL_MRT__MASK; +} +#define A3XX_RB_MODE_CONTROL_MARB_CACHE_SPLIT_MODE 0x00008000 +#define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000 + +#define REG_A3XX_RB_RENDER_CONTROL 0x000020c1 +#define A3XX_RB_RENDER_CONTROL_DUAL_COLOR_IN_ENABLE 0x00000001 +#define A3XX_RB_RENDER_CONTROL_YUV_IN_ENABLE 0x00000002 +#define A3XX_RB_RENDER_CONTROL_COV_VALUE_INPUT_ENABLE 0x00000004 +#define A3XX_RB_RENDER_CONTROL_FACENESS 0x00000008 +#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK 0x00000ff0 +#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4 +static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val) +{ + return ((val >> 5) << A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT) & A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK; +} +#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000 +#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000 +#define A3XX_RB_RENDER_CONTROL_COORD_MASK__MASK 0x0003c000 +#define A3XX_RB_RENDER_CONTROL_COORD_MASK__SHIFT 14 +static inline uint32_t A3XX_RB_RENDER_CONTROL_COORD_MASK(uint32_t val) +{ + return ((val) << A3XX_RB_RENDER_CONTROL_COORD_MASK__SHIFT) & A3XX_RB_RENDER_CONTROL_COORD_MASK__MASK; +} +#define A3XX_RB_RENDER_CONTROL_I_CLAMP_ENABLE 0x00080000 +#define A3XX_RB_RENDER_CONTROL_COV_VALUE_OUTPUT_ENABLE 0x00100000 
+#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST 0x00400000 +#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000 +#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24 +static inline uint32_t A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val) +{ + return ((val) << A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK; +} +#define A3XX_RB_RENDER_CONTROL_ALPHA_TO_COVERAGE 0x40000000 +#define A3XX_RB_RENDER_CONTROL_ALPHA_TO_ONE 0x80000000 + +#define REG_A3XX_RB_MSAA_CONTROL 0x000020c2 +#define A3XX_RB_MSAA_CONTROL_DISABLE 0x00000400 +#define A3XX_RB_MSAA_CONTROL_SAMPLES__MASK 0x0000f000 +#define A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT 12 +static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLES(enum a3xx_msaa_samples val) +{ + return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLES__MASK; +} +#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK 0xffff0000 +#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT 16 +static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(uint32_t val) +{ + return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK; +} + +#define REG_A3XX_RB_ALPHA_REF 0x000020c3 +#define A3XX_RB_ALPHA_REF_UINT__MASK 0x0000ff00 +#define A3XX_RB_ALPHA_REF_UINT__SHIFT 8 +static inline uint32_t A3XX_RB_ALPHA_REF_UINT(uint32_t val) +{ + return ((val) << A3XX_RB_ALPHA_REF_UINT__SHIFT) & A3XX_RB_ALPHA_REF_UINT__MASK; +} +#define A3XX_RB_ALPHA_REF_FLOAT__MASK 0xffff0000 +#define A3XX_RB_ALPHA_REF_FLOAT__SHIFT 16 +static inline uint32_t A3XX_RB_ALPHA_REF_FLOAT(float val) +{ + return ((_mesa_float_to_half(val)) << A3XX_RB_ALPHA_REF_FLOAT__SHIFT) & A3XX_RB_ALPHA_REF_FLOAT__MASK; +} + +static inline uint32_t REG_A3XX_RB_MRT(uint32_t i0) { return 0x000020c4 + 0x4*i0; } + +static inline uint32_t REG_A3XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020c4 + 0x4*i0; } +#define A3XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008 +#define A3XX_RB_MRT_CONTROL_BLEND 0x00000010 +#define A3XX_RB_MRT_CONTROL_BLEND2 0x00000020 +#define A3XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00 +#define A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8 +static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val) +{ + return ((val) << A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A3XX_RB_MRT_CONTROL_ROP_CODE__MASK; +} +#define A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK 0x00003000 +#define A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT 12 +static inline uint32_t A3XX_RB_MRT_CONTROL_DITHER_MODE(enum adreno_rb_dither_mode val) +{ + return ((val) << A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT) & A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK; +} +#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000 +#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24 +static inline uint32_t A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val) +{ + return ((val) << A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK; +} + +static inline uint32_t REG_A3XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020c5 + 0x4*i0; } +#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000003f +#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0 +static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a3xx_color_fmt val) +{ + return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK; +} +#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x000000c0 +#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 6 +static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a3xx_tile_mode val) +{ 
+ return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK; +} +#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00000c00 +#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 10 +static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val) +{ + return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK; +} +#define A3XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00004000 +#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xfffe0000 +#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 17 +static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val) +{ + return ((val >> 5) << A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK; +} + +static inline uint32_t REG_A3XX_RB_MRT_BUF_BASE(uint32_t i0) { return 0x000020c6 + 0x4*i0; } +#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK 0xfffffff0 +#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT 4 +static inline uint32_t A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE(uint32_t val) +{ + return ((val >> 5) << A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT) & A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK; +} + +static inline uint32_t REG_A3XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020c7 + 0x4*i0; } +#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f +#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0 +static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val) +{ + return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK; +} +#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0 +#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5 +static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val) +{ + return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK; +} +#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00 +#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8 +static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val) +{ + return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK; +} +#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000 +#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16 +static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val) +{ + return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK; +} +#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000 +#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21 +static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val) +{ + return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK; +} +#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000 +#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24 +static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val) +{ + return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK; +} +#define A3XX_RB_MRT_BLEND_CONTROL_CLAMP_ENABLE 0x20000000 + +#define REG_A3XX_RB_BLEND_RED 0x000020e4 +#define 
A3XX_RB_BLEND_RED_UINT__MASK 0x000000ff +#define A3XX_RB_BLEND_RED_UINT__SHIFT 0 +static inline uint32_t A3XX_RB_BLEND_RED_UINT(uint32_t val) +{ + return ((val) << A3XX_RB_BLEND_RED_UINT__SHIFT) & A3XX_RB_BLEND_RED_UINT__MASK; +} +#define A3XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000 +#define A3XX_RB_BLEND_RED_FLOAT__SHIFT 16 +static inline uint32_t A3XX_RB_BLEND_RED_FLOAT(float val) +{ + return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_RED_FLOAT__SHIFT) & A3XX_RB_BLEND_RED_FLOAT__MASK; +} + +#define REG_A3XX_RB_BLEND_GREEN 0x000020e5 +#define A3XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff +#define A3XX_RB_BLEND_GREEN_UINT__SHIFT 0 +static inline uint32_t A3XX_RB_BLEND_GREEN_UINT(uint32_t val) +{ + return ((val) << A3XX_RB_BLEND_GREEN_UINT__SHIFT) & A3XX_RB_BLEND_GREEN_UINT__MASK; +} +#define A3XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000 +#define A3XX_RB_BLEND_GREEN_FLOAT__SHIFT 16 +static inline uint32_t A3XX_RB_BLEND_GREEN_FLOAT(float val) +{ + return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A3XX_RB_BLEND_GREEN_FLOAT__MASK; +} + +#define REG_A3XX_RB_BLEND_BLUE 0x000020e6 +#define A3XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff +#define A3XX_RB_BLEND_BLUE_UINT__SHIFT 0 +static inline uint32_t A3XX_RB_BLEND_BLUE_UINT(uint32_t val) +{ + return ((val) << A3XX_RB_BLEND_BLUE_UINT__SHIFT) & A3XX_RB_BLEND_BLUE_UINT__MASK; +} +#define A3XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000 +#define A3XX_RB_BLEND_BLUE_FLOAT__SHIFT 16 +static inline uint32_t A3XX_RB_BLEND_BLUE_FLOAT(float val) +{ + return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A3XX_RB_BLEND_BLUE_FLOAT__MASK; +} + +#define REG_A3XX_RB_BLEND_ALPHA 0x000020e7 +#define A3XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff +#define A3XX_RB_BLEND_ALPHA_UINT__SHIFT 0 +static inline uint32_t A3XX_RB_BLEND_ALPHA_UINT(uint32_t val) +{ + return ((val) << A3XX_RB_BLEND_ALPHA_UINT__SHIFT) & A3XX_RB_BLEND_ALPHA_UINT__MASK; +} +#define A3XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000 +#define A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16 +static inline uint32_t A3XX_RB_BLEND_ALPHA_FLOAT(float val) +{ + return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK; +} + +#define REG_A3XX_RB_CLEAR_COLOR_DW0 0x000020e8 + +#define REG_A3XX_RB_CLEAR_COLOR_DW1 0x000020e9 + +#define REG_A3XX_RB_CLEAR_COLOR_DW2 0x000020ea + +#define REG_A3XX_RB_CLEAR_COLOR_DW3 0x000020eb + +#define REG_A3XX_RB_COPY_CONTROL 0x000020ec +#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003 +#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT 0 +static inline uint32_t A3XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val) +{ + return ((val) << A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK; +} +#define A3XX_RB_COPY_CONTROL_DEPTHCLEAR 0x00000008 +#define A3XX_RB_COPY_CONTROL_MODE__MASK 0x00000070 +#define A3XX_RB_COPY_CONTROL_MODE__SHIFT 4 +static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val) +{ + return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK; +} +#define A3XX_RB_COPY_CONTROL_MSAA_SRGB_DOWNSAMPLE 0x00000080 +#define A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00 +#define A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8 +static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val) +{ + return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK; +} +#define A3XX_RB_COPY_CONTROL_DEPTH32_RESOLVE 0x00001000 +#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000 +#define 
A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14 +static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val) +{ + return ((val >> 14) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK; +} + +#define REG_A3XX_RB_COPY_DEST_BASE 0x000020ed +#define A3XX_RB_COPY_DEST_BASE_BASE__MASK 0xfffffff0 +#define A3XX_RB_COPY_DEST_BASE_BASE__SHIFT 4 +static inline uint32_t A3XX_RB_COPY_DEST_BASE_BASE(uint32_t val) +{ + return ((val >> 5) << A3XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A3XX_RB_COPY_DEST_BASE_BASE__MASK; +} + +#define REG_A3XX_RB_COPY_DEST_PITCH 0x000020ee +#define A3XX_RB_COPY_DEST_PITCH_PITCH__MASK 0xffffffff +#define A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0 +static inline uint32_t A3XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val) +{ + return ((val >> 5) << A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A3XX_RB_COPY_DEST_PITCH_PITCH__MASK; +} + +#define REG_A3XX_RB_COPY_DEST_INFO 0x000020ef +#define A3XX_RB_COPY_DEST_INFO_TILE__MASK 0x00000003 +#define A3XX_RB_COPY_DEST_INFO_TILE__SHIFT 0 +static inline uint32_t A3XX_RB_COPY_DEST_INFO_TILE(enum a3xx_tile_mode val) +{ + return ((val) << A3XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A3XX_RB_COPY_DEST_INFO_TILE__MASK; +} +#define A3XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000fc +#define A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 2 +static inline uint32_t A3XX_RB_COPY_DEST_INFO_FORMAT(enum a3xx_color_fmt val) +{ + return ((val) << A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A3XX_RB_COPY_DEST_INFO_FORMAT__MASK; +} +#define A3XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300 +#define A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8 +static inline uint32_t A3XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val) +{ + return ((val) << A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A3XX_RB_COPY_DEST_INFO_SWAP__MASK; +} +#define A3XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00 +#define A3XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10 +static inline uint32_t A3XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val) +{ + return ((val) << A3XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A3XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK; +} +#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000 +#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14 +static inline uint32_t A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val) +{ + return ((val) << A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK; +} +#define A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK 0x001c0000 +#define A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT 18 +static inline uint32_t A3XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val) +{ + return ((val) << A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK; +} + +#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100 +#define A3XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z 0x00000001 +#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x00000002 +#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004 +#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00000008 +#define A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070 +#define A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4 +static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val) +{ + return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK; +} +#define A3XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080 +#define A3XX_RB_DEPTH_CONTROL_Z_READ_ENABLE 0x80000000 + +#define REG_A3XX_RB_DEPTH_CLEAR 0x00002101 + +#define REG_A3XX_RB_DEPTH_INFO 0x00002102 +#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000003 +#define 
A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0 +static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val) +{ + return ((val) << A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK; +} +#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff800 +#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 11 +static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val) +{ + return ((val >> 12) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK; +} + +#define REG_A3XX_RB_DEPTH_PITCH 0x00002103 +#define A3XX_RB_DEPTH_PITCH__MASK 0xffffffff +#define A3XX_RB_DEPTH_PITCH__SHIFT 0 +static inline uint32_t A3XX_RB_DEPTH_PITCH(uint32_t val) +{ + return ((val >> 3) << A3XX_RB_DEPTH_PITCH__SHIFT) & A3XX_RB_DEPTH_PITCH__MASK; +} + +#define REG_A3XX_RB_STENCIL_CONTROL 0x00002104 +#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001 +#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002 +#define A3XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004 +#define A3XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700 +#define A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8 +static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val) +{ + return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC__MASK; +} +#define A3XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800 +#define A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11 +static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val) +{ + return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL__MASK; +} +#define A3XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000 +#define A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14 +static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val) +{ + return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS__MASK; +} +#define A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000 +#define A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17 +static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val) +{ + return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK; +} +#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000 +#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20 +static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val) +{ + return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK; +} +#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000 +#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23 +static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val) +{ + return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK; +} +#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000 +#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26 +static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val) +{ + return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK; +} +#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000 +#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29 +static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val) +{ + return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK; +} + +#define REG_A3XX_RB_STENCIL_CLEAR 0x00002105 + +#define REG_A3XX_RB_STENCIL_INFO 0x00002106 +#define A3XX_RB_STENCIL_INFO_STENCIL_BASE__MASK 0xfffff800 +#define 
A3XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 11 +static inline uint32_t A3XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val) +{ + return ((val >> 12) << A3XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A3XX_RB_STENCIL_INFO_STENCIL_BASE__MASK; +} + +#define REG_A3XX_RB_STENCIL_PITCH 0x00002107 +#define A3XX_RB_STENCIL_PITCH__MASK 0xffffffff +#define A3XX_RB_STENCIL_PITCH__SHIFT 0 +static inline uint32_t A3XX_RB_STENCIL_PITCH(uint32_t val) +{ + return ((val >> 3) << A3XX_RB_STENCIL_PITCH__SHIFT) & A3XX_RB_STENCIL_PITCH__MASK; +} + +#define REG_A3XX_RB_STENCILREFMASK 0x00002108 +#define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff +#define A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0 +static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILREF(uint32_t val) +{ + return ((val) << A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILREF__MASK; +} +#define A3XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00 +#define A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8 +static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val) +{ + return ((val) << A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILMASK__MASK; +} +#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000 +#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16 +static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val) +{ + return ((val) << A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK; +} + +#define REG_A3XX_RB_STENCILREFMASK_BF 0x00002109 +#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff +#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0 +static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val) +{ + return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK; +} +#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00 +#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8 +static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val) +{ + return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK; +} +#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000 +#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16 +static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val) +{ + return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK; +} + +#define REG_A3XX_RB_LRZ_VSC_CONTROL 0x0000210c +#define A3XX_RB_LRZ_VSC_CONTROL_BINNING_ENABLE 0x00000002 + +#define REG_A3XX_RB_WINDOW_OFFSET 0x0000210e +#define A3XX_RB_WINDOW_OFFSET_X__MASK 0x0000ffff +#define A3XX_RB_WINDOW_OFFSET_X__SHIFT 0 +static inline uint32_t A3XX_RB_WINDOW_OFFSET_X(uint32_t val) +{ + return ((val) << A3XX_RB_WINDOW_OFFSET_X__SHIFT) & A3XX_RB_WINDOW_OFFSET_X__MASK; +} +#define A3XX_RB_WINDOW_OFFSET_Y__MASK 0xffff0000 +#define A3XX_RB_WINDOW_OFFSET_Y__SHIFT 16 +static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val) +{ + return ((val) << A3XX_RB_WINDOW_OFFSET_Y__SHIFT) & A3XX_RB_WINDOW_OFFSET_Y__MASK; +} + +#define REG_A3XX_RB_SAMPLE_COUNT_CONTROL 0x00002110 +#define A3XX_RB_SAMPLE_COUNT_CONTROL_RESET 0x00000001 +#define A3XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002 + +#define REG_A3XX_RB_SAMPLE_COUNT_ADDR 0x00002111 + +#define REG_A3XX_RB_Z_CLAMP_MIN 0x00002114 + +#define REG_A3XX_RB_Z_CLAMP_MAX 0x00002115 + +#define REG_A3XX_VGT_BIN_BASE 0x000021e1 + +#define REG_A3XX_VGT_BIN_SIZE 0x000021e2 + +#define 
REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4 +#define A3XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000 +#define A3XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16 +static inline uint32_t A3XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val) +{ + return ((val) << A3XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A3XX_PC_VSTREAM_CONTROL_SIZE__MASK; +} +#define A3XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000 +#define A3XX_PC_VSTREAM_CONTROL_N__SHIFT 22 +static inline uint32_t A3XX_PC_VSTREAM_CONTROL_N(uint32_t val) +{ + return ((val) << A3XX_PC_VSTREAM_CONTROL_N__SHIFT) & A3XX_PC_VSTREAM_CONTROL_N__MASK; +} + +#define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea + +#define REG_A3XX_PC_PRIM_VTX_CNTL 0x000021ec +#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK 0x0000001f +#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT 0 +static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC(uint32_t val) +{ + return ((val) << A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK; +} +#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK 0x000000e0 +#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT 5 +static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val) +{ + return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK; +} +#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK 0x00000700 +#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT 8 +static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val) +{ + return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK; +} +#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_ENABLE 0x00001000 +#define A3XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART 0x00100000 +#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000 +#define A3XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000 + +#define REG_A3XX_PC_RESTART_INDEX 0x000021ed + +#define REG_A3XX_HLSQ_CONTROL_0_REG 0x00002200 +#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000030 +#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4 +static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val) +{ + return ((val) << A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK; +} +#define A3XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040 +#define A3XX_HLSQ_CONTROL_0_REG_COMPUTEMODE 0x00000100 +#define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200 +#define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400 +#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK 0x00fff000 +#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT 12 +static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC(uint32_t val) +{ + return ((val) << A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK; +} +#define A3XX_HLSQ_CONTROL_0_REG_FSONLYTEX 0x02000000 +#define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000 +#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000 +#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27 +static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val) +{ + return ((val) << A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK; +} +#define A3XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000 +#define A3XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000 +#define A3XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000 +#define A3XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 
+
+#define REG_A3XX_HLSQ_CONTROL_0_REG 0x00002200
+#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000030
+#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
+}
+#define A3XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
+#define A3XX_HLSQ_CONTROL_0_REG_COMPUTEMODE 0x00000100
+#define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
+#define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
+#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK 0x00fff000
+#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT 12
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK;
+}
+#define A3XX_HLSQ_CONTROL_0_REG_FSONLYTEX 0x02000000
+#define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
+#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000
+#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK;
+}
+#define A3XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
+#define A3XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
+#define A3XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
+#define A3XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000
+
+#define REG_A3XX_HLSQ_CONTROL_1_REG 0x00002201
+#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x000000c0
+#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
+}
+#define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK 0x00ff0000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK 0xff000000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT 24
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK;
+}
+
+#define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202
+#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK 0x000003fc
+#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT 2
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK 0x03fc0000
+#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT 18
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
+#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK;
+}
+
+#define REG_A3XX_HLSQ_CONTROL_3_REG 0x00002203
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__MASK 0x000000ff
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__MASK 0x0000ff00
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__SHIFT 8
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__MASK 0x00ff0000
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__MASK 0xff000000
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__SHIFT 24
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__MASK;
+}
+
+#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x000003ff
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x001ff000
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
+static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK;
+}
+#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A3XX_HLSQ_FS_CONTROL_REG 0x00002205
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x000003ff
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x001ff000
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
+static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK;
+}
+#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG 0x00002206
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x000001ff
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK;
+}
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0x01ff0000
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK;
+}
+
+#define REG_A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x00002207
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x000001ff
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK;
+}
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0x01ff0000
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK;
+}
+
+#define REG_A3XX_HLSQ_CL_NDRANGE_0_REG 0x0000220a
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__MASK 0x00000003
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__MASK;
+}
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__MASK 0x00000ffc
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__SHIFT 2
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__MASK;
+}
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__MASK 0x003ff000
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__SHIFT 12
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__MASK;
+}
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__MASK 0xffc00000
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__SHIFT 22
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__MASK;
+}
+
+static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK(uint32_t i0) { return 0x0000220b + 0x2*i0; }
+
+static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_SIZE(uint32_t i0) { return 0x0000220b + 0x2*i0; }
+
+static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_OFFSET(uint32_t i0) { return 0x0000220c + 0x2*i0; }
+
+#define REG_A3XX_HLSQ_CL_CONTROL_0_REG 0x00002211
+
+#define REG_A3XX_HLSQ_CL_CONTROL_1_REG 0x00002212
+
+#define REG_A3XX_HLSQ_CL_KERNEL_CONST_REG 0x00002214
+
+static inline uint32_t REG_A3XX_HLSQ_CL_KERNEL_GROUP(uint32_t i0) { return 0x00002215 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_HLSQ_CL_KERNEL_GROUP_RATIO(uint32_t i0) { return 0x00002215 + 0x1*i0; }
+
+#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG 0x00002216
+
+#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x00002217
+
+#define REG_A3XX_HLSQ_CL_WG_OFFSET_REG 0x0000221a
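A sketch of packing the compute ND-range word; `dim` and the per-axis local sizes are hypothetical, and the exact encoding each field expects (e.g. size vs. size-1) follows the hardware docs rather than anything shown in this header. Note that REG_A3XX_HLSQ_CL_GLOBAL_WORK(i) strides by two registers per dimension (size at +0, offset at +1):

	uint32_t ndrange0 =
		A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM(dim) |	/* 1-, 2- or 3-dim kernel */
		A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0(local_x) |
		A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1(local_y) |
		A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2(local_z);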
+
+#define REG_A3XX_VFD_CONTROL_0 0x00002240
+#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x0003ffff
+#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0
+static inline uint32_t A3XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t val)
+{
+	return ((val) << A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT) & A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK;
+}
+#define A3XX_VFD_CONTROL_0_PACKETSIZE__MASK 0x003c0000
+#define A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT 18
+static inline uint32_t A3XX_VFD_CONTROL_0_PACKETSIZE(uint32_t val)
+{
+	return ((val) << A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT) & A3XX_VFD_CONTROL_0_PACKETSIZE__MASK;
+}
+#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x07c00000
+#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 22
+static inline uint32_t A3XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
+{
+	return ((val) << A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
+}
+#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK 0xf8000000
+#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT 27
+static inline uint32_t A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
+{
+	return ((val) << A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK;
+}
+
+#define REG_A3XX_VFD_CONTROL_1 0x00002241
+#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000000f
+#define A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
+static inline uint32_t A3XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
+{
+	return ((val) << A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
+}
+#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK 0x000000f0
+#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT 4
+static inline uint32_t A3XX_VFD_CONTROL_1_MAXTHRESHOLD(uint32_t val)
+{
+	return ((val) << A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK;
+}
+#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK 0x00000f00
+#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT 8
+static inline uint32_t A3XX_VFD_CONTROL_1_MINTHRESHOLD(uint32_t val)
+{
+	return ((val) << A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK;
+}
+#define A3XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
+#define A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
+static inline uint32_t A3XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+	return ((val) << A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A3XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+#define A3XX_VFD_CONTROL_1_REGID4INST__MASK 0xff000000
+#define A3XX_VFD_CONTROL_1_REGID4INST__SHIFT 24
+static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+	return ((val) << A3XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A3XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+
+#define REG_A3XX_VFD_INDEX_MIN 0x00002242
+
+#define REG_A3XX_VFD_INDEX_MAX 0x00002243
+
+#define REG_A3XX_VFD_INSTANCEID_OFFSET 0x00002244
+
+#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
+
+static inline uint32_t REG_A3XX_VFD_FETCH(uint32_t i0) { return 0x00002246 + 0x2*i0; }
+
+static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; }
+#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f
+#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT 0
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val)
+{
+	return ((val) << A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK;
+}
+#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0000ff80
+#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val)
+{
+	return ((val) << A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK;
+}
+#define A3XX_VFD_FETCH_INSTR_0_INSTANCED 0x00010000
+#define A3XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00020000
+#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK 0x00fc0000
+#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT 18
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_INDEXCODE(uint32_t val)
+{
+	return ((val) << A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK;
+}
+#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK 0xff000000
+#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT 24
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t val)
+{
+	return ((val) << A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK;
+}
+
+static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x00002247 + 0x2*i0; }
+
+static inline uint32_t REG_A3XX_VFD_DECODE(uint32_t i0) { return 0x00002266 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x00002266 + 0x1*i0; }
+#define A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f
+#define A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A3XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val)
+{
+	return ((val) << A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_CONSTFILL 0x00000010
+#define A3XX_VFD_DECODE_INSTR_FORMAT__MASK 0x00000fc0
+#define A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT 6
+static inline uint32_t A3XX_VFD_DECODE_INSTR_FORMAT(enum a3xx_vtx_fmt val)
+{
+	return ((val) << A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A3XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_REGID__MASK 0x000ff000
+#define A3XX_VFD_DECODE_INSTR_REGID__SHIFT 12
+static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val)
+{
+	return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_INT 0x00100000
+#define A3XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000
+#define A3XX_VFD_DECODE_INSTR_SWAP__SHIFT 22
+static inline uint32_t A3XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+	return ((val) << A3XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A3XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
+#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
+static inline uint32_t A3XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
+{
+	return ((val) << A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_LASTCOMPVALID 0x20000000
+#define A3XX_VFD_DECODE_INSTR_SWITCHNEXT 0x40000000
+
+#define REG_A3XX_VFD_VS_THREADING_THRESHOLD 0x0000227e
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK 0x0000000f
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT 0
+static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD(uint32_t val)
+{
+	return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK;
+}
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK 0x0000ff00
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT 8
+static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT(uint32_t val)
+{
+	return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK;
+}
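The VFD programs one fetch/decode pair per vertex attribute: REG_A3XX_VFD_FETCH_INSTR_0(i) strides by two registers (INSTR_1 holds the buffer address) while the decode array strides by one. A minimal sketch for one attribute, with hypothetical `fetchsize`, `stride`, `fmt` and `regid` inputs:

	uint32_t fetch0 =
		A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(fetchsize) |	/* size of one fetch */
		A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(stride) |	/* distance between vertices */
		A3XX_VFD_FETCH_INSTR_0_STEPRATE(1);
	uint32_t decode =
		A3XX_VFD_DECODE_INSTR_WRITEMASK(0xf) |		/* all four components */
		A3XX_VFD_DECODE_INSTR_FORMAT(fmt) |		/* enum a3xx_vtx_fmt */
		A3XX_VFD_DECODE_INSTR_REGID(regid) |
		A3XX_VFD_DECODE_INSTR_LASTCOMPVALID;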
+
+#define REG_A3XX_VPC_ATTR 0x00002280
+#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x000001ff
+#define A3XX_VPC_ATTR_TOTALATTR__SHIFT 0
+static inline uint32_t A3XX_VPC_ATTR_TOTALATTR(uint32_t val)
+{
+	return ((val) << A3XX_VPC_ATTR_TOTALATTR__SHIFT) & A3XX_VPC_ATTR_TOTALATTR__MASK;
+}
+#define A3XX_VPC_ATTR_PSIZE 0x00000200
+#define A3XX_VPC_ATTR_THRDASSIGN__MASK 0x0ffff000
+#define A3XX_VPC_ATTR_THRDASSIGN__SHIFT 12
+static inline uint32_t A3XX_VPC_ATTR_THRDASSIGN(uint32_t val)
+{
+	return ((val) << A3XX_VPC_ATTR_THRDASSIGN__SHIFT) & A3XX_VPC_ATTR_THRDASSIGN__MASK;
+}
+#define A3XX_VPC_ATTR_LMSIZE__MASK 0xf0000000
+#define A3XX_VPC_ATTR_LMSIZE__SHIFT 28
+static inline uint32_t A3XX_VPC_ATTR_LMSIZE(uint32_t val)
+{
+	return ((val) << A3XX_VPC_ATTR_LMSIZE__SHIFT) & A3XX_VPC_ATTR_LMSIZE__MASK;
+}
+
+#define REG_A3XX_VPC_PACK 0x00002281
+#define A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK 0x0000ff00
+#define A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT 8
+static inline uint32_t A3XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t val)
+{
+	return ((val) << A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT) & A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK;
+}
+#define A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK 0x00ff0000
+#define A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT 16
+static inline uint32_t A3XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
+{
+	return ((val) << A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
+}
+
+static inline uint32_t REG_A3XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002282 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002282 + 0x1*i0; }
+#define A3XX_VPC_VARYING_INTERP_MODE_C0__MASK 0x00000003
+#define A3XX_VPC_VARYING_INTERP_MODE_C0__SHIFT 0
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C0(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C0__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C0__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C1__MASK 0x0000000c
+#define A3XX_VPC_VARYING_INTERP_MODE_C1__SHIFT 2
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C1(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C1__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C1__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C2__MASK 0x00000030
+#define A3XX_VPC_VARYING_INTERP_MODE_C2__SHIFT 4
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C2(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C2__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C2__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C3__MASK 0x000000c0
+#define A3XX_VPC_VARYING_INTERP_MODE_C3__SHIFT 6
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C3(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C3__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C3__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C4__MASK 0x00000300
+#define A3XX_VPC_VARYING_INTERP_MODE_C4__SHIFT 8
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C4(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C4__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C4__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C5__MASK 0x00000c00
+#define A3XX_VPC_VARYING_INTERP_MODE_C5__SHIFT 10
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C5(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C5__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C5__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C6__MASK 0x00003000
+#define A3XX_VPC_VARYING_INTERP_MODE_C6__SHIFT 12
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C6(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C6__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C6__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C7__MASK 0x0000c000
+#define A3XX_VPC_VARYING_INTERP_MODE_C7__SHIFT 14
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C7(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C7__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C7__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C8__MASK 0x00030000
+#define A3XX_VPC_VARYING_INTERP_MODE_C8__SHIFT 16
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C8(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C8__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C8__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C9__MASK 0x000c0000
+#define A3XX_VPC_VARYING_INTERP_MODE_C9__SHIFT 18
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C9(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C9__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C9__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CA__MASK 0x00300000
+#define A3XX_VPC_VARYING_INTERP_MODE_CA__SHIFT 20
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CA(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CA__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CA__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CB__MASK 0x00c00000
+#define A3XX_VPC_VARYING_INTERP_MODE_CB__SHIFT 22
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CB(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CB__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CB__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CC__MASK 0x03000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CC__SHIFT 24
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CC(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CC__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CC__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CD__MASK 0x0c000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CD__SHIFT 26
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CD(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CD__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CD__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CE__MASK 0x30000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CE__SHIFT 28
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CE(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CE__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CE__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CF__MASK 0xc0000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT 30
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CF(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CF__MASK;
+}
+
+static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00002286 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00002286 + 0x1*i0; }
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C0__MASK 0x00000003
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C0__SHIFT 0
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C0(enum a3xx_repl_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C0__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C0__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C1__MASK 0x0000000c
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C1__SHIFT 2
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C1(enum a3xx_repl_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C1__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C1__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C2__MASK 0x00000030
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C2__SHIFT 4
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C2(enum a3xx_repl_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C2__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C2__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C3__MASK 0x000000c0
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C3__SHIFT 6
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C3(enum a3xx_repl_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C3__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C3__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C4__MASK 0x00000300
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C4__SHIFT 8
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C4(enum a3xx_repl_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C4__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C4__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C5__MASK 0x00000c00
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C5__SHIFT 10
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C5(enum a3xx_repl_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C5__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C5__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C6__MASK 0x00003000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C6__SHIFT 12
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C6(enum a3xx_repl_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C6__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C6__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C7__MASK 0x0000c000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C7__SHIFT 14
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C7(enum a3xx_repl_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C7__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C7__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C8__MASK 0x00030000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C8__SHIFT 16
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C8(enum a3xx_repl_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C8__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C8__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C9__MASK 0x000c0000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C9__SHIFT 18
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C9(enum a3xx_repl_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C9__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C9__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CA__MASK 0x00300000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CA__SHIFT 20
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CA(enum a3xx_repl_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CA__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CA__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CB__MASK 0x00c00000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CB__SHIFT 22
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CB(enum a3xx_repl_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CB__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CB__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CC__MASK 0x03000000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CC__SHIFT 24
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CC(enum a3xx_repl_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CC__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CC__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CD__MASK 0x0c000000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CD__SHIFT 26
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CD(enum a3xx_repl_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CD__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CD__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CE__MASK 0x30000000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CE__SHIFT 28
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CE(enum a3xx_repl_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CE__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CE__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CF__MASK 0xc0000000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CF__SHIFT 30
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CF(enum a3xx_repl_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CF__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CF__MASK;
+}
+
+#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_0 0x0000228a
+
+#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_1 0x0000228b
+
+#define REG_A3XX_SP_SP_CTRL_REG 0x000022c0
+#define A3XX_SP_SP_CTRL_REG_RESOLVE 0x00010000
+#define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x00040000
+#define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT 18
+static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t val)
+{
+	return ((val) << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK;
+}
+#define A3XX_SP_SP_CTRL_REG_BINNING 0x00080000
+#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK 0x00300000
+#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT 20
+static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val)
+{
+	return ((val) << A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK;
+}
+#define A3XX_SP_SP_CTRL_REG_L0MODE__MASK 0x00c00000
+#define A3XX_SP_SP_CTRL_REG_L0MODE__SHIFT 22
+static inline uint32_t A3XX_SP_SP_CTRL_REG_L0MODE(uint32_t val)
+{
+	return ((val) << A3XX_SP_SP_CTRL_REG_L0MODE__SHIFT) & A3XX_SP_SP_CTRL_REG_L0MODE__MASK;
+}
+
+#define REG_A3XX_SP_VS_CTRL_REG0 0x000022c4
+#define A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+	return ((val) << A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002
+#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val)
+{
+	return ((val) << A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A3XX_SP_VS_CTRL_REG0_ALUSCHMODE 0x00000008
+#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+	return ((val) << A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000
+#define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG0_LENGTH__MASK;
+}
+
+#define REG_A3XX_SP_VS_CTRL_REG1 0x000022c5
+#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff
+#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00
+#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x7f000000
+#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24
+static inline uint32_t A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK;
+}
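A minimal sketch of packing SP_VS_CTRL_REG0; `mode`, `half_regs`, `full_regs` and `instrlen` are hypothetical caller values:

	uint32_t vs_ctrl0 =
		A3XX_SP_VS_CTRL_REG0_THREADMODE(mode) |			/* enum a3xx_threadmode */
		A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(half_regs) |	/* half-precision GPRs used */
		A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(full_regs) |	/* full-precision GPRs used */
		A3XX_SP_VS_CTRL_REG0_LENGTH(instrlen);			/* shader length field */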
+
+#define REG_A3XX_SP_VS_PARAM_REG 0x000022c6
+#define A3XX_SP_VS_PARAM_REG_POSREGID__MASK 0x000000ff
+#define A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT 0
+static inline uint32_t A3XX_SP_VS_PARAM_REG_POSREGID(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_POSREGID__MASK;
+}
+#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK 0x0000ff00
+#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT 8
+static inline uint32_t A3XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
+}
+#define A3XX_SP_VS_PARAM_REG_POS2DMODE 0x00010000
+#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0x01f00000
+#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20
+static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A3XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A3XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A3XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A3XX_SP_VS_OUT_REG_A_HALF 0x00000100
+#define A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00
+#define A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9
+static inline uint32_t A3XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A3XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A3XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A3XX_SP_VS_OUT_REG_B_HALF 0x01000000
+#define A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
+#define A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
+static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A3XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x0000007f
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x00007f00
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x007f0000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0x7f000000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
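Each SP_VS_OUT register describes two vertex-shader outputs, an A half in the low 16 bits and a B half in the high 16. A sketch of one pair, with hypothetical regids and full xyzw component masks:

	uint32_t out_pair =
		A3XX_SP_VS_OUT_REG_A_REGID(regid_a) |
		A3XX_SP_VS_OUT_REG_A_COMPMASK(0xf) |	/* write x,y,z,w */
		A3XX_SP_VS_OUT_REG_B_REGID(regid_b) |
		A3XX_SP_VS_OUT_REG_B_COMPMASK(0xf);
	/* programmed at REG_A3XX_SP_VS_OUT_REG(i); the matching VPC locations
	 * go in REG_A3XX_SP_VS_VPC_DST_REG(i), four OUTLOC fields per register */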
+
+#define REG_A3XX_SP_VS_OBJ_OFFSET_REG 0x000022d4
+#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK 0x0000ffff
+#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT 0
+static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK;
+}
+#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5
+
+#define REG_A3XX_SP_VS_PVT_MEM_PARAM_REG 0x000022d6
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK 0x000000ff
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK 0x00ffff00
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT 8
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK 0x0000001f
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT 0
+static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK 0xffffffe0
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5
+static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
+{
+	return ((val >> 5) << A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
+}
+
+#define REG_A3XX_SP_VS_PVT_MEM_SIZE_REG 0x000022d8
+
+#define REG_A3XX_SP_VS_LENGTH_REG 0x000022df
+#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff
+#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_VS_LENGTH_REG_SHADERLENGTH(uint32_t val)
+{
+	return ((val) << A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK;
+}
+
+#define REG_A3XX_SP_FS_CTRL_REG0 0x000022e0
+#define A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+	return ((val) << A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002
+#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val)
+{
+	return ((val) << A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A3XX_SP_FS_CTRL_REG0_ALUSCHMODE 0x00000008
+#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_FSBYPASSENABLE 0x00020000
+#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP 0x00040000
+#define A3XX_SP_FS_CTRL_REG0_OUTORDERED 0x00080000
+#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+	return ((val) << A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A3XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
+#define A3XX_SP_FS_CTRL_REG0_COMPUTEMODE 0x00800000
+#define A3XX_SP_FS_CTRL_REG0_LENGTH__MASK 0xff000000
+#define A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT 24
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_LENGTH(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG0_LENGTH__MASK;
+}
+
+#define REG_A3XX_SP_FS_CTRL_REG1 0x000022e1
+#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff
+#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00
+#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x00f00000
+#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 20
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x7f000000
+#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT 24
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT) & A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK;
+}
+
+#define REG_A3XX_SP_FS_OBJ_OFFSET_REG 0x000022e2
+#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK 0x0000ffff
+#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT 0
+static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK;
+}
+#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3
+
+#define REG_A3XX_SP_FS_PVT_MEM_PARAM_REG 0x000022e4
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK 0x000000ff
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK 0x00ffff00
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT 8
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK 0x0000001f
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT 0
+static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK 0xffffffe0
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5
+static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
+{
+	return ((val >> 5) << A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
+}
+
+#define REG_A3XX_SP_FS_PVT_MEM_SIZE_REG 0x000022e6
+
+#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_0 0x000022e8
+
+#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x000022e9
+
+#define REG_A3XX_SP_FS_OUTPUT_REG 0x000022ec
+#define A3XX_SP_FS_OUTPUT_REG_MRT__MASK 0x00000003
+#define A3XX_SP_FS_OUTPUT_REG_MRT__SHIFT 0
+static inline uint32_t A3XX_SP_FS_OUTPUT_REG_MRT(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_OUTPUT_REG_MRT__SHIFT) & A3XX_SP_FS_OUTPUT_REG_MRT__MASK;
+}
+#define A3XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080
+#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00
+#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8
+static inline uint32_t A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK;
+}
+
+static inline uint32_t REG_A3XX_SP_FS_MRT(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
+#define A3XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff
+#define A3XX_SP_FS_MRT_REG_REGID__SHIFT 0
+static inline uint32_t A3XX_SP_FS_MRT_REG_REGID(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_MRT_REG_REGID__SHIFT) & A3XX_SP_FS_MRT_REG_REGID__MASK;
+}
+#define A3XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100
+#define A3XX_SP_FS_MRT_REG_SINT 0x00000400
+#define A3XX_SP_FS_MRT_REG_UINT 0x00000800
+
+static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT_REG(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
+#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK 0x0000003f
+#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT 0
+static inline uint32_t A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT(enum a3xx_color_fmt val)
+{
+	return ((val) << A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT) & A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK;
+}
+
+#define REG_A3XX_SP_FS_LENGTH_REG 0x000022ff
+#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff
+#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_FS_LENGTH_REG_SHADERLENGTH(uint32_t val)
+{
+	return ((val) << A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK;
+}
+
+#define REG_A3XX_PA_SC_AA_CONFIG 0x00002301
+
+#define REG_A3XX_TPL1_TP_VS_TEX_OFFSET 0x00002340
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
+static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val)
+{
+	return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK;
+}
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8
+static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val)
+{
+	return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK;
+}
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT 16
+static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
+{
+	return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK;
+}
+
+#define REG_A3XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR 0x00002341
+
+#define REG_A3XX_TPL1_TP_FS_TEX_OFFSET 0x00002342
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
+static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val)
+{
+	return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK;
+}
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8
+static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val)
+{
+	return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK;
+}
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT 16
+static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
+{
+	return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK;
+}
+
+#define REG_A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x00002343
+
+#define REG_A3XX_VBIF_CLKON 0x00003001
+
+#define REG_A3XX_VBIF_FIXED_SORT_EN 0x0000300c
+
+#define REG_A3XX_VBIF_FIXED_SORT_SEL0 0x0000300d
+
+#define REG_A3XX_VBIF_FIXED_SORT_SEL1 0x0000300e
+
+#define REG_A3XX_VBIF_ABIT_SORT 0x0000301c
+
+#define REG_A3XX_VBIF_ABIT_SORT_CONF 0x0000301d
+
+#define REG_A3XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A3XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
+
+#define REG_A3XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
+
+#define REG_A3XX_VBIF_IN_WR_LIM_CONF0 0x00003030
+
+#define REG_A3XX_VBIF_IN_WR_LIM_CONF1 0x00003031
+
+#define REG_A3XX_VBIF_OUT_RD_LIM_CONF0 0x00003034
+
+#define REG_A3XX_VBIF_OUT_WR_LIM_CONF0 0x00003035
+
+#define REG_A3XX_VBIF_DDR_OUT_MAX_BURST 0x00003036
+
+#define REG_A3XX_VBIF_ARB_CTL 0x0000303c
+
+#define REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
+
+#define REG_A3XX_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x00003058
+
+#define REG_A3XX_VBIF_OUT_AXI_AOOO_EN 0x0000305e
+
+#define REG_A3XX_VBIF_OUT_AXI_AOOO 0x0000305f
+
+#define REG_A3XX_VBIF_PERF_CNT_EN 0x00003070
+#define A3XX_VBIF_PERF_CNT_EN_CNT0 0x00000001
+#define A3XX_VBIF_PERF_CNT_EN_CNT1 0x00000002
+#define A3XX_VBIF_PERF_CNT_EN_PWRCNT0 0x00000004
+#define A3XX_VBIF_PERF_CNT_EN_PWRCNT1 0x00000008
+#define A3XX_VBIF_PERF_CNT_EN_PWRCNT2 0x00000010
+
+#define REG_A3XX_VBIF_PERF_CNT_CLR 0x00003071
+#define A3XX_VBIF_PERF_CNT_CLR_CNT0 0x00000001
+#define A3XX_VBIF_PERF_CNT_CLR_CNT1 0x00000002
+#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT0 0x00000004
+#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT1 0x00000008
+#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT2 0x00000010
+
+#define REG_A3XX_VBIF_PERF_CNT_SEL 0x00003072
+
+#define REG_A3XX_VBIF_PERF_CNT0_LO 0x00003073
+
+#define REG_A3XX_VBIF_PERF_CNT0_HI 0x00003074
+
+#define REG_A3XX_VBIF_PERF_CNT1_LO 0x00003075
+
+#define REG_A3XX_VBIF_PERF_CNT1_HI 0x00003076
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT0_LO 0x00003077
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT0_HI 0x00003078
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT1_LO 0x00003079
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT1_HI 0x0000307a
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT2_LO 0x0000307b
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT2_HI 0x0000307c
+
+#define REG_A3XX_VSC_BIN_SIZE 0x00000c01
+#define A3XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
+#define A3XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A3XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+	return ((val >> 5) << A3XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A3XX_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A3XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
+#define A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5
+static inline uint32_t A3XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+	return ((val >> 5) << A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A3XX_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A3XX_VSC_SIZE_ADDRESS 0x00000c02
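Unlike most helpers in this file, the VSC_BIN_SIZE WIDTH/HEIGHT helpers pre-shift their argument right by 5, i.e. the register counts bins in 32-pixel units, so the input is a pixel dimension (effectively a multiple of 32). A sketch with hypothetical bin dimensions:

	uint32_t bin_size =
		A3XX_VSC_BIN_SIZE_WIDTH(bin_w) |	/* bin_w in pixels, encoded as bin_w / 32 */
		A3XX_VSC_BIN_SIZE_HEIGHT(bin_h);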
+
+static inline uint32_t REG_A3XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+
+static inline uint32_t REG_A3XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+#define A3XX_VSC_PIPE_CONFIG_X__MASK 0x000003ff
+#define A3XX_VSC_PIPE_CONFIG_X__SHIFT 0
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_X(uint32_t val)
+{
+	return ((val) << A3XX_VSC_PIPE_CONFIG_X__SHIFT) & A3XX_VSC_PIPE_CONFIG_X__MASK;
+}
+#define A3XX_VSC_PIPE_CONFIG_Y__MASK 0x000ffc00
+#define A3XX_VSC_PIPE_CONFIG_Y__SHIFT 10
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_Y(uint32_t val)
+{
+	return ((val) << A3XX_VSC_PIPE_CONFIG_Y__SHIFT) & A3XX_VSC_PIPE_CONFIG_Y__MASK;
+}
+#define A3XX_VSC_PIPE_CONFIG_W__MASK 0x00f00000
+#define A3XX_VSC_PIPE_CONFIG_W__SHIFT 20
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_W(uint32_t val)
+{
+	return ((val) << A3XX_VSC_PIPE_CONFIG_W__SHIFT) & A3XX_VSC_PIPE_CONFIG_W__MASK;
+}
+#define A3XX_VSC_PIPE_CONFIG_H__MASK 0x0f000000
+#define A3XX_VSC_PIPE_CONFIG_H__SHIFT 24
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_H(uint32_t val)
+{
+	return ((val) << A3XX_VSC_PIPE_CONFIG_H__SHIFT) & A3XX_VSC_PIPE_CONFIG_H__MASK;
+}
+
+static inline uint32_t REG_A3XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; }
+
+static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
+
+#define REG_A3XX_VSC_BIN_CONTROL 0x00000c3c
+#define A3XX_VSC_BIN_CONTROL_BINNING_ENABLE 0x00000001
+
+#define REG_A3XX_UNKNOWN_0C3D 0x00000c3d
+
+#define REG_A3XX_PC_PERFCOUNTER0_SELECT 0x00000c48
+
+#define REG_A3XX_PC_PERFCOUNTER1_SELECT 0x00000c49
+
+#define REG_A3XX_PC_PERFCOUNTER2_SELECT 0x00000c4a
+
+#define REG_A3XX_PC_PERFCOUNTER3_SELECT 0x00000c4b
+
+#define REG_A3XX_GRAS_TSE_DEBUG_ECO 0x00000c81
+
+#define REG_A3XX_GRAS_PERFCOUNTER0_SELECT 0x00000c88
+
+#define REG_A3XX_GRAS_PERFCOUNTER1_SELECT 0x00000c89
+
+#define REG_A3XX_GRAS_PERFCOUNTER2_SELECT 0x00000c8a
+
+#define REG_A3XX_GRAS_PERFCOUNTER3_SELECT 0x00000c8b
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_X(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Y(uint32_t i0) { return 0x00000ca1 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Z(uint32_t i0) { return 0x00000ca2 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_W(uint32_t i0) { return 0x00000ca3 + 0x4*i0; }
+
+#define REG_A3XX_RB_GMEM_BASE_ADDR 0x00000cc0
+
+#define REG_A3XX_RB_DEBUG_ECO_CONTROLS_ADDR 0x00000cc1
+
+#define REG_A3XX_RB_PERFCOUNTER0_SELECT 0x00000cc6
+
+#define REG_A3XX_RB_PERFCOUNTER1_SELECT 0x00000cc7
+
+#define REG_A3XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK 0x00003fff
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT 0
+static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val)
+{
+	return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK;
+}
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK 0x0fffc000
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT 14
+static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val)
+{
+	return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK;
+}
+
+#define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT 0x00000e00
+
+#define REG_A3XX_HLSQ_PERFCOUNTER1_SELECT 0x00000e01
+
+#define REG_A3XX_HLSQ_PERFCOUNTER2_SELECT 0x00000e02
+
+#define REG_A3XX_HLSQ_PERFCOUNTER3_SELECT 0x00000e03
+
+#define REG_A3XX_HLSQ_PERFCOUNTER4_SELECT 0x00000e04
+
+#define REG_A3XX_HLSQ_PERFCOUNTER5_SELECT 0x00000e05
+
+#define REG_A3XX_UNKNOWN_0E43 0x00000e43
+
+#define REG_A3XX_VFD_PERFCOUNTER0_SELECT 0x00000e44
+
+#define REG_A3XX_VFD_PERFCOUNTER1_SELECT 0x00000e45
+
+#define REG_A3XX_VPC_VPC_DEBUG_RAM_SEL 0x00000e61
+
+#define REG_A3XX_VPC_VPC_DEBUG_RAM_READ 0x00000e62
+
+#define REG_A3XX_VPC_PERFCOUNTER0_SELECT 0x00000e64
+
+#define REG_A3XX_VPC_PERFCOUNTER1_SELECT 0x00000e65
+
+#define REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG 0x00000e82
+
+#define REG_A3XX_UCHE_PERFCOUNTER0_SELECT 0x00000e84
+
+#define REG_A3XX_UCHE_PERFCOUNTER1_SELECT 0x00000e85
+
+#define REG_A3XX_UCHE_PERFCOUNTER2_SELECT 0x00000e86
+
+#define REG_A3XX_UCHE_PERFCOUNTER3_SELECT 0x00000e87
+
+#define REG_A3XX_UCHE_PERFCOUNTER4_SELECT 0x00000e88
+
+#define REG_A3XX_UCHE_PERFCOUNTER5_SELECT 0x00000e89
+
+#define REG_A3XX_UCHE_CACHE_INVALIDATE0_REG 0x00000ea0
+#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK 0x0fffffff
+#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT 0
+static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR(uint32_t val)
+{
+	return ((val) << A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK;
+}
+
+#define REG_A3XX_UCHE_CACHE_INVALIDATE1_REG 0x00000ea1
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK 0x0fffffff
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT 0
+static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR(uint32_t val)
+{
+	return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK;
+}
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK 0x30000000
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT 28
+static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_opcode val)
+{
+	return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK;
+}
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE 0x80000000
+
+#define REG_A3XX_UNKNOWN_0EA6 0x00000ea6
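A sketch of a whole-cache UCHE invalidate built from the pair above; the INVALIDATE0/1 ADDR fields bound the range when only part of the cache is flushed, while ENTIRE_CACHE makes the range irrelevant. The opcode `op` is a hypothetical value of enum a3xx_cache_opcode:

	uint32_t inv0 = A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR(0);
	uint32_t inv1 = A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(op) |
		A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE;	/* ignore the addr range */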
A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK; +} +#define A3XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000 +#define A3XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000 +#define A3XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000 +#define A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK 0xff000000 +#define A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT 24 +static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val) +{ + return ((val) << A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT) & A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK; +} + +#define REG_A3XX_VGT_IMMED_DATA 0x000021fd + +#define REG_A3XX_TEX_SAMP_0 0x00000000 +#define A3XX_TEX_SAMP_0_CLAMPENABLE 0x00000001 +#define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002 +#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c +#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2 +static inline uint32_t A3XX_TEX_SAMP_0_XY_MAG(enum a3xx_tex_filter val) +{ + return ((val) << A3XX_TEX_SAMP_0_XY_MAG__SHIFT) & A3XX_TEX_SAMP_0_XY_MAG__MASK; +} +#define A3XX_TEX_SAMP_0_XY_MIN__MASK 0x00000030 +#define A3XX_TEX_SAMP_0_XY_MIN__SHIFT 4 +static inline uint32_t A3XX_TEX_SAMP_0_XY_MIN(enum a3xx_tex_filter val) +{ + return ((val) << A3XX_TEX_SAMP_0_XY_MIN__SHIFT) & A3XX_TEX_SAMP_0_XY_MIN__MASK; +} +#define A3XX_TEX_SAMP_0_WRAP_S__MASK 0x000001c0 +#define A3XX_TEX_SAMP_0_WRAP_S__SHIFT 6 +static inline uint32_t A3XX_TEX_SAMP_0_WRAP_S(enum a3xx_tex_clamp val) +{ + return ((val) << A3XX_TEX_SAMP_0_WRAP_S__SHIFT) & A3XX_TEX_SAMP_0_WRAP_S__MASK; +} +#define A3XX_TEX_SAMP_0_WRAP_T__MASK 0x00000e00 +#define A3XX_TEX_SAMP_0_WRAP_T__SHIFT 9 +static inline uint32_t A3XX_TEX_SAMP_0_WRAP_T(enum a3xx_tex_clamp val) +{ + return ((val) << A3XX_TEX_SAMP_0_WRAP_T__SHIFT) & A3XX_TEX_SAMP_0_WRAP_T__MASK; +} +#define A3XX_TEX_SAMP_0_WRAP_R__MASK 0x00007000 +#define A3XX_TEX_SAMP_0_WRAP_R__SHIFT 12 +static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val) +{ + return ((val) << A3XX_TEX_SAMP_0_WRAP_R__SHIFT) & A3XX_TEX_SAMP_0_WRAP_R__MASK; +} +#define A3XX_TEX_SAMP_0_ANISO__MASK 0x00038000 +#define A3XX_TEX_SAMP_0_ANISO__SHIFT 15 +static inline uint32_t A3XX_TEX_SAMP_0_ANISO(enum a3xx_tex_aniso val) +{ + return ((val) << A3XX_TEX_SAMP_0_ANISO__SHIFT) & A3XX_TEX_SAMP_0_ANISO__MASK; +} +#define A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK 0x00700000 +#define A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT 20 +static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val) +{ + return ((val) << A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT) & A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK; +} +#define A3XX_TEX_SAMP_0_CUBEMAPSEAMLESSFILTOFF 0x01000000 +#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000 + +#define REG_A3XX_TEX_SAMP_1 0x00000001 +#define A3XX_TEX_SAMP_1_LOD_BIAS__MASK 0x000007ff +#define A3XX_TEX_SAMP_1_LOD_BIAS__SHIFT 0 +static inline uint32_t A3XX_TEX_SAMP_1_LOD_BIAS(float val) +{ + return ((((int32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_LOD_BIAS__SHIFT) & A3XX_TEX_SAMP_1_LOD_BIAS__MASK; +} +#define A3XX_TEX_SAMP_1_MAX_LOD__MASK 0x003ff000 +#define A3XX_TEX_SAMP_1_MAX_LOD__SHIFT 12 +static inline uint32_t A3XX_TEX_SAMP_1_MAX_LOD(float val) +{ + return ((((uint32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK; +} +#define A3XX_TEX_SAMP_1_MIN_LOD__MASK 0xffc00000 +#define A3XX_TEX_SAMP_1_MIN_LOD__SHIFT 22 +static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val) +{ + return ((((uint32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK; +} + +#define REG_A3XX_TEX_CONST_0 0x00000000 +#define 
A3XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003 +#define A3XX_TEX_CONST_0_TILE_MODE__SHIFT 0 +static inline uint32_t A3XX_TEX_CONST_0_TILE_MODE(enum a3xx_tile_mode val) +{ + return ((val) << A3XX_TEX_CONST_0_TILE_MODE__SHIFT) & A3XX_TEX_CONST_0_TILE_MODE__MASK; +} +#define A3XX_TEX_CONST_0_SRGB 0x00000004 +#define A3XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070 +#define A3XX_TEX_CONST_0_SWIZ_X__SHIFT 4 +static inline uint32_t A3XX_TEX_CONST_0_SWIZ_X(enum a3xx_tex_swiz val) +{ + return ((val) << A3XX_TEX_CONST_0_SWIZ_X__SHIFT) & A3XX_TEX_CONST_0_SWIZ_X__MASK; +} +#define A3XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380 +#define A3XX_TEX_CONST_0_SWIZ_Y__SHIFT 7 +static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Y(enum a3xx_tex_swiz val) +{ + return ((val) << A3XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Y__MASK; +} +#define A3XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00 +#define A3XX_TEX_CONST_0_SWIZ_Z__SHIFT 10 +static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Z(enum a3xx_tex_swiz val) +{ + return ((val) << A3XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Z__MASK; +} +#define A3XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000 +#define A3XX_TEX_CONST_0_SWIZ_W__SHIFT 13 +static inline uint32_t A3XX_TEX_CONST_0_SWIZ_W(enum a3xx_tex_swiz val) +{ + return ((val) << A3XX_TEX_CONST_0_SWIZ_W__SHIFT) & A3XX_TEX_CONST_0_SWIZ_W__MASK; +} +#define A3XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000 +#define A3XX_TEX_CONST_0_MIPLVLS__SHIFT 16 +static inline uint32_t A3XX_TEX_CONST_0_MIPLVLS(uint32_t val) +{ + return ((val) << A3XX_TEX_CONST_0_MIPLVLS__SHIFT) & A3XX_TEX_CONST_0_MIPLVLS__MASK; +} +#define A3XX_TEX_CONST_0_MSAATEX__MASK 0x00300000 +#define A3XX_TEX_CONST_0_MSAATEX__SHIFT 20 +static inline uint32_t A3XX_TEX_CONST_0_MSAATEX(enum a3xx_tex_msaa val) +{ + return ((val) << A3XX_TEX_CONST_0_MSAATEX__SHIFT) & A3XX_TEX_CONST_0_MSAATEX__MASK; +} +#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000 +#define A3XX_TEX_CONST_0_FMT__SHIFT 22 +static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val) +{ + return ((val) << A3XX_TEX_CONST_0_FMT__SHIFT) & A3XX_TEX_CONST_0_FMT__MASK; +} +#define A3XX_TEX_CONST_0_NOCONVERT 0x20000000 +#define A3XX_TEX_CONST_0_TYPE__MASK 0xc0000000 +#define A3XX_TEX_CONST_0_TYPE__SHIFT 30 +static inline uint32_t A3XX_TEX_CONST_0_TYPE(enum a3xx_tex_type val) +{ + return ((val) << A3XX_TEX_CONST_0_TYPE__SHIFT) & A3XX_TEX_CONST_0_TYPE__MASK; +} + +#define REG_A3XX_TEX_CONST_1 0x00000001 +#define A3XX_TEX_CONST_1_HEIGHT__MASK 0x00003fff +#define A3XX_TEX_CONST_1_HEIGHT__SHIFT 0 +static inline uint32_t A3XX_TEX_CONST_1_HEIGHT(uint32_t val) +{ + return ((val) << A3XX_TEX_CONST_1_HEIGHT__SHIFT) & A3XX_TEX_CONST_1_HEIGHT__MASK; +} +#define A3XX_TEX_CONST_1_WIDTH__MASK 0x0fffc000 +#define A3XX_TEX_CONST_1_WIDTH__SHIFT 14 +static inline uint32_t A3XX_TEX_CONST_1_WIDTH(uint32_t val) +{ + return ((val) << A3XX_TEX_CONST_1_WIDTH__SHIFT) & A3XX_TEX_CONST_1_WIDTH__MASK; +} +#define A3XX_TEX_CONST_1_PITCHALIGN__MASK 0xf0000000 +#define A3XX_TEX_CONST_1_PITCHALIGN__SHIFT 28 +static inline uint32_t A3XX_TEX_CONST_1_PITCHALIGN(uint32_t val) +{ + return ((val) << A3XX_TEX_CONST_1_PITCHALIGN__SHIFT) & A3XX_TEX_CONST_1_PITCHALIGN__MASK; +} + +#define REG_A3XX_TEX_CONST_2 0x00000002 +#define A3XX_TEX_CONST_2_INDX__MASK 0x000001ff +#define A3XX_TEX_CONST_2_INDX__SHIFT 0 +static inline uint32_t A3XX_TEX_CONST_2_INDX(uint32_t val) +{ + return ((val) << A3XX_TEX_CONST_2_INDX__SHIFT) & A3XX_TEX_CONST_2_INDX__MASK; +} +#define A3XX_TEX_CONST_2_PITCH__MASK 0x3ffff000 +#define A3XX_TEX_CONST_2_PITCH__SHIFT 12 +static inline uint32_t 
A3XX_TEX_CONST_2_PITCH(uint32_t val) +{ + return ((val) << A3XX_TEX_CONST_2_PITCH__SHIFT) & A3XX_TEX_CONST_2_PITCH__MASK; +} +#define A3XX_TEX_CONST_2_SWAP__MASK 0xc0000000 +#define A3XX_TEX_CONST_2_SWAP__SHIFT 30 +static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val) +{ + return ((val) << A3XX_TEX_CONST_2_SWAP__SHIFT) & A3XX_TEX_CONST_2_SWAP__MASK; +} + +#define REG_A3XX_TEX_CONST_3 0x00000003 +#define A3XX_TEX_CONST_3_LAYERSZ1__MASK 0x0001ffff +#define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT 0 +static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val) +{ + return ((val >> 12) << A3XX_TEX_CONST_3_LAYERSZ1__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ1__MASK; +} +#define A3XX_TEX_CONST_3_DEPTH__MASK 0x0ffe0000 +#define A3XX_TEX_CONST_3_DEPTH__SHIFT 17 +static inline uint32_t A3XX_TEX_CONST_3_DEPTH(uint32_t val) +{ + return ((val) << A3XX_TEX_CONST_3_DEPTH__SHIFT) & A3XX_TEX_CONST_3_DEPTH__MASK; +} +#define A3XX_TEX_CONST_3_LAYERSZ2__MASK 0xf0000000 +#define A3XX_TEX_CONST_3_LAYERSZ2__SHIFT 28 +static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ2(uint32_t val) +{ + return ((val >> 12) << A3XX_TEX_CONST_3_LAYERSZ2__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ2__MASK; +} + + +#endif /* A3XX_XML */ diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c new file mode 100644 index 000000000..2c8b98996 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -0,0 +1,604 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * Copyright (c) 2014 The Linux Foundation. All rights reserved. + */ + +#include "a3xx_gpu.h" + +#define A3XX_INT0_MASK \ + (A3XX_INT0_RBBM_AHB_ERROR | \ + A3XX_INT0_RBBM_ATB_BUS_OVERFLOW | \ + A3XX_INT0_CP_T0_PACKET_IN_IB | \ + A3XX_INT0_CP_OPCODE_ERROR | \ + A3XX_INT0_CP_RESERVED_BIT_ERROR | \ + A3XX_INT0_CP_HW_FAULT | \ + A3XX_INT0_CP_IB1_INT | \ + A3XX_INT0_CP_IB2_INT | \ + A3XX_INT0_CP_RB_INT | \ + A3XX_INT0_CP_REG_PROTECT_FAULT | \ + A3XX_INT0_CP_AHB_ERROR_HALT | \ + A3XX_INT0_CACHE_FLUSH_TS | \ + A3XX_INT0_UCHE_OOB_ACCESS) + +extern bool hang_debug; + +static void a3xx_dump(struct msm_gpu *gpu); +static bool a3xx_idle(struct msm_gpu *gpu); + +static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) +{ + struct msm_ringbuffer *ring = submit->ring; + unsigned int i; + + for (i = 0; i < submit->nr_cmds; i++) { + switch (submit->cmd[i].type) { + case MSM_SUBMIT_CMD_IB_TARGET_BUF: + /* ignore IB-targets */ + break; + case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: + /* ignore if there has not been a ctx switch: */ + if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + break; + fallthrough; + case MSM_SUBMIT_CMD_BUF: + OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2); + OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); + OUT_RING(ring, submit->cmd[i].size); + OUT_PKT2(ring); + break; + } + } + + OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1); + OUT_RING(ring, submit->seqno); + + /* Flush HLSQ lazy updates to make sure there is nothing + * pending for indirect loads after the timestamp has + * passed: + */ + OUT_PKT3(ring, CP_EVENT_WRITE, 1); + OUT_RING(ring, HLSQ_FLUSH); + + /* wait for idle before cache flush/interrupt */ + OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1); + OUT_RING(ring, 0x00000000); + + /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */ + OUT_PKT3(ring, CP_EVENT_WRITE, 3); + OUT_RING(ring, CACHE_FLUSH_TS | CP_EVENT_WRITE_0_IRQ); + OUT_RING(ring, rbmemptr(ring, fence)); + OUT_RING(ring, submit->seqno); + +#if 0 + /* Dummy set-constant to trigger context rollover */ + 
OUT_PKT3(ring, CP_SET_CONSTANT, 2); + OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG)); + OUT_RING(ring, 0x00000000); +#endif + + adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR); +} + +static bool a3xx_me_init(struct msm_gpu *gpu) +{ + struct msm_ringbuffer *ring = gpu->rb[0]; + + OUT_PKT3(ring, CP_ME_INIT, 17); + OUT_RING(ring, 0x000003f7); + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000080); + OUT_RING(ring, 0x00000100); + OUT_RING(ring, 0x00000180); + OUT_RING(ring, 0x00006600); + OUT_RING(ring, 0x00000150); + OUT_RING(ring, 0x0000014e); + OUT_RING(ring, 0x00000154); + OUT_RING(ring, 0x00000001); + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + + adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR); + return a3xx_idle(gpu); +} + +static int a3xx_hw_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu); + uint32_t *ptr, len; + int i, ret; + + DBG("%s", gpu->name); + + if (adreno_is_a305(adreno_gpu)) { + /* Set up 16 deep read/write request queues: */ + gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010); + gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010); + gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010); + gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010); + gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303); + gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010); + gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010); + /* Enable WR-REQ: */ + gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff); + /* Set up round robin arbitration between both AXI ports: */ + gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030); + /* Set up AOOO: */ + gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c); + gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c); + } else if (adreno_is_a306(adreno_gpu)) { + gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003); + gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x0000000a); + gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x0000000a); + } else if (adreno_is_a320(adreno_gpu)) { + /* Set up 16 deep read/write request queues: */ + gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010); + gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010); + gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010); + gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010); + gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303); + gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010); + gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010); + /* Enable WR-REQ: */ + gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff); + /* Set up round robin arbitration between both AXI ports: */ + gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030); + /* Set up AOOO: */ + gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c); + gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c); + /* Enable 1K sort: */ + gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff); + gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4); + + } else if (adreno_is_a330v2(adreno_gpu)) { + /* + * Most of the VBIF registers on 8974v2 have the correct + * values at power on, so we won't modify those if we don't + * need to + */ + /* Enable 1k sort: */ + gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f); + gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4); + /* 
Enable WR-REQ: */ + gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f); + gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303); + /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */ + gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003); + + } else if (adreno_is_a330(adreno_gpu)) { + /* Set up 16 deep read/write request queues: */ + gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818); + gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x18181818); + gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x18181818); + gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x18181818); + gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303); + gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818); + gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x18181818); + /* Enable WR-REQ: */ + gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f); + /* Set up round robin arbitration between both AXI ports: */ + gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030); + /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */ + gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001); + /* Set up AOOO: */ + gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003f); + gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003f003f); + /* Disable VBIF clock gating. This is to allow AXI to run at a + * higher frequency than the GPU: + */ + gpu_write(gpu, REG_A3XX_VBIF_CLKON, 0x00000001); + + } else { + BUG(); + } + + /* Make all blocks contribute to the GPU BUSY perf counter: */ + gpu_write(gpu, REG_A3XX_RBBM_GPU_BUSY_MASKED, 0xffffffff); + + /* Tune the hysteresis counters for SP and CP idle detection: */ + gpu_write(gpu, REG_A3XX_RBBM_SP_HYST_CNT, 0x10); + gpu_write(gpu, REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10); + + /* Enable the RBBM error reporting bits. This lets us get + * useful information on failure: + */ + gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL0, 0x00000001); + + /* Enable AHB error reporting: */ + gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL1, 0xa6ffffff); + + /* Turn on the power counters: */ + gpu_write(gpu, REG_A3XX_RBBM_RBBM_CTL, 0x00030000); + + /* Turn on hang detection - this spews a lot of useful information + * into the RBBM registers on a hang: + */ + gpu_write(gpu, REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL, 0x00010fff); + + /* Enable 64-byte cacheline size. HW Default is 32-byte (0x000000E0): */ + gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001); + + /* Enable Clock gating: */ + if (adreno_is_a306(adreno_gpu)) + gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa); + else if (adreno_is_a320(adreno_gpu)) + gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff); + else if (adreno_is_a330v2(adreno_gpu)) + gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa); + else if (adreno_is_a330(adreno_gpu)) + gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbffcffff); + + if (adreno_is_a330v2(adreno_gpu)) + gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x05515455); + else if (adreno_is_a330(adreno_gpu)) + gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000); + + /* Set the OCMEM base address for A330, etc */ + if (a3xx_gpu->ocmem.hdl) { + gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR, + (unsigned int)(a3xx_gpu->ocmem.base >> 14)); + } + + /* Turn on performance counters: */ + gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01); + + /* Enable the perfcntrs that we use..
*/ + for (i = 0; i < gpu->num_perfcntrs; i++) { + const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i]; + gpu_write(gpu, perfcntr->select_reg, perfcntr->select_val); + } + + gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, A3XX_INT0_MASK); + + ret = adreno_hw_init(gpu); + if (ret) + return ret; + + /* + * Use the default ringbuffer size and block size but disable the RPTR + * shadow + */ + gpu_write(gpu, REG_AXXX_CP_RB_CNTL, + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + + /* Set the ringbuffer address */ + gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova)); + + /* setup access protection: */ + gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007); + + /* RBBM registers */ + gpu_write(gpu, REG_A3XX_CP_PROTECT(0), 0x63000040); + gpu_write(gpu, REG_A3XX_CP_PROTECT(1), 0x62000080); + gpu_write(gpu, REG_A3XX_CP_PROTECT(2), 0x600000cc); + gpu_write(gpu, REG_A3XX_CP_PROTECT(3), 0x60000108); + gpu_write(gpu, REG_A3XX_CP_PROTECT(4), 0x64000140); + gpu_write(gpu, REG_A3XX_CP_PROTECT(5), 0x66000400); + + /* CP registers */ + gpu_write(gpu, REG_A3XX_CP_PROTECT(6), 0x65000700); + gpu_write(gpu, REG_A3XX_CP_PROTECT(7), 0x610007d8); + gpu_write(gpu, REG_A3XX_CP_PROTECT(8), 0x620007e0); + gpu_write(gpu, REG_A3XX_CP_PROTECT(9), 0x61001178); + gpu_write(gpu, REG_A3XX_CP_PROTECT(10), 0x64001180); + + /* RB registers */ + gpu_write(gpu, REG_A3XX_CP_PROTECT(11), 0x60003300); + + /* VBIF registers */ + gpu_write(gpu, REG_A3XX_CP_PROTECT(12), 0x6b00c000); + + /* NOTE: PM4/micro-engine firmware registers look to be the same + * for a2xx and a3xx.. we could possibly push that part down to + * adreno_gpu base class. Or push both PM4 and PFP but + * parameterize the pfp ucode addr/data registers.. + */ + + /* Load PM4: */ + ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data); + len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4; + DBG("loading PM4 ucode version: %x", ptr[1]); + + gpu_write(gpu, REG_AXXX_CP_DEBUG, + AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE | + AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE); + gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0); + for (i = 1; i < len; i++) + gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]); + + /* Load PFP: */ + ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data); + len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4; + DBG("loading PFP ucode version: %x", ptr[5]); + + gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0); + for (i = 1; i < len; i++) + gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]); + + /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */ + if (adreno_is_a305(adreno_gpu) || adreno_is_a306(adreno_gpu) || + adreno_is_a320(adreno_gpu)) { + gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, + AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) | + AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) | + AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14)); + } else if (adreno_is_a330(adreno_gpu)) { + /* NOTE: this (value taken from the downstream android driver) + * includes some bits outside of the known bitfields. But + * A330 has this "MERCIU queue" thing too, which might + * explain a new bitfield or reshuffling: + */ + gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x003e2008); + } + + /* clear ME_HALT to start micro engine */ + gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0); + + return a3xx_me_init(gpu) ?
0 : -EINVAL; +} + +static void a3xx_recover(struct msm_gpu *gpu) +{ + int i; + + adreno_dump_info(gpu); + + for (i = 0; i < 8; i++) { + printk("CP_SCRATCH_REG%d: %u\n", i, + gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i)); + } + + /* dump registers before resetting gpu, if enabled: */ + if (hang_debug) + a3xx_dump(gpu); + + gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 1); + gpu_read(gpu, REG_A3XX_RBBM_SW_RESET_CMD); + gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 0); + adreno_recover(gpu); +} + +static void a3xx_destroy(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu); + + DBG("%s", gpu->name); + + adreno_gpu_cleanup(adreno_gpu); + + adreno_gpu_ocmem_cleanup(&a3xx_gpu->ocmem); + + kfree(a3xx_gpu); +} + +static bool a3xx_idle(struct msm_gpu *gpu) +{ + /* wait for ringbuffer to drain: */ + if (!adreno_idle(gpu, gpu->rb[0])) + return false; + + /* then wait for GPU to finish: */ + if (spin_until(!(gpu_read(gpu, REG_A3XX_RBBM_STATUS) & + A3XX_RBBM_STATUS_GPU_BUSY))) { + DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name); + + /* TODO maybe we need to reset GPU here to recover from hang? */ + return false; + } + + return true; +} + +static irqreturn_t a3xx_irq(struct msm_gpu *gpu) +{ + uint32_t status; + + status = gpu_read(gpu, REG_A3XX_RBBM_INT_0_STATUS); + DBG("%s: %08x", gpu->name, status); + + // TODO + + gpu_write(gpu, REG_A3XX_RBBM_INT_CLEAR_CMD, status); + + msm_gpu_retire(gpu); + + return IRQ_HANDLED; +} + +static const unsigned int a3xx_registers[] = { + 0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027, + 0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c, + 0x0060, 0x006c, 0x0080, 0x0082, 0x0084, 0x0088, 0x0090, 0x00e5, + 0x00ea, 0x00ed, 0x0100, 0x0100, 0x0110, 0x0123, 0x01c0, 0x01c1, + 0x01c3, 0x01c5, 0x01c7, 0x01c7, 0x01d5, 0x01d9, 0x01dc, 0x01dd, + 0x01ea, 0x01ea, 0x01ee, 0x01f1, 0x01f5, 0x01f5, 0x01fc, 0x01ff, + 0x0440, 0x0440, 0x0443, 0x0443, 0x0445, 0x0445, 0x044d, 0x044f, + 0x0452, 0x0452, 0x0454, 0x046f, 0x047c, 0x047c, 0x047f, 0x047f, + 0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e, + 0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f, + 0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7, + 0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5, 0x0e00, 0x0e05, + 0x0e0c, 0x0e0c, 0x0e22, 0x0e23, 0x0e41, 0x0e45, 0x0e64, 0x0e65, + 0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7, + 0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09, + 0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069, + 0x206c, 0x206d, 0x2070, 0x2070, 0x2072, 0x2072, 0x2074, 0x2075, + 0x2079, 0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109, + 0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115, + 0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0, + 0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e, + 0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8, + 0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7, + 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2440, 0x2440, 0x2444, 0x2444, + 0x2448, 0x244d, 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, + 0x2472, 0x2472, 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, + 0x24e4, 0x24ef, 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, + 0x2510, 0x2511, 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, + 0x25ec, 0x25ed, 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, + 0x261a, 0x261a, 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, + 
0x26c4, 0x26ce, 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, + 0x26ec, 0x26ec, 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, + 0x300c, 0x300e, 0x301c, 0x301d, 0x302a, 0x302a, 0x302c, 0x302d, + 0x3030, 0x3031, 0x3034, 0x3036, 0x303c, 0x303c, 0x305e, 0x305f, + ~0 /* sentinel */ +}; + +/* would be nice to not have to duplicate the _show() stuff with printk(): */ +static void a3xx_dump(struct msm_gpu *gpu) +{ + printk("status: %08x\n", + gpu_read(gpu, REG_A3XX_RBBM_STATUS)); + adreno_dump(gpu); +} + +static struct msm_gpu_state *a3xx_gpu_state_get(struct msm_gpu *gpu) +{ + struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL); + + if (!state) + return ERR_PTR(-ENOMEM); + + adreno_gpu_state_get(gpu, state); + + state->rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS); + + return state; +} + +static u32 a3xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR); + return ring->memptrs->rptr; +} + +static const struct adreno_gpu_funcs funcs = { + .base = { + .get_param = adreno_get_param, + .set_param = adreno_set_param, + .hw_init = a3xx_hw_init, + .pm_suspend = msm_gpu_pm_suspend, + .pm_resume = msm_gpu_pm_resume, + .recover = a3xx_recover, + .submit = a3xx_submit, + .active_ring = adreno_active_ring, + .irq = a3xx_irq, + .destroy = a3xx_destroy, +#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) + .show = adreno_show, +#endif + .gpu_state_get = a3xx_gpu_state_get, + .gpu_state_put = adreno_gpu_state_put, + .create_address_space = adreno_iommu_create_address_space, + .get_rptr = a3xx_get_rptr, + }, +}; + +static const struct msm_gpu_perfcntr perfcntrs[] = { + { REG_A3XX_SP_PERFCOUNTER6_SELECT, REG_A3XX_RBBM_PERFCTR_SP_6_LO, + SP_ALU_ACTIVE_CYCLES, "ALUACTIVE" }, + { REG_A3XX_SP_PERFCOUNTER7_SELECT, REG_A3XX_RBBM_PERFCTR_SP_7_LO, + SP_FS_FULL_ALU_INSTRUCTIONS, "ALUFULL" }, +}; + +struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) +{ + struct a3xx_gpu *a3xx_gpu = NULL; + struct adreno_gpu *adreno_gpu; + struct msm_gpu *gpu; + struct msm_drm_private *priv = dev->dev_private; + struct platform_device *pdev = priv->gpu_pdev; + struct icc_path *ocmem_icc_path; + struct icc_path *icc_path; + int ret; + + if (!pdev) { + DRM_DEV_ERROR(dev->dev, "no a3xx device\n"); + ret = -ENXIO; + goto fail; + } + + a3xx_gpu = kzalloc(sizeof(*a3xx_gpu), GFP_KERNEL); + if (!a3xx_gpu) { + ret = -ENOMEM; + goto fail; + } + + adreno_gpu = &a3xx_gpu->base; + gpu = &adreno_gpu->base; + + gpu->perfcntrs = perfcntrs; + gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs); + + adreno_gpu->registers = a3xx_registers; + + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); + if (ret) + goto fail; + + /* if needed, allocate gmem: */ + if (adreno_is_a330(adreno_gpu)) { + ret = adreno_gpu_ocmem_init(&adreno_gpu->base.pdev->dev, + adreno_gpu, &a3xx_gpu->ocmem); + if (ret) + goto fail; + } + + if (!gpu->aspace) { + /* TODO we think it is possible to configure the GPU to + * restrict access to VRAM carveout. But the required + * registers are unknown. For now just bail out and + * limp along with just modesetting. If it turns out + * to not be possible to restrict access, then we must + * implement a cmdstream validator. 
+ */ + DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n"); + if (!allow_vram_carveout) { + ret = -ENXIO; + goto fail; + } + } + + icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem"); + if (IS_ERR(icc_path)) { + ret = PTR_ERR(icc_path); + goto fail; + } + + ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem"); + if (IS_ERR(ocmem_icc_path)) { + ret = PTR_ERR(ocmem_icc_path); + /* allow -ENODATA, ocmem icc is optional */ + if (ret != -ENODATA) + goto fail; + ocmem_icc_path = NULL; + } + + + /* + * Set the ICC path to maximum speed for now by multiplying the fastest + * frequency by the bus width (8). We'll want to scale this later on to + * improve battery life. + */ + icc_set_bw(icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8); + icc_set_bw(ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8); + + return gpu; + +fail: + if (a3xx_gpu) + a3xx_destroy(&a3xx_gpu->base.base); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h new file mode 100644 index 000000000..c555fb13e --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + */ + +#ifndef __A3XX_GPU_H__ +#define __A3XX_GPU_H__ + +#include "adreno_gpu.h" + +/* arrg, somehow fb.h is getting pulled in: */ +#undef ROP_COPY +#undef ROP_XOR + +#include "a3xx.xml.h" + +struct a3xx_gpu { + struct adreno_gpu base; + + /* if OCMEM is used for GMEM: */ + struct adreno_ocmem ocmem; +}; +#define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base) + +#endif /* __A3XX_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h new file mode 100644 index 000000000..7e5c21015 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h @@ -0,0 +1,4349 @@ +#ifndef A4XX_XML +#define A4XX_XML + +/* Autogenerated file, DO NOT EDIT manually! 
+ +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://github.com/freedreno/envytools/ +git clone https://github.com/freedreno/envytools.git + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56) + +Copyright (C) 2013-2022 by the following authors: +- Rob Clark (robclark) +- Ilia Mirkin (imirkin) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + + +enum a4xx_color_fmt { + RB4_A8_UNORM = 1, + RB4_R8_UNORM = 2, + RB4_R8_SNORM = 3, + RB4_R8_UINT = 4, + RB4_R8_SINT = 5, + RB4_R4G4B4A4_UNORM = 8, + RB4_R5G5B5A1_UNORM = 10, + RB4_R5G6B5_UNORM = 14, + RB4_R8G8_UNORM = 15, + RB4_R8G8_SNORM = 16, + RB4_R8G8_UINT = 17, + RB4_R8G8_SINT = 18, + RB4_R16_UNORM = 19, + RB4_R16_SNORM = 20, + RB4_R16_FLOAT = 21, + RB4_R16_UINT = 22, + RB4_R16_SINT = 23, + RB4_R8G8B8_UNORM = 25, + RB4_R8G8B8A8_UNORM = 26, + RB4_R8G8B8A8_SNORM = 28, + RB4_R8G8B8A8_UINT = 29, + RB4_R8G8B8A8_SINT = 30, + RB4_R10G10B10A2_UNORM = 31, + RB4_R10G10B10A2_UINT = 34, + RB4_R11G11B10_FLOAT = 39, + RB4_R16G16_UNORM = 40, + RB4_R16G16_SNORM = 41, + RB4_R16G16_FLOAT = 42, + RB4_R16G16_UINT = 43, + RB4_R16G16_SINT = 44, + RB4_R32_FLOAT = 45, + RB4_R32_UINT = 46, + RB4_R32_SINT = 47, + RB4_R16G16B16A16_UNORM = 52, + RB4_R16G16B16A16_SNORM = 53, + RB4_R16G16B16A16_FLOAT = 54, + RB4_R16G16B16A16_UINT = 55, + RB4_R16G16B16A16_SINT = 56, + RB4_R32G32_FLOAT = 57, + RB4_R32G32_UINT = 58, + RB4_R32G32_SINT = 59, + RB4_R32G32B32A32_FLOAT = 60, + RB4_R32G32B32A32_UINT = 61, + RB4_R32G32B32A32_SINT = 62, + RB4_NONE = 255, +}; + +enum a4xx_tile_mode { + TILE4_LINEAR = 0, + TILE4_2 = 2, + TILE4_3 = 3, +}; + +enum a4xx_vtx_fmt { + VFMT4_32_FLOAT = 1, + VFMT4_32_32_FLOAT = 2, + VFMT4_32_32_32_FLOAT = 3, + VFMT4_32_32_32_32_FLOAT = 4, + VFMT4_16_FLOAT = 5, + VFMT4_16_16_FLOAT = 6, + VFMT4_16_16_16_FLOAT = 7, + VFMT4_16_16_16_16_FLOAT = 8, + VFMT4_32_FIXED = 9, + VFMT4_32_32_FIXED = 10, + VFMT4_32_32_32_FIXED = 11, + VFMT4_32_32_32_32_FIXED = 12, + VFMT4_11_11_10_FLOAT = 13, + VFMT4_16_SINT = 16, + VFMT4_16_16_SINT = 17, + VFMT4_16_16_16_SINT = 18, + VFMT4_16_16_16_16_SINT = 19, + VFMT4_16_UINT = 20, + VFMT4_16_16_UINT = 21, + VFMT4_16_16_16_UINT = 22, + VFMT4_16_16_16_16_UINT = 23, + VFMT4_16_SNORM = 24, + VFMT4_16_16_SNORM = 25, + VFMT4_16_16_16_SNORM = 26, + VFMT4_16_16_16_16_SNORM = 27, + VFMT4_16_UNORM = 28, + VFMT4_16_16_UNORM = 29, + VFMT4_16_16_16_UNORM = 30, + VFMT4_16_16_16_16_UNORM = 31, + VFMT4_32_UINT = 32, + VFMT4_32_32_UINT = 33, + VFMT4_32_32_32_UINT = 34, + VFMT4_32_32_32_32_UINT = 35, + VFMT4_32_SINT = 36, + VFMT4_32_32_SINT = 37, + VFMT4_32_32_32_SINT = 38, + VFMT4_32_32_32_32_SINT = 39, + VFMT4_8_UINT = 40, + VFMT4_8_8_UINT = 41, + VFMT4_8_8_8_UINT = 42, + VFMT4_8_8_8_8_UINT = 43, + VFMT4_8_UNORM = 44, + VFMT4_8_8_UNORM = 45, + VFMT4_8_8_8_UNORM = 46, + VFMT4_8_8_8_8_UNORM = 47, + VFMT4_8_SINT = 48, + VFMT4_8_8_SINT = 49, + VFMT4_8_8_8_SINT = 50, + VFMT4_8_8_8_8_SINT = 51, + VFMT4_8_SNORM = 52, + VFMT4_8_8_SNORM = 53, + VFMT4_8_8_8_SNORM = 54, + VFMT4_8_8_8_8_SNORM = 55, + VFMT4_10_10_10_2_UINT = 56, + VFMT4_10_10_10_2_UNORM = 57, + VFMT4_10_10_10_2_SINT = 58, + VFMT4_10_10_10_2_SNORM = 59, + VFMT4_2_10_10_10_UINT = 60, + VFMT4_2_10_10_10_UNORM = 61, + VFMT4_2_10_10_10_SINT = 62, + VFMT4_2_10_10_10_SNORM = 63, + VFMT4_NONE = 255, +}; + +enum a4xx_tex_fmt { + TFMT4_A8_UNORM = 3, + TFMT4_8_UNORM = 4, + TFMT4_8_SNORM = 5, + TFMT4_8_UINT = 6, + TFMT4_8_SINT = 7, + TFMT4_4_4_4_4_UNORM = 8, + TFMT4_5_5_5_1_UNORM = 9, + TFMT4_5_6_5_UNORM = 11, + TFMT4_L8_A8_UNORM = 13, + TFMT4_8_8_UNORM = 14, + TFMT4_8_8_SNORM = 15, + TFMT4_8_8_UINT = 16, + TFMT4_8_8_SINT = 17, + TFMT4_16_UNORM = 18, + TFMT4_16_SNORM = 19, + TFMT4_16_FLOAT = 20, + TFMT4_16_UINT = 21, + TFMT4_16_SINT = 22, + TFMT4_8_8_8_8_UNORM = 28, + TFMT4_8_8_8_8_SNORM = 29, + TFMT4_8_8_8_8_UINT = 30, + TFMT4_8_8_8_8_SINT = 31, + TFMT4_9_9_9_E5_FLOAT = 32, + TFMT4_10_10_10_2_UNORM = 33, + TFMT4_10_10_10_2_UINT = 34, + 
TFMT4_11_11_10_FLOAT = 37, + TFMT4_16_16_UNORM = 38, + TFMT4_16_16_SNORM = 39, + TFMT4_16_16_FLOAT = 40, + TFMT4_16_16_UINT = 41, + TFMT4_16_16_SINT = 42, + TFMT4_32_FLOAT = 43, + TFMT4_32_UINT = 44, + TFMT4_32_SINT = 45, + TFMT4_16_16_16_16_UNORM = 51, + TFMT4_16_16_16_16_SNORM = 52, + TFMT4_16_16_16_16_FLOAT = 53, + TFMT4_16_16_16_16_UINT = 54, + TFMT4_16_16_16_16_SINT = 55, + TFMT4_32_32_FLOAT = 56, + TFMT4_32_32_UINT = 57, + TFMT4_32_32_SINT = 58, + TFMT4_32_32_32_FLOAT = 59, + TFMT4_32_32_32_UINT = 60, + TFMT4_32_32_32_SINT = 61, + TFMT4_32_32_32_32_FLOAT = 63, + TFMT4_32_32_32_32_UINT = 64, + TFMT4_32_32_32_32_SINT = 65, + TFMT4_X8Z24_UNORM = 71, + TFMT4_DXT1 = 86, + TFMT4_DXT3 = 87, + TFMT4_DXT5 = 88, + TFMT4_RGTC1_UNORM = 90, + TFMT4_RGTC1_SNORM = 91, + TFMT4_RGTC2_UNORM = 94, + TFMT4_RGTC2_SNORM = 95, + TFMT4_BPTC_UFLOAT = 97, + TFMT4_BPTC_FLOAT = 98, + TFMT4_BPTC = 99, + TFMT4_ATC_RGB = 100, + TFMT4_ATC_RGBA_EXPLICIT = 101, + TFMT4_ATC_RGBA_INTERPOLATED = 102, + TFMT4_ETC2_RG11_UNORM = 103, + TFMT4_ETC2_RG11_SNORM = 104, + TFMT4_ETC2_R11_UNORM = 105, + TFMT4_ETC2_R11_SNORM = 106, + TFMT4_ETC1 = 107, + TFMT4_ETC2_RGB8 = 108, + TFMT4_ETC2_RGBA8 = 109, + TFMT4_ETC2_RGB8A1 = 110, + TFMT4_ASTC_4x4 = 111, + TFMT4_ASTC_5x4 = 112, + TFMT4_ASTC_5x5 = 113, + TFMT4_ASTC_6x5 = 114, + TFMT4_ASTC_6x6 = 115, + TFMT4_ASTC_8x5 = 116, + TFMT4_ASTC_8x6 = 117, + TFMT4_ASTC_8x8 = 118, + TFMT4_ASTC_10x5 = 119, + TFMT4_ASTC_10x6 = 120, + TFMT4_ASTC_10x8 = 121, + TFMT4_ASTC_10x10 = 122, + TFMT4_ASTC_12x10 = 123, + TFMT4_ASTC_12x12 = 124, + TFMT4_NONE = 255, +}; + +enum a4xx_depth_format { + DEPTH4_NONE = 0, + DEPTH4_16 = 1, + DEPTH4_24_8 = 2, + DEPTH4_32 = 3, +}; + +enum a4xx_ccu_perfcounter_select { + CCU_BUSY_CYCLES = 0, + CCU_RB_DEPTH_RETURN_STALL = 2, + CCU_RB_COLOR_RETURN_STALL = 3, + CCU_DEPTH_BLOCKS = 6, + CCU_COLOR_BLOCKS = 7, + CCU_DEPTH_BLOCK_HIT = 8, + CCU_COLOR_BLOCK_HIT = 9, + CCU_DEPTH_FLAG1_COUNT = 10, + CCU_DEPTH_FLAG2_COUNT = 11, + CCU_DEPTH_FLAG3_COUNT = 12, + CCU_DEPTH_FLAG4_COUNT = 13, + CCU_COLOR_FLAG1_COUNT = 14, + CCU_COLOR_FLAG2_COUNT = 15, + CCU_COLOR_FLAG3_COUNT = 16, + CCU_COLOR_FLAG4_COUNT = 17, + CCU_PARTIAL_BLOCK_READ = 18, +}; + +enum a4xx_cp_perfcounter_select { + CP_ALWAYS_COUNT = 0, + CP_BUSY = 1, + CP_PFP_IDLE = 2, + CP_PFP_BUSY_WORKING = 3, + CP_PFP_STALL_CYCLES_ANY = 4, + CP_PFP_STARVE_CYCLES_ANY = 5, + CP_PFP_STARVED_PER_LOAD_ADDR = 6, + CP_PFP_STALLED_PER_STORE_ADDR = 7, + CP_PFP_PC_PROFILE = 8, + CP_PFP_MATCH_PM4_PKT_PROFILE = 9, + CP_PFP_COND_INDIRECT_DISCARDED = 10, + CP_LONG_RESUMPTIONS = 11, + CP_RESUME_CYCLES = 12, + CP_RESUME_TO_BOUNDARY_CYCLES = 13, + CP_LONG_PREEMPTIONS = 14, + CP_PREEMPT_CYCLES = 15, + CP_PREEMPT_TO_BOUNDARY_CYCLES = 16, + CP_ME_FIFO_EMPTY_PFP_IDLE = 17, + CP_ME_FIFO_EMPTY_PFP_BUSY = 18, + CP_ME_FIFO_NOT_EMPTY_NOT_FULL = 19, + CP_ME_FIFO_FULL_ME_BUSY = 20, + CP_ME_FIFO_FULL_ME_NON_WORKING = 21, + CP_ME_WAITING_FOR_PACKETS = 22, + CP_ME_BUSY_WORKING = 23, + CP_ME_STARVE_CYCLES_ANY = 24, + CP_ME_STARVE_CYCLES_PER_PROFILE = 25, + CP_ME_STALL_CYCLES_PER_PROFILE = 26, + CP_ME_PC_PROFILE = 27, + CP_RCIU_FIFO_EMPTY = 28, + CP_RCIU_FIFO_NOT_EMPTY_NOT_FULL = 29, + CP_RCIU_FIFO_FULL = 30, + CP_RCIU_FIFO_FULL_NO_CONTEXT = 31, + CP_RCIU_FIFO_FULL_AHB_MASTER = 32, + CP_RCIU_FIFO_FULL_OTHER = 33, + CP_AHB_IDLE = 34, + CP_AHB_STALL_ON_GRANT_NO_SPLIT = 35, + CP_AHB_STALL_ON_GRANT_SPLIT = 36, + CP_AHB_STALL_ON_GRANT_SPLIT_PROFILE = 37, + CP_AHB_BUSY_WORKING = 38, + CP_AHB_BUSY_STALL_ON_HRDY = 39, + CP_AHB_BUSY_STALL_ON_HRDY_PROFILE = 40, +}; + +enum 
a4xx_gras_ras_perfcounter_select { + RAS_SUPER_TILES = 0, + RAS_8X8_TILES = 1, + RAS_4X4_TILES = 2, + RAS_BUSY_CYCLES = 3, + RAS_STALL_CYCLES_BY_RB = 4, + RAS_STALL_CYCLES_BY_VSC = 5, + RAS_STARVE_CYCLES_BY_TSE = 6, + RAS_SUPERTILE_CYCLES = 7, + RAS_TILE_CYCLES = 8, + RAS_FULLY_COVERED_SUPER_TILES = 9, + RAS_FULLY_COVERED_8X8_TILES = 10, + RAS_4X4_PRIM = 11, + RAS_8X4_4X8_PRIM = 12, + RAS_8X8_PRIM = 13, +}; + +enum a4xx_gras_tse_perfcounter_select { + TSE_INPUT_PRIM = 0, + TSE_INPUT_NULL_PRIM = 1, + TSE_TRIVAL_REJ_PRIM = 2, + TSE_CLIPPED_PRIM = 3, + TSE_NEW_PRIM = 4, + TSE_ZERO_AREA_PRIM = 5, + TSE_FACENESS_CULLED_PRIM = 6, + TSE_ZERO_PIXEL_PRIM = 7, + TSE_OUTPUT_NULL_PRIM = 8, + TSE_OUTPUT_VISIBLE_PRIM = 9, + TSE_PRE_CLIP_PRIM = 10, + TSE_POST_CLIP_PRIM = 11, + TSE_BUSY_CYCLES = 12, + TSE_PC_STARVE = 13, + TSE_RAS_STALL = 14, + TSE_STALL_BARYPLANE_FIFO_FULL = 15, + TSE_STALL_ZPLANE_FIFO_FULL = 16, +}; + +enum a4xx_hlsq_perfcounter_select { + HLSQ_SP_VS_STAGE_CONSTANT = 0, + HLSQ_SP_VS_STAGE_INSTRUCTIONS = 1, + HLSQ_SP_FS_STAGE_CONSTANT = 2, + HLSQ_SP_FS_STAGE_INSTRUCTIONS = 3, + HLSQ_TP_STATE = 4, + HLSQ_QUADS = 5, + HLSQ_PIXELS = 6, + HLSQ_VERTICES = 7, + HLSQ_SP_VS_STAGE_DATA_BYTES = 13, + HLSQ_SP_FS_STAGE_DATA_BYTES = 14, + HLSQ_BUSY_CYCLES = 15, + HLSQ_STALL_CYCLES_SP_STATE = 16, + HLSQ_STALL_CYCLES_SP_VS_STAGE = 17, + HLSQ_STALL_CYCLES_SP_FS_STAGE = 18, + HLSQ_STALL_CYCLES_UCHE = 19, + HLSQ_RBBM_LOAD_CYCLES = 20, + HLSQ_DI_TO_VS_START_SP = 21, + HLSQ_DI_TO_FS_START_SP = 22, + HLSQ_VS_STAGE_START_TO_DONE_SP = 23, + HLSQ_FS_STAGE_START_TO_DONE_SP = 24, + HLSQ_SP_STATE_COPY_CYCLES_VS_STAGE = 25, + HLSQ_SP_STATE_COPY_CYCLES_FS_STAGE = 26, + HLSQ_UCHE_LATENCY_CYCLES = 27, + HLSQ_UCHE_LATENCY_COUNT = 28, + HLSQ_STARVE_CYCLES_VFD = 29, +}; + +enum a4xx_pc_perfcounter_select { + PC_VIS_STREAMS_LOADED = 0, + PC_VPC_PRIMITIVES = 2, + PC_DEAD_PRIM = 3, + PC_LIVE_PRIM = 4, + PC_DEAD_DRAWCALLS = 5, + PC_LIVE_DRAWCALLS = 6, + PC_VERTEX_MISSES = 7, + PC_STALL_CYCLES_VFD = 9, + PC_STALL_CYCLES_TSE = 10, + PC_STALL_CYCLES_UCHE = 11, + PC_WORKING_CYCLES = 12, + PC_IA_VERTICES = 13, + PC_GS_PRIMITIVES = 14, + PC_HS_INVOCATIONS = 15, + PC_DS_INVOCATIONS = 16, + PC_DS_PRIMITIVES = 17, + PC_STARVE_CYCLES_FOR_INDEX = 20, + PC_STARVE_CYCLES_FOR_TESS_FACTOR = 21, + PC_STARVE_CYCLES_FOR_VIZ_STREAM = 22, + PC_STALL_CYCLES_TESS = 23, + PC_STARVE_CYCLES_FOR_POSITION = 24, + PC_MODE0_DRAWCALL = 25, + PC_MODE1_DRAWCALL = 26, + PC_MODE2_DRAWCALL = 27, + PC_MODE3_DRAWCALL = 28, + PC_MODE4_DRAWCALL = 29, + PC_PREDICATED_DEAD_DRAWCALL = 30, + PC_STALL_CYCLES_BY_TSE_ONLY = 31, + PC_STALL_CYCLES_BY_VPC_ONLY = 32, + PC_VPC_POS_DATA_TRANSACTION = 33, + PC_BUSY_CYCLES = 34, + PC_STARVE_CYCLES_DI = 35, + PC_STALL_CYCLES_VPC = 36, + TESS_WORKING_CYCLES = 37, + TESS_NUM_CYCLES_SETUP_WORKING = 38, + TESS_NUM_CYCLES_PTGEN_WORKING = 39, + TESS_NUM_CYCLES_CONNGEN_WORKING = 40, + TESS_BUSY_CYCLES = 41, + TESS_STARVE_CYCLES_PC = 42, + TESS_STALL_CYCLES_PC = 43, +}; + +enum a4xx_pwr_perfcounter_select { + PWR_CORE_CLOCK_CYCLES = 0, + PWR_BUSY_CLOCK_CYCLES = 1, +}; + +enum a4xx_rb_perfcounter_select { + RB_BUSY_CYCLES = 0, + RB_BUSY_CYCLES_BINNING = 1, + RB_BUSY_CYCLES_RENDERING = 2, + RB_BUSY_CYCLES_RESOLVE = 3, + RB_STARVE_CYCLES_BY_SP = 4, + RB_STARVE_CYCLES_BY_RAS = 5, + RB_STARVE_CYCLES_BY_MARB = 6, + RB_STALL_CYCLES_BY_MARB = 7, + RB_STALL_CYCLES_BY_HLSQ = 8, + RB_RB_RB_MARB_DATA = 9, + RB_SP_RB_QUAD = 10, + RB_RAS_RB_Z_QUADS = 11, + RB_GMEM_CH0_READ = 12, + RB_GMEM_CH1_READ = 13, + RB_GMEM_CH0_WRITE = 14, + RB_GMEM_CH1_WRITE = 
15, + RB_CP_CONTEXT_DONE = 16, + RB_CP_CACHE_FLUSH = 17, + RB_CP_ZPASS_DONE = 18, + RB_STALL_FIFO0_FULL = 19, + RB_STALL_FIFO1_FULL = 20, + RB_STALL_FIFO2_FULL = 21, + RB_STALL_FIFO3_FULL = 22, + RB_RB_HLSQ_TRANSACTIONS = 23, + RB_Z_READ = 24, + RB_Z_WRITE = 25, + RB_C_READ = 26, + RB_C_WRITE = 27, + RB_C_READ_LATENCY = 28, + RB_Z_READ_LATENCY = 29, + RB_STALL_BY_UCHE = 30, + RB_MARB_UCHE_TRANSACTIONS = 31, + RB_CACHE_STALL_MISS = 32, + RB_CACHE_STALL_FIFO_FULL = 33, + RB_8BIT_BLENDER_UNITS_ACTIVE = 34, + RB_16BIT_BLENDER_UNITS_ACTIVE = 35, + RB_SAMPLER_UNITS_ACTIVE = 36, + RB_TOTAL_PASS = 38, + RB_Z_PASS = 39, + RB_Z_FAIL = 40, + RB_S_FAIL = 41, + RB_POWER0 = 42, + RB_POWER1 = 43, + RB_POWER2 = 44, + RB_POWER3 = 45, + RB_POWER4 = 46, + RB_POWER5 = 47, + RB_POWER6 = 48, + RB_POWER7 = 49, +}; + +enum a4xx_rbbm_perfcounter_select { + RBBM_ALWAYS_ON = 0, + RBBM_VBIF_BUSY = 1, + RBBM_TSE_BUSY = 2, + RBBM_RAS_BUSY = 3, + RBBM_PC_DCALL_BUSY = 4, + RBBM_PC_VSD_BUSY = 5, + RBBM_VFD_BUSY = 6, + RBBM_VPC_BUSY = 7, + RBBM_UCHE_BUSY = 8, + RBBM_VSC_BUSY = 9, + RBBM_HLSQ_BUSY = 10, + RBBM_ANY_RB_BUSY = 11, + RBBM_ANY_TPL1_BUSY = 12, + RBBM_ANY_SP_BUSY = 13, + RBBM_ANY_MARB_BUSY = 14, + RBBM_ANY_ARB_BUSY = 15, + RBBM_AHB_STATUS_BUSY = 16, + RBBM_AHB_STATUS_STALLED = 17, + RBBM_AHB_STATUS_TXFR = 18, + RBBM_AHB_STATUS_TXFR_SPLIT = 19, + RBBM_AHB_STATUS_TXFR_ERROR = 20, + RBBM_AHB_STATUS_LONG_STALL = 21, + RBBM_STATUS_MASKED = 22, + RBBM_CP_BUSY_GFX_CORE_IDLE = 23, + RBBM_TESS_BUSY = 24, + RBBM_COM_BUSY = 25, + RBBM_DCOM_BUSY = 32, + RBBM_ANY_CCU_BUSY = 33, + RBBM_DPM_BUSY = 34, +}; + +enum a4xx_sp_perfcounter_select { + SP_LM_LOAD_INSTRUCTIONS = 0, + SP_LM_STORE_INSTRUCTIONS = 1, + SP_LM_ATOMICS = 2, + SP_GM_LOAD_INSTRUCTIONS = 3, + SP_GM_STORE_INSTRUCTIONS = 4, + SP_GM_ATOMICS = 5, + SP_VS_STAGE_TEX_INSTRUCTIONS = 6, + SP_VS_STAGE_CFLOW_INSTRUCTIONS = 7, + SP_VS_STAGE_EFU_INSTRUCTIONS = 8, + SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 9, + SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 10, + SP_FS_STAGE_TEX_INSTRUCTIONS = 11, + SP_FS_STAGE_CFLOW_INSTRUCTIONS = 12, + SP_FS_STAGE_EFU_INSTRUCTIONS = 13, + SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 14, + SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 15, + SP_VS_INSTRUCTIONS = 17, + SP_FS_INSTRUCTIONS = 18, + SP_ADDR_LOCK_COUNT = 19, + SP_UCHE_READ_TRANS = 20, + SP_UCHE_WRITE_TRANS = 21, + SP_EXPORT_VPC_TRANS = 22, + SP_EXPORT_RB_TRANS = 23, + SP_PIXELS_KILLED = 24, + SP_ICL1_REQUESTS = 25, + SP_ICL1_MISSES = 26, + SP_ICL0_REQUESTS = 27, + SP_ICL0_MISSES = 28, + SP_ALU_WORKING_CYCLES = 29, + SP_EFU_WORKING_CYCLES = 30, + SP_STALL_CYCLES_BY_VPC = 31, + SP_STALL_CYCLES_BY_TP = 32, + SP_STALL_CYCLES_BY_UCHE = 33, + SP_STALL_CYCLES_BY_RB = 34, + SP_BUSY_CYCLES = 35, + SP_HS_INSTRUCTIONS = 36, + SP_DS_INSTRUCTIONS = 37, + SP_GS_INSTRUCTIONS = 38, + SP_CS_INSTRUCTIONS = 39, + SP_SCHEDULER_NON_WORKING = 40, + SP_WAVE_CONTEXTS = 41, + SP_WAVE_CONTEXT_CYCLES = 42, + SP_POWER0 = 43, + SP_POWER1 = 44, + SP_POWER2 = 45, + SP_POWER3 = 46, + SP_POWER4 = 47, + SP_POWER5 = 48, + SP_POWER6 = 49, + SP_POWER7 = 50, + SP_POWER8 = 51, + SP_POWER9 = 52, + SP_POWER10 = 53, + SP_POWER11 = 54, + SP_POWER12 = 55, + SP_POWER13 = 56, + SP_POWER14 = 57, + SP_POWER15 = 58, +}; + +enum a4xx_tp_perfcounter_select { + TP_L1_REQUESTS = 0, + TP_L1_MISSES = 1, + TP_QUADS_OFFSET = 8, + TP_QUAD_SHADOW = 9, + TP_QUADS_ARRAY = 10, + TP_QUADS_GRADIENT = 11, + TP_QUADS_1D2D = 12, + TP_QUADS_3DCUBE = 13, + TP_BUSY_CYCLES = 16, + TP_STALL_CYCLES_BY_ARB = 17, + TP_STATE_CACHE_REQUESTS = 20, + TP_STATE_CACHE_MISSES = 21, + TP_POWER0 = 22, 
+ TP_POWER1 = 23, + TP_POWER2 = 24, + TP_POWER3 = 25, + TP_POWER4 = 26, + TP_POWER5 = 27, + TP_POWER6 = 28, + TP_POWER7 = 29, +}; + +enum a4xx_uche_perfcounter_select { + UCHE_VBIF_READ_BEATS_TP = 0, + UCHE_VBIF_READ_BEATS_VFD = 1, + UCHE_VBIF_READ_BEATS_HLSQ = 2, + UCHE_VBIF_READ_BEATS_MARB = 3, + UCHE_VBIF_READ_BEATS_SP = 4, + UCHE_READ_REQUESTS_TP = 5, + UCHE_READ_REQUESTS_VFD = 6, + UCHE_READ_REQUESTS_HLSQ = 7, + UCHE_READ_REQUESTS_MARB = 8, + UCHE_READ_REQUESTS_SP = 9, + UCHE_WRITE_REQUESTS_MARB = 10, + UCHE_WRITE_REQUESTS_SP = 11, + UCHE_TAG_CHECK_FAILS = 12, + UCHE_EVICTS = 13, + UCHE_FLUSHES = 14, + UCHE_VBIF_LATENCY_CYCLES = 15, + UCHE_VBIF_LATENCY_SAMPLES = 16, + UCHE_BUSY_CYCLES = 17, + UCHE_VBIF_READ_BEATS_PC = 18, + UCHE_READ_REQUESTS_PC = 19, + UCHE_WRITE_REQUESTS_VPC = 20, + UCHE_STALL_BY_VBIF = 21, + UCHE_WRITE_REQUESTS_VSC = 22, + UCHE_POWER0 = 23, + UCHE_POWER1 = 24, + UCHE_POWER2 = 25, + UCHE_POWER3 = 26, + UCHE_POWER4 = 27, + UCHE_POWER5 = 28, + UCHE_POWER6 = 29, + UCHE_POWER7 = 30, +}; + +enum a4xx_vbif_perfcounter_select { + AXI_READ_REQUESTS_ID_0 = 0, + AXI_READ_REQUESTS_ID_1 = 1, + AXI_READ_REQUESTS_ID_2 = 2, + AXI_READ_REQUESTS_ID_3 = 3, + AXI_READ_REQUESTS_ID_4 = 4, + AXI_READ_REQUESTS_ID_5 = 5, + AXI_READ_REQUESTS_ID_6 = 6, + AXI_READ_REQUESTS_ID_7 = 7, + AXI_READ_REQUESTS_ID_8 = 8, + AXI_READ_REQUESTS_ID_9 = 9, + AXI_READ_REQUESTS_ID_10 = 10, + AXI_READ_REQUESTS_ID_11 = 11, + AXI_READ_REQUESTS_ID_12 = 12, + AXI_READ_REQUESTS_ID_13 = 13, + AXI_READ_REQUESTS_ID_14 = 14, + AXI_READ_REQUESTS_ID_15 = 15, + AXI0_READ_REQUESTS_TOTAL = 16, + AXI1_READ_REQUESTS_TOTAL = 17, + AXI2_READ_REQUESTS_TOTAL = 18, + AXI3_READ_REQUESTS_TOTAL = 19, + AXI_READ_REQUESTS_TOTAL = 20, + AXI_WRITE_REQUESTS_ID_0 = 21, + AXI_WRITE_REQUESTS_ID_1 = 22, + AXI_WRITE_REQUESTS_ID_2 = 23, + AXI_WRITE_REQUESTS_ID_3 = 24, + AXI_WRITE_REQUESTS_ID_4 = 25, + AXI_WRITE_REQUESTS_ID_5 = 26, + AXI_WRITE_REQUESTS_ID_6 = 27, + AXI_WRITE_REQUESTS_ID_7 = 28, + AXI_WRITE_REQUESTS_ID_8 = 29, + AXI_WRITE_REQUESTS_ID_9 = 30, + AXI_WRITE_REQUESTS_ID_10 = 31, + AXI_WRITE_REQUESTS_ID_11 = 32, + AXI_WRITE_REQUESTS_ID_12 = 33, + AXI_WRITE_REQUESTS_ID_13 = 34, + AXI_WRITE_REQUESTS_ID_14 = 35, + AXI_WRITE_REQUESTS_ID_15 = 36, + AXI0_WRITE_REQUESTS_TOTAL = 37, + AXI1_WRITE_REQUESTS_TOTAL = 38, + AXI2_WRITE_REQUESTS_TOTAL = 39, + AXI3_WRITE_REQUESTS_TOTAL = 40, + AXI_WRITE_REQUESTS_TOTAL = 41, + AXI_TOTAL_REQUESTS = 42, + AXI_READ_DATA_BEATS_ID_0 = 43, + AXI_READ_DATA_BEATS_ID_1 = 44, + AXI_READ_DATA_BEATS_ID_2 = 45, + AXI_READ_DATA_BEATS_ID_3 = 46, + AXI_READ_DATA_BEATS_ID_4 = 47, + AXI_READ_DATA_BEATS_ID_5 = 48, + AXI_READ_DATA_BEATS_ID_6 = 49, + AXI_READ_DATA_BEATS_ID_7 = 50, + AXI_READ_DATA_BEATS_ID_8 = 51, + AXI_READ_DATA_BEATS_ID_9 = 52, + AXI_READ_DATA_BEATS_ID_10 = 53, + AXI_READ_DATA_BEATS_ID_11 = 54, + AXI_READ_DATA_BEATS_ID_12 = 55, + AXI_READ_DATA_BEATS_ID_13 = 56, + AXI_READ_DATA_BEATS_ID_14 = 57, + AXI_READ_DATA_BEATS_ID_15 = 58, + AXI0_READ_DATA_BEATS_TOTAL = 59, + AXI1_READ_DATA_BEATS_TOTAL = 60, + AXI2_READ_DATA_BEATS_TOTAL = 61, + AXI3_READ_DATA_BEATS_TOTAL = 62, + AXI_READ_DATA_BEATS_TOTAL = 63, + AXI_WRITE_DATA_BEATS_ID_0 = 64, + AXI_WRITE_DATA_BEATS_ID_1 = 65, + AXI_WRITE_DATA_BEATS_ID_2 = 66, + AXI_WRITE_DATA_BEATS_ID_3 = 67, + AXI_WRITE_DATA_BEATS_ID_4 = 68, + AXI_WRITE_DATA_BEATS_ID_5 = 69, + AXI_WRITE_DATA_BEATS_ID_6 = 70, + AXI_WRITE_DATA_BEATS_ID_7 = 71, + AXI_WRITE_DATA_BEATS_ID_8 = 72, + AXI_WRITE_DATA_BEATS_ID_9 = 73, + AXI_WRITE_DATA_BEATS_ID_10 = 74, + AXI_WRITE_DATA_BEATS_ID_11 = 75, + 
AXI_WRITE_DATA_BEATS_ID_12 = 76, + AXI_WRITE_DATA_BEATS_ID_13 = 77, + AXI_WRITE_DATA_BEATS_ID_14 = 78, + AXI_WRITE_DATA_BEATS_ID_15 = 79, + AXI0_WRITE_DATA_BEATS_TOTAL = 80, + AXI1_WRITE_DATA_BEATS_TOTAL = 81, + AXI2_WRITE_DATA_BEATS_TOTAL = 82, + AXI3_WRITE_DATA_BEATS_TOTAL = 83, + AXI_WRITE_DATA_BEATS_TOTAL = 84, + AXI_DATA_BEATS_TOTAL = 85, + CYCLES_HELD_OFF_ID_0 = 86, + CYCLES_HELD_OFF_ID_1 = 87, + CYCLES_HELD_OFF_ID_2 = 88, + CYCLES_HELD_OFF_ID_3 = 89, + CYCLES_HELD_OFF_ID_4 = 90, + CYCLES_HELD_OFF_ID_5 = 91, + CYCLES_HELD_OFF_ID_6 = 92, + CYCLES_HELD_OFF_ID_7 = 93, + CYCLES_HELD_OFF_ID_8 = 94, + CYCLES_HELD_OFF_ID_9 = 95, + CYCLES_HELD_OFF_ID_10 = 96, + CYCLES_HELD_OFF_ID_11 = 97, + CYCLES_HELD_OFF_ID_12 = 98, + CYCLES_HELD_OFF_ID_13 = 99, + CYCLES_HELD_OFF_ID_14 = 100, + CYCLES_HELD_OFF_ID_15 = 101, + AXI_READ_REQUEST_HELD_OFF = 102, + AXI_WRITE_REQUEST_HELD_OFF = 103, + AXI_REQUEST_HELD_OFF = 104, + AXI_WRITE_DATA_HELD_OFF = 105, + OCMEM_AXI_READ_REQUEST_HELD_OFF = 106, + OCMEM_AXI_WRITE_REQUEST_HELD_OFF = 107, + OCMEM_AXI_REQUEST_HELD_OFF = 108, + OCMEM_AXI_WRITE_DATA_HELD_OFF = 109, + ELAPSED_CYCLES_DDR = 110, + ELAPSED_CYCLES_OCMEM = 111, +}; + +enum a4xx_vfd_perfcounter_select { + VFD_UCHE_BYTE_FETCHED = 0, + VFD_UCHE_TRANS = 1, + VFD_FETCH_INSTRUCTIONS = 3, + VFD_BUSY_CYCLES = 5, + VFD_STALL_CYCLES_UCHE = 6, + VFD_STALL_CYCLES_HLSQ = 7, + VFD_STALL_CYCLES_VPC_BYPASS = 8, + VFD_STALL_CYCLES_VPC_ALLOC = 9, + VFD_MODE_0_FIBERS = 13, + VFD_MODE_1_FIBERS = 14, + VFD_MODE_2_FIBERS = 15, + VFD_MODE_3_FIBERS = 16, + VFD_MODE_4_FIBERS = 17, + VFD_BFIFO_STALL = 18, + VFD_NUM_VERTICES_TOTAL = 19, + VFD_PACKER_FULL = 20, + VFD_UCHE_REQUEST_FIFO_FULL = 21, + VFD_STARVE_CYCLES_PC = 22, + VFD_STARVE_CYCLES_UCHE = 23, +}; + +enum a4xx_vpc_perfcounter_select { + VPC_SP_LM_COMPONENTS = 2, + VPC_SP0_LM_BYTES = 3, + VPC_SP1_LM_BYTES = 4, + VPC_SP2_LM_BYTES = 5, + VPC_SP3_LM_BYTES = 6, + VPC_WORKING_CYCLES = 7, + VPC_STALL_CYCLES_LM = 8, + VPC_STARVE_CYCLES_RAS = 9, + VPC_STREAMOUT_CYCLES = 10, + VPC_UCHE_TRANSACTIONS = 12, + VPC_STALL_CYCLES_UCHE = 13, + VPC_BUSY_CYCLES = 14, + VPC_STARVE_CYCLES_SP = 15, +}; + +enum a4xx_vsc_perfcounter_select { + VSC_BUSY_CYCLES = 0, + VSC_WORKING_CYCLES = 1, + VSC_STALL_CYCLES_UCHE = 2, + VSC_STARVE_CYCLES_RAS = 3, + VSC_EOT_NUM = 4, +}; + +enum a4xx_tex_filter { + A4XX_TEX_NEAREST = 0, + A4XX_TEX_LINEAR = 1, + A4XX_TEX_ANISO = 2, +}; + +enum a4xx_tex_clamp { + A4XX_TEX_REPEAT = 0, + A4XX_TEX_CLAMP_TO_EDGE = 1, + A4XX_TEX_MIRROR_REPEAT = 2, + A4XX_TEX_CLAMP_TO_BORDER = 3, + A4XX_TEX_MIRROR_CLAMP = 4, +}; + +enum a4xx_tex_aniso { + A4XX_TEX_ANISO_1 = 0, + A4XX_TEX_ANISO_2 = 1, + A4XX_TEX_ANISO_4 = 2, + A4XX_TEX_ANISO_8 = 3, + A4XX_TEX_ANISO_16 = 4, +}; + +enum a4xx_tex_swiz { + A4XX_TEX_X = 0, + A4XX_TEX_Y = 1, + A4XX_TEX_Z = 2, + A4XX_TEX_W = 3, + A4XX_TEX_ZERO = 4, + A4XX_TEX_ONE = 5, +}; + +enum a4xx_tex_type { + A4XX_TEX_1D = 0, + A4XX_TEX_2D = 1, + A4XX_TEX_CUBE = 2, + A4XX_TEX_3D = 3, + A4XX_TEX_BUFFER = 4, +}; + +#define A4XX_CGC_HLSQ_EARLY_CYC__MASK 0x00700000 +#define A4XX_CGC_HLSQ_EARLY_CYC__SHIFT 20 +static inline uint32_t A4XX_CGC_HLSQ_EARLY_CYC(uint32_t val) +{ + return ((val) << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT) & A4XX_CGC_HLSQ_EARLY_CYC__MASK; +} +#define A4XX_INT0_RBBM_GPU_IDLE 0x00000001 +#define A4XX_INT0_RBBM_AHB_ERROR 0x00000002 +#define A4XX_INT0_RBBM_REG_TIMEOUT 0x00000004 +#define A4XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008 +#define A4XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010 +#define A4XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00000020 +#define 
A4XX_INT0_VFD_ERROR 0x00000040 +#define A4XX_INT0_CP_SW_INT 0x00000080 +#define A4XX_INT0_CP_T0_PACKET_IN_IB 0x00000100 +#define A4XX_INT0_CP_OPCODE_ERROR 0x00000200 +#define A4XX_INT0_CP_RESERVED_BIT_ERROR 0x00000400 +#define A4XX_INT0_CP_HW_FAULT 0x00000800 +#define A4XX_INT0_CP_DMA 0x00001000 +#define A4XX_INT0_CP_IB2_INT 0x00002000 +#define A4XX_INT0_CP_IB1_INT 0x00004000 +#define A4XX_INT0_CP_RB_INT 0x00008000 +#define A4XX_INT0_CP_REG_PROTECT_FAULT 0x00010000 +#define A4XX_INT0_CP_RB_DONE_TS 0x00020000 +#define A4XX_INT0_CP_VS_DONE_TS 0x00040000 +#define A4XX_INT0_CP_PS_DONE_TS 0x00080000 +#define A4XX_INT0_CACHE_FLUSH_TS 0x00100000 +#define A4XX_INT0_CP_AHB_ERROR_HALT 0x00200000 +#define A4XX_INT0_MISC_HANG_DETECT 0x01000000 +#define A4XX_INT0_UCHE_OOB_ACCESS 0x02000000 +#define REG_A4XX_RB_GMEM_BASE_ADDR 0x00000cc0 + +#define REG_A4XX_RB_PERFCTR_RB_SEL_0 0x00000cc7 + +#define REG_A4XX_RB_PERFCTR_RB_SEL_1 0x00000cc8 + +#define REG_A4XX_RB_PERFCTR_RB_SEL_2 0x00000cc9 + +#define REG_A4XX_RB_PERFCTR_RB_SEL_3 0x00000cca + +#define REG_A4XX_RB_PERFCTR_RB_SEL_4 0x00000ccb + +#define REG_A4XX_RB_PERFCTR_RB_SEL_5 0x00000ccc + +#define REG_A4XX_RB_PERFCTR_RB_SEL_6 0x00000ccd + +#define REG_A4XX_RB_PERFCTR_RB_SEL_7 0x00000cce + +#define REG_A4XX_RB_PERFCTR_CCU_SEL_0 0x00000ccf + +#define REG_A4XX_RB_PERFCTR_CCU_SEL_1 0x00000cd0 + +#define REG_A4XX_RB_PERFCTR_CCU_SEL_2 0x00000cd1 + +#define REG_A4XX_RB_PERFCTR_CCU_SEL_3 0x00000cd2 + +#define REG_A4XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0 +#define A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK 0x00003fff +#define A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT 0 +static inline uint32_t A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val) +{ + return ((val) << A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK; +} +#define A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK 0x3fff0000 +#define A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT 16 +static inline uint32_t A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val) +{ + return ((val) << A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK; +} + +#define REG_A4XX_RB_CLEAR_COLOR_DW0 0x000020cc + +#define REG_A4XX_RB_CLEAR_COLOR_DW1 0x000020cd + +#define REG_A4XX_RB_CLEAR_COLOR_DW2 0x000020ce + +#define REG_A4XX_RB_CLEAR_COLOR_DW3 0x000020cf + +#define REG_A4XX_RB_MODE_CONTROL 0x000020a0 +#define A4XX_RB_MODE_CONTROL_WIDTH__MASK 0x0000003f +#define A4XX_RB_MODE_CONTROL_WIDTH__SHIFT 0 +static inline uint32_t A4XX_RB_MODE_CONTROL_WIDTH(uint32_t val) +{ + return ((val >> 5) << A4XX_RB_MODE_CONTROL_WIDTH__SHIFT) & A4XX_RB_MODE_CONTROL_WIDTH__MASK; +} +#define A4XX_RB_MODE_CONTROL_HEIGHT__MASK 0x00003f00 +#define A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT 8 +static inline uint32_t A4XX_RB_MODE_CONTROL_HEIGHT(uint32_t val) +{ + return ((val >> 5) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK; +} +#define A4XX_RB_MODE_CONTROL_ENABLE_GMEM 0x00010000 + +#define REG_A4XX_RB_RENDER_CONTROL 0x000020a1 +#define A4XX_RB_RENDER_CONTROL_BINNING_PASS 0x00000001 +#define A4XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00000020 + +#define REG_A4XX_RB_MSAA_CONTROL 0x000020a2 +#define A4XX_RB_MSAA_CONTROL_DISABLE 0x00001000 +#define A4XX_RB_MSAA_CONTROL_SAMPLES__MASK 0x0000e000 +#define A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT 13 +static inline uint32_t A4XX_RB_MSAA_CONTROL_SAMPLES(uint32_t val) +{ + return ((val) << A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A4XX_RB_MSAA_CONTROL_SAMPLES__MASK; +} + +#define REG_A4XX_RB_RENDER_CONTROL2 0x000020a3 
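/* [Editor's illustration -- not part of the autogenerated header] Some
 * packing helpers fold a unit conversion into the macro:
 * A4XX_RB_MODE_CONTROL_WIDTH/HEIGHT above shift their argument right by
 * 5 before placing it, so callers pass the GMEM bin size in pixels and
 * the register stores it in 32-pixel units. A minimal sketch with an
 * arbitrary 256x128 bin; the driver would write the returned value to
 * REG_A4XX_RB_MODE_CONTROL through its register accessor:
 */
static inline uint32_t a4xx_bin_size_example(void)
{
	/* 256 >> 5 == 8 lands in bits [5:0], 128 >> 5 == 4 in bits [13:8] */
	return A4XX_RB_MODE_CONTROL_WIDTH(256) |
	       A4XX_RB_MODE_CONTROL_HEIGHT(128) |
	       A4XX_RB_MODE_CONTROL_ENABLE_GMEM;
}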
+#define A4XX_RB_RENDER_CONTROL2_COORD_MASK__MASK 0x0000000f +#define A4XX_RB_RENDER_CONTROL2_COORD_MASK__SHIFT 0 +static inline uint32_t A4XX_RB_RENDER_CONTROL2_COORD_MASK(uint32_t val) +{ + return ((val) << A4XX_RB_RENDER_CONTROL2_COORD_MASK__SHIFT) & A4XX_RB_RENDER_CONTROL2_COORD_MASK__MASK; +} +#define A4XX_RB_RENDER_CONTROL2_SAMPLEMASK 0x00000010 +#define A4XX_RB_RENDER_CONTROL2_FACENESS 0x00000020 +#define A4XX_RB_RENDER_CONTROL2_SAMPLEID 0x00000040 +#define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK 0x00000380 +#define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT 7 +static inline uint32_t A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES(uint32_t val) +{ + return ((val) << A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT) & A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK; +} +#define A4XX_RB_RENDER_CONTROL2_SAMPLEID_HR 0x00000800 +#define A4XX_RB_RENDER_CONTROL2_IJ_PERSP_PIXEL 0x00001000 +#define A4XX_RB_RENDER_CONTROL2_IJ_PERSP_CENTROID 0x00002000 +#define A4XX_RB_RENDER_CONTROL2_IJ_PERSP_SAMPLE 0x00004000 +#define A4XX_RB_RENDER_CONTROL2_SIZE 0x00008000 + +static inline uint32_t REG_A4XX_RB_MRT(uint32_t i0) { return 0x000020a4 + 0x5*i0; } + +static inline uint32_t REG_A4XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020a4 + 0x5*i0; } +#define A4XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008 +#define A4XX_RB_MRT_CONTROL_BLEND 0x00000010 +#define A4XX_RB_MRT_CONTROL_BLEND2 0x00000020 +#define A4XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000040 +#define A4XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00 +#define A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8 +static inline uint32_t A4XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val) +{ + return ((val) << A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A4XX_RB_MRT_CONTROL_ROP_CODE__MASK; +} +#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000 +#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24 +static inline uint32_t A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val) +{ + return ((val) << A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK; +} + +static inline uint32_t REG_A4XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020a5 + 0x5*i0; } +#define A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000003f +#define A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0 +static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a4xx_color_fmt val) +{ + return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK; +} +#define A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x000000c0 +#define A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 6 +static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a4xx_tile_mode val) +{ + return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK; +} +#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK 0x00000600 +#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT 9 +static inline uint32_t A4XX_RB_MRT_BUF_INFO_DITHER_MODE(enum adreno_rb_dither_mode val) +{ + return ((val) << A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT) & A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK; +} +#define A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00001800 +#define A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 11 +static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val) +{ + return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK; +} +#define A4XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00002000 +#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xffffc000 +#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 14 +static inline uint32_t 
A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val) +{ + return ((val >> 4) << A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK; +} + +static inline uint32_t REG_A4XX_RB_MRT_BASE(uint32_t i0) { return 0x000020a6 + 0x5*i0; } + +static inline uint32_t REG_A4XX_RB_MRT_CONTROL3(uint32_t i0) { return 0x000020a7 + 0x5*i0; } +#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK 0x03fffff8 +#define A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT 3 +static inline uint32_t A4XX_RB_MRT_CONTROL3_STRIDE(uint32_t val) +{ + return ((val) << A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT) & A4XX_RB_MRT_CONTROL3_STRIDE__MASK; +} + +static inline uint32_t REG_A4XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020a8 + 0x5*i0; } +#define A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f +#define A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0 +static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val) +{ + return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK; +} +#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0 +#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5 +static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val) +{ + return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK; +} +#define A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00 +#define A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8 +static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val) +{ + return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK; +} +#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000 +#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16 +static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val) +{ + return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK; +} +#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000 +#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21 +static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val) +{ + return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK; +} +#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000 +#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24 +static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val) +{ + return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK; +} + +#define REG_A4XX_RB_BLEND_RED 0x000020f0 +#define A4XX_RB_BLEND_RED_UINT__MASK 0x000000ff +#define A4XX_RB_BLEND_RED_UINT__SHIFT 0 +static inline uint32_t A4XX_RB_BLEND_RED_UINT(uint32_t val) +{ + return ((val) << A4XX_RB_BLEND_RED_UINT__SHIFT) & A4XX_RB_BLEND_RED_UINT__MASK; +} +#define A4XX_RB_BLEND_RED_SINT__MASK 0x0000ff00 +#define A4XX_RB_BLEND_RED_SINT__SHIFT 8 +static inline uint32_t A4XX_RB_BLEND_RED_SINT(uint32_t val) +{ + return ((val) << A4XX_RB_BLEND_RED_SINT__SHIFT) & A4XX_RB_BLEND_RED_SINT__MASK; +} +#define A4XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000 +#define A4XX_RB_BLEND_RED_FLOAT__SHIFT 16 +static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val) +{ + 
return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_RED_FLOAT__SHIFT) & A4XX_RB_BLEND_RED_FLOAT__MASK; +} + +#define REG_A4XX_RB_BLEND_RED_F32 0x000020f1 +#define A4XX_RB_BLEND_RED_F32__MASK 0xffffffff +#define A4XX_RB_BLEND_RED_F32__SHIFT 0 +static inline uint32_t A4XX_RB_BLEND_RED_F32(float val) +{ + return ((fui(val)) << A4XX_RB_BLEND_RED_F32__SHIFT) & A4XX_RB_BLEND_RED_F32__MASK; +} + +#define REG_A4XX_RB_BLEND_GREEN 0x000020f2 +#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff +#define A4XX_RB_BLEND_GREEN_UINT__SHIFT 0 +static inline uint32_t A4XX_RB_BLEND_GREEN_UINT(uint32_t val) +{ + return ((val) << A4XX_RB_BLEND_GREEN_UINT__SHIFT) & A4XX_RB_BLEND_GREEN_UINT__MASK; +} +#define A4XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00 +#define A4XX_RB_BLEND_GREEN_SINT__SHIFT 8 +static inline uint32_t A4XX_RB_BLEND_GREEN_SINT(uint32_t val) +{ + return ((val) << A4XX_RB_BLEND_GREEN_SINT__SHIFT) & A4XX_RB_BLEND_GREEN_SINT__MASK; +} +#define A4XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000 +#define A4XX_RB_BLEND_GREEN_FLOAT__SHIFT 16 +static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val) +{ + return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A4XX_RB_BLEND_GREEN_FLOAT__MASK; +} + +#define REG_A4XX_RB_BLEND_GREEN_F32 0x000020f3 +#define A4XX_RB_BLEND_GREEN_F32__MASK 0xffffffff +#define A4XX_RB_BLEND_GREEN_F32__SHIFT 0 +static inline uint32_t A4XX_RB_BLEND_GREEN_F32(float val) +{ + return ((fui(val)) << A4XX_RB_BLEND_GREEN_F32__SHIFT) & A4XX_RB_BLEND_GREEN_F32__MASK; +} + +#define REG_A4XX_RB_BLEND_BLUE 0x000020f4 +#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff +#define A4XX_RB_BLEND_BLUE_UINT__SHIFT 0 +static inline uint32_t A4XX_RB_BLEND_BLUE_UINT(uint32_t val) +{ + return ((val) << A4XX_RB_BLEND_BLUE_UINT__SHIFT) & A4XX_RB_BLEND_BLUE_UINT__MASK; +} +#define A4XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00 +#define A4XX_RB_BLEND_BLUE_SINT__SHIFT 8 +static inline uint32_t A4XX_RB_BLEND_BLUE_SINT(uint32_t val) +{ + return ((val) << A4XX_RB_BLEND_BLUE_SINT__SHIFT) & A4XX_RB_BLEND_BLUE_SINT__MASK; +} +#define A4XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000 +#define A4XX_RB_BLEND_BLUE_FLOAT__SHIFT 16 +static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val) +{ + return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A4XX_RB_BLEND_BLUE_FLOAT__MASK; +} + +#define REG_A4XX_RB_BLEND_BLUE_F32 0x000020f5 +#define A4XX_RB_BLEND_BLUE_F32__MASK 0xffffffff +#define A4XX_RB_BLEND_BLUE_F32__SHIFT 0 +static inline uint32_t A4XX_RB_BLEND_BLUE_F32(float val) +{ + return ((fui(val)) << A4XX_RB_BLEND_BLUE_F32__SHIFT) & A4XX_RB_BLEND_BLUE_F32__MASK; +} + +#define REG_A4XX_RB_BLEND_ALPHA 0x000020f6 +#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff +#define A4XX_RB_BLEND_ALPHA_UINT__SHIFT 0 +static inline uint32_t A4XX_RB_BLEND_ALPHA_UINT(uint32_t val) +{ + return ((val) << A4XX_RB_BLEND_ALPHA_UINT__SHIFT) & A4XX_RB_BLEND_ALPHA_UINT__MASK; +} +#define A4XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00 +#define A4XX_RB_BLEND_ALPHA_SINT__SHIFT 8 +static inline uint32_t A4XX_RB_BLEND_ALPHA_SINT(uint32_t val) +{ + return ((val) << A4XX_RB_BLEND_ALPHA_SINT__SHIFT) & A4XX_RB_BLEND_ALPHA_SINT__MASK; +} +#define A4XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000 +#define A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16 +static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val) +{ + return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A4XX_RB_BLEND_ALPHA_FLOAT__MASK; +} + +#define REG_A4XX_RB_BLEND_ALPHA_F32 0x000020f7 +#define A4XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff +#define A4XX_RB_BLEND_ALPHA_F32__SHIFT 0 
+static inline uint32_t A4XX_RB_BLEND_ALPHA_F32(float val) +{ + return ((fui(val)) << A4XX_RB_BLEND_ALPHA_F32__SHIFT) & A4XX_RB_BLEND_ALPHA_F32__MASK; +} + +#define REG_A4XX_RB_ALPHA_CONTROL 0x000020f8 +#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff +#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0 +static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val) +{ + return ((val) << A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK; +} +#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100 +#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00 +#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9 +static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val) +{ + return ((val) << A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK; +} + +#define REG_A4XX_RB_FS_OUTPUT 0x000020f9 +#define A4XX_RB_FS_OUTPUT_ENABLE_BLEND__MASK 0x000000ff +#define A4XX_RB_FS_OUTPUT_ENABLE_BLEND__SHIFT 0 +static inline uint32_t A4XX_RB_FS_OUTPUT_ENABLE_BLEND(uint32_t val) +{ + return ((val) << A4XX_RB_FS_OUTPUT_ENABLE_BLEND__SHIFT) & A4XX_RB_FS_OUTPUT_ENABLE_BLEND__MASK; +} +#define A4XX_RB_FS_OUTPUT_INDEPENDENT_BLEND 0x00000100 +#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK 0xffff0000 +#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT 16 +static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val) +{ + return ((val) << A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT) & A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK; +} + +#define REG_A4XX_RB_SAMPLE_COUNT_CONTROL 0x000020fa +#define A4XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002 +#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK 0xfffffffc +#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT 2 +static inline uint32_t A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR(uint32_t val) +{ + return ((val >> 2) << A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT) & A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK; +} + +#define REG_A4XX_RB_RENDER_COMPONENTS 0x000020fb +#define A4XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f +#define A4XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0 +static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT0(uint32_t val) +{ + return ((val) << A4XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT0__MASK; +} +#define A4XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0 +#define A4XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4 +static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT1(uint32_t val) +{ + return ((val) << A4XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT1__MASK; +} +#define A4XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00 +#define A4XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8 +static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT2(uint32_t val) +{ + return ((val) << A4XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT2__MASK; +} +#define A4XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000 +#define A4XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12 +static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT3(uint32_t val) +{ + return ((val) << A4XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT3__MASK; +} +#define A4XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000 +#define A4XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16 +static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT4(uint32_t val) +{ + return ((val) << A4XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT4__MASK; +} +#define A4XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000 +#define A4XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20 +static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT5(uint32_t val) +{ + return ((val) << 
A4XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT5__MASK; +} +#define A4XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000 +#define A4XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24 +static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT6(uint32_t val) +{ + return ((val) << A4XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT6__MASK; +} +#define A4XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000 +#define A4XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28 +static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT7(uint32_t val) +{ + return ((val) << A4XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT7__MASK; +} + +#define REG_A4XX_RB_COPY_CONTROL 0x000020fc +#define A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003 +#define A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT 0 +static inline uint32_t A4XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val) +{ + return ((val) << A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK; +} +#define A4XX_RB_COPY_CONTROL_MODE__MASK 0x00000070 +#define A4XX_RB_COPY_CONTROL_MODE__SHIFT 4 +static inline uint32_t A4XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val) +{ + return ((val) << A4XX_RB_COPY_CONTROL_MODE__SHIFT) & A4XX_RB_COPY_CONTROL_MODE__MASK; +} +#define A4XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00 +#define A4XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8 +static inline uint32_t A4XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val) +{ + return ((val) << A4XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A4XX_RB_COPY_CONTROL_FASTCLEAR__MASK; +} +#define A4XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000 +#define A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14 +static inline uint32_t A4XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val) +{ + return ((val >> 14) << A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A4XX_RB_COPY_CONTROL_GMEM_BASE__MASK; +} + +#define REG_A4XX_RB_COPY_DEST_BASE 0x000020fd +#define A4XX_RB_COPY_DEST_BASE_BASE__MASK 0xffffffe0 +#define A4XX_RB_COPY_DEST_BASE_BASE__SHIFT 5 +static inline uint32_t A4XX_RB_COPY_DEST_BASE_BASE(uint32_t val) +{ + return ((val >> 5) << A4XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A4XX_RB_COPY_DEST_BASE_BASE__MASK; +} + +#define REG_A4XX_RB_COPY_DEST_PITCH 0x000020fe +#define A4XX_RB_COPY_DEST_PITCH_PITCH__MASK 0xffffffff +#define A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0 +static inline uint32_t A4XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val) +{ + return ((val >> 5) << A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A4XX_RB_COPY_DEST_PITCH_PITCH__MASK; +} + +#define REG_A4XX_RB_COPY_DEST_INFO 0x000020ff +#define A4XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000fc +#define A4XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 2 +static inline uint32_t A4XX_RB_COPY_DEST_INFO_FORMAT(enum a4xx_color_fmt val) +{ + return ((val) << A4XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A4XX_RB_COPY_DEST_INFO_FORMAT__MASK; +} +#define A4XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300 +#define A4XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8 +static inline uint32_t A4XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val) +{ + return ((val) << A4XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A4XX_RB_COPY_DEST_INFO_SWAP__MASK; +} +#define A4XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00 +#define A4XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10 +static inline uint32_t A4XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val) +{ + return ((val) << A4XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A4XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK; +} +#define A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000 +#define A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14 +static inline uint32_t 
A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val) +{ + return ((val) << A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK; +} +#define A4XX_RB_COPY_DEST_INFO_ENDIAN__MASK 0x001c0000 +#define A4XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT 18 +static inline uint32_t A4XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val) +{ + return ((val) << A4XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A4XX_RB_COPY_DEST_INFO_ENDIAN__MASK; +} +#define A4XX_RB_COPY_DEST_INFO_TILE__MASK 0x03000000 +#define A4XX_RB_COPY_DEST_INFO_TILE__SHIFT 24 +static inline uint32_t A4XX_RB_COPY_DEST_INFO_TILE(enum a4xx_tile_mode val) +{ + return ((val) << A4XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A4XX_RB_COPY_DEST_INFO_TILE__MASK; +} + +#define REG_A4XX_RB_FS_OUTPUT_REG 0x00002100 +#define A4XX_RB_FS_OUTPUT_REG_MRT__MASK 0x0000000f +#define A4XX_RB_FS_OUTPUT_REG_MRT__SHIFT 0 +static inline uint32_t A4XX_RB_FS_OUTPUT_REG_MRT(uint32_t val) +{ + return ((val) << A4XX_RB_FS_OUTPUT_REG_MRT__SHIFT) & A4XX_RB_FS_OUTPUT_REG_MRT__MASK; +} +#define A4XX_RB_FS_OUTPUT_REG_FRAG_WRITES_Z 0x00000020 + +#define REG_A4XX_RB_DEPTH_CONTROL 0x00002101 +#define A4XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z 0x00000001 +#define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x00000002 +#define A4XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004 +#define A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070 +#define A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4 +static inline uint32_t A4XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val) +{ + return ((val) << A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK; +} +#define A4XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080 +#define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00010000 +#define A4XX_RB_DEPTH_CONTROL_FORCE_FRAGZ_TO_FS 0x00020000 +#define A4XX_RB_DEPTH_CONTROL_Z_READ_ENABLE 0x80000000 + +#define REG_A4XX_RB_DEPTH_CLEAR 0x00002102 + +#define REG_A4XX_RB_DEPTH_INFO 0x00002103 +#define A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000003 +#define A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0 +static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum a4xx_depth_format val) +{ + return ((val) << A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK; +} +#define A4XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff000 +#define A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12 +static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val) +{ + return ((val >> 12) << A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A4XX_RB_DEPTH_INFO_DEPTH_BASE__MASK; +} + +#define REG_A4XX_RB_DEPTH_PITCH 0x00002104 +#define A4XX_RB_DEPTH_PITCH__MASK 0xffffffff +#define A4XX_RB_DEPTH_PITCH__SHIFT 0 +static inline uint32_t A4XX_RB_DEPTH_PITCH(uint32_t val) +{ + return ((val >> 5) << A4XX_RB_DEPTH_PITCH__SHIFT) & A4XX_RB_DEPTH_PITCH__MASK; +} + +#define REG_A4XX_RB_DEPTH_PITCH2 0x00002105 +#define A4XX_RB_DEPTH_PITCH2__MASK 0xffffffff +#define A4XX_RB_DEPTH_PITCH2__SHIFT 0 +static inline uint32_t A4XX_RB_DEPTH_PITCH2(uint32_t val) +{ + return ((val >> 5) << A4XX_RB_DEPTH_PITCH2__SHIFT) & A4XX_RB_DEPTH_PITCH2__MASK; +} + +#define REG_A4XX_RB_STENCIL_CONTROL 0x00002106 +#define A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001 +#define A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002 +#define A4XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004 +#define A4XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700 +#define A4XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8 +static inline uint32_t A4XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val) +{ + return ((val) << A4XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & 
A4XX_RB_STENCIL_CONTROL_FUNC__MASK; +} +#define A4XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800 +#define A4XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11 +static inline uint32_t A4XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val) +{ + return ((val) << A4XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A4XX_RB_STENCIL_CONTROL_FAIL__MASK; +} +#define A4XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000 +#define A4XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14 +static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val) +{ + return ((val) << A4XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZPASS__MASK; +} +#define A4XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000 +#define A4XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17 +static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val) +{ + return ((val) << A4XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZFAIL__MASK; +} +#define A4XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000 +#define A4XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20 +static inline uint32_t A4XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val) +{ + return ((val) << A4XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_FUNC_BF__MASK; +} +#define A4XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000 +#define A4XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23 +static inline uint32_t A4XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val) +{ + return ((val) << A4XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_FAIL_BF__MASK; +} +#define A4XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000 +#define A4XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26 +static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val) +{ + return ((val) << A4XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK; +} +#define A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000 +#define A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29 +static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val) +{ + return ((val) << A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK; +} + +#define REG_A4XX_RB_STENCIL_CONTROL2 0x00002107 +#define A4XX_RB_STENCIL_CONTROL2_STENCIL_BUFFER 0x00000001 + +#define REG_A4XX_RB_STENCIL_INFO 0x00002108 +#define A4XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001 +#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK 0xfffff000 +#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 12 +static inline uint32_t A4XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val) +{ + return ((val >> 12) << A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK; +} + +#define REG_A4XX_RB_STENCIL_PITCH 0x00002109 +#define A4XX_RB_STENCIL_PITCH__MASK 0xffffffff +#define A4XX_RB_STENCIL_PITCH__SHIFT 0 +static inline uint32_t A4XX_RB_STENCIL_PITCH(uint32_t val) +{ + return ((val >> 5) << A4XX_RB_STENCIL_PITCH__SHIFT) & A4XX_RB_STENCIL_PITCH__MASK; +} + +#define REG_A4XX_RB_STENCILREFMASK 0x0000210b +#define A4XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff +#define A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0 +static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILREF(uint32_t val) +{ + return ((val) << A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILREF__MASK; +} +#define A4XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00 +#define A4XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8 +static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val) +{ + return ((val) << A4XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILMASK__MASK; +} 
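/*
 * Editorial illustration, not part of the upstream patch: the
 * RB_STENCIL_CONTROL fields above pack a complete stencil state into one
 * word (front-face fields in the low half, *_BF back-face fields in the
 * high half).  A hedged sketch for the front face only, assuming the
 * caller supplies values from the adreno_common enums (the function name
 * is hypothetical):
 */
static inline uint32_t example_stencil_front(enum adreno_compare_func func,
					     enum adreno_stencil_op fail,
					     enum adreno_stencil_op zpass,
					     enum adreno_stencil_op zfail)
{
	return A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE |
	       A4XX_RB_STENCIL_CONTROL_FUNC(func) |
	       A4XX_RB_STENCIL_CONTROL_FAIL(fail) |
	       A4XX_RB_STENCIL_CONTROL_ZPASS(zpass) |
	       A4XX_RB_STENCIL_CONTROL_ZFAIL(zfail);
}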
+#define A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000 +#define A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16 +static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val) +{ + return ((val) << A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK; +} + +#define REG_A4XX_RB_STENCILREFMASK_BF 0x0000210c +#define A4XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff +#define A4XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0 +static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val) +{ + return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILREF__MASK; +} +#define A4XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00 +#define A4XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8 +static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val) +{ + return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK; +} +#define A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000 +#define A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16 +static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val) +{ + return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK; +} + +#define REG_A4XX_RB_BIN_OFFSET 0x0000210d +#define A4XX_RB_BIN_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000 +#define A4XX_RB_BIN_OFFSET_X__MASK 0x00007fff +#define A4XX_RB_BIN_OFFSET_X__SHIFT 0 +static inline uint32_t A4XX_RB_BIN_OFFSET_X(uint32_t val) +{ + return ((val) << A4XX_RB_BIN_OFFSET_X__SHIFT) & A4XX_RB_BIN_OFFSET_X__MASK; +} +#define A4XX_RB_BIN_OFFSET_Y__MASK 0x7fff0000 +#define A4XX_RB_BIN_OFFSET_Y__SHIFT 16 +static inline uint32_t A4XX_RB_BIN_OFFSET_Y(uint32_t val) +{ + return ((val) << A4XX_RB_BIN_OFFSET_Y__SHIFT) & A4XX_RB_BIN_OFFSET_Y__MASK; +} + +static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP(uint32_t i0) { return 0x00002120 + 0x2*i0; } + +static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP_MIN(uint32_t i0) { return 0x00002120 + 0x2*i0; } + +static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP_MAX(uint32_t i0) { return 0x00002121 + 0x2*i0; } + +#define REG_A4XX_RBBM_HW_VERSION 0x00000000 + +#define REG_A4XX_RBBM_HW_CONFIGURATION 0x00000002 + +static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_TP(uint32_t i0) { return 0x00000004 + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_TP_REG(uint32_t i0) { return 0x00000004 + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_TP(uint32_t i0) { return 0x00000008 + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_TP_REG(uint32_t i0) { return 0x00000008 + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_TP(uint32_t i0) { return 0x0000000c + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_TP_REG(uint32_t i0) { return 0x0000000c + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP(uint32_t i0) { return 0x00000010 + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP_REG(uint32_t i0) { return 0x00000010 + 0x1*i0; } + +#define REG_A4XX_RBBM_CLOCK_CTL_UCHE 0x00000014 + +#define REG_A4XX_RBBM_CLOCK_CTL2_UCHE 0x00000015 + +#define REG_A4XX_RBBM_CLOCK_CTL3_UCHE 0x00000016 + +#define REG_A4XX_RBBM_CLOCK_CTL4_UCHE 0x00000017 + +#define REG_A4XX_RBBM_CLOCK_HYST_UCHE 0x00000018 + +#define REG_A4XX_RBBM_CLOCK_DELAY_UCHE 0x00000019 + +#define REG_A4XX_RBBM_CLOCK_MODE_GPC 0x0000001a + +#define REG_A4XX_RBBM_CLOCK_DELAY_GPC 0x0000001b + +#define 
REG_A4XX_RBBM_CLOCK_HYST_GPC 0x0000001c + +#define REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM 0x0000001d + +#define REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x0000001e + +#define REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x0000001f + +#define REG_A4XX_RBBM_CLOCK_CTL 0x00000020 + +#define REG_A4XX_RBBM_SP_HYST_CNT 0x00000021 + +#define REG_A4XX_RBBM_SW_RESET_CMD 0x00000022 + +#define REG_A4XX_RBBM_AHB_CTL0 0x00000023 + +#define REG_A4XX_RBBM_AHB_CTL1 0x00000024 + +#define REG_A4XX_RBBM_AHB_CMD 0x00000025 + +#define REG_A4XX_RBBM_RB_SUB_BLOCK_SEL_CTL 0x00000026 + +#define REG_A4XX_RBBM_RAM_ACC_63_32 0x00000028 + +#define REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x0000002b + +#define REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL 0x0000002f + +#define REG_A4XX_RBBM_INTERFACE_HANG_MASK_CTL4 0x00000034 + +#define REG_A4XX_RBBM_INT_CLEAR_CMD 0x00000036 + +#define REG_A4XX_RBBM_INT_0_MASK 0x00000037 + +#define REG_A4XX_RBBM_RBBM_CTL 0x0000003e + +#define REG_A4XX_RBBM_AHB_DEBUG_CTL 0x0000003f + +#define REG_A4XX_RBBM_VBIF_DEBUG_CTL 0x00000041 + +#define REG_A4XX_RBBM_CLOCK_CTL2 0x00000042 + +#define REG_A4XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045 + +#define REG_A4XX_RBBM_RESET_CYCLES 0x00000047 + +#define REG_A4XX_RBBM_EXT_TRACE_BUS_CTL 0x00000049 + +#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_A 0x0000004a + +#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_B 0x0000004b + +#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_C 0x0000004c + +#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_D 0x0000004d + +#define REG_A4XX_RBBM_POWER_CNTL_IP 0x00000098 +#define A4XX_RBBM_POWER_CNTL_IP_SW_COLLAPSE 0x00000001 +#define A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON 0x00100000 + +#define REG_A4XX_RBBM_PERFCTR_CP_0_LO 0x0000009c + +#define REG_A4XX_RBBM_PERFCTR_CP_0_HI 0x0000009d + +#define REG_A4XX_RBBM_PERFCTR_CP_1_LO 0x0000009e + +#define REG_A4XX_RBBM_PERFCTR_CP_1_HI 0x0000009f + +#define REG_A4XX_RBBM_PERFCTR_CP_2_LO 0x000000a0 + +#define REG_A4XX_RBBM_PERFCTR_CP_2_HI 0x000000a1 + +#define REG_A4XX_RBBM_PERFCTR_CP_3_LO 0x000000a2 + +#define REG_A4XX_RBBM_PERFCTR_CP_3_HI 0x000000a3 + +#define REG_A4XX_RBBM_PERFCTR_CP_4_LO 0x000000a4 + +#define REG_A4XX_RBBM_PERFCTR_CP_4_HI 0x000000a5 + +#define REG_A4XX_RBBM_PERFCTR_CP_5_LO 0x000000a6 + +#define REG_A4XX_RBBM_PERFCTR_CP_5_HI 0x000000a7 + +#define REG_A4XX_RBBM_PERFCTR_CP_6_LO 0x000000a8 + +#define REG_A4XX_RBBM_PERFCTR_CP_6_HI 0x000000a9 + +#define REG_A4XX_RBBM_PERFCTR_CP_7_LO 0x000000aa + +#define REG_A4XX_RBBM_PERFCTR_CP_7_HI 0x000000ab + +#define REG_A4XX_RBBM_PERFCTR_RBBM_0_LO 0x000000ac + +#define REG_A4XX_RBBM_PERFCTR_RBBM_0_HI 0x000000ad + +#define REG_A4XX_RBBM_PERFCTR_RBBM_1_LO 0x000000ae + +#define REG_A4XX_RBBM_PERFCTR_RBBM_1_HI 0x000000af + +#define REG_A4XX_RBBM_PERFCTR_RBBM_2_LO 0x000000b0 + +#define REG_A4XX_RBBM_PERFCTR_RBBM_2_HI 0x000000b1 + +#define REG_A4XX_RBBM_PERFCTR_RBBM_3_LO 0x000000b2 + +#define REG_A4XX_RBBM_PERFCTR_RBBM_3_HI 0x000000b3 + +#define REG_A4XX_RBBM_PERFCTR_PC_0_LO 0x000000b4 + +#define REG_A4XX_RBBM_PERFCTR_PC_0_HI 0x000000b5 + +#define REG_A4XX_RBBM_PERFCTR_PC_1_LO 0x000000b6 + +#define REG_A4XX_RBBM_PERFCTR_PC_1_HI 0x000000b7 + +#define REG_A4XX_RBBM_PERFCTR_PC_2_LO 0x000000b8 + +#define REG_A4XX_RBBM_PERFCTR_PC_2_HI 0x000000b9 + +#define REG_A4XX_RBBM_PERFCTR_PC_3_LO 0x000000ba + +#define REG_A4XX_RBBM_PERFCTR_PC_3_HI 0x000000bb + +#define REG_A4XX_RBBM_PERFCTR_PC_4_LO 0x000000bc + +#define REG_A4XX_RBBM_PERFCTR_PC_4_HI 0x000000bd + +#define REG_A4XX_RBBM_PERFCTR_PC_5_LO 0x000000be + +#define REG_A4XX_RBBM_PERFCTR_PC_5_HI 0x000000bf + +#define REG_A4XX_RBBM_PERFCTR_PC_6_LO 0x000000c0 + +#define 
REG_A4XX_RBBM_PERFCTR_PC_6_HI 0x000000c1 + +#define REG_A4XX_RBBM_PERFCTR_PC_7_LO 0x000000c2 + +#define REG_A4XX_RBBM_PERFCTR_PC_7_HI 0x000000c3 + +#define REG_A4XX_RBBM_PERFCTR_VFD_0_LO 0x000000c4 + +#define REG_A4XX_RBBM_PERFCTR_VFD_0_HI 0x000000c5 + +#define REG_A4XX_RBBM_PERFCTR_VFD_1_LO 0x000000c6 + +#define REG_A4XX_RBBM_PERFCTR_VFD_1_HI 0x000000c7 + +#define REG_A4XX_RBBM_PERFCTR_VFD_2_LO 0x000000c8 + +#define REG_A4XX_RBBM_PERFCTR_VFD_2_HI 0x000000c9 + +#define REG_A4XX_RBBM_PERFCTR_VFD_3_LO 0x000000ca + +#define REG_A4XX_RBBM_PERFCTR_VFD_3_HI 0x000000cb + +#define REG_A4XX_RBBM_PERFCTR_VFD_4_LO 0x000000cc + +#define REG_A4XX_RBBM_PERFCTR_VFD_4_HI 0x000000cd + +#define REG_A4XX_RBBM_PERFCTR_VFD_5_LO 0x000000ce + +#define REG_A4XX_RBBM_PERFCTR_VFD_5_HI 0x000000cf + +#define REG_A4XX_RBBM_PERFCTR_VFD_6_LO 0x000000d0 + +#define REG_A4XX_RBBM_PERFCTR_VFD_6_HI 0x000000d1 + +#define REG_A4XX_RBBM_PERFCTR_VFD_7_LO 0x000000d2 + +#define REG_A4XX_RBBM_PERFCTR_VFD_7_HI 0x000000d3 + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000d4 + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000d5 + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000d6 + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000d7 + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000d8 + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000d9 + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000da + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000db + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000dc + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000dd + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000de + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000df + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_LO 0x000000e0 + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_HI 0x000000e1 + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_LO 0x000000e2 + +#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_HI 0x000000e3 + +#define REG_A4XX_RBBM_PERFCTR_VPC_0_LO 0x000000e4 + +#define REG_A4XX_RBBM_PERFCTR_VPC_0_HI 0x000000e5 + +#define REG_A4XX_RBBM_PERFCTR_VPC_1_LO 0x000000e6 + +#define REG_A4XX_RBBM_PERFCTR_VPC_1_HI 0x000000e7 + +#define REG_A4XX_RBBM_PERFCTR_VPC_2_LO 0x000000e8 + +#define REG_A4XX_RBBM_PERFCTR_VPC_2_HI 0x000000e9 + +#define REG_A4XX_RBBM_PERFCTR_VPC_3_LO 0x000000ea + +#define REG_A4XX_RBBM_PERFCTR_VPC_3_HI 0x000000eb + +#define REG_A4XX_RBBM_PERFCTR_CCU_0_LO 0x000000ec + +#define REG_A4XX_RBBM_PERFCTR_CCU_0_HI 0x000000ed + +#define REG_A4XX_RBBM_PERFCTR_CCU_1_LO 0x000000ee + +#define REG_A4XX_RBBM_PERFCTR_CCU_1_HI 0x000000ef + +#define REG_A4XX_RBBM_PERFCTR_CCU_2_LO 0x000000f0 + +#define REG_A4XX_RBBM_PERFCTR_CCU_2_HI 0x000000f1 + +#define REG_A4XX_RBBM_PERFCTR_CCU_3_LO 0x000000f2 + +#define REG_A4XX_RBBM_PERFCTR_CCU_3_HI 0x000000f3 + +#define REG_A4XX_RBBM_PERFCTR_TSE_0_LO 0x000000f4 + +#define REG_A4XX_RBBM_PERFCTR_TSE_0_HI 0x000000f5 + +#define REG_A4XX_RBBM_PERFCTR_TSE_1_LO 0x000000f6 + +#define REG_A4XX_RBBM_PERFCTR_TSE_1_HI 0x000000f7 + +#define REG_A4XX_RBBM_PERFCTR_TSE_2_LO 0x000000f8 + +#define REG_A4XX_RBBM_PERFCTR_TSE_2_HI 0x000000f9 + +#define REG_A4XX_RBBM_PERFCTR_TSE_3_LO 0x000000fa + +#define REG_A4XX_RBBM_PERFCTR_TSE_3_HI 0x000000fb + +#define REG_A4XX_RBBM_PERFCTR_RAS_0_LO 0x000000fc + +#define REG_A4XX_RBBM_PERFCTR_RAS_0_HI 0x000000fd + +#define REG_A4XX_RBBM_PERFCTR_RAS_1_LO 0x000000fe + +#define REG_A4XX_RBBM_PERFCTR_RAS_1_HI 0x000000ff + +#define REG_A4XX_RBBM_PERFCTR_RAS_2_LO 0x00000100 + +#define REG_A4XX_RBBM_PERFCTR_RAS_2_HI 0x00000101 + +#define REG_A4XX_RBBM_PERFCTR_RAS_3_LO 0x00000102 + +#define REG_A4XX_RBBM_PERFCTR_RAS_3_HI 0x00000103 + 
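/*
 * Editorial illustration, not part of the upstream patch: each RBBM
 * performance counter above is a free-running 64-bit count exposed as a
 * _LO/_HI register pair at consecutive offsets.  A hedged sketch of one
 * way to combine the halves -- the reg_read32 callback is hypothetical
 * (the driver has its own register accessors); re-reading HI guards
 * against a low-word carry between the two reads:
 */
static inline uint64_t example_read_perfctr64(uint32_t (*reg_read32)(uint32_t reg),
					      uint32_t lo_reg, uint32_t hi_reg)
{
	uint32_t hi = reg_read32(hi_reg);
	uint32_t lo = reg_read32(lo_reg);
	uint32_t hi2 = reg_read32(hi_reg);

	if (hi2 != hi) {
		/* the low word wrapped between reads; take the later pair */
		hi = hi2;
		lo = reg_read32(lo_reg);
	}
	return ((uint64_t)hi << 32) | lo;
}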
+#define REG_A4XX_RBBM_PERFCTR_UCHE_0_LO 0x00000104 + +#define REG_A4XX_RBBM_PERFCTR_UCHE_0_HI 0x00000105 + +#define REG_A4XX_RBBM_PERFCTR_UCHE_1_LO 0x00000106 + +#define REG_A4XX_RBBM_PERFCTR_UCHE_1_HI 0x00000107 + +#define REG_A4XX_RBBM_PERFCTR_UCHE_2_LO 0x00000108 + +#define REG_A4XX_RBBM_PERFCTR_UCHE_2_HI 0x00000109 + +#define REG_A4XX_RBBM_PERFCTR_UCHE_3_LO 0x0000010a + +#define REG_A4XX_RBBM_PERFCTR_UCHE_3_HI 0x0000010b + +#define REG_A4XX_RBBM_PERFCTR_UCHE_4_LO 0x0000010c + +#define REG_A4XX_RBBM_PERFCTR_UCHE_4_HI 0x0000010d + +#define REG_A4XX_RBBM_PERFCTR_UCHE_5_LO 0x0000010e + +#define REG_A4XX_RBBM_PERFCTR_UCHE_5_HI 0x0000010f + +#define REG_A4XX_RBBM_PERFCTR_UCHE_6_LO 0x00000110 + +#define REG_A4XX_RBBM_PERFCTR_UCHE_6_HI 0x00000111 + +#define REG_A4XX_RBBM_PERFCTR_UCHE_7_LO 0x00000112 + +#define REG_A4XX_RBBM_PERFCTR_UCHE_7_HI 0x00000113 + +#define REG_A4XX_RBBM_PERFCTR_TP_0_LO 0x00000114 + +#define REG_A4XX_RBBM_PERFCTR_TP_0_HI 0x00000115 + +#define REG_A4XX_RBBM_PERFCTR_TP_1_LO 0x00000116 + +#define REG_A4XX_RBBM_PERFCTR_TP_1_HI 0x00000117 + +#define REG_A4XX_RBBM_PERFCTR_TP_2_LO 0x00000118 + +#define REG_A4XX_RBBM_PERFCTR_TP_2_HI 0x00000119 + +#define REG_A4XX_RBBM_PERFCTR_TP_3_LO 0x0000011a + +#define REG_A4XX_RBBM_PERFCTR_TP_3_HI 0x0000011b + +#define REG_A4XX_RBBM_PERFCTR_TP_4_LO 0x0000011c + +#define REG_A4XX_RBBM_PERFCTR_TP_4_HI 0x0000011d + +#define REG_A4XX_RBBM_PERFCTR_TP_5_LO 0x0000011e + +#define REG_A4XX_RBBM_PERFCTR_TP_5_HI 0x0000011f + +#define REG_A4XX_RBBM_PERFCTR_TP_6_LO 0x00000120 + +#define REG_A4XX_RBBM_PERFCTR_TP_6_HI 0x00000121 + +#define REG_A4XX_RBBM_PERFCTR_TP_7_LO 0x00000122 + +#define REG_A4XX_RBBM_PERFCTR_TP_7_HI 0x00000123 + +#define REG_A4XX_RBBM_PERFCTR_SP_0_LO 0x00000124 + +#define REG_A4XX_RBBM_PERFCTR_SP_0_HI 0x00000125 + +#define REG_A4XX_RBBM_PERFCTR_SP_1_LO 0x00000126 + +#define REG_A4XX_RBBM_PERFCTR_SP_1_HI 0x00000127 + +#define REG_A4XX_RBBM_PERFCTR_SP_2_LO 0x00000128 + +#define REG_A4XX_RBBM_PERFCTR_SP_2_HI 0x00000129 + +#define REG_A4XX_RBBM_PERFCTR_SP_3_LO 0x0000012a + +#define REG_A4XX_RBBM_PERFCTR_SP_3_HI 0x0000012b + +#define REG_A4XX_RBBM_PERFCTR_SP_4_LO 0x0000012c + +#define REG_A4XX_RBBM_PERFCTR_SP_4_HI 0x0000012d + +#define REG_A4XX_RBBM_PERFCTR_SP_5_LO 0x0000012e + +#define REG_A4XX_RBBM_PERFCTR_SP_5_HI 0x0000012f + +#define REG_A4XX_RBBM_PERFCTR_SP_6_LO 0x00000130 + +#define REG_A4XX_RBBM_PERFCTR_SP_6_HI 0x00000131 + +#define REG_A4XX_RBBM_PERFCTR_SP_7_LO 0x00000132 + +#define REG_A4XX_RBBM_PERFCTR_SP_7_HI 0x00000133 + +#define REG_A4XX_RBBM_PERFCTR_SP_8_LO 0x00000134 + +#define REG_A4XX_RBBM_PERFCTR_SP_8_HI 0x00000135 + +#define REG_A4XX_RBBM_PERFCTR_SP_9_LO 0x00000136 + +#define REG_A4XX_RBBM_PERFCTR_SP_9_HI 0x00000137 + +#define REG_A4XX_RBBM_PERFCTR_SP_10_LO 0x00000138 + +#define REG_A4XX_RBBM_PERFCTR_SP_10_HI 0x00000139 + +#define REG_A4XX_RBBM_PERFCTR_SP_11_LO 0x0000013a + +#define REG_A4XX_RBBM_PERFCTR_SP_11_HI 0x0000013b + +#define REG_A4XX_RBBM_PERFCTR_RB_0_LO 0x0000013c + +#define REG_A4XX_RBBM_PERFCTR_RB_0_HI 0x0000013d + +#define REG_A4XX_RBBM_PERFCTR_RB_1_LO 0x0000013e + +#define REG_A4XX_RBBM_PERFCTR_RB_1_HI 0x0000013f + +#define REG_A4XX_RBBM_PERFCTR_RB_2_LO 0x00000140 + +#define REG_A4XX_RBBM_PERFCTR_RB_2_HI 0x00000141 + +#define REG_A4XX_RBBM_PERFCTR_RB_3_LO 0x00000142 + +#define REG_A4XX_RBBM_PERFCTR_RB_3_HI 0x00000143 + +#define REG_A4XX_RBBM_PERFCTR_RB_4_LO 0x00000144 + +#define REG_A4XX_RBBM_PERFCTR_RB_4_HI 0x00000145 + +#define REG_A4XX_RBBM_PERFCTR_RB_5_LO 0x00000146 + +#define 
REG_A4XX_RBBM_PERFCTR_RB_5_HI 0x00000147 + +#define REG_A4XX_RBBM_PERFCTR_RB_6_LO 0x00000148 + +#define REG_A4XX_RBBM_PERFCTR_RB_6_HI 0x00000149 + +#define REG_A4XX_RBBM_PERFCTR_RB_7_LO 0x0000014a + +#define REG_A4XX_RBBM_PERFCTR_RB_7_HI 0x0000014b + +#define REG_A4XX_RBBM_PERFCTR_VSC_0_LO 0x0000014c + +#define REG_A4XX_RBBM_PERFCTR_VSC_0_HI 0x0000014d + +#define REG_A4XX_RBBM_PERFCTR_VSC_1_LO 0x0000014e + +#define REG_A4XX_RBBM_PERFCTR_VSC_1_HI 0x0000014f + +#define REG_A4XX_RBBM_PERFCTR_PWR_0_LO 0x00000166 + +#define REG_A4XX_RBBM_PERFCTR_PWR_0_HI 0x00000167 + +#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO 0x00000168 + +#define REG_A4XX_RBBM_PERFCTR_PWR_1_HI 0x00000169 + +#define REG_A4XX_RBBM_ALWAYSON_COUNTER_LO 0x0000016e + +#define REG_A4XX_RBBM_ALWAYSON_COUNTER_HI 0x0000016f + +static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP(uint32_t i0) { return 0x00000068 + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP_REG(uint32_t i0) { return 0x00000068 + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_SP(uint32_t i0) { return 0x0000006c + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_SP_REG(uint32_t i0) { return 0x0000006c + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_SP(uint32_t i0) { return 0x00000070 + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_SP_REG(uint32_t i0) { return 0x00000070 + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_SP(uint32_t i0) { return 0x00000074 + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_SP_REG(uint32_t i0) { return 0x00000074 + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_RB(uint32_t i0) { return 0x00000078 + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_RB_REG(uint32_t i0) { return 0x00000078 + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_RB(uint32_t i0) { return 0x0000007c + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_RB_REG(uint32_t i0) { return 0x0000007c + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(uint32_t i0) { return 0x00000082 + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU_REG(uint32_t i0) { return 0x00000082 + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(uint32_t i0) { return 0x00000086 + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU_REG(uint32_t i0) { return 0x00000086 + 0x1*i0; } + +#define REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM 0x00000080 + +#define REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM 0x00000081 + +#define REG_A4XX_RBBM_CLOCK_CTL_HLSQ 0x0000008a + +#define REG_A4XX_RBBM_CLOCK_HYST_HLSQ 0x0000008b + +#define REG_A4XX_RBBM_CLOCK_DELAY_HLSQ 0x0000008c + +#define REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM 0x0000008d + +static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(uint32_t i0) { return 0x0000008e + 0x1*i0; } + +static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) { return 0x0000008e + 0x1*i0; } + +#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0 0x00000099 + +#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1 0x0000009a + +#define REG_A4XX_RBBM_PERFCTR_CTL 0x00000170 + +#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD0 0x00000171 + +#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD1 0x00000172 + +#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD2 0x00000173 + +#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000174 + +#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000175 + +#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_0 0x00000176 + +#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_1 0x00000177 + +#define 
REG_A4XX_RBBM_PERFCTR_RBBM_SEL_2 0x00000178 + +#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_3 0x00000179 + +#define REG_A4XX_RBBM_GPU_BUSY_MASKED 0x0000017a + +#define REG_A4XX_RBBM_INT_0_STATUS 0x0000017d + +#define REG_A4XX_RBBM_CLOCK_STATUS 0x00000182 + +#define REG_A4XX_RBBM_AHB_STATUS 0x00000189 + +#define REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS 0x0000018c + +#define REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS 0x0000018d + +#define REG_A4XX_RBBM_AHB_ERROR_STATUS 0x0000018f + +#define REG_A4XX_RBBM_STATUS 0x00000191 +#define A4XX_RBBM_STATUS_HI_BUSY 0x00000001 +#define A4XX_RBBM_STATUS_CP_ME_BUSY 0x00000002 +#define A4XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004 +#define A4XX_RBBM_STATUS_CP_NRT_BUSY 0x00004000 +#define A4XX_RBBM_STATUS_VBIF_BUSY 0x00008000 +#define A4XX_RBBM_STATUS_TSE_BUSY 0x00010000 +#define A4XX_RBBM_STATUS_RAS_BUSY 0x00020000 +#define A4XX_RBBM_STATUS_RB_BUSY 0x00040000 +#define A4XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000 +#define A4XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000 +#define A4XX_RBBM_STATUS_VFD_BUSY 0x00200000 +#define A4XX_RBBM_STATUS_VPC_BUSY 0x00400000 +#define A4XX_RBBM_STATUS_UCHE_BUSY 0x00800000 +#define A4XX_RBBM_STATUS_SP_BUSY 0x01000000 +#define A4XX_RBBM_STATUS_TPL1_BUSY 0x02000000 +#define A4XX_RBBM_STATUS_MARB_BUSY 0x04000000 +#define A4XX_RBBM_STATUS_VSC_BUSY 0x08000000 +#define A4XX_RBBM_STATUS_ARB_BUSY 0x10000000 +#define A4XX_RBBM_STATUS_HLSQ_BUSY 0x20000000 +#define A4XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000 +#define A4XX_RBBM_STATUS_GPU_BUSY 0x80000000 + +#define REG_A4XX_RBBM_INTERFACE_RRDY_STATUS5 0x0000019f + +#define REG_A4XX_RBBM_POWER_STATUS 0x000001b0 +#define A4XX_RBBM_POWER_STATUS_SP_TP_PWR_ON 0x00100000 + +#define REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2 0x000001b8 + +#define REG_A4XX_CP_SCRATCH_UMASK 0x00000228 + +#define REG_A4XX_CP_SCRATCH_ADDR 0x00000229 + +#define REG_A4XX_CP_RB_BASE 0x00000200 + +#define REG_A4XX_CP_RB_CNTL 0x00000201 + +#define REG_A4XX_CP_RB_WPTR 0x00000205 + +#define REG_A4XX_CP_RB_RPTR_ADDR 0x00000203 + +#define REG_A4XX_CP_RB_RPTR 0x00000204 + +#define REG_A4XX_CP_IB1_BASE 0x00000206 + +#define REG_A4XX_CP_IB1_BUFSZ 0x00000207 + +#define REG_A4XX_CP_IB2_BASE 0x00000208 + +#define REG_A4XX_CP_IB2_BUFSZ 0x00000209 + +#define REG_A4XX_CP_ME_NRT_ADDR 0x0000020c + +#define REG_A4XX_CP_ME_NRT_DATA 0x0000020d + +#define REG_A4XX_CP_ME_RB_DONE_DATA 0x00000217 + +#define REG_A4XX_CP_QUEUE_THRESH2 0x00000219 + +#define REG_A4XX_CP_MERCIU_SIZE 0x0000021b + +#define REG_A4XX_CP_ROQ_ADDR 0x0000021c + +#define REG_A4XX_CP_ROQ_DATA 0x0000021d + +#define REG_A4XX_CP_MEQ_ADDR 0x0000021e + +#define REG_A4XX_CP_MEQ_DATA 0x0000021f + +#define REG_A4XX_CP_MERCIU_ADDR 0x00000220 + +#define REG_A4XX_CP_MERCIU_DATA 0x00000221 + +#define REG_A4XX_CP_MERCIU_DATA2 0x00000222 + +#define REG_A4XX_CP_PFP_UCODE_ADDR 0x00000223 + +#define REG_A4XX_CP_PFP_UCODE_DATA 0x00000224 + +#define REG_A4XX_CP_ME_RAM_WADDR 0x00000225 + +#define REG_A4XX_CP_ME_RAM_RADDR 0x00000226 + +#define REG_A4XX_CP_ME_RAM_DATA 0x00000227 + +#define REG_A4XX_CP_PREEMPT 0x0000022a + +#define REG_A4XX_CP_CNTL 0x0000022c + +#define REG_A4XX_CP_ME_CNTL 0x0000022d + +#define REG_A4XX_CP_DEBUG 0x0000022e + +#define REG_A4XX_CP_DEBUG_ECO_CONTROL 0x00000231 + +#define REG_A4XX_CP_DRAW_STATE_ADDR 0x00000232 + +static inline uint32_t REG_A4XX_CP_PROTECT(uint32_t i0) { return 0x00000240 + 0x1*i0; } + +static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 + 0x1*i0; } +#define A4XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff +#define A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0 +static inline 
uint32_t A4XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val) +{ + return ((val) << A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A4XX_CP_PROTECT_REG_BASE_ADDR__MASK; +} +#define A4XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000 +#define A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24 +static inline uint32_t A4XX_CP_PROTECT_REG_MASK_LEN(uint32_t val) +{ + return ((val) << A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A4XX_CP_PROTECT_REG_MASK_LEN__MASK; +} +#define A4XX_CP_PROTECT_REG_TRAP_WRITE__MASK 0x20000000 +#define A4XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT 29 +static inline uint32_t A4XX_CP_PROTECT_REG_TRAP_WRITE(uint32_t val) +{ + return ((val) << A4XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT) & A4XX_CP_PROTECT_REG_TRAP_WRITE__MASK; +} +#define A4XX_CP_PROTECT_REG_TRAP_READ__MASK 0x40000000 +#define A4XX_CP_PROTECT_REG_TRAP_READ__SHIFT 30 +static inline uint32_t A4XX_CP_PROTECT_REG_TRAP_READ(uint32_t val) +{ + return ((val) << A4XX_CP_PROTECT_REG_TRAP_READ__SHIFT) & A4XX_CP_PROTECT_REG_TRAP_READ__MASK; +} + +#define REG_A4XX_CP_PROTECT_CTRL 0x00000250 + +#define REG_A4XX_CP_ST_BASE 0x000004c0 + +#define REG_A4XX_CP_STQ_AVAIL 0x000004ce + +#define REG_A4XX_CP_MERCIU_STAT 0x000004d0 + +#define REG_A4XX_CP_WFI_PEND_CTR 0x000004d2 + +#define REG_A4XX_CP_HW_FAULT 0x000004d8 + +#define REG_A4XX_CP_PROTECT_STATUS 0x000004da + +#define REG_A4XX_CP_EVENTS_IN_FLIGHT 0x000004dd + +#define REG_A4XX_CP_PERFCTR_CP_SEL_0 0x00000500 + +#define REG_A4XX_CP_PERFCTR_CP_SEL_1 0x00000501 + +#define REG_A4XX_CP_PERFCTR_CP_SEL_2 0x00000502 + +#define REG_A4XX_CP_PERFCTR_CP_SEL_3 0x00000503 + +#define REG_A4XX_CP_PERFCTR_CP_SEL_4 0x00000504 + +#define REG_A4XX_CP_PERFCTR_CP_SEL_5 0x00000505 + +#define REG_A4XX_CP_PERFCTR_CP_SEL_6 0x00000506 + +#define REG_A4XX_CP_PERFCTR_CP_SEL_7 0x00000507 + +#define REG_A4XX_CP_PERFCOMBINER_SELECT 0x0000050b + +static inline uint32_t REG_A4XX_CP_SCRATCH(uint32_t i0) { return 0x00000578 + 0x1*i0; } + +static inline uint32_t REG_A4XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000578 + 0x1*i0; } + +#define REG_A4XX_SP_VS_STATUS 0x00000ec0 + +#define REG_A4XX_SP_MODE_CONTROL 0x00000ec3 + +#define REG_A4XX_SP_PERFCTR_SP_SEL_0 0x00000ec4 + +#define REG_A4XX_SP_PERFCTR_SP_SEL_1 0x00000ec5 + +#define REG_A4XX_SP_PERFCTR_SP_SEL_2 0x00000ec6 + +#define REG_A4XX_SP_PERFCTR_SP_SEL_3 0x00000ec7 + +#define REG_A4XX_SP_PERFCTR_SP_SEL_4 0x00000ec8 + +#define REG_A4XX_SP_PERFCTR_SP_SEL_5 0x00000ec9 + +#define REG_A4XX_SP_PERFCTR_SP_SEL_6 0x00000eca + +#define REG_A4XX_SP_PERFCTR_SP_SEL_7 0x00000ecb + +#define REG_A4XX_SP_PERFCTR_SP_SEL_8 0x00000ecc + +#define REG_A4XX_SP_PERFCTR_SP_SEL_9 0x00000ecd + +#define REG_A4XX_SP_PERFCTR_SP_SEL_10 0x00000ece + +#define REG_A4XX_SP_PERFCTR_SP_SEL_11 0x00000ecf + +#define REG_A4XX_SP_SP_CTRL_REG 0x000022c0 +#define A4XX_SP_SP_CTRL_REG_BINNING_PASS 0x00080000 + +#define REG_A4XX_SP_INSTR_CACHE_CTRL 0x000022c1 +#define A4XX_SP_INSTR_CACHE_CTRL_VS_BUFFER 0x00000080 +#define A4XX_SP_INSTR_CACHE_CTRL_FS_BUFFER 0x00000100 +#define A4XX_SP_INSTR_CACHE_CTRL_INSTR_BUFFER 0x00000400 + +#define REG_A4XX_SP_VS_CTRL_REG0 0x000022c4 +#define A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001 +#define A4XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0 +static inline uint32_t A4XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val) +{ + return ((val) << A4XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK; +} +#define A4XX_SP_VS_CTRL_REG0_VARYING 0x00000002 +#define A4XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004 +#define A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 +#define 
A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 +static inline uint32_t A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) +{ + return ((val) << A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK; +} +#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 +#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 +static inline uint32_t A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) +{ + return ((val) << A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK; +} +#define A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000 +#define A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18 +static inline uint32_t A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val) +{ + return ((val) << A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK; +} +#define A4XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000 +#define A4XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20 +static inline uint32_t A4XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) +{ + return ((val) << A4XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_VS_CTRL_REG0_THREADSIZE__MASK; +} +#define A4XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000 +#define A4XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000 + +#define REG_A4XX_SP_VS_CTRL_REG1 0x000022c5 +#define A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK 0x000000ff +#define A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT 0 +static inline uint32_t A4XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val) +{ + return ((val) << A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK; +} +#define A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x7f000000 +#define A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24 +static inline uint32_t A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val) +{ + return ((val) << A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK; +} + +#define REG_A4XX_SP_VS_PARAM_REG 0x000022c6 +#define A4XX_SP_VS_PARAM_REG_POSREGID__MASK 0x000000ff +#define A4XX_SP_VS_PARAM_REG_POSREGID__SHIFT 0 +static inline uint32_t A4XX_SP_VS_PARAM_REG_POSREGID(uint32_t val) +{ + return ((val) << A4XX_SP_VS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_VS_PARAM_REG_POSREGID__MASK; +} +#define A4XX_SP_VS_PARAM_REG_PSIZEREGID__MASK 0x0000ff00 +#define A4XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT 8 +static inline uint32_t A4XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val) +{ + return ((val) << A4XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A4XX_SP_VS_PARAM_REG_PSIZEREGID__MASK; +} +#define A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0xfff00000 +#define A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20 +static inline uint32_t A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val) +{ + return ((val) << A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK; +} + +static inline uint32_t REG_A4XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; } + +static inline uint32_t REG_A4XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; } +#define A4XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff +#define A4XX_SP_VS_OUT_REG_A_REGID__SHIFT 0 +static inline uint32_t A4XX_SP_VS_OUT_REG_A_REGID(uint32_t val) +{ + return ((val) << A4XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_VS_OUT_REG_A_REGID__MASK; +} +#define A4XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00 +#define A4XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9 +static inline uint32_t A4XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val) +{ + return ((val) << A4XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_VS_OUT_REG_A_COMPMASK__MASK; 
+}
+#define A4XX_SP_VS_OUT_REG_B_REGID__MASK 0x01ff0000
+#define A4XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A4XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A4XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
+#define A4XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
+static inline uint32_t A4XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d8 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d8 + 0x1*i0; }
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A4XX_SP_VS_OBJ_OFFSET_REG 0x000022e0
+#define A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_VS_OBJ_START 0x000022e1
+
+#define REG_A4XX_SP_VS_PVT_MEM_PARAM 0x000022e2
+
+#define REG_A4XX_SP_VS_PVT_MEM_ADDR 0x000022e3
+
+#define REG_A4XX_SP_VS_LENGTH_REG 0x000022e5
+
+#define REG_A4XX_SP_FS_CTRL_REG0 0x000022e8
+#define A4XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A4XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_VARYING 0x00000002
+#define A4XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
+#define A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A4XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A4XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
+
+#define REG_A4XX_SP_FS_CTRL_REG1 0x000022e9
+#define A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG1_FACENESS 0x00080000
+#define A4XX_SP_FS_CTRL_REG1_VARYING 0x00100000
+#define A4XX_SP_FS_CTRL_REG1_FRAGCOORD 0x00200000
+
+#define REG_A4XX_SP_FS_OBJ_OFFSET_REG 0x000022ea
+#define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_FS_OBJ_START 0x000022eb
+
+#define REG_A4XX_SP_FS_PVT_MEM_PARAM 0x000022ec
+
+#define REG_A4XX_SP_FS_PVT_MEM_ADDR 0x000022ed
+
+#define REG_A4XX_SP_FS_LENGTH_REG 0x000022ef
+
+#define REG_A4XX_SP_FS_OUTPUT_REG 0x000022f0
+#define A4XX_SP_FS_OUTPUT_REG_MRT__MASK 0x0000000f
+#define A4XX_SP_FS_OUTPUT_REG_MRT__SHIFT 0
+static inline uint32_t A4XX_SP_FS_OUTPUT_REG_MRT(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_OUTPUT_REG_MRT__SHIFT) & A4XX_SP_FS_OUTPUT_REG_MRT__MASK;
+}
+#define A4XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080
+#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00
+#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8
+static inline uint32_t A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK;
+}
+#define A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__MASK 0xff000000
+#define A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__SHIFT 24
+static inline uint32_t A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__SHIFT) & A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_FS_MRT(uint32_t i0) { return 0x000022f1 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f1 + 0x1*i0; }
+#define A4XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff
+#define A4XX_SP_FS_MRT_REG_REGID__SHIFT 0
+static inline uint32_t A4XX_SP_FS_MRT_REG_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_MRT_REG_REGID__SHIFT) & A4XX_SP_FS_MRT_REG_REGID__MASK;
+}
+#define A4XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100
+#define A4XX_SP_FS_MRT_REG_COLOR_SINT 0x00000400
+#define A4XX_SP_FS_MRT_REG_COLOR_UINT 0x00000800
+#define A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK 0x0003f000
+#define A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT 12
+static inline uint32_t A4XX_SP_FS_MRT_REG_MRTFORMAT(enum a4xx_color_fmt val)
+{
+ return ((val) << A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT) & A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK;
+}
+#define A4XX_SP_FS_MRT_REG_COLOR_SRGB 0x00040000
+
+#define REG_A4XX_SP_CS_CTRL_REG0 0x00002300
+#define A4XX_SP_CS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A4XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A4XX_SP_CS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A4XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_CS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A4XX_SP_CS_CTRL_REG0_VARYING 0x00000002
+#define A4XX_SP_CS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
+#define A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
+static inline uint32_t A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
+{
+ return ((val) << A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP__MASK;
+}
+#define A4XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A4XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A4XX_SP_CS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A4XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_CS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A4XX_SP_CS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A4XX_SP_CS_CTRL_REG0_PIXLODENABLE 0x00400000
+
+#define REG_A4XX_SP_CS_OBJ_OFFSET_REG 0x00002301
+
+#define REG_A4XX_SP_CS_OBJ_START 0x00002302
+
+#define REG_A4XX_SP_CS_PVT_MEM_PARAM 0x00002303
+
+#define REG_A4XX_SP_CS_PVT_MEM_ADDR 0x00002304
+
+#define REG_A4XX_SP_CS_PVT_MEM_SIZE 0x00002305
+
+#define REG_A4XX_SP_CS_LENGTH_REG 0x00002306
+
+#define REG_A4XX_SP_HS_OBJ_OFFSET_REG 0x0000230d
+#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_HS_OBJ_START 0x0000230e
+
+#define REG_A4XX_SP_HS_PVT_MEM_PARAM 0x0000230f
+
+#define REG_A4XX_SP_HS_PVT_MEM_ADDR 0x00002310
+
+#define REG_A4XX_SP_HS_LENGTH_REG 0x00002312
+
+#define REG_A4XX_SP_DS_PARAM_REG 0x0000231a
+#define A4XX_SP_DS_PARAM_REG_POSREGID__MASK 0x000000ff
+#define A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT 0
+static inline uint32_t A4XX_SP_DS_PARAM_REG_POSREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_DS_PARAM_REG_POSREGID__MASK;
+}
+#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK 0xfff00000
+#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT 20
+static inline uint32_t A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_DS_OUT(uint32_t i0) { return 0x0000231b + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_DS_OUT_REG(uint32_t i0) { return 0x0000231b + 0x1*i0; }
+#define A4XX_SP_DS_OUT_REG_A_REGID__MASK 0x000001ff
+#define A4XX_SP_DS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A4XX_SP_DS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_A_REGID__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK 0x00001e00
+#define A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT 9
+static inline uint32_t A4XX_SP_DS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_B_REGID__MASK 0x01ff0000
+#define A4XX_SP_DS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A4XX_SP_DS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_B_REGID__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK 0x1e000000
+#define A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT 25
+static inline uint32_t A4XX_SP_DS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_DS_VPC_DST(uint32_t i0) { return 0x0000232c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_DS_VPC_DST_REG(uint32_t i0) { return 0x0000232c + 0x1*i0; }
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A4XX_SP_DS_OBJ_OFFSET_REG 0x00002334
+#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_DS_OBJ_START 0x00002335
+
+#define REG_A4XX_SP_DS_PVT_MEM_PARAM 0x00002336
+
+#define REG_A4XX_SP_DS_PVT_MEM_ADDR 0x00002337
+
+#define REG_A4XX_SP_DS_LENGTH_REG 0x00002339
+
+#define REG_A4XX_SP_GS_PARAM_REG 0x00002341
+#define A4XX_SP_GS_PARAM_REG_POSREGID__MASK 0x000000ff
+#define A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT 0
+static inline uint32_t A4XX_SP_GS_PARAM_REG_POSREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_POSREGID__MASK;
+}
+#define A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK 0x0000ff00
+#define A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT 8
+static inline uint32_t A4XX_SP_GS_PARAM_REG_PRIMREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK;
+}
+#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK 0xfff00000
+#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT 20
+static inline uint32_t A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_GS_OUT(uint32_t i0) { return 0x00002342 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_GS_OUT_REG(uint32_t i0) { return 0x00002342 + 0x1*i0; }
+#define A4XX_SP_GS_OUT_REG_A_REGID__MASK 0x000001ff
+#define A4XX_SP_GS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A4XX_SP_GS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_A_REGID__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK 0x00001e00
+#define A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT 9
+static inline uint32_t A4XX_SP_GS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_B_REGID__MASK 0x01ff0000
+#define A4XX_SP_GS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A4XX_SP_GS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_B_REGID__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK 0x1e000000
+#define A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT 25
+static inline uint32_t A4XX_SP_GS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_GS_VPC_DST(uint32_t i0) { return 0x00002353 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_GS_VPC_DST_REG(uint32_t i0) { return 0x00002353 + 0x1*i0; }
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A4XX_SP_GS_OBJ_OFFSET_REG 0x0000235b
+#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_GS_OBJ_START 0x0000235c
+
+#define REG_A4XX_SP_GS_PVT_MEM_PARAM 0x0000235d
+
+#define REG_A4XX_SP_GS_PVT_MEM_ADDR 0x0000235e
+
+#define REG_A4XX_SP_GS_LENGTH_REG 0x00002360
+
+#define REG_A4XX_VPC_DEBUG_RAM_SEL 0x00000e60
+
+#define REG_A4XX_VPC_DEBUG_RAM_READ 0x00000e61
+
+#define REG_A4XX_VPC_DEBUG_ECO_CONTROL 0x00000e64
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_0 0x00000e65
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_1 0x00000e66
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_2 0x00000e67
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_3 0x00000e68
+
+#define REG_A4XX_VPC_ATTR 0x00002140
+#define A4XX_VPC_ATTR_TOTALATTR__MASK 0x000001ff
+#define A4XX_VPC_ATTR_TOTALATTR__SHIFT 0
+static inline uint32_t A4XX_VPC_ATTR_TOTALATTR(uint32_t val)
+{
+ return ((val) << A4XX_VPC_ATTR_TOTALATTR__SHIFT) & A4XX_VPC_ATTR_TOTALATTR__MASK;
+}
+#define A4XX_VPC_ATTR_PSIZE 0x00000200
+#define A4XX_VPC_ATTR_THRDASSIGN__MASK 0x00003000
+#define A4XX_VPC_ATTR_THRDASSIGN__SHIFT 12
+static inline uint32_t A4XX_VPC_ATTR_THRDASSIGN(uint32_t val)
+{
+ return ((val) << A4XX_VPC_ATTR_THRDASSIGN__SHIFT) & A4XX_VPC_ATTR_THRDASSIGN__MASK;
+}
+#define A4XX_VPC_ATTR_ENABLE 0x02000000
+
+#define REG_A4XX_VPC_PACK 0x00002141
+#define A4XX_VPC_PACK_NUMBYPASSVAR__MASK 0x000000ff
+#define A4XX_VPC_PACK_NUMBYPASSVAR__SHIFT 0
+static inline uint32_t A4XX_VPC_PACK_NUMBYPASSVAR(uint32_t val)
+{
+ return ((val) << A4XX_VPC_PACK_NUMBYPASSVAR__SHIFT) & A4XX_VPC_PACK_NUMBYPASSVAR__MASK;
+}
+#define A4XX_VPC_PACK_NUMFPNONPOSVAR__MASK 0x0000ff00
+#define A4XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT 8
+static inline uint32_t A4XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t val)
+{
+ return ((val) << A4XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT) & A4XX_VPC_PACK_NUMFPNONPOSVAR__MASK;
+}
+#define A4XX_VPC_PACK_NUMNONPOSVSVAR__MASK 0x00ff0000
+#define A4XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT 16
+static inline uint32_t A4XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
+{
+ return ((val) << A4XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A4XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002142 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002142 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000214a + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000214a + 0x1*i0; }
+
+#define REG_A4XX_VPC_SO_FLUSH_WADDR_3 0x0000216e
+
+#define REG_A4XX_VSC_BIN_SIZE 0x00000c00
+#define A4XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
+#define A4XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A4XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A4XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A4XX_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A4XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
+#define A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5
+static inline uint32_t A4XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A4XX_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A4XX_VSC_SIZE_ADDRESS 0x00000c01
+
+#define REG_A4XX_VSC_SIZE_ADDRESS2 0x00000c02
+
+#define REG_A4XX_VSC_DEBUG_ECO_CONTROL 0x00000c03
+
+static inline uint32_t REG_A4XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c08 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000c08 + 0x1*i0; }
+#define A4XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
+#define A4XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
+{
+ return ((val) << A4XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_X__MASK;
+}
+#define A4XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00
+#define A4XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val)
+{
+ return ((val) << A4XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_Y__MASK;
+}
+#define A4XX_VSC_PIPE_CONFIG_REG_W__MASK 0x00f00000
+#define A4XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_W(uint32_t val)
+{
+ return ((val) << A4XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_W__MASK;
+}
+#define A4XX_VSC_PIPE_CONFIG_REG_H__MASK 0x0f000000
+#define A4XX_VSC_PIPE_CONFIG_REG_H__SHIFT 24
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
+{
+ return ((val) << A4XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_H__MASK;
+}
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_ADDRESS_REG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c18 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0x00000c18 + 0x1*i0; }
+
+#define REG_A4XX_VSC_PIPE_PARTIAL_POSN_1 0x00000c41
+
+#define REG_A4XX_VSC_PERFCTR_VSC_SEL_0 0x00000c50
+
+#define REG_A4XX_VSC_PERFCTR_VSC_SEL_1 0x00000c51
+
+#define REG_A4XX_VFD_DEBUG_CONTROL 0x00000e40
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_0 0x00000e43
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_1 0x00000e44
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_2 0x00000e45
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_3 0x00000e46
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_4 0x00000e47
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_5 0x00000e48
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_6 0x00000e49
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_7 0x00000e4a
+
+#define REG_A4XX_VGT_CL_INITIATOR 0x000021d0
+
+#define REG_A4XX_VGT_EVENT_INITIATOR 0x000021d9
+
+#define REG_A4XX_VFD_CONTROL_0 0x00002200
+#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x000000ff
+#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0
+static inline uint32_t A4XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT) & A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK;
+}
+#define A4XX_VFD_CONTROL_0_BYPASSATTROVS__MASK 0x0001fe00
+#define A4XX_VFD_CONTROL_0_BYPASSATTROVS__SHIFT 9
+static inline uint32_t A4XX_VFD_CONTROL_0_BYPASSATTROVS(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_0_BYPASSATTROVS__SHIFT) & A4XX_VFD_CONTROL_0_BYPASSATTROVS__MASK;
+}
+#define A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x03f00000
+#define A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 20
+static inline uint32_t A4XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
+}
+#define A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK 0xfc000000
+#define A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT 26
+static inline uint32_t A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT) & A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK;
+}
+
+#define REG_A4XX_VFD_CONTROL_1 0x00002201
+#define A4XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000ffff
+#define A4XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
+static inline uint32_t A4XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A4XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
+}
+#define A4XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
+#define A4XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
+static inline uint32_t A4XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A4XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+#define A4XX_VFD_CONTROL_1_REGID4INST__MASK 0xff000000
+#define A4XX_VFD_CONTROL_1_REGID4INST__SHIFT 24
+static inline uint32_t A4XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A4XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+
+#define REG_A4XX_VFD_CONTROL_2 0x00002202
+
+#define REG_A4XX_VFD_CONTROL_3 0x00002203
+#define A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK 0x0000ff00
+#define A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT 8
+static inline uint32_t A4XX_VFD_CONTROL_3_REGID_VTXCNT(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT) & A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK;
+}
+#define A4XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000
+#define A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16
+static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSX__MASK;
+}
+#define A4XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000
+#define A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24
+static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSY__MASK;
+}
+
+#define REG_A4XX_VFD_CONTROL_4 0x00002204
+
+#define REG_A4XX_VFD_INDEX_OFFSET 0x00002208
+
+static inline uint32_t REG_A4XX_VFD_FETCH(uint32_t i0) { return 0x0000220a + 0x4*i0; }
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x0000220a + 0x4*i0; }
+#define A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f
+#define A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT 0
+static inline uint32_t A4XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK;
+}
+#define A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0001ff80
+#define A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7
+static inline uint32_t A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK;
+}
+#define A4XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00080000
+#define A4XX_VFD_FETCH_INSTR_0_INSTANCED 0x00100000
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x0000220b + 0x4*i0; }
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_2(uint32_t i0) { return 0x0000220c + 0x4*i0; }
+#define A4XX_VFD_FETCH_INSTR_2_SIZE__MASK 0xffffffff
+#define A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT 0
+static inline uint32_t A4XX_VFD_FETCH_INSTR_2_SIZE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT) & A4XX_VFD_FETCH_INSTR_2_SIZE__MASK;
+}
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_3(uint32_t i0) { return 0x0000220d + 0x4*i0; }
+#define A4XX_VFD_FETCH_INSTR_3_STEPRATE__MASK 0x000001ff
+#define A4XX_VFD_FETCH_INSTR_3_STEPRATE__SHIFT 0
+static inline uint32_t A4XX_VFD_FETCH_INSTR_3_STEPRATE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_FETCH_INSTR_3_STEPRATE__SHIFT) & A4XX_VFD_FETCH_INSTR_3_STEPRATE__MASK;
+}
+
+static inline uint32_t REG_A4XX_VFD_DECODE(uint32_t i0) { return 0x0000228a + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000228a + 0x1*i0; }
+#define A4XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f
+#define A4XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A4XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A4XX_VFD_DECODE_INSTR_WRITEMASK__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_CONSTFILL 0x00000010
+#define A4XX_VFD_DECODE_INSTR_FORMAT__MASK 0x00000fc0
+#define A4XX_VFD_DECODE_INSTR_FORMAT__SHIFT 6
+static inline uint32_t A4XX_VFD_DECODE_INSTR_FORMAT(enum a4xx_vtx_fmt val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A4XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_REGID__MASK 0x000ff000
+#define A4XX_VFD_DECODE_INSTR_REGID__SHIFT 12
+static inline uint32_t A4XX_VFD_DECODE_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_REGID__SHIFT) & A4XX_VFD_DECODE_INSTR_REGID__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_INT 0x00100000
+#define A4XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000
+#define A4XX_VFD_DECODE_INSTR_SWAP__SHIFT 22
+static inline uint32_t A4XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A4XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
+#define A4XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
+static inline uint32_t A4XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A4XX_VFD_DECODE_INSTR_SHIFTCNT__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_LASTCOMPVALID 0x20000000
+#define A4XX_VFD_DECODE_INSTR_SWITCHNEXT 0x40000000
+
+#define REG_A4XX_TPL1_DEBUG_ECO_CONTROL 0x00000f00
+
+#define REG_A4XX_TPL1_TP_MODE_CONTROL 0x00000f03
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_0 0x00000f04
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_1 0x00000f05
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_2 0x00000f06
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_3 0x00000f07
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_4 0x00000f08
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_5 0x00000f09
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_6 0x00000f0a
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_7 0x00000f0b
+
+#define REG_A4XX_TPL1_TP_TEX_OFFSET 0x00002380
+
+#define REG_A4XX_TPL1_TP_TEX_COUNT 0x00002381
+#define A4XX_TPL1_TP_TEX_COUNT_VS__MASK 0x000000ff
+#define A4XX_TPL1_TP_TEX_COUNT_VS__SHIFT 0
+static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_VS(uint32_t val)
+{
+ return ((val) << A4XX_TPL1_TP_TEX_COUNT_VS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_VS__MASK;
+}
+#define A4XX_TPL1_TP_TEX_COUNT_HS__MASK 0x0000ff00
+#define A4XX_TPL1_TP_TEX_COUNT_HS__SHIFT 8
+static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_HS(uint32_t val)
+{
+ return ((val) << A4XX_TPL1_TP_TEX_COUNT_HS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_HS__MASK;
+}
+#define A4XX_TPL1_TP_TEX_COUNT_DS__MASK 0x00ff0000
+#define A4XX_TPL1_TP_TEX_COUNT_DS__SHIFT 16
+static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_DS(uint32_t val)
+{
+ return ((val) << A4XX_TPL1_TP_TEX_COUNT_DS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_DS__MASK;
+}
+#define A4XX_TPL1_TP_TEX_COUNT_GS__MASK 0xff000000
+#define A4XX_TPL1_TP_TEX_COUNT_GS__SHIFT 24
+static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_GS(uint32_t val)
+{
+ return ((val) << A4XX_TPL1_TP_TEX_COUNT_GS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_GS__MASK;
+}
+
+#define REG_A4XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR 0x00002384
+
+#define REG_A4XX_TPL1_TP_HS_BORDER_COLOR_BASE_ADDR 0x00002387
+
+#define REG_A4XX_TPL1_TP_DS_BORDER_COLOR_BASE_ADDR 0x0000238a
+
+#define REG_A4XX_TPL1_TP_GS_BORDER_COLOR_BASE_ADDR 0x0000238d
+
+#define REG_A4XX_TPL1_TP_FS_TEX_COUNT 0x000023a0
+
+#define REG_A4XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x000023a1
+
+#define REG_A4XX_TPL1_TP_CS_BORDER_COLOR_BASE_ADDR 0x000023a4
+
+#define REG_A4XX_TPL1_TP_CS_SAMPLER_BASE_ADDR 0x000023a5
+
+#define REG_A4XX_TPL1_TP_CS_TEXMEMOBJ_BASE_ADDR 0x000023a6
+
+#define REG_A4XX_GRAS_TSE_STATUS 0x00000c80
+
+#define REG_A4XX_GRAS_DEBUG_ECO_CONTROL 0x00000c81
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c88
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c89
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c8a
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c8b
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c8c
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c8d
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c8e
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c8f
+
+#define REG_A4XX_GRAS_CL_CLIP_CNTL 0x00002000
+#define A4XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00008000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZNEAR_CLIP_DISABLE 0x00010000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000
+
+#define REG_A4XX_GRAS_CNTL 0x00002003
+#define A4XX_GRAS_CNTL_IJ_PERSP 0x00000001
+#define A4XX_GRAS_CNTL_IJ_LINEAR 0x00000002
+
+#define REG_A4XX_GRAS_CL_GB_CLIP_ADJ 0x00002004
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK;
+}
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A4XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_XOFFSET_0 0x00002008
+#define A4XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_XSCALE_0 0x00002009
+#define A4XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_YOFFSET_0 0x0000200a
+#define A4XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_YSCALE_0 0x0000200b
+#define A4XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_ZOFFSET_0 0x0000200c
+#define A4XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_ZSCALE_0 0x0000200d
+#define A4XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_POINT_MINMAX 0x00002070
+#define A4XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A4XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A4XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A4XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A4XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A4XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A4XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_POINT_SIZE 0x00002071
+#define A4XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A4XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((int32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_SIZE__SHIFT) & A4XX_GRAS_SU_POINT_SIZE__MASK;
+}
+
+#define REG_A4XX_GRAS_ALPHA_CONTROL 0x00002073
+#define A4XX_GRAS_ALPHA_CONTROL_ALPHA_TEST_ENABLE 0x00000004
+#define A4XX_GRAS_ALPHA_CONTROL_FORCE_FRAGZ_TO_FS 0x00000008
+
+#define REG_A4XX_GRAS_SU_POLY_OFFSET_SCALE 0x00002074
+#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_POLY_OFFSET_OFFSET 0x00002075
+#define A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_POLY_OFFSET_CLAMP 0x00002076
+#define A4XX_GRAS_SU_POLY_OFFSET_CLAMP__MASK 0xffffffff
+#define A4XX_GRAS_SU_POLY_OFFSET_CLAMP__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_CLAMP(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_CLAMP__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_CLAMP__MASK;
+}
+
+#define REG_A4XX_GRAS_DEPTH_CONTROL 0x00002077
+#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK 0x00000003
+#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT 0
+static inline uint32_t A4XX_GRAS_DEPTH_CONTROL_FORMAT(enum a4xx_depth_format val)
+{
+ return ((val) << A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT) & A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_MODE_CONTROL 0x00002078
+#define A4XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
+#define A4XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
+#define A4XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004
+#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8
+#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
+{
+ return ((((int32_t)(val * 4.0))) << A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
+}
+#define A4XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
+#define A4XX_GRAS_SU_MODE_CONTROL_MSAA_ENABLE 0x00002000
+#define A4XX_GRAS_SU_MODE_CONTROL_RENDERING_PASS 0x00100000
+
+#define REG_A4XX_GRAS_SC_CONTROL 0x0000207b
+#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x0000000c
+#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 2
+static inline uint32_t A4XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
+{
+ return ((val) << A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK;
+}
+#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000380
+#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 7
+static inline uint32_t A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK;
+}
+#define A4XX_GRAS_SC_CONTROL_MSAA_DISABLE 0x00000800
+#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000
+#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12
+static inline uint32_t A4XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_TL 0x0000207c
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
+}
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_BR 0x0000207d
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
+}
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000209c
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_WINDOW_SCISSOR_TL 0x0000209d
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_BR 0x0000209e
+#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_BR_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__MASK;
+}
+#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_TL 0x0000209f
+#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__MASK;
+}
+#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__MASK;
+}
+
+#define REG_A4XX_UCHE_CACHE_MODE_CONTROL 0x00000e80
+
+#define REG_A4XX_UCHE_TRAP_BASE_LO 0x00000e83
+
+#define REG_A4XX_UCHE_TRAP_BASE_HI 0x00000e84
+
+#define REG_A4XX_UCHE_CACHE_STATUS 0x00000e88
+
+#define REG_A4XX_UCHE_INVALIDATE0 0x00000e8a
+
+#define REG_A4XX_UCHE_INVALIDATE1 0x00000e8b
+
+#define REG_A4XX_UCHE_CACHE_WAYS_VFD 0x00000e8c
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000e8e
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000e8f
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000e90
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000e91
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000e92
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000e93
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000e94
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000e95
+
+#define REG_A4XX_HLSQ_TIMEOUT_THRESHOLD 0x00000e00
+
+#define REG_A4XX_HLSQ_DEBUG_ECO_CONTROL 0x00000e04
+
+#define REG_A4XX_HLSQ_MODE_CONTROL 0x00000e05
+
+#define REG_A4XX_HLSQ_PERF_PIPE_MASK 0x00000e0e
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e06
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e07
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e08
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e09
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e0a
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e0b
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e0c
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e0d
+
+#define REG_A4XX_HLSQ_CONTROL_0_REG 0x000023c0
+#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
+#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
+static inline uint32_t A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
+#define A4XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
+#define A4XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
+#define A4XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
+#define A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000
+#define A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27
+static inline uint32_t A4XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT) & A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
+#define A4XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
+#define A4XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
+#define A4XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000
+
+#define REG_A4XX_HLSQ_CONTROL_1_REG 0x000023c1
+#define A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x00000040
+#define A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
+static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
+#define A4XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
+#define A4XX_HLSQ_CONTROL_1_REG_COORDREGID__MASK 0x00ff0000
+#define A4XX_HLSQ_CONTROL_1_REG_COORDREGID__SHIFT 16
+static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_COORDREGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_1_REG_COORDREGID__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_COORDREGID__MASK;
+}
+#define A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__MASK 0xff000000
+#define A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__SHIFT 24
+static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__MASK;
+}
+
+#define REG_A4XX_HLSQ_CONTROL_2_REG 0x000023c2
+#define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
+#define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
+static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK;
+}
+#define A4XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000003fc
+#define A4XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 2
+static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
+#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__MASK 0x0003fc00
+#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__SHIFT 10
+static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__MASK;
+}
+#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__MASK 0x03fc0000
+#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__SHIFT 18
+static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__MASK;
+}
+
+#define REG_A4XX_HLSQ_CONTROL_3_REG 0x000023c3
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK;
+}
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK;
+}
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK;
+}
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK;
+}
+
+#define REG_A4XX_HLSQ_CONTROL_4_REG 0x000023c4
+#define A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff
+#define A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK 0x0000ff00
+#define A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT 8
+static inline uint32_t A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK;
+}
+
+#define REG_A4XX_HLSQ_VS_CONTROL_REG 0x000023c5
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_VS_CONTROL_REG_SSBO_ENABLE 0x00008000
+#define A4XX_HLSQ_VS_CONTROL_REG_ENABLED 0x00010000
+#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_FS_CONTROL_REG 0x000023c6
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_FS_CONTROL_REG_SSBO_ENABLE 0x00008000
+#define A4XX_HLSQ_FS_CONTROL_REG_ENABLED 0x00010000
+#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_HS_CONTROL_REG 0x000023c7
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_HS_CONTROL_REG_SSBO_ENABLE 0x00008000
+#define A4XX_HLSQ_HS_CONTROL_REG_ENABLED 0x00010000
+#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_DS_CONTROL_REG 0x000023c8
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_DS_CONTROL_REG_SSBO_ENABLE 0x00008000
+#define A4XX_HLSQ_DS_CONTROL_REG_ENABLED 0x00010000
+#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_GS_CONTROL_REG 0x000023c9
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_GS_CONTROL_REG_SSBO_ENABLE 0x00008000
+#define A4XX_HLSQ_GS_CONTROL_REG_ENABLED 0x00010000
+#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_CS_CONTROL_REG 0x000023ca
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_CS_CONTROL_REG_SSBO_ENABLE 0x00008000
+#define A4XX_HLSQ_CS_CONTROL_REG_ENABLED 0x00010000
+#define A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_0 0x000023cd
+#define A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__MASK 0x00000003
+#define A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__MASK;
+}
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__SHIFT 2
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__MASK;
+}
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__SHIFT 12
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__MASK;
+}
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__MASK;
+}
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_1 0x000023ce
+#define A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__MASK 0xffffffff
+#define A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_1_SIZE_X(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__SHIFT) & A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__MASK;
+}
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_2 0x000023cf
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_3 0x000023d0
+#define A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__MASK 0xffffffff
+#define A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__SHIFT) & A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__MASK;
+}
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_4 0x000023d1
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_5 0x000023d2
+#define A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__MASK 0xffffffff
+#define A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__SHIFT) & A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__MASK;
+}
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_6 0x000023d3
+
+#define REG_A4XX_HLSQ_CL_CONTROL_0 0x000023d4
+#define A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__MASK 0x00000fff
+#define A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__MASK;
+}
+#define A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID__MASK 0x00fff000
+#define A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID__SHIFT 12
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID__MASK;
+}
+#define A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__MASK 0xff000000
+#define A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__SHIFT 24
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__MASK;
+}
+
+#define REG_A4XX_HLSQ_CL_CONTROL_1 0x000023d5
+#define A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID__MASK 0x00000fff
+#define A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID__MASK;
+}
+#define A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID__MASK 0x00fff000
+#define A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID__SHIFT 12
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID__MASK;
+}
+
+#define REG_A4XX_HLSQ_CL_KERNEL_CONST 0x000023d6
+#define A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID__MASK 0x00000fff
+#define A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID__SHIFT) & A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID__MASK;
+}
+#define A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID__MASK 0x00fff000
+#define A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID__SHIFT 12
+static inline uint32_t A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID__SHIFT) & A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID__MASK;
+}
+
+#define REG_A4XX_HLSQ_CL_KERNEL_GROUP_X 0x000023d7
+
+#define REG_A4XX_HLSQ_CL_KERNEL_GROUP_Y 0x000023d8
+
+#define REG_A4XX_HLSQ_CL_KERNEL_GROUP_Z 0x000023d9
+
+#define REG_A4XX_HLSQ_CL_WG_OFFSET 0x000023da
+#define A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID__MASK 0x00000fff
+#define A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID__SHIFT) & A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID__MASK;
+}
+
+#define REG_A4XX_HLSQ_UPDATE_CONTROL 0x000023db
+
+#define REG_A4XX_PC_BINNING_COMMAND 0x00000d00
+#define A4XX_PC_BINNING_COMMAND_BINNING_ENABLE 0x00000001
+
+#define REG_A4XX_PC_TESSFACTOR_ADDR 0x00000d08
+
+#define REG_A4XX_PC_DRAWCALL_SETUP_OVERRIDE 0x00000d0c
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_0 0x00000d10
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_1 0x00000d11
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_2 0x00000d12
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_3 0x00000d13
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_4 0x00000d14
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_5 0x00000d15
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_6 0x00000d16
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_7 0x00000d17
+
+#define REG_A4XX_PC_BIN_BASE 0x000021c0
+
+#define REG_A4XX_PC_VSTREAM_CONTROL 0x000021c2
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val)
+{
+ return ((val) << A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A4XX_PC_VSTREAM_CONTROL_SIZE__MASK;
+}
+#define A4XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000
+#define A4XX_PC_VSTREAM_CONTROL_N__SHIFT 22
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_N(uint32_t val)
+{
+ return ((val) << A4XX_PC_VSTREAM_CONTROL_N__SHIFT) & A4XX_PC_VSTREAM_CONTROL_N__MASK;
+}
+
+#define REG_A4XX_PC_PRIM_VTX_CNTL 0x000021c4
+#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK 0x0000000f
+#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT 0
+static inline uint32_t A4XX_PC_PRIM_VTX_CNTL_VAROUT(uint32_t val)
+{
+ return ((val) << A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT) & A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK;
+}
+#define A4XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART 0x00100000
+#define A4XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
+#define A4XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
+
+#define REG_A4XX_PC_PRIM_VTX_CNTL2 0x000021c5
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK 0x00000007
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT 0
+static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK;
+}
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK 0x00000038
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT 3
+static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK;
+}
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_ENABLE 0x00000040
+
+#define REG_A4XX_PC_RESTART_INDEX 0x000021c6
+
+#define REG_A4XX_PC_GS_PARAM 0x000021e5
+#define A4XX_PC_GS_PARAM_MAX_VERTICES__MASK 0x000003ff
+#define A4XX_PC_GS_PARAM_MAX_VERTICES__SHIFT 0
+static inline uint32_t A4XX_PC_GS_PARAM_MAX_VERTICES(uint32_t val)
+{
+ return ((val) << A4XX_PC_GS_PARAM_MAX_VERTICES__SHIFT) & A4XX_PC_GS_PARAM_MAX_VERTICES__MASK;
+}
+#define A4XX_PC_GS_PARAM_INVOCATIONS__MASK 0x0000f800
+#define A4XX_PC_GS_PARAM_INVOCATIONS__SHIFT 11
+static inline uint32_t A4XX_PC_GS_PARAM_INVOCATIONS(uint32_t val)
+{
+ return ((val) << A4XX_PC_GS_PARAM_INVOCATIONS__SHIFT) & A4XX_PC_GS_PARAM_INVOCATIONS__MASK;
+}
+#define A4XX_PC_GS_PARAM_PRIMTYPE__MASK 0x01800000
+#define A4XX_PC_GS_PARAM_PRIMTYPE__SHIFT 23
+static inline uint32_t A4XX_PC_GS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A4XX_PC_GS_PARAM_PRIMTYPE__SHIFT) & A4XX_PC_GS_PARAM_PRIMTYPE__MASK;
+}
+#define A4XX_PC_GS_PARAM_LAYER 0x80000000
+
+#define REG_A4XX_PC_HS_PARAM 0x000021e7
+#define A4XX_PC_HS_PARAM_VERTICES_OUT__MASK 0x0000003f
+#define A4XX_PC_HS_PARAM_VERTICES_OUT__SHIFT 0
+static inline uint32_t A4XX_PC_HS_PARAM_VERTICES_OUT(uint32_t val)
+{
+ return ((val) << A4XX_PC_HS_PARAM_VERTICES_OUT__SHIFT) & A4XX_PC_HS_PARAM_VERTICES_OUT__MASK;
+}
+#define A4XX_PC_HS_PARAM_SPACING__MASK 0x00600000
+#define A4XX_PC_HS_PARAM_SPACING__SHIFT 21
+static inline uint32_t A4XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val)
+{
+ return ((val) << A4XX_PC_HS_PARAM_SPACING__SHIFT) & A4XX_PC_HS_PARAM_SPACING__MASK;
+}
+#define A4XX_PC_HS_PARAM_CW 0x00800000
+#define A4XX_PC_HS_PARAM_CONNECTED 0x01000000
+
+#define REG_A4XX_VBIF_VERSION 0x00003000
+
+#define REG_A4XX_VBIF_CLKON 0x00003001
+#define A4XX_VBIF_CLKON_FORCE_ON_TESTBUS 0x00000001
+
+#define REG_A4XX_VBIF_ABIT_SORT 0x0000301c
+
+#define REG_A4XX_VBIF_ABIT_SORT_CONF 0x0000301d
+
+#define REG_A4XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A4XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
+
+#define REG_A4XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
+
+#define REG_A4XX_VBIF_IN_WR_LIM_CONF0 0x00003030
+
+#define REG_A4XX_VBIF_IN_WR_LIM_CONF1 0x00003031
+
+#define REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
+
+#define REG_A4XX_VBIF_PERF_CNT_EN0 0x000030c0
+
+#define REG_A4XX_VBIF_PERF_CNT_EN1 0x000030c1
+
+#define REG_A4XX_VBIF_PERF_CNT_EN2 0x000030c2
+
+#define REG_A4XX_VBIF_PERF_CNT_EN3 0x000030c3
+
+#define REG_A4XX_VBIF_PERF_CNT_SEL0 0x000030d0
+
+#define REG_A4XX_VBIF_PERF_CNT_SEL1 0x000030d1
+
+#define REG_A4XX_VBIF_PERF_CNT_SEL2 0x000030d2
+
+#define REG_A4XX_VBIF_PERF_CNT_SEL3 0x000030d3
+
+#define REG_A4XX_VBIF_PERF_CNT_LOW0 0x000030d8
+
+#define REG_A4XX_VBIF_PERF_CNT_LOW1 0x000030d9
+
+#define REG_A4XX_VBIF_PERF_CNT_LOW2 0x000030da
+
+#define REG_A4XX_VBIF_PERF_CNT_LOW3 0x000030db
+
+#define REG_A4XX_VBIF_PERF_CNT_HIGH0 0x000030e0
+
+#define REG_A4XX_VBIF_PERF_CNT_HIGH1 0x000030e1
+
+#define REG_A4XX_VBIF_PERF_CNT_HIGH2 0x000030e2
+
+#define REG_A4XX_VBIF_PERF_CNT_HIGH3 0x000030e3
+
+#define REG_A4XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
+
+#define REG_A4XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
+
+#define REG_A4XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
+
+#define REG_A4XX_UNKNOWN_0CC5 0x00000cc5
+
+#define REG_A4XX_UNKNOWN_0CC6 0x00000cc6
+
+#define REG_A4XX_UNKNOWN_0D01 0x00000d01
+
+#define REG_A4XX_UNKNOWN_0E42 0x00000e42
+
+#define REG_A4XX_UNKNOWN_0EC2 0x00000ec2
+
+#define REG_A4XX_UNKNOWN_2001 0x00002001
+
+#define REG_A4XX_UNKNOWN_209B 0x0000209b
+
+#define REG_A4XX_UNKNOWN_20EF 0x000020ef
+
+#define REG_A4XX_UNKNOWN_2152 0x00002152
+
+#define REG_A4XX_UNKNOWN_2153 0x00002153
+
+#define REG_A4XX_UNKNOWN_2154 0x00002154
+
+#define REG_A4XX_UNKNOWN_2155 0x00002155
+
+#define REG_A4XX_UNKNOWN_2156 0x00002156
+
+#define REG_A4XX_UNKNOWN_2157 0x00002157
+
+#define REG_A4XX_UNKNOWN_21C3 0x000021c3
+
+#define REG_A4XX_UNKNOWN_21E6 0x000021e6
+
+#define REG_A4XX_UNKNOWN_2209 0x00002209
+
+#define REG_A4XX_UNKNOWN_22D7 0x000022d7
+
+#define REG_A4XX_UNKNOWN_2352 0x00002352
+
+#define REG_A4XX_TEX_SAMP_0 0x00000000
+#define A4XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
+#define A4XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
+#define A4XX_TEX_SAMP_0_XY_MAG__SHIFT 1
+static inline uint32_t A4XX_TEX_SAMP_0_XY_MAG(enum a4xx_tex_filter val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_XY_MAG__SHIFT) & A4XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A4XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
+#define A4XX_TEX_SAMP_0_XY_MIN__SHIFT 3
+static inline uint32_t A4XX_TEX_SAMP_0_XY_MIN(enum a4xx_tex_filter val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_XY_MIN__SHIFT) & A4XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A4XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
+#define A4XX_TEX_SAMP_0_WRAP_S__SHIFT 5
+static inline uint32_t A4XX_TEX_SAMP_0_WRAP_S(enum a4xx_tex_clamp val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_WRAP_S__SHIFT) & A4XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A4XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
+#define A4XX_TEX_SAMP_0_WRAP_T__SHIFT 8
+static inline uint32_t A4XX_TEX_SAMP_0_WRAP_T(enum a4xx_tex_clamp val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_WRAP_T__SHIFT) & A4XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A4XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
+#define A4XX_TEX_SAMP_0_WRAP_R__SHIFT 11
+static inline uint32_t A4XX_TEX_SAMP_0_WRAP_R(enum a4xx_tex_clamp
val) +{ + return ((val) << A4XX_TEX_SAMP_0_WRAP_R__SHIFT) & A4XX_TEX_SAMP_0_WRAP_R__MASK; +} +#define A4XX_TEX_SAMP_0_ANISO__MASK 0x0001c000 +#define A4XX_TEX_SAMP_0_ANISO__SHIFT 14 +static inline uint32_t A4XX_TEX_SAMP_0_ANISO(enum a4xx_tex_aniso val) +{ + return ((val) << A4XX_TEX_SAMP_0_ANISO__SHIFT) & A4XX_TEX_SAMP_0_ANISO__MASK; +} +#define A4XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000 +#define A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19 +static inline uint32_t A4XX_TEX_SAMP_0_LOD_BIAS(float val) +{ + return ((((int32_t)(val * 256.0))) << A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A4XX_TEX_SAMP_0_LOD_BIAS__MASK; +} + +#define REG_A4XX_TEX_SAMP_1 0x00000001 +#define A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e +#define A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1 +static inline uint32_t A4XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val) +{ + return ((val) << A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK; +} +#define A4XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010 +#define A4XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020 +#define A4XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040 +#define A4XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00 +#define A4XX_TEX_SAMP_1_MAX_LOD__SHIFT 8 +static inline uint32_t A4XX_TEX_SAMP_1_MAX_LOD(float val) +{ + return ((((uint32_t)(val * 256.0))) << A4XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A4XX_TEX_SAMP_1_MAX_LOD__MASK; +} +#define A4XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000 +#define A4XX_TEX_SAMP_1_MIN_LOD__SHIFT 20 +static inline uint32_t A4XX_TEX_SAMP_1_MIN_LOD(float val) +{ + return ((((uint32_t)(val * 256.0))) << A4XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A4XX_TEX_SAMP_1_MIN_LOD__MASK; +} + +#define REG_A4XX_TEX_CONST_0 0x00000000 +#define A4XX_TEX_CONST_0_TILED 0x00000001 +#define A4XX_TEX_CONST_0_SRGB 0x00000004 +#define A4XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070 +#define A4XX_TEX_CONST_0_SWIZ_X__SHIFT 4 +static inline uint32_t A4XX_TEX_CONST_0_SWIZ_X(enum a4xx_tex_swiz val) +{ + return ((val) << A4XX_TEX_CONST_0_SWIZ_X__SHIFT) & A4XX_TEX_CONST_0_SWIZ_X__MASK; +} +#define A4XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380 +#define A4XX_TEX_CONST_0_SWIZ_Y__SHIFT 7 +static inline uint32_t A4XX_TEX_CONST_0_SWIZ_Y(enum a4xx_tex_swiz val) +{ + return ((val) << A4XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A4XX_TEX_CONST_0_SWIZ_Y__MASK; +} +#define A4XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00 +#define A4XX_TEX_CONST_0_SWIZ_Z__SHIFT 10 +static inline uint32_t A4XX_TEX_CONST_0_SWIZ_Z(enum a4xx_tex_swiz val) +{ + return ((val) << A4XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A4XX_TEX_CONST_0_SWIZ_Z__MASK; +} +#define A4XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000 +#define A4XX_TEX_CONST_0_SWIZ_W__SHIFT 13 +static inline uint32_t A4XX_TEX_CONST_0_SWIZ_W(enum a4xx_tex_swiz val) +{ + return ((val) << A4XX_TEX_CONST_0_SWIZ_W__SHIFT) & A4XX_TEX_CONST_0_SWIZ_W__MASK; +} +#define A4XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000 +#define A4XX_TEX_CONST_0_MIPLVLS__SHIFT 16 +static inline uint32_t A4XX_TEX_CONST_0_MIPLVLS(uint32_t val) +{ + return ((val) << A4XX_TEX_CONST_0_MIPLVLS__SHIFT) & A4XX_TEX_CONST_0_MIPLVLS__MASK; +} +#define A4XX_TEX_CONST_0_FMT__MASK 0x1fc00000 +#define A4XX_TEX_CONST_0_FMT__SHIFT 22 +static inline uint32_t A4XX_TEX_CONST_0_FMT(enum a4xx_tex_fmt val) +{ + return ((val) << A4XX_TEX_CONST_0_FMT__SHIFT) & A4XX_TEX_CONST_0_FMT__MASK; +} +#define A4XX_TEX_CONST_0_TYPE__MASK 0xe0000000 +#define A4XX_TEX_CONST_0_TYPE__SHIFT 29 +static inline uint32_t A4XX_TEX_CONST_0_TYPE(enum a4xx_tex_type val) +{ + return ((val) << A4XX_TEX_CONST_0_TYPE__SHIFT) & A4XX_TEX_CONST_0_TYPE__MASK; +} + +#define REG_A4XX_TEX_CONST_1 0x00000001 
+#define A4XX_TEX_CONST_1_HEIGHT__MASK 0x00007fff +#define A4XX_TEX_CONST_1_HEIGHT__SHIFT 0 +static inline uint32_t A4XX_TEX_CONST_1_HEIGHT(uint32_t val) +{ + return ((val) << A4XX_TEX_CONST_1_HEIGHT__SHIFT) & A4XX_TEX_CONST_1_HEIGHT__MASK; +} +#define A4XX_TEX_CONST_1_WIDTH__MASK 0x3fff8000 +#define A4XX_TEX_CONST_1_WIDTH__SHIFT 15 +static inline uint32_t A4XX_TEX_CONST_1_WIDTH(uint32_t val) +{ + return ((val) << A4XX_TEX_CONST_1_WIDTH__SHIFT) & A4XX_TEX_CONST_1_WIDTH__MASK; +} + +#define REG_A4XX_TEX_CONST_2 0x00000002 +#define A4XX_TEX_CONST_2_PITCHALIGN__MASK 0x0000000f +#define A4XX_TEX_CONST_2_PITCHALIGN__SHIFT 0 +static inline uint32_t A4XX_TEX_CONST_2_PITCHALIGN(uint32_t val) +{ + return ((val) << A4XX_TEX_CONST_2_PITCHALIGN__SHIFT) & A4XX_TEX_CONST_2_PITCHALIGN__MASK; +} +#define A4XX_TEX_CONST_2_BUFFER 0x00000040 +#define A4XX_TEX_CONST_2_PITCH__MASK 0x3ffffe00 +#define A4XX_TEX_CONST_2_PITCH__SHIFT 9 +static inline uint32_t A4XX_TEX_CONST_2_PITCH(uint32_t val) +{ + return ((val) << A4XX_TEX_CONST_2_PITCH__SHIFT) & A4XX_TEX_CONST_2_PITCH__MASK; +} +#define A4XX_TEX_CONST_2_SWAP__MASK 0xc0000000 +#define A4XX_TEX_CONST_2_SWAP__SHIFT 30 +static inline uint32_t A4XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val) +{ + return ((val) << A4XX_TEX_CONST_2_SWAP__SHIFT) & A4XX_TEX_CONST_2_SWAP__MASK; +} + +#define REG_A4XX_TEX_CONST_3 0x00000003 +#define A4XX_TEX_CONST_3_LAYERSZ__MASK 0x00003fff +#define A4XX_TEX_CONST_3_LAYERSZ__SHIFT 0 +static inline uint32_t A4XX_TEX_CONST_3_LAYERSZ(uint32_t val) +{ + return ((val >> 12) << A4XX_TEX_CONST_3_LAYERSZ__SHIFT) & A4XX_TEX_CONST_3_LAYERSZ__MASK; +} +#define A4XX_TEX_CONST_3_DEPTH__MASK 0x7ffc0000 +#define A4XX_TEX_CONST_3_DEPTH__SHIFT 18 +static inline uint32_t A4XX_TEX_CONST_3_DEPTH(uint32_t val) +{ + return ((val) << A4XX_TEX_CONST_3_DEPTH__SHIFT) & A4XX_TEX_CONST_3_DEPTH__MASK; +} + +#define REG_A4XX_TEX_CONST_4 0x00000004 +#define A4XX_TEX_CONST_4_LAYERSZ__MASK 0x0000000f +#define A4XX_TEX_CONST_4_LAYERSZ__SHIFT 0 +static inline uint32_t A4XX_TEX_CONST_4_LAYERSZ(uint32_t val) +{ + return ((val >> 12) << A4XX_TEX_CONST_4_LAYERSZ__SHIFT) & A4XX_TEX_CONST_4_LAYERSZ__MASK; +} +#define A4XX_TEX_CONST_4_BASE__MASK 0xffffffe0 +#define A4XX_TEX_CONST_4_BASE__SHIFT 5 +static inline uint32_t A4XX_TEX_CONST_4_BASE(uint32_t val) +{ + return ((val >> 5) << A4XX_TEX_CONST_4_BASE__SHIFT) & A4XX_TEX_CONST_4_BASE__MASK; +} + +#define REG_A4XX_TEX_CONST_5 0x00000005 + +#define REG_A4XX_TEX_CONST_6 0x00000006 + +#define REG_A4XX_TEX_CONST_7 0x00000007 + +#define REG_A4XX_SSBO_0_0 0x00000000 +#define A4XX_SSBO_0_0_BASE__MASK 0xffffffe0 +#define A4XX_SSBO_0_0_BASE__SHIFT 5 +static inline uint32_t A4XX_SSBO_0_0_BASE(uint32_t val) +{ + return ((val >> 5) << A4XX_SSBO_0_0_BASE__SHIFT) & A4XX_SSBO_0_0_BASE__MASK; +} + +#define REG_A4XX_SSBO_0_1 0x00000001 +#define A4XX_SSBO_0_1_PITCH__MASK 0x003fffff +#define A4XX_SSBO_0_1_PITCH__SHIFT 0 +static inline uint32_t A4XX_SSBO_0_1_PITCH(uint32_t val) +{ + return ((val) << A4XX_SSBO_0_1_PITCH__SHIFT) & A4XX_SSBO_0_1_PITCH__MASK; +} + +#define REG_A4XX_SSBO_0_2 0x00000002 +#define A4XX_SSBO_0_2_ARRAY_PITCH__MASK 0x03fff000 +#define A4XX_SSBO_0_2_ARRAY_PITCH__SHIFT 12 +static inline uint32_t A4XX_SSBO_0_2_ARRAY_PITCH(uint32_t val) +{ + return ((val >> 12) << A4XX_SSBO_0_2_ARRAY_PITCH__SHIFT) & A4XX_SSBO_0_2_ARRAY_PITCH__MASK; +} + +#define REG_A4XX_SSBO_0_3 0x00000003 +#define A4XX_SSBO_0_3_CPP__MASK 0x0000003f +#define A4XX_SSBO_0_3_CPP__SHIFT 0 +static inline uint32_t A4XX_SSBO_0_3_CPP(uint32_t val) +{ + return ((val) << 
A4XX_SSBO_0_3_CPP__SHIFT) & A4XX_SSBO_0_3_CPP__MASK; +} + +#define REG_A4XX_SSBO_1_0 0x00000000 +#define A4XX_SSBO_1_0_CPP__MASK 0x0000001f +#define A4XX_SSBO_1_0_CPP__SHIFT 0 +static inline uint32_t A4XX_SSBO_1_0_CPP(uint32_t val) +{ + return ((val) << A4XX_SSBO_1_0_CPP__SHIFT) & A4XX_SSBO_1_0_CPP__MASK; +} +#define A4XX_SSBO_1_0_FMT__MASK 0x0000ff00 +#define A4XX_SSBO_1_0_FMT__SHIFT 8 +static inline uint32_t A4XX_SSBO_1_0_FMT(enum a4xx_color_fmt val) +{ + return ((val) << A4XX_SSBO_1_0_FMT__SHIFT) & A4XX_SSBO_1_0_FMT__MASK; +} +#define A4XX_SSBO_1_0_WIDTH__MASK 0xffff0000 +#define A4XX_SSBO_1_0_WIDTH__SHIFT 16 +static inline uint32_t A4XX_SSBO_1_0_WIDTH(uint32_t val) +{ + return ((val) << A4XX_SSBO_1_0_WIDTH__SHIFT) & A4XX_SSBO_1_0_WIDTH__MASK; +} + +#define REG_A4XX_SSBO_1_1 0x00000001 +#define A4XX_SSBO_1_1_HEIGHT__MASK 0x0000ffff +#define A4XX_SSBO_1_1_HEIGHT__SHIFT 0 +static inline uint32_t A4XX_SSBO_1_1_HEIGHT(uint32_t val) +{ + return ((val) << A4XX_SSBO_1_1_HEIGHT__SHIFT) & A4XX_SSBO_1_1_HEIGHT__MASK; +} +#define A4XX_SSBO_1_1_DEPTH__MASK 0xffff0000 +#define A4XX_SSBO_1_1_DEPTH__SHIFT 16 +static inline uint32_t A4XX_SSBO_1_1_DEPTH(uint32_t val) +{ + return ((val) << A4XX_SSBO_1_1_DEPTH__SHIFT) & A4XX_SSBO_1_1_DEPTH__MASK; +} + + +#endif /* A4XX_XML */ diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c new file mode 100644 index 000000000..a10feb8a4 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -0,0 +1,730 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2014 The Linux Foundation. All rights reserved. + */ +#include "a4xx_gpu.h" + +#define A4XX_INT0_MASK \ + (A4XX_INT0_RBBM_AHB_ERROR | \ + A4XX_INT0_RBBM_ATB_BUS_OVERFLOW | \ + A4XX_INT0_CP_T0_PACKET_IN_IB | \ + A4XX_INT0_CP_OPCODE_ERROR | \ + A4XX_INT0_CP_RESERVED_BIT_ERROR | \ + A4XX_INT0_CP_HW_FAULT | \ + A4XX_INT0_CP_IB1_INT | \ + A4XX_INT0_CP_IB2_INT | \ + A4XX_INT0_CP_RB_INT | \ + A4XX_INT0_CP_REG_PROTECT_FAULT | \ + A4XX_INT0_CP_AHB_ERROR_HALT | \ + A4XX_INT0_CACHE_FLUSH_TS | \ + A4XX_INT0_UCHE_OOB_ACCESS) + +extern bool hang_debug; +static void a4xx_dump(struct msm_gpu *gpu); +static bool a4xx_idle(struct msm_gpu *gpu); + +static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) +{ + struct msm_ringbuffer *ring = submit->ring; + unsigned int i; + + for (i = 0; i < submit->nr_cmds; i++) { + switch (submit->cmd[i].type) { + case MSM_SUBMIT_CMD_IB_TARGET_BUF: + /* ignore IB-targets */ + break; + case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: + /* ignore if there has not been a ctx switch: */ + if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + break; + fallthrough; + case MSM_SUBMIT_CMD_BUF: + OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFE, 2); + OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); + OUT_RING(ring, submit->cmd[i].size); + OUT_PKT2(ring); + break; + } + } + + OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1); + OUT_RING(ring, submit->seqno); + + /* Flush HLSQ lazy updates to make sure there is nothing + * pending for indirect loads after the timestamp has + * passed: + */ + OUT_PKT3(ring, CP_EVENT_WRITE, 1); + OUT_RING(ring, HLSQ_FLUSH); + + /* wait for idle before cache flush/interrupt */ + OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1); + OUT_RING(ring, 0x00000000); + + /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */ + OUT_PKT3(ring, CP_EVENT_WRITE, 3); + OUT_RING(ring, CACHE_FLUSH_TS | CP_EVENT_WRITE_0_IRQ); + OUT_RING(ring, rbmemptr(ring, fence)); + OUT_RING(ring, submit->seqno); + + adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR); +} + 
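+/*
+ * Hedged illustration (an editor's sketch, not part of the upstream
+ * driver): the generated a4xx.xml.h helpers above pair each
+ * FIELD__MASK/FIELD__SHIFT define with a static inline packer, so a
+ * multi-field register value is built by OR'ing packers together
+ * before a single gpu_write(). A hypothetical write of the compute
+ * NDRange register, with placeholder values and any field-encoding
+ * bias left to the hardware docs, could look like:
+ *
+ *	static void example_write_cl_ndrange0(struct msm_gpu *gpu,
+ *			uint32_t dim, uint32_t x, uint32_t y, uint32_t z)
+ *	{
+ *		gpu_write(gpu, REG_A4XX_HLSQ_CL_NDRANGE_0,
+ *			A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM(dim) |
+ *			A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX(x) |
+ *			A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY(y) |
+ *			A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ(z));
+ *	}
+ */
+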
+/* + * a4xx_enable_hwcg() - Program the clock control registers + * @gpu: The GPU device pointer + */ +static void a4xx_enable_hwcg(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + unsigned int i; + for (i = 0; i < 4; i++) + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202); + for (i = 0; i < 4; i++) + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222); + for (i = 0; i < 4; i++) + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TP(i), 0x0E739CE7); + for (i = 0; i < 4; i++) + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TP(i), 0x00111111); + for (i = 0; i < 4; i++) + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_SP(i), 0x22222222); + for (i = 0; i < 4; i++) + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_SP(i), 0x00222222); + for (i = 0; i < 4; i++) + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_SP(i), 0x00000104); + for (i = 0; i < 4; i++) + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_SP(i), 0x00000081); + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_UCHE, 0x22222222); + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_UCHE, 0x02222222); + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL3_UCHE, 0x00000000); + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL4_UCHE, 0x00000000); + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_UCHE, 0x00004444); + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_UCHE, 0x00001112); + for (i = 0; i < 4; i++) + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_RB(i), 0x22222222); + + /* Disable L1 clocking in A420 due to CCU issues with it */ + for (i = 0; i < 4; i++) { + if (adreno_is_a420(adreno_gpu)) { + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i), + 0x00002020); + } else { + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i), + 0x00022020); + } + } + + /* No CCU for A405 */ + if (!adreno_is_a405(adreno_gpu)) { + for (i = 0; i < 4; i++) { + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i), + 0x00000922); + } + + for (i = 0; i < 4; i++) { + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i), + 0x00000000); + } + + for (i = 0; i < 4; i++) { + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i), + 0x00000001); + } + } + + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222); + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_GPC, 0x04100104); + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_GPC, 0x00022222); + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM, 0x00000022); + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM, 0x0000010F); + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM, 0x00000022); + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM, 0x00222222); + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00004104); + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222); + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_HLSQ, 0x00000000); + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000); + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00220000); + /* Early A430s have a timing issue with SP/TP power collapse; disabling HW clock gating prevents it. */ + if (adreno_is_a430(adreno_gpu) && adreno_gpu->rev.patchid < 2) + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0); + else + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA); + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2, 0); +} + + +static bool a4xx_me_init(struct msm_gpu *gpu) +{ + struct msm_ringbuffer *ring = gpu->rb[0]; + + OUT_PKT3(ring, CP_ME_INIT, 17); + OUT_RING(ring, 0x000003f7); + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000080); + OUT_RING(ring, 0x00000100); + OUT_RING(ring, 0x00000180); + OUT_RING(ring, 0x00006600); + OUT_RING(ring, 0x00000150); + OUT_RING(ring, 0x0000014e); + OUT_RING(ring, 0x00000154); + OUT_RING(ring, 0x00000001); + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + + adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR); + return a4xx_idle(gpu); +} + +static int a4xx_hw_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu); + uint32_t *ptr, len; + int i, ret; + + if (adreno_is_a405(adreno_gpu)) { + gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003); + } else if (adreno_is_a420(adreno_gpu)) { + gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F); + gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4); + gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001); + gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818); + gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018); + gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818); + gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018); + gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003); + } else if (adreno_is_a430(adreno_gpu)) { + gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001); + gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818); + gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018); + gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818); + gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018); + gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003); + } else { + BUG(); + } + + /* Make all blocks contribute to the GPU BUSY perf counter */ + gpu_write(gpu, REG_A4XX_RBBM_GPU_BUSY_MASKED, 0xffffffff); + + /* Tune the hysteresis counters for SP and CP idle detection */ + gpu_write(gpu, REG_A4XX_RBBM_SP_HYST_CNT, 0x10); + gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10); + + if (adreno_is_a430(adreno_gpu)) { + gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2, 0x30); + } + + /* Enable the RBBM error reporting bits */ + gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL0, 0x00000001); + + /* Enable AHB error reporting */ + gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL1, 0xa6ffffff); + + /* Enable power counters */ + gpu_write(gpu, REG_A4XX_RBBM_RBBM_CTL, 0x00000030); + + /* + * Turn on hang detection - this spews a lot of useful information + * into the RBBM registers on a hang: + */ + gpu_write(gpu, REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL, + (1 << 30) | 0xFFFF); + + gpu_write(gpu, REG_A4XX_RB_GMEM_BASE_ADDR, + (unsigned int)(a4xx_gpu->ocmem.base >> 14)); + + /* Turn on performance counters: */ + gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01); + + /* use the first CP counter for timestamp queries;
userspace may set + * this as well but it selects the same counter/countable: + */ + gpu_write(gpu, REG_A4XX_CP_PERFCTR_CP_SEL_0, CP_ALWAYS_COUNT); + + if (adreno_is_a430(adreno_gpu)) + gpu_write(gpu, REG_A4XX_UCHE_CACHE_WAYS_VFD, 0x07); + + /* Disable L2 bypass to avoid UCHE out of bounds errors */ + gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000); + gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000); + + gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) | + (adreno_is_a420(adreno_gpu) ? (1 << 29) : 0)); + + /* On A430 enable SP regfile sleep for power savings */ + /* TODO downstream does this for !420, so maybe applies for 405 too? */ + if (!adreno_is_a420(adreno_gpu)) { + gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0, + 0x00000441); + gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1, + 0x00000441); + } + + a4xx_enable_hwcg(gpu); + + /* + * For A420 set RBBM_CLOCK_DELAY_HLSQ.CGC_HLSQ_TP_EARLY_CYC >= 2 + * due to timing issue with HLSQ_TP_CLK_EN + */ + if (adreno_is_a420(adreno_gpu)) { + unsigned int val; + val = gpu_read(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ); + val &= ~A4XX_CGC_HLSQ_EARLY_CYC__MASK; + val |= 2 << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT; + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, val); + } + + /* setup access protection: */ + gpu_write(gpu, REG_A4XX_CP_PROTECT_CTRL, 0x00000007); + + /* RBBM registers */ + gpu_write(gpu, REG_A4XX_CP_PROTECT(0), 0x62000010); + gpu_write(gpu, REG_A4XX_CP_PROTECT(1), 0x63000020); + gpu_write(gpu, REG_A4XX_CP_PROTECT(2), 0x64000040); + gpu_write(gpu, REG_A4XX_CP_PROTECT(3), 0x65000080); + gpu_write(gpu, REG_A4XX_CP_PROTECT(4), 0x66000100); + gpu_write(gpu, REG_A4XX_CP_PROTECT(5), 0x64000200); + + /* CP registers */ + gpu_write(gpu, REG_A4XX_CP_PROTECT(6), 0x67000800); + gpu_write(gpu, REG_A4XX_CP_PROTECT(7), 0x64001600); + + + /* RB registers */ + gpu_write(gpu, REG_A4XX_CP_PROTECT(8), 0x60003300); + + /* HLSQ registers */ + gpu_write(gpu, REG_A4XX_CP_PROTECT(9), 0x60003800); + + /* VPC registers */ + gpu_write(gpu, REG_A4XX_CP_PROTECT(10), 0x61003980); + + /* SMMU registers */ + gpu_write(gpu, REG_A4XX_CP_PROTECT(11), 0x6e010000); + + gpu_write(gpu, REG_A4XX_RBBM_INT_0_MASK, A4XX_INT0_MASK); + + ret = adreno_hw_init(gpu); + if (ret) + return ret; + + /* + * Use the default ringbuffer size and block size but disable the RPTR + * shadow + */ + gpu_write(gpu, REG_A4XX_CP_RB_CNTL, + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + + /* Set the ringbuffer address */ + gpu_write(gpu, REG_A4XX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova)); + + /* Load PM4: */ + ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data); + len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4; + DBG("loading PM4 ucode version: %u", ptr[0]); + gpu_write(gpu, REG_A4XX_CP_ME_RAM_WADDR, 0); + for (i = 1; i < len; i++) + gpu_write(gpu, REG_A4XX_CP_ME_RAM_DATA, ptr[i]); + + /* Load PFP: */ + ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data); + len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4; + DBG("loading PFP ucode version: %u", ptr[0]); + + gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_ADDR, 0); + for (i = 1; i < len; i++) + gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_DATA, ptr[i]); + + /* clear ME_HALT to start micro engine */ + gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0); + + return a4xx_me_init(gpu) ? 
0 : -EINVAL; +} + +static void a4xx_recover(struct msm_gpu *gpu) +{ + int i; + + adreno_dump_info(gpu); + + for (i = 0; i < 8; i++) { + printk("CP_SCRATCH_REG%d: %u\n", i, + gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i)); + } + + /* dump registers before resetting gpu, if enabled: */ + if (hang_debug) + a4xx_dump(gpu); + + gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 1); + gpu_read(gpu, REG_A4XX_RBBM_SW_RESET_CMD); + gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 0); + adreno_recover(gpu); +} + +static void a4xx_destroy(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu); + + DBG("%s", gpu->name); + + adreno_gpu_cleanup(adreno_gpu); + + adreno_gpu_ocmem_cleanup(&a4xx_gpu->ocmem); + + kfree(a4xx_gpu); +} + +static bool a4xx_idle(struct msm_gpu *gpu) +{ + /* wait for ringbuffer to drain: */ + if (!adreno_idle(gpu, gpu->rb[0])) + return false; + + /* then wait for GPU to finish: */ + if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) & + A4XX_RBBM_STATUS_GPU_BUSY))) { + DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name); + /* TODO maybe we need to reset GPU here to recover from hang? */ + return false; + } + + return true; +} + +static irqreturn_t a4xx_irq(struct msm_gpu *gpu) +{ + uint32_t status; + + status = gpu_read(gpu, REG_A4XX_RBBM_INT_0_STATUS); + DBG("%s: Int status %08x", gpu->name, status); + + if (status & A4XX_INT0_CP_REG_PROTECT_FAULT) { + uint32_t reg = gpu_read(gpu, REG_A4XX_CP_PROTECT_STATUS); + printk("CP | Protected mode error| %s | addr=%x\n", + reg & (1 << 24) ? "WRITE" : "READ", + (reg & 0xFFFFF) >> 2); + } + + gpu_write(gpu, REG_A4XX_RBBM_INT_CLEAR_CMD, status); + + msm_gpu_retire(gpu); + + return IRQ_HANDLED; +} + +static const unsigned int a4xx_registers[] = { + /* RBBM */ + 0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026, + 0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066, + 0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF, + /* CP */ + 0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B, + 0x0578, 0x058F, + /* VSC */ + 0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51, + /* GRAS */ + 0x0C80, 0x0C81, 0x0C88, 0x0C8F, + /* RB */ + 0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2, + /* PC */ + 0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23, + /* VFD */ + 0x0E40, 0x0E4A, + /* VPC */ + 0x0E60, 0x0E61, 0x0E63, 0x0E68, + /* UCHE */ + 0x0E80, 0x0E84, 0x0E88, 0x0E95, + /* VMIDMT */ + 0x1000, 0x1000, 0x1002, 0x1002, 0x1004, 0x1004, 0x1008, 0x100A, + 0x100C, 0x100D, 0x100F, 0x1010, 0x1012, 0x1016, 0x1024, 0x1024, + 0x1027, 0x1027, 0x1100, 0x1100, 0x1102, 0x1102, 0x1104, 0x1104, + 0x1110, 0x1110, 0x1112, 0x1116, 0x1124, 0x1124, 0x1300, 0x1300, + 0x1380, 0x1380, + /* GRAS CTX 0 */ + 0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E, + /* PC CTX 0 */ + 0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7, + /* VFD CTX 0 */ + 0x2200, 0x2204, 0x2208, 0x22A9, + /* GRAS CTX 1 */ + 0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E, + /* PC CTX 1 */ + 0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7, + /* VFD CTX 1 */ + 0x2600, 0x2604, 0x2608, 0x26A9, + /* XPU */ + 0x2C00, 0x2C01, 0x2C10, 0x2C10, 0x2C12, 0x2C16, 0x2C1D, 0x2C20, + 0x2C28, 0x2C28, 0x2C30, 0x2C30, 0x2C32, 0x2C36, 0x2C40, 0x2C40, + 0x2C50, 0x2C50, 0x2C52, 0x2C56, 0x2C80, 0x2C80, 0x2C94, 0x2C95, + /* VBIF */ + 0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x301D, 0x3020, 0x3022, + 0x3024, 0x3026, 0x3028, 0x302A, 0x302C, 0x302D, 0x3030, 0x3031, + 0x3034, 0x3036, 0x3038, 0x3038, 
0x303C, 0x303D, 0x3040, 0x3040, + 0x3049, 0x3049, 0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068, + 0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094, + 0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8, + 0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100, + 0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120, + 0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x330C, 0x330C, + 0x3310, 0x3310, 0x3400, 0x3401, 0x3410, 0x3410, 0x3412, 0x3416, + 0x341D, 0x3420, 0x3428, 0x3428, 0x3430, 0x3430, 0x3432, 0x3436, + 0x3440, 0x3440, 0x3450, 0x3450, 0x3452, 0x3456, 0x3480, 0x3480, + 0x3494, 0x3495, 0x4000, 0x4000, 0x4002, 0x4002, 0x4004, 0x4004, + 0x4008, 0x400A, 0x400C, 0x400D, 0x400F, 0x4012, 0x4014, 0x4016, + 0x401D, 0x401D, 0x4020, 0x4027, 0x4060, 0x4062, 0x4200, 0x4200, + 0x4300, 0x4300, 0x4400, 0x4400, 0x4500, 0x4500, 0x4800, 0x4802, + 0x480F, 0x480F, 0x4811, 0x4811, 0x4813, 0x4813, 0x4815, 0x4816, + 0x482B, 0x482B, 0x4857, 0x4857, 0x4883, 0x4883, 0x48AF, 0x48AF, + 0x48C5, 0x48C5, 0x48E5, 0x48E5, 0x4905, 0x4905, 0x4925, 0x4925, + 0x4945, 0x4945, 0x4950, 0x4950, 0x495B, 0x495B, 0x4980, 0x498E, + 0x4B00, 0x4B00, 0x4C00, 0x4C00, 0x4D00, 0x4D00, 0x4E00, 0x4E00, + 0x4E80, 0x4E80, 0x4F00, 0x4F00, 0x4F08, 0x4F08, 0x4F10, 0x4F10, + 0x4F18, 0x4F18, 0x4F20, 0x4F20, 0x4F30, 0x4F30, 0x4F60, 0x4F60, + 0x4F80, 0x4F81, 0x4F88, 0x4F89, 0x4FEE, 0x4FEE, 0x4FF3, 0x4FF3, + 0x6000, 0x6001, 0x6008, 0x600F, 0x6014, 0x6016, 0x6018, 0x601B, + 0x61FD, 0x61FD, 0x623C, 0x623C, 0x6380, 0x6380, 0x63A0, 0x63A0, + 0x63C0, 0x63C1, 0x63C8, 0x63C9, 0x63D0, 0x63D4, 0x63D6, 0x63D6, + 0x63EE, 0x63EE, 0x6400, 0x6401, 0x6408, 0x640F, 0x6414, 0x6416, + 0x6418, 0x641B, 0x65FD, 0x65FD, 0x663C, 0x663C, 0x6780, 0x6780, + 0x67A0, 0x67A0, 0x67C0, 0x67C1, 0x67C8, 0x67C9, 0x67D0, 0x67D4, + 0x67D6, 0x67D6, 0x67EE, 0x67EE, 0x6800, 0x6801, 0x6808, 0x680F, + 0x6814, 0x6816, 0x6818, 0x681B, 0x69FD, 0x69FD, 0x6A3C, 0x6A3C, + 0x6B80, 0x6B80, 0x6BA0, 0x6BA0, 0x6BC0, 0x6BC1, 0x6BC8, 0x6BC9, + 0x6BD0, 0x6BD4, 0x6BD6, 0x6BD6, 0x6BEE, 0x6BEE, + ~0 /* sentinel */ +}; + +static const unsigned int a405_registers[] = { + /* RBBM */ + 0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026, + 0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066, + 0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF, + /* CP */ + 0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B, + 0x0578, 0x058F, + /* VSC */ + 0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51, + /* GRAS */ + 0x0C80, 0x0C81, 0x0C88, 0x0C8F, + /* RB */ + 0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2, + /* PC */ + 0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23, + /* VFD */ + 0x0E40, 0x0E4A, + /* VPC */ + 0x0E60, 0x0E61, 0x0E63, 0x0E68, + /* UCHE */ + 0x0E80, 0x0E84, 0x0E88, 0x0E95, + /* GRAS CTX 0 */ + 0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E, + /* PC CTX 0 */ + 0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7, + /* VFD CTX 0 */ + 0x2200, 0x2204, 0x2208, 0x22A9, + /* GRAS CTX 1 */ + 0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E, + /* PC CTX 1 */ + 0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7, + /* VFD CTX 1 */ + 0x2600, 0x2604, 0x2608, 0x26A9, + /* VBIF version 0x20050000*/ + 0x3000, 0x3007, 0x302C, 0x302C, 0x3030, 0x3030, 0x3034, 0x3036, + 0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040, 0x3049, 0x3049, + 0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068, 0x306C, 0x306D, + 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094, 0x3098, 0x3098, + 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8, 
0x30D0, 0x30D0, + 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100, 0x3108, 0x3108, + 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120, 0x3124, 0x3125, + 0x3129, 0x3129, 0x340C, 0x340C, 0x3410, 0x3410, + ~0 /* sentinel */ +}; + +static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu) +{ + struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL); + + if (!state) + return ERR_PTR(-ENOMEM); + + adreno_gpu_state_get(gpu, state); + + state->rbbm_status = gpu_read(gpu, REG_A4XX_RBBM_STATUS); + + return state; +} + +static void a4xx_dump(struct msm_gpu *gpu) +{ + printk("status: %08x\n", + gpu_read(gpu, REG_A4XX_RBBM_STATUS)); + adreno_dump(gpu); +} + +static int a4xx_pm_resume(struct msm_gpu *gpu) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + int ret; + + ret = msm_gpu_pm_resume(gpu); + if (ret) + return ret; + + if (adreno_is_a430(adreno_gpu)) { + unsigned int reg; + /* Set the default register values; set SW_COLLAPSE to 0 */ + gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778000); + do { + udelay(5); + reg = gpu_read(gpu, REG_A4XX_RBBM_POWER_STATUS); + } while (!(reg & A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON)); + } + return 0; +} + +static int a4xx_pm_suspend(struct msm_gpu *gpu) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + int ret; + + ret = msm_gpu_pm_suspend(gpu); + if (ret) + return ret; + + if (adreno_is_a430(adreno_gpu)) { + /* Set the default register values; set SW_COLLAPSE to 1 */ + gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778001); + } + return 0; +} + +static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) +{ + *value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO); + + return 0; +} + +static u32 a4xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + ring->memptrs->rptr = gpu_read(gpu, REG_A4XX_CP_RB_RPTR); + return ring->memptrs->rptr; +} + +static const struct adreno_gpu_funcs funcs = { + .base = { + .get_param = adreno_get_param, + .set_param = adreno_set_param, + .hw_init = a4xx_hw_init, + .pm_suspend = a4xx_pm_suspend, + .pm_resume = a4xx_pm_resume, + .recover = a4xx_recover, + .submit = a4xx_submit, + .active_ring = adreno_active_ring, + .irq = a4xx_irq, + .destroy = a4xx_destroy, +#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) + .show = adreno_show, +#endif + .gpu_state_get = a4xx_gpu_state_get, + .gpu_state_put = adreno_gpu_state_put, + .create_address_space = adreno_iommu_create_address_space, + .get_rptr = a4xx_get_rptr, + }, + .get_timestamp = a4xx_get_timestamp, +}; + +struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) +{ + struct a4xx_gpu *a4xx_gpu = NULL; + struct adreno_gpu *adreno_gpu; + struct msm_gpu *gpu; + struct msm_drm_private *priv = dev->dev_private; + struct platform_device *pdev = priv->gpu_pdev; + struct icc_path *ocmem_icc_path; + struct icc_path *icc_path; + int ret; + + if (!pdev) { + DRM_DEV_ERROR(dev->dev, "no a4xx device\n"); + ret = -ENXIO; + goto fail; + } + + a4xx_gpu = kzalloc(sizeof(*a4xx_gpu), GFP_KERNEL); + if (!a4xx_gpu) { + ret = -ENOMEM; + goto fail; + } + + adreno_gpu = &a4xx_gpu->base; + gpu = &adreno_gpu->base; + + gpu->perfcntrs = NULL; + gpu->num_perfcntrs = 0; + + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); + if (ret) + goto fail; + + adreno_gpu->registers = adreno_is_a405(adreno_gpu) ? 
a405_registers : + a4xx_registers; + + /* if needed, allocate gmem: */ + ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu, + &a4xx_gpu->ocmem); + if (ret) + goto fail; + + if (!gpu->aspace) { + /* TODO we think it is possible to configure the GPU to + * restrict access to VRAM carveout. But the required + * registers are unknown. For now just bail out and + * limp along with just modesetting. If it turns out + * to not be possible to restrict access, then we must + * implement a cmdstream validator. + */ + DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n"); + if (!allow_vram_carveout) { + ret = -ENXIO; + goto fail; + } + } + + icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem"); + if (IS_ERR(icc_path)) { + ret = PTR_ERR(icc_path); + goto fail; + } + + ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem"); + if (IS_ERR(ocmem_icc_path)) { + ret = PTR_ERR(ocmem_icc_path); + /* allow -ENODATA, ocmem icc is optional */ + if (ret != -ENODATA) + goto fail; + ocmem_icc_path = NULL; + } + + /* + * Set the ICC path to maximum speed for now by multiplying the fastest + * frequency by the bus width (8). We'll want to scale this later on to + * improve battery life. + */ + icc_set_bw(icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8); + icc_set_bw(ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8); + + return gpu; + +fail: + if (a4xx_gpu) + a4xx_destroy(&a4xx_gpu->base.base); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h new file mode 100644 index 000000000..a01448cba --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2014 The Linux Foundation. All rights reserved. + */ +#ifndef __A4XX_GPU_H__ +#define __A4XX_GPU_H__ + +#include "adreno_gpu.h" + +/* arrg, somehow fb.h is getting pulled in: */ +#undef ROP_COPY +#undef ROP_XOR + +#include "a4xx.xml.h" + +struct a4xx_gpu { + struct adreno_gpu base; + + /* if OCMEM is used for GMEM: */ + struct adreno_ocmem ocmem; +}; +#define to_a4xx_gpu(x) container_of(x, struct a4xx_gpu, base) + +#endif /* __A4XX_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h new file mode 100644 index 000000000..2505b4e43 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h @@ -0,0 +1,5492 @@ +#ifndef A5XX_XML +#define A5XX_XML + +/* Autogenerated file, DO NOT EDIT manually! 
+ +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://github.com/freedreno/envytools/ +git clone https://github.com/freedreno/envytools.git + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56) + +Copyright (C) 2013-2022 by the following authors: +- Rob Clark (robclark) +- Ilia Mirkin (imirkin) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + + +enum a5xx_color_fmt { + RB5_A8_UNORM = 2, + RB5_R8_UNORM = 3, + RB5_R8_SNORM = 4, + RB5_R8_UINT = 5, + RB5_R8_SINT = 6, + RB5_R4G4B4A4_UNORM = 8, + RB5_R5G5B5A1_UNORM = 10, + RB5_R5G6B5_UNORM = 14, + RB5_R8G8_UNORM = 15, + RB5_R8G8_SNORM = 16, + RB5_R8G8_UINT = 17, + RB5_R8G8_SINT = 18, + RB5_R16_UNORM = 21, + RB5_R16_SNORM = 22, + RB5_R16_FLOAT = 23, + RB5_R16_UINT = 24, + RB5_R16_SINT = 25, + RB5_R8G8B8A8_UNORM = 48, + RB5_R8G8B8_UNORM = 49, + RB5_R8G8B8A8_SNORM = 50, + RB5_R8G8B8A8_UINT = 51, + RB5_R8G8B8A8_SINT = 52, + RB5_R10G10B10A2_UNORM = 55, + RB5_R10G10B10A2_UINT = 58, + RB5_R11G11B10_FLOAT = 66, + RB5_R16G16_UNORM = 67, + RB5_R16G16_SNORM = 68, + RB5_R16G16_FLOAT = 69, + RB5_R16G16_UINT = 70, + RB5_R16G16_SINT = 71, + RB5_R32_FLOAT = 74, + RB5_R32_UINT = 75, + RB5_R32_SINT = 76, + RB5_R16G16B16A16_UNORM = 96, + RB5_R16G16B16A16_SNORM = 97, + RB5_R16G16B16A16_FLOAT = 98, + RB5_R16G16B16A16_UINT = 99, + RB5_R16G16B16A16_SINT = 100, + RB5_R32G32_FLOAT = 103, + RB5_R32G32_UINT = 104, + RB5_R32G32_SINT = 105, + RB5_R32G32B32A32_FLOAT = 130, + RB5_R32G32B32A32_UINT = 131, + RB5_R32G32B32A32_SINT = 132, + RB5_NONE = 255, +}; + +enum a5xx_tile_mode { + TILE5_LINEAR = 0, + TILE5_2 = 2, + TILE5_3 = 3, +}; + +enum a5xx_vtx_fmt { + VFMT5_8_UNORM = 3, + VFMT5_8_SNORM = 4, + VFMT5_8_UINT = 5, + VFMT5_8_SINT = 6, + VFMT5_8_8_UNORM = 15, + VFMT5_8_8_SNORM = 16, + VFMT5_8_8_UINT = 17, + VFMT5_8_8_SINT = 18, + VFMT5_16_UNORM = 21, + VFMT5_16_SNORM = 22, + VFMT5_16_FLOAT = 23, + VFMT5_16_UINT = 24, + VFMT5_16_SINT = 25, + VFMT5_8_8_8_UNORM = 33, + VFMT5_8_8_8_SNORM = 34, + VFMT5_8_8_8_UINT = 35, + VFMT5_8_8_8_SINT = 36, + VFMT5_8_8_8_8_UNORM = 48, + VFMT5_8_8_8_8_SNORM = 50, + VFMT5_8_8_8_8_UINT = 51, + VFMT5_8_8_8_8_SINT = 52, + VFMT5_10_10_10_2_UNORM = 54, + VFMT5_10_10_10_2_SNORM = 57, + VFMT5_10_10_10_2_UINT = 58, + VFMT5_10_10_10_2_SINT = 59, + VFMT5_11_11_10_FLOAT = 66, + VFMT5_16_16_UNORM = 67, + VFMT5_16_16_SNORM = 68, + VFMT5_16_16_FLOAT = 69, + VFMT5_16_16_UINT = 70, + VFMT5_16_16_SINT = 71, + VFMT5_32_UNORM = 72, + VFMT5_32_SNORM = 73, + VFMT5_32_FLOAT = 74, + VFMT5_32_UINT = 75, + VFMT5_32_SINT = 76, + VFMT5_32_FIXED = 77, + VFMT5_16_16_16_UNORM = 88, + VFMT5_16_16_16_SNORM = 89, + VFMT5_16_16_16_FLOAT = 90, + VFMT5_16_16_16_UINT = 91, + VFMT5_16_16_16_SINT = 92, + VFMT5_16_16_16_16_UNORM = 96, + VFMT5_16_16_16_16_SNORM = 97, + VFMT5_16_16_16_16_FLOAT = 98, + VFMT5_16_16_16_16_UINT = 99, + VFMT5_16_16_16_16_SINT = 100, + VFMT5_32_32_UNORM = 101, + VFMT5_32_32_SNORM = 102, + VFMT5_32_32_FLOAT = 103, + VFMT5_32_32_UINT = 104, + VFMT5_32_32_SINT = 105, + VFMT5_32_32_FIXED = 106, + VFMT5_32_32_32_UNORM = 112, + VFMT5_32_32_32_SNORM = 113, + VFMT5_32_32_32_UINT = 114, + VFMT5_32_32_32_SINT = 115, + VFMT5_32_32_32_FLOAT = 116, + VFMT5_32_32_32_FIXED = 117, + VFMT5_32_32_32_32_UNORM = 128, + VFMT5_32_32_32_32_SNORM = 129, + VFMT5_32_32_32_32_FLOAT = 130, + VFMT5_32_32_32_32_UINT = 131, + VFMT5_32_32_32_32_SINT = 132, + VFMT5_32_32_32_32_FIXED = 133, + VFMT5_NONE = 255, +}; + +enum a5xx_tex_fmt { + TFMT5_A8_UNORM = 2, + TFMT5_8_UNORM = 3, + TFMT5_8_SNORM = 4, + TFMT5_8_UINT = 5, + TFMT5_8_SINT = 6, + TFMT5_4_4_4_4_UNORM = 8, + TFMT5_5_5_5_1_UNORM = 10, + TFMT5_5_6_5_UNORM = 14, + TFMT5_8_8_UNORM = 15, + TFMT5_8_8_SNORM = 16, + TFMT5_8_8_UINT = 17, + TFMT5_8_8_SINT = 18, + TFMT5_L8_A8_UNORM = 19, + TFMT5_16_UNORM = 21, + TFMT5_16_SNORM = 22, + TFMT5_16_FLOAT = 23, + TFMT5_16_UINT = 24, + TFMT5_16_SINT = 25, + TFMT5_8_8_8_8_UNORM = 48, + TFMT5_8_8_8_UNORM = 49, + TFMT5_8_8_8_8_SNORM = 
50, + TFMT5_8_8_8_8_UINT = 51, + TFMT5_8_8_8_8_SINT = 52, + TFMT5_9_9_9_E5_FLOAT = 53, + TFMT5_10_10_10_2_UNORM = 54, + TFMT5_10_10_10_2_UINT = 58, + TFMT5_11_11_10_FLOAT = 66, + TFMT5_16_16_UNORM = 67, + TFMT5_16_16_SNORM = 68, + TFMT5_16_16_FLOAT = 69, + TFMT5_16_16_UINT = 70, + TFMT5_16_16_SINT = 71, + TFMT5_32_FLOAT = 74, + TFMT5_32_UINT = 75, + TFMT5_32_SINT = 76, + TFMT5_16_16_16_16_UNORM = 96, + TFMT5_16_16_16_16_SNORM = 97, + TFMT5_16_16_16_16_FLOAT = 98, + TFMT5_16_16_16_16_UINT = 99, + TFMT5_16_16_16_16_SINT = 100, + TFMT5_32_32_FLOAT = 103, + TFMT5_32_32_UINT = 104, + TFMT5_32_32_SINT = 105, + TFMT5_32_32_32_UINT = 114, + TFMT5_32_32_32_SINT = 115, + TFMT5_32_32_32_FLOAT = 116, + TFMT5_32_32_32_32_FLOAT = 130, + TFMT5_32_32_32_32_UINT = 131, + TFMT5_32_32_32_32_SINT = 132, + TFMT5_X8Z24_UNORM = 160, + TFMT5_ETC2_RG11_UNORM = 171, + TFMT5_ETC2_RG11_SNORM = 172, + TFMT5_ETC2_R11_UNORM = 173, + TFMT5_ETC2_R11_SNORM = 174, + TFMT5_ETC1 = 175, + TFMT5_ETC2_RGB8 = 176, + TFMT5_ETC2_RGBA8 = 177, + TFMT5_ETC2_RGB8A1 = 178, + TFMT5_DXT1 = 179, + TFMT5_DXT3 = 180, + TFMT5_DXT5 = 181, + TFMT5_RGTC1_UNORM = 183, + TFMT5_RGTC1_SNORM = 184, + TFMT5_RGTC2_UNORM = 187, + TFMT5_RGTC2_SNORM = 188, + TFMT5_BPTC_UFLOAT = 190, + TFMT5_BPTC_FLOAT = 191, + TFMT5_BPTC = 192, + TFMT5_ASTC_4x4 = 193, + TFMT5_ASTC_5x4 = 194, + TFMT5_ASTC_5x5 = 195, + TFMT5_ASTC_6x5 = 196, + TFMT5_ASTC_6x6 = 197, + TFMT5_ASTC_8x5 = 198, + TFMT5_ASTC_8x6 = 199, + TFMT5_ASTC_8x8 = 200, + TFMT5_ASTC_10x5 = 201, + TFMT5_ASTC_10x6 = 202, + TFMT5_ASTC_10x8 = 203, + TFMT5_ASTC_10x10 = 204, + TFMT5_ASTC_12x10 = 205, + TFMT5_ASTC_12x12 = 206, + TFMT5_NONE = 255, +}; + +enum a5xx_depth_format { + DEPTH5_NONE = 0, + DEPTH5_16 = 1, + DEPTH5_24_8 = 2, + DEPTH5_32 = 4, +}; + +enum a5xx_blit_buf { + BLIT_MRT0 = 0, + BLIT_MRT1 = 1, + BLIT_MRT2 = 2, + BLIT_MRT3 = 3, + BLIT_MRT4 = 4, + BLIT_MRT5 = 5, + BLIT_MRT6 = 6, + BLIT_MRT7 = 7, + BLIT_ZS = 8, + BLIT_S = 9, +}; + +enum a5xx_cp_perfcounter_select { + PERF_CP_ALWAYS_COUNT = 0, + PERF_CP_BUSY_GFX_CORE_IDLE = 1, + PERF_CP_BUSY_CYCLES = 2, + PERF_CP_PFP_IDLE = 3, + PERF_CP_PFP_BUSY_WORKING = 4, + PERF_CP_PFP_STALL_CYCLES_ANY = 5, + PERF_CP_PFP_STARVE_CYCLES_ANY = 6, + PERF_CP_PFP_ICACHE_MISS = 7, + PERF_CP_PFP_ICACHE_HIT = 8, + PERF_CP_PFP_MATCH_PM4_PKT_PROFILE = 9, + PERF_CP_ME_BUSY_WORKING = 10, + PERF_CP_ME_IDLE = 11, + PERF_CP_ME_STARVE_CYCLES_ANY = 12, + PERF_CP_ME_FIFO_EMPTY_PFP_IDLE = 13, + PERF_CP_ME_FIFO_EMPTY_PFP_BUSY = 14, + PERF_CP_ME_FIFO_FULL_ME_BUSY = 15, + PERF_CP_ME_FIFO_FULL_ME_NON_WORKING = 16, + PERF_CP_ME_STALL_CYCLES_ANY = 17, + PERF_CP_ME_ICACHE_MISS = 18, + PERF_CP_ME_ICACHE_HIT = 19, + PERF_CP_NUM_PREEMPTIONS = 20, + PERF_CP_PREEMPTION_REACTION_DELAY = 21, + PERF_CP_PREEMPTION_SWITCH_OUT_TIME = 22, + PERF_CP_PREEMPTION_SWITCH_IN_TIME = 23, + PERF_CP_DEAD_DRAWS_IN_BIN_RENDER = 24, + PERF_CP_PREDICATED_DRAWS_KILLED = 25, + PERF_CP_MODE_SWITCH = 26, + PERF_CP_ZPASS_DONE = 27, + PERF_CP_CONTEXT_DONE = 28, + PERF_CP_CACHE_FLUSH = 29, + PERF_CP_LONG_PREEMPTIONS = 30, +}; + +enum a5xx_rbbm_perfcounter_select { + PERF_RBBM_ALWAYS_COUNT = 0, + PERF_RBBM_ALWAYS_ON = 1, + PERF_RBBM_TSE_BUSY = 2, + PERF_RBBM_RAS_BUSY = 3, + PERF_RBBM_PC_DCALL_BUSY = 4, + PERF_RBBM_PC_VSD_BUSY = 5, + PERF_RBBM_STATUS_MASKED = 6, + PERF_RBBM_COM_BUSY = 7, + PERF_RBBM_DCOM_BUSY = 8, + PERF_RBBM_VBIF_BUSY = 9, + PERF_RBBM_VSC_BUSY = 10, + PERF_RBBM_TESS_BUSY = 11, + PERF_RBBM_UCHE_BUSY = 12, + PERF_RBBM_HLSQ_BUSY = 13, +}; + +enum a5xx_pc_perfcounter_select { + PERF_PC_BUSY_CYCLES = 0, + 
+	PERF_PC_WORKING_CYCLES = 1,
+	PERF_PC_STALL_CYCLES_VFD = 2,
+	PERF_PC_STALL_CYCLES_TSE = 3,
+	PERF_PC_STALL_CYCLES_VPC = 4,
+	PERF_PC_STALL_CYCLES_UCHE = 5,
+	PERF_PC_STALL_CYCLES_TESS = 6,
+	PERF_PC_STALL_CYCLES_TSE_ONLY = 7,
+	PERF_PC_STALL_CYCLES_VPC_ONLY = 8,
+	PERF_PC_PASS1_TF_STALL_CYCLES = 9,
+	PERF_PC_STARVE_CYCLES_FOR_INDEX = 10,
+	PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR = 11,
+	PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM = 12,
+	PERF_PC_STARVE_CYCLES_FOR_POSITION = 13,
+	PERF_PC_STARVE_CYCLES_DI = 14,
+	PERF_PC_VIS_STREAMS_LOADED = 15,
+	PERF_PC_INSTANCES = 16,
+	PERF_PC_VPC_PRIMITIVES = 17,
+	PERF_PC_DEAD_PRIM = 18,
+	PERF_PC_LIVE_PRIM = 19,
+	PERF_PC_VERTEX_HITS = 20,
+	PERF_PC_IA_VERTICES = 21,
+	PERF_PC_IA_PRIMITIVES = 22,
+	PERF_PC_GS_PRIMITIVES = 23,
+	PERF_PC_HS_INVOCATIONS = 24,
+	PERF_PC_DS_INVOCATIONS = 25,
+	PERF_PC_VS_INVOCATIONS = 26,
+	PERF_PC_GS_INVOCATIONS = 27,
+	PERF_PC_DS_PRIMITIVES = 28,
+	PERF_PC_VPC_POS_DATA_TRANSACTION = 29,
+	PERF_PC_3D_DRAWCALLS = 30,
+	PERF_PC_2D_DRAWCALLS = 31,
+	PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS = 32,
+	PERF_TESS_BUSY_CYCLES = 33,
+	PERF_TESS_WORKING_CYCLES = 34,
+	PERF_TESS_STALL_CYCLES_PC = 35,
+	PERF_TESS_STARVE_CYCLES_PC = 36,
+};
+
+enum a5xx_vfd_perfcounter_select {
+	PERF_VFD_BUSY_CYCLES = 0,
+	PERF_VFD_STALL_CYCLES_UCHE = 1,
+	PERF_VFD_STALL_CYCLES_VPC_ALLOC = 2,
+	PERF_VFD_STALL_CYCLES_MISS_VB = 3,
+	PERF_VFD_STALL_CYCLES_MISS_Q = 4,
+	PERF_VFD_STALL_CYCLES_SP_INFO = 5,
+	PERF_VFD_STALL_CYCLES_SP_ATTR = 6,
+	PERF_VFD_STALL_CYCLES_VFDP_VB = 7,
+	PERF_VFD_STALL_CYCLES_VFDP_Q = 8,
+	PERF_VFD_DECODER_PACKER_STALL = 9,
+	PERF_VFD_STARVE_CYCLES_UCHE = 10,
+	PERF_VFD_RBUFFER_FULL = 11,
+	PERF_VFD_ATTR_INFO_FIFO_FULL = 12,
+	PERF_VFD_DECODED_ATTRIBUTE_BYTES = 13,
+	PERF_VFD_NUM_ATTRIBUTES = 14,
+	PERF_VFD_INSTRUCTIONS = 15,
+	PERF_VFD_UPPER_SHADER_FIBERS = 16,
+	PERF_VFD_LOWER_SHADER_FIBERS = 17,
+	PERF_VFD_MODE_0_FIBERS = 18,
+	PERF_VFD_MODE_1_FIBERS = 19,
+	PERF_VFD_MODE_2_FIBERS = 20,
+	PERF_VFD_MODE_3_FIBERS = 21,
+	PERF_VFD_MODE_4_FIBERS = 22,
+	PERF_VFD_TOTAL_VERTICES = 23,
+	PERF_VFD_NUM_ATTR_MISS = 24,
+	PERF_VFD_1_BURST_REQ = 25,
+	PERF_VFDP_STALL_CYCLES_VFD = 26,
+	PERF_VFDP_STALL_CYCLES_VFD_INDEX = 27,
+	PERF_VFDP_STALL_CYCLES_VFD_PROG = 28,
+	PERF_VFDP_STARVE_CYCLES_PC = 29,
+	PERF_VFDP_VS_STAGE_32_WAVES = 30,
+};
+
+enum a5xx_hlsq_perfcounter_select {
+	PERF_HLSQ_BUSY_CYCLES = 0,
+	PERF_HLSQ_STALL_CYCLES_UCHE = 1,
+	PERF_HLSQ_STALL_CYCLES_SP_STATE = 2,
+	PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE = 3,
+	PERF_HLSQ_UCHE_LATENCY_CYCLES = 4,
+	PERF_HLSQ_UCHE_LATENCY_COUNT = 5,
+	PERF_HLSQ_FS_STAGE_32_WAVES = 6,
+	PERF_HLSQ_FS_STAGE_64_WAVES = 7,
+	PERF_HLSQ_QUADS = 8,
+	PERF_HLSQ_SP_STATE_COPY_TRANS_FS_STAGE = 9,
+	PERF_HLSQ_SP_STATE_COPY_TRANS_VS_STAGE = 10,
+	PERF_HLSQ_TP_STATE_COPY_TRANS_FS_STAGE = 11,
+	PERF_HLSQ_TP_STATE_COPY_TRANS_VS_STAGE = 12,
+	PERF_HLSQ_CS_INVOCATIONS = 13,
+	PERF_HLSQ_COMPUTE_DRAWCALLS = 14,
+};
+
+enum a5xx_vpc_perfcounter_select {
+	PERF_VPC_BUSY_CYCLES = 0,
+	PERF_VPC_WORKING_CYCLES = 1,
+	PERF_VPC_STALL_CYCLES_UCHE = 2,
+	PERF_VPC_STALL_CYCLES_VFD_WACK = 3,
+	PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC = 4,
+	PERF_VPC_STALL_CYCLES_PC = 5,
+	PERF_VPC_STALL_CYCLES_SP_LM = 6,
+	PERF_VPC_POS_EXPORT_STALL_CYCLES = 7,
+	PERF_VPC_STARVE_CYCLES_SP = 8,
+	PERF_VPC_STARVE_CYCLES_LRZ = 9,
+	PERF_VPC_PC_PRIMITIVES = 10,
+	PERF_VPC_SP_COMPONENTS = 11,
+	PERF_VPC_SP_LM_PRIMITIVES = 12,
+	PERF_VPC_SP_LM_COMPONENTS = 13,
+	PERF_VPC_SP_LM_DWORDS = 14,
+	PERF_VPC_STREAMOUT_COMPONENTS = 15,
+	PERF_VPC_GRANT_PHASES = 16,
+};
+
+enum a5xx_tse_perfcounter_select {
+	PERF_TSE_BUSY_CYCLES = 0,
+	PERF_TSE_CLIPPING_CYCLES = 1,
+	PERF_TSE_STALL_CYCLES_RAS = 2,
+	PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE = 3,
+	PERF_TSE_STALL_CYCLES_LRZ_ZPLANE = 4,
+	PERF_TSE_STARVE_CYCLES_PC = 5,
+	PERF_TSE_INPUT_PRIM = 6,
+	PERF_TSE_INPUT_NULL_PRIM = 7,
+	PERF_TSE_TRIVAL_REJ_PRIM = 8,
+	PERF_TSE_CLIPPED_PRIM = 9,
+	PERF_TSE_ZERO_AREA_PRIM = 10,
+	PERF_TSE_FACENESS_CULLED_PRIM = 11,
+	PERF_TSE_ZERO_PIXEL_PRIM = 12,
+	PERF_TSE_OUTPUT_NULL_PRIM = 13,
+	PERF_TSE_OUTPUT_VISIBLE_PRIM = 14,
+	PERF_TSE_CINVOCATION = 15,
+	PERF_TSE_CPRIMITIVES = 16,
+	PERF_TSE_2D_INPUT_PRIM = 17,
+	PERF_TSE_2D_ALIVE_CLCLES = 18,
+};
+
+enum a5xx_ras_perfcounter_select {
+	PERF_RAS_BUSY_CYCLES = 0,
+	PERF_RAS_SUPERTILE_ACTIVE_CYCLES = 1,
+	PERF_RAS_STALL_CYCLES_LRZ = 2,
+	PERF_RAS_STARVE_CYCLES_TSE = 3,
+	PERF_RAS_SUPER_TILES = 4,
+	PERF_RAS_8X4_TILES = 5,
+	PERF_RAS_MASKGEN_ACTIVE = 6,
+	PERF_RAS_FULLY_COVERED_SUPER_TILES = 7,
+	PERF_RAS_FULLY_COVERED_8X4_TILES = 8,
+	PERF_RAS_PRIM_KILLED_INVISILBE = 9,
+};
+
+enum a5xx_lrz_perfcounter_select {
+	PERF_LRZ_BUSY_CYCLES = 0,
+	PERF_LRZ_STARVE_CYCLES_RAS = 1,
+	PERF_LRZ_STALL_CYCLES_RB = 2,
+	PERF_LRZ_STALL_CYCLES_VSC = 3,
+	PERF_LRZ_STALL_CYCLES_VPC = 4,
+	PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH = 5,
+	PERF_LRZ_STALL_CYCLES_UCHE = 6,
+	PERF_LRZ_LRZ_READ = 7,
+	PERF_LRZ_LRZ_WRITE = 8,
+	PERF_LRZ_READ_LATENCY = 9,
+	PERF_LRZ_MERGE_CACHE_UPDATING = 10,
+	PERF_LRZ_PRIM_KILLED_BY_MASKGEN = 11,
+	PERF_LRZ_PRIM_KILLED_BY_LRZ = 12,
+	PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ = 13,
+	PERF_LRZ_FULL_8X8_TILES = 14,
+	PERF_LRZ_PARTIAL_8X8_TILES = 15,
+	PERF_LRZ_TILE_KILLED = 16,
+	PERF_LRZ_TOTAL_PIXEL = 17,
+	PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ = 18,
+};
+
+enum a5xx_uche_perfcounter_select {
+	PERF_UCHE_BUSY_CYCLES = 0,
+	PERF_UCHE_STALL_CYCLES_VBIF = 1,
+	PERF_UCHE_VBIF_LATENCY_CYCLES = 2,
+	PERF_UCHE_VBIF_LATENCY_SAMPLES = 3,
+	PERF_UCHE_VBIF_READ_BEATS_TP = 4,
+	PERF_UCHE_VBIF_READ_BEATS_VFD = 5,
+	PERF_UCHE_VBIF_READ_BEATS_HLSQ = 6,
+	PERF_UCHE_VBIF_READ_BEATS_LRZ = 7,
+	PERF_UCHE_VBIF_READ_BEATS_SP = 8,
+	PERF_UCHE_READ_REQUESTS_TP = 9,
+	PERF_UCHE_READ_REQUESTS_VFD = 10,
+	PERF_UCHE_READ_REQUESTS_HLSQ = 11,
+	PERF_UCHE_READ_REQUESTS_LRZ = 12,
+	PERF_UCHE_READ_REQUESTS_SP = 13,
+	PERF_UCHE_WRITE_REQUESTS_LRZ = 14,
+	PERF_UCHE_WRITE_REQUESTS_SP = 15,
+	PERF_UCHE_WRITE_REQUESTS_VPC = 16,
+	PERF_UCHE_WRITE_REQUESTS_VSC = 17,
+	PERF_UCHE_EVICTS = 18,
+	PERF_UCHE_BANK_REQ0 = 19,
+	PERF_UCHE_BANK_REQ1 = 20,
+	PERF_UCHE_BANK_REQ2 = 21,
+	PERF_UCHE_BANK_REQ3 = 22,
+	PERF_UCHE_BANK_REQ4 = 23,
+	PERF_UCHE_BANK_REQ5 = 24,
+	PERF_UCHE_BANK_REQ6 = 25,
+	PERF_UCHE_BANK_REQ7 = 26,
+	PERF_UCHE_VBIF_READ_BEATS_CH0 = 27,
+	PERF_UCHE_VBIF_READ_BEATS_CH1 = 28,
+	PERF_UCHE_GMEM_READ_BEATS = 29,
+	PERF_UCHE_FLAG_COUNT = 30,
+};
+
+enum a5xx_tp_perfcounter_select {
+	PERF_TP_BUSY_CYCLES = 0,
+	PERF_TP_STALL_CYCLES_UCHE = 1,
+	PERF_TP_LATENCY_CYCLES = 2,
+	PERF_TP_LATENCY_TRANS = 3,
+	PERF_TP_FLAG_CACHE_REQUEST_SAMPLES = 4,
+	PERF_TP_FLAG_CACHE_REQUEST_LATENCY = 5,
+	PERF_TP_L1_CACHELINE_REQUESTS = 6,
+	PERF_TP_L1_CACHELINE_MISSES = 7,
+	PERF_TP_SP_TP_TRANS = 8,
+	PERF_TP_TP_SP_TRANS = 9,
+	PERF_TP_OUTPUT_PIXELS = 10,
+	PERF_TP_FILTER_WORKLOAD_16BIT = 11,
+	PERF_TP_FILTER_WORKLOAD_32BIT = 12,
+	PERF_TP_QUADS_RECEIVED = 13,
+	PERF_TP_QUADS_OFFSET = 14,
+	PERF_TP_QUADS_SHADOW = 15,
+	PERF_TP_QUADS_ARRAY = 16,
+	PERF_TP_QUADS_GRADIENT = 17,
+	PERF_TP_QUADS_1D = 18,
+	PERF_TP_QUADS_2D = 19,
+	PERF_TP_QUADS_BUFFER = 20,
+	PERF_TP_QUADS_3D = 21,
+	PERF_TP_QUADS_CUBE = 22,
+	PERF_TP_STATE_CACHE_REQUESTS = 23,
+	PERF_TP_STATE_CACHE_MISSES = 24,
+	PERF_TP_DIVERGENT_QUADS_RECEIVED = 25,
+	PERF_TP_BINDLESS_STATE_CACHE_REQUESTS = 26,
+	PERF_TP_BINDLESS_STATE_CACHE_MISSES = 27,
+	PERF_TP_PRT_NON_RESIDENT_EVENTS = 28,
+	PERF_TP_OUTPUT_PIXELS_POINT = 29,
+	PERF_TP_OUTPUT_PIXELS_BILINEAR = 30,
+	PERF_TP_OUTPUT_PIXELS_MIP = 31,
+	PERF_TP_OUTPUT_PIXELS_ANISO = 32,
+	PERF_TP_OUTPUT_PIXELS_ZERO_LOD = 33,
+	PERF_TP_FLAG_CACHE_REQUESTS = 34,
+	PERF_TP_FLAG_CACHE_MISSES = 35,
+	PERF_TP_L1_5_L2_REQUESTS = 36,
+	PERF_TP_2D_OUTPUT_PIXELS = 37,
+	PERF_TP_2D_OUTPUT_PIXELS_POINT = 38,
+	PERF_TP_2D_OUTPUT_PIXELS_BILINEAR = 39,
+	PERF_TP_2D_FILTER_WORKLOAD_16BIT = 40,
+	PERF_TP_2D_FILTER_WORKLOAD_32BIT = 41,
+};
+
+enum a5xx_sp_perfcounter_select {
+	PERF_SP_BUSY_CYCLES = 0,
+	PERF_SP_ALU_WORKING_CYCLES = 1,
+	PERF_SP_EFU_WORKING_CYCLES = 2,
+	PERF_SP_STALL_CYCLES_VPC = 3,
+	PERF_SP_STALL_CYCLES_TP = 4,
+	PERF_SP_STALL_CYCLES_UCHE = 5,
+	PERF_SP_STALL_CYCLES_RB = 6,
+	PERF_SP_SCHEDULER_NON_WORKING = 7,
+	PERF_SP_WAVE_CONTEXTS = 8,
+	PERF_SP_WAVE_CONTEXT_CYCLES = 9,
+	PERF_SP_FS_STAGE_WAVE_CYCLES = 10,
+	PERF_SP_FS_STAGE_WAVE_SAMPLES = 11,
+	PERF_SP_VS_STAGE_WAVE_CYCLES = 12,
+	PERF_SP_VS_STAGE_WAVE_SAMPLES = 13,
+	PERF_SP_FS_STAGE_DURATION_CYCLES = 14,
+	PERF_SP_VS_STAGE_DURATION_CYCLES = 15,
+	PERF_SP_WAVE_CTRL_CYCLES = 16,
+	PERF_SP_WAVE_LOAD_CYCLES = 17,
+	PERF_SP_WAVE_EMIT_CYCLES = 18,
+	PERF_SP_WAVE_NOP_CYCLES = 19,
+	PERF_SP_WAVE_WAIT_CYCLES = 20,
+	PERF_SP_WAVE_FETCH_CYCLES = 21,
+	PERF_SP_WAVE_IDLE_CYCLES = 22,
+	PERF_SP_WAVE_END_CYCLES = 23,
+	PERF_SP_WAVE_LONG_SYNC_CYCLES = 24,
+	PERF_SP_WAVE_SHORT_SYNC_CYCLES = 25,
+	PERF_SP_WAVE_JOIN_CYCLES = 26,
+	PERF_SP_LM_LOAD_INSTRUCTIONS = 27,
+	PERF_SP_LM_STORE_INSTRUCTIONS = 28,
+	PERF_SP_LM_ATOMICS = 29,
+	PERF_SP_GM_LOAD_INSTRUCTIONS = 30,
+	PERF_SP_GM_STORE_INSTRUCTIONS = 31,
+	PERF_SP_GM_ATOMICS = 32,
+	PERF_SP_VS_STAGE_TEX_INSTRUCTIONS = 33,
+	PERF_SP_VS_STAGE_CFLOW_INSTRUCTIONS = 34,
+	PERF_SP_VS_STAGE_EFU_INSTRUCTIONS = 35,
+	PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 36,
+	PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 37,
+	PERF_SP_FS_STAGE_TEX_INSTRUCTIONS = 38,
+	PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS = 39,
+	PERF_SP_FS_STAGE_EFU_INSTRUCTIONS = 40,
+	PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 41,
+	PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 42,
+	PERF_SP_FS_STAGE_BARY_INSTRUCTIONS = 43,
+	PERF_SP_VS_INSTRUCTIONS = 44,
+	PERF_SP_FS_INSTRUCTIONS = 45,
+	PERF_SP_ADDR_LOCK_COUNT = 46,
+	PERF_SP_UCHE_READ_TRANS = 47,
+	PERF_SP_UCHE_WRITE_TRANS = 48,
+	PERF_SP_EXPORT_VPC_TRANS = 49,
+	PERF_SP_EXPORT_RB_TRANS = 50,
+	PERF_SP_PIXELS_KILLED = 51,
+	PERF_SP_ICL1_REQUESTS = 52,
+	PERF_SP_ICL1_MISSES = 53,
+	PERF_SP_ICL0_REQUESTS = 54,
+	PERF_SP_ICL0_MISSES = 55,
+	PERF_SP_HS_INSTRUCTIONS = 56,
+	PERF_SP_DS_INSTRUCTIONS = 57,
+	PERF_SP_GS_INSTRUCTIONS = 58,
+	PERF_SP_CS_INSTRUCTIONS = 59,
+	PERF_SP_GPR_READ = 60,
+	PERF_SP_GPR_WRITE = 61,
+	PERF_SP_LM_CH0_REQUESTS = 62,
+	PERF_SP_LM_CH1_REQUESTS = 63,
+	PERF_SP_LM_BANK_CONFLICTS = 64,
+};
+
+enum a5xx_rb_perfcounter_select {
+	PERF_RB_BUSY_CYCLES = 0,
+	PERF_RB_STALL_CYCLES_CCU = 1,
+	PERF_RB_STALL_CYCLES_HLSQ = 2,
+	PERF_RB_STALL_CYCLES_FIFO0_FULL = 3,
+	PERF_RB_STALL_CYCLES_FIFO1_FULL = 4,
+	PERF_RB_STALL_CYCLES_FIFO2_FULL = 5,
+	PERF_RB_STARVE_CYCLES_SP = 6,
+	PERF_RB_STARVE_CYCLES_LRZ_TILE = 7,
+	PERF_RB_STARVE_CYCLES_CCU = 8,
+	PERF_RB_STARVE_CYCLES_Z_PLANE = 9,
+	PERF_RB_STARVE_CYCLES_BARY_PLANE = 10,
+	PERF_RB_Z_WORKLOAD = 11,
+	PERF_RB_HLSQ_ACTIVE = 12,
+	PERF_RB_Z_READ = 13,
+	PERF_RB_Z_WRITE = 14,
+	PERF_RB_C_READ = 15,
+	PERF_RB_C_WRITE = 16,
+	PERF_RB_TOTAL_PASS = 17,
+	PERF_RB_Z_PASS = 18,
+	PERF_RB_Z_FAIL = 19,
+	PERF_RB_S_FAIL = 20,
+	PERF_RB_BLENDED_FXP_COMPONENTS = 21,
+	PERF_RB_BLENDED_FP16_COMPONENTS = 22,
+	RB_RESERVED = 23,
+	PERF_RB_2D_ALIVE_CYCLES = 24,
+	PERF_RB_2D_STALL_CYCLES_A2D = 25,
+	PERF_RB_2D_STARVE_CYCLES_SRC = 26,
+	PERF_RB_2D_STARVE_CYCLES_SP = 27,
+	PERF_RB_2D_STARVE_CYCLES_DST = 28,
+	PERF_RB_2D_VALID_PIXELS = 29,
+};
+
+enum a5xx_rb_samples_perfcounter_select {
+	TOTAL_SAMPLES = 0,
+	ZPASS_SAMPLES = 1,
+	ZFAIL_SAMPLES = 2,
+	SFAIL_SAMPLES = 3,
+};
+
+enum a5xx_vsc_perfcounter_select {
+	PERF_VSC_BUSY_CYCLES = 0,
+	PERF_VSC_WORKING_CYCLES = 1,
+	PERF_VSC_STALL_CYCLES_UCHE = 2,
+	PERF_VSC_EOT_NUM = 3,
+};
+
+enum a5xx_ccu_perfcounter_select {
+	PERF_CCU_BUSY_CYCLES = 0,
+	PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN = 1,
+	PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN = 2,
+	PERF_CCU_STARVE_CYCLES_FLAG_RETURN = 3,
+	PERF_CCU_DEPTH_BLOCKS = 4,
+	PERF_CCU_COLOR_BLOCKS = 5,
+	PERF_CCU_DEPTH_BLOCK_HIT = 6,
+	PERF_CCU_COLOR_BLOCK_HIT = 7,
+	PERF_CCU_PARTIAL_BLOCK_READ = 8,
+	PERF_CCU_GMEM_READ = 9,
+	PERF_CCU_GMEM_WRITE = 10,
+	PERF_CCU_DEPTH_READ_FLAG0_COUNT = 11,
+	PERF_CCU_DEPTH_READ_FLAG1_COUNT = 12,
+	PERF_CCU_DEPTH_READ_FLAG2_COUNT = 13,
+	PERF_CCU_DEPTH_READ_FLAG3_COUNT = 14,
+	PERF_CCU_DEPTH_READ_FLAG4_COUNT = 15,
+	PERF_CCU_COLOR_READ_FLAG0_COUNT = 16,
+	PERF_CCU_COLOR_READ_FLAG1_COUNT = 17,
+	PERF_CCU_COLOR_READ_FLAG2_COUNT = 18,
+	PERF_CCU_COLOR_READ_FLAG3_COUNT = 19,
+	PERF_CCU_COLOR_READ_FLAG4_COUNT = 20,
+	PERF_CCU_2D_BUSY_CYCLES = 21,
+	PERF_CCU_2D_RD_REQ = 22,
+	PERF_CCU_2D_WR_REQ = 23,
+	PERF_CCU_2D_REORDER_STARVE_CYCLES = 24,
+	PERF_CCU_2D_PIXELS = 25,
+};
+
+enum a5xx_cmp_perfcounter_select {
+	PERF_CMPDECMP_STALL_CYCLES_VBIF = 0,
+	PERF_CMPDECMP_VBIF_LATENCY_CYCLES = 1,
+	PERF_CMPDECMP_VBIF_LATENCY_SAMPLES = 2,
+	PERF_CMPDECMP_VBIF_READ_DATA_CCU = 3,
+	PERF_CMPDECMP_VBIF_WRITE_DATA_CCU = 4,
+	PERF_CMPDECMP_VBIF_READ_REQUEST = 5,
+	PERF_CMPDECMP_VBIF_WRITE_REQUEST = 6,
+	PERF_CMPDECMP_VBIF_READ_DATA = 7,
+	PERF_CMPDECMP_VBIF_WRITE_DATA = 8,
+	PERF_CMPDECMP_FLAG_FETCH_CYCLES = 9,
+	PERF_CMPDECMP_FLAG_FETCH_SAMPLES = 10,
+	PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT = 11,
+	PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT = 12,
+	PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT = 13,
+	PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT = 14,
+	PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT = 15,
+	PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT = 16,
+	PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT = 17,
+	PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT = 18,
+	PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ = 19,
+	PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR = 20,
+	PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN = 21,
+	PERF_CMPDECMP_2D_RD_DATA = 22,
+	PERF_CMPDECMP_2D_WR_DATA = 23,
+};
+
+enum a5xx_vbif_perfcounter_select {
+	AXI_READ_REQUESTS_ID_0 = 0,
+	AXI_READ_REQUESTS_ID_1 = 1,
+	AXI_READ_REQUESTS_ID_2 = 2,
+	AXI_READ_REQUESTS_ID_3 = 3,
+	AXI_READ_REQUESTS_ID_4 = 4,
+	AXI_READ_REQUESTS_ID_5 = 5,
+	AXI_READ_REQUESTS_ID_6 = 6,
+	AXI_READ_REQUESTS_ID_7 = 7,
+	AXI_READ_REQUESTS_ID_8 = 8,
+	AXI_READ_REQUESTS_ID_9 = 9,
+	AXI_READ_REQUESTS_ID_10 = 10,
+	AXI_READ_REQUESTS_ID_11 = 11,
+	AXI_READ_REQUESTS_ID_12 = 12,
+	AXI_READ_REQUESTS_ID_13 = 13,
+	AXI_READ_REQUESTS_ID_14 = 14,
+	AXI_READ_REQUESTS_ID_15 = 15,
+	AXI0_READ_REQUESTS_TOTAL = 16,
+	AXI1_READ_REQUESTS_TOTAL = 17,
+	AXI2_READ_REQUESTS_TOTAL = 18,
+	AXI3_READ_REQUESTS_TOTAL = 19,
+	AXI_READ_REQUESTS_TOTAL = 20,
+	AXI_WRITE_REQUESTS_ID_0 = 21,
+	AXI_WRITE_REQUESTS_ID_1 = 22,
+	AXI_WRITE_REQUESTS_ID_2 = 23,
+	AXI_WRITE_REQUESTS_ID_3 = 24,
+	AXI_WRITE_REQUESTS_ID_4 = 25,
+	AXI_WRITE_REQUESTS_ID_5 = 26,
+	AXI_WRITE_REQUESTS_ID_6 = 27,
+	AXI_WRITE_REQUESTS_ID_7 = 28,
+	AXI_WRITE_REQUESTS_ID_8 = 29,
+	AXI_WRITE_REQUESTS_ID_9 = 30,
+	AXI_WRITE_REQUESTS_ID_10 = 31,
+	AXI_WRITE_REQUESTS_ID_11 = 32,
+	AXI_WRITE_REQUESTS_ID_12 = 33,
+	AXI_WRITE_REQUESTS_ID_13 = 34,
+	AXI_WRITE_REQUESTS_ID_14 = 35,
+	AXI_WRITE_REQUESTS_ID_15 = 36,
+	AXI0_WRITE_REQUESTS_TOTAL = 37,
+	AXI1_WRITE_REQUESTS_TOTAL = 38,
+	AXI2_WRITE_REQUESTS_TOTAL = 39,
+	AXI3_WRITE_REQUESTS_TOTAL = 40,
+	AXI_WRITE_REQUESTS_TOTAL = 41,
+	AXI_TOTAL_REQUESTS = 42,
+	AXI_READ_DATA_BEATS_ID_0 = 43,
+	AXI_READ_DATA_BEATS_ID_1 = 44,
+	AXI_READ_DATA_BEATS_ID_2 = 45,
+	AXI_READ_DATA_BEATS_ID_3 = 46,
+	AXI_READ_DATA_BEATS_ID_4 = 47,
+	AXI_READ_DATA_BEATS_ID_5 = 48,
+	AXI_READ_DATA_BEATS_ID_6 = 49,
+	AXI_READ_DATA_BEATS_ID_7 = 50,
+	AXI_READ_DATA_BEATS_ID_8 = 51,
+	AXI_READ_DATA_BEATS_ID_9 = 52,
+	AXI_READ_DATA_BEATS_ID_10 = 53,
+	AXI_READ_DATA_BEATS_ID_11 = 54,
+	AXI_READ_DATA_BEATS_ID_12 = 55,
+	AXI_READ_DATA_BEATS_ID_13 = 56,
+	AXI_READ_DATA_BEATS_ID_14 = 57,
+	AXI_READ_DATA_BEATS_ID_15 = 58,
+	AXI0_READ_DATA_BEATS_TOTAL = 59,
+	AXI1_READ_DATA_BEATS_TOTAL = 60,
+	AXI2_READ_DATA_BEATS_TOTAL = 61,
+	AXI3_READ_DATA_BEATS_TOTAL = 62,
+	AXI_READ_DATA_BEATS_TOTAL = 63,
+	AXI_WRITE_DATA_BEATS_ID_0 = 64,
+	AXI_WRITE_DATA_BEATS_ID_1 = 65,
+	AXI_WRITE_DATA_BEATS_ID_2 = 66,
+	AXI_WRITE_DATA_BEATS_ID_3 = 67,
+	AXI_WRITE_DATA_BEATS_ID_4 = 68,
+	AXI_WRITE_DATA_BEATS_ID_5 = 69,
+	AXI_WRITE_DATA_BEATS_ID_6 = 70,
+	AXI_WRITE_DATA_BEATS_ID_7 = 71,
+	AXI_WRITE_DATA_BEATS_ID_8 = 72,
+	AXI_WRITE_DATA_BEATS_ID_9 = 73,
+	AXI_WRITE_DATA_BEATS_ID_10 = 74,
+	AXI_WRITE_DATA_BEATS_ID_11 = 75,
+	AXI_WRITE_DATA_BEATS_ID_12 = 76,
+	AXI_WRITE_DATA_BEATS_ID_13 = 77,
+	AXI_WRITE_DATA_BEATS_ID_14 = 78,
+	AXI_WRITE_DATA_BEATS_ID_15 = 79,
+	AXI0_WRITE_DATA_BEATS_TOTAL = 80,
+	AXI1_WRITE_DATA_BEATS_TOTAL = 81,
+	AXI2_WRITE_DATA_BEATS_TOTAL = 82,
+	AXI3_WRITE_DATA_BEATS_TOTAL = 83,
+	AXI_WRITE_DATA_BEATS_TOTAL = 84,
+	AXI_DATA_BEATS_TOTAL = 85,
+};
+
+enum a5xx_tex_filter {
+	A5XX_TEX_NEAREST = 0,
+	A5XX_TEX_LINEAR = 1,
+	A5XX_TEX_ANISO = 2,
+};
+
+enum a5xx_tex_clamp {
+	A5XX_TEX_REPEAT = 0,
+	A5XX_TEX_CLAMP_TO_EDGE = 1,
+	A5XX_TEX_MIRROR_REPEAT = 2,
+	A5XX_TEX_CLAMP_TO_BORDER = 3,
+	A5XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a5xx_tex_aniso {
+	A5XX_TEX_ANISO_1 = 0,
+	A5XX_TEX_ANISO_2 = 1,
+	A5XX_TEX_ANISO_4 = 2,
+	A5XX_TEX_ANISO_8 = 3,
+	A5XX_TEX_ANISO_16 = 4,
+};
+
+enum a5xx_tex_swiz {
+	A5XX_TEX_X = 0,
+	A5XX_TEX_Y = 1,
+	A5XX_TEX_Z = 2,
+	A5XX_TEX_W = 3,
+	A5XX_TEX_ZERO = 4,
+	A5XX_TEX_ONE = 5,
+};
+
+enum a5xx_tex_type {
+	A5XX_TEX_1D = 0,
+	A5XX_TEX_2D = 1,
+	A5XX_TEX_CUBE = 2,
+	A5XX_TEX_3D = 3,
+	A5XX_TEX_BUFFER = 4,
+};
+
+#define A5XX_INT0_RBBM_GPU_IDLE 0x00000001
+#define A5XX_INT0_RBBM_AHB_ERROR 0x00000002
+#define A5XX_INT0_RBBM_TRANSFER_TIMEOUT 0x00000004
+#define A5XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A5XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A5XX_INT0_RBBM_ETS_MS_TIMEOUT 0x00000020
+#define A5XX_INT0_RBBM_ATB_ASYNC_OVERFLOW 0x00000040
+#define A5XX_INT0_RBBM_GPC_ERROR 0x00000080
+#define A5XX_INT0_CP_SW 0x00000100
+#define A5XX_INT0_CP_HW_ERROR 0x00000200
+#define A5XX_INT0_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A5XX_INT0_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A5XX_INT0_CP_CCU_RESOLVE_TS 0x00001000
+#define A5XX_INT0_CP_IB2 0x00002000
+#define A5XX_INT0_CP_IB1 0x00004000
+#define A5XX_INT0_CP_RB 0x00008000
+#define A5XX_INT0_CP_UNUSED_1 0x00010000
+#define A5XX_INT0_CP_RB_DONE_TS 0x00020000
+#define A5XX_INT0_CP_WT_DONE_TS 0x00040000
+#define A5XX_INT0_UNKNOWN_1 0x00080000
+#define A5XX_INT0_CP_CACHE_FLUSH_TS 0x00100000
+#define A5XX_INT0_UNUSED_2 0x00200000
+#define A5XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A5XX_INT0_MISC_HANG_DETECT 0x00800000
+#define A5XX_INT0_UCHE_OOB_ACCESS 0x01000000
+#define A5XX_INT0_UCHE_TRAP_INTR 0x02000000
+#define A5XX_INT0_DEBBUS_INTR_0 0x04000000
+#define A5XX_INT0_DEBBUS_INTR_1 0x08000000
+#define A5XX_INT0_GPMU_VOLTAGE_DROOP 0x10000000
+#define A5XX_INT0_GPMU_FIRMWARE 0x20000000
+#define A5XX_INT0_ISDB_CPU_IRQ 0x40000000
+#define A5XX_INT0_ISDB_UNDER_DEBUG 0x80000000
+#define A5XX_CP_INT_CP_OPCODE_ERROR 0x00000001
+#define A5XX_CP_INT_CP_RESERVED_BIT_ERROR 0x00000002
+#define A5XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004
+#define A5XX_CP_INT_CP_DMA_ERROR 0x00000008
+#define A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010
+#define A5XX_CP_INT_CP_AHB_ERROR 0x00000020
+#define REG_A5XX_CP_RB_BASE 0x00000800
+
+#define REG_A5XX_CP_RB_BASE_HI 0x00000801
+
+#define REG_A5XX_CP_RB_CNTL 0x00000802
+
+#define REG_A5XX_CP_RB_RPTR_ADDR 0x00000804
+
+#define REG_A5XX_CP_RB_RPTR_ADDR_HI 0x00000805
+
+#define REG_A5XX_CP_RB_RPTR 0x00000806
+
+#define REG_A5XX_CP_RB_WPTR 0x00000807
+
+#define REG_A5XX_CP_PFP_STAT_ADDR 0x00000808
+
+#define REG_A5XX_CP_PFP_STAT_DATA 0x00000809
+
+#define REG_A5XX_CP_DRAW_STATE_ADDR 0x0000080b
+
+#define REG_A5XX_CP_DRAW_STATE_DATA 0x0000080c
+
+#define REG_A5XX_CP_ME_NRT_ADDR_LO 0x0000080d
+
+#define REG_A5XX_CP_ME_NRT_ADDR_HI 0x0000080e
+
+#define REG_A5XX_CP_ME_NRT_DATA 0x00000810
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_LO 0x00000817
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_HI 0x00000818
+
+#define REG_A5XX_CP_CRASH_DUMP_CNTL 0x00000819
+
+#define REG_A5XX_CP_ME_STAT_ADDR 0x0000081a
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_1 0x0000081f
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_2 0x00000820
+
+#define REG_A5XX_CP_ROQ_DBG_ADDR 0x00000821
+
+#define REG_A5XX_CP_ROQ_DBG_DATA 0x00000822
+
+#define REG_A5XX_CP_MEQ_DBG_ADDR 0x00000823
+
+#define REG_A5XX_CP_MEQ_DBG_DATA 0x00000824
+
+#define REG_A5XX_CP_MEQ_THRESHOLDS 0x00000825
+
+#define REG_A5XX_CP_MERCIU_SIZE 0x00000826
+
+#define REG_A5XX_CP_MERCIU_DBG_ADDR 0x00000827
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_1 0x00000828
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_2 0x00000829
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_ADDR 0x0000082a
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_DATA 0x0000082b
+
+#define REG_A5XX_CP_ME_UCODE_DBG_ADDR 0x0000082f
+
+#define REG_A5XX_CP_ME_UCODE_DBG_DATA 0x00000830
+
+#define REG_A5XX_CP_CNTL 0x00000831
+
+#define REG_A5XX_CP_PFP_ME_CNTL 0x00000832
+
+#define REG_A5XX_CP_CHICKEN_DBG 0x00000833
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_LO 0x00000835
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_HI 0x00000836
+
+#define REG_A5XX_CP_ME_INSTR_BASE_LO 0x00000838
+
+#define REG_A5XX_CP_ME_INSTR_BASE_HI 0x00000839
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_CNTL 0x0000083b
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO 0x0000083c
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI 0x0000083d
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO 0x0000083e
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_HI 0x0000083f
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO 0x00000840
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI 0x00000841
+
+#define REG_A5XX_CP_ADDR_MODE_CNTL 0x00000860
+
+#define REG_A5XX_CP_ME_STAT_DATA 0x00000b14
+
+#define REG_A5XX_CP_WFI_PEND_CTR 0x00000b15
+
+#define REG_A5XX_CP_INTERRUPT_STATUS 0x00000b18
+
+#define REG_A5XX_CP_HW_FAULT 0x00000b1a
+
+#define REG_A5XX_CP_PROTECT_STATUS 0x00000b1c
+
+#define REG_A5XX_CP_IB1_BASE 0x00000b1f
+
+#define REG_A5XX_CP_IB1_BASE_HI 0x00000b20
+
+#define REG_A5XX_CP_IB1_BUFSZ 0x00000b21
+
+#define REG_A5XX_CP_IB2_BASE 0x00000b22
+
+#define REG_A5XX_CP_IB2_BASE_HI 0x00000b23
+
+#define REG_A5XX_CP_IB2_BUFSZ 0x00000b24
+
+static inline uint32_t REG_A5XX_CP_SCRATCH(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_PROTECT(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A5XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+	return ((val) << A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A5XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A5XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000
+#define A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24
+static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+	return ((val) << A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A5XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A5XX_CP_PROTECT_REG_TRAP_WRITE__MASK 0x20000000
+#define A5XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT 29
+static inline uint32_t A5XX_CP_PROTECT_REG_TRAP_WRITE(uint32_t val)
+{
+	return ((val) << A5XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT) & A5XX_CP_PROTECT_REG_TRAP_WRITE__MASK;
+}
+#define A5XX_CP_PROTECT_REG_TRAP_READ__MASK 0x40000000
+#define A5XX_CP_PROTECT_REG_TRAP_READ__SHIFT 30
+static inline uint32_t A5XX_CP_PROTECT_REG_TRAP_READ(uint32_t val)
+{
+	return ((val) << A5XX_CP_PROTECT_REG_TRAP_READ__SHIFT) & A5XX_CP_PROTECT_REG_TRAP_READ__MASK;
+}
+
+#define REG_A5XX_CP_PROTECT_CNTL 0x000008a0
+
+#define REG_A5XX_CP_AHB_FAULT 0x00000b1b
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_0 0x00000bb0
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_1 0x00000bb1
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_2 0x00000bb2
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_3 0x00000bb3
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_4 0x00000bb4
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_5 0x00000bb5
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_6 0x00000bb6
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_7 0x00000bb7
+
+#define REG_A5XX_VSC_ADDR_MODE_CNTL 0x00000bc1
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_0 0x00000bba
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_1 0x00000bbb
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_2 0x00000bbc
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_3 0x00000bbd
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_A 0x00000004
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_B 0x00000005
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_C 0x00000006
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_D 0x00000007
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLT 0x00000008
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLM 0x00000009
+
+#define REG_A5XX_RBBM_CFG_DEBBUS_CTLTM_ENABLE_SHIFT 0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPL 0x0000000a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPE 0x0000000b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_0 0x0000000c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_1 0x0000000d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_2 0x0000000e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_3 0x0000000f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_0 0x00000010
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_1 0x00000011
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_2 0x00000012
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_3 0x00000013
+
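The A5XX_CP_PROTECT_REG_* pairs above follow this header's usual bitfield convention: a __MASK/__SHIFT pair plus an inline helper that shifts a value into place and masks it. A minimal sketch of how the helpers compose into one register value; the base address, window size, and slot number below are made-up illustration values, not taken from the driver:

/* Hypothetical protect entry: trap reads and writes on a 2^5-register
 * window starting at dword offset 0x800.  Illustration only. */
static inline uint32_t example_cp_protect_entry(void)
{
	return A5XX_CP_PROTECT_REG_BASE_ADDR(0x800) |
	       A5XX_CP_PROTECT_REG_MASK_LEN(5) |
	       A5XX_CP_PROTECT_REG_TRAP_WRITE(1) |
	       A5XX_CP_PROTECT_REG_TRAP_READ(1);
}

Such a value would be written to the register whose dword offset REG_A5XX_CP_PROTECT_REG(n) returns for some slot n; the indexed static-inline helpers above exist precisely to compute those per-slot offsets.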
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_0 0x00000014
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_1 0x00000015
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_0 0x00000016
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_1 0x00000017
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_2 0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_3 0x00000019
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_0 0x0000001a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_1 0x0000001b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_2 0x0000001c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_3 0x0000001d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_NIBBLEE 0x0000001e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC0 0x0000001f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC1 0x00000020
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADREG 0x00000021
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IDX 0x00000022
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CLRC 0x00000023
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADIVT 0x00000024
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000002f
+
+#define REG_A5XX_RBBM_INT_CLEAR_CMD 0x00000037
+
+#define REG_A5XX_RBBM_INT_0_MASK 0x00000038
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001
+#define A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR 0x00000002
+#define A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT 0x00000004
+#define A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT 0x00000020
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW 0x00000040
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080
+#define A5XX_RBBM_INT_0_MASK_CP_SW 0x00000100
+#define A5XX_RBBM_INT_0_MASK_CP_HW_ERROR 0x00000200
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS 0x00001000
+#define A5XX_RBBM_INT_0_MASK_CP_IB2 0x00002000
+#define A5XX_RBBM_INT_0_MASK_CP_IB1 0x00004000
+#define A5XX_RBBM_INT_0_MASK_CP_RB 0x00008000
+#define A5XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000
+#define A5XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000
+#define A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT 0x00800000
+#define A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000
+#define A5XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP 0x10000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_FIRMWARE 0x20000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000
+
+#define REG_A5XX_RBBM_AHB_DBG_CNTL 0x0000003f
+
+#define REG_A5XX_RBBM_EXT_VBIF_DBG_CNTL 0x00000041
+
+#define REG_A5XX_RBBM_SW_RESET_CMD 0x00000043
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046
+
+#define REG_A5XX_RBBM_DBG_LO_HI_GPIO 0x00000048
+
+#define REG_A5XX_RBBM_EXT_TRACE_BUS_CNTL 0x00000049
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP0 0x0000004a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP1 0x0000004b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP2 0x0000004c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP3 0x0000004d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP0 0x0000004e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP1 0x0000004f
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP2 0x00000050
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP3 0x00000051
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP0 0x00000052
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP1 0x00000053
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP2 0x00000054
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP3 0x00000055
+
+#define REG_A5XX_RBBM_READ_AHB_THROUGH_DBG 0x00000059
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_UCHE 0x0000005a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_UCHE 0x0000005b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_UCHE 0x0000005c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL4_UCHE 0x0000005d
+
+#define REG_A5XX_RBBM_CLOCK_HYST_UCHE 0x0000005e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_UCHE 0x0000005f
+
+#define REG_A5XX_RBBM_CLOCK_MODE_GPC 0x00000060
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPC 0x00000061
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPC 0x00000062
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00000063
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x00000064
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00000065
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_HLSQ 0x00000066
+
+#define REG_A5XX_RBBM_CLOCK_CNTL 0x00000067
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP0 0x00000068
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP1 0x00000069
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP2 0x0000006a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP3 0x0000006b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP0 0x0000006c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP1 0x0000006d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP2 0x0000006e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP3 0x0000006f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP0 0x00000070
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP1 0x00000071
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP2 0x00000072
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP3 0x00000073
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP0 0x00000074
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP1 0x00000075
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP2 0x00000076
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP3 0x00000077
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB0 0x00000078
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB1 0x00000079
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB2 0x0000007a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB3 0x0000007b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB0 0x0000007c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB1 0x0000007d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB2 0x0000007e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB3 0x0000007f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RAC 0x00000080
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RAC 0x00000081
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU0 0x00000082
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU1 0x00000083
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU2 0x00000084
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU3 0x00000085
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0 0x00000086
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1 0x00000087
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2 0x00000088
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3 0x00000089
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RAC 0x0000008a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RAC 0x0000008b
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0 0x0000008c
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1 0x0000008d
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2 0x0000008e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3 0x0000008f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_VFD 0x00000090
+
+#define REG_A5XX_RBBM_CLOCK_MODE_VFD 0x00000091
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_VFD 0x00000092
+
+#define REG_A5XX_RBBM_AHB_CNTL0 0x00000093
+
+#define REG_A5XX_RBBM_AHB_CNTL1 0x00000094
+
+#define REG_A5XX_RBBM_AHB_CNTL2 0x00000095
+
+#define REG_A5XX_RBBM_AHB_CMD 0x00000096
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11 0x0000009c
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12 0x0000009d
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13 0x0000009e
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14 0x0000009f
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15 0x000000a0
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16 0x000000a1
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17 0x000000a2
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18 0x000000a3
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP0 0x000000a4
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP1 0x000000a5
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP2 0x000000a6
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP3 0x000000a7
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP0 0x000000a8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP1 0x000000a9
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP2 0x000000aa
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP3 0x000000ab
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP0 0x000000ac
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP1 0x000000ad
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP2 0x000000ae
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP3 0x000000af
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP0 0x000000b0
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP1 0x000000b1
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP2 0x000000b2
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP3 0x000000b3
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP0 0x000000b4
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP1 0x000000b5
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP2 0x000000b6
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP3 0x000000b7
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP0 0x000000b8
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP1 0x000000b9
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP2 0x000000ba
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP3 0x000000bb
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_GPMU 0x000000c8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPMU 0x000000c9
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPMU 0x000000ca
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_LO 0x000003a0
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_HI 0x000003a1
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_LO 0x000003a2
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_HI 0x000003a3
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_LO 0x000003a4
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_HI 0x000003a5
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_LO 0x000003a6
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_HI 0x000003a7
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_LO 0x000003a8
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_HI 0x000003a9
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_LO 0x000003aa
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_HI 0x000003ab
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_LO 0x000003ac
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_HI 0x000003ad
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_LO 0x000003ae
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_HI 0x000003af
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_LO 0x000003b0
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_HI 0x000003b1
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_LO 0x000003b2
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_HI 0x000003b3
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_LO 0x000003b4
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_HI 0x000003b5
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_LO 0x000003b6
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_HI 0x000003b7
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_LO 0x000003b8
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_HI 0x000003b9
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_LO 0x000003ba
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_HI 0x000003bb
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_LO 0x000003bc
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_HI 0x000003bd
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_LO 0x000003be
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_HI 0x000003bf
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_LO 0x000003c0
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_HI 0x000003c1
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_LO 0x000003c2
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_HI 0x000003c3
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_LO 0x000003c4
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_HI 0x000003c5
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_LO 0x000003c6
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_HI 0x000003c7
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_LO 0x000003c8
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_HI 0x000003c9
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_LO 0x000003ca
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_HI 0x000003cb
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_LO 0x000003cc
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_HI 0x000003cd
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_LO 0x000003ce
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_HI 0x000003cf
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_LO 0x000003d0
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_HI 0x000003d1
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_LO 0x000003d2
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_HI 0x000003d3
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_LO 0x000003d4
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_HI 0x000003d5
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_LO 0x000003d6
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_HI 0x000003d7
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_LO 0x000003d8
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_HI 0x000003d9
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_LO 0x000003da
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_HI 0x000003db
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_LO 0x000003dc
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_HI 0x000003dd
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_LO 0x000003de
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_HI 0x000003df
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_LO 0x000003e0
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_HI 0x000003e1
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_LO 0x000003e2
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_HI 0x000003e3
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_LO 0x000003e4
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_HI 0x000003e5
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_LO 0x000003e6
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_HI 0x000003e7
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_LO 0x000003e8
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_HI 0x000003e9
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_LO 0x000003ea
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_HI 0x000003eb
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_LO 0x000003ec
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_HI 0x000003ed
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_LO 0x000003ee
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_HI 0x000003ef
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_LO 0x000003f0
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_HI 0x000003f1
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_LO 0x000003f2
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_HI 0x000003f3
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_LO 0x000003f4
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_HI 0x000003f5
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_LO 0x000003f6
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_HI 0x000003f7
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_LO 0x000003f8
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_HI 0x000003f9
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_LO 0x000003fa
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_HI 0x000003fb
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_LO 0x000003fc
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_HI 0x000003fd
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_LO 0x000003fe
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_HI 0x000003ff
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_LO 0x00000400
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_HI 0x00000401
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_LO 0x00000402
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_HI 0x00000403
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_LO 0x00000404
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_HI 0x00000405
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_LO 0x00000406
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_HI 0x00000407
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_LO 0x00000408
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_HI 0x00000409
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_LO 0x0000040a
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_HI 0x0000040b
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_LO 0x0000040c
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_HI 0x0000040d
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_LO 0x0000040e
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_HI 0x0000040f
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_LO 0x00000410
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_HI 0x00000411
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_LO 0x00000412
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_HI 0x00000413
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_LO 0x00000414
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_HI 0x00000415
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_LO 0x00000416
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_HI 0x00000417
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_LO 0x00000418
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_HI 0x00000419
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_LO 0x0000041a
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_HI 0x0000041b
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_LO 0x0000041c
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_HI 0x0000041d
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_LO 0x0000041e
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_HI 0x0000041f
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_LO 0x00000420
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_HI 0x00000421
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_LO 0x00000422
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_HI 0x00000423
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_LO 0x00000424
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_HI 0x00000425
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_LO 0x00000426
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_HI 0x00000427
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_LO 0x00000428
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_HI 0x00000429
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_LO 0x0000042a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_HI 0x0000042b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_LO 0x0000042c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_HI 0x0000042d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_LO 0x0000042e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_HI 0x0000042f
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_LO 0x00000430
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_HI 0x00000431
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_LO 0x00000432
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_HI 0x00000433
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_LO 0x00000434
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_HI 0x00000435
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_LO 0x00000436
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_HI 0x00000437
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_LO 0x00000438
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_HI 0x00000439
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_LO 0x0000043a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_HI 0x0000043b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_LO 0x0000043c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_HI 0x0000043d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_LO 0x0000043e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_HI 0x0000043f
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_LO 0x00000440
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_HI 0x00000441
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_LO 0x00000442
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_HI 0x00000443
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_LO 0x00000444
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_HI 0x00000445
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_LO 0x00000446
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_HI 0x00000447
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_LO 0x00000448
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_HI 0x00000449
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_LO 0x0000044a
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_HI 0x0000044b
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_LO 0x0000044c
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_HI 0x0000044d
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_LO 0x0000044e
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_HI 0x0000044f
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_LO 0x00000450
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_HI 0x00000451
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_LO 0x00000452
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_HI 0x00000453
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_LO 0x00000454
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_HI 0x00000455
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_LO 0x00000456
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_HI 0x00000457
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_LO 0x00000458
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_HI 0x00000459
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_LO 0x0000045a
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_HI 0x0000045b
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_LO 0x0000045c
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_HI 0x0000045d
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_LO 0x0000045e
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_HI 0x0000045f
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_LO 0x00000460
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_HI 0x00000461
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_LO 0x00000462
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_HI 0x00000463
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 0x0000046b
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 0x0000046c
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 0x0000046d
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000046e
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_LO 0x000004d2
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_HI 0x000004d3
+
+#define REG_A5XX_RBBM_STATUS 0x000004f5
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__MASK 0x80000000
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__SHIFT 31
+static inline uint32_t A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__SHIFT) & A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__MASK;
+}
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__MASK 0x40000000
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__SHIFT 30
+static inline uint32_t A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__SHIFT) & A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__MASK;
+}
+#define A5XX_RBBM_STATUS_HLSQ_BUSY__MASK 0x20000000
+#define A5XX_RBBM_STATUS_HLSQ_BUSY__SHIFT 29
+static inline uint32_t A5XX_RBBM_STATUS_HLSQ_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_HLSQ_BUSY__SHIFT) & A5XX_RBBM_STATUS_HLSQ_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VSC_BUSY__MASK 0x10000000
+#define A5XX_RBBM_STATUS_VSC_BUSY__SHIFT 28
+static inline uint32_t A5XX_RBBM_STATUS_VSC_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_VSC_BUSY__SHIFT) & A5XX_RBBM_STATUS_VSC_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_TPL1_BUSY__MASK 0x08000000
+#define A5XX_RBBM_STATUS_TPL1_BUSY__SHIFT 27
+static inline uint32_t A5XX_RBBM_STATUS_TPL1_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_TPL1_BUSY__SHIFT) & A5XX_RBBM_STATUS_TPL1_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_SP_BUSY__MASK 0x04000000
+#define A5XX_RBBM_STATUS_SP_BUSY__SHIFT 26
+static inline uint32_t A5XX_RBBM_STATUS_SP_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_SP_BUSY__SHIFT) & A5XX_RBBM_STATUS_SP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_UCHE_BUSY__MASK 0x02000000
+#define A5XX_RBBM_STATUS_UCHE_BUSY__SHIFT 25
+static inline uint32_t A5XX_RBBM_STATUS_UCHE_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_UCHE_BUSY__SHIFT) & A5XX_RBBM_STATUS_UCHE_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VPC_BUSY__MASK 0x01000000
+#define A5XX_RBBM_STATUS_VPC_BUSY__SHIFT 24
+static inline uint32_t A5XX_RBBM_STATUS_VPC_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_VPC_BUSY__SHIFT) & A5XX_RBBM_STATUS_VPC_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VFDP_BUSY__MASK 0x00800000
+#define A5XX_RBBM_STATUS_VFDP_BUSY__SHIFT 23
+static inline uint32_t A5XX_RBBM_STATUS_VFDP_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_VFDP_BUSY__SHIFT) & A5XX_RBBM_STATUS_VFDP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VFD_BUSY__MASK 0x00400000
+#define A5XX_RBBM_STATUS_VFD_BUSY__SHIFT 22
+static inline uint32_t A5XX_RBBM_STATUS_VFD_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_VFD_BUSY__SHIFT) & A5XX_RBBM_STATUS_VFD_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_TESS_BUSY__MASK 0x00200000
+#define A5XX_RBBM_STATUS_TESS_BUSY__SHIFT 21
+static inline uint32_t A5XX_RBBM_STATUS_TESS_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_TESS_BUSY__SHIFT) & A5XX_RBBM_STATUS_TESS_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_PC_VSD_BUSY__MASK 0x00100000
+#define A5XX_RBBM_STATUS_PC_VSD_BUSY__SHIFT 20
+static inline uint32_t A5XX_RBBM_STATUS_PC_VSD_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_PC_VSD_BUSY__SHIFT) & A5XX_RBBM_STATUS_PC_VSD_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_PC_DCALL_BUSY__MASK 0x00080000
+#define A5XX_RBBM_STATUS_PC_DCALL_BUSY__SHIFT 19
+static inline uint32_t A5XX_RBBM_STATUS_PC_DCALL_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_PC_DCALL_BUSY__SHIFT) & A5XX_RBBM_STATUS_PC_DCALL_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__MASK 0x00040000
+#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__SHIFT 18
+static inline uint32_t A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__SHIFT) & A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_DCOM_BUSY__MASK 0x00020000
+#define A5XX_RBBM_STATUS_DCOM_BUSY__SHIFT 17
+static inline uint32_t A5XX_RBBM_STATUS_DCOM_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_DCOM_BUSY__SHIFT) & A5XX_RBBM_STATUS_DCOM_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_COM_BUSY__MASK 0x00010000
+#define A5XX_RBBM_STATUS_COM_BUSY__SHIFT 16
+static inline uint32_t A5XX_RBBM_STATUS_COM_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_COM_BUSY__SHIFT) & A5XX_RBBM_STATUS_COM_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_LRZ_BUZY__MASK 0x00008000
+#define A5XX_RBBM_STATUS_LRZ_BUZY__SHIFT 15
+static inline uint32_t A5XX_RBBM_STATUS_LRZ_BUZY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_LRZ_BUZY__SHIFT) & A5XX_RBBM_STATUS_LRZ_BUZY__MASK;
+}
+#define A5XX_RBBM_STATUS_A2D_DSP_BUSY__MASK 0x00004000
+#define A5XX_RBBM_STATUS_A2D_DSP_BUSY__SHIFT 14
+static inline uint32_t A5XX_RBBM_STATUS_A2D_DSP_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_A2D_DSP_BUSY__SHIFT) & A5XX_RBBM_STATUS_A2D_DSP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CCUFCHE_BUSY__MASK 0x00002000
+#define A5XX_RBBM_STATUS_CCUFCHE_BUSY__SHIFT 13
+static inline uint32_t A5XX_RBBM_STATUS_CCUFCHE_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_CCUFCHE_BUSY__SHIFT) & A5XX_RBBM_STATUS_CCUFCHE_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_RB_BUSY__MASK 0x00001000
+#define A5XX_RBBM_STATUS_RB_BUSY__SHIFT 12
+static inline uint32_t A5XX_RBBM_STATUS_RB_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_RB_BUSY__SHIFT) & A5XX_RBBM_STATUS_RB_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_RAS_BUSY__MASK 0x00000800
+#define A5XX_RBBM_STATUS_RAS_BUSY__SHIFT 11
+static inline uint32_t A5XX_RBBM_STATUS_RAS_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_RAS_BUSY__SHIFT) & A5XX_RBBM_STATUS_RAS_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_TSE_BUSY__MASK 0x00000400
+#define A5XX_RBBM_STATUS_TSE_BUSY__SHIFT 10
+static inline uint32_t A5XX_RBBM_STATUS_TSE_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_TSE_BUSY__SHIFT) & A5XX_RBBM_STATUS_TSE_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VBIF_BUSY__MASK 0x00000200
+#define A5XX_RBBM_STATUS_VBIF_BUSY__SHIFT 9
+static inline uint32_t A5XX_RBBM_STATUS_VBIF_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_VBIF_BUSY__SHIFT) & A5XX_RBBM_STATUS_VBIF_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__MASK 0x00000100
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__SHIFT 8
+static inline uint32_t A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__SHIFT) & A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__MASK 0x00000080
+#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__SHIFT 7
+static inline uint32_t A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__SHIFT) & A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_BUSY__MASK 0x00000040
+#define A5XX_RBBM_STATUS_CP_BUSY__SHIFT 6
+static inline uint32_t A5XX_RBBM_STATUS_CP_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_CP_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__MASK 0x00000020
+#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__SHIFT 5
+static inline uint32_t A5XX_RBBM_STATUS_GPMU_MASTER_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__SHIFT) & A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_CRASH_BUSY__MASK 0x00000010
+#define A5XX_RBBM_STATUS_CP_CRASH_BUSY__SHIFT 4
+static inline uint32_t A5XX_RBBM_STATUS_CP_CRASH_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_CP_CRASH_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_CRASH_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_ETS_BUSY__MASK 0x00000008
+#define A5XX_RBBM_STATUS_CP_ETS_BUSY__SHIFT 3
+static inline uint32_t A5XX_RBBM_STATUS_CP_ETS_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_CP_ETS_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_ETS_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_PFP_BUSY__MASK 0x00000004
+#define A5XX_RBBM_STATUS_CP_PFP_BUSY__SHIFT 2
+static inline uint32_t A5XX_RBBM_STATUS_CP_PFP_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_CP_PFP_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_PFP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_ME_BUSY__MASK 0x00000002
+#define A5XX_RBBM_STATUS_CP_ME_BUSY__SHIFT 1
+static inline uint32_t A5XX_RBBM_STATUS_CP_ME_BUSY(uint32_t val)
+{
+	return ((val) << A5XX_RBBM_STATUS_CP_ME_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_ME_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_HI_BUSY 0x00000001
+
+#define REG_A5XX_RBBM_STATUS3 0x00000530
+#define A5XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT 0x01000000
+
+#define REG_A5XX_RBBM_INT_0_STATUS 0x000004e1
+
+#define REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS 0x000004f0
+
+#define REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS 0x000004f1
+
+#define REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS 0x000004f3
+
+#define REG_A5XX_RBBM_AHB_ERROR_STATUS 0x000004f4
+
+#define REG_A5XX_RBBM_PERFCTR_CNTL 0x00000464
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD0 0x00000465
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD1 0x00000466
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD2 0x00000467
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD3 0x00000468
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000469
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x0000046a
+
+#define REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000046f
+
+#define REG_A5XX_RBBM_AHB_ERROR 0x000004ed
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_EVENT_LOGIC 0x00000504
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OVER 0x00000505
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT0 0x00000506
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT1 0x00000507
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT2 0x00000508
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT3 0x00000509
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT4 0x0000050a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT5 0x0000050b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_ADDR 0x0000050c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF0 0x0000050d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF1 0x0000050e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF2 0x0000050f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF3 0x00000510
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF4 0x00000511
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR0 0x00000512
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR1 0x00000513
+
+#define REG_A5XX_RBBM_ISDB_CNT 0x00000533
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CONFIG 0x0000f000
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CNTL 0x0000f400
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO 0x0000f800
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI 0x0000f801
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0x0000f802
+
+#define REG_A5XX_RBBM_SECVID_TSB_CNTL 0x0000f803
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_LO 0x0000f804
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_HI 0x0000f805
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_LO 0x0000f806
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_HI 0x0000f807
+
+#define REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810
+
+#define REG_A5XX_VSC_BIN_SIZE 0x00000bc2
+#define A5XX_VSC_BIN_SIZE_WIDTH__MASK 0x000000ff
+#define A5XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A5XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+	return ((val >> 5) << A5XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A5XX_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A5XX_VSC_BIN_SIZE_HEIGHT__MASK 0x0001fe00
+#define A5XX_VSC_BIN_SIZE_HEIGHT__SHIFT 9
+static inline uint32_t A5XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+	return ((val >> 5) << A5XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A5XX_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A5XX_VSC_SIZE_ADDRESS_LO 0x00000bc3
+
+#define REG_A5XX_VSC_SIZE_ADDRESS_HI 0x00000bc4
+
+#define REG_A5XX_UNKNOWN_0BC5 0x00000bc5
+
+#define REG_A5XX_UNKNOWN_0BC6 0x00000bc6
+
+static inline uint32_t REG_A5XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000bd0 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000bd0 + 0x1*i0; }
+#define A5XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
+#define A5XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0
+static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
+{
+	return ((val) << A5XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_X__MASK;
+}
+#define A5XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00
+#define A5XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10
+static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val)
+{
+	return ((val) << A5XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_Y__MASK;
+}
+#define A5XX_VSC_PIPE_CONFIG_REG_W__MASK 0x00f00000
+#define A5XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20
+static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_W(uint32_t val)
+{
+	return ((val) << A5XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_W__MASK;
+}
+#define A5XX_VSC_PIPE_CONFIG_REG_H__MASK 0x0f000000
+#define A5XX_VSC_PIPE_CONFIG_REG_H__SHIFT 24
+static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
+{
+	return ((val) << A5XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_H__MASK;
+}
+
+static inline uint32_t REG_A5XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000be0 + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VSC_PIPE_DATA_ADDRESS_LO(uint32_t i0) { return 0x00000be0 + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VSC_PIPE_DATA_ADDRESS_HI(uint32_t i0) { return 0x00000be1 + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c00 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0x00000c00 + 0x1*i0; }
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_0 0x00000c60
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_1 0x00000c61
+
+#define REG_A5XX_VSC_RESOLVE_CNTL 0x00000cdd
+#define A5XX_VSC_RESOLVE_CNTL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_VSC_RESOLVE_CNTL_X__MASK 0x00007fff
+#define A5XX_VSC_RESOLVE_CNTL_X__SHIFT 0
+static inline uint32_t A5XX_VSC_RESOLVE_CNTL_X(uint32_t val)
+{
+	return ((val) << A5XX_VSC_RESOLVE_CNTL_X__SHIFT) & A5XX_VSC_RESOLVE_CNTL_X__MASK;
+}
+#define A5XX_VSC_RESOLVE_CNTL_Y__MASK 0x7fff0000
+#define A5XX_VSC_RESOLVE_CNTL_Y__SHIFT 16
+static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
+{
+	return ((val) << A5XX_VSC_RESOLVE_CNTL_Y__SHIFT) & A5XX_VSC_RESOLVE_CNTL_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_ADDR_MODE_CNTL 0x00000c81
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c90
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c91
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c92
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c93
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c94
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c95
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c96
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c97
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_0 0x00000c98
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_1 0x00000c99
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_2 0x00000c9a
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_3 0x00000c9b
+
+#define REG_A5XX_RB_DBG_ECO_CNTL 0x00000cc4
+
+#define REG_A5XX_RB_ADDR_MODE_CNTL 0x00000cc5
+
+#define REG_A5XX_RB_MODE_CNTL 0x00000cc6
+
+#define REG_A5XX_RB_CCU_CNTL 0x00000cc7
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_0 0x00000cd0
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_1 0x00000cd1
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_2 0x00000cd2
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_3 0x00000cd3
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_4 0x00000cd4
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_5 0x00000cd5
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_6 0x00000cd6
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_7 0x00000cd7
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_0 0x00000cd8
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_1 0x00000cd9
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_2 0x00000cda
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_3 0x00000cdb
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_0 0x00000ce0
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_1 0x00000ce1
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_2 0x00000ce2
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_3 0x00000ce3
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_0 0x00000ce4
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_1 0x00000ce5
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_0 0x00000cec
+
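One detail worth flagging in the span above: the A5XX_VSC_BIN_SIZE helpers apply (val >> 5) before shifting into the field, i.e. they take a dimension in pixels and store it in 32-pixel units, unlike the other packing helpers in this header, which store the value as given. A minimal sketch (the 64x64 bin size is an illustrative value, not taken from the driver):

/* Pack a hypothetical 64x64-pixel bin: each field stores size/32,
 * so this encodes WIDTH=2, HEIGHT=2.  Illustration only. */
uint32_t bin_cfg = A5XX_VSC_BIN_SIZE_WIDTH(64) |
		   A5XX_VSC_BIN_SIZE_HEIGHT(64);

For the single-bit status fields above there is no decode helper; a read value is simply tested against the mask, e.g. (status & A5XX_RBBM_STATUS_CP_BUSY__MASK).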
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_1 0x00000ced + +#define REG_A5XX_RB_PERFCTR_CMP_SEL_2 0x00000cee + +#define REG_A5XX_RB_PERFCTR_CMP_SEL_3 0x00000cef + +#define REG_A5XX_PC_DBG_ECO_CNTL 0x00000d00 +#define A5XX_PC_DBG_ECO_CNTL_TWOPASSUSEWFI 0x00000100 + +#define REG_A5XX_PC_ADDR_MODE_CNTL 0x00000d01 + +#define REG_A5XX_PC_MODE_CNTL 0x00000d02 + +#define REG_A5XX_PC_INDEX_BUF_LO 0x00000d04 + +#define REG_A5XX_PC_INDEX_BUF_HI 0x00000d05 + +#define REG_A5XX_PC_START_INDEX 0x00000d06 + +#define REG_A5XX_PC_MAX_INDEX 0x00000d07 + +#define REG_A5XX_PC_TESSFACTOR_ADDR_LO 0x00000d08 + +#define REG_A5XX_PC_TESSFACTOR_ADDR_HI 0x00000d09 + +#define REG_A5XX_PC_PERFCTR_PC_SEL_0 0x00000d10 + +#define REG_A5XX_PC_PERFCTR_PC_SEL_1 0x00000d11 + +#define REG_A5XX_PC_PERFCTR_PC_SEL_2 0x00000d12 + +#define REG_A5XX_PC_PERFCTR_PC_SEL_3 0x00000d13 + +#define REG_A5XX_PC_PERFCTR_PC_SEL_4 0x00000d14 + +#define REG_A5XX_PC_PERFCTR_PC_SEL_5 0x00000d15 + +#define REG_A5XX_PC_PERFCTR_PC_SEL_6 0x00000d16 + +#define REG_A5XX_PC_PERFCTR_PC_SEL_7 0x00000d17 + +#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_0 0x00000e00 + +#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_1 0x00000e01 + +#define REG_A5XX_HLSQ_DBG_ECO_CNTL 0x00000e04 + +#define REG_A5XX_HLSQ_ADDR_MODE_CNTL 0x00000e05 + +#define REG_A5XX_HLSQ_MODE_CNTL 0x00000e06 + +#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e10 + +#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e11 + +#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e12 + +#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e13 + +#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e14 + +#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e15 + +#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e16 + +#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e17 + +#define REG_A5XX_HLSQ_SPTP_RDSEL 0x00000f08 + +#define REG_A5XX_HLSQ_DBG_READ_SEL 0x0000bc00 + +#define REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000a000 + +#define REG_A5XX_VFD_ADDR_MODE_CNTL 0x00000e41 + +#define REG_A5XX_VFD_MODE_CNTL 0x00000e42 + +#define REG_A5XX_VFD_PERFCTR_VFD_SEL_0 0x00000e50 + +#define REG_A5XX_VFD_PERFCTR_VFD_SEL_1 0x00000e51 + +#define REG_A5XX_VFD_PERFCTR_VFD_SEL_2 0x00000e52 + +#define REG_A5XX_VFD_PERFCTR_VFD_SEL_3 0x00000e53 + +#define REG_A5XX_VFD_PERFCTR_VFD_SEL_4 0x00000e54 + +#define REG_A5XX_VFD_PERFCTR_VFD_SEL_5 0x00000e55 + +#define REG_A5XX_VFD_PERFCTR_VFD_SEL_6 0x00000e56 + +#define REG_A5XX_VFD_PERFCTR_VFD_SEL_7 0x00000e57 + +#define REG_A5XX_VPC_DBG_ECO_CNTL 0x00000e60 +#define A5XX_VPC_DBG_ECO_CNTL_ALLFLATOPTDIS 0x00000400 + +#define REG_A5XX_VPC_ADDR_MODE_CNTL 0x00000e61 + +#define REG_A5XX_VPC_MODE_CNTL 0x00000e62 +#define A5XX_VPC_MODE_CNTL_BINNING_PASS 0x00000001 + +#define REG_A5XX_VPC_PERFCTR_VPC_SEL_0 0x00000e64 + +#define REG_A5XX_VPC_PERFCTR_VPC_SEL_1 0x00000e65 + +#define REG_A5XX_VPC_PERFCTR_VPC_SEL_2 0x00000e66 + +#define REG_A5XX_VPC_PERFCTR_VPC_SEL_3 0x00000e67 + +#define REG_A5XX_UCHE_ADDR_MODE_CNTL 0x00000e80 + +#define REG_A5XX_UCHE_MODE_CNTL 0x00000e81 + +#define REG_A5XX_UCHE_SVM_CNTL 0x00000e82 + +#define REG_A5XX_UCHE_WRITE_THRU_BASE_LO 0x00000e87 + +#define REG_A5XX_UCHE_WRITE_THRU_BASE_HI 0x00000e88 + +#define REG_A5XX_UCHE_TRAP_BASE_LO 0x00000e89 + +#define REG_A5XX_UCHE_TRAP_BASE_HI 0x00000e8a + +#define REG_A5XX_UCHE_GMEM_RANGE_MIN_LO 0x00000e8b + +#define REG_A5XX_UCHE_GMEM_RANGE_MIN_HI 0x00000e8c + +#define REG_A5XX_UCHE_GMEM_RANGE_MAX_LO 0x00000e8d + +#define REG_A5XX_UCHE_GMEM_RANGE_MAX_HI 0x00000e8e + +#define REG_A5XX_UCHE_DBG_ECO_CNTL_2 0x00000e8f + +#define REG_A5XX_UCHE_DBG_ECO_CNTL 0x00000e90 + 
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_LO 0x00000e91
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_HI 0x00000e92
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_LO 0x00000e93
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_HI 0x00000e94
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE 0x00000e95
+
+#define REG_A5XX_UCHE_CACHE_WAYS 0x00000e96
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000ea0
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000ea1
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000ea2
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000ea3
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000ea4
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000ea5
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000ea6
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000ea7
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_0 0x00000ea8
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_1 0x00000ea9
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_2 0x00000eaa
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_3 0x00000eab
+
+#define REG_A5XX_UCHE_TRAP_LOG_LO 0x00000eb1
+
+#define REG_A5XX_UCHE_TRAP_LOG_HI 0x00000eb2
+
+#define REG_A5XX_SP_DBG_ECO_CNTL 0x00000ec0
+
+#define REG_A5XX_SP_ADDR_MODE_CNTL 0x00000ec1
+
+#define REG_A5XX_SP_MODE_CNTL 0x00000ec2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_0 0x00000ed0
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_1 0x00000ed1
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_2 0x00000ed2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_3 0x00000ed3
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_4 0x00000ed4
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_5 0x00000ed5
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_6 0x00000ed6
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_7 0x00000ed7
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_8 0x00000ed8
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_9 0x00000ed9
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_10 0x00000eda
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_11 0x00000edb
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_0 0x00000edc
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_1 0x00000edd
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_2 0x00000ede
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_3 0x00000edf
+
+#define REG_A5XX_TPL1_ADDR_MODE_CNTL 0x00000f01
+
+#define REG_A5XX_TPL1_MODE_CNTL 0x00000f02
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_0 0x00000f10
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_1 0x00000f11
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_2 0x00000f12
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_3 0x00000f13
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_4 0x00000f14
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_5 0x00000f15
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_6 0x00000f16
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_7 0x00000f17
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_0 0x00000f18
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_1 0x00000f19
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_2 0x00000f1a
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_3 0x00000f1b
+
+#define REG_A5XX_VBIF_VERSION 0x00003000
+
+#define REG_A5XX_VBIF_CLKON 0x00003001
+
+#define REG_A5XX_VBIF_ABIT_SORT 0x00003028
+
+#define REG_A5XX_VBIF_ABIT_SORT_CONF 0x00003029
+
+#define REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
+
+#define REG_A5XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL0 0x00003080
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL1 0x00003081
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT_CTRL 0x00003084
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL0 0x00003085
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL1 0x00003086
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL0 0x00003087
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL1 0x00003088
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT 0x0000308c
+
+#define REG_A5XX_VBIF_PERF_CNT_EN0 0x000030c0
+
+#define REG_A5XX_VBIF_PERF_CNT_EN1 0x000030c1
+
+#define REG_A5XX_VBIF_PERF_CNT_EN2 0x000030c2
+
+#define REG_A5XX_VBIF_PERF_CNT_EN3 0x000030c3
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR0 0x000030c8
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR1 0x000030c9
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR2 0x000030ca
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR3 0x000030cb
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL0 0x000030d0
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL1 0x000030d1
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL2 0x000030d2
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL3 0x000030d3
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW0 0x000030d8
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW1 0x000030d9
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW2 0x000030da
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW3 0x000030db
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH0 0x000030e0
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH1 0x000030e1
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH2 0x000030e2
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH3 0x000030e3
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW1 0x00003111
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW2 0x00003112
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH0 0x00003118
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
+
+#define REG_A5XX_GPMU_INST_RAM_BASE 0x00008800
+
+#define REG_A5XX_GPMU_DATA_RAM_BASE 0x00009800
+
+#define REG_A5XX_GPMU_SP_POWER_CNTL 0x0000a881
+
+#define REG_A5XX_GPMU_RBCCU_CLOCK_CNTL 0x0000a886
+
+#define REG_A5XX_GPMU_RBCCU_POWER_CNTL 0x0000a887
+
+#define REG_A5XX_GPMU_SP_PWR_CLK_STATUS 0x0000a88b
+#define A5XX_GPMU_SP_PWR_CLK_STATUS_PWR_ON 0x00100000
+
+#define REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS 0x0000a88d
+#define A5XX_GPMU_RBCCU_PWR_CLK_STATUS_PWR_ON 0x00100000
+
+#define REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY 0x0000a891
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL 0x0000a892
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST 0x0000a893
+
+#define REG_A5XX_GPMU_PWR_COL_BINNING_CTRL 0x0000a894
+
+#define REG_A5XX_GPMU_WFI_CONFIG 0x0000a8c1
+
+#define REG_A5XX_GPMU_RBBM_INTR_INFO 0x0000a8d6
+
+#define REG_A5XX_GPMU_CM3_SYSRESET 0x0000a8d8
+
+#define REG_A5XX_GPMU_GENERAL_0 0x0000a8e0
+
+#define REG_A5XX_GPMU_GENERAL_1 0x0000a8e1
+
+#define REG_A5XX_SP_POWER_COUNTER_0_LO 0x0000a840
+
+#define REG_A5XX_SP_POWER_COUNTER_0_HI 0x0000a841
+
+#define REG_A5XX_SP_POWER_COUNTER_1_LO 0x0000a842
+
+#define REG_A5XX_SP_POWER_COUNTER_1_HI 0x0000a843
+
+#define REG_A5XX_SP_POWER_COUNTER_2_LO 0x0000a844
+
+#define REG_A5XX_SP_POWER_COUNTER_2_HI 0x0000a845
+
+#define REG_A5XX_SP_POWER_COUNTER_3_LO 0x0000a846
+
+#define REG_A5XX_SP_POWER_COUNTER_3_HI 0x0000a847
+
+#define REG_A5XX_TP_POWER_COUNTER_0_LO 0x0000a848
+
+#define REG_A5XX_TP_POWER_COUNTER_0_HI 0x0000a849
+
+#define REG_A5XX_TP_POWER_COUNTER_1_LO 0x0000a84a
+
+#define REG_A5XX_TP_POWER_COUNTER_1_HI 0x0000a84b
+
+#define REG_A5XX_TP_POWER_COUNTER_2_LO 0x0000a84c
+
+#define REG_A5XX_TP_POWER_COUNTER_2_HI 0x0000a84d
+
+#define REG_A5XX_TP_POWER_COUNTER_3_LO 0x0000a84e
+
+#define REG_A5XX_TP_POWER_COUNTER_3_HI 0x0000a84f
+
+#define REG_A5XX_RB_POWER_COUNTER_0_LO 0x0000a850
+
+#define REG_A5XX_RB_POWER_COUNTER_0_HI 0x0000a851
+
+#define REG_A5XX_RB_POWER_COUNTER_1_LO 0x0000a852
+
+#define REG_A5XX_RB_POWER_COUNTER_1_HI 0x0000a853
+
+#define REG_A5XX_RB_POWER_COUNTER_2_LO 0x0000a854
+
+#define REG_A5XX_RB_POWER_COUNTER_2_HI 0x0000a855
+
+#define REG_A5XX_RB_POWER_COUNTER_3_LO 0x0000a856
+
+#define REG_A5XX_RB_POWER_COUNTER_3_HI 0x0000a857
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_LO 0x0000a858
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_HI 0x0000a859
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_LO 0x0000a85a
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_HI 0x0000a85b
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_LO 0x0000a85c
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_HI 0x0000a85d
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_LO 0x0000a85e
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_HI 0x0000a85f
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_LO 0x0000a860
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_HI 0x0000a861
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_LO 0x0000a862
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_HI 0x0000a863
+
+#define REG_A5XX_CP_POWER_COUNTER_0_LO 0x0000a864
+
+#define REG_A5XX_CP_POWER_COUNTER_0_HI 0x0000a865
+
+#define REG_A5XX_CP_POWER_COUNTER_1_LO 0x0000a866
+
+#define REG_A5XX_CP_POWER_COUNTER_1_HI 0x0000a867
+
+#define REG_A5XX_CP_POWER_COUNTER_2_LO 0x0000a868
+
+#define REG_A5XX_CP_POWER_COUNTER_2_HI 0x0000a869
+
+#define REG_A5XX_CP_POWER_COUNTER_3_LO 0x0000a86a
+
+#define REG_A5XX_CP_POWER_COUNTER_3_HI 0x0000a86b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_LO 0x0000a86c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_HI 0x0000a86d
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_LO 0x0000a86e
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_HI 0x0000a86f
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_LO 0x0000a870
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_HI 0x0000a871
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_LO 0x0000a872
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_HI 0x0000a873
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_LO 0x0000a874
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_HI 0x0000a875
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_LO 0x0000a876
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_HI 0x0000a877
+
+#define REG_A5XX_GPMU_POWER_COUNTER_ENABLE 0x0000a878
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_LO 0x0000a879
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_HI 0x0000a87a
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_RESET 0x0000a87b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_0 0x0000a87c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_1 0x0000a87d
+
+#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL 0x0000a8a3
+
+#define REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL 0x0000a8a8
+
+#define REG_A5XX_GPMU_TEMP_SENSOR_ID 0x0000ac00
+
+#define REG_A5XX_GPMU_TEMP_SENSOR_CONFIG 0x0000ac01
+
+#define REG_A5XX_GPMU_TEMP_VAL 0x0000ac02
+
+#define REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD 0x0000ac03
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_STATUS 0x0000ac05
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK 0x0000ac06
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_0_1 0x0000ac40
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_2_3 0x0000ac41
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_0_1 0x0000ac42
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_2_3 0x0000ac43
+
+#define REG_A5XX_GPMU_BASE_LEAKAGE 0x0000ac46
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE 0x0000ac60
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_STATUS 0x0000ac61
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK 0x0000ac62
+
+#define REG_A5XX_GPMU_GPMU_PWR_THRESHOLD 0x0000ac80
+
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL 0x0000acc4
+
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS 0x0000acc5
+
+#define REG_A5XX_GDPM_CONFIG1 0x0000b80c
+
+#define REG_A5XX_GDPM_CONFIG2 0x0000b80d
+
+#define REG_A5XX_GDPM_INT_EN 0x0000b80f
+
+#define REG_A5XX_GDPM_INT_MASK 0x0000b811
+
+#define REG_A5XX_GPMU_BEC_ENABLE 0x0000b9a0
+
+#define REG_A5XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000c41a
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x0000c41d
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x0000c41f
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x0000c421
+
+#define REG_A5XX_GPU_CS_ENABLE_REG 0x0000c520
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x0000c557
+
+#define REG_A5XX_GRAS_CL_CNTL 0x0000e000
+#define A5XX_GRAS_CL_CNTL_ZERO_GB_SCALE_Z 0x00000040
+
+#define REG_A5XX_GRAS_VS_CL_CNTL 0x0000e001
+#define A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A5XX_GRAS_VS_CL_CNTL_CLIP_MASK(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT) & A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK;
+}
+#define A5XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK 0x0000ff00
+#define A5XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT 8
+static inline uint32_t A5XX_GRAS_VS_CL_CNTL_CULL_MASK(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT) & A5XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E004 0x0000e004
+
+#define REG_A5XX_GRAS_CNTL 0x0000e005
+#define A5XX_GRAS_CNTL_IJ_PERSP_PIXEL 0x00000001
+#define A5XX_GRAS_CNTL_IJ_PERSP_CENTROID 0x00000002
+#define A5XX_GRAS_CNTL_IJ_PERSP_SAMPLE 0x00000004
+#define A5XX_GRAS_CNTL_IJ_LINEAR_PIXEL 0x00000008
+#define A5XX_GRAS_CNTL_IJ_LINEAR_CENTROID 0x00000010
+#define A5XX_GRAS_CNTL_IJ_LINEAR_SAMPLE 0x00000020
+#define A5XX_GRAS_CNTL_COORD_MASK__MASK 0x000003c0
+#define A5XX_GRAS_CNTL_COORD_MASK__SHIFT 6
+static inline uint32_t A5XX_GRAS_CNTL_COORD_MASK(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_CNTL_COORD_MASK__SHIFT) & A5XX_GRAS_CNTL_COORD_MASK__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x0000e006
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
+}
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_XOFFSET_0 0x0000e010
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_XSCALE_0 0x0000e011
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YOFFSET_0 0x0000e012
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YSCALE_0 0x0000e013
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
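+
+/*
+ * Illustrative sketch, not part of the generated header: the viewport
+ * helpers above store the raw IEEE-754 bit pattern of their argument
+ * (fui() is assumed to return the 32-bit encoding of the float), so a
+ * viewport transform for a width-by-height window is emitted as
+ * consecutive float writes, e.g. with the driver's OUT_RING() emitter:
+ *
+ *	OUT_RING(ring, A5XX_GRAS_CL_VPORT_XOFFSET_0(width / 2.0f));
+ *	OUT_RING(ring, A5XX_GRAS_CL_VPORT_XSCALE_0(width / 2.0f));
+ *	OUT_RING(ring, A5XX_GRAS_CL_VPORT_YOFFSET_0(height / 2.0f));
+ */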
+
+#define REG_A5XX_GRAS_CL_VPORT_ZOFFSET_0 0x0000e014
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZSCALE_0 0x0000e015
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_CNTL 0x0000e090
+#define A5XX_GRAS_SU_CNTL_CULL_FRONT 0x00000001
+#define A5XX_GRAS_SU_CNTL_CULL_BACK 0x00000002
+#define A5XX_GRAS_SU_CNTL_FRONT_CW 0x00000004
+#define A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK 0x000007f8
+#define A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A5XX_GRAS_SU_CNTL_LINEHALFWIDTH(float val)
+{
+	return ((((int32_t)(val * 4.0))) << A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT) & A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
+}
+#define A5XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800
+#define A5XX_GRAS_SU_CNTL_LINE_MODE__MASK 0x00002000
+#define A5XX_GRAS_SU_CNTL_LINE_MODE__SHIFT 13
+static inline uint32_t A5XX_GRAS_SU_CNTL_LINE_MODE(enum a5xx_line_mode val)
+{
+	return ((val) << A5XX_GRAS_SU_CNTL_LINE_MODE__SHIFT) & A5XX_GRAS_SU_CNTL_LINE_MODE__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POINT_MINMAX 0x0000e091
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+	return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+	return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POINT_SIZE 0x0000e092
+#define A5XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A5XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POINT_SIZE(float val)
+{
+	return ((((int32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_SIZE__SHIFT) & A5XX_GRAS_SU_POINT_SIZE__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_LAYERED 0x0000e093
+
+#define REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL 0x0000e094
+#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
+#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_UNK1 0x00000002
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000e095
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000e096
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP 0x0000e097
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val)
+{
+	return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_DEPTH_BUFFER_INFO 0x0000e098
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+	return ((val) << A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_CONSERVATIVE_RAS_CNTL 0x0000e099
+
+#define REG_A5XX_GRAS_SC_CNTL 0x0000e0a0
+#define A5XX_GRAS_SC_CNTL_BINNING_PASS 0x00000001
+#define A5XX_GRAS_SC_CNTL_SAMPLES_PASSED 0x00008000
+
+#define REG_A5XX_GRAS_SC_BIN_CNTL 0x0000e0a1
+
+#define REG_A5XX_GRAS_SC_RAS_MSAA_CNTL 0x0000e0a2
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+	return ((val) << A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_DEST_MSAA_CNTL 0x0000e0a3
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+	return ((val) << A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_CNTL 0x0000e0a4
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0 0x0000e0aa
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0 0x0000e0ab
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0 0x0000e0ca
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0 0x0000e0cb
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL 0x0000e0ea
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000e0eb
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+	return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_LRZ_CNTL 0x0000e100
+#define A5XX_GRAS_LRZ_CNTL_ENABLE 0x00000001
+#define A5XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002
+#define A5XX_GRAS_LRZ_CNTL_GREATER 0x00000004
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_LO 0x0000e101
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_HI 0x0000e102
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_PITCH 0x0000e103
+#define A5XX_GRAS_LRZ_BUFFER_PITCH__MASK 0xffffffff
+#define A5XX_GRAS_LRZ_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A5XX_GRAS_LRZ_BUFFER_PITCH(uint32_t val)
+{
+	return ((val >> 5) << A5XX_GRAS_LRZ_BUFFER_PITCH__SHIFT) & A5XX_GRAS_LRZ_BUFFER_PITCH__MASK;
+}
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO 0x0000e104
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x0000e105
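+
+/*
+ * Illustrative sketch, not part of the generated header: the scissor
+ * registers above pack an x/y pair into one dword, so a window scissor
+ * covering (x1,y1)..(x2,y2) is two writes built from the helpers
+ * (OUT_RING() is assumed to be the driver's ringbuffer emitter):
+ *
+ *	OUT_RING(ring, A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(x1) |
+ *			A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(y1));
+ *	OUT_RING(ring, A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(x2) |
+ *			A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(y2));
+ */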
+
+#define REG_A5XX_RB_CNTL 0x0000e140
+#define A5XX_RB_CNTL_WIDTH__MASK 0x000000ff
+#define A5XX_RB_CNTL_WIDTH__SHIFT 0
+static inline uint32_t A5XX_RB_CNTL_WIDTH(uint32_t val)
+{
+	return ((val >> 5) << A5XX_RB_CNTL_WIDTH__SHIFT) & A5XX_RB_CNTL_WIDTH__MASK;
+}
+#define A5XX_RB_CNTL_HEIGHT__MASK 0x0001fe00
+#define A5XX_RB_CNTL_HEIGHT__SHIFT 9
+static inline uint32_t A5XX_RB_CNTL_HEIGHT(uint32_t val)
+{
+	return ((val >> 5) << A5XX_RB_CNTL_HEIGHT__SHIFT) & A5XX_RB_CNTL_HEIGHT__MASK;
+}
+#define A5XX_RB_CNTL_BYPASS 0x00020000
+
+#define REG_A5XX_RB_RENDER_CNTL 0x0000e141
+#define A5XX_RB_RENDER_CNTL_BINNING_PASS 0x00000001
+#define A5XX_RB_RENDER_CNTL_SAMPLES_PASSED 0x00000040
+#define A5XX_RB_RENDER_CNTL_DISABLE_COLOR_PIPE 0x00000080
+#define A5XX_RB_RENDER_CNTL_FLAG_DEPTH 0x00004000
+#define A5XX_RB_RENDER_CNTL_FLAG_DEPTH2 0x00008000
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS__MASK 0x00ff0000
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT 16
+static inline uint32_t A5XX_RB_RENDER_CNTL_FLAG_MRTS(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT) & A5XX_RB_RENDER_CNTL_FLAG_MRTS__MASK;
+}
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS2__MASK 0xff000000
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS2__SHIFT 24
+static inline uint32_t A5XX_RB_RENDER_CNTL_FLAG_MRTS2(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_CNTL_FLAG_MRTS2__SHIFT) & A5XX_RB_RENDER_CNTL_FLAG_MRTS2__MASK;
+}
+
+#define REG_A5XX_RB_RAS_MSAA_CNTL 0x0000e142
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+	return ((val) << A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_RB_DEST_MSAA_CNTL 0x0000e143
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+	return ((val) << A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_RB_RENDER_CONTROL0 0x0000e144
+#define A5XX_RB_RENDER_CONTROL0_IJ_PERSP_PIXEL 0x00000001
+#define A5XX_RB_RENDER_CONTROL0_IJ_PERSP_CENTROID 0x00000002
+#define A5XX_RB_RENDER_CONTROL0_IJ_PERSP_SAMPLE 0x00000004
+#define A5XX_RB_RENDER_CONTROL0_IJ_LINEAR_PIXEL 0x00000008
+#define A5XX_RB_RENDER_CONTROL0_IJ_LINEAR_CENTROID 0x00000010
+#define A5XX_RB_RENDER_CONTROL0_IJ_LINEAR_SAMPLE 0x00000020
+#define A5XX_RB_RENDER_CONTROL0_COORD_MASK__MASK 0x000003c0
+#define A5XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT 6
+static inline uint32_t A5XX_RB_RENDER_CONTROL0_COORD_MASK(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT) & A5XX_RB_RENDER_CONTROL0_COORD_MASK__MASK;
+}
+
+#define REG_A5XX_RB_RENDER_CONTROL1 0x0000e145
+#define A5XX_RB_RENDER_CONTROL1_SAMPLEMASK 0x00000001
+#define A5XX_RB_RENDER_CONTROL1_FACENESS 0x00000002
+#define A5XX_RB_RENDER_CONTROL1_SAMPLEID 0x00000004
+
+#define REG_A5XX_RB_FS_OUTPUT_CNTL 0x0000e146
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT 0
+static inline uint32_t A5XX_RB_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+	return ((val) << A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_RB_FS_OUTPUT_CNTL_FRAG_WRITES_Z 0x00000020
+
+#define REG_A5XX_RB_RENDER_COMPONENTS 0x0000e147
+#define A5XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f
+#define A5XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT0(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT0__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0
+#define A5XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT1(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT1__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00
+#define A5XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT2(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT2__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000
+#define A5XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT3(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT3__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000
+#define A5XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT4(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT4__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000
+#define A5XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT5(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT5__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000
+#define A5XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT6(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT6__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000
+#define A5XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT7(uint32_t val)
+{
+	return ((val) << A5XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT7__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_CONTROL(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+#define A5XX_RB_MRT_CONTROL_BLEND 0x00000001
+#define A5XX_RB_MRT_CONTROL_BLEND2 0x00000002
+#define A5XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000004
+#define A5XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000078
+#define A5XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 3
+static inline uint32_t A5XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+	return ((val) << A5XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A5XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
+static inline uint32_t A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+	return ((val) << A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x0000e151 + 0x7*i0; }
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+	return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+	return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+	return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+	return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+	return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+	return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x0000e152 + 0x7*i0; }
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+	return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a5xx_tile_mode val)
+{
+	return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK 0x00001800
+#define A5XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT 11
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+	return ((val) << A5XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+	return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00008000
+
+static inline uint32_t REG_A5XX_RB_MRT_PITCH(uint32_t i0) { return 0x0000e153 + 0x7*i0; }
+#define A5XX_RB_MRT_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_MRT_PITCH__SHIFT) & A5XX_RB_MRT_PITCH__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x0000e154 + 0x7*i0; }
+#define A5XX_RB_MRT_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_ARRAY_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_MRT_ARRAY_PITCH__SHIFT) & A5XX_RB_MRT_ARRAY_PITCH__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x0000e155 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_BASE_HI(uint32_t i0) { return 0x0000e156 + 0x7*i0; }
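+
+/*
+ * Illustrative sketch, not part of the generated header: the RB_MRT
+ * registers form a per-render-target array with a stride of 7 dwords, so
+ * render target i is addressed through the indexed macros.  Assuming the
+ * driver's OUT_PKT4()/OUT_RING() emitters, and with fmt/tile being
+ * caller-chosen a5xx_color_fmt/a5xx_tile_mode values:
+ *
+ *	OUT_PKT4(ring, REG_A5XX_RB_MRT_BUF_INFO(i), 1);
+ *	OUT_RING(ring, A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT(fmt) |
+ *			A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(tile));
+ */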
+
+#define REG_A5XX_RB_BLEND_RED 0x0000e1a0
+#define A5XX_RB_BLEND_RED_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_RED_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_RED_UINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_RED_UINT__SHIFT) & A5XX_RB_BLEND_RED_UINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_RED_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_RED_SINT__SHIFT) & A5XX_RB_BLEND_RED_SINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_RED_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_RED_FLOAT(float val)
+{
+	return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_RED_FLOAT__SHIFT) & A5XX_RB_BLEND_RED_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_RED_F32 0x0000e1a1
+#define A5XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_RED_F32(float val)
+{
+	return ((fui(val)) << A5XX_RB_BLEND_RED_F32__SHIFT) & A5XX_RB_BLEND_RED_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_GREEN 0x0000e1a2
+#define A5XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_GREEN_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_GREEN_UINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_GREEN_UINT__SHIFT) & A5XX_RB_BLEND_GREEN_UINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_GREEN_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_GREEN_SINT__SHIFT) & A5XX_RB_BLEND_GREEN_SINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_GREEN_FLOAT(float val)
+{
+	return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A5XX_RB_BLEND_GREEN_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_GREEN_F32 0x0000e1a3
+#define A5XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_GREEN_F32(float val)
+{
+	return ((fui(val)) << A5XX_RB_BLEND_GREEN_F32__SHIFT) & A5XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE 0x0000e1a4
+#define A5XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_BLUE_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_BLUE_UINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_BLUE_UINT__SHIFT) & A5XX_RB_BLEND_BLUE_UINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_BLUE_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_BLUE_SINT__SHIFT) & A5XX_RB_BLEND_BLUE_SINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_BLUE_FLOAT(float val)
+{
+	return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A5XX_RB_BLEND_BLUE_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE_F32 0x0000e1a5
+#define A5XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_BLUE_F32(float val)
+{
+	return ((fui(val)) << A5XX_RB_BLEND_BLUE_F32__SHIFT) & A5XX_RB_BLEND_BLUE_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA 0x0000e1a6
+#define A5XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_ALPHA_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_UINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_ALPHA_UINT__SHIFT) & A5XX_RB_BLEND_ALPHA_UINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_ALPHA_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_ALPHA_SINT__SHIFT) & A5XX_RB_BLEND_ALPHA_SINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_ALPHA_FLOAT(float val)
+{
+	return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A5XX_RB_BLEND_ALPHA_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA_F32 0x0000e1a7
+#define A5XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_F32(float val)
+{
+	return ((fui(val)) << A5XX_RB_BLEND_ALPHA_F32__SHIFT) & A5XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
+#define REG_A5XX_RB_ALPHA_CONTROL 0x0000e1a8
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
+{
+	return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
+}
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+	return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_CNTL 0x0000e1a9
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A5XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A5XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
+{
+	return ((val) << A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_PLANE_CNTL 0x0000e1b0
+#define A5XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
+#define A5XX_RB_DEPTH_PLANE_CNTL_UNK1 0x00000002
+
+#define REG_A5XX_RB_DEPTH_CNTL 0x0000e1b1
+#define A5XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000001
+#define A5XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__MASK 0x0000001c
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT 2
+static inline uint32_t A5XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
+{
+	return ((val) << A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A5XX_RB_DEPTH_CNTL_ZFUNC__MASK;
+}
+#define A5XX_RB_DEPTH_CNTL_Z_READ_ENABLE 0x00000040
+
+#define REG_A5XX_RB_DEPTH_BUFFER_INFO 0x0000e1b2
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+	return ((val) << A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_LO 0x0000e1b3
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_HI 0x0000e1b4
+
+#define REG_A5XX_RB_DEPTH_BUFFER_PITCH 0x0000e1b5
+#define A5XX_RB_DEPTH_BUFFER_PITCH__MASK 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x0000e1b6
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_CONTROL 0x0000e1c0
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A5XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+	return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_INFO 0x0000e1c1
+#define A5XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
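+
+/*
+ * Illustrative sketch, not part of the generated header: front- and
+ * back-face stencil state share the one RB_STENCIL_CONTROL dword; a
+ * front-face "always pass, replace on depth pass" setup composes as:
+ *
+ *	A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE |
+ *	A5XX_RB_STENCIL_CONTROL_FUNC(FUNC_ALWAYS) |
+ *	A5XX_RB_STENCIL_CONTROL_FAIL(STENCIL_KEEP) |
+ *	A5XX_RB_STENCIL_CONTROL_ZPASS(STENCIL_REPLACE) |
+ *	A5XX_RB_STENCIL_CONTROL_ZFAIL(STENCIL_KEEP)
+ *
+ * where FUNC_ALWAYS and STENCIL_* are the adreno_compare_func and
+ * adreno_stencil_op enum values from adreno_common.xml.h.
+ */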
+
+#define REG_A5XX_RB_STENCIL_BASE_LO 0x0000e1c2
+
+#define REG_A5XX_RB_STENCIL_BASE_HI 0x0000e1c3
+
+#define REG_A5XX_RB_STENCIL_PITCH 0x0000e1c4
+#define A5XX_RB_STENCIL_PITCH__MASK 0xffffffff
+#define A5XX_RB_STENCIL_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_STENCIL_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_STENCIL_PITCH__SHIFT) & A5XX_RB_STENCIL_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_ARRAY_PITCH 0x0000e1c5
+#define A5XX_RB_STENCIL_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_STENCIL_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_STENCIL_ARRAY_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_STENCIL_ARRAY_PITCH__SHIFT) & A5XX_RB_STENCIL_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCILREFMASK 0x0000e1c6
+#define A5XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+	return ((val) << A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+	return ((val) << A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+	return ((val) << A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A5XX_RB_STENCILREFMASK_BF 0x0000e1c7
+#define A5XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
+#define A5XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
+static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
+{
+	return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
+#define A5XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
+static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
+{
+	return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
+#define A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
+{
+	return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A5XX_RB_WINDOW_OFFSET 0x0000e1d0
+#define A5XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A5XX_RB_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_X(uint32_t val)
+{
+	return ((val) << A5XX_RB_WINDOW_OFFSET_X__SHIFT) & A5XX_RB_WINDOW_OFFSET_X__MASK;
+}
+#define A5XX_RB_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A5XX_RB_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_Y(uint32_t val)
+{
+	return ((val) << A5XX_RB_WINDOW_OFFSET_Y__SHIFT) & A5XX_RB_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A5XX_RB_SAMPLE_COUNT_CONTROL 0x0000e1d1
+#define A5XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
+
+#define REG_A5XX_RB_BLIT_CNTL 0x0000e210
+#define A5XX_RB_BLIT_CNTL_BUF__MASK 0x0000000f
+#define A5XX_RB_BLIT_CNTL_BUF__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_CNTL_BUF(enum a5xx_blit_buf val)
+{
+	return ((val) << A5XX_RB_BLIT_CNTL_BUF__SHIFT) & A5XX_RB_BLIT_CNTL_BUF__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_1 0x0000e211
+#define A5XX_RB_RESOLVE_CNTL_1_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_RESOLVE_CNTL_1_X__MASK 0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_1_X__SHIFT 0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_X(uint32_t val)
+{
+	return ((val) << A5XX_RB_RESOLVE_CNTL_1_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_1_Y__MASK 0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT 16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_Y(uint32_t val)
+{
+	return ((val) << A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_Y__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_2 0x0000e212
+#define A5XX_RB_RESOLVE_CNTL_2_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_RESOLVE_CNTL_2_X__MASK 0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_2_X__SHIFT 0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_X(uint32_t val)
+{
+	return ((val) << A5XX_RB_RESOLVE_CNTL_2_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_2_Y__MASK 0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT 16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_Y(uint32_t val)
+{
+	return ((val) << A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_Y__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_3 0x0000e213
+#define A5XX_RB_RESOLVE_CNTL_3_TILED 0x00000001
+
+#define REG_A5XX_RB_BLIT_DST_LO 0x0000e214
+
+#define REG_A5XX_RB_BLIT_DST_HI 0x0000e215
+
+#define REG_A5XX_RB_BLIT_DST_PITCH 0x0000e216
+#define A5XX_RB_BLIT_DST_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_DST_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_DST_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_BLIT_DST_PITCH__SHIFT) & A5XX_RB_BLIT_DST_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_BLIT_DST_ARRAY_PITCH 0x0000e217
+#define A5XX_RB_BLIT_DST_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_DST_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW0 0x0000e218
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW1 0x0000e219
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW2 0x0000e21a
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW3 0x0000e21b
+
+#define REG_A5XX_RB_CLEAR_CNTL 0x0000e21c
+#define A5XX_RB_CLEAR_CNTL_FAST_CLEAR 0x00000002
+#define A5XX_RB_CLEAR_CNTL_MSAA_RESOLVE 0x00000004
+#define A5XX_RB_CLEAR_CNTL_MASK__MASK 0x000000f0
+#define A5XX_RB_CLEAR_CNTL_MASK__SHIFT 4
+static inline uint32_t A5XX_RB_CLEAR_CNTL_MASK(uint32_t val)
+{
+	return ((val) << A5XX_RB_CLEAR_CNTL_MASK__SHIFT) & A5XX_RB_CLEAR_CNTL_MASK__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_LO 0x0000e240
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_HI 0x0000e241
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x0000e242
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER(uint32_t i0) { return 0x0000e243 + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ADDR_LO(uint32_t i0) { return 0x0000e243 + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ADDR_HI(uint32_t i0) { return 0x0000e244 + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t i0) { return 0x0000e245 + 0x4*i0; }
+#define A5XX_RB_MRT_FLAG_BUFFER_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_FLAG_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_MRT_FLAG_BUFFER_PITCH__SHIFT) & A5XX_RB_MRT_FLAG_BUFFER_PITCH__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(uint32_t i0) { return 0x0000e246 + 0x4*i0; }
+#define A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_LO 0x0000e263
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_HI 0x0000e264
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_PITCH 0x0000e265
+#define A5XX_RB_BLIT_FLAG_DST_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_FLAG_DST_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_FLAG_DST_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_BLIT_FLAG_DST_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH 0x0000e266
+#define A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_SAMPLE_COUNT_ADDR_LO 0x0000e267
+
+#define REG_A5XX_RB_SAMPLE_COUNT_ADDR_HI 0x0000e268
+
+#define REG_A5XX_VPC_CNTL_0 0x0000e280
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK 0x0000007f
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_VPC_CNTL_0_STRIDE_IN_VPC(uint32_t val)
+{
+	return ((val) << A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT) & A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK;
+}
+#define A5XX_VPC_CNTL_0_VARYING 0x00000800
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+#define REG_A5XX_UNKNOWN_E292 0x0000e292
+
+#define REG_A5XX_UNKNOWN_E293 0x0000e293
+
+static inline uint32_t REG_A5XX_VPC_VAR(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+#define REG_A5XX_VPC_GS_SIV_CNTL 0x0000e298
+
+#define REG_A5XX_VPC_CLIP_CNTL 0x0000e29a
+#define A5XX_VPC_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A5XX_VPC_CLIP_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A5XX_VPC_CLIP_CNTL_CLIP_MASK(uint32_t val)
+{
+	return ((val) << A5XX_VPC_CLIP_CNTL_CLIP_MASK__SHIFT) & A5XX_VPC_CLIP_CNTL_CLIP_MASK__MASK;
+}
+#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00
+#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8
+static inline uint32_t A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val)
+{
+	return ((val) << A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__MASK;
+}
+#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000
+#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16
+static inline uint32_t A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val)
+{
+	return ((val) << A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__MASK;
+}
+
+#define REG_A5XX_VPC_PACK 0x0000e29d
+#define A5XX_VPC_PACK_NUMNONPOSVAR__MASK 0x000000ff
+#define A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT 0
+static inline uint32_t A5XX_VPC_PACK_NUMNONPOSVAR(uint32_t val)
+{
+	return ((val) << A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A5XX_VPC_PACK_NUMNONPOSVAR__MASK;
+}
+#define A5XX_VPC_PACK_PSIZELOC__MASK 0x0000ff00
+#define A5XX_VPC_PACK_PSIZELOC__SHIFT 8
+static inline uint32_t A5XX_VPC_PACK_PSIZELOC(uint32_t val)
+{
+	return ((val) << A5XX_VPC_PACK_PSIZELOC__SHIFT) & A5XX_VPC_PACK_PSIZELOC__MASK;
+}
+
+#define REG_A5XX_VPC_FS_PRIMITIVEID_CNTL 0x0000e2a0
+
+#define REG_A5XX_VPC_SO_BUF_CNTL 0x0000e2a1
+#define A5XX_VPC_SO_BUF_CNTL_BUF0 0x00000001
+#define A5XX_VPC_SO_BUF_CNTL_BUF1 0x00000008
+#define A5XX_VPC_SO_BUF_CNTL_BUF2 0x00000040
+#define A5XX_VPC_SO_BUF_CNTL_BUF3 0x00000200
+#define A5XX_VPC_SO_BUF_CNTL_ENABLE 0x00008000
+
+#define REG_A5XX_VPC_SO_OVERRIDE 0x0000e2a2
+#define A5XX_VPC_SO_OVERRIDE_SO_DISABLE 0x00000001
+
+#define REG_A5XX_VPC_SO_CNTL 0x0000e2a3
+#define A5XX_VPC_SO_CNTL_ENABLE 0x00010000
+
+#define REG_A5XX_VPC_SO_PROG 0x0000e2a4
+#define A5XX_VPC_SO_PROG_A_BUF__MASK 0x00000003
+#define A5XX_VPC_SO_PROG_A_BUF__SHIFT 0
+static inline uint32_t A5XX_VPC_SO_PROG_A_BUF(uint32_t val)
+{
+	return ((val) << A5XX_VPC_SO_PROG_A_BUF__SHIFT) & A5XX_VPC_SO_PROG_A_BUF__MASK;
+}
+#define A5XX_VPC_SO_PROG_A_OFF__MASK 0x000007fc
+#define A5XX_VPC_SO_PROG_A_OFF__SHIFT 2
+static inline uint32_t A5XX_VPC_SO_PROG_A_OFF(uint32_t val)
+{
+	return ((val >> 2) << A5XX_VPC_SO_PROG_A_OFF__SHIFT) & A5XX_VPC_SO_PROG_A_OFF__MASK;
+}
+#define A5XX_VPC_SO_PROG_A_EN 0x00000800
+#define A5XX_VPC_SO_PROG_B_BUF__MASK 0x00003000
+#define A5XX_VPC_SO_PROG_B_BUF__SHIFT 12
+static inline uint32_t A5XX_VPC_SO_PROG_B_BUF(uint32_t val)
+{
+	return ((val) << A5XX_VPC_SO_PROG_B_BUF__SHIFT) & A5XX_VPC_SO_PROG_B_BUF__MASK;
+}
+#define A5XX_VPC_SO_PROG_B_OFF__MASK 0x007fc000
+#define A5XX_VPC_SO_PROG_B_OFF__SHIFT 14
+static inline uint32_t A5XX_VPC_SO_PROG_B_OFF(uint32_t val)
+{
+	return ((val >> 2) << A5XX_VPC_SO_PROG_B_OFF__SHIFT) & A5XX_VPC_SO_PROG_B_OFF__MASK;
+}
+#define A5XX_VPC_SO_PROG_B_EN 0x00800000
+
+static inline uint32_t REG_A5XX_VPC_SO(uint32_t i0) { return 0x0000e2a7 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_VPC_SO_BUFFER_BASE_LO(uint32_t i0) { return 0x0000e2a7 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_VPC_SO_BUFFER_BASE_HI(uint32_t i0) { return 0x0000e2a8 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_VPC_SO_BUFFER_SIZE(uint32_t i0) { return 0x0000e2a9 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_VPC_SO_NCOMP(uint32_t i0) { return 0x0000e2aa + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_VPC_SO_BUFFER_OFFSET(uint32_t i0) { return 0x0000e2ab + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_VPC_SO_FLUSH_BASE_LO(uint32_t i0) { return 0x0000e2ac + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_VPC_SO_FLUSH_BASE_HI(uint32_t i0) { return 0x0000e2ad + 0x7*i0; }
+
+#define REG_A5XX_PC_PRIMITIVE_CNTL 0x0000e384
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK 0x0000007f
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+	return ((val) << A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define A5XX_PC_PRIMITIVE_CNTL_PRIMITIVE_RESTART 0x00000100
+#define A5XX_PC_PRIMITIVE_CNTL_COUNT_PRIMITIVES 0x00000200
+#define A5XX_PC_PRIMITIVE_CNTL_PROVOKING_VTX_LAST 0x00000400
+
+#define REG_A5XX_PC_PRIM_VTX_CNTL 0x0000e385
+#define A5XX_PC_PRIM_VTX_CNTL_PSIZE 0x00000800
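+
+/*
+ * Illustrative sketch, not part of the generated header: VPC_SO_PROG holds
+ * two stream-out program entries per dword (the A_* and B_* fields), and
+ * the *_OFF helpers take a byte offset whose low two bits are dropped, so
+ * it must be dword-aligned.  A single enabled entry composes as:
+ *
+ *	A5XX_VPC_SO_PROG_A_BUF(buf) |
+ *	A5XX_VPC_SO_PROG_A_OFF(off_bytes) |
+ *	A5XX_VPC_SO_PROG_A_EN
+ */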
REG_A5XX_PC_RASTER_CNTL 0x0000e388 +#define A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__MASK 0x00000007 +#define A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__SHIFT 0 +static inline uint32_t A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val) +{ + return ((val) << A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__SHIFT) & A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__MASK; +} +#define A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__MASK 0x00000038 +#define A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__SHIFT 3 +static inline uint32_t A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val) +{ + return ((val) << A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__MASK; +} +#define A5XX_PC_RASTER_CNTL_POLYMODE_ENABLE 0x00000040 + +#define REG_A5XX_PC_CLIP_CNTL 0x0000e389 +#define A5XX_PC_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff +#define A5XX_PC_CLIP_CNTL_CLIP_MASK__SHIFT 0 +static inline uint32_t A5XX_PC_CLIP_CNTL_CLIP_MASK(uint32_t val) +{ + return ((val) << A5XX_PC_CLIP_CNTL_CLIP_MASK__SHIFT) & A5XX_PC_CLIP_CNTL_CLIP_MASK__MASK; +} + +#define REG_A5XX_PC_RESTART_INDEX 0x0000e38c + +#define REG_A5XX_PC_GS_LAYERED 0x0000e38d + +#define REG_A5XX_PC_GS_PARAM 0x0000e38e +#define A5XX_PC_GS_PARAM_MAX_VERTICES__MASK 0x000003ff +#define A5XX_PC_GS_PARAM_MAX_VERTICES__SHIFT 0 +static inline uint32_t A5XX_PC_GS_PARAM_MAX_VERTICES(uint32_t val) +{ + return ((val) << A5XX_PC_GS_PARAM_MAX_VERTICES__SHIFT) & A5XX_PC_GS_PARAM_MAX_VERTICES__MASK; +} +#define A5XX_PC_GS_PARAM_INVOCATIONS__MASK 0x0000f800 +#define A5XX_PC_GS_PARAM_INVOCATIONS__SHIFT 11 +static inline uint32_t A5XX_PC_GS_PARAM_INVOCATIONS(uint32_t val) +{ + return ((val) << A5XX_PC_GS_PARAM_INVOCATIONS__SHIFT) & A5XX_PC_GS_PARAM_INVOCATIONS__MASK; +} +#define A5XX_PC_GS_PARAM_PRIMTYPE__MASK 0x01800000 +#define A5XX_PC_GS_PARAM_PRIMTYPE__SHIFT 23 +static inline uint32_t A5XX_PC_GS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val) +{ + return ((val) << A5XX_PC_GS_PARAM_PRIMTYPE__SHIFT) & A5XX_PC_GS_PARAM_PRIMTYPE__MASK; +} + +#define REG_A5XX_PC_HS_PARAM 0x0000e38f +#define A5XX_PC_HS_PARAM_VERTICES_OUT__MASK 0x0000003f +#define A5XX_PC_HS_PARAM_VERTICES_OUT__SHIFT 0 +static inline uint32_t A5XX_PC_HS_PARAM_VERTICES_OUT(uint32_t val) +{ + return ((val) << A5XX_PC_HS_PARAM_VERTICES_OUT__SHIFT) & A5XX_PC_HS_PARAM_VERTICES_OUT__MASK; +} +#define A5XX_PC_HS_PARAM_SPACING__MASK 0x00600000 +#define A5XX_PC_HS_PARAM_SPACING__SHIFT 21 +static inline uint32_t A5XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val) +{ + return ((val) << A5XX_PC_HS_PARAM_SPACING__SHIFT) & A5XX_PC_HS_PARAM_SPACING__MASK; +} +#define A5XX_PC_HS_PARAM_CW 0x00800000 +#define A5XX_PC_HS_PARAM_CONNECTED 0x01000000 + +#define REG_A5XX_PC_POWER_CNTL 0x0000e3b0 + +#define REG_A5XX_VFD_CONTROL_0 0x0000e400 +#define A5XX_VFD_CONTROL_0_VTXCNT__MASK 0x0000003f +#define A5XX_VFD_CONTROL_0_VTXCNT__SHIFT 0 +static inline uint32_t A5XX_VFD_CONTROL_0_VTXCNT(uint32_t val) +{ + return ((val) << A5XX_VFD_CONTROL_0_VTXCNT__SHIFT) & A5XX_VFD_CONTROL_0_VTXCNT__MASK; +} + +#define REG_A5XX_VFD_CONTROL_1 0x0000e401 +#define A5XX_VFD_CONTROL_1_REGID4VTX__MASK 0x000000ff +#define A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT 0 +static inline uint32_t A5XX_VFD_CONTROL_1_REGID4VTX(uint32_t val) +{ + return ((val) << A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A5XX_VFD_CONTROL_1_REGID4VTX__MASK; +} +#define A5XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00 +#define A5XX_VFD_CONTROL_1_REGID4INST__SHIFT 8 +static inline uint32_t A5XX_VFD_CONTROL_1_REGID4INST(uint32_t val) +{ + return ((val) 
<< A5XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A5XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+#define A5XX_VFD_CONTROL_1_REGID4PRIMID__MASK			0x00ff0000
+#define A5XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT			16
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4PRIMID(uint32_t val)
+{
+	return ((val) << A5XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT) & A5XX_VFD_CONTROL_1_REGID4PRIMID__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_2					0x0000e402
+#define A5XX_VFD_CONTROL_2_REGID_PATCHID__MASK			0x000000ff
+#define A5XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT			0
+static inline uint32_t A5XX_VFD_CONTROL_2_REGID_PATCHID(uint32_t val)
+{
+	return ((val) << A5XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT) & A5XX_VFD_CONTROL_2_REGID_PATCHID__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_3					0x0000e403
+#define A5XX_VFD_CONTROL_3_REGID_PATCHID__MASK			0x0000ff00
+#define A5XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT			8
+static inline uint32_t A5XX_VFD_CONTROL_3_REGID_PATCHID(uint32_t val)
+{
+	return ((val) << A5XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT) & A5XX_VFD_CONTROL_3_REGID_PATCHID__MASK;
+}
+#define A5XX_VFD_CONTROL_3_REGID_TESSX__MASK			0x00ff0000
+#define A5XX_VFD_CONTROL_3_REGID_TESSX__SHIFT			16
+static inline uint32_t A5XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
+{
+	return ((val) << A5XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A5XX_VFD_CONTROL_3_REGID_TESSX__MASK;
+}
+#define A5XX_VFD_CONTROL_3_REGID_TESSY__MASK			0xff000000
+#define A5XX_VFD_CONTROL_3_REGID_TESSY__SHIFT			24
+static inline uint32_t A5XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
+{
+	return ((val) << A5XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A5XX_VFD_CONTROL_3_REGID_TESSY__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_4					0x0000e404
+
+#define REG_A5XX_VFD_CONTROL_5					0x0000e405
+
+#define REG_A5XX_VFD_INDEX_OFFSET				0x0000e408
+
+#define REG_A5XX_VFD_INSTANCE_START_OFFSET			0x0000e409
+
+static inline uint32_t REG_A5XX_VFD_FETCH(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_HI(uint32_t i0) { return 0x0000e40b + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000e40c + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000e40d + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DECODE(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+#define A5XX_VFD_DECODE_INSTR_IDX__MASK				0x0000001f
+#define A5XX_VFD_DECODE_INSTR_IDX__SHIFT			0
+static inline uint32_t A5XX_VFD_DECODE_INSTR_IDX(uint32_t val)
+{
+	return ((val) << A5XX_VFD_DECODE_INSTR_IDX__SHIFT) & A5XX_VFD_DECODE_INSTR_IDX__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_INSTANCED				0x00020000
+#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK			0x0ff00000
+#define A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT			20
+static inline uint32_t A5XX_VFD_DECODE_INSTR_FORMAT(enum a5xx_vtx_fmt val)
+{
+	return ((val) << A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A5XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_SWAP__MASK			0x30000000
+#define A5XX_VFD_DECODE_INSTR_SWAP__SHIFT			28
+static inline uint32_t A5XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+	return ((val) << A5XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A5XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_UNK30				0x40000000
+#define A5XX_VFD_DECODE_INSTR_FLOAT				0x80000000
+
+static inline uint32_t REG_A5XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000e48b + 0x2*i0; }
+
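A minimal sketch of how the indexed REG_A5XX_VFD_FETCH helpers above compose: each vertex-buffer slot i owns a group of four consecutive registers (0x0000e40a + 0x4*i0), so binding a buffer to a slot is four writes. Illustrative only, not part of the patch: set_vfd_fetch and its parameters are hypothetical, while gpu_write(), lower_32_bits() and upper_32_bits() are existing kernel/msm helpers.

	/* Hypothetical example: bind a vertex buffer at `iova` to VFD slot i. */
	static void set_vfd_fetch(struct msm_gpu *gpu, uint32_t i, uint64_t iova,
				  uint32_t size, uint32_t stride)
	{
		gpu_write(gpu, REG_A5XX_VFD_FETCH_BASE_LO(i), lower_32_bits(iova));
		gpu_write(gpu, REG_A5XX_VFD_FETCH_BASE_HI(i), upper_32_bits(iova));
		gpu_write(gpu, REG_A5XX_VFD_FETCH_SIZE(i), size);
		gpu_write(gpu, REG_A5XX_VFD_FETCH_STRIDE(i), stride);
	}

In practice userspace emits these registers through the command stream rather than via direct MMIO; the sketch only demonstrates the address arithmetic of the indexed helpers.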
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK		0x0000000f
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT		0
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val)
+{
+	return ((val) << A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK;
+}
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK			0x00000ff0
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT			4
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
+{
+	return ((val) << A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK;
+}
+
+#define REG_A5XX_VFD_POWER_CNTL					0x0000e4f0
+
+#define REG_A5XX_SP_SP_CNTL					0x0000e580
+
+#define REG_A5XX_SP_VS_CONFIG					0x0000e584
+#define A5XX_SP_VS_CONFIG_ENABLED				0x00000001
+#define A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__MASK		0x000000fe
+#define A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT		1
+static inline uint32_t A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__MASK			0x00007f00
+#define A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__SHIFT		8
+static inline uint32_t A5XX_SP_VS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_FS_CONFIG					0x0000e585
+#define A5XX_SP_FS_CONFIG_ENABLED				0x00000001
+#define A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__MASK		0x000000fe
+#define A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT		1
+static inline uint32_t A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__MASK			0x00007f00
+#define A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__SHIFT		8
+static inline uint32_t A5XX_SP_FS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_HS_CONFIG					0x0000e586
+#define A5XX_SP_HS_CONFIG_ENABLED				0x00000001
+#define A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__MASK		0x000000fe
+#define A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT		1
+static inline uint32_t A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__MASK			0x00007f00
+#define A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__SHIFT		8
+static inline uint32_t A5XX_SP_HS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_DS_CONFIG					0x0000e587
+#define A5XX_SP_DS_CONFIG_ENABLED				0x00000001
+#define A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__MASK		0x000000fe
+#define A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT		1
+static inline uint32_t A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__MASK			0x00007f00
+#define A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__SHIFT		8
+static inline uint32_t A5XX_SP_DS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+	return ((val) << A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__SHIFT) &
A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__MASK; +} + +#define REG_A5XX_SP_GS_CONFIG 0x0000e588 +#define A5XX_SP_GS_CONFIG_ENABLED 0x00000001 +#define A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe +#define A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1 +static inline uint32_t A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET(uint32_t val) +{ + return ((val) << A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__MASK; +} +#define A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00 +#define A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__SHIFT 8 +static inline uint32_t A5XX_SP_GS_CONFIG_SHADEROBJOFFSET(uint32_t val) +{ + return ((val) << A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__MASK; +} + +#define REG_A5XX_SP_CS_CONFIG 0x0000e589 +#define A5XX_SP_CS_CONFIG_ENABLED 0x00000001 +#define A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe +#define A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1 +static inline uint32_t A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET(uint32_t val) +{ + return ((val) << A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__MASK; +} +#define A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00 +#define A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__SHIFT 8 +static inline uint32_t A5XX_SP_CS_CONFIG_SHADEROBJOFFSET(uint32_t val) +{ + return ((val) << A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__MASK; +} + +#define REG_A5XX_SP_VS_CONFIG_MAX_CONST 0x0000e58a + +#define REG_A5XX_SP_FS_CONFIG_MAX_CONST 0x0000e58b + +#define REG_A5XX_SP_VS_CTRL_REG0 0x0000e590 +#define A5XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00000008 +#define A5XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 3 +static inline uint32_t A5XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) +{ + return ((val) << A5XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_VS_CTRL_REG0_THREADSIZE__MASK; +} +#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 +#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 +static inline uint32_t A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) +{ + return ((val) << A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK; +} +#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 +#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 +static inline uint32_t A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) +{ + return ((val) << A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK; +} +#define A5XX_SP_VS_CTRL_REG0_VARYING 0x00010000 +#define A5XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00100000 +#define A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000 +#define A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT 25 +static inline uint32_t A5XX_SP_VS_CTRL_REG0_BRANCHSTACK(uint32_t val) +{ + return ((val) << A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK; +} + +#define REG_A5XX_SP_PRIMITIVE_CNTL 0x0000e592 +#define A5XX_SP_PRIMITIVE_CNTL_VSOUT__MASK 0x0000001f +#define A5XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT 0 +static inline uint32_t A5XX_SP_PRIMITIVE_CNTL_VSOUT(uint32_t val) +{ + return ((val) << A5XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT) & A5XX_SP_PRIMITIVE_CNTL_VSOUT__MASK; +} + +static inline uint32_t REG_A5XX_SP_VS_OUT(uint32_t i0) { return 0x0000e593 + 0x1*i0; } + +static inline uint32_t REG_A5XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000e593 + 0x1*i0; } +#define A5XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff +#define A5XX_SP_VS_OUT_REG_A_REGID__SHIFT 0 +static inline uint32_t A5XX_SP_VS_OUT_REG_A_REGID(uint32_t 
val) +{ + return ((val) << A5XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_A_REGID__MASK; +} +#define A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00000f00 +#define A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 8 +static inline uint32_t A5XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val) +{ + return ((val) << A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK; +} +#define A5XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000 +#define A5XX_SP_VS_OUT_REG_B_REGID__SHIFT 16 +static inline uint32_t A5XX_SP_VS_OUT_REG_B_REGID(uint32_t val) +{ + return ((val) << A5XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_B_REGID__MASK; +} +#define A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x0f000000 +#define A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 24 +static inline uint32_t A5XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val) +{ + return ((val) << A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK; +} + +static inline uint32_t REG_A5XX_SP_VS_VPC_DST(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; } + +static inline uint32_t REG_A5XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; } +#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff +#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0 +static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val) +{ + return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK; +} +#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00 +#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8 +static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val) +{ + return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK; +} +#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000 +#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16 +static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val) +{ + return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK; +} +#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000 +#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24 +static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val) +{ + return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK; +} + +#define REG_A5XX_UNKNOWN_E5AB 0x0000e5ab + +#define REG_A5XX_SP_VS_OBJ_START_LO 0x0000e5ac + +#define REG_A5XX_SP_VS_OBJ_START_HI 0x0000e5ad + +#define REG_A5XX_SP_FS_CTRL_REG0 0x0000e5c0 +#define A5XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00000008 +#define A5XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 3 +static inline uint32_t A5XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) +{ + return ((val) << A5XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_FS_CTRL_REG0_THREADSIZE__MASK; +} +#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 +#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 +static inline uint32_t A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) +{ + return ((val) << A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK; +} +#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 +#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 +static inline uint32_t A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) +{ + return ((val) << A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK; +} +#define A5XX_SP_FS_CTRL_REG0_VARYING 0x00010000 +#define A5XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00100000 +#define A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000 +#define A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT 25 
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val) +{ + return ((val) << A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK; +} + +#define REG_A5XX_UNKNOWN_E5C2 0x0000e5c2 + +#define REG_A5XX_SP_FS_OBJ_START_LO 0x0000e5c3 + +#define REG_A5XX_SP_FS_OBJ_START_HI 0x0000e5c4 + +#define REG_A5XX_SP_BLEND_CNTL 0x0000e5c9 +#define A5XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff +#define A5XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT 0 +static inline uint32_t A5XX_SP_BLEND_CNTL_ENABLE_BLEND(uint32_t val) +{ + return ((val) << A5XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK; +} +#define A5XX_SP_BLEND_CNTL_UNK8 0x00000100 +#define A5XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400 + +#define REG_A5XX_SP_FS_OUTPUT_CNTL 0x0000e5ca +#define A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f +#define A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT 0 +static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_MRT(uint32_t val) +{ + return ((val) << A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK; +} +#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK 0x00001fe0 +#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT 5 +static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID(uint32_t val) +{ + return ((val) << A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK; +} +#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK 0x001fe000 +#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT 13 +static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID(uint32_t val) +{ + return ((val) << A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK; +} + +static inline uint32_t REG_A5XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000e5cb + 0x1*i0; } + +static inline uint32_t REG_A5XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000e5cb + 0x1*i0; } +#define A5XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff +#define A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0 +static inline uint32_t A5XX_SP_FS_OUTPUT_REG_REGID(uint32_t val) +{ + return ((val) << A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_REG_REGID__MASK; +} +#define A5XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100 + +static inline uint32_t REG_A5XX_SP_FS_MRT(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; } + +static inline uint32_t REG_A5XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; } +#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x000000ff +#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0 +static inline uint32_t A5XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a5xx_color_fmt val) +{ + return ((val) << A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK; +} +#define A5XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100 +#define A5XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200 +#define A5XX_SP_FS_MRT_REG_COLOR_SRGB 0x00000400 + +#define REG_A5XX_UNKNOWN_E5DB 0x0000e5db + +#define REG_A5XX_SP_CS_CTRL_REG0 0x0000e5f0 +#define A5XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00000008 +#define A5XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 3 +static inline uint32_t A5XX_SP_CS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) +{ + return ((val) << A5XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_CS_CTRL_REG0_THREADSIZE__MASK; +} +#define A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 +#define A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 +static inline uint32_t A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) +{ + return ((val) << A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK; +} +#define 
A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 +#define A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 +static inline uint32_t A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) +{ + return ((val) << A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK; +} +#define A5XX_SP_CS_CTRL_REG0_VARYING 0x00010000 +#define A5XX_SP_CS_CTRL_REG0_PIXLODENABLE 0x00100000 +#define A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000 +#define A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT 25 +static inline uint32_t A5XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val) +{ + return ((val) << A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK; +} + +#define REG_A5XX_UNKNOWN_E5F2 0x0000e5f2 + +#define REG_A5XX_SP_CS_OBJ_START_LO 0x0000e5f3 + +#define REG_A5XX_SP_CS_OBJ_START_HI 0x0000e5f4 + +#define REG_A5XX_SP_HS_CTRL_REG0 0x0000e600 +#define A5XX_SP_HS_CTRL_REG0_THREADSIZE__MASK 0x00000008 +#define A5XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT 3 +static inline uint32_t A5XX_SP_HS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) +{ + return ((val) << A5XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_HS_CTRL_REG0_THREADSIZE__MASK; +} +#define A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 +#define A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 +static inline uint32_t A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) +{ + return ((val) << A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK; +} +#define A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 +#define A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 +static inline uint32_t A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) +{ + return ((val) << A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK; +} +#define A5XX_SP_HS_CTRL_REG0_VARYING 0x00010000 +#define A5XX_SP_HS_CTRL_REG0_PIXLODENABLE 0x00100000 +#define A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000 +#define A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT 25 +static inline uint32_t A5XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val) +{ + return ((val) << A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK; +} + +#define REG_A5XX_UNKNOWN_E602 0x0000e602 + +#define REG_A5XX_SP_HS_OBJ_START_LO 0x0000e603 + +#define REG_A5XX_SP_HS_OBJ_START_HI 0x0000e604 + +#define REG_A5XX_SP_DS_CTRL_REG0 0x0000e610 +#define A5XX_SP_DS_CTRL_REG0_THREADSIZE__MASK 0x00000008 +#define A5XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT 3 +static inline uint32_t A5XX_SP_DS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) +{ + return ((val) << A5XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_DS_CTRL_REG0_THREADSIZE__MASK; +} +#define A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 +#define A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 +static inline uint32_t A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) +{ + return ((val) << A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK; +} +#define A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 +#define A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 +static inline uint32_t A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) +{ + return ((val) << A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK; +} +#define A5XX_SP_DS_CTRL_REG0_VARYING 0x00010000 +#define A5XX_SP_DS_CTRL_REG0_PIXLODENABLE 0x00100000 +#define A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000 +#define A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT 25 +static inline 
uint32_t A5XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val) +{ + return ((val) << A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK; +} + +#define REG_A5XX_UNKNOWN_E62B 0x0000e62b + +#define REG_A5XX_SP_DS_OBJ_START_LO 0x0000e62c + +#define REG_A5XX_SP_DS_OBJ_START_HI 0x0000e62d + +#define REG_A5XX_SP_GS_CTRL_REG0 0x0000e640 +#define A5XX_SP_GS_CTRL_REG0_THREADSIZE__MASK 0x00000008 +#define A5XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT 3 +static inline uint32_t A5XX_SP_GS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val) +{ + return ((val) << A5XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_GS_CTRL_REG0_THREADSIZE__MASK; +} +#define A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0 +#define A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4 +static inline uint32_t A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val) +{ + return ((val) << A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK; +} +#define A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00 +#define A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10 +static inline uint32_t A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val) +{ + return ((val) << A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK; +} +#define A5XX_SP_GS_CTRL_REG0_VARYING 0x00010000 +#define A5XX_SP_GS_CTRL_REG0_PIXLODENABLE 0x00100000 +#define A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000 +#define A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT 25 +static inline uint32_t A5XX_SP_GS_CTRL_REG0_BRANCHSTACK(uint32_t val) +{ + return ((val) << A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK; +} + +#define REG_A5XX_UNKNOWN_E65B 0x0000e65b + +#define REG_A5XX_SP_GS_OBJ_START_LO 0x0000e65c + +#define REG_A5XX_SP_GS_OBJ_START_HI 0x0000e65d + +#define REG_A5XX_TPL1_TP_RAS_MSAA_CNTL 0x0000e704 +#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003 +#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0 +static inline uint32_t A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val) +{ + return ((val) << A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__MASK; +} + +#define REG_A5XX_TPL1_TP_DEST_MSAA_CNTL 0x0000e705 +#define A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003 +#define A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT 0 +static inline uint32_t A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val) +{ + return ((val) << A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__MASK; +} +#define A5XX_TPL1_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004 + +#define REG_A5XX_TPL1_TP_BORDER_COLOR_BASE_ADDR_LO 0x0000e706 + +#define REG_A5XX_TPL1_TP_BORDER_COLOR_BASE_ADDR_HI 0x0000e707 + +#define REG_A5XX_TPL1_VS_TEX_COUNT 0x0000e700 + +#define REG_A5XX_TPL1_HS_TEX_COUNT 0x0000e701 + +#define REG_A5XX_TPL1_DS_TEX_COUNT 0x0000e702 + +#define REG_A5XX_TPL1_GS_TEX_COUNT 0x0000e703 + +#define REG_A5XX_TPL1_VS_TEX_SAMP_LO 0x0000e722 + +#define REG_A5XX_TPL1_VS_TEX_SAMP_HI 0x0000e723 + +#define REG_A5XX_TPL1_HS_TEX_SAMP_LO 0x0000e724 + +#define REG_A5XX_TPL1_HS_TEX_SAMP_HI 0x0000e725 + +#define REG_A5XX_TPL1_DS_TEX_SAMP_LO 0x0000e726 + +#define REG_A5XX_TPL1_DS_TEX_SAMP_HI 0x0000e727 + +#define REG_A5XX_TPL1_GS_TEX_SAMP_LO 0x0000e728 + +#define REG_A5XX_TPL1_GS_TEX_SAMP_HI 0x0000e729 + +#define REG_A5XX_TPL1_VS_TEX_CONST_LO 0x0000e72a + +#define REG_A5XX_TPL1_VS_TEX_CONST_HI 0x0000e72b + +#define REG_A5XX_TPL1_HS_TEX_CONST_LO 0x0000e72c + +#define REG_A5XX_TPL1_HS_TEX_CONST_HI 
0x0000e72d + +#define REG_A5XX_TPL1_DS_TEX_CONST_LO 0x0000e72e + +#define REG_A5XX_TPL1_DS_TEX_CONST_HI 0x0000e72f + +#define REG_A5XX_TPL1_GS_TEX_CONST_LO 0x0000e730 + +#define REG_A5XX_TPL1_GS_TEX_CONST_HI 0x0000e731 + +#define REG_A5XX_TPL1_FS_TEX_COUNT 0x0000e750 + +#define REG_A5XX_TPL1_CS_TEX_COUNT 0x0000e751 + +#define REG_A5XX_TPL1_FS_TEX_SAMP_LO 0x0000e75a + +#define REG_A5XX_TPL1_FS_TEX_SAMP_HI 0x0000e75b + +#define REG_A5XX_TPL1_CS_TEX_SAMP_LO 0x0000e75c + +#define REG_A5XX_TPL1_CS_TEX_SAMP_HI 0x0000e75d + +#define REG_A5XX_TPL1_FS_TEX_CONST_LO 0x0000e75e + +#define REG_A5XX_TPL1_FS_TEX_CONST_HI 0x0000e75f + +#define REG_A5XX_TPL1_CS_TEX_CONST_LO 0x0000e760 + +#define REG_A5XX_TPL1_CS_TEX_CONST_HI 0x0000e761 + +#define REG_A5XX_TPL1_TP_FS_ROTATION_CNTL 0x0000e764 + +#define REG_A5XX_HLSQ_CONTROL_0_REG 0x0000e784 +#define A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000001 +#define A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 0 +static inline uint32_t A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val) +{ + return ((val) << A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK; +} +#define A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__MASK 0x00000004 +#define A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__SHIFT 2 +static inline uint32_t A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE(enum a3xx_threadsize val) +{ + return ((val) << A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__SHIFT) & A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__MASK; +} + +#define REG_A5XX_HLSQ_CONTROL_1_REG 0x0000e785 +#define A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK 0x0000003f +#define A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT 0 +static inline uint32_t A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT) & A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK; +} + +#define REG_A5XX_HLSQ_CONTROL_2_REG 0x0000e786 +#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff +#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0 +static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK; +} +#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00 +#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8 +static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK; +} +#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000 +#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16 +static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK; +} +#define A5XX_HLSQ_CONTROL_2_REG_SIZE__MASK 0xff000000 +#define A5XX_HLSQ_CONTROL_2_REG_SIZE__SHIFT 24 +static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SIZE(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CONTROL_2_REG_SIZE__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SIZE__MASK; +} + +#define REG_A5XX_HLSQ_CONTROL_3_REG 0x0000e787 +#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff +#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0 +static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK; +} +#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00 +#define 
A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8 +static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK; +} +#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000 +#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16 +static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK; +} +#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000 +#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24 +static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK; +} + +#define REG_A5XX_HLSQ_CONTROL_4_REG 0x0000e788 +#define A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff +#define A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0 +static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK; +} +#define A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK 0x0000ff00 +#define A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT 8 +static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK; +} +#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000 +#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16 +static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK; +} +#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000 +#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24 +static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK; +} + +#define REG_A5XX_HLSQ_UPDATE_CNTL 0x0000e78a + +#define REG_A5XX_HLSQ_VS_CONFIG 0x0000e78b +#define A5XX_HLSQ_VS_CONFIG_ENABLED 0x00000001 +#define A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe +#define A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1 +static inline uint32_t A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET(uint32_t val) +{ + return ((val) << A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__MASK; +} +#define A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00 +#define A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__SHIFT 8 +static inline uint32_t A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET(uint32_t val) +{ + return ((val) << A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__MASK; +} + +#define REG_A5XX_HLSQ_FS_CONFIG 0x0000e78c +#define A5XX_HLSQ_FS_CONFIG_ENABLED 0x00000001 +#define A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe +#define A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1 +static inline uint32_t A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET(uint32_t val) +{ + return ((val) << A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__MASK; +} +#define A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00 +#define A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__SHIFT 8 +static inline uint32_t 
A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET(uint32_t val) +{ + return ((val) << A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__MASK; +} + +#define REG_A5XX_HLSQ_HS_CONFIG 0x0000e78d +#define A5XX_HLSQ_HS_CONFIG_ENABLED 0x00000001 +#define A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe +#define A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1 +static inline uint32_t A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET(uint32_t val) +{ + return ((val) << A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__MASK; +} +#define A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00 +#define A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__SHIFT 8 +static inline uint32_t A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET(uint32_t val) +{ + return ((val) << A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__MASK; +} + +#define REG_A5XX_HLSQ_DS_CONFIG 0x0000e78e +#define A5XX_HLSQ_DS_CONFIG_ENABLED 0x00000001 +#define A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe +#define A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1 +static inline uint32_t A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET(uint32_t val) +{ + return ((val) << A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__MASK; +} +#define A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00 +#define A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__SHIFT 8 +static inline uint32_t A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET(uint32_t val) +{ + return ((val) << A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__MASK; +} + +#define REG_A5XX_HLSQ_GS_CONFIG 0x0000e78f +#define A5XX_HLSQ_GS_CONFIG_ENABLED 0x00000001 +#define A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe +#define A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1 +static inline uint32_t A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET(uint32_t val) +{ + return ((val) << A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__MASK; +} +#define A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00 +#define A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__SHIFT 8 +static inline uint32_t A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET(uint32_t val) +{ + return ((val) << A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__MASK; +} + +#define REG_A5XX_HLSQ_CS_CONFIG 0x0000e790 +#define A5XX_HLSQ_CS_CONFIG_ENABLED 0x00000001 +#define A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe +#define A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1 +static inline uint32_t A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__MASK; +} +#define A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00 +#define A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__SHIFT 8 +static inline uint32_t A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__MASK; +} + +#define REG_A5XX_HLSQ_VS_CNTL 0x0000e791 +#define A5XX_HLSQ_VS_CNTL_SSBO_ENABLE 0x00000001 +#define A5XX_HLSQ_VS_CNTL_INSTRLEN__MASK 0xfffffffe +#define A5XX_HLSQ_VS_CNTL_INSTRLEN__SHIFT 1 +static inline uint32_t A5XX_HLSQ_VS_CNTL_INSTRLEN(uint32_t val) +{ + return ((val) << A5XX_HLSQ_VS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_VS_CNTL_INSTRLEN__MASK; +} + +#define REG_A5XX_HLSQ_FS_CNTL 0x0000e792 +#define A5XX_HLSQ_FS_CNTL_SSBO_ENABLE 0x00000001 +#define A5XX_HLSQ_FS_CNTL_INSTRLEN__MASK 0xfffffffe +#define A5XX_HLSQ_FS_CNTL_INSTRLEN__SHIFT 
1 +static inline uint32_t A5XX_HLSQ_FS_CNTL_INSTRLEN(uint32_t val) +{ + return ((val) << A5XX_HLSQ_FS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_FS_CNTL_INSTRLEN__MASK; +} + +#define REG_A5XX_HLSQ_HS_CNTL 0x0000e793 +#define A5XX_HLSQ_HS_CNTL_SSBO_ENABLE 0x00000001 +#define A5XX_HLSQ_HS_CNTL_INSTRLEN__MASK 0xfffffffe +#define A5XX_HLSQ_HS_CNTL_INSTRLEN__SHIFT 1 +static inline uint32_t A5XX_HLSQ_HS_CNTL_INSTRLEN(uint32_t val) +{ + return ((val) << A5XX_HLSQ_HS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_HS_CNTL_INSTRLEN__MASK; +} + +#define REG_A5XX_HLSQ_DS_CNTL 0x0000e794 +#define A5XX_HLSQ_DS_CNTL_SSBO_ENABLE 0x00000001 +#define A5XX_HLSQ_DS_CNTL_INSTRLEN__MASK 0xfffffffe +#define A5XX_HLSQ_DS_CNTL_INSTRLEN__SHIFT 1 +static inline uint32_t A5XX_HLSQ_DS_CNTL_INSTRLEN(uint32_t val) +{ + return ((val) << A5XX_HLSQ_DS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_DS_CNTL_INSTRLEN__MASK; +} + +#define REG_A5XX_HLSQ_GS_CNTL 0x0000e795 +#define A5XX_HLSQ_GS_CNTL_SSBO_ENABLE 0x00000001 +#define A5XX_HLSQ_GS_CNTL_INSTRLEN__MASK 0xfffffffe +#define A5XX_HLSQ_GS_CNTL_INSTRLEN__SHIFT 1 +static inline uint32_t A5XX_HLSQ_GS_CNTL_INSTRLEN(uint32_t val) +{ + return ((val) << A5XX_HLSQ_GS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_GS_CNTL_INSTRLEN__MASK; +} + +#define REG_A5XX_HLSQ_CS_CNTL 0x0000e796 +#define A5XX_HLSQ_CS_CNTL_SSBO_ENABLE 0x00000001 +#define A5XX_HLSQ_CS_CNTL_INSTRLEN__MASK 0xfffffffe +#define A5XX_HLSQ_CS_CNTL_INSTRLEN__SHIFT 1 +static inline uint32_t A5XX_HLSQ_CS_CNTL_INSTRLEN(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_CS_CNTL_INSTRLEN__MASK; +} + +#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_X 0x0000e7b9 + +#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000e7ba + +#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000e7bb + +#define REG_A5XX_HLSQ_CS_NDRANGE_0 0x0000e7b0 +#define A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK 0x00000003 +#define A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT 0 +static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK; +} +#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc +#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT 2 +static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK; +} +#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000 +#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT 12 +static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK; +} +#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000 +#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT 22 +static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK; +} + +#define REG_A5XX_HLSQ_CS_NDRANGE_1 0x0000e7b1 +#define A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK 0xffffffff +#define A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT 0 +static inline uint32_t A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(uint32_t val) +{ + return ((val) << A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK; +} + +#define REG_A5XX_HLSQ_CS_NDRANGE_2 0x0000e7b2 +#define A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK 0xffffffff +#define A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT 0 +static inline uint32_t A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X(uint32_t val) +{ + 
return ((val) << A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_3				0x0000e7b3
+#define A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK		0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT		0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_4				0x0000e7b4
+#define A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK		0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT		0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_5				0x0000e7b5
+#define A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK		0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT		0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_6				0x0000e7b6
+#define A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK		0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT		0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_CNTL_0					0x0000e7b7
+#define A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK			0x000000ff
+#define A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT			0
+static inline uint32_t A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT) & A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK;
+}
+#define A5XX_HLSQ_CS_CNTL_0_UNK0__MASK				0x0000ff00
+#define A5XX_HLSQ_CS_CNTL_0_UNK0__SHIFT				8
+static inline uint32_t A5XX_HLSQ_CS_CNTL_0_UNK0(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_CS_CNTL_0_UNK0__SHIFT) & A5XX_HLSQ_CS_CNTL_0_UNK0__MASK;
+}
+#define A5XX_HLSQ_CS_CNTL_0_UNK1__MASK				0x00ff0000
+#define A5XX_HLSQ_CS_CNTL_0_UNK1__SHIFT				16
+static inline uint32_t A5XX_HLSQ_CS_CNTL_0_UNK1(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_CS_CNTL_0_UNK1__SHIFT) & A5XX_HLSQ_CS_CNTL_0_UNK1__MASK;
+}
+#define A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK			0xff000000
+#define A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT			24
+static inline uint32_t A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val)
+{
+	return ((val) << A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT) & A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_CNTL_1					0x0000e7b8
+
+#define REG_A5XX_UNKNOWN_E7C0					0x0000e7c0
+
+#define REG_A5XX_HLSQ_VS_CONSTLEN				0x0000e7c3
+
+#define REG_A5XX_HLSQ_VS_INSTRLEN				0x0000e7c4
+
+#define REG_A5XX_UNKNOWN_E7C5					0x0000e7c5
+
+#define REG_A5XX_HLSQ_HS_CONSTLEN				0x0000e7c8
+
+#define REG_A5XX_HLSQ_HS_INSTRLEN				0x0000e7c9
+
+#define REG_A5XX_UNKNOWN_E7CA					0x0000e7ca
+
+#define REG_A5XX_HLSQ_DS_CONSTLEN				0x0000e7cd
+
+#define REG_A5XX_HLSQ_DS_INSTRLEN				0x0000e7ce
+
+#define REG_A5XX_UNKNOWN_E7CF					0x0000e7cf
+
+#define REG_A5XX_HLSQ_GS_CONSTLEN				0x0000e7d2
+
+#define REG_A5XX_HLSQ_GS_INSTRLEN				0x0000e7d3
+
+#define REG_A5XX_UNKNOWN_E7D4					0x0000e7d4
+
+#define REG_A5XX_HLSQ_FS_CONSTLEN				0x0000e7d7
+
+#define REG_A5XX_HLSQ_FS_INSTRLEN				0x0000e7d8
+
+#define REG_A5XX_UNKNOWN_E7D9					0x0000e7d9
+
+#define REG_A5XX_HLSQ_CS_CONSTLEN				0x0000e7dc
+
+#define REG_A5XX_HLSQ_CS_INSTRLEN				0x0000e7dd
+
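The HLSQ_CS_NDRANGE_0 helpers above pack the kernel dimensionality and the local work-group shape into a single dword, with the global size and offset per axis carried in NDRANGE_1..6. A sketch of encoding a 3-D dispatch with an 8x8x1 work-group; the assumption that the LOCALSIZE* fields hold the size minus one follows how userspace drivers program them, so treat it as an assumption rather than documented behavior:

	/* Sketch: work-group shape for a hypothetical 8x8x1 compute dispatch.
	 * LOCALSIZE* fields are assumed to be (size - 1).
	 */
	uint32_t ndrange0 = A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM(3) |
			    A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(8 - 1) |
			    A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(8 - 1) |
			    A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(1 - 1);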
+#define REG_A5XX_RB_2D_BLIT_CNTL				0x00002100
+
+#define REG_A5XX_RB_2D_SRC_SOLID_DW0				0x00002101
+
+#define REG_A5XX_RB_2D_SRC_SOLID_DW1				0x00002102
+
+#define REG_A5XX_RB_2D_SRC_SOLID_DW2				0x00002103
+
+#define REG_A5XX_RB_2D_SRC_SOLID_DW3				0x00002104
+
+#define REG_A5XX_RB_2D_SRC_INFO					0x00002107
+#define A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK			0x000000ff
+#define A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__SHIFT			0
+static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+	return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_2D_SRC_INFO_TILE_MODE__MASK			0x00000300
+#define A5XX_RB_2D_SRC_INFO_TILE_MODE__SHIFT			8
+static inline uint32_t A5XX_RB_2D_SRC_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+	return ((val) << A5XX_RB_2D_SRC_INFO_TILE_MODE__SHIFT) & A5XX_RB_2D_SRC_INFO_TILE_MODE__MASK;
+}
+#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK			0x00000c00
+#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT			10
+static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+	return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK;
+}
+#define A5XX_RB_2D_SRC_INFO_FLAGS				0x00001000
+#define A5XX_RB_2D_SRC_INFO_SRGB				0x00002000
+
+#define REG_A5XX_RB_2D_SRC_LO					0x00002108
+
+#define REG_A5XX_RB_2D_SRC_HI					0x00002109
+
+#define REG_A5XX_RB_2D_SRC_SIZE					0x0000210a
+#define A5XX_RB_2D_SRC_SIZE_PITCH__MASK				0x0000ffff
+#define A5XX_RB_2D_SRC_SIZE_PITCH__SHIFT			0
+static inline uint32_t A5XX_RB_2D_SRC_SIZE_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_2D_SRC_SIZE_PITCH__SHIFT) & A5XX_RB_2D_SRC_SIZE_PITCH__MASK;
+}
+#define A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__MASK			0xffff0000
+#define A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__SHIFT			16
+static inline uint32_t A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__SHIFT) & A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_2D_DST_INFO					0x00002110
+#define A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK			0x000000ff
+#define A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT			0
+static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+	return ((val) << A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_2D_DST_INFO_TILE_MODE__MASK			0x00000300
+#define A5XX_RB_2D_DST_INFO_TILE_MODE__SHIFT			8
+static inline uint32_t A5XX_RB_2D_DST_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+	return ((val) << A5XX_RB_2D_DST_INFO_TILE_MODE__SHIFT) & A5XX_RB_2D_DST_INFO_TILE_MODE__MASK;
+}
+#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK			0x00000c00
+#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT			10
+static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+	return ((val) << A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK;
+}
+#define A5XX_RB_2D_DST_INFO_FLAGS				0x00001000
+#define A5XX_RB_2D_DST_INFO_SRGB				0x00002000
+
+#define REG_A5XX_RB_2D_DST_LO					0x00002111
+
+#define REG_A5XX_RB_2D_DST_HI					0x00002112
+
+#define REG_A5XX_RB_2D_DST_SIZE					0x00002113
+#define A5XX_RB_2D_DST_SIZE_PITCH__MASK				0x0000ffff
+#define A5XX_RB_2D_DST_SIZE_PITCH__SHIFT			0
+static inline uint32_t A5XX_RB_2D_DST_SIZE_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_2D_DST_SIZE_PITCH__SHIFT) & A5XX_RB_2D_DST_SIZE_PITCH__MASK;
+}
+#define A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__MASK			0xffff0000
+#define A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__SHIFT			16
+static inline uint32_t A5XX_RB_2D_DST_SIZE_ARRAY_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__SHIFT) & A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_2D_SRC_FLAGS_LO				0x00002140
+
+#define REG_A5XX_RB_2D_SRC_FLAGS_HI				0x00002141
+
+#define REG_A5XX_RB_2D_SRC_FLAGS_PITCH				0x00002142
+#define A5XX_RB_2D_SRC_FLAGS_PITCH__MASK			0xffffffff
+#define A5XX_RB_2D_SRC_FLAGS_PITCH__SHIFT			0
+static inline uint32_t A5XX_RB_2D_SRC_FLAGS_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_2D_SRC_FLAGS_PITCH__SHIFT) & A5XX_RB_2D_SRC_FLAGS_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_2D_DST_FLAGS_LO				0x00002143
+
+#define REG_A5XX_RB_2D_DST_FLAGS_HI				0x00002144
+
+#define REG_A5XX_RB_2D_DST_FLAGS_PITCH				0x00002145
+#define A5XX_RB_2D_DST_FLAGS_PITCH__MASK			0xffffffff
+#define A5XX_RB_2D_DST_FLAGS_PITCH__SHIFT			0
+static inline uint32_t A5XX_RB_2D_DST_FLAGS_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A5XX_RB_2D_DST_FLAGS_PITCH__SHIFT) & A5XX_RB_2D_DST_FLAGS_PITCH__MASK;
+}
+
+#define REG_A5XX_GRAS_2D_BLIT_CNTL				0x00002180
+
+#define REG_A5XX_GRAS_2D_SRC_INFO				0x00002181
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK		0x000000ff
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT		0
+static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+	return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_GRAS_2D_SRC_INFO_TILE_MODE__MASK			0x00000300
+#define A5XX_GRAS_2D_SRC_INFO_TILE_MODE__SHIFT			8
+static inline uint32_t A5XX_GRAS_2D_SRC_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+	return ((val) << A5XX_GRAS_2D_SRC_INFO_TILE_MODE__SHIFT) & A5XX_GRAS_2D_SRC_INFO_TILE_MODE__MASK;
+}
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK			0x00000c00
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT			10
+static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+	return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK;
+}
+#define A5XX_GRAS_2D_SRC_INFO_FLAGS				0x00001000
+#define A5XX_GRAS_2D_SRC_INFO_SRGB				0x00002000
+
+#define REG_A5XX_GRAS_2D_DST_INFO				0x00002182
+#define A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK		0x000000ff
+#define A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__SHIFT		0
+static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+	return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_GRAS_2D_DST_INFO_TILE_MODE__MASK			0x00000300
+#define A5XX_GRAS_2D_DST_INFO_TILE_MODE__SHIFT			8
+static inline uint32_t A5XX_GRAS_2D_DST_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+	return ((val) << A5XX_GRAS_2D_DST_INFO_TILE_MODE__SHIFT) & A5XX_GRAS_2D_DST_INFO_TILE_MODE__MASK;
+}
+#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK			0x00000c00
+#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT			10
+static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+	return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK;
+}
+#define A5XX_GRAS_2D_DST_INFO_FLAGS				0x00001000
+#define A5XX_GRAS_2D_DST_INFO_SRGB				0x00002000
+
+#define REG_A5XX_UNKNOWN_2184					0x00002184
+
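Note the `(val >> 6)` in the *_2D_*_SIZE and *_FLAGS_PITCH helpers above: unlike most field helpers in this file, they take a pitch in bytes and store it in 64-byte units, so the caller's pitch must be 64-byte aligned. A small sketch (width, height and cpp are hypothetical; ALIGN() is the standard kernel macro):

	/* Pitch fields take bytes; the helpers store them >> 6 (64-byte units). */
	uint32_t pitch = ALIGN(width * cpp, 64);	/* bytes, 64-byte aligned */
	uint32_t dst_size = A5XX_RB_2D_DST_SIZE_PITCH(pitch) |
			    A5XX_RB_2D_DST_SIZE_ARRAY_PITCH(pitch * height);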
+#define REG_A5XX_TEX_SAMP_0					0x00000000
+#define A5XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR			0x00000001
+#define A5XX_TEX_SAMP_0_XY_MAG__MASK				0x00000006
+#define A5XX_TEX_SAMP_0_XY_MAG__SHIFT				1
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MAG(enum a5xx_tex_filter val)
+{
+	return ((val) << A5XX_TEX_SAMP_0_XY_MAG__SHIFT) & A5XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A5XX_TEX_SAMP_0_XY_MIN__MASK				0x00000018
+#define A5XX_TEX_SAMP_0_XY_MIN__SHIFT				3
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MIN(enum a5xx_tex_filter val)
+{
+	return ((val) << A5XX_TEX_SAMP_0_XY_MIN__SHIFT) & A5XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_S__MASK				0x000000e0
+#define A5XX_TEX_SAMP_0_WRAP_S__SHIFT				5
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_S(enum a5xx_tex_clamp val)
+{
+	return ((val) << A5XX_TEX_SAMP_0_WRAP_S__SHIFT) & A5XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_T__MASK				0x00000700
+#define A5XX_TEX_SAMP_0_WRAP_T__SHIFT				8
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_T(enum a5xx_tex_clamp val)
+{
+	return ((val) << A5XX_TEX_SAMP_0_WRAP_T__SHIFT) & A5XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_R__MASK				0x00003800
+#define A5XX_TEX_SAMP_0_WRAP_R__SHIFT				11
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_R(enum a5xx_tex_clamp val)
+{
+	return ((val) << A5XX_TEX_SAMP_0_WRAP_R__SHIFT) & A5XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A5XX_TEX_SAMP_0_ANISO__MASK				0x0001c000
+#define A5XX_TEX_SAMP_0_ANISO__SHIFT				14
+static inline uint32_t A5XX_TEX_SAMP_0_ANISO(enum a5xx_tex_aniso val)
+{
+	return ((val) << A5XX_TEX_SAMP_0_ANISO__SHIFT) & A5XX_TEX_SAMP_0_ANISO__MASK;
+}
+#define A5XX_TEX_SAMP_0_LOD_BIAS__MASK				0xfff80000
+#define A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT				19
+static inline uint32_t A5XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+	return ((((int32_t)(val * 256.0))) << A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A5XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
+
+#define REG_A5XX_TEX_SAMP_1					0x00000001
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK			0x0000000e
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT			1
+static inline uint32_t A5XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+	return ((val) << A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A5XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF			0x00000010
+#define A5XX_TEX_SAMP_1_UNNORM_COORDS				0x00000020
+#define A5XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR			0x00000040
+#define A5XX_TEX_SAMP_1_MAX_LOD__MASK				0x000fff00
+#define A5XX_TEX_SAMP_1_MAX_LOD__SHIFT				8
+static inline uint32_t A5XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+	return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A5XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A5XX_TEX_SAMP_1_MIN_LOD__MASK				0xfff00000
+#define A5XX_TEX_SAMP_1_MIN_LOD__SHIFT				20
+static inline uint32_t A5XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+	return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A5XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
+
+#define REG_A5XX_TEX_SAMP_2					0x00000002
+#define A5XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK			0xffffff80
+#define A5XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT			7
+static inline uint32_t A5XX_TEX_SAMP_2_BCOLOR_OFFSET(uint32_t val)
+{
+	return ((val) << A5XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT) & A5XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK;
+}
+
+#define REG_A5XX_TEX_SAMP_3					0x00000003
+
+#define REG_A5XX_TEX_CONST_0					0x00000000
+#define A5XX_TEX_CONST_0_TILE_MODE__MASK			0x00000003
+#define A5XX_TEX_CONST_0_TILE_MODE__SHIFT			0
+static inline uint32_t A5XX_TEX_CONST_0_TILE_MODE(enum a5xx_tile_mode val)
+{
+	return ((val) << A5XX_TEX_CONST_0_TILE_MODE__SHIFT) & A5XX_TEX_CONST_0_TILE_MODE__MASK;
+}
+#define A5XX_TEX_CONST_0_SRGB					0x00000004
+#define A5XX_TEX_CONST_0_SWIZ_X__MASK				0x00000070
+#define A5XX_TEX_CONST_0_SWIZ_X__SHIFT				4
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_X(enum a5xx_tex_swiz val)
+{
+	return ((val) << A5XX_TEX_CONST_0_SWIZ_X__SHIFT) & A5XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Y__MASK				0x00000380
+#define A5XX_TEX_CONST_0_SWIZ_Y__SHIFT				7
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Y(enum a5xx_tex_swiz val)
+{
+	return ((val) << A5XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Z__MASK				0x00001c00
+#define A5XX_TEX_CONST_0_SWIZ_Z__SHIFT				10
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Z(enum a5xx_tex_swiz val)
+{
+	return ((val) << A5XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_W__MASK				0x0000e000
+#define A5XX_TEX_CONST_0_SWIZ_W__SHIFT				13
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_W(enum a5xx_tex_swiz val)
+{
+	return ((val) << A5XX_TEX_CONST_0_SWIZ_W__SHIFT) & A5XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A5XX_TEX_CONST_0_MIPLVLS__MASK				0x000f0000
+#define A5XX_TEX_CONST_0_MIPLVLS__SHIFT				16
+static inline uint32_t A5XX_TEX_CONST_0_MIPLVLS(uint32_t val)
+{
+	return ((val) << A5XX_TEX_CONST_0_MIPLVLS__SHIFT) & A5XX_TEX_CONST_0_MIPLVLS__MASK;
+}
+#define A5XX_TEX_CONST_0_SAMPLES__MASK				0x00300000
+#define A5XX_TEX_CONST_0_SAMPLES__SHIFT				20
+static inline uint32_t A5XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val)
+{
+	return ((val) << A5XX_TEX_CONST_0_SAMPLES__SHIFT) & A5XX_TEX_CONST_0_SAMPLES__MASK;
+}
+#define A5XX_TEX_CONST_0_FMT__MASK				0x3fc00000
+#define A5XX_TEX_CONST_0_FMT__SHIFT				22
+static inline uint32_t A5XX_TEX_CONST_0_FMT(enum a5xx_tex_fmt val)
+{
+	return ((val) << A5XX_TEX_CONST_0_FMT__SHIFT) & A5XX_TEX_CONST_0_FMT__MASK;
+}
+#define A5XX_TEX_CONST_0_SWAP__MASK				0xc0000000
+#define A5XX_TEX_CONST_0_SWAP__SHIFT				30
+static inline uint32_t A5XX_TEX_CONST_0_SWAP(enum a3xx_color_swap val)
+{
+	return ((val) << A5XX_TEX_CONST_0_SWAP__SHIFT) & A5XX_TEX_CONST_0_SWAP__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_1					0x00000001
+#define A5XX_TEX_CONST_1_WIDTH__MASK				0x00007fff
+#define A5XX_TEX_CONST_1_WIDTH__SHIFT				0
+static inline uint32_t A5XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+	return ((val) << A5XX_TEX_CONST_1_WIDTH__SHIFT) & A5XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A5XX_TEX_CONST_1_HEIGHT__MASK				0x3fff8000
+#define A5XX_TEX_CONST_1_HEIGHT__SHIFT				15
+static inline uint32_t A5XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+	return ((val) << A5XX_TEX_CONST_1_HEIGHT__SHIFT) & A5XX_TEX_CONST_1_HEIGHT__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_2					0x00000002
+#define A5XX_TEX_CONST_2_BUFFER					0x00000010
+#define A5XX_TEX_CONST_2_PITCHALIGN__MASK			0x0000000f
+#define A5XX_TEX_CONST_2_PITCHALIGN__SHIFT			0
+static inline uint32_t A5XX_TEX_CONST_2_PITCHALIGN(uint32_t val)
+{
+	return ((val) << A5XX_TEX_CONST_2_PITCHALIGN__SHIFT) & A5XX_TEX_CONST_2_PITCHALIGN__MASK;
+}
+#define A5XX_TEX_CONST_2_PITCH__MASK				0x1fffff80
+#define A5XX_TEX_CONST_2_PITCH__SHIFT				7
+static inline uint32_t A5XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+	return ((val) << A5XX_TEX_CONST_2_PITCH__SHIFT) & A5XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A5XX_TEX_CONST_2_TYPE__MASK				0xe0000000
+#define A5XX_TEX_CONST_2_TYPE__SHIFT				29
+static inline uint32_t A5XX_TEX_CONST_2_TYPE(enum a5xx_tex_type val)
+{
+	return ((val) << A5XX_TEX_CONST_2_TYPE__SHIFT) & A5XX_TEX_CONST_2_TYPE__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_3					0x00000003
+#define A5XX_TEX_CONST_3_ARRAY_PITCH__MASK			0x00003fff
+#define A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT			0
+static inline uint32_t A5XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val)
+{
+	return ((val >> 12) << A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A5XX_TEX_CONST_3_ARRAY_PITCH__MASK;
+}
+#define A5XX_TEX_CONST_3_MIN_LAYERSZ__MASK			0x07800000
+#define A5XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT			23
+static inline uint32_t A5XX_TEX_CONST_3_MIN_LAYERSZ(uint32_t val)
+{
+	return ((val >> 12) << A5XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT) & A5XX_TEX_CONST_3_MIN_LAYERSZ__MASK;
+}
+#define A5XX_TEX_CONST_3_TILE_ALL				0x08000000
+#define A5XX_TEX_CONST_3_FLAG					0x10000000
+
+#define REG_A5XX_TEX_CONST_4					0x00000004
+#define A5XX_TEX_CONST_4_BASE_LO__MASK				0xffffffe0
+#define A5XX_TEX_CONST_4_BASE_LO__SHIFT				5
+static inline uint32_t A5XX_TEX_CONST_4_BASE_LO(uint32_t val)
+{
+	return ((val >> 5) << A5XX_TEX_CONST_4_BASE_LO__SHIFT) & A5XX_TEX_CONST_4_BASE_LO__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_5					0x00000005
+#define A5XX_TEX_CONST_5_BASE_HI__MASK				0x0001ffff
+#define A5XX_TEX_CONST_5_BASE_HI__SHIFT				0
+static inline uint32_t A5XX_TEX_CONST_5_BASE_HI(uint32_t val)
+{
+	return ((val) << A5XX_TEX_CONST_5_BASE_HI__SHIFT) & A5XX_TEX_CONST_5_BASE_HI__MASK;
+}
+#define A5XX_TEX_CONST_5_DEPTH__MASK				0x3ffe0000
+#define A5XX_TEX_CONST_5_DEPTH__SHIFT				17
+static inline uint32_t A5XX_TEX_CONST_5_DEPTH(uint32_t val)
+{
+	return ((val) << A5XX_TEX_CONST_5_DEPTH__SHIFT) & A5XX_TEX_CONST_5_DEPTH__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_6					0x00000006
+
+#define REG_A5XX_TEX_CONST_7					0x00000007
+
+#define REG_A5XX_TEX_CONST_8					0x00000008
+
+#define REG_A5XX_TEX_CONST_9					0x00000009
+
+#define REG_A5XX_TEX_CONST_10					0x0000000a
+
+#define REG_A5XX_TEX_CONST_11					0x0000000b
+
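Unlike the MMIO registers earlier in this header, the TEX_SAMP_* and TEX_CONST_* offsets above are dword indices into in-memory descriptors (note that REG_A5XX_TEX_CONST_0 is 0x00000000), so the helpers are used to fill descriptor arrays rather than to address registers. A sketch of the first two dwords of a linear RGBA8 2-D texture descriptor; TFMT5_8_8_8_8_UNORM, the A5XX_TEX_* swizzle values and WZYX are enum values defined earlier in this header, while width and height are hypothetical:

	/* Dword 0: format, identity swizzle, WZYX component swap. */
	uint32_t desc0 = A5XX_TEX_CONST_0_FMT(TFMT5_8_8_8_8_UNORM) |
			 A5XX_TEX_CONST_0_SWIZ_X(A5XX_TEX_X) |
			 A5XX_TEX_CONST_0_SWIZ_Y(A5XX_TEX_Y) |
			 A5XX_TEX_CONST_0_SWIZ_Z(A5XX_TEX_Z) |
			 A5XX_TEX_CONST_0_SWIZ_W(A5XX_TEX_W) |
			 A5XX_TEX_CONST_0_SWAP(WZYX);
	/* Dword 1: dimensions. */
	uint32_t desc1 = A5XX_TEX_CONST_1_WIDTH(width) |
			 A5XX_TEX_CONST_1_HEIGHT(height);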
A5XX_SSBO_2_0_BASE_LO__SHIFT) & A5XX_SSBO_2_0_BASE_LO__MASK; +} + +#define REG_A5XX_SSBO_2_1 0x00000001 +#define A5XX_SSBO_2_1_BASE_HI__MASK 0xffffffff +#define A5XX_SSBO_2_1_BASE_HI__SHIFT 0 +static inline uint32_t A5XX_SSBO_2_1_BASE_HI(uint32_t val) +{ + return ((val) << A5XX_SSBO_2_1_BASE_HI__SHIFT) & A5XX_SSBO_2_1_BASE_HI__MASK; +} + +#define REG_A5XX_UBO_0 0x00000000 +#define A5XX_UBO_0_BASE_LO__MASK 0xffffffff +#define A5XX_UBO_0_BASE_LO__SHIFT 0 +static inline uint32_t A5XX_UBO_0_BASE_LO(uint32_t val) +{ + return ((val) << A5XX_UBO_0_BASE_LO__SHIFT) & A5XX_UBO_0_BASE_LO__MASK; +} + +#define REG_A5XX_UBO_1 0x00000001 +#define A5XX_UBO_1_BASE_HI__MASK 0x0001ffff +#define A5XX_UBO_1_BASE_HI__SHIFT 0 +static inline uint32_t A5XX_UBO_1_BASE_HI(uint32_t val) +{ + return ((val) << A5XX_UBO_1_BASE_HI__SHIFT) & A5XX_UBO_1_BASE_HI__MASK; +} + + +#endif /* A5XX_XML */
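To see how these generated helpers compose: each macro shifts its argument into the field position and masks the result, so a full register dword is built by OR-ing helpers together. A minimal sketch under that reading (pack_tex_const_0 is a hypothetical helper, not part of the patch; it uses only macros defined above):

static inline uint32_t pack_tex_const_0(enum a5xx_tex_fmt fmt,
					enum a3xx_color_swap swap,
					uint32_t miplvls)
{
	/* each helper masks its own field, so the ORed fields cannot overlap */
	return A5XX_TEX_CONST_0_FMT(fmt) |
	       A5XX_TEX_CONST_0_SWAP(swap) |
	       A5XX_TEX_CONST_0_MIPLVLS(miplvls);
}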
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c new file mode 100644 index 000000000..6bd397a85 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. + */ + +#include <linux/types.h> +#include <linux/debugfs.h> + +#include <drm/drm_debugfs.h> +#include <drm/drm_file.h> +#include <drm/drm_print.h> + +#include "a5xx_gpu.h" + +static void pfp_print(struct msm_gpu *gpu, struct drm_printer *p) +{ + int i; + + drm_printf(p, "PFP state:\n"); + + for (i = 0; i < 36; i++) { + gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, i); + drm_printf(p, " %02x: %08x\n", i, + gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA)); + } +} + +static void me_print(struct msm_gpu *gpu, struct drm_printer *p) +{ + int i; + + drm_printf(p, "ME state:\n"); + + for (i = 0; i < 29; i++) { + gpu_write(gpu, REG_A5XX_CP_ME_STAT_ADDR, i); + drm_printf(p, " %02x: %08x\n", i, + gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA)); + } +} + +static void meq_print(struct msm_gpu *gpu, struct drm_printer *p) +{ + int i; + + drm_printf(p, "MEQ state:\n"); + gpu_write(gpu, REG_A5XX_CP_MEQ_DBG_ADDR, 0); + + for (i = 0; i < 64; i++) { + drm_printf(p, " %02x: %08x\n", i, + gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA)); + } +} + +static void roq_print(struct msm_gpu *gpu, struct drm_printer *p) +{ + int i; + + drm_printf(p, "ROQ state:\n"); + gpu_write(gpu, REG_A5XX_CP_ROQ_DBG_ADDR, 0); + + for (i = 0; i < 512 / 4; i++) { + uint32_t val[4]; + int j; + for (j = 0; j < 4; j++) + val[j] = gpu_read(gpu, REG_A5XX_CP_ROQ_DBG_DATA); + drm_printf(p, " %02x: %08x %08x %08x %08x\n", i, + val[0], val[1], val[2], val[3]); + } +} + +static int show(struct seq_file *m, void *arg) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct msm_drm_private *priv = dev->dev_private; + struct drm_printer p = drm_seq_file_printer(m); + void (*show)(struct msm_gpu *gpu, struct drm_printer *p) = + node->info_ent->data; + + show(priv->gpu, &p); + return 0; +} + +#define ENT(n) { .name = #n, .show = show, .data = n ##_print } +static struct drm_info_list a5xx_debugfs_list[] = { + ENT(pfp), + ENT(me), + ENT(meq), + ENT(roq), +}; + +/* for debugfs files that can be written to, we can't use drm helper: */ +static int +reset_set(void *data, u64 val) +{ + struct drm_device *dev = data; + struct msm_drm_private *priv = dev->dev_private; + struct msm_gpu *gpu = priv->gpu; + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + + if (!capable(CAP_SYS_ADMIN)) + return -EINVAL; + + /* TODO do we care about trying to make sure the GPU is idle? + * Since this is just a debug feature limited to CAP_SYS_ADMIN, + * maybe it is fine to let the user keep both pieces if they + * try to reset an active GPU. + */ + + mutex_lock(&gpu->lock); + + release_firmware(adreno_gpu->fw[ADRENO_FW_PM4]); + adreno_gpu->fw[ADRENO_FW_PM4] = NULL; + + release_firmware(adreno_gpu->fw[ADRENO_FW_PFP]); + adreno_gpu->fw[ADRENO_FW_PFP] = NULL; + + if (a5xx_gpu->pm4_bo) { + msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace); + drm_gem_object_put(a5xx_gpu->pm4_bo); + a5xx_gpu->pm4_bo = NULL; + } + + if (a5xx_gpu->pfp_bo) { + msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace); + drm_gem_object_put(a5xx_gpu->pfp_bo); + a5xx_gpu->pfp_bo = NULL; + } + + gpu->needs_hw_init = true; + + pm_runtime_get_sync(&gpu->pdev->dev); + gpu->funcs->recover(gpu); + + pm_runtime_put_sync(&gpu->pdev->dev); + mutex_unlock(&gpu->lock); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n"); + + +void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor) +{ + struct drm_device *dev; + + if (!minor) + return; + + dev = minor->dev; + + drm_debugfs_create_files(a5xx_debugfs_list, + ARRAY_SIZE(a5xx_debugfs_list), + minor->debugfs_root, minor); + + debugfs_create_file_unsafe("reset", S_IWUGO, minor->debugfs_root, dev, + &reset_fops); +} diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c new file mode 100644 index 000000000..895a0e9db --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -0,0 +1,1786 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/cpumask.h> +#include <linux/qcom_scm.h> +#include <linux/pm_opp.h> +#include <linux/nvmem-consumer.h> +#include <linux/slab.h> +#include "msm_gem.h" +#include "msm_mmu.h" +#include "a5xx_gpu.h" + +extern bool hang_debug; +static void a5xx_dump(struct msm_gpu *gpu); + +#define GPU_PAS_ID 13 + +static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + + if (a5xx_gpu->has_whereami) { + OUT_PKT7(ring, CP_WHERE_AM_I, 2); + OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring))); + OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring))); + } +} + +void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, + bool sync) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + uint32_t wptr; + unsigned long flags; + + /* + * Most flush operations need to issue a WHERE_AM_I opcode to sync up + * the rptr shadow + */ + if (sync) + update_shadow_rptr(gpu, ring); + + spin_lock_irqsave(&ring->preempt_lock, flags); + + /* Copy the shadow to the actual register */ + ring->cur = ring->next; + + /* Make sure to wrap wptr if we need to */ + wptr = get_wptr(ring); + + spin_unlock_irqrestore(&ring->preempt_lock, flags); + + /* Make sure everything is posted before making a decision */ + mb(); + + /* Update HW if this is the current ring and we are not in preempt */ + if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu)) + gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr); +} + +static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit) +{ + struct msm_ringbuffer *ring = submit->ring; + struct msm_gem_object *obj; + uint32_t *ptr, dwords; + unsigned int i; + + for (i = 0; i < submit->nr_cmds; i++) { + switch (submit->cmd[i].type) { + case MSM_SUBMIT_CMD_IB_TARGET_BUF: + break; + case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: + if
(gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + break; + fallthrough; + case MSM_SUBMIT_CMD_BUF: + /* copy commands into RB: */ + obj = submit->bos[submit->cmd[i].idx].obj; + dwords = submit->cmd[i].size; + + ptr = msm_gem_get_vaddr(&obj->base); + + /* _get_vaddr() shouldn't fail at this point, + * since we've already mapped it once in + * submit_reloc() + */ + if (WARN_ON(IS_ERR_OR_NULL(ptr))) + return; + + for (i = 0; i < dwords; i++) { + /* normally the OUT_PKTn() would wait + * for space for the packet. But since + * we just OUT_RING() the whole thing, + * need to call adreno_wait_ring() + * ourself: + */ + adreno_wait_ring(ring, 1); + OUT_RING(ring, ptr[i]); + } + + msm_gem_put_vaddr(&obj->base); + + break; + } + } + + a5xx_flush(gpu, ring, true); + a5xx_preempt_trigger(gpu); + + /* we might not necessarily have a cmd from userspace to + * trigger an event to know that submit has completed, so + * do this manually: + */ + a5xx_idle(gpu, ring); + ring->memptrs->fence = submit->seqno; + msm_gpu_retire(gpu); +} + +static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + struct msm_ringbuffer *ring = submit->ring; + unsigned int i, ibs = 0; + + if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) { + gpu->cur_ctx_seqno = 0; + a5xx_submit_in_rb(gpu, submit); + return; + } + + OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1); + OUT_RING(ring, 0x02); + + /* Turn off protected mode to write to special registers */ + OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); + OUT_RING(ring, 0); + + /* Set the save preemption record for the ring/command */ + OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2); + OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring->id])); + OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring->id])); + + /* Turn back on protected mode */ + OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); + OUT_RING(ring, 1); + + /* Enable local preemption for finegrain preemption */ + OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1); + OUT_RING(ring, 0x1); + + /* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */ + OUT_PKT7(ring, CP_YIELD_ENABLE, 1); + OUT_RING(ring, 0x02); + + /* Submit the commands */ + for (i = 0; i < submit->nr_cmds; i++) { + switch (submit->cmd[i].type) { + case MSM_SUBMIT_CMD_IB_TARGET_BUF: + break; + case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: + if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + break; + fallthrough; + case MSM_SUBMIT_CMD_BUF: + OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); + OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); + OUT_RING(ring, upper_32_bits(submit->cmd[i].iova)); + OUT_RING(ring, submit->cmd[i].size); + ibs++; + break; + } + + /* + * Periodically update shadow-wptr if needed, so that we + * can see partial progress of submits with large # of + * cmds.. 
otherwise we could needlessly stall waiting for + * ringbuffer state, simply due to looking at a shadow + * rptr value that has not been updated + */ + if ((ibs % 32) == 0) + update_shadow_rptr(gpu, ring); + } + + /* + * Write the render mode to NULL (0) to indicate to the CP that the IBs + * are done rendering - otherwise a lucky preemption would start + * replaying from the last checkpoint + */ + OUT_PKT7(ring, CP_SET_RENDER_MODE, 5); + OUT_RING(ring, 0); + OUT_RING(ring, 0); + OUT_RING(ring, 0); + OUT_RING(ring, 0); + OUT_RING(ring, 0); + + /* Turn off IB level preemptions */ + OUT_PKT7(ring, CP_YIELD_ENABLE, 1); + OUT_RING(ring, 0x01); + + /* Write the fence to the scratch register */ + OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1); + OUT_RING(ring, submit->seqno); + + /* + * Execute a CACHE_FLUSH_TS event. This will ensure that the + * timestamp is written to the memory and then triggers the interrupt + */ + OUT_PKT7(ring, CP_EVENT_WRITE, 4); + OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) | + CP_EVENT_WRITE_0_IRQ); + OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence))); + OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence))); + OUT_RING(ring, submit->seqno); + + /* Yield the floor on command completion */ + OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4); + /* + * If dword[2:1] are non zero, they specify an address for the CP to + * write the value of dword[3] to on preemption complete. Write 0 to + * skip the write + */ + OUT_RING(ring, 0x00); + OUT_RING(ring, 0x00); + /* Data value - not used if the address above is 0 */ + OUT_RING(ring, 0x01); + /* Set bit 0 to trigger an interrupt on preempt complete */ + OUT_RING(ring, 0x01); + + /* A WHERE_AM_I packet is not needed after a YIELD */ + a5xx_flush(gpu, ring, false); + + /* Check to see if we need to start preemption */ + a5xx_preempt_trigger(gpu); +} + +static const struct adreno_five_hwcg_regs { + u32 offset; + u32 value; +} a5xx_hwcg[] = { + {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220}, + {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220}, + {REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220}, + {REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220}, + {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF}, + {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF}, + {REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF}, + {REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF}, + {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080}, + {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080}, + {REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080}, + {REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080}, + {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST2_TP2, 
0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777}, + {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777}, + {REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777}, + {REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777}, + {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111}, + {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111}, + {REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111}, + {REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111}, + {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444}, + {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220}, + {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220}, + {REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220}, + {REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220}, + {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555}, + {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404}, + {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404}, + {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404}, + {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404}, + {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044}, + {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011}, + {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222}, + {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004}, + {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000}, + {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200}, + {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222} +}, a50x_hwcg[] = { + {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220}, + {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF}, + {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080}, + {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777}, + {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111}, + {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222}, + 
{REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00FFFFF4}, + {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220}, + {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555}, + {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404}, + {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044}, + {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011}, + {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222}, + {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004}, + {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000}, + {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200}, + {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}, +}, a512_hwcg[] = { + {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220}, + {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220}, + {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF}, + {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF}, + {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080}, + {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080}, + {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777}, + {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777}, + {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777}, + {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111}, + {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111}, + {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111}, + {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444}, + {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222}, + {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220}, + {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220}, + {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222}, + {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555}, + {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404}, + {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404}, + {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044}, + {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002}, + {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011}, + {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222}, + {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222}, + {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222}, + {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004}, + {REG_A5XX_RBBM_CLOCK_HYST_VFD, 
0x00000000}, + {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000}, + {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000}, + {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200}, + {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}, +}; + +void a5xx_set_hwcg(struct msm_gpu *gpu, bool state) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + const struct adreno_five_hwcg_regs *regs; + unsigned int i, sz; + + if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu)) { + regs = a50x_hwcg; + sz = ARRAY_SIZE(a50x_hwcg); + } else if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu)) { + regs = a512_hwcg; + sz = ARRAY_SIZE(a512_hwcg); + } else { + regs = a5xx_hwcg; + sz = ARRAY_SIZE(a5xx_hwcg); + } + + for (i = 0; i < sz; i++) + gpu_write(gpu, regs[i].offset, + state ? regs[i].value : 0); + + if (adreno_is_a540(adreno_gpu)) { + gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU, state ? 0x00000770 : 0); + gpu_write(gpu, REG_A5XX_RBBM_CLOCK_HYST_GPMU, state ? 0x00000004 : 0); + } + + gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0); + gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180); +} + +static int a5xx_me_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct msm_ringbuffer *ring = gpu->rb[0]; + + OUT_PKT7(ring, CP_ME_INIT, 8); + + OUT_RING(ring, 0x0000002F); + + /* Enable multiple hardware contexts */ + OUT_RING(ring, 0x00000003); + + /* Enable error detection */ + OUT_RING(ring, 0x20000000); + + /* Don't enable header dump */ + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + + /* Specify workarounds for various microcode issues */ + if (adreno_is_a506(adreno_gpu) || adreno_is_a530(adreno_gpu)) { + /* Workaround for token end syncs + * Force a WFI after every direct-render 3D mode draw and every + * 2D mode 3 draw + */ + OUT_RING(ring, 0x0000000B); + } else if (adreno_is_a510(adreno_gpu)) { + /* Workaround for token and syncs */ + OUT_RING(ring, 0x00000001); + } else { + /* No workarounds enabled */ + OUT_RING(ring, 0x00000000); + } + + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + + a5xx_flush(gpu, ring, true); + return a5xx_idle(gpu, ring) ? 0 : -EINVAL; +} + +static int a5xx_preempt_start(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + struct msm_ringbuffer *ring = gpu->rb[0]; + + if (gpu->nr_rings == 1) + return 0; + + /* Turn off protected mode to write to special registers */ + OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); + OUT_RING(ring, 0); + + /* Set the save preemption record for the ring/command */ + OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2); + OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id])); + OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id])); + + /* Turn back on protected mode */ + OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); + OUT_RING(ring, 1); + + OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1); + OUT_RING(ring, 0x00); + + OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1); + OUT_RING(ring, 0x01); + + OUT_PKT7(ring, CP_YIELD_ENABLE, 1); + OUT_RING(ring, 0x01); + + /* Yield the floor on command completion */ + OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4); + OUT_RING(ring, 0x00); + OUT_RING(ring, 0x00); + OUT_RING(ring, 0x01); + OUT_RING(ring, 0x01); + + /* The WHERE_AM_I packet is not needed after a YIELD is issued */ + a5xx_flush(gpu, ring, false); + + return a5xx_idle(gpu, ring) ? 0 : -EINVAL; +}
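The ring sequences above wrap every write to a CP-protected register in the same off/write/on pattern. A minimal sketch of that pattern factored into a helper (ring_write64_protected is a hypothetical name for illustration; the driver open-codes the sequence):

static void ring_write64_protected(struct msm_ringbuffer *ring,
				   u32 reg, u64 value)
{
	/* drop CP protection so the privileged register can be written */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 0);

	/* 64-bit register pair: low dword then high dword */
	OUT_PKT4(ring, reg, 2);
	OUT_RING(ring, lower_32_bits(value));
	OUT_RING(ring, upper_32_bits(value));

	/* turn protection back on before any untrusted commands run */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 1);
}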
+ +static void a5xx_ucode_check_version(struct a5xx_gpu *a5xx_gpu, + struct drm_gem_object *obj) +{ + u32 *buf = msm_gem_get_vaddr(obj); + + if (IS_ERR(buf)) + return; + + /* + * If the lowest nibble is 0xa that is an indication that this microcode + * has been patched. The actual version is in dword [3] but we only care + * about the patchlevel which is the lowest nibble of dword [3] + */ + if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) + a5xx_gpu->has_whereami = true; + + msm_gem_put_vaddr(obj); +} + +static int a5xx_ucode_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + int ret; + + if (!a5xx_gpu->pm4_bo) { + a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu, + adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova); + + if (IS_ERR(a5xx_gpu->pm4_bo)) { + ret = PTR_ERR(a5xx_gpu->pm4_bo); + a5xx_gpu->pm4_bo = NULL; + DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PM4: %d\n", + ret); + return ret; + } + + msm_gem_object_set_name(a5xx_gpu->pm4_bo, "pm4fw"); + } + + if (!a5xx_gpu->pfp_bo) { + a5xx_gpu->pfp_bo = adreno_fw_create_bo(gpu, + adreno_gpu->fw[ADRENO_FW_PFP], &a5xx_gpu->pfp_iova); + + if (IS_ERR(a5xx_gpu->pfp_bo)) { + ret = PTR_ERR(a5xx_gpu->pfp_bo); + a5xx_gpu->pfp_bo = NULL; + DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PFP: %d\n", + ret); + return ret; + } + + msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw"); + a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo); + } + + gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, a5xx_gpu->pm4_iova); + + gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, a5xx_gpu->pfp_iova); + + return 0; +} + +#define SCM_GPU_ZAP_SHADER_RESUME 0 + +static int a5xx_zap_shader_resume(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + int ret; + + /* + * Adreno 506 has the CPZ retention feature and doesn't require + * the zap shader to be resumed + */ + if (adreno_is_a506(adreno_gpu)) + return 0; + + ret = qcom_scm_set_remote_state(SCM_GPU_ZAP_SHADER_RESUME, GPU_PAS_ID); + if (ret) + DRM_ERROR("%s: zap-shader resume failed: %d\n", + gpu->name, ret); + + return ret; +} + +static int a5xx_zap_shader_init(struct msm_gpu *gpu) +{ + static bool loaded; + int ret; + + /* + * If the zap shader is already loaded into memory we just need to kick + * the remote processor to reinitialize it + */ + if (loaded) + return a5xx_zap_shader_resume(gpu); + + ret = adreno_zap_shader_load(gpu, GPU_PAS_ID); + + loaded = !ret; + return ret; +} + +#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \ + A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \ + A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \ + A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \ + A5XX_RBBM_INT_0_MASK_CP_SW | \ + A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \ + A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \ + A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP) + +static int a5xx_hw_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + u32 regbit; + int ret; + + gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003); + + if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) || + adreno_is_a540(adreno_gpu)) + gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009); + + /* Make all blocks contribute to the GPU BUSY perf counter */ +
gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF); + + /* Enable RBBM error reporting bits */ + gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001); + + if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) { + /* + * Mask out the activity signals from RB1-3 to avoid false + * positives + */ + + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11, + 0xF0000000); + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12, + 0xFFFFFFFF); + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13, + 0xFFFFFFFF); + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14, + 0xFFFFFFFF); + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15, + 0xFFFFFFFF); + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16, + 0xFFFFFFFF); + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17, + 0xFFFFFFFF); + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18, + 0xFFFFFFFF); + } + + /* Enable fault detection */ + gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL, + (1 << 30) | 0xFFFF); + + /* Turn on performance counters */ + gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01); + + /* Select CP0 to always count cycles */ + gpu_write(gpu, REG_A5XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT); + + /* Select RBBM0 to countable 6 to get the busy status for devfreq */ + gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6); + + /* Increase VFD cache access so LRZ and other data gets evicted less */ + gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02); + + /* Disable L2 bypass in the UCHE */ + gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000); + gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF); + gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000); + gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF); + + /* Set the GMEM VA range (0 to gpu->gmem) */ + gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000); + gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00000000); + gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO, + 0x00100000 + adreno_gpu->gmem - 1); + gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000); + + if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) || + adreno_is_a510(adreno_gpu)) { + gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20); + if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu)) + gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400); + else + gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20); + gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030); + gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A); + } else { + gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40); + if (adreno_is_a530(adreno_gpu)) + gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40); + else + gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400); + gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060); + gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16); + } + + if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu)) + gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, + (0x100 << 11 | 0x100 << 22)); + else if (adreno_is_a509(adreno_gpu) || adreno_is_a510(adreno_gpu) || + adreno_is_a512(adreno_gpu)) + gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, + (0x200 << 11 | 0x200 << 22)); + else + gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, + (0x400 << 11 | 0x300 << 22)); + + if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI) + gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8)); + + /* + * Disable the RB sampler datapath DP2 clock gating optimization + * for 1-SP GPUs, as it is enabled by default. 
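+ * (The gpu_rmw() call just below sets bit 9 of REG_A5XX_RB_DBG_ECO_CNTL + * on those parts to turn the optimization off.)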
+ */ + if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) || + adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu)) + gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, 0, (1 << 9)); + + /* Disable UCHE global filter as SP can invalidate/flush independently */ + gpu_write(gpu, REG_A5XX_UCHE_MODE_CNTL, BIT(29)); + + /* Enable USE_RETENTION_FLOPS */ + gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000); + + /* Enable ME/PFP split notification */ + gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF); + + /* + * In A5x, the CCU can send the context_done event of a particular + * context to the UCHE, which ultimately reaches the CP, even while a + * valid transaction of that context is still inside the CCU. This can + * let the CP program config registers, which will make the "valid + * transaction" inside the CCU be interpreted differently and can cause + * a GPU fault. This bug is fixed in the latest A510 revision. To enable + * the fix, bit[11] of RB_DBG_ECO_CNTL needs to be set to 0 (the default + * is 1, i.e. disabled). On older A510 revisions this bit is unused. + */ + if (adreno_is_a510(adreno_gpu)) + gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, (1 << 11), 0); + + /* Enable HWCG */ + a5xx_set_hwcg(gpu, true); + + gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F); + + /* Set the highest bank bit */ + if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu)) + regbit = 2; + else + regbit = 1; + + gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, regbit << 7); + gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, regbit << 1); + + if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) || + adreno_is_a540(adreno_gpu)) + gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, regbit); + + /* Disable all flat shading optimization (ALLFLATOPTDIS) */ + gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, (1 << 10)); + + /* Protect registers from the CP */ + gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007); + + /* RBBM */ + gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));
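+ /* + * Each CP_PROTECT entry guards one power-of-two window of registers: + * ADRENO_PROTECT_RW() (adreno_gpu.h) appears to pack the base offset + * and a log2-encoded window length into a single dword, so e.g. the + * (0x80, 64) entry just above covers registers 0x80-0xbf. + */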
+ + /* Content protect */ + gpu_write(gpu, REG_A5XX_CP_PROTECT(6), + ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, + 16)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(7), + ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2)); + + /* CP */ + gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1)); + + /* RB */ + gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2)); + + /* VPC */ + gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8)); + gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 16)); + + /* UCHE */ + gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16)); + + /* SMMU */ + gpu_write(gpu, REG_A5XX_CP_PROTECT(17), + ADRENO_PROTECT_RW(0x10000, 0x8000)); + + gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0); + /* + * Disable the trusted memory range - we don't actually support secure + * memory rendering at this point in time and we don't want to block off + * part of the virtual memory space. + */ + gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 0x00000000); + gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000); + + /* Put the GPU into 64 bit by default */ + gpu_write(gpu, REG_A5XX_CP_ADDR_MODE_CNTL, 0x1); + gpu_write(gpu, REG_A5XX_VSC_ADDR_MODE_CNTL, 0x1); + gpu_write(gpu, REG_A5XX_GRAS_ADDR_MODE_CNTL, 0x1); + gpu_write(gpu, REG_A5XX_RB_ADDR_MODE_CNTL, 0x1); + gpu_write(gpu, REG_A5XX_PC_ADDR_MODE_CNTL, 0x1); + gpu_write(gpu, REG_A5XX_HLSQ_ADDR_MODE_CNTL, 0x1); + gpu_write(gpu, REG_A5XX_VFD_ADDR_MODE_CNTL, 0x1); + gpu_write(gpu, REG_A5XX_VPC_ADDR_MODE_CNTL, 0x1); + gpu_write(gpu, REG_A5XX_UCHE_ADDR_MODE_CNTL, 0x1); + gpu_write(gpu, REG_A5XX_SP_ADDR_MODE_CNTL, 0x1); + gpu_write(gpu, REG_A5XX_TPL1_ADDR_MODE_CNTL, 0x1); + gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1); + + /* + * A VPC corner case with local memory load kill leads to corrupt + * internal state. The normal disable does not work for all a5x chips, + * so use the following setting to disable it. + */ + if (adreno_gpu->info->quirks & ADRENO_QUIRK_LMLOADKILL_DISABLE) { + gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, BIT(23)); + gpu_rmw(gpu, REG_A5XX_HLSQ_DBG_ECO_CNTL, BIT(18), 0); + } + + ret = adreno_hw_init(gpu); + if (ret) + return ret; + + if (adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu)) + a5xx_gpmu_ucode_init(gpu); + + ret = a5xx_ucode_init(gpu); + if (ret) + return ret; + + /* Set the ringbuffer address */ + gpu_write64(gpu, REG_A5XX_CP_RB_BASE, gpu->rb[0]->iova); + + /* + * If the microcode supports the WHERE_AM_I opcode then we can use that + * in lieu of the RPTR shadow and enable preemption. Otherwise, we + * can't safely use the RPTR shadow or preemption. In either case, the + * RPTR shadow should be disabled in hardware. + */ + gpu_write(gpu, REG_A5XX_CP_RB_CNTL, + MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE); + + /* Create a privileged buffer for the RPTR shadow */ + if (a5xx_gpu->has_whereami) { + if (!a5xx_gpu->shadow_bo) { + a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev, + sizeof(u32) * gpu->nr_rings, + MSM_BO_WC | MSM_BO_MAP_PRIV, + gpu->aspace, &a5xx_gpu->shadow_bo, + &a5xx_gpu->shadow_iova); + + if (IS_ERR(a5xx_gpu->shadow)) + return PTR_ERR(a5xx_gpu->shadow); + + msm_gem_object_set_name(a5xx_gpu->shadow_bo, "shadow"); + } + + gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR, + shadowptr(a5xx_gpu, gpu->rb[0])); + } else if (gpu->nr_rings > 1) { + /* Disable preemption if WHERE_AM_I isn't available */ + a5xx_preempt_fini(gpu); + gpu->nr_rings = 1; + } + + a5xx_preempt_hw_init(gpu); + + /* Disable the interrupts through the initial bringup stage */ + gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK); + + /* Clear ME_HALT to start the micro engine */ + gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0); + ret = a5xx_me_init(gpu); + if (ret) + return ret; + + ret = a5xx_power_init(gpu); + if (ret) + return ret; + + /* + * Send a pipeline event stat to get misbehaving counters to start + * ticking correctly + */ + if (adreno_is_a530(adreno_gpu)) { + OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1); + OUT_RING(gpu->rb[0], CP_EVENT_WRITE_0_EVENT(STAT_EVENT)); + + a5xx_flush(gpu, gpu->rb[0], true); + if (!a5xx_idle(gpu, gpu->rb[0])) + return -EINVAL; + } + + /* + * If the chip that we are using does support loading one, then + * try to load a zap shader into the secure world. If successful + * we can use the CP to switch out of secure mode. If not then we + * have no recourse but to try to switch ourselves out manually.
If we + * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will + * be blocked and a permissions violation will soon follow. + */ + ret = a5xx_zap_shader_init(gpu); + if (!ret) { + OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1); + OUT_RING(gpu->rb[0], 0x00000000); + + a5xx_flush(gpu, gpu->rb[0], true); + if (!a5xx_idle(gpu, gpu->rb[0])) + return -EINVAL; + } else if (ret == -ENODEV) { + /* + * This device does not use zap shader (but print a warning + * just in case someone got their dt wrong.. hopefully they + * have a debug UART to realize the error of their ways... + * if you mess this up you are about to crash horribly) + */ + dev_warn_once(gpu->dev->dev, + "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n"); + gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0); + } else { + return ret; + } + + /* Last step - yield the ringbuffer */ + a5xx_preempt_start(gpu); + + return 0; +} + +static void a5xx_recover(struct msm_gpu *gpu) +{ + int i; + + adreno_dump_info(gpu); + + for (i = 0; i < 8; i++) { + printk("CP_SCRATCH_REG%d: %u\n", i, + gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(i))); + } + + if (hang_debug) + a5xx_dump(gpu); + + gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1); + gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD); + gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0); + adreno_recover(gpu); +} + +static void a5xx_destroy(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + + DBG("%s", gpu->name); + + a5xx_preempt_fini(gpu); + + if (a5xx_gpu->pm4_bo) { + msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace); + drm_gem_object_put(a5xx_gpu->pm4_bo); + } + + if (a5xx_gpu->pfp_bo) { + msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace); + drm_gem_object_put(a5xx_gpu->pfp_bo); + } + + if (a5xx_gpu->gpmu_bo) { + msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace); + drm_gem_object_put(a5xx_gpu->gpmu_bo); + } + + if (a5xx_gpu->shadow_bo) { + msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace); + drm_gem_object_put(a5xx_gpu->shadow_bo); + } + + adreno_gpu_cleanup(adreno_gpu); + kfree(a5xx_gpu); +} + +static inline bool _a5xx_check_idle(struct msm_gpu *gpu) +{ + if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY) + return false; + + /* + * Nearly every abnormality ends up pausing the GPU and triggering a + * fault so we can safely just watch for this one interrupt to fire + */ + return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) & + A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT); +} + +bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + + if (ring != a5xx_gpu->cur_ring) { + WARN(1, "Tried to idle a non-current ringbuffer\n"); + return false; + } + + /* wait for CP to drain ringbuffer: */ + if (!adreno_idle(gpu, ring)) + return false; + + if (spin_until(_a5xx_check_idle(gpu))) { + DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n", + gpu->name, __builtin_return_address(0), + gpu_read(gpu, REG_A5XX_RBBM_STATUS), + gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS), + gpu_read(gpu, REG_A5XX_CP_RB_RPTR), + gpu_read(gpu, REG_A5XX_CP_RB_WPTR)); + return false; + } + + return true; +} + +static int a5xx_fault_handler(void *arg, unsigned long iova, int flags, void *data) +{ + struct msm_gpu *gpu = arg; + pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n", + iova, flags, + gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)), + 
gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)), + gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)), + gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7))); + + return 0; +} + +static void a5xx_cp_err_irq(struct msm_gpu *gpu) +{ + u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS); + + if (status & A5XX_CP_INT_CP_OPCODE_ERROR) { + u32 val; + + gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0); + + /* + * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so + * read it twice + */ + + gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA); + val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA); + + dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n", + val); + } + + if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR) + dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n", + gpu_read(gpu, REG_A5XX_CP_HW_FAULT)); + + if (status & A5XX_CP_INT_CP_DMA_ERROR) + dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n"); + + if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) { + u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS); + + dev_err_ratelimited(gpu->dev->dev, + "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n", + val & (1 << 24) ? "WRITE" : "READ", + (val & 0xFFFFF) >> 2, val); + } + + if (status & A5XX_CP_INT_CP_AHB_ERROR) { + u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT); + const char *access[16] = { "reserved", "reserved", + "timestamp lo", "timestamp hi", "pfp read", "pfp write", + "", "", "me read", "me write", "", "", "crashdump read", + "crashdump write" }; + + dev_err_ratelimited(gpu->dev->dev, + "CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n", + status & 0xFFFFF, access[(status >> 24) & 0xF], + (status & (1 << 31)), status); + } +} + +static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status) +{ + if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) { + u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS); + + dev_err_ratelimited(gpu->dev->dev, + "RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n", + val & (1 << 28) ? 
"WRITE" : "READ", + (val & 0xFFFFF) >> 2, (val >> 20) & 0x3, + (val >> 24) & 0xF); + + /* Clear the error */ + gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4)); + + /* Clear the interrupt */ + gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD, + A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR); + } + + if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT) + dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n"); + + if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT) + dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n", + gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS)); + + if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT) + dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n", + gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS)); + + if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT) + dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n", + gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS)); + + if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW) + dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n"); + + if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW) + dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n"); +} + +static void a5xx_uche_err_irq(struct msm_gpu *gpu) +{ + uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI); + + addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO); + + dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n", + addr); +} + +static void a5xx_gpmu_err_irq(struct msm_gpu *gpu) +{ + dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n"); +} + +static void a5xx_fault_detect_irq(struct msm_gpu *gpu) +{ + struct drm_device *dev = gpu->dev; + struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); + + /* + * If stalled on SMMU fault, we could trip the GPU's hang detection, + * but the fault handler will trigger the devcore dump, and we want + * to otherwise resume normally rather than killing the submit, so + * just bail. + */ + if (gpu_read(gpu, REG_A5XX_RBBM_STATUS3) & BIT(24)) + return; + + DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n", + ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0, + gpu_read(gpu, REG_A5XX_RBBM_STATUS), + gpu_read(gpu, REG_A5XX_CP_RB_RPTR), + gpu_read(gpu, REG_A5XX_CP_RB_WPTR), + gpu_read64(gpu, REG_A5XX_CP_IB1_BASE), + gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ), + gpu_read64(gpu, REG_A5XX_CP_IB2_BASE), + gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ)); + + /* Turn off the hangcheck timer to keep it from bothering us */ + del_timer(&gpu->hangcheck_timer); + + kthread_queue_work(gpu->worker, &gpu->recover_work); +} + +#define RBBM_ERROR_MASK \ + (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \ + A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \ + A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW) + +static irqreturn_t a5xx_irq(struct msm_gpu *gpu) +{ + struct msm_drm_private *priv = gpu->dev->dev_private; + u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS); + + /* + * Clear all the interrupts except RBBM_AHB_ERROR - if we clear it + * before the source is cleared the interrupt will storm. 
+ */ + gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD, + status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR); + + if (priv->disable_err_irq) { + status &= A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | + A5XX_RBBM_INT_0_MASK_CP_SW; + } + + /* Pass status to a5xx_rbbm_err_irq because we've already cleared it */ + if (status & RBBM_ERROR_MASK) + a5xx_rbbm_err_irq(gpu, status); + + if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR) + a5xx_cp_err_irq(gpu); + + if (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT) + a5xx_fault_detect_irq(gpu); + + if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS) + a5xx_uche_err_irq(gpu); + + if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP) + a5xx_gpmu_err_irq(gpu); + + if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) { + a5xx_preempt_trigger(gpu); + msm_gpu_retire(gpu); + } + + if (status & A5XX_RBBM_INT_0_MASK_CP_SW) + a5xx_preempt_irq(gpu); + + return IRQ_HANDLED; +} + +static const u32 a5xx_registers[] = { + 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B, + 0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095, + 0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3, + 0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841, + 0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28, + 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, + 0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98, + 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585, + 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, + 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, + 0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545, + 0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0, + 0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, + 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, + 0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9, + 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201, + 0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A, + 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F, + 0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, + 0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947, + 0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, + 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68, + 0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, + 0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05, + 0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3, + 0xEAA5, 0xEAC2, 0xA800, 0xA800, 0xA820, 0xA828, 0xA840, 0xA87D, + 0XA880, 0xA88D, 0xA890, 0xA8A3, 0xA8D0, 0xA8D8, 0xA8E0, 0xA8F5, + 0xAC60, 0xAC60, ~0, +}; + +static void a5xx_dump(struct msm_gpu *gpu) +{ + DRM_DEV_INFO(gpu->dev->dev, "status: %08x\n", + gpu_read(gpu, REG_A5XX_RBBM_STATUS)); + adreno_dump(gpu); +} + +static int a5xx_pm_resume(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + int ret; + + /* Turn on the core power */ + ret = msm_gpu_pm_resume(gpu); + if (ret) + return ret; + + /* Adreno 506, 508, 509, 510, 512 needs manual RBBM sus/res control */ + if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) { + /* Halt the sp_input_clk at HM level */ + gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055); + a5xx_set_hwcg(gpu, true); + /* Turn on sp_input_clk at HM level */ + gpu_rmw(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xff, 0); + return 0; + } + + /* Turn the RBCCU domain first to limit the chances of 
voltage droop */ + gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000); + + /* Wait 3 usecs before polling */ + udelay(3); + + ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS, + (1 << 20), (1 << 20)); + if (ret) { + DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n", + gpu->name, + gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS)); + return ret; + } + + /* Turn on the SP domain */ + gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000); + ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS, + (1 << 20), (1 << 20)); + if (ret) + DRM_ERROR("%s: timeout waiting for SP GDSC enable\n", + gpu->name); + + return ret; +} + +static int a5xx_pm_suspend(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + u32 mask = 0xf; + int i, ret; + + /* A506, A508, A510 have 3 XIN ports in VBIF */ + if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) || + adreno_is_a510(adreno_gpu)) + mask = 0x7; + + /* Clear the VBIF pipe before shutting down */ + gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, mask); + spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & + mask) == mask); + + gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0); + + /* + * Reset the VBIF before power collapse to avoid issue with FIFO + * entries on Adreno A510 and A530 (the others will tend to lock up) + */ + if (adreno_is_a510(adreno_gpu) || adreno_is_a530(adreno_gpu)) { + gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000); + gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000); + } + + ret = msm_gpu_pm_suspend(gpu); + if (ret) + return ret; + + if (a5xx_gpu->has_whereami) + for (i = 0; i < gpu->nr_rings; i++) + a5xx_gpu->shadow[i] = 0; + + return 0; +} + +static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) +{ + *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO); + + return 0; +} + +struct a5xx_crashdumper { + void *ptr; + struct drm_gem_object *bo; + u64 iova; +}; + +struct a5xx_gpu_state { + struct msm_gpu_state base; + u32 *hlsqregs; +}; + +static int a5xx_crashdumper_init(struct msm_gpu *gpu, + struct a5xx_crashdumper *dumper) +{ + dumper->ptr = msm_gem_kernel_new(gpu->dev, + SZ_1M, MSM_BO_WC, gpu->aspace, + &dumper->bo, &dumper->iova); + + if (!IS_ERR(dumper->ptr)) + msm_gem_object_set_name(dumper->bo, "crashdump"); + + return PTR_ERR_OR_ZERO(dumper->ptr); +} + +static int a5xx_crashdumper_run(struct msm_gpu *gpu, + struct a5xx_crashdumper *dumper) +{ + u32 val; + + if (IS_ERR_OR_NULL(dumper->ptr)) + return -EINVAL; + + gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO, dumper->iova); + + gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1); + + return gpu_poll_timeout(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, val, + val & 0x04, 100, 10000); +} + +/* + * These are a list of the registers that need to be read through the HLSQ + * aperture through the crashdumper. These are not nominally accessible from + * the CPU on a secure platform. 
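+ * As the script-building loop below shows, the script itself is a + * sequence of 64-bit pairs - a value (or destination iova) followed by + * a control word carrying the target register offset in the upper bits + * and a dword count in the low bits - terminated by two zero qwords.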
+ */ +static const struct { + u32 type; + u32 regoffset; + u32 count; +} a5xx_hlsq_aperture_regs[] = { + { 0x35, 0xe00, 0x32 }, /* HLSQ non-context */ + { 0x31, 0x2080, 0x1 }, /* HLSQ 2D context 0 */ + { 0x33, 0x2480, 0x1 }, /* HLSQ 2D context 1 */ + { 0x32, 0xe780, 0x62 }, /* HLSQ 3D context 0 */ + { 0x34, 0xef80, 0x62 }, /* HLSQ 3D context 1 */ + { 0x3f, 0x0ec0, 0x40 }, /* SP non-context */ + { 0x3d, 0x2040, 0x1 }, /* SP 2D context 0 */ + { 0x3b, 0x2440, 0x1 }, /* SP 2D context 1 */ + { 0x3e, 0xe580, 0x170 }, /* SP 3D context 0 */ + { 0x3c, 0xed80, 0x170 }, /* SP 3D context 1 */ + { 0x3a, 0x0f00, 0x1c }, /* TP non-context */ + { 0x38, 0x2000, 0xa }, /* TP 2D context 0 */ + { 0x36, 0x2400, 0xa }, /* TP 2D context 1 */ + { 0x39, 0xe700, 0x80 }, /* TP 3D context 0 */ + { 0x37, 0xef00, 0x80 }, /* TP 3D context 1 */ +}; + +static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu, + struct a5xx_gpu_state *a5xx_state) +{ + struct a5xx_crashdumper dumper = { 0 }; + u32 offset, count = 0; + u64 *ptr; + int i; + + if (a5xx_crashdumper_init(gpu, &dumper)) + return; + + /* The script will be written at offset 0 */ + ptr = dumper.ptr; + + /* Start writing the data at offset 256k */ + offset = dumper.iova + (256 * SZ_1K); + + /* Count how many additional registers to get from the HLSQ aperture */ + for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) + count += a5xx_hlsq_aperture_regs[i].count; + + a5xx_state->hlsqregs = kcalloc(count, sizeof(u32), GFP_KERNEL); + if (!a5xx_state->hlsqregs) + return; + + /* Build the crashdump script */ + for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) { + u32 type = a5xx_hlsq_aperture_regs[i].type; + u32 c = a5xx_hlsq_aperture_regs[i].count; + + /* Write the register to select the desired bank */ + *ptr++ = ((u64) type << 8); + *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_READ_SEL) << 44) | + (1 << 21) | 1; + + *ptr++ = offset; + *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE) << 44) + | c; + + offset += c * sizeof(u32); + } + + /* Write two zeros to close off the script */ + *ptr++ = 0; + *ptr++ = 0; + + if (a5xx_crashdumper_run(gpu, &dumper)) { + kfree(a5xx_state->hlsqregs); + msm_gem_kernel_put(dumper.bo, gpu->aspace); + return; + } + + /* Copy the data from the crashdumper to the state */ + memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K), + count * sizeof(u32)); + + msm_gem_kernel_put(dumper.bo, gpu->aspace); +} + +static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu) +{ + struct a5xx_gpu_state *a5xx_state = kzalloc(sizeof(*a5xx_state), + GFP_KERNEL); + bool stalled = !!(gpu_read(gpu, REG_A5XX_RBBM_STATUS3) & BIT(24)); + + if (!a5xx_state) + return ERR_PTR(-ENOMEM); + + /* Temporarily disable hardware clock gating before reading the hw */ + a5xx_set_hwcg(gpu, false); + + /* First get the generic state from the adreno core */ + adreno_gpu_state_get(gpu, &(a5xx_state->base)); + + a5xx_state->base.rbbm_status = gpu_read(gpu, REG_A5XX_RBBM_STATUS); + + /* + * Get the HLSQ regs with the help of the crashdumper, but only if + * we are not stalled in an iommu fault (in which case the crashdumper + * would not have access to memory) + */ + if (!stalled) + a5xx_gpu_state_get_hlsq_regs(gpu, a5xx_state); + + a5xx_set_hwcg(gpu, true); + + return &a5xx_state->base; +} + +static void a5xx_gpu_state_destroy(struct kref *kref) +{ + struct msm_gpu_state *state = container_of(kref, + struct msm_gpu_state, ref); + struct a5xx_gpu_state *a5xx_state = container_of(state, + struct a5xx_gpu_state, base); + + kfree(a5xx_state->hlsqregs); + +
adreno_gpu_state_destroy(state); + kfree(a5xx_state); +} + +static int a5xx_gpu_state_put(struct msm_gpu_state *state) +{ + if (IS_ERR_OR_NULL(state)) + return 1; + + return kref_put(&state->ref, a5xx_gpu_state_destroy); +} + + +#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) +static void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state, + struct drm_printer *p) +{ + int i, j; + u32 pos = 0; + struct a5xx_gpu_state *a5xx_state = container_of(state, + struct a5xx_gpu_state, base); + + if (IS_ERR_OR_NULL(state)) + return; + + adreno_show(gpu, state, p); + + /* Dump the additional a5xx HLSQ registers */ + if (!a5xx_state->hlsqregs) + return; + + drm_printf(p, "registers-hlsq:\n"); + + for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) { + u32 o = a5xx_hlsq_aperture_regs[i].regoffset; + u32 c = a5xx_hlsq_aperture_regs[i].count; + + for (j = 0; j < c; j++, pos++, o++) { + /* + * To keep the crashdump simple we pull the entire range + * for each register type but not all of the registers + * in the range are valid. Fortunately invalid registers + * stick out like a sore thumb with a value of + * 0xdeadbeef + */ + if (a5xx_state->hlsqregs[pos] == 0xdeadbeef) + continue; + + drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n", + o << 2, a5xx_state->hlsqregs[pos]); + } + } +} +#endif + +static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + + return a5xx_gpu->cur_ring; +} + +static u64 a5xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate) +{ + u64 busy_cycles; + + busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO); + *out_sample_rate = clk_get_rate(gpu->core_clk); + + return busy_cycles; +} + +static uint32_t a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + + if (a5xx_gpu->has_whereami) + return a5xx_gpu->shadow[ring->id]; + + return ring->memptrs->rptr = gpu_read(gpu, REG_A5XX_CP_RB_RPTR); +} + +static const struct adreno_gpu_funcs funcs = { + .base = { + .get_param = adreno_get_param, + .set_param = adreno_set_param, + .hw_init = a5xx_hw_init, + .pm_suspend = a5xx_pm_suspend, + .pm_resume = a5xx_pm_resume, + .recover = a5xx_recover, + .submit = a5xx_submit, + .active_ring = a5xx_active_ring, + .irq = a5xx_irq, + .destroy = a5xx_destroy, +#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) + .show = a5xx_show, +#endif +#if defined(CONFIG_DEBUG_FS) + .debugfs_init = a5xx_debugfs_init, +#endif + .gpu_busy = a5xx_gpu_busy, + .gpu_state_get = a5xx_gpu_state_get, + .gpu_state_put = a5xx_gpu_state_put, + .create_address_space = adreno_iommu_create_address_space, + .get_rptr = a5xx_get_rptr, + }, + .get_timestamp = a5xx_get_timestamp, +}; + +static void check_speed_bin(struct device *dev) +{ + struct nvmem_cell *cell; + u32 val; + + /* + * If the OPP table specifies an opp-supported-hw property then we have + * to set something with dev_pm_opp_set_supported_hw() or the table + * doesn't get populated, so pick an arbitrary value that should + * ensure the default frequencies are selected but not conflict with any + * actual bins + */ + val = 0x80; + + cell = nvmem_cell_get(dev, "speed_bin"); + + if (!IS_ERR(cell)) { + void *buf = nvmem_cell_read(cell, NULL); + + if (!IS_ERR(buf)) { + u8 bin = *((u8 *) buf); + + val = (1 << bin); + kfree(buf); + } + + nvmem_cell_put(cell); + } + +
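+ /* Advertise the bin as a one-word version mask; OPP entries whose opp-supported-hw value overlaps it stay enabled */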
devm_pm_opp_set_supported_hw(dev, &val, 1); +} + +struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + struct platform_device *pdev = priv->gpu_pdev; + struct adreno_platform_config *config = pdev->dev.platform_data; + struct a5xx_gpu *a5xx_gpu = NULL; + struct adreno_gpu *adreno_gpu; + struct msm_gpu *gpu; + unsigned int nr_rings; + int ret; + + if (!pdev) { + DRM_DEV_ERROR(dev->dev, "No A5XX device is defined\n"); + return ERR_PTR(-ENXIO); + } + + a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL); + if (!a5xx_gpu) + return ERR_PTR(-ENOMEM); + + adreno_gpu = &a5xx_gpu->base; + gpu = &adreno_gpu->base; + + adreno_gpu->registers = a5xx_registers; + + a5xx_gpu->lm_leakage = 0x4E001A; + + check_speed_bin(&pdev->dev); + + nr_rings = 4; + + if (adreno_cmp_rev(ADRENO_REV(5, 1, 0, ANY_ID), config->rev)) + nr_rings = 1; + + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, nr_rings); + if (ret) { + a5xx_destroy(&(a5xx_gpu->base.base)); + return ERR_PTR(ret); + } + + if (gpu->aspace) + msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler); + + /* Set up the preemption specific bits and pieces for each ringbuffer */ + a5xx_preempt_init(gpu); + + return gpu; +} diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h new file mode 100644 index 000000000..c7187bcc5 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. + */ +#ifndef __A5XX_GPU_H__ +#define __A5XX_GPU_H__ + +#include "adreno_gpu.h" + +/* Bringing over the hack from the previous targets */ +#undef ROP_COPY +#undef ROP_XOR + +#include "a5xx.xml.h" + +struct a5xx_gpu { + struct adreno_gpu base; + + struct drm_gem_object *pm4_bo; + uint64_t pm4_iova; + + struct drm_gem_object *pfp_bo; + uint64_t pfp_iova; + + struct drm_gem_object *gpmu_bo; + uint64_t gpmu_iova; + uint32_t gpmu_dwords; + + uint32_t lm_leakage; + + struct msm_ringbuffer *cur_ring; + struct msm_ringbuffer *next_ring; + + struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS]; + struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS]; + struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS]; + uint64_t preempt_iova[MSM_GPU_MAX_RINGS]; + + atomic_t preempt_state; + struct timer_list preempt_timer; + + struct drm_gem_object *shadow_bo; + uint64_t shadow_iova; + uint32_t *shadow; + + /* True if the microcode supports the WHERE_AM_I opcode */ + bool has_whereami; +}; + +#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base) + +#ifdef CONFIG_DEBUG_FS +void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor); +#endif + +/* + * In order to do lockless preemption we use a simple state machine to progress + * through the process. + * + * PREEMPT_NONE - no preemption in progress. Next state: START. + * PREEMPT_START - The trigger is evaluating if preemption is possible. Next + * states: TRIGGERED, NONE + * PREEMPT_ABORT - An intermediate state before moving back to NONE. Next + * state: NONE. + * PREEMPT_TRIGGERED - A preemption has been executed on the hardware. Next + * states: FAULTED, PENDING + * PREEMPT_FAULTED - A preemption timed out (never completed). This will trigger + * recovery. Next state: N/A + * PREEMPT_PENDING - Preemption complete interrupt fired - the callback is + * checking the success of the operation. Next states: FAULTED, NONE.
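+ * + * For example, a successful switch walks NONE -> START -> TRIGGERED -> + * PENDING -> NONE, while a trigger that finds nothing to switch to walks + * NONE -> START -> ABORT -> NONE.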
+ */ + +enum preempt_state { + PREEMPT_NONE = 0, + PREEMPT_START, + PREEMPT_ABORT, + PREEMPT_TRIGGERED, + PREEMPT_FAULTED, + PREEMPT_PENDING, +}; + +/* + * struct a5xx_preempt_record is a shared buffer between the microcode and the + * CPU to store the state for preemption. The record itself is much larger + * (64k) but most of that is used by the CP for storage. + * + * There is a preemption record assigned per ringbuffer. When the CPU triggers a + * preemption, it fills out the record with the useful information (wptr, ring + * base, etc) and the microcode uses that information to set up the CP following + * the preemption. When a ring is switched out, the CP will save the ringbuffer + * state back to the record. In this way, once the records are properly set up + * the CPU can quickly switch back and forth between ringbuffers by only + * updating a few registers (often only the wptr). + * + * These are the CPU aware registers in the record: + * @magic: Must always be 0x27C4BAFC + * @info: Type of the record - written 0 by the CPU, updated by the CP + * @data: Data field from SET_RENDER_MODE or a checkpoint. Written and used by + * the CP + * @cntl: Value of RB_CNTL written by CPU, save/restored by CP + * @rptr: Value of RB_RPTR written by CPU, save/restored by CP + * @wptr: Value of RB_WPTR written by CPU, save/restored by CP + * @rptr_addr: Value of RB_RPTR_ADDR written by CPU, save/restored by CP + * @rbase: Value of RB_BASE written by CPU, save/restored by CP + * @counter: GPU address of the storage area for the performance counters + */ +struct a5xx_preempt_record { + uint32_t magic; + uint32_t info; + uint32_t data; + uint32_t cntl; + uint32_t rptr; + uint32_t wptr; + uint64_t rptr_addr; + uint64_t rbase; + uint64_t counter; +}; + +/* Magic identifier for the preemption record */ +#define A5XX_PREEMPT_RECORD_MAGIC 0x27C4BAFCUL + +/* + * Even though the structure above is only a few bytes, we need a full 64k to + * store the entire preemption record from the CP + */ +#define A5XX_PREEMPT_RECORD_SIZE (64 * 1024) + +/* + * The preemption counter block is a storage area for the value of the + * preemption counters that are saved immediately before context switch. We + * append it on to the end of the allocation for the preemption record. 
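+ * The counters themselves live in a separate unprivileged buffer (see + * preempt_init_ring()); only their GPU address is stored in @counter.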
+ */ +#define A5XX_PREEMPT_COUNTER_SIZE (16 * 4) + + +int a5xx_power_init(struct msm_gpu *gpu); +void a5xx_gpmu_ucode_init(struct msm_gpu *gpu); + +static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs, + uint32_t reg, uint32_t mask, uint32_t value) +{ + while (usecs--) { + udelay(1); + if ((gpu_read(gpu, reg) & mask) == value) + return 0; + cpu_relax(); + } + + return -ETIMEDOUT; +} + +#define shadowptr(a5xx_gpu, ring) ((a5xx_gpu)->shadow_iova + \ + ((ring)->id * sizeof(uint32_t))) + +bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring); +void a5xx_set_hwcg(struct msm_gpu *gpu, bool state); + +void a5xx_preempt_init(struct msm_gpu *gpu); +void a5xx_preempt_hw_init(struct msm_gpu *gpu); +void a5xx_preempt_trigger(struct msm_gpu *gpu); +void a5xx_preempt_irq(struct msm_gpu *gpu); +void a5xx_preempt_fini(struct msm_gpu *gpu); + +void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, bool sync); + +/* Return true if we are in a preempt state */ +static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu) +{ + int preempt_state = atomic_read(&a5xx_gpu->preempt_state); + + return !(preempt_state == PREEMPT_NONE || + preempt_state == PREEMPT_ABORT); +} + +#endif /* __A5XX_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c new file mode 100644 index 000000000..0e63a1429 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -0,0 +1,390 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2016 The Linux Foundation. All rights reserved. + */ + +#include <linux/pm_opp.h> +#include "a5xx_gpu.h" + +/* + * The GPMU data block is a block of shared registers that can be used to + * communicate back and forth. These "registers" are a convention shared with + * the GPMU firmware and are not bound to any specific hardware design + */ + +#define AGC_INIT_BASE REG_A5XX_GPMU_DATA_RAM_BASE +#define AGC_INIT_MSG_MAGIC (AGC_INIT_BASE + 5) +#define AGC_MSG_BASE (AGC_INIT_BASE + 7) + +#define AGC_MSG_STATE (AGC_MSG_BASE + 0) +#define AGC_MSG_COMMAND (AGC_MSG_BASE + 1) +#define AGC_MSG_PAYLOAD_SIZE (AGC_MSG_BASE + 3) +#define AGC_MSG_PAYLOAD(_o) ((AGC_MSG_BASE + 5) + (_o)) + +#define AGC_POWER_CONFIG_PRODUCTION_ID 1 +#define AGC_INIT_MSG_VALUE 0xBABEFACE + +/* AGC_LM_CONFIG (A540+) */ +#define AGC_LM_CONFIG (136/4) +#define AGC_LM_CONFIG_GPU_VERSION_SHIFT 17 +#define AGC_LM_CONFIG_ENABLE_GPMU_ADAPTIVE 1 +#define AGC_LM_CONFIG_THROTTLE_DISABLE (2 << 8) +#define AGC_LM_CONFIG_ISENSE_ENABLE (1 << 4) +#define AGC_LM_CONFIG_ENABLE_ERROR (3 << 4) +#define AGC_LM_CONFIG_LLM_ENABLED (1 << 16) +#define AGC_LM_CONFIG_BCL_DISABLED (1 << 24) + +#define AGC_LEVEL_CONFIG (140/4) + +static struct { + uint32_t reg; + uint32_t value; +} a5xx_sequence_regs[] = { + { 0xB9A1, 0x00010303 }, + { 0xB9A2, 0x13000000 }, + { 0xB9A3, 0x00460020 }, + { 0xB9A4, 0x10000000 }, + { 0xB9A5, 0x040A1707 }, + { 0xB9A6, 0x00010000 }, + { 0xB9A7, 0x0E000904 }, + { 0xB9A8, 0x10000000 }, + { 0xB9A9, 0x01165000 }, + { 0xB9AA, 0x000E0002 }, + { 0xB9AB, 0x03884141 }, + { 0xB9AC, 0x10000840 }, + { 0xB9AD, 0x572A5000 }, + { 0xB9AE, 0x00000003 }, + { 0xB9AF, 0x00000000 }, + { 0xB9B0, 0x10000000 }, + { 0xB828, 0x6C204010 }, + { 0xB829, 0x6C204011 }, + { 0xB82A, 0x6C204012 }, + { 0xB82B, 0x6C204013 }, + { 0xB82C, 0x6C204014 }, + { 0xB90F, 0x00000004 }, + { 0xB910, 0x00000002 }, + { 0xB911, 0x00000002 }, + { 0xB912, 0x00000002 }, + { 0xB913, 0x00000002 }, + { 0xB92F, 0x00000004 }, + { 0xB930, 0x00000005 }, + { 0xB931, 0x00000005 }, + { 0xB932, 0x00000005 }, + { 0xB933, 0x00000005 },
+ { 0xB96F, 0x00000001 }, + { 0xB970, 0x00000003 }, + { 0xB94F, 0x00000004 }, + { 0xB950, 0x0000000B }, + { 0xB951, 0x0000000B }, + { 0xB952, 0x0000000B }, + { 0xB953, 0x0000000B }, + { 0xB907, 0x00000019 }, + { 0xB927, 0x00000019 }, + { 0xB947, 0x00000019 }, + { 0xB967, 0x00000019 }, + { 0xB987, 0x00000019 }, + { 0xB906, 0x00220001 }, + { 0xB926, 0x00220001 }, + { 0xB946, 0x00220001 }, + { 0xB966, 0x00220001 }, + { 0xB986, 0x00300000 }, + { 0xAC40, 0x0340FF41 }, + { 0xAC41, 0x03BEFED0 }, + { 0xAC42, 0x00331FED }, + { 0xAC43, 0x021FFDD3 }, + { 0xAC44, 0x5555AAAA }, + { 0xAC45, 0x5555AAAA }, + { 0xB9BA, 0x00000008 }, +}; + +/* + * Get the actual voltage value for the operating point at the specified + * frequency + */ +static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq) +{ + struct drm_device *dev = gpu->dev; + struct msm_drm_private *priv = dev->dev_private; + struct platform_device *pdev = priv->gpu_pdev; + struct dev_pm_opp *opp; + u32 ret = 0; + + opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true); + + if (!IS_ERR(opp)) { + ret = dev_pm_opp_get_voltage(opp) / 1000; + dev_pm_opp_put(opp); + } + + return ret; +} + +/* Setup thermal limit management */ +static void a530_lm_setup(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + unsigned int i; + + /* Write the block of sequence registers */ + for (i = 0; i < ARRAY_SIZE(a5xx_sequence_regs); i++) + gpu_write(gpu, a5xx_sequence_regs[i].reg, + a5xx_sequence_regs[i].value); + + /* Hard code the A530 GPU thermal sensor ID for the GPMU */ + gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, 0x60007); + gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01); + gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01); + + /* Until we get clock scaling 0 is always the active power level */ + gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0); + + gpu_write(gpu, REG_A5XX_GPMU_BASE_LEAKAGE, a5xx_gpu->lm_leakage); + + /* The threshold is fixed at 6000 for A530 */ + gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000); + + gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF); + gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x00201FF1); + + /* Write the voltage table */ + gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF); + gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x201FF1); + + gpu_write(gpu, AGC_MSG_STATE, 1); + gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID); + + /* Write the max power - hard coded to 5448 for A530 */ + gpu_write(gpu, AGC_MSG_PAYLOAD(0), 5448); + gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1); + + /* + * For now just write the one voltage level - we will do more when we + * can do scaling + */ + gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate)); + gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000); + + gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, 4 * sizeof(uint32_t)); + gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE); +} + +#define PAYLOAD_SIZE(_size) ((_size) * sizeof(u32)) +#define LM_DCVS_LIMIT 1 +#define LEVEL_CONFIG ~(0x303) + +static void a540_lm_setup(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + u32 config; + + /* The battery current limiter isn't enabled for A540 */ + config = AGC_LM_CONFIG_BCL_DISABLED; + config |= adreno_gpu->rev.patchid << AGC_LM_CONFIG_GPU_VERSION_SHIFT; + + /* For now disable GPMU side throttling */ + config |= AGC_LM_CONFIG_THROTTLE_DISABLE; + + /* Until we get clock scaling 0 is always the active power level */ + 
gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0); + + /* Fixed at 6000 for now */ + gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000); + + gpu_write(gpu, AGC_MSG_STATE, 0x80000001); + gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID); + + gpu_write(gpu, AGC_MSG_PAYLOAD(0), 5448); + gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1); + + gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate)); + gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000); + + gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LM_CONFIG), config); + gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LEVEL_CONFIG), LEVEL_CONFIG); + gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, + PAYLOAD_SIZE(AGC_LEVEL_CONFIG + 1)); + + gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE); +} + +/* Enable SP/TP power collapse */ +static void a5xx_pc_init(struct msm_gpu *gpu) +{ + gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x7F); + gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_BINNING_CTRL, 0); + gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0xA0080); + gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x600040); +} + +/* Enable the GPMU microcontroller */ +static int a5xx_gpmu_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + struct msm_ringbuffer *ring = gpu->rb[0]; + + if (!a5xx_gpu->gpmu_dwords) + return 0; + + /* Turn off protected mode for this operation */ + OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); + OUT_RING(ring, 0); + + /* Kick off the IB to load the GPMU microcode */ + OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); + OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova)); + OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova)); + OUT_RING(ring, a5xx_gpu->gpmu_dwords); + + /* Turn back on protected mode */ + OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); + OUT_RING(ring, 1); + + a5xx_flush(gpu, ring, true); + + if (!a5xx_idle(gpu, ring)) { + DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n", + gpu->name); + return -EINVAL; + } + + if (adreno_is_a530(adreno_gpu)) + gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014); + + /* Kick off the GPMU */ + gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0); + + /* + * Wait for the GPMU to respond. It isn't fatal if it doesn't; we just + * won't have advanced power collapse.
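+ * The GPMU signals readiness by writing AGC_INIT_MSG_VALUE (0xBABEFACE) + * into GPMU_GENERAL_0, which is what the poll below looks for.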
+ */ + if (spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF, + 0xBABEFACE)) + DRM_ERROR("%s: GPMU firmware initialization timed out\n", + gpu->name); + + if (!adreno_is_a530(adreno_gpu)) { + u32 val = gpu_read(gpu, REG_A5XX_GPMU_GENERAL_1); + + if (val) + DRM_ERROR("%s: GPMU firmware initialization failed: %d\n", + gpu->name, val); + } + + return 0; +} + +/* Enable limits management */ +static void a5xx_lm_enable(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + + /* This init sequence only applies to A530 */ + if (!adreno_is_a530(adreno_gpu)) + return; + + gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0); + gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A); + gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01); + gpu_write(gpu, REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK, 0x50000); + gpu_write(gpu, REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL, 0x30000); + + gpu_write(gpu, REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL, 0x011); +} + +int a5xx_power_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + int ret; + + /* Not all A5xx chips have a GPMU */ + if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) + return 0; + + /* Set up the limits management */ + if (adreno_is_a530(adreno_gpu)) + a530_lm_setup(gpu); + else if (adreno_is_a540(adreno_gpu)) + a540_lm_setup(gpu); + + /* Set up SP/TP power collapse */ + a5xx_pc_init(gpu); + + /* Start the GPMU */ + ret = a5xx_gpmu_init(gpu); + if (ret) + return ret; + + /* Start the limits management */ + a5xx_lm_enable(gpu); + + return 0; +} + +void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + struct drm_device *drm = gpu->dev; + uint32_t dwords = 0, offset = 0, bosize; + unsigned int *data, *ptr, *cmds; + unsigned int cmds_size; + + if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) + return; + + if (a5xx_gpu->gpmu_bo) + return; + + data = (unsigned int *) adreno_gpu->fw[ADRENO_FW_GPMU]->data; + + /* + * The first dword is the size of the remaining data in dwords. Use it + * as a checksum of sorts and make sure it matches the actual size of + * the firmware that we read + */ + + if (adreno_gpu->fw[ADRENO_FW_GPMU]->size < 8 || + (data[0] < 2) || (data[0] >= + (adreno_gpu->fw[ADRENO_FW_GPMU]->size >> 2))) + return; + + /* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */ + if (data[1] != 2) + return; + + cmds = data + data[2] + 3; + cmds_size = data[0] - data[2] - 2; + + /* + * A single type4 opcode can only have so many values attached, so + * add enough opcodes to load all the commands + */ + bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2; + + ptr = msm_gem_kernel_new(drm, bosize, + MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, + &a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova); + if (IS_ERR(ptr)) + return; + + msm_gem_object_set_name(a5xx_gpu->gpmu_bo, "gpmufw"); + + while (cmds_size > 0) { + int i; + uint32_t _size = cmds_size > TYPE4_MAX_PAYLOAD ?
+ TYPE4_MAX_PAYLOAD : cmds_size; + + ptr[dwords++] = PKT4(REG_A5XX_GPMU_INST_RAM_BASE + offset, + _size); + + for (i = 0; i < _size; i++) + ptr[dwords++] = *cmds++; + + offset += _size; + cmds_size -= _size; + } + + msm_gem_put_vaddr(a5xx_gpu->gpmu_bo); + a5xx_gpu->gpmu_dwords = dwords; +} diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c new file mode 100644 index 000000000..f58dd564d --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c @@ -0,0 +1,302 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2017 The Linux Foundation. All rights reserved. + */ + +#include "msm_gem.h" +#include "a5xx_gpu.h" + +/* + * Try to transition the preemption state from old to new. Return + * true on success or false if the original state wasn't 'old' + */ +static inline bool try_preempt_state(struct a5xx_gpu *a5xx_gpu, + enum preempt_state old, enum preempt_state new) +{ + enum preempt_state cur = atomic_cmpxchg(&a5xx_gpu->preempt_state, + old, new); + + return (cur == old); +} + +/* + * Force the preemption state to the specified state. This is used in cases + * where the current state is known and won't change + */ +static inline void set_preempt_state(struct a5xx_gpu *gpu, + enum preempt_state new) +{ + /* + * preempt_state may be read by other cores trying to trigger a + * preemption or in the interrupt handler so barriers are needed + * before... + */ + smp_mb__before_atomic(); + atomic_set(&gpu->preempt_state, new); + /* ... and after*/ + smp_mb__after_atomic(); +} + +/* Write the most recent wptr for the given ring into the hardware */ +static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + unsigned long flags; + uint32_t wptr; + + if (!ring) + return; + + spin_lock_irqsave(&ring->preempt_lock, flags); + wptr = get_wptr(ring); + spin_unlock_irqrestore(&ring->preempt_lock, flags); + + gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr); +} + +/* Return the highest priority ringbuffer with something in it */ +static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu) +{ + unsigned long flags; + int i; + + for (i = 0; i < gpu->nr_rings; i++) { + bool empty; + struct msm_ringbuffer *ring = gpu->rb[i]; + + spin_lock_irqsave(&ring->preempt_lock, flags); + empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring)); + spin_unlock_irqrestore(&ring->preempt_lock, flags); + + if (!empty) + return ring; + } + + return NULL; +} + +static void a5xx_preempt_timer(struct timer_list *t) +{ + struct a5xx_gpu *a5xx_gpu = from_timer(a5xx_gpu, t, preempt_timer); + struct msm_gpu *gpu = &a5xx_gpu->base.base; + struct drm_device *dev = gpu->dev; + + if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED)) + return; + + DRM_DEV_ERROR(dev->dev, "%s: preemption timed out\n", gpu->name); + kthread_queue_work(gpu->worker, &gpu->recover_work); +} + +/* Try to trigger a preemption switch */ +void a5xx_preempt_trigger(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + unsigned long flags; + struct msm_ringbuffer *ring; + + if (gpu->nr_rings == 1) + return; + + /* + * Try to start preemption by moving from NONE to START. 
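+ * Only one caller can win the atomic_cmpxchg() in try_preempt_state(), + * so concurrent triggers are serialized here.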
If + * unsuccessful, a preemption is already in flight + */ + if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START)) + return; + + /* Get the next ring to preempt to */ + ring = get_next_ring(gpu); + + /* + * If no ring is populated or the highest priority ring is the current + * one, do nothing except to update the wptr to the latest and greatest + */ + if (!ring || (a5xx_gpu->cur_ring == ring)) { + /* + * It's possible that while a preemption request is in progress + * from an irq context, a user context trying to submit might + * fail to update the write pointer, because it determines + * that the preempt state is not PREEMPT_NONE. + * + * Close the race by introducing an intermediate + * state PREEMPT_ABORT to let the submit path + * know that the ringbuffer is not going to change + * and can safely update the write pointer. + */ + + set_preempt_state(a5xx_gpu, PREEMPT_ABORT); + update_wptr(gpu, a5xx_gpu->cur_ring); + set_preempt_state(a5xx_gpu, PREEMPT_NONE); + return; + } + + /* Make sure the wptr doesn't update while we're in motion */ + spin_lock_irqsave(&ring->preempt_lock, flags); + a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring); + spin_unlock_irqrestore(&ring->preempt_lock, flags); + + /* Set the address of the incoming preemption record */ + gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO, + a5xx_gpu->preempt_iova[ring->id]); + + a5xx_gpu->next_ring = ring; + + /* Start a timer to catch a stuck preemption */ + mod_timer(&a5xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000)); + + /* Set the preemption state to triggered */ + set_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED); + + /* Make sure everything is written before hitting the button */ + wmb(); + + /* And actually start the preemption */ + gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1); +} + +void a5xx_preempt_irq(struct msm_gpu *gpu) +{ + uint32_t status; + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + struct drm_device *dev = gpu->dev; + + if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING)) + return; + + /* Delete the preemption watchdog timer */ + del_timer(&a5xx_gpu->preempt_timer); + + /* + * The hardware should be setting CP_CONTEXT_SWITCH_CNTL to zero before + * firing the interrupt, but there is a non-zero chance of a hardware + * condition or a software race that could set it again before we have a + * chance to finish.
If that happens, log and go for recovery + */ + status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL); + if (unlikely(status)) { + set_preempt_state(a5xx_gpu, PREEMPT_FAULTED); + DRM_DEV_ERROR(dev->dev, "%s: Preemption failed to complete\n", + gpu->name); + kthread_queue_work(gpu->worker, &gpu->recover_work); + return; + } + + a5xx_gpu->cur_ring = a5xx_gpu->next_ring; + a5xx_gpu->next_ring = NULL; + + update_wptr(gpu, a5xx_gpu->cur_ring); + + set_preempt_state(a5xx_gpu, PREEMPT_NONE); +} + +void a5xx_preempt_hw_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + int i; + + /* Always come up on rb 0 */ + a5xx_gpu->cur_ring = gpu->rb[0]; + + /* No preemption if we only have one ring */ + if (gpu->nr_rings == 1) + return; + + for (i = 0; i < gpu->nr_rings; i++) { + a5xx_gpu->preempt[i]->wptr = 0; + a5xx_gpu->preempt[i]->rptr = 0; + a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova; + a5xx_gpu->preempt[i]->rptr_addr = shadowptr(a5xx_gpu, gpu->rb[i]); + } + + /* Write a 0 to signal that we aren't switching pagetables */ + gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO, 0); + + /* Reset the preemption state */ + set_preempt_state(a5xx_gpu, PREEMPT_NONE); +} + +static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu, + struct msm_ringbuffer *ring) +{ + struct adreno_gpu *adreno_gpu = &a5xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + struct a5xx_preempt_record *ptr; + void *counters; + struct drm_gem_object *bo = NULL, *counters_bo = NULL; + u64 iova = 0, counters_iova = 0; + + ptr = msm_gem_kernel_new(gpu->dev, + A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE, + MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova); + + if (IS_ERR(ptr)) + return PTR_ERR(ptr); + + /* The buffer to store counters needs to be unprivileged */ + counters = msm_gem_kernel_new(gpu->dev, + A5XX_PREEMPT_COUNTER_SIZE, + MSM_BO_WC, gpu->aspace, &counters_bo, &counters_iova); + if (IS_ERR(counters)) { + msm_gem_kernel_put(bo, gpu->aspace); + return PTR_ERR(counters); + } + + msm_gem_object_set_name(bo, "preempt"); + msm_gem_object_set_name(counters_bo, "preempt_counters"); + + a5xx_gpu->preempt_bo[ring->id] = bo; + a5xx_gpu->preempt_counters_bo[ring->id] = counters_bo; + a5xx_gpu->preempt_iova[ring->id] = iova; + a5xx_gpu->preempt[ring->id] = ptr; + + /* Set up the defaults on the preemption record */ + + ptr->magic = A5XX_PREEMPT_RECORD_MAGIC; + ptr->info = 0; + ptr->data = 0; + ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE; + + ptr->counter = counters_iova; + + return 0; +} + +void a5xx_preempt_fini(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + int i; + + for (i = 0; i < gpu->nr_rings; i++) { + msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace); + msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i], gpu->aspace); + } +} + +void a5xx_preempt_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + int i; + + /* No preemption if we only have one ring */ + if (gpu->nr_rings <= 1) + return; + + for (i = 0; i < gpu->nr_rings; i++) { + if (preempt_init_ring(a5xx_gpu, gpu->rb[i])) { + /* + * On any failure our adventure is over. 
Clean up and + * set nr_rings to 1 to force preemption off + */ + a5xx_preempt_fini(gpu); + gpu->nr_rings = 1; + + return; + } + } + + timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0); +} diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h new file mode 100644 index 000000000..beea4a7fc --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h @@ -0,0 +1,7780 @@ +#ifndef A6XX_XML +#define A6XX_XML + +/* Autogenerated file, DO NOT EDIT manually! + +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://github.com/freedreno/envytools/ +git clone https://github.com/freedreno/envytools.git + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56) + +Copyright (C) 2013-2022 by the following authors: +- Rob Clark (robclark) +- Ilia Mirkin (imirkin) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + + +enum a6xx_tile_mode { + TILE6_LINEAR = 0, + TILE6_2 = 2, + TILE6_3 = 3, +}; + +enum a6xx_format { + FMT6_A8_UNORM = 2, + FMT6_8_UNORM = 3, + FMT6_8_SNORM = 4, + FMT6_8_UINT = 5, + FMT6_8_SINT = 6, + FMT6_4_4_4_4_UNORM = 8, + FMT6_5_5_5_1_UNORM = 10, + FMT6_1_5_5_5_UNORM = 12, + FMT6_5_6_5_UNORM = 14, + FMT6_8_8_UNORM = 15, + FMT6_8_8_SNORM = 16, + FMT6_8_8_UINT = 17, + FMT6_8_8_SINT = 18, + FMT6_L8_A8_UNORM = 19, + FMT6_16_UNORM = 21, + FMT6_16_SNORM = 22, + FMT6_16_FLOAT = 23, + FMT6_16_UINT = 24, + FMT6_16_SINT = 25, + FMT6_8_8_8_UNORM = 33, + FMT6_8_8_8_SNORM = 34, + FMT6_8_8_8_UINT = 35, + FMT6_8_8_8_SINT = 36, + FMT6_8_8_8_8_UNORM = 48, + FMT6_8_8_8_X8_UNORM = 49, + FMT6_8_8_8_8_SNORM = 50, + FMT6_8_8_8_8_UINT = 51, + FMT6_8_8_8_8_SINT = 52, + FMT6_9_9_9_E5_FLOAT = 53, + FMT6_10_10_10_2_UNORM = 54, + FMT6_10_10_10_2_UNORM_DEST = 55, + FMT6_10_10_10_2_SNORM = 57, + FMT6_10_10_10_2_UINT = 58, + FMT6_10_10_10_2_SINT = 59, + FMT6_11_11_10_FLOAT = 66, + FMT6_16_16_UNORM = 67, + FMT6_16_16_SNORM = 68, + FMT6_16_16_FLOAT = 69, + FMT6_16_16_UINT = 70, + FMT6_16_16_SINT = 71, + FMT6_32_UNORM = 72, + FMT6_32_SNORM = 73, + FMT6_32_FLOAT = 74, + FMT6_32_UINT = 75, + FMT6_32_SINT = 76, + FMT6_32_FIXED = 77, + FMT6_16_16_16_UNORM = 88, + FMT6_16_16_16_SNORM = 89, + FMT6_16_16_16_FLOAT = 90, + FMT6_16_16_16_UINT = 91, + FMT6_16_16_16_SINT = 92, + FMT6_16_16_16_16_UNORM = 96, + FMT6_16_16_16_16_SNORM = 97, + FMT6_16_16_16_16_FLOAT = 98, + FMT6_16_16_16_16_UINT = 99, + FMT6_16_16_16_16_SINT = 100, + FMT6_32_32_UNORM = 101, + FMT6_32_32_SNORM = 102, + FMT6_32_32_FLOAT = 103, + FMT6_32_32_UINT = 104, + FMT6_32_32_SINT = 105, + FMT6_32_32_FIXED = 106, + FMT6_32_32_32_UNORM = 112, + FMT6_32_32_32_SNORM = 113, + FMT6_32_32_32_UINT = 114, + FMT6_32_32_32_SINT = 115, + FMT6_32_32_32_FLOAT = 116, + FMT6_32_32_32_FIXED = 117, + FMT6_32_32_32_32_UNORM = 128, + FMT6_32_32_32_32_SNORM = 129, + FMT6_32_32_32_32_FLOAT = 130, + FMT6_32_32_32_32_UINT = 131, + FMT6_32_32_32_32_SINT = 132, + FMT6_32_32_32_32_FIXED = 133, + FMT6_G8R8B8R8_422_UNORM = 140, + FMT6_R8G8R8B8_422_UNORM = 141, + FMT6_R8_G8B8_2PLANE_420_UNORM = 142, + FMT6_NV21 = 143, + FMT6_R8_G8_B8_3PLANE_420_UNORM = 144, + FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8 = 145, + FMT6_NV12_Y = 148, + FMT6_NV12_UV = 149, + FMT6_NV12_VU = 150, + FMT6_NV12_4R = 151, + FMT6_NV12_4R_Y = 152, + FMT6_NV12_4R_UV = 153, + FMT6_P010 = 154, + FMT6_P010_Y = 155, + FMT6_P010_UV = 156, + FMT6_TP10 = 157, + FMT6_TP10_Y = 158, + FMT6_TP10_UV = 159, + FMT6_Z24_UNORM_S8_UINT = 160, + FMT6_ETC2_RG11_UNORM = 171, + FMT6_ETC2_RG11_SNORM = 172, + FMT6_ETC2_R11_UNORM = 173, + FMT6_ETC2_R11_SNORM = 174, + FMT6_ETC1 = 175, + FMT6_ETC2_RGB8 = 176, + FMT6_ETC2_RGBA8 = 177, + FMT6_ETC2_RGB8A1 = 178, + FMT6_DXT1 = 179, + FMT6_DXT3 = 180, + FMT6_DXT5 = 181, + FMT6_RGTC1_UNORM = 183, + FMT6_RGTC1_SNORM = 184, + FMT6_RGTC2_UNORM = 187, + FMT6_RGTC2_SNORM = 188, + FMT6_BPTC_UFLOAT = 190, + FMT6_BPTC_FLOAT = 191, + FMT6_BPTC = 192, + FMT6_ASTC_4x4 = 193, + FMT6_ASTC_5x4 = 194, + FMT6_ASTC_5x5 = 195, + FMT6_ASTC_6x5 = 196, + FMT6_ASTC_6x6 = 197, + FMT6_ASTC_8x5 = 198, + FMT6_ASTC_8x6 = 199, + FMT6_ASTC_8x8 = 200, + FMT6_ASTC_10x5 = 201, + FMT6_ASTC_10x6 = 202, + FMT6_ASTC_10x8 = 203, + FMT6_ASTC_10x10 = 204, + FMT6_ASTC_12x10 = 205, + FMT6_ASTC_12x12 = 206, + FMT6_Z24_UINT_S8_UINT = 234, + FMT6_NONE = 255, +}; + +enum a6xx_polygon_mode { + POLYMODE6_POINTS = 1, + POLYMODE6_LINES = 2, + POLYMODE6_TRIANGLES = 3, +}; + +enum a6xx_depth_format { + DEPTH6_NONE = 0, + DEPTH6_16 = 1, + DEPTH6_24_8 = 2, + 
DEPTH6_32 = 4, +}; + +enum a6xx_shader_id { + A6XX_TP0_TMO_DATA = 9, + A6XX_TP0_SMO_DATA = 10, + A6XX_TP0_MIPMAP_BASE_DATA = 11, + A6XX_TP1_TMO_DATA = 25, + A6XX_TP1_SMO_DATA = 26, + A6XX_TP1_MIPMAP_BASE_DATA = 27, + A6XX_SP_INST_DATA = 41, + A6XX_SP_LB_0_DATA = 42, + A6XX_SP_LB_1_DATA = 43, + A6XX_SP_LB_2_DATA = 44, + A6XX_SP_LB_3_DATA = 45, + A6XX_SP_LB_4_DATA = 46, + A6XX_SP_LB_5_DATA = 47, + A6XX_SP_CB_BINDLESS_DATA = 48, + A6XX_SP_CB_LEGACY_DATA = 49, + A6XX_SP_UAV_DATA = 50, + A6XX_SP_INST_TAG = 51, + A6XX_SP_CB_BINDLESS_TAG = 52, + A6XX_SP_TMO_UMO_TAG = 53, + A6XX_SP_SMO_TAG = 54, + A6XX_SP_STATE_DATA = 55, + A6XX_HLSQ_CHUNK_CVS_RAM = 73, + A6XX_HLSQ_CHUNK_CPS_RAM = 74, + A6XX_HLSQ_CHUNK_CVS_RAM_TAG = 75, + A6XX_HLSQ_CHUNK_CPS_RAM_TAG = 76, + A6XX_HLSQ_ICB_CVS_CB_BASE_TAG = 77, + A6XX_HLSQ_ICB_CPS_CB_BASE_TAG = 78, + A6XX_HLSQ_CVS_MISC_RAM = 80, + A6XX_HLSQ_CPS_MISC_RAM = 81, + A6XX_HLSQ_INST_RAM = 82, + A6XX_HLSQ_GFX_CVS_CONST_RAM = 83, + A6XX_HLSQ_GFX_CPS_CONST_RAM = 84, + A6XX_HLSQ_CVS_MISC_RAM_TAG = 85, + A6XX_HLSQ_CPS_MISC_RAM_TAG = 86, + A6XX_HLSQ_INST_RAM_TAG = 87, + A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 88, + A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 89, + A6XX_HLSQ_PWR_REST_RAM = 90, + A6XX_HLSQ_PWR_REST_TAG = 91, + A6XX_HLSQ_DATAPATH_META = 96, + A6XX_HLSQ_FRONTEND_META = 97, + A6XX_HLSQ_INDIRECT_META = 98, + A6XX_HLSQ_BACKEND_META = 99, +}; + +enum a6xx_debugbus_id { + A6XX_DBGBUS_CP = 1, + A6XX_DBGBUS_RBBM = 2, + A6XX_DBGBUS_VBIF = 3, + A6XX_DBGBUS_HLSQ = 4, + A6XX_DBGBUS_UCHE = 5, + A6XX_DBGBUS_DPM = 6, + A6XX_DBGBUS_TESS = 7, + A6XX_DBGBUS_PC = 8, + A6XX_DBGBUS_VFDP = 9, + A6XX_DBGBUS_VPC = 10, + A6XX_DBGBUS_TSE = 11, + A6XX_DBGBUS_RAS = 12, + A6XX_DBGBUS_VSC = 13, + A6XX_DBGBUS_COM = 14, + A6XX_DBGBUS_LRZ = 16, + A6XX_DBGBUS_A2D = 17, + A6XX_DBGBUS_CCUFCHE = 18, + A6XX_DBGBUS_GMU_CX = 19, + A6XX_DBGBUS_RBP = 20, + A6XX_DBGBUS_DCS = 21, + A6XX_DBGBUS_DBGC = 22, + A6XX_DBGBUS_CX = 23, + A6XX_DBGBUS_GMU_GX = 24, + A6XX_DBGBUS_TPFCHE = 25, + A6XX_DBGBUS_GBIF_GX = 26, + A6XX_DBGBUS_GPC = 29, + A6XX_DBGBUS_LARC = 30, + A6XX_DBGBUS_HLSQ_SPTP = 31, + A6XX_DBGBUS_RB_0 = 32, + A6XX_DBGBUS_RB_1 = 33, + A6XX_DBGBUS_UCHE_WRAPPER = 36, + A6XX_DBGBUS_CCU_0 = 40, + A6XX_DBGBUS_CCU_1 = 41, + A6XX_DBGBUS_VFD_0 = 56, + A6XX_DBGBUS_VFD_1 = 57, + A6XX_DBGBUS_VFD_2 = 58, + A6XX_DBGBUS_VFD_3 = 59, + A6XX_DBGBUS_SP_0 = 64, + A6XX_DBGBUS_SP_1 = 65, + A6XX_DBGBUS_TPL1_0 = 72, + A6XX_DBGBUS_TPL1_1 = 73, + A6XX_DBGBUS_TPL1_2 = 74, + A6XX_DBGBUS_TPL1_3 = 75, +}; + +enum a6xx_cp_perfcounter_select { + PERF_CP_ALWAYS_COUNT = 0, + PERF_CP_BUSY_GFX_CORE_IDLE = 1, + PERF_CP_BUSY_CYCLES = 2, + PERF_CP_NUM_PREEMPTIONS = 3, + PERF_CP_PREEMPTION_REACTION_DELAY = 4, + PERF_CP_PREEMPTION_SWITCH_OUT_TIME = 5, + PERF_CP_PREEMPTION_SWITCH_IN_TIME = 6, + PERF_CP_DEAD_DRAWS_IN_BIN_RENDER = 7, + PERF_CP_PREDICATED_DRAWS_KILLED = 8, + PERF_CP_MODE_SWITCH = 9, + PERF_CP_ZPASS_DONE = 10, + PERF_CP_CONTEXT_DONE = 11, + PERF_CP_CACHE_FLUSH = 12, + PERF_CP_LONG_PREEMPTIONS = 13, + PERF_CP_SQE_I_CACHE_STARVE = 14, + PERF_CP_SQE_IDLE = 15, + PERF_CP_SQE_PM4_STARVE_RB_IB = 16, + PERF_CP_SQE_PM4_STARVE_SDS = 17, + PERF_CP_SQE_MRB_STARVE = 18, + PERF_CP_SQE_RRB_STARVE = 19, + PERF_CP_SQE_VSD_STARVE = 20, + PERF_CP_VSD_DECODE_STARVE = 21, + PERF_CP_SQE_PIPE_OUT_STALL = 22, + PERF_CP_SQE_SYNC_STALL = 23, + PERF_CP_SQE_PM4_WFI_STALL = 24, + PERF_CP_SQE_SYS_WFI_STALL = 25, + PERF_CP_SQE_T4_EXEC = 26, + PERF_CP_SQE_LOAD_STATE_EXEC = 27, + PERF_CP_SQE_SAVE_SDS_STATE = 28, + PERF_CP_SQE_DRAW_EXEC = 29, + PERF_CP_SQE_CTXT_REG_BUNCH_EXEC = 30, 
+ PERF_CP_SQE_EXEC_PROFILED = 31, + PERF_CP_MEMORY_POOL_EMPTY = 32, + PERF_CP_MEMORY_POOL_SYNC_STALL = 33, + PERF_CP_MEMORY_POOL_ABOVE_THRESH = 34, + PERF_CP_AHB_WR_STALL_PRE_DRAWS = 35, + PERF_CP_AHB_STALL_SQE_GMU = 36, + PERF_CP_AHB_STALL_SQE_WR_OTHER = 37, + PERF_CP_AHB_STALL_SQE_RD_OTHER = 38, + PERF_CP_CLUSTER0_EMPTY = 39, + PERF_CP_CLUSTER1_EMPTY = 40, + PERF_CP_CLUSTER2_EMPTY = 41, + PERF_CP_CLUSTER3_EMPTY = 42, + PERF_CP_CLUSTER4_EMPTY = 43, + PERF_CP_CLUSTER5_EMPTY = 44, + PERF_CP_PM4_DATA = 45, + PERF_CP_PM4_HEADERS = 46, + PERF_CP_VBIF_READ_BEATS = 47, + PERF_CP_VBIF_WRITE_BEATS = 48, + PERF_CP_SQE_INSTR_COUNTER = 49, +}; + +enum a6xx_rbbm_perfcounter_select { + PERF_RBBM_ALWAYS_COUNT = 0, + PERF_RBBM_ALWAYS_ON = 1, + PERF_RBBM_TSE_BUSY = 2, + PERF_RBBM_RAS_BUSY = 3, + PERF_RBBM_PC_DCALL_BUSY = 4, + PERF_RBBM_PC_VSD_BUSY = 5, + PERF_RBBM_STATUS_MASKED = 6, + PERF_RBBM_COM_BUSY = 7, + PERF_RBBM_DCOM_BUSY = 8, + PERF_RBBM_VBIF_BUSY = 9, + PERF_RBBM_VSC_BUSY = 10, + PERF_RBBM_TESS_BUSY = 11, + PERF_RBBM_UCHE_BUSY = 12, + PERF_RBBM_HLSQ_BUSY = 13, +}; + +enum a6xx_pc_perfcounter_select { + PERF_PC_BUSY_CYCLES = 0, + PERF_PC_WORKING_CYCLES = 1, + PERF_PC_STALL_CYCLES_VFD = 2, + PERF_PC_STALL_CYCLES_TSE = 3, + PERF_PC_STALL_CYCLES_VPC = 4, + PERF_PC_STALL_CYCLES_UCHE = 5, + PERF_PC_STALL_CYCLES_TESS = 6, + PERF_PC_STALL_CYCLES_TSE_ONLY = 7, + PERF_PC_STALL_CYCLES_VPC_ONLY = 8, + PERF_PC_PASS1_TF_STALL_CYCLES = 9, + PERF_PC_STARVE_CYCLES_FOR_INDEX = 10, + PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR = 11, + PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM = 12, + PERF_PC_STARVE_CYCLES_FOR_POSITION = 13, + PERF_PC_STARVE_CYCLES_DI = 14, + PERF_PC_VIS_STREAMS_LOADED = 15, + PERF_PC_INSTANCES = 16, + PERF_PC_VPC_PRIMITIVES = 17, + PERF_PC_DEAD_PRIM = 18, + PERF_PC_LIVE_PRIM = 19, + PERF_PC_VERTEX_HITS = 20, + PERF_PC_IA_VERTICES = 21, + PERF_PC_IA_PRIMITIVES = 22, + PERF_PC_GS_PRIMITIVES = 23, + PERF_PC_HS_INVOCATIONS = 24, + PERF_PC_DS_INVOCATIONS = 25, + PERF_PC_VS_INVOCATIONS = 26, + PERF_PC_GS_INVOCATIONS = 27, + PERF_PC_DS_PRIMITIVES = 28, + PERF_PC_VPC_POS_DATA_TRANSACTION = 29, + PERF_PC_3D_DRAWCALLS = 30, + PERF_PC_2D_DRAWCALLS = 31, + PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS = 32, + PERF_TESS_BUSY_CYCLES = 33, + PERF_TESS_WORKING_CYCLES = 34, + PERF_TESS_STALL_CYCLES_PC = 35, + PERF_TESS_STARVE_CYCLES_PC = 36, + PERF_PC_TSE_TRANSACTION = 37, + PERF_PC_TSE_VERTEX = 38, + PERF_PC_TESS_PC_UV_TRANS = 39, + PERF_PC_TESS_PC_UV_PATCHES = 40, + PERF_PC_TESS_FACTOR_TRANS = 41, +}; + +enum a6xx_vfd_perfcounter_select { + PERF_VFD_BUSY_CYCLES = 0, + PERF_VFD_STALL_CYCLES_UCHE = 1, + PERF_VFD_STALL_CYCLES_VPC_ALLOC = 2, + PERF_VFD_STALL_CYCLES_SP_INFO = 3, + PERF_VFD_STALL_CYCLES_SP_ATTR = 4, + PERF_VFD_STARVE_CYCLES_UCHE = 5, + PERF_VFD_RBUFFER_FULL = 6, + PERF_VFD_ATTR_INFO_FIFO_FULL = 7, + PERF_VFD_DECODED_ATTRIBUTE_BYTES = 8, + PERF_VFD_NUM_ATTRIBUTES = 9, + PERF_VFD_UPPER_SHADER_FIBERS = 10, + PERF_VFD_LOWER_SHADER_FIBERS = 11, + PERF_VFD_MODE_0_FIBERS = 12, + PERF_VFD_MODE_1_FIBERS = 13, + PERF_VFD_MODE_2_FIBERS = 14, + PERF_VFD_MODE_3_FIBERS = 15, + PERF_VFD_MODE_4_FIBERS = 16, + PERF_VFD_TOTAL_VERTICES = 17, + PERF_VFDP_STALL_CYCLES_VFD = 18, + PERF_VFDP_STALL_CYCLES_VFD_INDEX = 19, + PERF_VFDP_STALL_CYCLES_VFD_PROG = 20, + PERF_VFDP_STARVE_CYCLES_PC = 21, + PERF_VFDP_VS_STAGE_WAVES = 22, +}; + +enum a6xx_hlsq_perfcounter_select { + PERF_HLSQ_BUSY_CYCLES = 0, + PERF_HLSQ_STALL_CYCLES_UCHE = 1, + PERF_HLSQ_STALL_CYCLES_SP_STATE = 2, + PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE = 3, + PERF_HLSQ_UCHE_LATENCY_CYCLES = 
4, + PERF_HLSQ_UCHE_LATENCY_COUNT = 5, + PERF_HLSQ_FS_STAGE_1X_WAVES = 6, + PERF_HLSQ_FS_STAGE_2X_WAVES = 7, + PERF_HLSQ_QUADS = 8, + PERF_HLSQ_CS_INVOCATIONS = 9, + PERF_HLSQ_COMPUTE_DRAWCALLS = 10, + PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING = 11, + PERF_HLSQ_DUAL_FS_PROG_ACTIVE = 12, + PERF_HLSQ_DUAL_VS_PROG_ACTIVE = 13, + PERF_HLSQ_FS_BATCH_COUNT_ZERO = 14, + PERF_HLSQ_VS_BATCH_COUNT_ZERO = 15, + PERF_HLSQ_WAVE_PENDING_NO_QUAD = 16, + PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE = 17, + PERF_HLSQ_STALL_CYCLES_VPC = 18, + PERF_HLSQ_PIXELS = 19, + PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC = 20, +}; + +enum a6xx_vpc_perfcounter_select { + PERF_VPC_BUSY_CYCLES = 0, + PERF_VPC_WORKING_CYCLES = 1, + PERF_VPC_STALL_CYCLES_UCHE = 2, + PERF_VPC_STALL_CYCLES_VFD_WACK = 3, + PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC = 4, + PERF_VPC_STALL_CYCLES_PC = 5, + PERF_VPC_STALL_CYCLES_SP_LM = 6, + PERF_VPC_STARVE_CYCLES_SP = 7, + PERF_VPC_STARVE_CYCLES_LRZ = 8, + PERF_VPC_PC_PRIMITIVES = 9, + PERF_VPC_SP_COMPONENTS = 10, + PERF_VPC_STALL_CYCLES_VPCRAM_POS = 11, + PERF_VPC_LRZ_ASSIGN_PRIMITIVES = 12, + PERF_VPC_RB_VISIBLE_PRIMITIVES = 13, + PERF_VPC_LM_TRANSACTION = 14, + PERF_VPC_STREAMOUT_TRANSACTION = 15, + PERF_VPC_VS_BUSY_CYCLES = 16, + PERF_VPC_PS_BUSY_CYCLES = 17, + PERF_VPC_VS_WORKING_CYCLES = 18, + PERF_VPC_PS_WORKING_CYCLES = 19, + PERF_VPC_STARVE_CYCLES_RB = 20, + PERF_VPC_NUM_VPCRAM_READ_POS = 21, + PERF_VPC_WIT_FULL_CYCLES = 22, + PERF_VPC_VPCRAM_FULL_CYCLES = 23, + PERF_VPC_LM_FULL_WAIT_FOR_INTP_END = 24, + PERF_VPC_NUM_VPCRAM_WRITE = 25, + PERF_VPC_NUM_VPCRAM_READ_SO = 26, + PERF_VPC_NUM_ATTR_REQ_LM = 27, +}; + +enum a6xx_tse_perfcounter_select { + PERF_TSE_BUSY_CYCLES = 0, + PERF_TSE_CLIPPING_CYCLES = 1, + PERF_TSE_STALL_CYCLES_RAS = 2, + PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE = 3, + PERF_TSE_STALL_CYCLES_LRZ_ZPLANE = 4, + PERF_TSE_STARVE_CYCLES_PC = 5, + PERF_TSE_INPUT_PRIM = 6, + PERF_TSE_INPUT_NULL_PRIM = 7, + PERF_TSE_TRIVAL_REJ_PRIM = 8, + PERF_TSE_CLIPPED_PRIM = 9, + PERF_TSE_ZERO_AREA_PRIM = 10, + PERF_TSE_FACENESS_CULLED_PRIM = 11, + PERF_TSE_ZERO_PIXEL_PRIM = 12, + PERF_TSE_OUTPUT_NULL_PRIM = 13, + PERF_TSE_OUTPUT_VISIBLE_PRIM = 14, + PERF_TSE_CINVOCATION = 15, + PERF_TSE_CPRIMITIVES = 16, + PERF_TSE_2D_INPUT_PRIM = 17, + PERF_TSE_2D_ALIVE_CYCLES = 18, + PERF_TSE_CLIP_PLANES = 19, +}; + +enum a6xx_ras_perfcounter_select { + PERF_RAS_BUSY_CYCLES = 0, + PERF_RAS_SUPERTILE_ACTIVE_CYCLES = 1, + PERF_RAS_STALL_CYCLES_LRZ = 2, + PERF_RAS_STARVE_CYCLES_TSE = 3, + PERF_RAS_SUPER_TILES = 4, + PERF_RAS_8X4_TILES = 5, + PERF_RAS_MASKGEN_ACTIVE = 6, + PERF_RAS_FULLY_COVERED_SUPER_TILES = 7, + PERF_RAS_FULLY_COVERED_8X4_TILES = 8, + PERF_RAS_PRIM_KILLED_INVISILBE = 9, + PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES = 10, + PERF_RAS_LRZ_INTF_WORKING_CYCLES = 11, + PERF_RAS_BLOCKS = 12, +}; + +enum a6xx_uche_perfcounter_select { + PERF_UCHE_BUSY_CYCLES = 0, + PERF_UCHE_STALL_CYCLES_ARBITER = 1, + PERF_UCHE_VBIF_LATENCY_CYCLES = 2, + PERF_UCHE_VBIF_LATENCY_SAMPLES = 3, + PERF_UCHE_VBIF_READ_BEATS_TP = 4, + PERF_UCHE_VBIF_READ_BEATS_VFD = 5, + PERF_UCHE_VBIF_READ_BEATS_HLSQ = 6, + PERF_UCHE_VBIF_READ_BEATS_LRZ = 7, + PERF_UCHE_VBIF_READ_BEATS_SP = 8, + PERF_UCHE_READ_REQUESTS_TP = 9, + PERF_UCHE_READ_REQUESTS_VFD = 10, + PERF_UCHE_READ_REQUESTS_HLSQ = 11, + PERF_UCHE_READ_REQUESTS_LRZ = 12, + PERF_UCHE_READ_REQUESTS_SP = 13, + PERF_UCHE_WRITE_REQUESTS_LRZ = 14, + PERF_UCHE_WRITE_REQUESTS_SP = 15, + PERF_UCHE_WRITE_REQUESTS_VPC = 16, + PERF_UCHE_WRITE_REQUESTS_VSC = 17, + PERF_UCHE_EVICTS = 18, + PERF_UCHE_BANK_REQ0 = 19, + 
PERF_UCHE_BANK_REQ1 = 20, + PERF_UCHE_BANK_REQ2 = 21, + PERF_UCHE_BANK_REQ3 = 22, + PERF_UCHE_BANK_REQ4 = 23, + PERF_UCHE_BANK_REQ5 = 24, + PERF_UCHE_BANK_REQ6 = 25, + PERF_UCHE_BANK_REQ7 = 26, + PERF_UCHE_VBIF_READ_BEATS_CH0 = 27, + PERF_UCHE_VBIF_READ_BEATS_CH1 = 28, + PERF_UCHE_GMEM_READ_BEATS = 29, + PERF_UCHE_TPH_REF_FULL = 30, + PERF_UCHE_TPH_VICTIM_FULL = 31, + PERF_UCHE_TPH_EXT_FULL = 32, + PERF_UCHE_VBIF_STALL_WRITE_DATA = 33, + PERF_UCHE_DCMP_LATENCY_SAMPLES = 34, + PERF_UCHE_DCMP_LATENCY_CYCLES = 35, + PERF_UCHE_VBIF_READ_BEATS_PC = 36, + PERF_UCHE_READ_REQUESTS_PC = 37, + PERF_UCHE_RAM_READ_REQ = 38, + PERF_UCHE_RAM_WRITE_REQ = 39, +}; + +enum a6xx_tp_perfcounter_select { + PERF_TP_BUSY_CYCLES = 0, + PERF_TP_STALL_CYCLES_UCHE = 1, + PERF_TP_LATENCY_CYCLES = 2, + PERF_TP_LATENCY_TRANS = 3, + PERF_TP_FLAG_CACHE_REQUEST_SAMPLES = 4, + PERF_TP_FLAG_CACHE_REQUEST_LATENCY = 5, + PERF_TP_L1_CACHELINE_REQUESTS = 6, + PERF_TP_L1_CACHELINE_MISSES = 7, + PERF_TP_SP_TP_TRANS = 8, + PERF_TP_TP_SP_TRANS = 9, + PERF_TP_OUTPUT_PIXELS = 10, + PERF_TP_FILTER_WORKLOAD_16BIT = 11, + PERF_TP_FILTER_WORKLOAD_32BIT = 12, + PERF_TP_QUADS_RECEIVED = 13, + PERF_TP_QUADS_OFFSET = 14, + PERF_TP_QUADS_SHADOW = 15, + PERF_TP_QUADS_ARRAY = 16, + PERF_TP_QUADS_GRADIENT = 17, + PERF_TP_QUADS_1D = 18, + PERF_TP_QUADS_2D = 19, + PERF_TP_QUADS_BUFFER = 20, + PERF_TP_QUADS_3D = 21, + PERF_TP_QUADS_CUBE = 22, + PERF_TP_DIVERGENT_QUADS_RECEIVED = 23, + PERF_TP_PRT_NON_RESIDENT_EVENTS = 24, + PERF_TP_OUTPUT_PIXELS_POINT = 25, + PERF_TP_OUTPUT_PIXELS_BILINEAR = 26, + PERF_TP_OUTPUT_PIXELS_MIP = 27, + PERF_TP_OUTPUT_PIXELS_ANISO = 28, + PERF_TP_OUTPUT_PIXELS_ZERO_LOD = 29, + PERF_TP_FLAG_CACHE_REQUESTS = 30, + PERF_TP_FLAG_CACHE_MISSES = 31, + PERF_TP_L1_5_L2_REQUESTS = 32, + PERF_TP_2D_OUTPUT_PIXELS = 33, + PERF_TP_2D_OUTPUT_PIXELS_POINT = 34, + PERF_TP_2D_OUTPUT_PIXELS_BILINEAR = 35, + PERF_TP_2D_FILTER_WORKLOAD_16BIT = 36, + PERF_TP_2D_FILTER_WORKLOAD_32BIT = 37, + PERF_TP_TPA2TPC_TRANS = 38, + PERF_TP_L1_MISSES_ASTC_1TILE = 39, + PERF_TP_L1_MISSES_ASTC_2TILE = 40, + PERF_TP_L1_MISSES_ASTC_4TILE = 41, + PERF_TP_L1_5_L2_COMPRESS_REQS = 42, + PERF_TP_L1_5_L2_COMPRESS_MISS = 43, + PERF_TP_L1_BANK_CONFLICT = 44, + PERF_TP_L1_5_MISS_LATENCY_CYCLES = 45, + PERF_TP_L1_5_MISS_LATENCY_TRANS = 46, + PERF_TP_QUADS_CONSTANT_MULTIPLIED = 47, + PERF_TP_FRONTEND_WORKING_CYCLES = 48, + PERF_TP_L1_TAG_WORKING_CYCLES = 49, + PERF_TP_L1_DATA_WRITE_WORKING_CYCLES = 50, + PERF_TP_PRE_L1_DECOM_WORKING_CYCLES = 51, + PERF_TP_BACKEND_WORKING_CYCLES = 52, + PERF_TP_FLAG_CACHE_WORKING_CYCLES = 53, + PERF_TP_L1_5_CACHE_WORKING_CYCLES = 54, + PERF_TP_STARVE_CYCLES_SP = 55, + PERF_TP_STARVE_CYCLES_UCHE = 56, +}; + +enum a6xx_sp_perfcounter_select { + PERF_SP_BUSY_CYCLES = 0, + PERF_SP_ALU_WORKING_CYCLES = 1, + PERF_SP_EFU_WORKING_CYCLES = 2, + PERF_SP_STALL_CYCLES_VPC = 3, + PERF_SP_STALL_CYCLES_TP = 4, + PERF_SP_STALL_CYCLES_UCHE = 5, + PERF_SP_STALL_CYCLES_RB = 6, + PERF_SP_NON_EXECUTION_CYCLES = 7, + PERF_SP_WAVE_CONTEXTS = 8, + PERF_SP_WAVE_CONTEXT_CYCLES = 9, + PERF_SP_FS_STAGE_WAVE_CYCLES = 10, + PERF_SP_FS_STAGE_WAVE_SAMPLES = 11, + PERF_SP_VS_STAGE_WAVE_CYCLES = 12, + PERF_SP_VS_STAGE_WAVE_SAMPLES = 13, + PERF_SP_FS_STAGE_DURATION_CYCLES = 14, + PERF_SP_VS_STAGE_DURATION_CYCLES = 15, + PERF_SP_WAVE_CTRL_CYCLES = 16, + PERF_SP_WAVE_LOAD_CYCLES = 17, + PERF_SP_WAVE_EMIT_CYCLES = 18, + PERF_SP_WAVE_NOP_CYCLES = 19, + PERF_SP_WAVE_WAIT_CYCLES = 20, + PERF_SP_WAVE_FETCH_CYCLES = 21, + PERF_SP_WAVE_IDLE_CYCLES = 22, + PERF_SP_WAVE_END_CYCLES 
= 23, + PERF_SP_WAVE_LONG_SYNC_CYCLES = 24, + PERF_SP_WAVE_SHORT_SYNC_CYCLES = 25, + PERF_SP_WAVE_JOIN_CYCLES = 26, + PERF_SP_LM_LOAD_INSTRUCTIONS = 27, + PERF_SP_LM_STORE_INSTRUCTIONS = 28, + PERF_SP_LM_ATOMICS = 29, + PERF_SP_GM_LOAD_INSTRUCTIONS = 30, + PERF_SP_GM_STORE_INSTRUCTIONS = 31, + PERF_SP_GM_ATOMICS = 32, + PERF_SP_VS_STAGE_TEX_INSTRUCTIONS = 33, + PERF_SP_VS_STAGE_EFU_INSTRUCTIONS = 34, + PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 35, + PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 36, + PERF_SP_FS_STAGE_TEX_INSTRUCTIONS = 37, + PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS = 38, + PERF_SP_FS_STAGE_EFU_INSTRUCTIONS = 39, + PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 40, + PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 41, + PERF_SP_FS_STAGE_BARY_INSTRUCTIONS = 42, + PERF_SP_VS_INSTRUCTIONS = 43, + PERF_SP_FS_INSTRUCTIONS = 44, + PERF_SP_ADDR_LOCK_COUNT = 45, + PERF_SP_UCHE_READ_TRANS = 46, + PERF_SP_UCHE_WRITE_TRANS = 47, + PERF_SP_EXPORT_VPC_TRANS = 48, + PERF_SP_EXPORT_RB_TRANS = 49, + PERF_SP_PIXELS_KILLED = 50, + PERF_SP_ICL1_REQUESTS = 51, + PERF_SP_ICL1_MISSES = 52, + PERF_SP_HS_INSTRUCTIONS = 53, + PERF_SP_DS_INSTRUCTIONS = 54, + PERF_SP_GS_INSTRUCTIONS = 55, + PERF_SP_CS_INSTRUCTIONS = 56, + PERF_SP_GPR_READ = 57, + PERF_SP_GPR_WRITE = 58, + PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS = 59, + PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS = 60, + PERF_SP_LM_BANK_CONFLICTS = 61, + PERF_SP_TEX_CONTROL_WORKING_CYCLES = 62, + PERF_SP_LOAD_CONTROL_WORKING_CYCLES = 63, + PERF_SP_FLOW_CONTROL_WORKING_CYCLES = 64, + PERF_SP_LM_WORKING_CYCLES = 65, + PERF_SP_DISPATCHER_WORKING_CYCLES = 66, + PERF_SP_SEQUENCER_WORKING_CYCLES = 67, + PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP = 68, + PERF_SP_STARVE_CYCLES_HLSQ = 69, + PERF_SP_NON_EXECUTION_LS_CYCLES = 70, + PERF_SP_WORKING_EU = 71, + PERF_SP_ANY_EU_WORKING = 72, + PERF_SP_WORKING_EU_FS_STAGE = 73, + PERF_SP_ANY_EU_WORKING_FS_STAGE = 74, + PERF_SP_WORKING_EU_VS_STAGE = 75, + PERF_SP_ANY_EU_WORKING_VS_STAGE = 76, + PERF_SP_WORKING_EU_CS_STAGE = 77, + PERF_SP_ANY_EU_WORKING_CS_STAGE = 78, + PERF_SP_GPR_READ_PREFETCH = 79, + PERF_SP_GPR_READ_CONFLICT = 80, + PERF_SP_GPR_WRITE_CONFLICT = 81, + PERF_SP_GM_LOAD_LATENCY_CYCLES = 82, + PERF_SP_GM_LOAD_LATENCY_SAMPLES = 83, + PERF_SP_EXECUTABLE_WAVES = 84, +}; + +enum a6xx_rb_perfcounter_select { + PERF_RB_BUSY_CYCLES = 0, + PERF_RB_STALL_CYCLES_HLSQ = 1, + PERF_RB_STALL_CYCLES_FIFO0_FULL = 2, + PERF_RB_STALL_CYCLES_FIFO1_FULL = 3, + PERF_RB_STALL_CYCLES_FIFO2_FULL = 4, + PERF_RB_STARVE_CYCLES_SP = 5, + PERF_RB_STARVE_CYCLES_LRZ_TILE = 6, + PERF_RB_STARVE_CYCLES_CCU = 7, + PERF_RB_STARVE_CYCLES_Z_PLANE = 8, + PERF_RB_STARVE_CYCLES_BARY_PLANE = 9, + PERF_RB_Z_WORKLOAD = 10, + PERF_RB_HLSQ_ACTIVE = 11, + PERF_RB_Z_READ = 12, + PERF_RB_Z_WRITE = 13, + PERF_RB_C_READ = 14, + PERF_RB_C_WRITE = 15, + PERF_RB_TOTAL_PASS = 16, + PERF_RB_Z_PASS = 17, + PERF_RB_Z_FAIL = 18, + PERF_RB_S_FAIL = 19, + PERF_RB_BLENDED_FXP_COMPONENTS = 20, + PERF_RB_BLENDED_FP16_COMPONENTS = 21, + PERF_RB_PS_INVOCATIONS = 22, + PERF_RB_2D_ALIVE_CYCLES = 23, + PERF_RB_2D_STALL_CYCLES_A2D = 24, + PERF_RB_2D_STARVE_CYCLES_SRC = 25, + PERF_RB_2D_STARVE_CYCLES_SP = 26, + PERF_RB_2D_STARVE_CYCLES_DST = 27, + PERF_RB_2D_VALID_PIXELS = 28, + PERF_RB_3D_PIXELS = 29, + PERF_RB_BLENDER_WORKING_CYCLES = 30, + PERF_RB_ZPROC_WORKING_CYCLES = 31, + PERF_RB_CPROC_WORKING_CYCLES = 32, + PERF_RB_SAMPLER_WORKING_CYCLES = 33, + PERF_RB_STALL_CYCLES_CCU_COLOR_READ = 34, + PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE = 35, + PERF_RB_STALL_CYCLES_CCU_DEPTH_READ = 36, + 
PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE = 37, + PERF_RB_STALL_CYCLES_VPC = 38, + PERF_RB_2D_INPUT_TRANS = 39, + PERF_RB_2D_OUTPUT_RB_DST_TRANS = 40, + PERF_RB_2D_OUTPUT_RB_SRC_TRANS = 41, + PERF_RB_BLENDED_FP32_COMPONENTS = 42, + PERF_RB_COLOR_PIX_TILES = 43, + PERF_RB_STALL_CYCLES_CCU = 44, + PERF_RB_EARLY_Z_ARB3_GRANT = 45, + PERF_RB_LATE_Z_ARB3_GRANT = 46, + PERF_RB_EARLY_Z_SKIP_GRANT = 47, +}; + +enum a6xx_vsc_perfcounter_select { + PERF_VSC_BUSY_CYCLES = 0, + PERF_VSC_WORKING_CYCLES = 1, + PERF_VSC_STALL_CYCLES_UCHE = 2, + PERF_VSC_EOT_NUM = 3, + PERF_VSC_INPUT_TILES = 4, +}; + +enum a6xx_ccu_perfcounter_select { + PERF_CCU_BUSY_CYCLES = 0, + PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN = 1, + PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN = 2, + PERF_CCU_STARVE_CYCLES_FLAG_RETURN = 3, + PERF_CCU_DEPTH_BLOCKS = 4, + PERF_CCU_COLOR_BLOCKS = 5, + PERF_CCU_DEPTH_BLOCK_HIT = 6, + PERF_CCU_COLOR_BLOCK_HIT = 7, + PERF_CCU_PARTIAL_BLOCK_READ = 8, + PERF_CCU_GMEM_READ = 9, + PERF_CCU_GMEM_WRITE = 10, + PERF_CCU_DEPTH_READ_FLAG0_COUNT = 11, + PERF_CCU_DEPTH_READ_FLAG1_COUNT = 12, + PERF_CCU_DEPTH_READ_FLAG2_COUNT = 13, + PERF_CCU_DEPTH_READ_FLAG3_COUNT = 14, + PERF_CCU_DEPTH_READ_FLAG4_COUNT = 15, + PERF_CCU_DEPTH_READ_FLAG5_COUNT = 16, + PERF_CCU_DEPTH_READ_FLAG6_COUNT = 17, + PERF_CCU_DEPTH_READ_FLAG8_COUNT = 18, + PERF_CCU_COLOR_READ_FLAG0_COUNT = 19, + PERF_CCU_COLOR_READ_FLAG1_COUNT = 20, + PERF_CCU_COLOR_READ_FLAG2_COUNT = 21, + PERF_CCU_COLOR_READ_FLAG3_COUNT = 22, + PERF_CCU_COLOR_READ_FLAG4_COUNT = 23, + PERF_CCU_COLOR_READ_FLAG5_COUNT = 24, + PERF_CCU_COLOR_READ_FLAG6_COUNT = 25, + PERF_CCU_COLOR_READ_FLAG8_COUNT = 26, + PERF_CCU_2D_RD_REQ = 27, + PERF_CCU_2D_WR_REQ = 28, +}; + +enum a6xx_lrz_perfcounter_select { + PERF_LRZ_BUSY_CYCLES = 0, + PERF_LRZ_STARVE_CYCLES_RAS = 1, + PERF_LRZ_STALL_CYCLES_RB = 2, + PERF_LRZ_STALL_CYCLES_VSC = 3, + PERF_LRZ_STALL_CYCLES_VPC = 4, + PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH = 5, + PERF_LRZ_STALL_CYCLES_UCHE = 6, + PERF_LRZ_LRZ_READ = 7, + PERF_LRZ_LRZ_WRITE = 8, + PERF_LRZ_READ_LATENCY = 9, + PERF_LRZ_MERGE_CACHE_UPDATING = 10, + PERF_LRZ_PRIM_KILLED_BY_MASKGEN = 11, + PERF_LRZ_PRIM_KILLED_BY_LRZ = 12, + PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ = 13, + PERF_LRZ_FULL_8X8_TILES = 14, + PERF_LRZ_PARTIAL_8X8_TILES = 15, + PERF_LRZ_TILE_KILLED = 16, + PERF_LRZ_TOTAL_PIXEL = 17, + PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ = 18, + PERF_LRZ_FULLY_COVERED_TILES = 19, + PERF_LRZ_PARTIAL_COVERED_TILES = 20, + PERF_LRZ_FEEDBACK_ACCEPT = 21, + PERF_LRZ_FEEDBACK_DISCARD = 22, + PERF_LRZ_FEEDBACK_STALL = 23, + PERF_LRZ_STALL_CYCLES_RB_ZPLANE = 24, + PERF_LRZ_STALL_CYCLES_RB_BPLANE = 25, + PERF_LRZ_STALL_CYCLES_VC = 26, + PERF_LRZ_RAS_MASK_TRANS = 27, +}; + +enum a6xx_cmp_perfcounter_select { + PERF_CMPDECMP_STALL_CYCLES_ARB = 0, + PERF_CMPDECMP_VBIF_LATENCY_CYCLES = 1, + PERF_CMPDECMP_VBIF_LATENCY_SAMPLES = 2, + PERF_CMPDECMP_VBIF_READ_DATA_CCU = 3, + PERF_CMPDECMP_VBIF_WRITE_DATA_CCU = 4, + PERF_CMPDECMP_VBIF_READ_REQUEST = 5, + PERF_CMPDECMP_VBIF_WRITE_REQUEST = 6, + PERF_CMPDECMP_VBIF_READ_DATA = 7, + PERF_CMPDECMP_VBIF_WRITE_DATA = 8, + PERF_CMPDECMP_FLAG_FETCH_CYCLES = 9, + PERF_CMPDECMP_FLAG_FETCH_SAMPLES = 10, + PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT = 11, + PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT = 12, + PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT = 13, + PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT = 14, + PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT = 15, + PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT = 16, + PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT = 17, + PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT = 18, + 
+	PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT = 19,
+	PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT = 20,
+	PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT = 21,
+	PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT = 22,
+	PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT = 23,
+	PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT = 24,
+	PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ = 25,
+	PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR = 26,
+	PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN = 27,
+	PERF_CMPDECMP_2D_RD_DATA = 28,
+	PERF_CMPDECMP_2D_WR_DATA = 29,
+	PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0 = 30,
+	PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1 = 31,
+	PERF_CMPDECMP_2D_OUTPUT_TRANS = 32,
+	PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE = 33,
+	PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT = 34,
+	PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT = 35,
+	PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT = 36,
+	PERF_CMPDECMP_2D_BUSY_CYCLES = 37,
+	PERF_CMPDECMP_2D_REORDER_STARVE_CYCLES = 38,
+	PERF_CMPDECMP_2D_PIXELS = 39,
+};
+
+enum a6xx_2d_ifmt {
+	R2D_UNORM8 = 16,
+	R2D_INT32 = 7,
+	R2D_INT16 = 6,
+	R2D_INT8 = 5,
+	R2D_FLOAT32 = 4,
+	R2D_FLOAT16 = 3,
+	R2D_UNORM8_SRGB = 1,
+	R2D_RAW = 0,
+};
+
+enum a6xx_ztest_mode {
+	A6XX_EARLY_Z = 0,
+	A6XX_LATE_Z = 1,
+	A6XX_EARLY_LRZ_LATE_Z = 2,
+};
+
+enum a6xx_sequenced_thread_dist {
+	DIST_SCREEN_COORD = 0,
+	DIST_ALL_TO_RB0 = 1,
+};
+
+enum a6xx_single_prim_mode {
+	NO_FLUSH = 0,
+	FLUSH_PER_OVERLAP_AND_OVERWRITE = 1,
+	FLUSH_PER_OVERLAP = 3,
+};
+
+enum a6xx_raster_mode {
+	TYPE_TILED = 0,
+	TYPE_WRITER = 1,
+};
+
+enum a6xx_raster_direction {
+	LR_TB = 0,
+	RL_TB = 1,
+	LR_BT = 2,
+	RB_BT = 3,
+};
+
+enum a6xx_render_mode {
+	RENDERING_PASS = 0,
+	BINNING_PASS = 1,
+};
+
+enum a6xx_buffers_location {
+	BUFFERS_IN_GMEM = 0,
+	BUFFERS_IN_SYSMEM = 3,
+};
+
+enum a6xx_fragcoord_sample_mode {
+	FRAGCOORD_CENTER = 0,
+	FRAGCOORD_SAMPLE = 3,
+};
+
+enum a6xx_rotation {
+	ROTATE_0 = 0,
+	ROTATE_90 = 1,
+	ROTATE_180 = 2,
+	ROTATE_270 = 3,
+	ROTATE_HFLIP = 4,
+	ROTATE_VFLIP = 5,
+};
+
+enum a6xx_tess_spacing {
+	TESS_EQUAL = 0,
+	TESS_FRACTIONAL_ODD = 2,
+	TESS_FRACTIONAL_EVEN = 3,
+};
+
+enum a6xx_tess_output {
+	TESS_POINTS = 0,
+	TESS_LINES = 1,
+	TESS_CW_TRIS = 2,
+	TESS_CCW_TRIS = 3,
+};
+
+enum a6xx_threadsize {
+	THREAD64 = 0,
+	THREAD128 = 1,
+};
+
+enum a6xx_isam_mode {
+	ISAMMODE_GL = 2,
+};
+
+enum a6xx_tex_filter {
+	A6XX_TEX_NEAREST = 0,
+	A6XX_TEX_LINEAR = 1,
+	A6XX_TEX_ANISO = 2,
+	A6XX_TEX_CUBIC = 3,
+};
+
+enum a6xx_tex_clamp {
+	A6XX_TEX_REPEAT = 0,
+	A6XX_TEX_CLAMP_TO_EDGE = 1,
+	A6XX_TEX_MIRROR_REPEAT = 2,
+	A6XX_TEX_CLAMP_TO_BORDER = 3,
+	A6XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a6xx_tex_aniso {
+	A6XX_TEX_ANISO_1 = 0,
+	A6XX_TEX_ANISO_2 = 1,
+	A6XX_TEX_ANISO_4 = 2,
+	A6XX_TEX_ANISO_8 = 3,
+	A6XX_TEX_ANISO_16 = 4,
+};
+
+enum a6xx_reduction_mode {
+	A6XX_REDUCTION_MODE_AVERAGE = 0,
+	A6XX_REDUCTION_MODE_MIN = 1,
+	A6XX_REDUCTION_MODE_MAX = 2,
+};
+
+enum a6xx_tex_swiz {
+	A6XX_TEX_X = 0,
+	A6XX_TEX_Y = 1,
+	A6XX_TEX_Z = 2,
+	A6XX_TEX_W = 3,
+	A6XX_TEX_ZERO = 4,
+	A6XX_TEX_ONE = 5,
+};
+
+enum a6xx_tex_type {
+	A6XX_TEX_1D = 0,
+	A6XX_TEX_2D = 1,
+	A6XX_TEX_CUBE = 2,
+	A6XX_TEX_3D = 3,
+	A6XX_TEX_BUFFER = 4,
+};
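+
+/*
+ * The *_perfcounter_select enums above are mux values, not registers: each
+ * one is written into the matching per-block *_PERFCTR_*_SEL register
+ * defined further down in this file.  A minimal sketch, assuming the msm
+ * gpu_write() accessor from msm_gpu.h (the counter choice is illustrative):
+ *
+ *	gpu_write(gpu, REG_A6XX_VSC_PERFCTR_VSC_SEL(0), PERF_VSC_BUSY_CYCLES);
+ */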
+
+#define A6XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001
+#define A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR 0x00000002
+#define A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW 0x00000040
+#define A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080
+#define A6XX_RBBM_INT_0_MASK_CP_SW 0x00000100
+#define A6XX_RBBM_INT_0_MASK_CP_HW_ERROR 0x00000200
+#define A6XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A6XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A6XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS 0x00001000
+#define A6XX_RBBM_INT_0_MASK_CP_IB2 0x00002000
+#define A6XX_RBBM_INT_0_MASK_CP_IB1 0x00004000
+#define A6XX_RBBM_INT_0_MASK_CP_RB 0x00008000
+#define A6XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000
+#define A6XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000
+#define A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000
+#define A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT 0x00800000
+#define A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000
+#define A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000
+#define A6XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000
+#define A6XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000
+#define A6XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000
+#define A6XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000
+#define A6XX_CP_INT_CP_OPCODE_ERROR 0x00000001
+#define A6XX_CP_INT_CP_UCODE_ERROR 0x00000002
+#define A6XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004
+#define A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010
+#define A6XX_CP_INT_CP_AHB_ERROR 0x00000020
+#define A6XX_CP_INT_CP_VSD_PARITY_ERROR 0x00000040
+#define A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR 0x00000080
+#define REG_A6XX_CP_RB_BASE 0x00000800
+
+#define REG_A6XX_CP_RB_BASE_HI 0x00000801
+
+#define REG_A6XX_CP_RB_CNTL 0x00000802
+
+#define REG_A6XX_CP_RB_RPTR_ADDR_LO 0x00000804
+
+#define REG_A6XX_CP_RB_RPTR_ADDR_HI 0x00000805
+
+#define REG_A6XX_CP_RB_RPTR 0x00000806
+
+#define REG_A6XX_CP_RB_WPTR 0x00000807
+
+#define REG_A6XX_CP_SQE_CNTL 0x00000808
+
+#define REG_A6XX_CP_CP2GMU_STATUS 0x00000812
+#define A6XX_CP_CP2GMU_STATUS_IFPC 0x00000001
+
+#define REG_A6XX_CP_HW_FAULT 0x00000821
+
+#define REG_A6XX_CP_INTERRUPT_STATUS 0x00000823
+
+#define REG_A6XX_CP_PROTECT_STATUS 0x00000824
+
+#define REG_A6XX_CP_SQE_INSTR_BASE 0x00000830
+
+#define REG_A6XX_CP_MISC_CNTL 0x00000840
+
+#define REG_A6XX_CP_APRIV_CNTL 0x00000844
+
+#define REG_A6XX_CP_ROQ_THRESHOLDS_1 0x000008c1
+#define A6XX_CP_ROQ_THRESHOLDS_1_RB_LO__MASK 0x000000ff
+#define A6XX_CP_ROQ_THRESHOLDS_1_RB_LO__SHIFT 0
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_RB_LO(uint32_t val)
+{
+	return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_RB_LO__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_RB_LO__MASK;
+}
+#define A6XX_CP_ROQ_THRESHOLDS_1_RB_HI__MASK 0x0000ff00
+#define A6XX_CP_ROQ_THRESHOLDS_1_RB_HI__SHIFT 8
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_RB_HI(uint32_t val)
+{
+	return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_RB_HI__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_RB_HI__MASK;
+}
+#define A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__MASK 0x00ff0000
+#define A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__SHIFT 16
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_IB1_START(uint32_t val)
+{
+	return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__MASK;
+}
+#define A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__MASK 0xff000000
+#define A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__SHIFT 24
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_IB2_START(uint32_t val)
+{
+	return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__MASK;
+}
+
+#define REG_A6XX_CP_ROQ_THRESHOLDS_2 0x000008c2
+#define A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__MASK 0x000001ff
+#define A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__SHIFT 0
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_2_SDS_START(uint32_t val)
+{
+	return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__MASK;
+}
+#define A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__MASK 0xffff0000
+#define A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__SHIFT 16
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE(uint32_t val)
+{
+	return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__MASK;
+}
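+
+/*
+ * Note the "val >> 2" in the ROQ threshold builders above: the caller
+ * passes a raw value and the field stores it divided by four.  Worked
+ * example, following the arithmetic of the helpers:
+ *
+ *	A6XX_CP_ROQ_THRESHOLDS_1_RB_LO(0x40) == 0x10         (bits 7:0)
+ *	A6XX_CP_ROQ_THRESHOLDS_1_IB1_START(0x80) == 0x200000 (bits 23:16)
+ */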
+
+#define REG_A6XX_CP_MEM_POOL_SIZE 0x000008c3
+
+#define REG_A6XX_CP_CHICKEN_DBG 0x00000841
+
+#define REG_A6XX_CP_ADDR_MODE_CNTL 0x00000842
+
+#define REG_A6XX_CP_DBG_ECO_CNTL 0x00000843
+
+#define REG_A6XX_CP_PROTECT_CNTL 0x0000084f
+
+static inline uint32_t REG_A6XX_CP_SCRATCH(uint32_t i0) { return 0x00000883 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000883 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_CP_PROTECT(uint32_t i0) { return 0x00000850 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000850 + 0x1*i0; }
+#define A6XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0003ffff
+#define A6XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A6XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+	return ((val) << A6XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A6XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A6XX_CP_PROTECT_REG_MASK_LEN__MASK 0x7ffc0000
+#define A6XX_CP_PROTECT_REG_MASK_LEN__SHIFT 18
+static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+	return ((val) << A6XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A6XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A6XX_CP_PROTECT_REG_READ 0x80000000
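+
+/*
+ * A CP_PROTECT entry packs a base register, a range length and a
+ * read-allow bit into one dword.  A minimal sketch of filling slot 0,
+ * assuming the msm gpu_write() accessor; the protected range itself is
+ * illustrative, not taken from the driver:
+ *
+ *	gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
+ *		  A6XX_CP_PROTECT_REG_BASE_ADDR(0x600) |
+ *		  A6XX_CP_PROTECT_REG_MASK_LEN(0x51) |
+ *		  A6XX_CP_PROTECT_REG_READ);
+ */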
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_CNTL 0x000008a0
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO 0x000008a1
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI 0x000008a2
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO 0x000008a3
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI 0x000008a4
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO 0x000008a5
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI 0x000008a6
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO 0x000008a7
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI 0x000008a8
+
+static inline uint32_t REG_A6XX_CP_PERFCTR_CP_SEL(uint32_t i0) { return 0x000008d0 + 0x1*i0; }
+
+#define REG_A6XX_CP_CRASH_SCRIPT_BASE_LO 0x00000900
+
+#define REG_A6XX_CP_CRASH_SCRIPT_BASE_HI 0x00000901
+
+#define REG_A6XX_CP_CRASH_DUMP_CNTL 0x00000902
+
+#define REG_A6XX_CP_CRASH_DUMP_STATUS 0x00000903
+
+#define REG_A6XX_CP_SQE_STAT_ADDR 0x00000908
+
+#define REG_A6XX_CP_SQE_STAT_DATA 0x00000909
+
+#define REG_A6XX_CP_DRAW_STATE_ADDR 0x0000090a
+
+#define REG_A6XX_CP_DRAW_STATE_DATA 0x0000090b
+
+#define REG_A6XX_CP_ROQ_DBG_ADDR 0x0000090c
+
+#define REG_A6XX_CP_ROQ_DBG_DATA 0x0000090d
+
+#define REG_A6XX_CP_MEM_POOL_DBG_ADDR 0x0000090e
+
+#define REG_A6XX_CP_MEM_POOL_DBG_DATA 0x0000090f
+
+#define REG_A6XX_CP_SQE_UCODE_DBG_ADDR 0x00000910
+
+#define REG_A6XX_CP_SQE_UCODE_DBG_DATA 0x00000911
+
+#define REG_A6XX_CP_IB1_BASE 0x00000928
+
+#define REG_A6XX_CP_IB1_BASE_HI 0x00000929
+
+#define REG_A6XX_CP_IB1_REM_SIZE 0x0000092a
+
+#define REG_A6XX_CP_IB2_BASE 0x0000092b
+
+#define REG_A6XX_CP_IB2_BASE_HI 0x0000092c
+
+#define REG_A6XX_CP_IB2_REM_SIZE 0x0000092d
+
+#define REG_A6XX_CP_SDS_BASE 0x0000092e
+
+#define REG_A6XX_CP_SDS_BASE_HI 0x0000092f
+
+#define REG_A6XX_CP_SDS_REM_SIZE 0x00000930
+
+#define REG_A6XX_CP_MRB_BASE 0x00000931
+
+#define REG_A6XX_CP_MRB_BASE_HI 0x00000932
+
+#define REG_A6XX_CP_MRB_REM_SIZE 0x00000933
+
+#define REG_A6XX_CP_VSD_BASE 0x00000934
+
+#define REG_A6XX_CP_VSD_BASE_HI 0x00000935
+
+#define REG_A6XX_CP_MRB_DWORDS 0x00000946
+
+#define REG_A6XX_CP_VSD_DWORDS 0x00000947
+
+#define REG_A6XX_CP_CSQ_IB1_STAT 0x00000949
+#define A6XX_CP_CSQ_IB1_STAT_REM__MASK 0xffff0000
+#define A6XX_CP_CSQ_IB1_STAT_REM__SHIFT 16
+static inline uint32_t A6XX_CP_CSQ_IB1_STAT_REM(uint32_t val)
+{
+	return ((val) << A6XX_CP_CSQ_IB1_STAT_REM__SHIFT) & A6XX_CP_CSQ_IB1_STAT_REM__MASK;
+}
+
+#define REG_A6XX_CP_CSQ_IB2_STAT 0x0000094a
+#define A6XX_CP_CSQ_IB2_STAT_REM__MASK 0xffff0000
+#define A6XX_CP_CSQ_IB2_STAT_REM__SHIFT 16
+static inline uint32_t A6XX_CP_CSQ_IB2_STAT_REM(uint32_t val)
+{
+	return ((val) << A6XX_CP_CSQ_IB2_STAT_REM__SHIFT) & A6XX_CP_CSQ_IB2_STAT_REM__MASK;
+}
+
+#define REG_A6XX_CP_MRQ_MRB_STAT 0x0000094c
+#define A6XX_CP_MRQ_MRB_STAT_REM__MASK 0xffff0000
+#define A6XX_CP_MRQ_MRB_STAT_REM__SHIFT 16
+static inline uint32_t A6XX_CP_MRQ_MRB_STAT_REM(uint32_t val)
+{
+	return ((val) << A6XX_CP_MRQ_MRB_STAT_REM__SHIFT) & A6XX_CP_MRQ_MRB_STAT_REM__MASK;
+}
+
+#define REG_A6XX_CP_ALWAYS_ON_COUNTER_LO 0x00000980
+
+#define REG_A6XX_CP_ALWAYS_ON_COUNTER_HI 0x00000981
+
+#define REG_A6XX_CP_AHB_CNTL 0x0000098d
+
+#define REG_A6XX_CP_APERTURE_CNTL_HOST 0x00000a00
+
+#define REG_A6XX_CP_APERTURE_CNTL_CD 0x00000a03
+
+#define REG_A6XX_CP_LPAC_PROG_FIFO_SIZE 0x00000b34
+
+#define REG_A6XX_CP_LPAC_SQE_INSTR_BASE 0x00000b82
+
+#define REG_A6XX_VSC_ADDR_MODE_CNTL 0x00000c01
+
+#define REG_A6XX_RBBM_INT_0_STATUS 0x00000201
+
+#define REG_A6XX_RBBM_STATUS 0x00000210
+#define A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB 0x00800000
+#define A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP 0x00400000
+#define A6XX_RBBM_STATUS_HLSQ_BUSY 0x00200000
+#define A6XX_RBBM_STATUS_VSC_BUSY 0x00100000
+#define A6XX_RBBM_STATUS_TPL1_BUSY 0x00080000
+#define A6XX_RBBM_STATUS_SP_BUSY 0x00040000
+#define A6XX_RBBM_STATUS_UCHE_BUSY 0x00020000
+#define A6XX_RBBM_STATUS_VPC_BUSY 0x00010000
+#define A6XX_RBBM_STATUS_VFD_BUSY 0x00008000
+#define A6XX_RBBM_STATUS_TESS_BUSY 0x00004000
+#define A6XX_RBBM_STATUS_PC_VSD_BUSY 0x00002000
+#define A6XX_RBBM_STATUS_PC_DCALL_BUSY 0x00001000
+#define A6XX_RBBM_STATUS_COM_DCOM_BUSY 0x00000800
+#define A6XX_RBBM_STATUS_LRZ_BUSY 0x00000400
+#define A6XX_RBBM_STATUS_A2D_BUSY 0x00000200
+#define A6XX_RBBM_STATUS_CCU_BUSY 0x00000100
+#define A6XX_RBBM_STATUS_RB_BUSY 0x00000080
+#define A6XX_RBBM_STATUS_RAS_BUSY 0x00000040
+#define A6XX_RBBM_STATUS_TSE_BUSY 0x00000020
+#define A6XX_RBBM_STATUS_VBIF_BUSY 0x00000010
+#define A6XX_RBBM_STATUS_GFX_DBGC_BUSY 0x00000008
+#define A6XX_RBBM_STATUS_CP_BUSY 0x00000004
+#define A6XX_RBBM_STATUS_CP_AHB_BUSY_CP_MASTER 0x00000002
+#define A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER 0x00000001
+
+#define REG_A6XX_RBBM_STATUS3 0x00000213
+#define A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT 0x01000000
+
+#define REG_A6XX_RBBM_VBIF_GX_RESET_STATUS 0x00000215
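+
+/*
+ * Each REG_A6XX_RBBM_PERFCTR_*(i0) helper below returns the LO half of
+ * counter i0; the 0x2 stride means the HI half is always the next
+ * register.  A minimal sketch of a 64-bit read, assuming plain gpu_read()
+ * accessors:
+ *
+ *	u32 lo = gpu_read(gpu, REG_A6XX_RBBM_PERFCTR_CP(0));
+ *	u32 hi = gpu_read(gpu, REG_A6XX_RBBM_PERFCTR_CP(0) + 1);
+ *	u64 cycles = ((u64)hi << 32) | lo;
+ */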
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_CP(uint32_t i0) { return 0x00000400 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM(uint32_t i0) { return 0x0000041c + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_PC(uint32_t i0) { return 0x00000424 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_VFD(uint32_t i0) { return 0x00000434 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_HLSQ(uint32_t i0) { return 0x00000444 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_VPC(uint32_t i0) { return 0x00000450 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_CCU(uint32_t i0) { return 0x0000045c + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_TSE(uint32_t i0) { return 0x00000466 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_RAS(uint32_t i0) { return 0x0000046e + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_UCHE(uint32_t i0) { return 0x00000476 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_TP(uint32_t i0) { return 0x0000048e + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_SP(uint32_t i0) { return 0x000004a6 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_RB(uint32_t i0) { return 0x000004d6 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_VSC(uint32_t i0) { return 0x000004e6 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_LRZ(uint32_t i0) { return 0x000004ea + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_CMP(uint32_t i0) { return 0x000004f2 + 0x2*i0; }
+
+#define REG_A6XX_RBBM_PERFCTR_CNTL 0x00000500
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD0 0x00000501
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD1 0x00000502
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD2 0x00000503
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD3 0x00000504
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000505
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000506
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00000507 + 0x1*i0; }
+
+#define REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000050b
+
+#define REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD 0x0000050e
+
+#define REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS 0x0000050f
+
+#define REG_A6XX_RBBM_ISDB_CNT 0x00000533
+
+#define REG_A6XX_RBBM_PRIMCTR_0_LO 0x00000540
+
+#define REG_A6XX_RBBM_PRIMCTR_0_HI 0x00000541
+
+#define REG_A6XX_RBBM_PRIMCTR_1_LO 0x00000542
+
+#define REG_A6XX_RBBM_PRIMCTR_1_HI 0x00000543
+
+#define REG_A6XX_RBBM_PRIMCTR_2_LO 0x00000544
+
+#define REG_A6XX_RBBM_PRIMCTR_2_HI 0x00000545
+
+#define REG_A6XX_RBBM_PRIMCTR_3_LO 0x00000546
+
+#define REG_A6XX_RBBM_PRIMCTR_3_HI 0x00000547
+
+#define REG_A6XX_RBBM_PRIMCTR_4_LO 0x00000548
+
+#define REG_A6XX_RBBM_PRIMCTR_4_HI 0x00000549
+
+#define REG_A6XX_RBBM_PRIMCTR_5_LO 0x0000054a
+
+#define REG_A6XX_RBBM_PRIMCTR_5_HI 0x0000054b
+
+#define REG_A6XX_RBBM_PRIMCTR_6_LO 0x0000054c
+
+#define REG_A6XX_RBBM_PRIMCTR_6_HI 0x0000054d
+
+#define REG_A6XX_RBBM_PRIMCTR_7_LO 0x0000054e
+
+#define REG_A6XX_RBBM_PRIMCTR_7_HI 0x0000054f
+
+#define REG_A6XX_RBBM_PRIMCTR_8_LO 0x00000550
+
+#define REG_A6XX_RBBM_PRIMCTR_8_HI 0x00000551
+
+#define REG_A6XX_RBBM_PRIMCTR_9_LO 0x00000552
+
+#define REG_A6XX_RBBM_PRIMCTR_9_HI 0x00000553
+
+#define REG_A6XX_RBBM_PRIMCTR_10_LO 0x00000554
+
+#define REG_A6XX_RBBM_PRIMCTR_10_HI 0x00000555
+
+#define REG_A6XX_RBBM_SECVID_TRUST_CNTL 0x0000f400
+
+#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO 0x0000f800
+
+#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI 0x0000f801
+
+#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0x0000f802
+
+#define REG_A6XX_RBBM_SECVID_TSB_CNTL 0x0000f803
+
+#define REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810
+
+#define REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL 0x00000010
+
+#define REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL 0x00000011
+
+#define REG_A6XX_RBBM_GBIF_HALT 0x00000016
+
+#define REG_A6XX_RBBM_GBIF_HALT_ACK 0x00000017
+
+#define REG_A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD 0x0000001c
+#define A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD_WAIT_GPU_IDLE 0x00000001
+
+#define REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000001f
+
+#define REG_A6XX_RBBM_INT_CLEAR_CMD 0x00000037
+
+#define REG_A6XX_RBBM_INT_0_MASK 0x00000038
+
+#define REG_A6XX_RBBM_SP_HYST_CNT 0x00000042
+
+#define REG_A6XX_RBBM_SW_RESET_CMD 0x00000043
+
+#define REG_A6XX_RBBM_RAC_THRESHOLD_CNT 0x00000044
+
+#define REG_A6XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
+
+#define REG_A6XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046
+
+#define REG_A6XX_RBBM_CLOCK_CNTL 0x000000ae
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP0 0x000000b0
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP1 0x000000b1
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP2 0x000000b2
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP3 0x000000b3
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP0 0x000000b4
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP1 0x000000b5
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP2 0x000000b6
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP3 0x000000b7
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP0 0x000000b8
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP1 0x000000b9
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP2 0x000000ba
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP3 0x000000bb
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP0 0x000000bc
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP1 0x000000bd
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP2 0x000000be
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP3 0x000000bf
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP0 0x000000c0
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP1 0x000000c1
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP2 0x000000c2
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP3 0x000000c3
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP0 0x000000c4
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP1 0x000000c5
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP2 0x000000c6
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP3 0x000000c7
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP0 0x000000c8
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP1 0x000000c9
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP2 0x000000ca
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP3 0x000000cb
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP0 0x000000cc
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP1 0x000000cd
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP2 0x000000ce
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP3 0x000000cf
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP0 0x000000d0
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP1 0x000000d1
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP2 0x000000d2
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP3 0x000000d3
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP0 0x000000d4
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP1 0x000000d5
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP2 0x000000d6
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP3 0x000000d7
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP0 0x000000d8
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP1 0x000000d9
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP2 0x000000da
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP3 0x000000db
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP0 0x000000dc
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP1 0x000000dd
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP2 0x000000de
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP3 0x000000df
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP0 0x000000e0
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP1 0x000000e1
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP2 0x000000e2
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP3 0x000000e3
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP0 0x000000e4
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP1 0x000000e5
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP2 0x000000e6
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP3 0x000000e7
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP0 0x000000e8
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP1 0x000000e9
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP2 0x000000ea
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP3 0x000000eb
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP0 0x000000ec
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP1 0x000000ed
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP2 0x000000ee
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP3 0x000000ef
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB0 0x000000f0
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB1 0x000000f1
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB2 0x000000f2
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB3 0x000000f3
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB0 0x000000f4
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB1 0x000000f5
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB2 0x000000f6
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB3 0x000000f7
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU0 0x000000f8
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU1 0x000000f9
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU2 0x000000fa
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU3 0x000000fb
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0 0x00000100
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1 0x00000101
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2 0x00000102
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3 0x00000103
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RAC 0x00000104
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RAC 0x00000105
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_RAC 0x00000106
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RAC 0x00000107
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00000108
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00000109
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x0000010a
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_UCHE 0x0000010b
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_UCHE 0x0000010c
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_UCHE 0x0000010d
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_UCHE 0x0000010e
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_UCHE 0x0000010f
+
+#define REG_A6XX_RBBM_CLOCK_HYST_UCHE 0x00000110
+
+#define REG_A6XX_RBBM_CLOCK_MODE_VFD 0x00000111
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_VFD 0x00000112
+
+#define REG_A6XX_RBBM_CLOCK_HYST_VFD 0x00000113
+
+#define REG_A6XX_RBBM_CLOCK_MODE_GPC 0x00000114
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_GPC 0x00000115
+
+#define REG_A6XX_RBBM_CLOCK_HYST_GPC 0x00000116
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2 0x00000117
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX 0x00000118
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX 0x00000119
+
+#define REG_A6XX_RBBM_CLOCK_HYST_GMU_GX 0x0000011a
+
+#define REG_A6XX_RBBM_CLOCK_MODE_HLSQ 0x0000011b
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_HLSQ 0x0000011c
+
+#define REG_A6XX_RBBM_CLOCK_HYST_HLSQ 0x0000011d
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE 0x00000120
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE 0x00000121
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE 0x00000122
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_A 0x00000600
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_B 0x00000601
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_C 0x00000602
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_D 0x00000603
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK 0x000000ff
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(uint32_t val)
+{
+	return ((val) << A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT) & A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK 0x0000ff00
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT 8
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(uint32_t val)
+{
+	return ((val) << A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT) & A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_CNTLT 0x00000604
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val)
+{
+	return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val)
+{
+	return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val)
+{
+	return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_CNTLM 0x00000605
+#define A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
+#define A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+{
+	return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0 0x00000608
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1 0x00000609
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2 0x0000060a
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3 0x0000060b
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0 0x0000060c
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1 0x0000060d
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2 0x0000060e
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3 0x0000060f
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0 0x00000610
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val)
+{
+	return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val)
+{
+	return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val)
+{
+	return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val)
+{
+	return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val)
+{
+	return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val)
+{
+	return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val)
+{
+	return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val)
+{
+	return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1 0x00000611
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val)
+{
+	return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val)
+{
+	return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val)
+{
+	return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val)
+{
+	return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val)
+{
+	return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val)
+{
+	return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val)
+{
+	return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
+{
+	return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0000062f
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000630
+
+static inline uint32_t REG_A6XX_VSC_PERFCTR_VSC_SEL(uint32_t i0) { return 0x00000cd8 + 0x1*i0; }
+
+#define REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000c800
+
+#define REG_A6XX_HLSQ_DBG_READ_SEL 0x0000d000
+
+#define REG_A6XX_UCHE_ADDR_MODE_CNTL 0x00000e00
+
+#define REG_A6XX_UCHE_MODE_CNTL 0x00000e01
+
+#define REG_A6XX_UCHE_WRITE_RANGE_MAX_LO 0x00000e05
+
+#define REG_A6XX_UCHE_WRITE_RANGE_MAX_HI 0x00000e06
+
+#define REG_A6XX_UCHE_WRITE_THRU_BASE_LO 0x00000e07
+
+#define REG_A6XX_UCHE_WRITE_THRU_BASE_HI 0x00000e08
+
+#define REG_A6XX_UCHE_TRAP_BASE_LO 0x00000e09
+
+#define REG_A6XX_UCHE_TRAP_BASE_HI 0x00000e0a
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MIN_LO 0x00000e0b
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MIN_HI 0x00000e0c
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MAX_LO 0x00000e0d
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MAX_HI 0x00000e0e
+
+#define REG_A6XX_UCHE_CACHE_WAYS 0x00000e17
+
+#define REG_A6XX_UCHE_FILTER_CNTL 0x00000e18
+
+#define REG_A6XX_UCHE_CLIENT_PF 0x00000e19
+#define A6XX_UCHE_CLIENT_PF_PERFSEL__MASK 0x000000ff
+#define A6XX_UCHE_CLIENT_PF_PERFSEL__SHIFT 0
+static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val)
+{
+	return ((val) << A6XX_UCHE_CLIENT_PF_PERFSEL__SHIFT) & A6XX_UCHE_CLIENT_PF_PERFSEL__MASK;
+}
+
+static inline uint32_t REG_A6XX_UCHE_PERFCTR_UCHE_SEL(uint32_t i0) { return 0x00000e1c + 0x1*i0; }
+
+#define REG_A6XX_UCHE_CMDQ_CONFIG 0x00000e3c
+
+#define REG_A6XX_VBIF_VERSION 0x00003000
+
+#define REG_A6XX_VBIF_CLKON 0x00003001
+#define A6XX_VBIF_CLKON_FORCE_ON_TESTBUS 0x00000002
+
+#define REG_A6XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A6XX_VBIF_XIN_HALT_CTRL0 0x00003080
+
+#define REG_A6XX_VBIF_XIN_HALT_CTRL1 0x00003081
+
+#define REG_A6XX_VBIF_TEST_BUS_OUT_CTRL 0x00003084
+
+#define REG_A6XX_VBIF_TEST_BUS1_CTRL0 0x00003085
+
+#define REG_A6XX_VBIF_TEST_BUS1_CTRL1 0x00003086
+#define A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__MASK 0x0000000f
+#define A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__SHIFT 0
+static inline uint32_t A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL(uint32_t val)
+{
+	return ((val) << A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__SHIFT) & A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__MASK;
+}
+
+#define REG_A6XX_VBIF_TEST_BUS2_CTRL0 0x00003087
+
+#define REG_A6XX_VBIF_TEST_BUS2_CTRL1 0x00003088
+#define A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__MASK 0x000001ff
+#define A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__SHIFT 0
+static inline uint32_t A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL(uint32_t val)
+{
+	return ((val) << A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__SHIFT) & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__MASK;
+}
+
+#define REG_A6XX_VBIF_TEST_BUS_OUT 0x0000308c
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL0 0x000030d0
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL1 0x000030d1
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL2 0x000030d2
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL3 0x000030d3
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW0 0x000030d8
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW1 0x000030d9
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW2 0x000030da
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW3 0x000030db
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH0 0x000030e0
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH1 0x000030e1
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH2 0x000030e2
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH3 0x000030e3
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW1 0x00003111
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW2 0x00003112
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH0 0x00003118
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
+
+#define REG_A6XX_GBIF_SCACHE_CNTL0 0x00003c01
+
+#define REG_A6XX_GBIF_SCACHE_CNTL1 0x00003c02
+
+#define REG_A6XX_GBIF_QSB_SIDE0 0x00003c03
+
+#define REG_A6XX_GBIF_QSB_SIDE1 0x00003c04
+
+#define REG_A6XX_GBIF_QSB_SIDE2 0x00003c05
+
+#define REG_A6XX_GBIF_QSB_SIDE3 0x00003c06
+
+#define REG_A6XX_GBIF_HALT 0x00003c45
+
+#define REG_A6XX_GBIF_HALT_ACK 0x00003c46
+
+#define REG_A6XX_GBIF_PERF_PWR_CNT_EN 0x00003cc0
+
+#define REG_A6XX_GBIF_PERF_CNT_SEL 0x00003cc2
+
+#define REG_A6XX_GBIF_PERF_PWR_CNT_SEL 0x00003cc3
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW0 0x00003cc4
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW1 0x00003cc5
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW2 0x00003cc6
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW3 0x00003cc7
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH0 0x00003cc8
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH1 0x00003cc9
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH2 0x00003cca
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH3 0x00003ccb
+
+#define REG_A6XX_GBIF_PWR_CNT_LOW0 0x00003ccc
+
+#define REG_A6XX_GBIF_PWR_CNT_LOW1 0x00003ccd
+
+#define REG_A6XX_GBIF_PWR_CNT_LOW2 0x00003cce
+
+#define REG_A6XX_GBIF_PWR_CNT_HIGH0 0x00003ccf
+
+#define REG_A6XX_GBIF_PWR_CNT_HIGH1 0x00003cd0
+
+#define REG_A6XX_GBIF_PWR_CNT_HIGH2 0x00003cd1
+
+#define REG_A6XX_VSC_DBG_ECO_CNTL 0x00000c00
+
+#define REG_A6XX_VSC_BIN_SIZE 0x00000c02
+#define A6XX_VSC_BIN_SIZE_WIDTH__MASK 0x000000ff
+#define A6XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+	return ((val >> 5) << A6XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A6XX_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A6XX_VSC_BIN_SIZE_HEIGHT__MASK 0x0001ff00
+#define A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT 8
+static inline uint32_t A6XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+	return ((val >> 4) << A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A6XX_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A6XX_VSC_DRAW_STRM_SIZE_ADDRESS 0x00000c03
+
+#define REG_A6XX_VSC_BIN_COUNT 0x00000c06
+#define A6XX_VSC_BIN_COUNT_NX__MASK 0x000007fe
+#define A6XX_VSC_BIN_COUNT_NX__SHIFT 1
+static inline uint32_t A6XX_VSC_BIN_COUNT_NX(uint32_t val)
+{
+	return ((val) << A6XX_VSC_BIN_COUNT_NX__SHIFT) & A6XX_VSC_BIN_COUNT_NX__MASK;
+}
+#define A6XX_VSC_BIN_COUNT_NY__MASK 0x001ff800
+#define A6XX_VSC_BIN_COUNT_NY__SHIFT 11
+static inline uint32_t A6XX_VSC_BIN_COUNT_NY(uint32_t val)
+{
+	return ((val) << A6XX_VSC_BIN_COUNT_NY__SHIFT) & A6XX_VSC_BIN_COUNT_NY__MASK;
+}
+
+static inline uint32_t REG_A6XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+#define A6XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
+#define A6XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
+{
+	return ((val) << A6XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_X__MASK;
+}
+#define A6XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00
+#define A6XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val)
+{
+	return ((val) << A6XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_Y__MASK;
+}
+#define A6XX_VSC_PIPE_CONFIG_REG_W__MASK 0x03f00000
+#define A6XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_W(uint32_t val)
+{
+	return ((val) << A6XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_W__MASK;
+}
+#define A6XX_VSC_PIPE_CONFIG_REG_H__MASK 0xfc000000
+#define A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT 26
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
+{
+	return ((val) << A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_H__MASK;
+}
+
+#define REG_A6XX_VSC_PRIM_STRM_ADDRESS 0x00000c30
+
+#define REG_A6XX_VSC_PRIM_STRM_PITCH 0x00000c32
+
+#define REG_A6XX_VSC_PRIM_STRM_LIMIT 0x00000c33
+
+#define REG_A6XX_VSC_DRAW_STRM_ADDRESS 0x00000c34
+
+#define REG_A6XX_VSC_DRAW_STRM_PITCH 0x00000c36
+
+#define REG_A6XX_VSC_DRAW_STRM_LIMIT 0x00000c37
+
+static inline uint32_t REG_A6XX_VSC_STATE(uint32_t i0) { return 0x00000c38 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_STATE_REG(uint32_t i0) { return 0x00000c38 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_PRIM_STRM_SIZE(uint32_t i0) { return 0x00000c58 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_PRIM_STRM_SIZE_REG(uint32_t i0) { return 0x00000c58 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_DRAW_STRM_SIZE(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_DRAW_STRM_SIZE_REG(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
+
+#define REG_A6XX_UCHE_UNKNOWN_0E12 0x00000e12
+
+#define REG_A6XX_GRAS_CL_CNTL 0x00008000
+#define A6XX_GRAS_CL_CNTL_CLIP_DISABLE 0x00000001
+#define A6XX_GRAS_CL_CNTL_ZNEAR_CLIP_DISABLE 0x00000002
+#define A6XX_GRAS_CL_CNTL_ZFAR_CLIP_DISABLE 0x00000004
+#define A6XX_GRAS_CL_CNTL_UNK5 0x00000020
+#define A6XX_GRAS_CL_CNTL_ZERO_GB_SCALE_Z 0x00000040
+#define A6XX_GRAS_CL_CNTL_VP_CLIP_CODE_IGNORE 0x00000080
+#define A6XX_GRAS_CL_CNTL_VP_XFORM_DISABLE 0x00000100
+#define A6XX_GRAS_CL_CNTL_PERSP_DIVISION_DISABLE 0x00000200
+
+#define REG_A6XX_GRAS_VS_CL_CNTL 0x00008001
+#define A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_GRAS_VS_CL_CNTL_CLIP_MASK(uint32_t val)
+{
+	return ((val) << A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT) & A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK 0x0000ff00
+#define A6XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT 8
+static inline uint32_t A6XX_GRAS_VS_CL_CNTL_CULL_MASK(uint32_t val)
+{
+	return ((val) << A6XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT) & A6XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK;
+}
+
+#define REG_A6XX_GRAS_DS_CL_CNTL 0x00008002
+#define A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_GRAS_DS_CL_CNTL_CLIP_MASK(uint32_t val)
+{
+	return ((val) << A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__SHIFT) & A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_GRAS_DS_CL_CNTL_CULL_MASK__MASK 0x0000ff00
+#define A6XX_GRAS_DS_CL_CNTL_CULL_MASK__SHIFT 8
+static inline uint32_t A6XX_GRAS_DS_CL_CNTL_CULL_MASK(uint32_t val)
+{
+	return ((val) << A6XX_GRAS_DS_CL_CNTL_CULL_MASK__SHIFT) & A6XX_GRAS_DS_CL_CNTL_CULL_MASK__MASK;
+}
+
+#define REG_A6XX_GRAS_GS_CL_CNTL 0x00008003
+#define A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_GRAS_GS_CL_CNTL_CLIP_MASK(uint32_t val)
+{
+	return ((val) << A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__SHIFT) & A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_GRAS_GS_CL_CNTL_CULL_MASK__MASK 0x0000ff00
+#define A6XX_GRAS_GS_CL_CNTL_CULL_MASK__SHIFT 8
+static inline uint32_t A6XX_GRAS_GS_CL_CNTL_CULL_MASK(uint32_t val)
+{
+	return ((val) << A6XX_GRAS_GS_CL_CNTL_CULL_MASK__SHIFT) & A6XX_GRAS_GS_CL_CNTL_CULL_MASK__MASK;
+}
+
+#define REG_A6XX_GRAS_MAX_LAYER_INDEX 0x00008004
+
+#define REG_A6XX_GRAS_CNTL 0x00008005
+#define A6XX_GRAS_CNTL_IJ_PERSP_PIXEL 0x00000001
+#define A6XX_GRAS_CNTL_IJ_PERSP_CENTROID 0x00000002
+#define A6XX_GRAS_CNTL_IJ_PERSP_SAMPLE 0x00000004
+#define A6XX_GRAS_CNTL_IJ_LINEAR_PIXEL 0x00000008
+#define A6XX_GRAS_CNTL_IJ_LINEAR_CENTROID 0x00000010
+#define A6XX_GRAS_CNTL_IJ_LINEAR_SAMPLE 0x00000020
+#define A6XX_GRAS_CNTL_COORD_MASK__MASK 0x000003c0
+#define A6XX_GRAS_CNTL_COORD_MASK__SHIFT 6
+static inline uint32_t A6XX_GRAS_CNTL_COORD_MASK(uint32_t val)
+{
+	return ((val) << A6XX_GRAS_CNTL_COORD_MASK__SHIFT) & A6XX_GRAS_CNTL_COORD_MASK__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x00008006
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000001ff
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
+{
+	return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
+}
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x0007fc00
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
+{
+	return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT(uint32_t i0) { return 0x00008010 + 0x6*i0; }
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_XOFFSET(uint32_t i0) { return 0x00008010 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_XOFFSET__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_XOFFSET__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_XOFFSET(float val)
+{
+	return ((fui(val)) << A6XX_GRAS_CL_VPORT_XOFFSET__SHIFT) & A6XX_GRAS_CL_VPORT_XOFFSET__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_XSCALE(uint32_t i0) { return 0x00008011 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_XSCALE__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_XSCALE__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_XSCALE(float val)
+{
+	return ((fui(val)) << A6XX_GRAS_CL_VPORT_XSCALE__SHIFT) & A6XX_GRAS_CL_VPORT_XSCALE__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_YOFFSET(uint32_t i0) { return 0x00008012 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_YOFFSET__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_YOFFSET__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_YOFFSET(float val)
+{
+	return ((fui(val)) << A6XX_GRAS_CL_VPORT_YOFFSET__SHIFT) & A6XX_GRAS_CL_VPORT_YOFFSET__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_YSCALE(uint32_t i0) { return 0x00008013 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_YSCALE__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_YSCALE__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_YSCALE(float val)
+{
+	return ((fui(val)) << A6XX_GRAS_CL_VPORT_YSCALE__SHIFT) & A6XX_GRAS_CL_VPORT_YSCALE__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_ZOFFSET(uint32_t i0) { return 0x00008014 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_ZOFFSET__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_ZOFFSET__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_ZOFFSET(float val)
+{
+	return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZOFFSET__SHIFT) & A6XX_GRAS_CL_VPORT_ZOFFSET__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_ZSCALE(uint32_t i0) { return 0x00008015 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_ZSCALE__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_ZSCALE__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_ZSCALE(float val)
+{
+	return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZSCALE__SHIFT) & A6XX_GRAS_CL_VPORT_ZSCALE__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_Z_CLAMP(uint32_t i0) { return 0x00008070 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_GRAS_CL_Z_CLAMP_MIN(uint32_t i0) { return 0x00008070 + 0x2*i0; }
+#define A6XX_GRAS_CL_Z_CLAMP_MIN__MASK 0xffffffff
+#define A6XX_GRAS_CL_Z_CLAMP_MIN__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_Z_CLAMP_MIN(float val)
+{
+	return ((fui(val)) << A6XX_GRAS_CL_Z_CLAMP_MIN__SHIFT) & A6XX_GRAS_CL_Z_CLAMP_MIN__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_Z_CLAMP_MAX(uint32_t i0) { return 0x00008071 + 0x2*i0; }
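+
+/*
+ * The viewport and Z-clamp fields above occupy the full dword (mask
+ * 0xffffffff, shift 0) and hold raw IEEE-754 bits: fui() reinterprets the
+ * float's bit pattern as a uint32_t.  A sketch of what such a helper is
+ * assumed to look like (the real definition lives elsewhere in the
+ * driver):
+ *
+ *	static inline uint32_t fui(float f)
+ *	{
+ *		union { float f; uint32_t u; } v = { .f = f };
+ *		return v.u;
+ *	}
+ */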
+#define A6XX_GRAS_CL_Z_CLAMP_MAX__MASK 0xffffffff
+#define A6XX_GRAS_CL_Z_CLAMP_MAX__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_Z_CLAMP_MAX(float val)
+{
+	return ((fui(val)) << A6XX_GRAS_CL_Z_CLAMP_MAX__SHIFT) & A6XX_GRAS_CL_Z_CLAMP_MAX__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_CNTL 0x00008090
+#define A6XX_GRAS_SU_CNTL_CULL_FRONT 0x00000001
+#define A6XX_GRAS_SU_CNTL_CULL_BACK 0x00000002
+#define A6XX_GRAS_SU_CNTL_FRONT_CW 0x00000004
+#define A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK 0x000007f8
+#define A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(float val)
+{
+	return ((((int32_t)(val * 4.0))) << A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT) & A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
+}
+#define A6XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800
+#define A6XX_GRAS_SU_CNTL_UNK12__MASK 0x00001000
+#define A6XX_GRAS_SU_CNTL_UNK12__SHIFT 12
+static inline uint32_t A6XX_GRAS_SU_CNTL_UNK12(uint32_t val)
+{
+	return ((val) << A6XX_GRAS_SU_CNTL_UNK12__SHIFT) & A6XX_GRAS_SU_CNTL_UNK12__MASK;
+}
+#define A6XX_GRAS_SU_CNTL_LINE_MODE__MASK 0x00002000
+#define A6XX_GRAS_SU_CNTL_LINE_MODE__SHIFT 13
+static inline uint32_t A6XX_GRAS_SU_CNTL_LINE_MODE(enum a5xx_line_mode val)
+{
+	return ((val) << A6XX_GRAS_SU_CNTL_LINE_MODE__SHIFT) & A6XX_GRAS_SU_CNTL_LINE_MODE__MASK;
+}
+#define A6XX_GRAS_SU_CNTL_UNK15__MASK 0x00018000
+#define A6XX_GRAS_SU_CNTL_UNK15__SHIFT 15
+static inline uint32_t A6XX_GRAS_SU_CNTL_UNK15(uint32_t val)
+{
+	return ((val) << A6XX_GRAS_SU_CNTL_UNK15__SHIFT) & A6XX_GRAS_SU_CNTL_UNK15__MASK;
+}
+#define A6XX_GRAS_SU_CNTL_UNK17 0x00020000
+#define A6XX_GRAS_SU_CNTL_MULTIVIEW_ENABLE 0x00040000
+#define A6XX_GRAS_SU_CNTL_UNK19__MASK 0x00780000
+#define A6XX_GRAS_SU_CNTL_UNK19__SHIFT 19
+static inline uint32_t A6XX_GRAS_SU_CNTL_UNK19(uint32_t val)
+{
+	return ((val) << A6XX_GRAS_SU_CNTL_UNK19__SHIFT) & A6XX_GRAS_SU_CNTL_UNK19__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POINT_MINMAX 0x00008091
+#define A6XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A6XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+	return ((((uint32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A6XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A6XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A6XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A6XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+	return ((((uint32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A6XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POINT_SIZE 0x00008092
+#define A6XX_GRAS_SU_POINT_SIZE__MASK 0x0000ffff
+#define A6XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POINT_SIZE(float val)
+{
+	return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_SIZE__SHIFT) & A6XX_GRAS_SU_POINT_SIZE__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_DEPTH_PLANE_CNTL 0x00008094
+#define A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__MASK 0x00000003
+#define A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE(enum a6xx_ztest_mode val)
+{
+	return ((val) << A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__SHIFT) & A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE 0x00008095
+#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+	return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
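+
+/*
+ * Several SU fields above are fixed point rather than float: the builder
+ * multiplies by a power of two before truncating, so POINT_SIZE and
+ * POINT_MINMAX are 12.4 fixed point (val * 16.0) and LINEHALFWIDTH keeps
+ * two fractional bits (val * 4.0).  Worked example from the helpers:
+ *
+ *	A6XX_GRAS_SU_POINT_SIZE(1.5f) == 0x18   (1.5 * 16)
+ */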
+
+#define REG_A6XX_GRAS_SU_POLY_OFFSET_OFFSET 0x00008096
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+	return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP 0x00008097
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK 0xffffffff
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val)
+{
+	return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO 0x00008098
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_format val)
+{
+	return ((val) << A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__MASK 0x00000008
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__SHIFT 3
+static inline uint32_t A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3(uint32_t val)
+{
+	return ((val) << A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__SHIFT) & A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL 0x00008099
+#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_CONSERVATIVERASEN 0x00000001
+#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT__MASK 0x00000006
+#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT__SHIFT 1
+static inline uint32_t A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT(uint32_t val)
+{
+	return ((val) << A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT__SHIFT) & A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT__MASK;
+}
+#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_INNERCONSERVATIVERASEN 0x00000008
+#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4__MASK 0x00000030
+#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4__SHIFT 4
+static inline uint32_t A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4(uint32_t val)
+{
+	return ((val) << A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4__SHIFT) & A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_PATH_RENDERING_CNTL 0x0000809a
+#define A6XX_GRAS_SU_PATH_RENDERING_CNTL_UNK0 0x00000001
+#define A6XX_GRAS_SU_PATH_RENDERING_CNTL_LINELENGTHEN 0x00000002
+
+#define REG_A6XX_GRAS_VS_LAYER_CNTL 0x0000809b
+#define A6XX_GRAS_VS_LAYER_CNTL_WRITES_LAYER 0x00000001
+#define A6XX_GRAS_VS_LAYER_CNTL_WRITES_VIEW 0x00000002
+
+#define REG_A6XX_GRAS_GS_LAYER_CNTL 0x0000809c
+#define A6XX_GRAS_GS_LAYER_CNTL_WRITES_LAYER 0x00000001
+#define A6XX_GRAS_GS_LAYER_CNTL_WRITES_VIEW 0x00000002
+
+#define REG_A6XX_GRAS_DS_LAYER_CNTL 0x0000809d
+#define A6XX_GRAS_DS_LAYER_CNTL_WRITES_LAYER 0x00000001
+#define A6XX_GRAS_DS_LAYER_CNTL_WRITES_VIEW 0x00000002
+
+#define REG_A6XX_GRAS_SC_CNTL 0x000080a0
+#define A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE__MASK 0x00000007
+#define A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE(uint32_t val)
+{
+	return ((val) << A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE__SHIFT) & A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE__MASK;
+}
+#define A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE__MASK 0x00000018
+#define A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE__SHIFT 3
+static inline uint32_t A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE(enum a6xx_single_prim_mode val)
+{
+	return ((val) << A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE__SHIFT) & A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE__MASK;
+}
+#define A6XX_GRAS_SC_CNTL_RASTER_MODE__MASK 0x00000020
+#define A6XX_GRAS_SC_CNTL_RASTER_MODE__SHIFT 5
+static inline uint32_t A6XX_GRAS_SC_CNTL_RASTER_MODE(enum a6xx_raster_mode val)
+{
+	return ((val) << A6XX_GRAS_SC_CNTL_RASTER_MODE__SHIFT) & A6XX_GRAS_SC_CNTL_RASTER_MODE__MASK;
+}
+#define A6XX_GRAS_SC_CNTL_RASTER_DIRECTION__MASK 0x000000c0
+#define A6XX_GRAS_SC_CNTL_RASTER_DIRECTION__SHIFT 6
+static inline uint32_t A6XX_GRAS_SC_CNTL_RASTER_DIRECTION(enum a6xx_raster_direction val)
+{
+	return ((val) << A6XX_GRAS_SC_CNTL_RASTER_DIRECTION__SHIFT) & A6XX_GRAS_SC_CNTL_RASTER_DIRECTION__MASK;
+}
+#define A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__MASK 0x00000100
+#define A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__SHIFT 8
+static inline uint32_t A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION(enum a6xx_sequenced_thread_dist val)
+{
+	return ((val) << A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__SHIFT) & A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__MASK;
+}
+#define A6XX_GRAS_SC_CNTL_UNK9__MASK 0x00000e00
+#define A6XX_GRAS_SC_CNTL_UNK9__SHIFT 9
+static inline uint32_t A6XX_GRAS_SC_CNTL_UNK9(uint32_t val)
+{
+	return ((val) << A6XX_GRAS_SC_CNTL_UNK9__SHIFT) & A6XX_GRAS_SC_CNTL_UNK9__MASK;
+}
+#define A6XX_GRAS_SC_CNTL_EARLYVIZOUTEN 0x00001000
+
+#define REG_A6XX_GRAS_BIN_CONTROL 0x000080a1
+#define A6XX_GRAS_BIN_CONTROL_BINW__MASK 0x0000003f
+#define A6XX_GRAS_BIN_CONTROL_BINW__SHIFT 0
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINW(uint32_t val)
+{
+	return ((val >> 5) << A6XX_GRAS_BIN_CONTROL_BINW__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINW__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_BINH__MASK 0x00007f00
+#define A6XX_GRAS_BIN_CONTROL_BINH__SHIFT 8
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINH(uint32_t val)
+{
+	return ((val >> 4) << A6XX_GRAS_BIN_CONTROL_BINH__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINH__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_RENDER_MODE__MASK 0x001c0000
+#define A6XX_GRAS_BIN_CONTROL_RENDER_MODE__SHIFT 18
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_RENDER_MODE(enum a6xx_render_mode val)
+{
+	return ((val) << A6XX_GRAS_BIN_CONTROL_RENDER_MODE__SHIFT) & A6XX_GRAS_BIN_CONTROL_RENDER_MODE__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_FORCE_LRZ_WRITE_DIS 0x00200000
+#define A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION__MASK 0x00c00000
+#define A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION__SHIFT 22
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION(enum a6xx_buffers_location val)
+{
+	return ((val) << A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION__SHIFT) & A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK 0x07000000
+#define A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT 24
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK(uint32_t val)
+{
+	return ((val) << A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT) & A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_UNK27__MASK 0x08000000
+#define A6XX_GRAS_BIN_CONTROL_UNK27__SHIFT 27
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_UNK27(uint32_t val)
+{
+	return ((val) << A6XX_GRAS_BIN_CONTROL_UNK27__SHIFT) & A6XX_GRAS_BIN_CONTROL_UNK27__MASK;
+}
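+
+/*
+ * The BINW/BINH builders above take the bin size in pixels and store it
+ * pre-shifted (val >> 5 and val >> 4), i.e. the fields appear to count in
+ * 32x16 pixel units.  Worked example from the helpers:
+ *
+ *	A6XX_GRAS_BIN_CONTROL_BINW(96) == 3       (96 / 32)
+ *	A6XX_GRAS_BIN_CONTROL_BINH(32) == 0x200   ((32 / 16) << 8)
+ */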
+
+#define REG_A6XX_GRAS_RAS_MSAA_CNTL 0x000080a2
+#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+	return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_GRAS_RAS_MSAA_CNTL_UNK2__MASK 0x00000004
+#define A6XX_GRAS_RAS_MSAA_CNTL_UNK2__SHIFT 2
+static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_UNK2(uint32_t val)
+{
+	return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_UNK2__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_UNK2__MASK;
+}
+#define A6XX_GRAS_RAS_MSAA_CNTL_UNK3__MASK 0x00000008
+#define A6XX_GRAS_RAS_MSAA_CNTL_UNK3__SHIFT 3
+static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_UNK3(uint32_t val)
+{
+	return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_UNK3__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_UNK3__MASK;
+}
+
+#define REG_A6XX_GRAS_DEST_MSAA_CNTL 0x000080a3
+#define A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+	return ((val) << A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A6XX_GRAS_SAMPLE_CONFIG 0x000080a4
+#define A6XX_GRAS_SAMPLE_CONFIG_UNK0 0x00000001
+#define A6XX_GRAS_SAMPLE_CONFIG_LOCATION_ENABLE 0x00000002
+
+#define REG_A6XX_GRAS_SAMPLE_LOCATION_0 0x000080a5
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK 0x0000000f
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK 0x000000f0
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT 4
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK 0x00000f00
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT 8
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK 0x0000f000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT 12
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK 0x000f0000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT 16
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK 0x00f00000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT 20
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK 0x0f000000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT 24
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
+}
return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK; +} +#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK 0xf0000000 +#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT 28 +static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK; +} + +#define REG_A6XX_GRAS_SAMPLE_LOCATION_1 0x000080a6 +#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK 0x0000000f +#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT 0 +static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK; +} +#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK 0x000000f0 +#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT 4 +static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK; +} +#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK 0x00000f00 +#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT 8 +static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK; +} +#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK 0x0000f000 +#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT 12 +static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK; +} +#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK 0x000f0000 +#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT 16 +static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK; +} +#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK 0x00f00000 +#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT 20 +static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK; +} +#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK 0x0f000000 +#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT 24 +static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK; +} +#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK 0xf0000000 +#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT 28 +static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK; +} + +#define REG_A6XX_GRAS_UNKNOWN_80AF 0x000080af + +static inline uint32_t REG_A6XX_GRAS_SC_SCREEN_SCISSOR(uint32_t i0) { return 0x000080b0 + 0x2*i0; } + +static inline uint32_t REG_A6XX_GRAS_SC_SCREEN_SCISSOR_TL(uint32_t i0) { return 0x000080b0 + 0x2*i0; } +#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x0000ffff +#define 
A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0 +static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val) +{ + return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK; +} +#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0xffff0000 +#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16 +static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val) +{ + return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK; +} + +static inline uint32_t REG_A6XX_GRAS_SC_SCREEN_SCISSOR_BR(uint32_t i0) { return 0x000080b1 + 0x2*i0; } +#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x0000ffff +#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0 +static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val) +{ + return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK; +} +#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0xffff0000 +#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16 +static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val) +{ + return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK; +} + +static inline uint32_t REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR(uint32_t i0) { return 0x000080d0 + 0x2*i0; } + +static inline uint32_t REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL(uint32_t i0) { return 0x000080d0 + 0x2*i0; } +#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__MASK 0x0000ffff +#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__SHIFT 0 +static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X(uint32_t val) +{ + return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__MASK; +} +#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__MASK 0xffff0000 +#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__SHIFT 16 +static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y(uint32_t val) +{ + return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__MASK; +} + +static inline uint32_t REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR(uint32_t i0) { return 0x000080d1 + 0x2*i0; } +#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__MASK 0x0000ffff +#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__SHIFT 0 +static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X(uint32_t val) +{ + return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__MASK; +} +#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__MASK 0xffff0000 +#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__SHIFT 16 +static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y(uint32_t val) +{ + return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__MASK; +} + +#define REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL 0x000080f0 +#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00003fff +#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0 +static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val) +{ + return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK; +} +#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x3fff0000 +#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16 +static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val) +{ + return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK; +} + +#define REG_A6XX_GRAS_SC_WINDOW_SCISSOR_BR 0x000080f1 +#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00003fff +#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0 +static inline uint32_t 
A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val) +{ + return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK; +} +#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x3fff0000 +#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16 +static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val) +{ + return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK; +} + +#define REG_A6XX_GRAS_LRZ_CNTL 0x00008100 +#define A6XX_GRAS_LRZ_CNTL_ENABLE 0x00000001 +#define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002 +#define A6XX_GRAS_LRZ_CNTL_GREATER 0x00000004 +#define A6XX_GRAS_LRZ_CNTL_FC_ENABLE 0x00000008 +#define A6XX_GRAS_LRZ_CNTL_Z_TEST_ENABLE 0x00000010 +#define A6XX_GRAS_LRZ_CNTL_Z_BOUNDS_ENABLE 0x00000020 +#define A6XX_GRAS_LRZ_CNTL_UNK6__MASK 0x000003c0 +#define A6XX_GRAS_LRZ_CNTL_UNK6__SHIFT 6 +static inline uint32_t A6XX_GRAS_LRZ_CNTL_UNK6(uint32_t val) +{ + return ((val) << A6XX_GRAS_LRZ_CNTL_UNK6__SHIFT) & A6XX_GRAS_LRZ_CNTL_UNK6__MASK; +} + +#define REG_A6XX_GRAS_LRZ_PS_INPUT_CNTL 0x00008101 +#define A6XX_GRAS_LRZ_PS_INPUT_CNTL_SAMPLEID 0x00000001 +#define A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__MASK 0x00000006 +#define A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__SHIFT 1 +static inline uint32_t A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE(enum a6xx_fragcoord_sample_mode val) +{ + return ((val) << A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__SHIFT) & A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__MASK; +} + +#define REG_A6XX_GRAS_LRZ_MRT_BUF_INFO_0 0x00008102 +#define A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT__MASK 0x000000ff +#define A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT__SHIFT 0 +static inline uint32_t A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT(enum a6xx_format val) +{ + return ((val) << A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT__SHIFT) & A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT__MASK; +} + +#define REG_A6XX_GRAS_LRZ_BUFFER_BASE 0x00008103 +#define A6XX_GRAS_LRZ_BUFFER_BASE__MASK 0xffffffff +#define A6XX_GRAS_LRZ_BUFFER_BASE__SHIFT 0 +static inline uint32_t A6XX_GRAS_LRZ_BUFFER_BASE(uint32_t val) +{ + return ((val) << A6XX_GRAS_LRZ_BUFFER_BASE__SHIFT) & A6XX_GRAS_LRZ_BUFFER_BASE__MASK; +} + +#define REG_A6XX_GRAS_LRZ_BUFFER_PITCH 0x00008105 +#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK 0x000000ff +#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT 0 +static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH(uint32_t val) +{ + return ((val >> 5) << A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK; +} +#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK 0x1ffffc00 +#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT 10 +static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(uint32_t val) +{ + return ((val >> 4) << A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK; +} + +#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE 0x00008106 +#define A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__MASK 0xffffffff +#define A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__SHIFT 0 +static inline uint32_t A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(uint32_t val) +{ + return ((val) << A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__SHIFT) & A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__MASK; +} + +#define REG_A6XX_GRAS_SAMPLE_CNTL 0x00008109 +#define A6XX_GRAS_SAMPLE_CNTL_PER_SAMP_MODE 0x00000001 + +#define REG_A6XX_GRAS_UNKNOWN_810A 0x0000810a +#define A6XX_GRAS_UNKNOWN_810A_UNK0__MASK 0x000007ff +#define A6XX_GRAS_UNKNOWN_810A_UNK0__SHIFT 0 +static inline uint32_t 
A6XX_GRAS_UNKNOWN_810A_UNK0(uint32_t val) +{ + return ((val) << A6XX_GRAS_UNKNOWN_810A_UNK0__SHIFT) & A6XX_GRAS_UNKNOWN_810A_UNK0__MASK; +} +#define A6XX_GRAS_UNKNOWN_810A_UNK16__MASK 0x07ff0000 +#define A6XX_GRAS_UNKNOWN_810A_UNK16__SHIFT 16 +static inline uint32_t A6XX_GRAS_UNKNOWN_810A_UNK16(uint32_t val) +{ + return ((val) << A6XX_GRAS_UNKNOWN_810A_UNK16__SHIFT) & A6XX_GRAS_UNKNOWN_810A_UNK16__MASK; +} +#define A6XX_GRAS_UNKNOWN_810A_UNK28__MASK 0xf0000000 +#define A6XX_GRAS_UNKNOWN_810A_UNK28__SHIFT 28 +static inline uint32_t A6XX_GRAS_UNKNOWN_810A_UNK28(uint32_t val) +{ + return ((val) << A6XX_GRAS_UNKNOWN_810A_UNK28__SHIFT) & A6XX_GRAS_UNKNOWN_810A_UNK28__MASK; +} + +#define REG_A6XX_GRAS_UNKNOWN_8110 0x00008110 + +#define REG_A6XX_GRAS_2D_BLIT_CNTL 0x00008400 +#define A6XX_GRAS_2D_BLIT_CNTL_ROTATE__MASK 0x00000007 +#define A6XX_GRAS_2D_BLIT_CNTL_ROTATE__SHIFT 0 +static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_ROTATE(enum a6xx_rotation val) +{ + return ((val) << A6XX_GRAS_2D_BLIT_CNTL_ROTATE__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_ROTATE__MASK; +} +#define A6XX_GRAS_2D_BLIT_CNTL_OVERWRITEEN 0x00000008 +#define A6XX_GRAS_2D_BLIT_CNTL_UNK4__MASK 0x00000070 +#define A6XX_GRAS_2D_BLIT_CNTL_UNK4__SHIFT 4 +static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_UNK4(uint32_t val) +{ + return ((val) << A6XX_GRAS_2D_BLIT_CNTL_UNK4__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_UNK4__MASK; +} +#define A6XX_GRAS_2D_BLIT_CNTL_SOLID_COLOR 0x00000080 +#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK 0x0000ff00 +#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT 8 +static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_format val) +{ + return ((val) << A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK; +} +#define A6XX_GRAS_2D_BLIT_CNTL_SCISSOR 0x00010000 +#define A6XX_GRAS_2D_BLIT_CNTL_UNK17__MASK 0x00060000 +#define A6XX_GRAS_2D_BLIT_CNTL_UNK17__SHIFT 17 +static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_UNK17(uint32_t val) +{ + return ((val) << A6XX_GRAS_2D_BLIT_CNTL_UNK17__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_UNK17__MASK; +} +#define A6XX_GRAS_2D_BLIT_CNTL_D24S8 0x00080000 +#define A6XX_GRAS_2D_BLIT_CNTL_MASK__MASK 0x00f00000 +#define A6XX_GRAS_2D_BLIT_CNTL_MASK__SHIFT 20 +static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_MASK(uint32_t val) +{ + return ((val) << A6XX_GRAS_2D_BLIT_CNTL_MASK__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_MASK__MASK; +} +#define A6XX_GRAS_2D_BLIT_CNTL_IFMT__MASK 0x1f000000 +#define A6XX_GRAS_2D_BLIT_CNTL_IFMT__SHIFT 24 +static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_IFMT(enum a6xx_2d_ifmt val) +{ + return ((val) << A6XX_GRAS_2D_BLIT_CNTL_IFMT__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_IFMT__MASK; +} +#define A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE__MASK 0x20000000 +#define A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE__SHIFT 29 +static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE(enum a6xx_raster_mode val) +{ + return ((val) << A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE__MASK; +} + +#define REG_A6XX_GRAS_2D_SRC_TL_X 0x00008401 + +#define REG_A6XX_GRAS_2D_SRC_BR_X 0x00008402 + +#define REG_A6XX_GRAS_2D_SRC_TL_Y 0x00008403 + +#define REG_A6XX_GRAS_2D_SRC_BR_Y 0x00008404 + +#define REG_A6XX_GRAS_2D_DST_TL 0x00008405 +#define A6XX_GRAS_2D_DST_TL_X__MASK 0x00003fff +#define A6XX_GRAS_2D_DST_TL_X__SHIFT 0 +static inline uint32_t A6XX_GRAS_2D_DST_TL_X(uint32_t val) +{ + return ((val) << A6XX_GRAS_2D_DST_TL_X__SHIFT) & A6XX_GRAS_2D_DST_TL_X__MASK; +} +#define A6XX_GRAS_2D_DST_TL_Y__MASK 0x3fff0000 +#define A6XX_GRAS_2D_DST_TL_Y__SHIFT 16 +static inline uint32_t 
A6XX_GRAS_2D_DST_TL_Y(uint32_t val) +{ + return ((val) << A6XX_GRAS_2D_DST_TL_Y__SHIFT) & A6XX_GRAS_2D_DST_TL_Y__MASK; +} + +#define REG_A6XX_GRAS_2D_DST_BR 0x00008406 +#define A6XX_GRAS_2D_DST_BR_X__MASK 0x00003fff +#define A6XX_GRAS_2D_DST_BR_X__SHIFT 0 +static inline uint32_t A6XX_GRAS_2D_DST_BR_X(uint32_t val) +{ + return ((val) << A6XX_GRAS_2D_DST_BR_X__SHIFT) & A6XX_GRAS_2D_DST_BR_X__MASK; +} +#define A6XX_GRAS_2D_DST_BR_Y__MASK 0x3fff0000 +#define A6XX_GRAS_2D_DST_BR_Y__SHIFT 16 +static inline uint32_t A6XX_GRAS_2D_DST_BR_Y(uint32_t val) +{ + return ((val) << A6XX_GRAS_2D_DST_BR_Y__SHIFT) & A6XX_GRAS_2D_DST_BR_Y__MASK; +} + +#define REG_A6XX_GRAS_2D_UNKNOWN_8407 0x00008407 + +#define REG_A6XX_GRAS_2D_UNKNOWN_8408 0x00008408 + +#define REG_A6XX_GRAS_2D_UNKNOWN_8409 0x00008409 + +#define REG_A6XX_GRAS_2D_RESOLVE_CNTL_1 0x0000840a +#define A6XX_GRAS_2D_RESOLVE_CNTL_1_X__MASK 0x00003fff +#define A6XX_GRAS_2D_RESOLVE_CNTL_1_X__SHIFT 0 +static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_1_X(uint32_t val) +{ + return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_1_X__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_1_X__MASK; +} +#define A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__MASK 0x3fff0000 +#define A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__SHIFT 16 +static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_1_Y(uint32_t val) +{ + return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__MASK; +} + +#define REG_A6XX_GRAS_2D_RESOLVE_CNTL_2 0x0000840b +#define A6XX_GRAS_2D_RESOLVE_CNTL_2_X__MASK 0x00003fff +#define A6XX_GRAS_2D_RESOLVE_CNTL_2_X__SHIFT 0 +static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_2_X(uint32_t val) +{ + return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_2_X__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_2_X__MASK; +} +#define A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__MASK 0x3fff0000 +#define A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__SHIFT 16 +static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_2_Y(uint32_t val) +{ + return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__MASK; +} + +#define REG_A6XX_GRAS_DBG_ECO_CNTL 0x00008600 +#define A6XX_GRAS_DBG_ECO_CNTL_UNK7 0x00000080 +#define A6XX_GRAS_DBG_ECO_CNTL_LRZCACHELOCKDIS 0x00000800 + +#define REG_A6XX_GRAS_ADDR_MODE_CNTL 0x00008601 + +static inline uint32_t REG_A6XX_GRAS_PERFCTR_TSE_SEL(uint32_t i0) { return 0x00008610 + 0x1*i0; } + +static inline uint32_t REG_A6XX_GRAS_PERFCTR_RAS_SEL(uint32_t i0) { return 0x00008614 + 0x1*i0; } + +static inline uint32_t REG_A6XX_GRAS_PERFCTR_LRZ_SEL(uint32_t i0) { return 0x00008618 + 0x1*i0; } + +#define REG_A6XX_RB_BIN_CONTROL 0x00008800 +#define A6XX_RB_BIN_CONTROL_BINW__MASK 0x0000003f +#define A6XX_RB_BIN_CONTROL_BINW__SHIFT 0 +static inline uint32_t A6XX_RB_BIN_CONTROL_BINW(uint32_t val) +{ + return ((val >> 5) << A6XX_RB_BIN_CONTROL_BINW__SHIFT) & A6XX_RB_BIN_CONTROL_BINW__MASK; +} +#define A6XX_RB_BIN_CONTROL_BINH__MASK 0x00007f00 +#define A6XX_RB_BIN_CONTROL_BINH__SHIFT 8 +static inline uint32_t A6XX_RB_BIN_CONTROL_BINH(uint32_t val) +{ + return ((val >> 4) << A6XX_RB_BIN_CONTROL_BINH__SHIFT) & A6XX_RB_BIN_CONTROL_BINH__MASK; +} +#define A6XX_RB_BIN_CONTROL_RENDER_MODE__MASK 0x001c0000 +#define A6XX_RB_BIN_CONTROL_RENDER_MODE__SHIFT 18 +static inline uint32_t A6XX_RB_BIN_CONTROL_RENDER_MODE(enum a6xx_render_mode val) +{ + return ((val) << A6XX_RB_BIN_CONTROL_RENDER_MODE__SHIFT) & A6XX_RB_BIN_CONTROL_RENDER_MODE__MASK; +} +#define A6XX_RB_BIN_CONTROL_FORCE_LRZ_WRITE_DIS 0x00200000 +#define A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION__MASK 0x00c00000 +#define A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION__SHIFT 22 +static inline 
uint32_t A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION(enum a6xx_buffers_location val) +{ + return ((val) << A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION__SHIFT) & A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION__MASK; +} +#define A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK 0x07000000 +#define A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT 24 +static inline uint32_t A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK(uint32_t val) +{ + return ((val) << A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT) & A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK; +} + +#define REG_A6XX_RB_RENDER_CNTL 0x00008801 +#define A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__MASK 0x00000038 +#define A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__SHIFT 3 +static inline uint32_t A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE(uint32_t val) +{ + return ((val) << A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__SHIFT) & A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__MASK; +} +#define A6XX_RB_RENDER_CNTL_EARLYVIZOUTEN 0x00000040 +#define A6XX_RB_RENDER_CNTL_BINNING 0x00000080 +#define A6XX_RB_RENDER_CNTL_UNK8__MASK 0x00000700 +#define A6XX_RB_RENDER_CNTL_UNK8__SHIFT 8 +static inline uint32_t A6XX_RB_RENDER_CNTL_UNK8(uint32_t val) +{ + return ((val) << A6XX_RB_RENDER_CNTL_UNK8__SHIFT) & A6XX_RB_RENDER_CNTL_UNK8__MASK; +} +#define A6XX_RB_RENDER_CNTL_RASTER_MODE__MASK 0x00000100 +#define A6XX_RB_RENDER_CNTL_RASTER_MODE__SHIFT 8 +static inline uint32_t A6XX_RB_RENDER_CNTL_RASTER_MODE(enum a6xx_raster_mode val) +{ + return ((val) << A6XX_RB_RENDER_CNTL_RASTER_MODE__SHIFT) & A6XX_RB_RENDER_CNTL_RASTER_MODE__MASK; +} +#define A6XX_RB_RENDER_CNTL_RASTER_DIRECTION__MASK 0x00000600 +#define A6XX_RB_RENDER_CNTL_RASTER_DIRECTION__SHIFT 9 +static inline uint32_t A6XX_RB_RENDER_CNTL_RASTER_DIRECTION(enum a6xx_raster_direction val) +{ + return ((val) << A6XX_RB_RENDER_CNTL_RASTER_DIRECTION__SHIFT) & A6XX_RB_RENDER_CNTL_RASTER_DIRECTION__MASK; +} +#define A6XX_RB_RENDER_CNTL_CONSERVATIVERASEN 0x00000800 +#define A6XX_RB_RENDER_CNTL_INNERCONSERVATIVERASEN 0x00001000 +#define A6XX_RB_RENDER_CNTL_FLAG_DEPTH 0x00004000 +#define A6XX_RB_RENDER_CNTL_FLAG_MRTS__MASK 0x00ff0000 +#define A6XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT 16 +static inline uint32_t A6XX_RB_RENDER_CNTL_FLAG_MRTS(uint32_t val) +{ + return ((val) << A6XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT) & A6XX_RB_RENDER_CNTL_FLAG_MRTS__MASK; +} + +#define REG_A6XX_RB_RAS_MSAA_CNTL 0x00008802 +#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003 +#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0 +static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val) +{ + return ((val) << A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK; +} +#define A6XX_RB_RAS_MSAA_CNTL_UNK2__MASK 0x00000004 +#define A6XX_RB_RAS_MSAA_CNTL_UNK2__SHIFT 2 +static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_UNK2(uint32_t val) +{ + return ((val) << A6XX_RB_RAS_MSAA_CNTL_UNK2__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_UNK2__MASK; +} +#define A6XX_RB_RAS_MSAA_CNTL_UNK3__MASK 0x00000008 +#define A6XX_RB_RAS_MSAA_CNTL_UNK3__SHIFT 3 +static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_UNK3(uint32_t val) +{ + return ((val) << A6XX_RB_RAS_MSAA_CNTL_UNK3__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_UNK3__MASK; +} + +#define REG_A6XX_RB_DEST_MSAA_CNTL 0x00008803 +#define A6XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003 +#define A6XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT 0 +static inline uint32_t A6XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val) +{ + return ((val) << A6XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK; +} +#define 
A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004 + +#define REG_A6XX_RB_SAMPLE_CONFIG 0x00008804 +#define A6XX_RB_SAMPLE_CONFIG_UNK0 0x00000001 +#define A6XX_RB_SAMPLE_CONFIG_LOCATION_ENABLE 0x00000002 + +#define REG_A6XX_RB_SAMPLE_LOCATION_0 0x00008805 +#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK 0x0000000f +#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT 0 +static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK; +} +#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK 0x000000f0 +#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT 4 +static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK; +} +#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK 0x00000f00 +#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT 8 +static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK; +} +#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK 0x0000f000 +#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT 12 +static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK; +} +#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK 0x000f0000 +#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT 16 +static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK; +} +#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK 0x00f00000 +#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT 20 +static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK; +} +#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK 0x0f000000 +#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT 24 +static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK; +} +#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK 0xf0000000 +#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT 28 +static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK; +} + +#define REG_A6XX_RB_SAMPLE_LOCATION_1 0x00008806 +#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK 0x0000000f +#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT 0 +static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK; +} +#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK 0x000000f0 +#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT 4 +static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & 
A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK; +} +#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK 0x00000f00 +#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT 8 +static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK; +} +#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK 0x0000f000 +#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT 12 +static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK; +} +#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK 0x000f0000 +#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT 16 +static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK; +} +#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK 0x00f00000 +#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT 20 +static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK; +} +#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK 0x0f000000 +#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT 24 +static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK; +} +#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK 0xf0000000 +#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT 28 +static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val) +{ + return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK; +} + +#define REG_A6XX_RB_RENDER_CONTROL0 0x00008809 +#define A6XX_RB_RENDER_CONTROL0_IJ_PERSP_PIXEL 0x00000001 +#define A6XX_RB_RENDER_CONTROL0_IJ_PERSP_CENTROID 0x00000002 +#define A6XX_RB_RENDER_CONTROL0_IJ_PERSP_SAMPLE 0x00000004 +#define A6XX_RB_RENDER_CONTROL0_IJ_LINEAR_PIXEL 0x00000008 +#define A6XX_RB_RENDER_CONTROL0_IJ_LINEAR_CENTROID 0x00000010 +#define A6XX_RB_RENDER_CONTROL0_IJ_LINEAR_SAMPLE 0x00000020 +#define A6XX_RB_RENDER_CONTROL0_COORD_MASK__MASK 0x000003c0 +#define A6XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT 6 +static inline uint32_t A6XX_RB_RENDER_CONTROL0_COORD_MASK(uint32_t val) +{ + return ((val) << A6XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT) & A6XX_RB_RENDER_CONTROL0_COORD_MASK__MASK; +} +#define A6XX_RB_RENDER_CONTROL0_UNK10 0x00000400 + +#define REG_A6XX_RB_RENDER_CONTROL1 0x0000880a +#define A6XX_RB_RENDER_CONTROL1_SAMPLEMASK 0x00000001 +#define A6XX_RB_RENDER_CONTROL1_UNK1 0x00000002 +#define A6XX_RB_RENDER_CONTROL1_FACENESS 0x00000004 +#define A6XX_RB_RENDER_CONTROL1_SAMPLEID 0x00000008 +#define A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__MASK 0x00000030 +#define A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__SHIFT 4 +static inline uint32_t A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE(enum a6xx_fragcoord_sample_mode val) +{ + return ((val) << A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__SHIFT) & A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__MASK; +} +#define A6XX_RB_RENDER_CONTROL1_SIZE 0x00000040 +#define A6XX_RB_RENDER_CONTROL1_LINELENGTHEN 0x00000080 +#define A6XX_RB_RENDER_CONTROL1_FOVEATION 0x00000100 + 
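+/* Informational note (editor comment, not part of the rnndb source):
+ * the FIELD(val) helpers in this generated header mask after shifting,
+ * so out-of-range values are silently truncated rather than rejected.
+ * Several fields also pre-scale their argument: bin sizes are encoded
+ * in 32x16-pixel units (the val >> 5 / val >> 4 in the BINW/BINH
+ * helpers), byte pitches in 32/64/128-byte units (val >> 5/6/7 in the
+ * *_PITCH helpers), and GMEM base addresses in 4 KiB units (val >> 12).
+ * Float-valued registers such as RB_BLEND_*_F32 take the raw IEEE-754
+ * bit pattern via fui(). So e.g. A6XX_RB_BIN_CONTROL_BINW(w) expects w
+ * in pixels and emits w/32 into bits [5:0].
+ */
+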
+#define REG_A6XX_RB_FS_OUTPUT_CNTL0 0x0000880b +#define A6XX_RB_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE 0x00000001 +#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z 0x00000002 +#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_SAMPMASK 0x00000004 +#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_STENCILREF 0x00000008 + +#define REG_A6XX_RB_FS_OUTPUT_CNTL1 0x0000880c +#define A6XX_RB_FS_OUTPUT_CNTL1_MRT__MASK 0x0000000f +#define A6XX_RB_FS_OUTPUT_CNTL1_MRT__SHIFT 0 +static inline uint32_t A6XX_RB_FS_OUTPUT_CNTL1_MRT(uint32_t val) +{ + return ((val) << A6XX_RB_FS_OUTPUT_CNTL1_MRT__SHIFT) & A6XX_RB_FS_OUTPUT_CNTL1_MRT__MASK; +} + +#define REG_A6XX_RB_RENDER_COMPONENTS 0x0000880d +#define A6XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f +#define A6XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0 +static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT0(uint32_t val) +{ + return ((val) << A6XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT0__MASK; +} +#define A6XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0 +#define A6XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4 +static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT1(uint32_t val) +{ + return ((val) << A6XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT1__MASK; +} +#define A6XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00 +#define A6XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8 +static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT2(uint32_t val) +{ + return ((val) << A6XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT2__MASK; +} +#define A6XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000 +#define A6XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12 +static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT3(uint32_t val) +{ + return ((val) << A6XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT3__MASK; +} +#define A6XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000 +#define A6XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16 +static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT4(uint32_t val) +{ + return ((val) << A6XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT4__MASK; +} +#define A6XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000 +#define A6XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20 +static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT5(uint32_t val) +{ + return ((val) << A6XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT5__MASK; +} +#define A6XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000 +#define A6XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24 +static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT6(uint32_t val) +{ + return ((val) << A6XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT6__MASK; +} +#define A6XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000 +#define A6XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28 +static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT7(uint32_t val) +{ + return ((val) << A6XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT7__MASK; +} + +#define REG_A6XX_RB_DITHER_CNTL 0x0000880e +#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__MASK 0x00000003 +#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__SHIFT 0 +static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0(enum adreno_rb_dither_mode val) +{ + return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__MASK; +} +#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__MASK 0x0000000c +#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__SHIFT 2 +static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1(enum adreno_rb_dither_mode val) +{ + return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__SHIFT) & 
A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__MASK; +} +#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__MASK 0x00000030 +#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__SHIFT 4 +static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2(enum adreno_rb_dither_mode val) +{ + return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__MASK; +} +#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__MASK 0x000000c0 +#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__SHIFT 6 +static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3(enum adreno_rb_dither_mode val) +{ + return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__MASK; +} +#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__MASK 0x00000300 +#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__SHIFT 8 +static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4(enum adreno_rb_dither_mode val) +{ + return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__MASK; +} +#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__MASK 0x00000c00 +#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__SHIFT 10 +static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5(enum adreno_rb_dither_mode val) +{ + return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__MASK; +} +#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__MASK 0x00001000 +#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__SHIFT 12 +static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6(enum adreno_rb_dither_mode val) +{ + return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__MASK; +} +#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__MASK 0x0000c000 +#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__SHIFT 14 +static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7(enum adreno_rb_dither_mode val) +{ + return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__MASK; +} + +#define REG_A6XX_RB_SRGB_CNTL 0x0000880f +#define A6XX_RB_SRGB_CNTL_SRGB_MRT0 0x00000001 +#define A6XX_RB_SRGB_CNTL_SRGB_MRT1 0x00000002 +#define A6XX_RB_SRGB_CNTL_SRGB_MRT2 0x00000004 +#define A6XX_RB_SRGB_CNTL_SRGB_MRT3 0x00000008 +#define A6XX_RB_SRGB_CNTL_SRGB_MRT4 0x00000010 +#define A6XX_RB_SRGB_CNTL_SRGB_MRT5 0x00000020 +#define A6XX_RB_SRGB_CNTL_SRGB_MRT6 0x00000040 +#define A6XX_RB_SRGB_CNTL_SRGB_MRT7 0x00000080 + +#define REG_A6XX_RB_SAMPLE_CNTL 0x00008810 +#define A6XX_RB_SAMPLE_CNTL_PER_SAMP_MODE 0x00000001 + +#define REG_A6XX_RB_UNKNOWN_8811 0x00008811 + +#define REG_A6XX_RB_UNKNOWN_8818 0x00008818 + +#define REG_A6XX_RB_UNKNOWN_8819 0x00008819 + +#define REG_A6XX_RB_UNKNOWN_881A 0x0000881a + +#define REG_A6XX_RB_UNKNOWN_881B 0x0000881b + +#define REG_A6XX_RB_UNKNOWN_881C 0x0000881c + +#define REG_A6XX_RB_UNKNOWN_881D 0x0000881d + +#define REG_A6XX_RB_UNKNOWN_881E 0x0000881e + +static inline uint32_t REG_A6XX_RB_MRT(uint32_t i0) { return 0x00008820 + 0x8*i0; } + +static inline uint32_t REG_A6XX_RB_MRT_CONTROL(uint32_t i0) { return 0x00008820 + 0x8*i0; } +#define A6XX_RB_MRT_CONTROL_BLEND 0x00000001 +#define A6XX_RB_MRT_CONTROL_BLEND2 0x00000002 +#define A6XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000004 +#define A6XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000078 +#define A6XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 3 +static inline uint32_t A6XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val) +{ + return ((val) << A6XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A6XX_RB_MRT_CONTROL_ROP_CODE__MASK; +} +#define 
A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780 +#define A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7 +static inline uint32_t A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val) +{ + return ((val) << A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK; +} + +static inline uint32_t REG_A6XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x00008821 + 0x8*i0; } +#define A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f +#define A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0 +static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val) +{ + return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK; +} +#define A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0 +#define A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5 +static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val) +{ + return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK; +} +#define A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00 +#define A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8 +static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val) +{ + return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK; +} +#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000 +#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16 +static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val) +{ + return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK; +} +#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000 +#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21 +static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val) +{ + return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK; +} +#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000 +#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24 +static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val) +{ + return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK; +} + +static inline uint32_t REG_A6XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x00008822 + 0x8*i0; } +#define A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x000000ff +#define A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0 +static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a6xx_format val) +{ + return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK; +} +#define A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300 +#define A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8 +static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a6xx_tile_mode val) +{ + return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK; +} +#define A6XX_RB_MRT_BUF_INFO_UNK10__MASK 0x00000400 +#define A6XX_RB_MRT_BUF_INFO_UNK10__SHIFT 10 +static inline uint32_t A6XX_RB_MRT_BUF_INFO_UNK10(uint32_t val) +{ + return ((val) << A6XX_RB_MRT_BUF_INFO_UNK10__SHIFT) & A6XX_RB_MRT_BUF_INFO_UNK10__MASK; +} +#define 
A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000 +#define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13 +static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val) +{ + return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK; +} + +static inline uint32_t REG_A6XX_RB_MRT_PITCH(uint32_t i0) { return 0x00008823 + 0x8*i0; } +#define A6XX_RB_MRT_PITCH__MASK 0x0000ffff +#define A6XX_RB_MRT_PITCH__SHIFT 0 +static inline uint32_t A6XX_RB_MRT_PITCH(uint32_t val) +{ + return ((val >> 6) << A6XX_RB_MRT_PITCH__SHIFT) & A6XX_RB_MRT_PITCH__MASK; +} + +static inline uint32_t REG_A6XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x00008824 + 0x8*i0; } +#define A6XX_RB_MRT_ARRAY_PITCH__MASK 0x1fffffff +#define A6XX_RB_MRT_ARRAY_PITCH__SHIFT 0 +static inline uint32_t A6XX_RB_MRT_ARRAY_PITCH(uint32_t val) +{ + return ((val >> 6) << A6XX_RB_MRT_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_ARRAY_PITCH__MASK; +} + +static inline uint32_t REG_A6XX_RB_MRT_BASE(uint32_t i0) { return 0x00008825 + 0x8*i0; } +#define A6XX_RB_MRT_BASE__MASK 0xffffffff +#define A6XX_RB_MRT_BASE__SHIFT 0 +static inline uint32_t A6XX_RB_MRT_BASE(uint32_t val) +{ + return ((val) << A6XX_RB_MRT_BASE__SHIFT) & A6XX_RB_MRT_BASE__MASK; +} + +static inline uint32_t REG_A6XX_RB_MRT_BASE_GMEM(uint32_t i0) { return 0x00008827 + 0x8*i0; } +#define A6XX_RB_MRT_BASE_GMEM__MASK 0xfffff000 +#define A6XX_RB_MRT_BASE_GMEM__SHIFT 12 +static inline uint32_t A6XX_RB_MRT_BASE_GMEM(uint32_t val) +{ + return ((val >> 12) << A6XX_RB_MRT_BASE_GMEM__SHIFT) & A6XX_RB_MRT_BASE_GMEM__MASK; +} + +#define REG_A6XX_RB_BLEND_RED_F32 0x00008860 +#define A6XX_RB_BLEND_RED_F32__MASK 0xffffffff +#define A6XX_RB_BLEND_RED_F32__SHIFT 0 +static inline uint32_t A6XX_RB_BLEND_RED_F32(float val) +{ + return ((fui(val)) << A6XX_RB_BLEND_RED_F32__SHIFT) & A6XX_RB_BLEND_RED_F32__MASK; +} + +#define REG_A6XX_RB_BLEND_GREEN_F32 0x00008861 +#define A6XX_RB_BLEND_GREEN_F32__MASK 0xffffffff +#define A6XX_RB_BLEND_GREEN_F32__SHIFT 0 +static inline uint32_t A6XX_RB_BLEND_GREEN_F32(float val) +{ + return ((fui(val)) << A6XX_RB_BLEND_GREEN_F32__SHIFT) & A6XX_RB_BLEND_GREEN_F32__MASK; +} + +#define REG_A6XX_RB_BLEND_BLUE_F32 0x00008862 +#define A6XX_RB_BLEND_BLUE_F32__MASK 0xffffffff +#define A6XX_RB_BLEND_BLUE_F32__SHIFT 0 +static inline uint32_t A6XX_RB_BLEND_BLUE_F32(float val) +{ + return ((fui(val)) << A6XX_RB_BLEND_BLUE_F32__SHIFT) & A6XX_RB_BLEND_BLUE_F32__MASK; +} + +#define REG_A6XX_RB_BLEND_ALPHA_F32 0x00008863 +#define A6XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff +#define A6XX_RB_BLEND_ALPHA_F32__SHIFT 0 +static inline uint32_t A6XX_RB_BLEND_ALPHA_F32(float val) +{ + return ((fui(val)) << A6XX_RB_BLEND_ALPHA_F32__SHIFT) & A6XX_RB_BLEND_ALPHA_F32__MASK; +} + +#define REG_A6XX_RB_ALPHA_CONTROL 0x00008864 +#define A6XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff +#define A6XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0 +static inline uint32_t A6XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val) +{ + return ((val) << A6XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A6XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK; +} +#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100 +#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00 +#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9 +static inline uint32_t A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val) +{ + return ((val) << A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK; +} + +#define REG_A6XX_RB_BLEND_CNTL 0x00008865 +#define 
A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff +#define A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT 0 +static inline uint32_t A6XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val) +{ + return ((val) << A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK; +} +#define A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100 +#define A6XX_RB_BLEND_CNTL_DUAL_COLOR_IN_ENABLE 0x00000200 +#define A6XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400 +#define A6XX_RB_BLEND_CNTL_ALPHA_TO_ONE 0x00000800 +#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000 +#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16 +static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val) +{ + return ((val) << A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK; +} + +#define REG_A6XX_RB_DEPTH_PLANE_CNTL 0x00008870 +#define A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__MASK 0x00000003 +#define A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__SHIFT 0 +static inline uint32_t A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE(enum a6xx_ztest_mode val) +{ + return ((val) << A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__SHIFT) & A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__MASK; +} + +#define REG_A6XX_RB_DEPTH_CNTL 0x00008871 +#define A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000001 +#define A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002 +#define A6XX_RB_DEPTH_CNTL_ZFUNC__MASK 0x0000001c +#define A6XX_RB_DEPTH_CNTL_ZFUNC__SHIFT 2 +static inline uint32_t A6XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val) +{ + return ((val) << A6XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A6XX_RB_DEPTH_CNTL_ZFUNC__MASK; +} +#define A6XX_RB_DEPTH_CNTL_Z_CLAMP_ENABLE 0x00000020 +#define A6XX_RB_DEPTH_CNTL_Z_READ_ENABLE 0x00000040 +#define A6XX_RB_DEPTH_CNTL_Z_BOUNDS_ENABLE 0x00000080 + +#define REG_A6XX_RB_DEPTH_BUFFER_INFO 0x00008872 +#define A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007 +#define A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0 +static inline uint32_t A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_format val) +{ + return ((val) << A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK; +} +#define A6XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK 0x00000018 +#define A6XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT 3 +static inline uint32_t A6XX_RB_DEPTH_BUFFER_INFO_UNK3(uint32_t val) +{ + return ((val) << A6XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT) & A6XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK; +} + +#define REG_A6XX_RB_DEPTH_BUFFER_PITCH 0x00008873 +#define A6XX_RB_DEPTH_BUFFER_PITCH__MASK 0x00003fff +#define A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0 +static inline uint32_t A6XX_RB_DEPTH_BUFFER_PITCH(uint32_t val) +{ + return ((val >> 6) << A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_PITCH__MASK; +} + +#define REG_A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x00008874 +#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0x0fffffff +#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0 +static inline uint32_t A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val) +{ + return ((val >> 6) << A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK; +} + +#define REG_A6XX_RB_DEPTH_BUFFER_BASE 0x00008875 +#define A6XX_RB_DEPTH_BUFFER_BASE__MASK 0xffffffff +#define A6XX_RB_DEPTH_BUFFER_BASE__SHIFT 0 +static inline uint32_t A6XX_RB_DEPTH_BUFFER_BASE(uint32_t val) +{ + return ((val) << A6XX_RB_DEPTH_BUFFER_BASE__SHIFT) & A6XX_RB_DEPTH_BUFFER_BASE__MASK; +} + +#define REG_A6XX_RB_DEPTH_BUFFER_BASE_GMEM 0x00008877 +#define A6XX_RB_DEPTH_BUFFER_BASE_GMEM__MASK 0xfffff000 +#define A6XX_RB_DEPTH_BUFFER_BASE_GMEM__SHIFT 12 +static inline uint32_t 
A6XX_RB_DEPTH_BUFFER_BASE_GMEM(uint32_t val) +{ + return ((val >> 12) << A6XX_RB_DEPTH_BUFFER_BASE_GMEM__SHIFT) & A6XX_RB_DEPTH_BUFFER_BASE_GMEM__MASK; +} + +#define REG_A6XX_RB_Z_BOUNDS_MIN 0x00008878 +#define A6XX_RB_Z_BOUNDS_MIN__MASK 0xffffffff +#define A6XX_RB_Z_BOUNDS_MIN__SHIFT 0 +static inline uint32_t A6XX_RB_Z_BOUNDS_MIN(float val) +{ + return ((fui(val)) << A6XX_RB_Z_BOUNDS_MIN__SHIFT) & A6XX_RB_Z_BOUNDS_MIN__MASK; +} + +#define REG_A6XX_RB_Z_BOUNDS_MAX 0x00008879 +#define A6XX_RB_Z_BOUNDS_MAX__MASK 0xffffffff +#define A6XX_RB_Z_BOUNDS_MAX__SHIFT 0 +static inline uint32_t A6XX_RB_Z_BOUNDS_MAX(float val) +{ + return ((fui(val)) << A6XX_RB_Z_BOUNDS_MAX__SHIFT) & A6XX_RB_Z_BOUNDS_MAX__MASK; +} + +#define REG_A6XX_RB_STENCIL_CONTROL 0x00008880 +#define A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001 +#define A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002 +#define A6XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004 +#define A6XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700 +#define A6XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8 +static inline uint32_t A6XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val) +{ + return ((val) << A6XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A6XX_RB_STENCIL_CONTROL_FUNC__MASK; +} +#define A6XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800 +#define A6XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11 +static inline uint32_t A6XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val) +{ + return ((val) << A6XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A6XX_RB_STENCIL_CONTROL_FAIL__MASK; +} +#define A6XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000 +#define A6XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14 +static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val) +{ + return ((val) << A6XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZPASS__MASK; +} +#define A6XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000 +#define A6XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17 +static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val) +{ + return ((val) << A6XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZFAIL__MASK; +} +#define A6XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000 +#define A6XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20 +static inline uint32_t A6XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val) +{ + return ((val) << A6XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_FUNC_BF__MASK; +} +#define A6XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000 +#define A6XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23 +static inline uint32_t A6XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val) +{ + return ((val) << A6XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_FAIL_BF__MASK; +} +#define A6XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000 +#define A6XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26 +static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val) +{ + return ((val) << A6XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK; +} +#define A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000 +#define A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29 +static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val) +{ + return ((val) << A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK; +} + +#define REG_A6XX_RB_STENCIL_INFO 0x00008881 +#define A6XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001 +#define A6XX_RB_STENCIL_INFO_UNK1 0x00000002 + +#define REG_A6XX_RB_STENCIL_BUFFER_PITCH 0x00008882 +#define A6XX_RB_STENCIL_BUFFER_PITCH__MASK 0x00000fff 
+#define A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT 0 +static inline uint32_t A6XX_RB_STENCIL_BUFFER_PITCH(uint32_t val) +{ + return ((val >> 6) << A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_PITCH__MASK; +} + +#define REG_A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH 0x00008883 +#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK 0x00ffffff +#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT 0 +static inline uint32_t A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(uint32_t val) +{ + return ((val >> 6) << A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK; +} + +#define REG_A6XX_RB_STENCIL_BUFFER_BASE 0x00008884 +#define A6XX_RB_STENCIL_BUFFER_BASE__MASK 0xffffffff +#define A6XX_RB_STENCIL_BUFFER_BASE__SHIFT 0 +static inline uint32_t A6XX_RB_STENCIL_BUFFER_BASE(uint32_t val) +{ + return ((val) << A6XX_RB_STENCIL_BUFFER_BASE__SHIFT) & A6XX_RB_STENCIL_BUFFER_BASE__MASK; +} + +#define REG_A6XX_RB_STENCIL_BUFFER_BASE_GMEM 0x00008886 +#define A6XX_RB_STENCIL_BUFFER_BASE_GMEM__MASK 0xfffff000 +#define A6XX_RB_STENCIL_BUFFER_BASE_GMEM__SHIFT 12 +static inline uint32_t A6XX_RB_STENCIL_BUFFER_BASE_GMEM(uint32_t val) +{ + return ((val >> 12) << A6XX_RB_STENCIL_BUFFER_BASE_GMEM__SHIFT) & A6XX_RB_STENCIL_BUFFER_BASE_GMEM__MASK; +} + +#define REG_A6XX_RB_STENCILREF 0x00008887 +#define A6XX_RB_STENCILREF_REF__MASK 0x000000ff +#define A6XX_RB_STENCILREF_REF__SHIFT 0 +static inline uint32_t A6XX_RB_STENCILREF_REF(uint32_t val) +{ + return ((val) << A6XX_RB_STENCILREF_REF__SHIFT) & A6XX_RB_STENCILREF_REF__MASK; +} +#define A6XX_RB_STENCILREF_BFREF__MASK 0x0000ff00 +#define A6XX_RB_STENCILREF_BFREF__SHIFT 8 +static inline uint32_t A6XX_RB_STENCILREF_BFREF(uint32_t val) +{ + return ((val) << A6XX_RB_STENCILREF_BFREF__SHIFT) & A6XX_RB_STENCILREF_BFREF__MASK; +} + +#define REG_A6XX_RB_STENCILMASK 0x00008888 +#define A6XX_RB_STENCILMASK_MASK__MASK 0x000000ff +#define A6XX_RB_STENCILMASK_MASK__SHIFT 0 +static inline uint32_t A6XX_RB_STENCILMASK_MASK(uint32_t val) +{ + return ((val) << A6XX_RB_STENCILMASK_MASK__SHIFT) & A6XX_RB_STENCILMASK_MASK__MASK; +} +#define A6XX_RB_STENCILMASK_BFMASK__MASK 0x0000ff00 +#define A6XX_RB_STENCILMASK_BFMASK__SHIFT 8 +static inline uint32_t A6XX_RB_STENCILMASK_BFMASK(uint32_t val) +{ + return ((val) << A6XX_RB_STENCILMASK_BFMASK__SHIFT) & A6XX_RB_STENCILMASK_BFMASK__MASK; +} + +#define REG_A6XX_RB_STENCILWRMASK 0x00008889 +#define A6XX_RB_STENCILWRMASK_WRMASK__MASK 0x000000ff +#define A6XX_RB_STENCILWRMASK_WRMASK__SHIFT 0 +static inline uint32_t A6XX_RB_STENCILWRMASK_WRMASK(uint32_t val) +{ + return ((val) << A6XX_RB_STENCILWRMASK_WRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_WRMASK__MASK; +} +#define A6XX_RB_STENCILWRMASK_BFWRMASK__MASK 0x0000ff00 +#define A6XX_RB_STENCILWRMASK_BFWRMASK__SHIFT 8 +static inline uint32_t A6XX_RB_STENCILWRMASK_BFWRMASK(uint32_t val) +{ + return ((val) << A6XX_RB_STENCILWRMASK_BFWRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_BFWRMASK__MASK; +} + +#define REG_A6XX_RB_WINDOW_OFFSET 0x00008890 +#define A6XX_RB_WINDOW_OFFSET_X__MASK 0x00003fff +#define A6XX_RB_WINDOW_OFFSET_X__SHIFT 0 +static inline uint32_t A6XX_RB_WINDOW_OFFSET_X(uint32_t val) +{ + return ((val) << A6XX_RB_WINDOW_OFFSET_X__SHIFT) & A6XX_RB_WINDOW_OFFSET_X__MASK; +} +#define A6XX_RB_WINDOW_OFFSET_Y__MASK 0x3fff0000 +#define A6XX_RB_WINDOW_OFFSET_Y__SHIFT 16 +static inline uint32_t A6XX_RB_WINDOW_OFFSET_Y(uint32_t val) +{ + return ((val) << A6XX_RB_WINDOW_OFFSET_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET_Y__MASK; +} + +#define REG_A6XX_RB_SAMPLE_COUNT_CONTROL 0x00008891 +#define 
A6XX_RB_SAMPLE_COUNT_CONTROL_UNK0 0x00000001 +#define A6XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002 + +#define REG_A6XX_RB_LRZ_CNTL 0x00008898 +#define A6XX_RB_LRZ_CNTL_ENABLE 0x00000001 + +#define REG_A6XX_RB_Z_CLAMP_MIN 0x000088c0 +#define A6XX_RB_Z_CLAMP_MIN__MASK 0xffffffff +#define A6XX_RB_Z_CLAMP_MIN__SHIFT 0 +static inline uint32_t A6XX_RB_Z_CLAMP_MIN(float val) +{ + return ((fui(val)) << A6XX_RB_Z_CLAMP_MIN__SHIFT) & A6XX_RB_Z_CLAMP_MIN__MASK; +} + +#define REG_A6XX_RB_Z_CLAMP_MAX 0x000088c1 +#define A6XX_RB_Z_CLAMP_MAX__MASK 0xffffffff +#define A6XX_RB_Z_CLAMP_MAX__SHIFT 0 +static inline uint32_t A6XX_RB_Z_CLAMP_MAX(float val) +{ + return ((fui(val)) << A6XX_RB_Z_CLAMP_MAX__SHIFT) & A6XX_RB_Z_CLAMP_MAX__MASK; +} + +#define REG_A6XX_RB_UNKNOWN_88D0 0x000088d0 +#define A6XX_RB_UNKNOWN_88D0_UNK0__MASK 0x00001fff +#define A6XX_RB_UNKNOWN_88D0_UNK0__SHIFT 0 +static inline uint32_t A6XX_RB_UNKNOWN_88D0_UNK0(uint32_t val) +{ + return ((val) << A6XX_RB_UNKNOWN_88D0_UNK0__SHIFT) & A6XX_RB_UNKNOWN_88D0_UNK0__MASK; +} +#define A6XX_RB_UNKNOWN_88D0_UNK16__MASK 0x07ff0000 +#define A6XX_RB_UNKNOWN_88D0_UNK16__SHIFT 16 +static inline uint32_t A6XX_RB_UNKNOWN_88D0_UNK16(uint32_t val) +{ + return ((val) << A6XX_RB_UNKNOWN_88D0_UNK16__SHIFT) & A6XX_RB_UNKNOWN_88D0_UNK16__MASK; +} + +#define REG_A6XX_RB_BLIT_SCISSOR_TL 0x000088d1 +#define A6XX_RB_BLIT_SCISSOR_TL_X__MASK 0x00003fff +#define A6XX_RB_BLIT_SCISSOR_TL_X__SHIFT 0 +static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_X(uint32_t val) +{ + return ((val) << A6XX_RB_BLIT_SCISSOR_TL_X__SHIFT) & A6XX_RB_BLIT_SCISSOR_TL_X__MASK; +} +#define A6XX_RB_BLIT_SCISSOR_TL_Y__MASK 0x3fff0000 +#define A6XX_RB_BLIT_SCISSOR_TL_Y__SHIFT 16 +static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_Y(uint32_t val) +{ + return ((val) << A6XX_RB_BLIT_SCISSOR_TL_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_TL_Y__MASK; +} + +#define REG_A6XX_RB_BLIT_SCISSOR_BR 0x000088d2 +#define A6XX_RB_BLIT_SCISSOR_BR_X__MASK 0x00003fff +#define A6XX_RB_BLIT_SCISSOR_BR_X__SHIFT 0 +static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_X(uint32_t val) +{ + return ((val) << A6XX_RB_BLIT_SCISSOR_BR_X__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_X__MASK; +} +#define A6XX_RB_BLIT_SCISSOR_BR_Y__MASK 0x3fff0000 +#define A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT 16 +static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_Y(uint32_t val) +{ + return ((val) << A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_Y__MASK; +} + +#define REG_A6XX_RB_BIN_CONTROL2 0x000088d3 +#define A6XX_RB_BIN_CONTROL2_BINW__MASK 0x0000003f +#define A6XX_RB_BIN_CONTROL2_BINW__SHIFT 0 +static inline uint32_t A6XX_RB_BIN_CONTROL2_BINW(uint32_t val) +{ + return ((val >> 5) << A6XX_RB_BIN_CONTROL2_BINW__SHIFT) & A6XX_RB_BIN_CONTROL2_BINW__MASK; +} +#define A6XX_RB_BIN_CONTROL2_BINH__MASK 0x00007f00 +#define A6XX_RB_BIN_CONTROL2_BINH__SHIFT 8 +static inline uint32_t A6XX_RB_BIN_CONTROL2_BINH(uint32_t val) +{ + return ((val >> 4) << A6XX_RB_BIN_CONTROL2_BINH__SHIFT) & A6XX_RB_BIN_CONTROL2_BINH__MASK; +} + +#define REG_A6XX_RB_WINDOW_OFFSET2 0x000088d4 +#define A6XX_RB_WINDOW_OFFSET2_X__MASK 0x00003fff +#define A6XX_RB_WINDOW_OFFSET2_X__SHIFT 0 +static inline uint32_t A6XX_RB_WINDOW_OFFSET2_X(uint32_t val) +{ + return ((val) << A6XX_RB_WINDOW_OFFSET2_X__SHIFT) & A6XX_RB_WINDOW_OFFSET2_X__MASK; +} +#define A6XX_RB_WINDOW_OFFSET2_Y__MASK 0x3fff0000 +#define A6XX_RB_WINDOW_OFFSET2_Y__SHIFT 16 +static inline uint32_t A6XX_RB_WINDOW_OFFSET2_Y(uint32_t val) +{ + return ((val) << A6XX_RB_WINDOW_OFFSET2_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET2_Y__MASK; +} + +#define REG_A6XX_RB_MSAA_CNTL 
0x000088d5 +#define A6XX_RB_MSAA_CNTL_SAMPLES__MASK 0x00000018 +#define A6XX_RB_MSAA_CNTL_SAMPLES__SHIFT 3 +static inline uint32_t A6XX_RB_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val) +{ + return ((val) << A6XX_RB_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_MSAA_CNTL_SAMPLES__MASK; +} + +#define REG_A6XX_RB_BLIT_BASE_GMEM 0x000088d6 +#define A6XX_RB_BLIT_BASE_GMEM__MASK 0xfffff000 +#define A6XX_RB_BLIT_BASE_GMEM__SHIFT 12 +static inline uint32_t A6XX_RB_BLIT_BASE_GMEM(uint32_t val) +{ + return ((val >> 12) << A6XX_RB_BLIT_BASE_GMEM__SHIFT) & A6XX_RB_BLIT_BASE_GMEM__MASK; +} + +#define REG_A6XX_RB_BLIT_DST_INFO 0x000088d7 +#define A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK 0x00000003 +#define A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT 0 +static inline uint32_t A6XX_RB_BLIT_DST_INFO_TILE_MODE(enum a6xx_tile_mode val) +{ + return ((val) << A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK; +} +#define A6XX_RB_BLIT_DST_INFO_FLAGS 0x00000004 +#define A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK 0x00000018 +#define A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT 3 +static inline uint32_t A6XX_RB_BLIT_DST_INFO_SAMPLES(enum a3xx_msaa_samples val) +{ + return ((val) << A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT) & A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK; +} +#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK 0x00000060 +#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT 5 +static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val) +{ + return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK; +} +#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK 0x00007f80 +#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT 7 +static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(enum a6xx_format val) +{ + return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK; +} +#define A6XX_RB_BLIT_DST_INFO_UNK15 0x00008000 + +#define REG_A6XX_RB_BLIT_DST 0x000088d8 +#define A6XX_RB_BLIT_DST__MASK 0xffffffff +#define A6XX_RB_BLIT_DST__SHIFT 0 +static inline uint32_t A6XX_RB_BLIT_DST(uint32_t val) +{ + return ((val) << A6XX_RB_BLIT_DST__SHIFT) & A6XX_RB_BLIT_DST__MASK; +} + +#define REG_A6XX_RB_BLIT_DST_PITCH 0x000088da +#define A6XX_RB_BLIT_DST_PITCH__MASK 0x0000ffff +#define A6XX_RB_BLIT_DST_PITCH__SHIFT 0 +static inline uint32_t A6XX_RB_BLIT_DST_PITCH(uint32_t val) +{ + return ((val >> 6) << A6XX_RB_BLIT_DST_PITCH__SHIFT) & A6XX_RB_BLIT_DST_PITCH__MASK; +} + +#define REG_A6XX_RB_BLIT_DST_ARRAY_PITCH 0x000088db +#define A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK 0x1fffffff +#define A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT 0 +static inline uint32_t A6XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val) +{ + return ((val >> 6) << A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK; +} + +#define REG_A6XX_RB_BLIT_FLAG_DST 0x000088dc +#define A6XX_RB_BLIT_FLAG_DST__MASK 0xffffffff +#define A6XX_RB_BLIT_FLAG_DST__SHIFT 0 +static inline uint32_t A6XX_RB_BLIT_FLAG_DST(uint32_t val) +{ + return ((val) << A6XX_RB_BLIT_FLAG_DST__SHIFT) & A6XX_RB_BLIT_FLAG_DST__MASK; +} + +#define REG_A6XX_RB_BLIT_FLAG_DST_PITCH 0x000088de +#define A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__MASK 0x000007ff +#define A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__SHIFT 0 +static inline uint32_t A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH(uint32_t val) +{ + return ((val >> 6) << A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__SHIFT) & A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__MASK; +} +#define A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__MASK 0x0ffff800 +#define A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__SHIFT 11 +static 
inline uint32_t A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH(uint32_t val) +{ + return ((val >> 7) << A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__MASK; +} + +#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0 0x000088df + +#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW1 0x000088e0 + +#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW2 0x000088e1 + +#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW3 0x000088e2 + +#define REG_A6XX_RB_BLIT_INFO 0x000088e3 +#define A6XX_RB_BLIT_INFO_UNK0 0x00000001 +#define A6XX_RB_BLIT_INFO_GMEM 0x00000002 +#define A6XX_RB_BLIT_INFO_SAMPLE_0 0x00000004 +#define A6XX_RB_BLIT_INFO_DEPTH 0x00000008 +#define A6XX_RB_BLIT_INFO_CLEAR_MASK__MASK 0x000000f0 +#define A6XX_RB_BLIT_INFO_CLEAR_MASK__SHIFT 4 +static inline uint32_t A6XX_RB_BLIT_INFO_CLEAR_MASK(uint32_t val) +{ + return ((val) << A6XX_RB_BLIT_INFO_CLEAR_MASK__SHIFT) & A6XX_RB_BLIT_INFO_CLEAR_MASK__MASK; +} +#define A6XX_RB_BLIT_INFO_UNK8__MASK 0x00000300 +#define A6XX_RB_BLIT_INFO_UNK8__SHIFT 8 +static inline uint32_t A6XX_RB_BLIT_INFO_UNK8(uint32_t val) +{ + return ((val) << A6XX_RB_BLIT_INFO_UNK8__SHIFT) & A6XX_RB_BLIT_INFO_UNK8__MASK; +} +#define A6XX_RB_BLIT_INFO_UNK12__MASK 0x0000f000 +#define A6XX_RB_BLIT_INFO_UNK12__SHIFT 12 +static inline uint32_t A6XX_RB_BLIT_INFO_UNK12(uint32_t val) +{ + return ((val) << A6XX_RB_BLIT_INFO_UNK12__SHIFT) & A6XX_RB_BLIT_INFO_UNK12__MASK; +} + +#define REG_A6XX_RB_UNKNOWN_88F0 0x000088f0 + +#define REG_A6XX_RB_UNK_FLAG_BUFFER_BASE 0x000088f1 +#define A6XX_RB_UNK_FLAG_BUFFER_BASE__MASK 0xffffffff +#define A6XX_RB_UNK_FLAG_BUFFER_BASE__SHIFT 0 +static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_BASE(uint32_t val) +{ + return ((val) << A6XX_RB_UNK_FLAG_BUFFER_BASE__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_BASE__MASK; +} + +#define REG_A6XX_RB_UNK_FLAG_BUFFER_PITCH 0x000088f3 +#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__MASK 0x000007ff +#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__SHIFT 0 +static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH(uint32_t val) +{ + return ((val >> 6) << A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__MASK; +} +#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x00fff800 +#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11 +static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val) +{ + return ((val >> 7) << A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK; +} + +#define REG_A6XX_RB_UNKNOWN_88F4 0x000088f4 + +#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE 0x00008900 +#define A6XX_RB_DEPTH_FLAG_BUFFER_BASE__MASK 0xffffffff +#define A6XX_RB_DEPTH_FLAG_BUFFER_BASE__SHIFT 0 +static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_BASE(uint32_t val) +{ + return ((val) << A6XX_RB_DEPTH_FLAG_BUFFER_BASE__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_BASE__MASK; +} + +#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x00008902 +#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__MASK 0x0000007f +#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__SHIFT 0 +static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH(uint32_t val) +{ + return ((val >> 6) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__MASK; +} +#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__MASK 0x00000700 +#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__SHIFT 8 +static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8(uint32_t val) +{ + return ((val) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__MASK; +} +#define 
A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x0ffff800 +#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11 +static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val) +{ + return ((val >> 7) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK; +} + +static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER(uint32_t i0) { return 0x00008903 + 0x3*i0; } + +static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR(uint32_t i0) { return 0x00008903 + 0x3*i0; } +#define A6XX_RB_MRT_FLAG_BUFFER_ADDR__MASK 0xffffffff +#define A6XX_RB_MRT_FLAG_BUFFER_ADDR__SHIFT 0 +static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_ADDR(uint32_t val) +{ + return ((val) << A6XX_RB_MRT_FLAG_BUFFER_ADDR__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_ADDR__MASK; +} + +static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t i0) { return 0x00008905 + 0x3*i0; } +#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK 0x000007ff +#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT 0 +static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(uint32_t val) +{ + return ((val >> 6) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK; +} +#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x1ffff800 +#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11 +static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val) +{ + return ((val >> 7) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK; +} + +#define REG_A6XX_RB_SAMPLE_COUNT_ADDR 0x00008927 +#define A6XX_RB_SAMPLE_COUNT_ADDR__MASK 0xffffffff +#define A6XX_RB_SAMPLE_COUNT_ADDR__SHIFT 0 +static inline uint32_t A6XX_RB_SAMPLE_COUNT_ADDR(uint32_t val) +{ + return ((val) << A6XX_RB_SAMPLE_COUNT_ADDR__SHIFT) & A6XX_RB_SAMPLE_COUNT_ADDR__MASK; +} + +#define REG_A6XX_RB_UNKNOWN_8A00 0x00008a00 + +#define REG_A6XX_RB_UNKNOWN_8A10 0x00008a10 + +#define REG_A6XX_RB_UNKNOWN_8A20 0x00008a20 + +#define REG_A6XX_RB_UNKNOWN_8A30 0x00008a30 + +#define REG_A6XX_RB_2D_BLIT_CNTL 0x00008c00 +#define A6XX_RB_2D_BLIT_CNTL_ROTATE__MASK 0x00000007 +#define A6XX_RB_2D_BLIT_CNTL_ROTATE__SHIFT 0 +static inline uint32_t A6XX_RB_2D_BLIT_CNTL_ROTATE(enum a6xx_rotation val) +{ + return ((val) << A6XX_RB_2D_BLIT_CNTL_ROTATE__SHIFT) & A6XX_RB_2D_BLIT_CNTL_ROTATE__MASK; +} +#define A6XX_RB_2D_BLIT_CNTL_OVERWRITEEN 0x00000008 +#define A6XX_RB_2D_BLIT_CNTL_UNK4__MASK 0x00000070 +#define A6XX_RB_2D_BLIT_CNTL_UNK4__SHIFT 4 +static inline uint32_t A6XX_RB_2D_BLIT_CNTL_UNK4(uint32_t val) +{ + return ((val) << A6XX_RB_2D_BLIT_CNTL_UNK4__SHIFT) & A6XX_RB_2D_BLIT_CNTL_UNK4__MASK; +} +#define A6XX_RB_2D_BLIT_CNTL_SOLID_COLOR 0x00000080 +#define A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK 0x0000ff00 +#define A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT 8 +static inline uint32_t A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_format val) +{ + return ((val) << A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK; +} +#define A6XX_RB_2D_BLIT_CNTL_SCISSOR 0x00010000 +#define A6XX_RB_2D_BLIT_CNTL_UNK17__MASK 0x00060000 +#define A6XX_RB_2D_BLIT_CNTL_UNK17__SHIFT 17 +static inline uint32_t A6XX_RB_2D_BLIT_CNTL_UNK17(uint32_t val) +{ + return ((val) << A6XX_RB_2D_BLIT_CNTL_UNK17__SHIFT) & A6XX_RB_2D_BLIT_CNTL_UNK17__MASK; +} +#define A6XX_RB_2D_BLIT_CNTL_D24S8 0x00080000 +#define A6XX_RB_2D_BLIT_CNTL_MASK__MASK 0x00f00000 +#define A6XX_RB_2D_BLIT_CNTL_MASK__SHIFT 20 +static inline uint32_t 
A6XX_RB_2D_BLIT_CNTL_MASK(uint32_t val) +{ + return ((val) << A6XX_RB_2D_BLIT_CNTL_MASK__SHIFT) & A6XX_RB_2D_BLIT_CNTL_MASK__MASK; +} +#define A6XX_RB_2D_BLIT_CNTL_IFMT__MASK 0x1f000000 +#define A6XX_RB_2D_BLIT_CNTL_IFMT__SHIFT 24 +static inline uint32_t A6XX_RB_2D_BLIT_CNTL_IFMT(enum a6xx_2d_ifmt val) +{ + return ((val) << A6XX_RB_2D_BLIT_CNTL_IFMT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_IFMT__MASK; +} +#define A6XX_RB_2D_BLIT_CNTL_RASTER_MODE__MASK 0x20000000 +#define A6XX_RB_2D_BLIT_CNTL_RASTER_MODE__SHIFT 29 +static inline uint32_t A6XX_RB_2D_BLIT_CNTL_RASTER_MODE(enum a6xx_raster_mode val) +{ + return ((val) << A6XX_RB_2D_BLIT_CNTL_RASTER_MODE__SHIFT) & A6XX_RB_2D_BLIT_CNTL_RASTER_MODE__MASK; +} + +#define REG_A6XX_RB_2D_UNKNOWN_8C01 0x00008c01 + +#define REG_A6XX_RB_2D_DST_INFO 0x00008c17 +#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff +#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT 0 +static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a6xx_format val) +{ + return ((val) << A6XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK; +} +#define A6XX_RB_2D_DST_INFO_TILE_MODE__MASK 0x00000300 +#define A6XX_RB_2D_DST_INFO_TILE_MODE__SHIFT 8 +static inline uint32_t A6XX_RB_2D_DST_INFO_TILE_MODE(enum a6xx_tile_mode val) +{ + return ((val) << A6XX_RB_2D_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_2D_DST_INFO_TILE_MODE__MASK; +} +#define A6XX_RB_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00 +#define A6XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT 10 +static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val) +{ + return ((val) << A6XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_2D_DST_INFO_COLOR_SWAP__MASK; +} +#define A6XX_RB_2D_DST_INFO_FLAGS 0x00001000 +#define A6XX_RB_2D_DST_INFO_SRGB 0x00002000 +#define A6XX_RB_2D_DST_INFO_SAMPLES__MASK 0x0000c000 +#define A6XX_RB_2D_DST_INFO_SAMPLES__SHIFT 14 +static inline uint32_t A6XX_RB_2D_DST_INFO_SAMPLES(enum a3xx_msaa_samples val) +{ + return ((val) << A6XX_RB_2D_DST_INFO_SAMPLES__SHIFT) & A6XX_RB_2D_DST_INFO_SAMPLES__MASK; +} +#define A6XX_RB_2D_DST_INFO_FILTER 0x00010000 +#define A6XX_RB_2D_DST_INFO_UNK17 0x00020000 +#define A6XX_RB_2D_DST_INFO_SAMPLES_AVERAGE 0x00040000 +#define A6XX_RB_2D_DST_INFO_UNK19 0x00080000 +#define A6XX_RB_2D_DST_INFO_UNK20 0x00100000 +#define A6XX_RB_2D_DST_INFO_UNK21 0x00200000 +#define A6XX_RB_2D_DST_INFO_UNK22 0x00400000 +#define A6XX_RB_2D_DST_INFO_UNK23__MASK 0x07800000 +#define A6XX_RB_2D_DST_INFO_UNK23__SHIFT 23 +static inline uint32_t A6XX_RB_2D_DST_INFO_UNK23(uint32_t val) +{ + return ((val) << A6XX_RB_2D_DST_INFO_UNK23__SHIFT) & A6XX_RB_2D_DST_INFO_UNK23__MASK; +} +#define A6XX_RB_2D_DST_INFO_UNK28 0x10000000 + +#define REG_A6XX_RB_2D_DST 0x00008c18 +#define A6XX_RB_2D_DST__MASK 0xffffffff +#define A6XX_RB_2D_DST__SHIFT 0 +static inline uint32_t A6XX_RB_2D_DST(uint32_t val) +{ + return ((val) << A6XX_RB_2D_DST__SHIFT) & A6XX_RB_2D_DST__MASK; +} + +#define REG_A6XX_RB_2D_DST_PITCH 0x00008c1a +#define A6XX_RB_2D_DST_PITCH__MASK 0x0000ffff +#define A6XX_RB_2D_DST_PITCH__SHIFT 0 +static inline uint32_t A6XX_RB_2D_DST_PITCH(uint32_t val) +{ + return ((val >> 6) << A6XX_RB_2D_DST_PITCH__SHIFT) & A6XX_RB_2D_DST_PITCH__MASK; +} + +#define REG_A6XX_RB_2D_DST_PLANE1 0x00008c1b +#define A6XX_RB_2D_DST_PLANE1__MASK 0xffffffff +#define A6XX_RB_2D_DST_PLANE1__SHIFT 0 +static inline uint32_t A6XX_RB_2D_DST_PLANE1(uint32_t val) +{ + return ((val) << A6XX_RB_2D_DST_PLANE1__SHIFT) & A6XX_RB_2D_DST_PLANE1__MASK; +} + +#define REG_A6XX_RB_2D_DST_PLANE_PITCH 0x00008c1d +#define 
A6XX_RB_2D_DST_PLANE_PITCH__MASK 0x0000ffff +#define A6XX_RB_2D_DST_PLANE_PITCH__SHIFT 0 +static inline uint32_t A6XX_RB_2D_DST_PLANE_PITCH(uint32_t val) +{ + return ((val >> 6) << A6XX_RB_2D_DST_PLANE_PITCH__SHIFT) & A6XX_RB_2D_DST_PLANE_PITCH__MASK; +} + +#define REG_A6XX_RB_2D_DST_PLANE2 0x00008c1e +#define A6XX_RB_2D_DST_PLANE2__MASK 0xffffffff +#define A6XX_RB_2D_DST_PLANE2__SHIFT 0 +static inline uint32_t A6XX_RB_2D_DST_PLANE2(uint32_t val) +{ + return ((val) << A6XX_RB_2D_DST_PLANE2__SHIFT) & A6XX_RB_2D_DST_PLANE2__MASK; +} + +#define REG_A6XX_RB_2D_DST_FLAGS 0x00008c20 +#define A6XX_RB_2D_DST_FLAGS__MASK 0xffffffff +#define A6XX_RB_2D_DST_FLAGS__SHIFT 0 +static inline uint32_t A6XX_RB_2D_DST_FLAGS(uint32_t val) +{ + return ((val) << A6XX_RB_2D_DST_FLAGS__SHIFT) & A6XX_RB_2D_DST_FLAGS__MASK; +} + +#define REG_A6XX_RB_2D_DST_FLAGS_PITCH 0x00008c22 +#define A6XX_RB_2D_DST_FLAGS_PITCH__MASK 0x000000ff +#define A6XX_RB_2D_DST_FLAGS_PITCH__SHIFT 0 +static inline uint32_t A6XX_RB_2D_DST_FLAGS_PITCH(uint32_t val) +{ + return ((val >> 6) << A6XX_RB_2D_DST_FLAGS_PITCH__SHIFT) & A6XX_RB_2D_DST_FLAGS_PITCH__MASK; +} + +#define REG_A6XX_RB_2D_DST_FLAGS_PLANE 0x00008c23 +#define A6XX_RB_2D_DST_FLAGS_PLANE__MASK 0xffffffff +#define A6XX_RB_2D_DST_FLAGS_PLANE__SHIFT 0 +static inline uint32_t A6XX_RB_2D_DST_FLAGS_PLANE(uint32_t val) +{ + return ((val) << A6XX_RB_2D_DST_FLAGS_PLANE__SHIFT) & A6XX_RB_2D_DST_FLAGS_PLANE__MASK; +} + +#define REG_A6XX_RB_2D_DST_FLAGS_PLANE_PITCH 0x00008c25 +#define A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__MASK 0x000000ff +#define A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__SHIFT 0 +static inline uint32_t A6XX_RB_2D_DST_FLAGS_PLANE_PITCH(uint32_t val) +{ + return ((val >> 6) << A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__SHIFT) & A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__MASK; +} + +#define REG_A6XX_RB_2D_SRC_SOLID_C0 0x00008c2c + +#define REG_A6XX_RB_2D_SRC_SOLID_C1 0x00008c2d + +#define REG_A6XX_RB_2D_SRC_SOLID_C2 0x00008c2e + +#define REG_A6XX_RB_2D_SRC_SOLID_C3 0x00008c2f + +#define REG_A6XX_RB_UNKNOWN_8E01 0x00008e01 + +#define REG_A6XX_RB_UNKNOWN_8E04 0x00008e04 + +#define REG_A6XX_RB_ADDR_MODE_CNTL 0x00008e05 + +#define REG_A6XX_RB_CCU_CNTL 0x00008e07 +#define A6XX_RB_CCU_CNTL_COLOR_OFFSET__MASK 0xff800000 +#define A6XX_RB_CCU_CNTL_COLOR_OFFSET__SHIFT 23 +static inline uint32_t A6XX_RB_CCU_CNTL_COLOR_OFFSET(uint32_t val) +{ + return ((val >> 12) << A6XX_RB_CCU_CNTL_COLOR_OFFSET__SHIFT) & A6XX_RB_CCU_CNTL_COLOR_OFFSET__MASK; +} +#define A6XX_RB_CCU_CNTL_DEPTH_OFFSET__MASK 0x001ff000 +#define A6XX_RB_CCU_CNTL_DEPTH_OFFSET__SHIFT 12 +static inline uint32_t A6XX_RB_CCU_CNTL_DEPTH_OFFSET(uint32_t val) +{ + return ((val >> 12) << A6XX_RB_CCU_CNTL_DEPTH_OFFSET__SHIFT) & A6XX_RB_CCU_CNTL_DEPTH_OFFSET__MASK; +} +#define A6XX_RB_CCU_CNTL_GMEM 0x00400000 +#define A6XX_RB_CCU_CNTL_UNK2 0x00000004 + +#define REG_A6XX_RB_NC_MODE_CNTL 0x00008e08 +#define A6XX_RB_NC_MODE_CNTL_MODE 0x00000001 +#define A6XX_RB_NC_MODE_CNTL_LOWER_BIT__MASK 0x00000006 +#define A6XX_RB_NC_MODE_CNTL_LOWER_BIT__SHIFT 1 +static inline uint32_t A6XX_RB_NC_MODE_CNTL_LOWER_BIT(uint32_t val) +{ + return ((val) << A6XX_RB_NC_MODE_CNTL_LOWER_BIT__SHIFT) & A6XX_RB_NC_MODE_CNTL_LOWER_BIT__MASK; +} +#define A6XX_RB_NC_MODE_CNTL_MIN_ACCESS_LENGTH 0x00000008 +#define A6XX_RB_NC_MODE_CNTL_AMSBC 0x00000010 +#define A6XX_RB_NC_MODE_CNTL_UPPER_BIT__MASK 0x00000400 +#define A6XX_RB_NC_MODE_CNTL_UPPER_BIT__SHIFT 10 +static inline uint32_t A6XX_RB_NC_MODE_CNTL_UPPER_BIT(uint32_t val) +{ + return ((val) << A6XX_RB_NC_MODE_CNTL_UPPER_BIT__SHIFT) & 
A6XX_RB_NC_MODE_CNTL_UPPER_BIT__MASK; +} +#define A6XX_RB_NC_MODE_CNTL_RGB565_PREDICATOR 0x00000800 +#define A6XX_RB_NC_MODE_CNTL_UNK12__MASK 0x00003000 +#define A6XX_RB_NC_MODE_CNTL_UNK12__SHIFT 12 +static inline uint32_t A6XX_RB_NC_MODE_CNTL_UNK12(uint32_t val) +{ + return ((val) << A6XX_RB_NC_MODE_CNTL_UNK12__SHIFT) & A6XX_RB_NC_MODE_CNTL_UNK12__MASK; +} + +static inline uint32_t REG_A6XX_RB_PERFCTR_RB_SEL(uint32_t i0) { return 0x00008e10 + 0x1*i0; } + +static inline uint32_t REG_A6XX_RB_PERFCTR_CCU_SEL(uint32_t i0) { return 0x00008e18 + 0x1*i0; } + +#define REG_A6XX_RB_UNKNOWN_8E28 0x00008e28 + +static inline uint32_t REG_A6XX_RB_PERFCTR_CMP_SEL(uint32_t i0) { return 0x00008e2c + 0x1*i0; } + +#define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST 0x00008e3b + +#define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD 0x00008e3d + +#define REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE 0x00008e50 + +#define REG_A6XX_RB_UNKNOWN_8E51 0x00008e51 +#define A6XX_RB_UNKNOWN_8E51__MASK 0xffffffff +#define A6XX_RB_UNKNOWN_8E51__SHIFT 0 +static inline uint32_t A6XX_RB_UNKNOWN_8E51(uint32_t val) +{ + return ((val) << A6XX_RB_UNKNOWN_8E51__SHIFT) & A6XX_RB_UNKNOWN_8E51__MASK; +} + +#define REG_A6XX_VPC_GS_PARAM 0x00009100 +#define A6XX_VPC_GS_PARAM_LINELENGTHLOC__MASK 0x000000ff +#define A6XX_VPC_GS_PARAM_LINELENGTHLOC__SHIFT 0 +static inline uint32_t A6XX_VPC_GS_PARAM_LINELENGTHLOC(uint32_t val) +{ + return ((val) << A6XX_VPC_GS_PARAM_LINELENGTHLOC__SHIFT) & A6XX_VPC_GS_PARAM_LINELENGTHLOC__MASK; +} + +#define REG_A6XX_VPC_VS_CLIP_CNTL 0x00009101 +#define A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff +#define A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__SHIFT 0 +static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK(uint32_t val) +{ + return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__MASK; +} +#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00 +#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8 +static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val) +{ + return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK; +} +#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000 +#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16 +static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val) +{ + return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK; +} + +#define REG_A6XX_VPC_GS_CLIP_CNTL 0x00009102 +#define A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff +#define A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__SHIFT 0 +static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK(uint32_t val) +{ + return ((val) << A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__MASK; +} +#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00 +#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8 +static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val) +{ + return ((val) << A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK; +} +#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000 +#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16 +static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val) +{ + return ((val) << A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK; +} + +#define REG_A6XX_VPC_DS_CLIP_CNTL 0x00009103 +#define A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff +#define 
A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__SHIFT 0 +static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK(uint32_t val) +{ + return ((val) << A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__MASK; +} +#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00 +#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8 +static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val) +{ + return ((val) << A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK; +} +#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000 +#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16 +static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val) +{ + return ((val) << A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK; +} + +#define REG_A6XX_VPC_VS_LAYER_CNTL 0x00009104 +#define A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__MASK 0x000000ff +#define A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__SHIFT 0 +static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_LAYERLOC(uint32_t val) +{ + return ((val) << A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__MASK; +} +#define A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__MASK 0x0000ff00 +#define A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__SHIFT 8 +static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_VIEWLOC(uint32_t val) +{ + return ((val) << A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__MASK; +} + +#define REG_A6XX_VPC_GS_LAYER_CNTL 0x00009105 +#define A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__MASK 0x000000ff +#define A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__SHIFT 0 +static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_LAYERLOC(uint32_t val) +{ + return ((val) << A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__MASK; +} +#define A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__MASK 0x0000ff00 +#define A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__SHIFT 8 +static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_VIEWLOC(uint32_t val) +{ + return ((val) << A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__MASK; +} + +#define REG_A6XX_VPC_DS_LAYER_CNTL 0x00009106 +#define A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__MASK 0x000000ff +#define A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__SHIFT 0 +static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_LAYERLOC(uint32_t val) +{ + return ((val) << A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__MASK; +} +#define A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__MASK 0x0000ff00 +#define A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__SHIFT 8 +static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_VIEWLOC(uint32_t val) +{ + return ((val) << A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__MASK; +} + +#define REG_A6XX_VPC_UNKNOWN_9107 0x00009107 +#define A6XX_VPC_UNKNOWN_9107_RASTER_DISCARD 0x00000001 +#define A6XX_VPC_UNKNOWN_9107_UNK2 0x00000004 + +#define REG_A6XX_VPC_POLYGON_MODE 0x00009108 +#define A6XX_VPC_POLYGON_MODE_MODE__MASK 0x00000003 +#define A6XX_VPC_POLYGON_MODE_MODE__SHIFT 0 +static inline uint32_t A6XX_VPC_POLYGON_MODE_MODE(enum a6xx_polygon_mode val) +{ + return ((val) << A6XX_VPC_POLYGON_MODE_MODE__SHIFT) & A6XX_VPC_POLYGON_MODE_MODE__MASK; +} + +static inline uint32_t REG_A6XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00009200 + 0x1*i0; } + +static inline uint32_t REG_A6XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00009200 + 0x1*i0; } + +static inline uint32_t REG_A6XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00009208 + 0x1*i0; } + +static inline uint32_t REG_A6XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 
0x00009208 + 0x1*i0; } + +#define REG_A6XX_VPC_UNKNOWN_9210 0x00009210 + +#define REG_A6XX_VPC_UNKNOWN_9211 0x00009211 + +static inline uint32_t REG_A6XX_VPC_VAR(uint32_t i0) { return 0x00009212 + 0x1*i0; } + +static inline uint32_t REG_A6XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x00009212 + 0x1*i0; } + +#define REG_A6XX_VPC_SO_CNTL 0x00009216 +#define A6XX_VPC_SO_CNTL_ADDR__MASK 0x000000ff +#define A6XX_VPC_SO_CNTL_ADDR__SHIFT 0 +static inline uint32_t A6XX_VPC_SO_CNTL_ADDR(uint32_t val) +{ + return ((val) << A6XX_VPC_SO_CNTL_ADDR__SHIFT) & A6XX_VPC_SO_CNTL_ADDR__MASK; +} +#define A6XX_VPC_SO_CNTL_RESET 0x00010000 + +#define REG_A6XX_VPC_SO_PROG 0x00009217 +#define A6XX_VPC_SO_PROG_A_BUF__MASK 0x00000003 +#define A6XX_VPC_SO_PROG_A_BUF__SHIFT 0 +static inline uint32_t A6XX_VPC_SO_PROG_A_BUF(uint32_t val) +{ + return ((val) << A6XX_VPC_SO_PROG_A_BUF__SHIFT) & A6XX_VPC_SO_PROG_A_BUF__MASK; +} +#define A6XX_VPC_SO_PROG_A_OFF__MASK 0x000007fc +#define A6XX_VPC_SO_PROG_A_OFF__SHIFT 2 +static inline uint32_t A6XX_VPC_SO_PROG_A_OFF(uint32_t val) +{ + return ((val >> 2) << A6XX_VPC_SO_PROG_A_OFF__SHIFT) & A6XX_VPC_SO_PROG_A_OFF__MASK; +} +#define A6XX_VPC_SO_PROG_A_EN 0x00000800 +#define A6XX_VPC_SO_PROG_B_BUF__MASK 0x00003000 +#define A6XX_VPC_SO_PROG_B_BUF__SHIFT 12 +static inline uint32_t A6XX_VPC_SO_PROG_B_BUF(uint32_t val) +{ + return ((val) << A6XX_VPC_SO_PROG_B_BUF__SHIFT) & A6XX_VPC_SO_PROG_B_BUF__MASK; +} +#define A6XX_VPC_SO_PROG_B_OFF__MASK 0x007fc000 +#define A6XX_VPC_SO_PROG_B_OFF__SHIFT 14 +static inline uint32_t A6XX_VPC_SO_PROG_B_OFF(uint32_t val) +{ + return ((val >> 2) << A6XX_VPC_SO_PROG_B_OFF__SHIFT) & A6XX_VPC_SO_PROG_B_OFF__MASK; +} +#define A6XX_VPC_SO_PROG_B_EN 0x00800000 + +#define REG_A6XX_VPC_SO_STREAM_COUNTS 0x00009218 +#define A6XX_VPC_SO_STREAM_COUNTS__MASK 0xffffffff +#define A6XX_VPC_SO_STREAM_COUNTS__SHIFT 0 +static inline uint32_t A6XX_VPC_SO_STREAM_COUNTS(uint32_t val) +{ + return ((val) << A6XX_VPC_SO_STREAM_COUNTS__SHIFT) & A6XX_VPC_SO_STREAM_COUNTS__MASK; +} + +static inline uint32_t REG_A6XX_VPC_SO(uint32_t i0) { return 0x0000921a + 0x7*i0; } + +static inline uint32_t REG_A6XX_VPC_SO_BUFFER_BASE(uint32_t i0) { return 0x0000921a + 0x7*i0; } +#define A6XX_VPC_SO_BUFFER_BASE__MASK 0xffffffff +#define A6XX_VPC_SO_BUFFER_BASE__SHIFT 0 +static inline uint32_t A6XX_VPC_SO_BUFFER_BASE(uint32_t val) +{ + return ((val) << A6XX_VPC_SO_BUFFER_BASE__SHIFT) & A6XX_VPC_SO_BUFFER_BASE__MASK; +} + +static inline uint32_t REG_A6XX_VPC_SO_BUFFER_SIZE(uint32_t i0) { return 0x0000921c + 0x7*i0; } +#define A6XX_VPC_SO_BUFFER_SIZE__MASK 0xfffffffc +#define A6XX_VPC_SO_BUFFER_SIZE__SHIFT 2 +static inline uint32_t A6XX_VPC_SO_BUFFER_SIZE(uint32_t val) +{ + return ((val >> 2) << A6XX_VPC_SO_BUFFER_SIZE__SHIFT) & A6XX_VPC_SO_BUFFER_SIZE__MASK; +} + +static inline uint32_t REG_A6XX_VPC_SO_NCOMP(uint32_t i0) { return 0x0000921d + 0x7*i0; } + +static inline uint32_t REG_A6XX_VPC_SO_BUFFER_OFFSET(uint32_t i0) { return 0x0000921e + 0x7*i0; } +#define A6XX_VPC_SO_BUFFER_OFFSET__MASK 0xfffffffc +#define A6XX_VPC_SO_BUFFER_OFFSET__SHIFT 2 +static inline uint32_t A6XX_VPC_SO_BUFFER_OFFSET(uint32_t val) +{ + return ((val >> 2) << A6XX_VPC_SO_BUFFER_OFFSET__SHIFT) & A6XX_VPC_SO_BUFFER_OFFSET__MASK; +} + +static inline uint32_t REG_A6XX_VPC_SO_FLUSH_BASE(uint32_t i0) { return 0x0000921f + 0x7*i0; } +#define A6XX_VPC_SO_FLUSH_BASE__MASK 0xffffffff +#define A6XX_VPC_SO_FLUSH_BASE__SHIFT 0 +static inline uint32_t A6XX_VPC_SO_FLUSH_BASE(uint32_t val) +{ + return ((val) << 
A6XX_VPC_SO_FLUSH_BASE__SHIFT) & A6XX_VPC_SO_FLUSH_BASE__MASK; +} + +#define REG_A6XX_VPC_POINT_COORD_INVERT 0x00009236 +#define A6XX_VPC_POINT_COORD_INVERT_INVERT 0x00000001 + +#define REG_A6XX_VPC_UNKNOWN_9300 0x00009300 + +#define REG_A6XX_VPC_VS_PACK 0x00009301 +#define A6XX_VPC_VS_PACK_STRIDE_IN_VPC__MASK 0x000000ff +#define A6XX_VPC_VS_PACK_STRIDE_IN_VPC__SHIFT 0 +static inline uint32_t A6XX_VPC_VS_PACK_STRIDE_IN_VPC(uint32_t val) +{ + return ((val) << A6XX_VPC_VS_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_VS_PACK_STRIDE_IN_VPC__MASK; +} +#define A6XX_VPC_VS_PACK_POSITIONLOC__MASK 0x0000ff00 +#define A6XX_VPC_VS_PACK_POSITIONLOC__SHIFT 8 +static inline uint32_t A6XX_VPC_VS_PACK_POSITIONLOC(uint32_t val) +{ + return ((val) << A6XX_VPC_VS_PACK_POSITIONLOC__SHIFT) & A6XX_VPC_VS_PACK_POSITIONLOC__MASK; +} +#define A6XX_VPC_VS_PACK_PSIZELOC__MASK 0x00ff0000 +#define A6XX_VPC_VS_PACK_PSIZELOC__SHIFT 16 +static inline uint32_t A6XX_VPC_VS_PACK_PSIZELOC(uint32_t val) +{ + return ((val) << A6XX_VPC_VS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_VS_PACK_PSIZELOC__MASK; +} +#define A6XX_VPC_VS_PACK_EXTRAPOS__MASK 0x0f000000 +#define A6XX_VPC_VS_PACK_EXTRAPOS__SHIFT 24 +static inline uint32_t A6XX_VPC_VS_PACK_EXTRAPOS(uint32_t val) +{ + return ((val) << A6XX_VPC_VS_PACK_EXTRAPOS__SHIFT) & A6XX_VPC_VS_PACK_EXTRAPOS__MASK; +} + +#define REG_A6XX_VPC_GS_PACK 0x00009302 +#define A6XX_VPC_GS_PACK_STRIDE_IN_VPC__MASK 0x000000ff +#define A6XX_VPC_GS_PACK_STRIDE_IN_VPC__SHIFT 0 +static inline uint32_t A6XX_VPC_GS_PACK_STRIDE_IN_VPC(uint32_t val) +{ + return ((val) << A6XX_VPC_GS_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_GS_PACK_STRIDE_IN_VPC__MASK; +} +#define A6XX_VPC_GS_PACK_POSITIONLOC__MASK 0x0000ff00 +#define A6XX_VPC_GS_PACK_POSITIONLOC__SHIFT 8 +static inline uint32_t A6XX_VPC_GS_PACK_POSITIONLOC(uint32_t val) +{ + return ((val) << A6XX_VPC_GS_PACK_POSITIONLOC__SHIFT) & A6XX_VPC_GS_PACK_POSITIONLOC__MASK; +} +#define A6XX_VPC_GS_PACK_PSIZELOC__MASK 0x00ff0000 +#define A6XX_VPC_GS_PACK_PSIZELOC__SHIFT 16 +static inline uint32_t A6XX_VPC_GS_PACK_PSIZELOC(uint32_t val) +{ + return ((val) << A6XX_VPC_GS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_GS_PACK_PSIZELOC__MASK; +} +#define A6XX_VPC_GS_PACK_EXTRAPOS__MASK 0x0f000000 +#define A6XX_VPC_GS_PACK_EXTRAPOS__SHIFT 24 +static inline uint32_t A6XX_VPC_GS_PACK_EXTRAPOS(uint32_t val) +{ + return ((val) << A6XX_VPC_GS_PACK_EXTRAPOS__SHIFT) & A6XX_VPC_GS_PACK_EXTRAPOS__MASK; +} + +#define REG_A6XX_VPC_DS_PACK 0x00009303 +#define A6XX_VPC_DS_PACK_STRIDE_IN_VPC__MASK 0x000000ff +#define A6XX_VPC_DS_PACK_STRIDE_IN_VPC__SHIFT 0 +static inline uint32_t A6XX_VPC_DS_PACK_STRIDE_IN_VPC(uint32_t val) +{ + return ((val) << A6XX_VPC_DS_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_DS_PACK_STRIDE_IN_VPC__MASK; +} +#define A6XX_VPC_DS_PACK_POSITIONLOC__MASK 0x0000ff00 +#define A6XX_VPC_DS_PACK_POSITIONLOC__SHIFT 8 +static inline uint32_t A6XX_VPC_DS_PACK_POSITIONLOC(uint32_t val) +{ + return ((val) << A6XX_VPC_DS_PACK_POSITIONLOC__SHIFT) & A6XX_VPC_DS_PACK_POSITIONLOC__MASK; +} +#define A6XX_VPC_DS_PACK_PSIZELOC__MASK 0x00ff0000 +#define A6XX_VPC_DS_PACK_PSIZELOC__SHIFT 16 +static inline uint32_t A6XX_VPC_DS_PACK_PSIZELOC(uint32_t val) +{ + return ((val) << A6XX_VPC_DS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_DS_PACK_PSIZELOC__MASK; +} +#define A6XX_VPC_DS_PACK_EXTRAPOS__MASK 0x0f000000 +#define A6XX_VPC_DS_PACK_EXTRAPOS__SHIFT 24 +static inline uint32_t A6XX_VPC_DS_PACK_EXTRAPOS(uint32_t val) +{ + return ((val) << A6XX_VPC_DS_PACK_EXTRAPOS__SHIFT) & A6XX_VPC_DS_PACK_EXTRAPOS__MASK; +} + +#define 
REG_A6XX_VPC_CNTL_0 0x00009304 +#define A6XX_VPC_CNTL_0_NUMNONPOSVAR__MASK 0x000000ff +#define A6XX_VPC_CNTL_0_NUMNONPOSVAR__SHIFT 0 +static inline uint32_t A6XX_VPC_CNTL_0_NUMNONPOSVAR(uint32_t val) +{ + return ((val) << A6XX_VPC_CNTL_0_NUMNONPOSVAR__SHIFT) & A6XX_VPC_CNTL_0_NUMNONPOSVAR__MASK; +} +#define A6XX_VPC_CNTL_0_PRIMIDLOC__MASK 0x0000ff00 +#define A6XX_VPC_CNTL_0_PRIMIDLOC__SHIFT 8 +static inline uint32_t A6XX_VPC_CNTL_0_PRIMIDLOC(uint32_t val) +{ + return ((val) << A6XX_VPC_CNTL_0_PRIMIDLOC__SHIFT) & A6XX_VPC_CNTL_0_PRIMIDLOC__MASK; +} +#define A6XX_VPC_CNTL_0_VARYING 0x00010000 +#define A6XX_VPC_CNTL_0_VIEWIDLOC__MASK 0xff000000 +#define A6XX_VPC_CNTL_0_VIEWIDLOC__SHIFT 24 +static inline uint32_t A6XX_VPC_CNTL_0_VIEWIDLOC(uint32_t val) +{ + return ((val) << A6XX_VPC_CNTL_0_VIEWIDLOC__SHIFT) & A6XX_VPC_CNTL_0_VIEWIDLOC__MASK; +} + +#define REG_A6XX_VPC_SO_STREAM_CNTL 0x00009305 +#define A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__MASK 0x00000007 +#define A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__SHIFT 0 +static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM(uint32_t val) +{ + return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__MASK; +} +#define A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__MASK 0x00000038 +#define A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__SHIFT 3 +static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM(uint32_t val) +{ + return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__MASK; +} +#define A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__MASK 0x000001c0 +#define A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__SHIFT 6 +static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM(uint32_t val) +{ + return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__MASK; +} +#define A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__MASK 0x00000e00 +#define A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__SHIFT 9 +static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM(uint32_t val) +{ + return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__MASK; +} +#define A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__MASK 0x00078000 +#define A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__SHIFT 15 +static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE(uint32_t val) +{ + return ((val) << A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__MASK; +} + +#define REG_A6XX_VPC_SO_DISABLE 0x00009306 +#define A6XX_VPC_SO_DISABLE_DISABLE 0x00000001 + +#define REG_A6XX_VPC_UNKNOWN_9600 0x00009600 + +#define REG_A6XX_VPC_ADDR_MODE_CNTL 0x00009601 + +#define REG_A6XX_VPC_UNKNOWN_9602 0x00009602 + +#define REG_A6XX_VPC_UNKNOWN_9603 0x00009603 + +static inline uint32_t REG_A6XX_VPC_PERFCTR_VPC_SEL(uint32_t i0) { return 0x00009604 + 0x1*i0; } + +#define REG_A6XX_PC_TESS_NUM_VERTEX 0x00009800 + +#define REG_A6XX_PC_HS_INPUT_SIZE 0x00009801 +#define A6XX_PC_HS_INPUT_SIZE_SIZE__MASK 0x000007ff +#define A6XX_PC_HS_INPUT_SIZE_SIZE__SHIFT 0 +static inline uint32_t A6XX_PC_HS_INPUT_SIZE_SIZE(uint32_t val) +{ + return ((val) << A6XX_PC_HS_INPUT_SIZE_SIZE__SHIFT) & A6XX_PC_HS_INPUT_SIZE_SIZE__MASK; +} +#define A6XX_PC_HS_INPUT_SIZE_UNK13__MASK 0x00002000 +#define A6XX_PC_HS_INPUT_SIZE_UNK13__SHIFT 13 +static inline uint32_t A6XX_PC_HS_INPUT_SIZE_UNK13(uint32_t val) +{ + return ((val) << A6XX_PC_HS_INPUT_SIZE_UNK13__SHIFT) & A6XX_PC_HS_INPUT_SIZE_UNK13__MASK; +} + +#define REG_A6XX_PC_TESS_CNTL 0x00009802 +#define A6XX_PC_TESS_CNTL_SPACING__MASK 0x00000003 +#define 
A6XX_PC_TESS_CNTL_SPACING__SHIFT 0 +static inline uint32_t A6XX_PC_TESS_CNTL_SPACING(enum a6xx_tess_spacing val) +{ + return ((val) << A6XX_PC_TESS_CNTL_SPACING__SHIFT) & A6XX_PC_TESS_CNTL_SPACING__MASK; +} +#define A6XX_PC_TESS_CNTL_OUTPUT__MASK 0x0000000c +#define A6XX_PC_TESS_CNTL_OUTPUT__SHIFT 2 +static inline uint32_t A6XX_PC_TESS_CNTL_OUTPUT(enum a6xx_tess_output val) +{ + return ((val) << A6XX_PC_TESS_CNTL_OUTPUT__SHIFT) & A6XX_PC_TESS_CNTL_OUTPUT__MASK; +} + +#define REG_A6XX_PC_RESTART_INDEX 0x00009803 + +#define REG_A6XX_PC_MODE_CNTL 0x00009804 + +#define REG_A6XX_PC_POWER_CNTL 0x00009805 + +#define REG_A6XX_PC_PRIMID_PASSTHRU 0x00009806 + +#define REG_A6XX_PC_SO_STREAM_CNTL 0x00009808 +#define A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE 0x00008000 + +#define REG_A6XX_PC_DGEN_SU_CONSERVATIVE_RAS_CNTL 0x0000980a +#define A6XX_PC_DGEN_SU_CONSERVATIVE_RAS_CNTL_CONSERVATIVERASEN 0x00000001 + +#define REG_A6XX_PC_DRAW_CMD 0x00009840 +#define A6XX_PC_DRAW_CMD_STATE_ID__MASK 0x000000ff +#define A6XX_PC_DRAW_CMD_STATE_ID__SHIFT 0 +static inline uint32_t A6XX_PC_DRAW_CMD_STATE_ID(uint32_t val) +{ + return ((val) << A6XX_PC_DRAW_CMD_STATE_ID__SHIFT) & A6XX_PC_DRAW_CMD_STATE_ID__MASK; +} + +#define REG_A6XX_PC_DISPATCH_CMD 0x00009841 +#define A6XX_PC_DISPATCH_CMD_STATE_ID__MASK 0x000000ff +#define A6XX_PC_DISPATCH_CMD_STATE_ID__SHIFT 0 +static inline uint32_t A6XX_PC_DISPATCH_CMD_STATE_ID(uint32_t val) +{ + return ((val) << A6XX_PC_DISPATCH_CMD_STATE_ID__SHIFT) & A6XX_PC_DISPATCH_CMD_STATE_ID__MASK; +} + +#define REG_A6XX_PC_EVENT_CMD 0x00009842 +#define A6XX_PC_EVENT_CMD_STATE_ID__MASK 0x00ff0000 +#define A6XX_PC_EVENT_CMD_STATE_ID__SHIFT 16 +static inline uint32_t A6XX_PC_EVENT_CMD_STATE_ID(uint32_t val) +{ + return ((val) << A6XX_PC_EVENT_CMD_STATE_ID__SHIFT) & A6XX_PC_EVENT_CMD_STATE_ID__MASK; +} +#define A6XX_PC_EVENT_CMD_EVENT__MASK 0x0000007f +#define A6XX_PC_EVENT_CMD_EVENT__SHIFT 0 +static inline uint32_t A6XX_PC_EVENT_CMD_EVENT(enum vgt_event_type val) +{ + return ((val) << A6XX_PC_EVENT_CMD_EVENT__SHIFT) & A6XX_PC_EVENT_CMD_EVENT__MASK; +} + +#define REG_A6XX_PC_MARKER 0x00009880 + +#define REG_A6XX_PC_POLYGON_MODE 0x00009981 +#define A6XX_PC_POLYGON_MODE_MODE__MASK 0x00000003 +#define A6XX_PC_POLYGON_MODE_MODE__SHIFT 0 +static inline uint32_t A6XX_PC_POLYGON_MODE_MODE(enum a6xx_polygon_mode val) +{ + return ((val) << A6XX_PC_POLYGON_MODE_MODE__SHIFT) & A6XX_PC_POLYGON_MODE_MODE__MASK; +} + +#define REG_A6XX_PC_RASTER_CNTL 0x00009980 +#define A6XX_PC_RASTER_CNTL_STREAM__MASK 0x00000003 +#define A6XX_PC_RASTER_CNTL_STREAM__SHIFT 0 +static inline uint32_t A6XX_PC_RASTER_CNTL_STREAM(uint32_t val) +{ + return ((val) << A6XX_PC_RASTER_CNTL_STREAM__SHIFT) & A6XX_PC_RASTER_CNTL_STREAM__MASK; +} +#define A6XX_PC_RASTER_CNTL_DISCARD 0x00000004 + +#define REG_A6XX_PC_PRIMITIVE_CNTL_0 0x00009b00 +#define A6XX_PC_PRIMITIVE_CNTL_0_PRIMITIVE_RESTART 0x00000001 +#define A6XX_PC_PRIMITIVE_CNTL_0_PROVOKING_VTX_LAST 0x00000002 +#define A6XX_PC_PRIMITIVE_CNTL_0_TESS_UPPER_LEFT_DOMAIN_ORIGIN 0x00000004 +#define A6XX_PC_PRIMITIVE_CNTL_0_UNK3 0x00000008 + +#define REG_A6XX_PC_VS_OUT_CNTL 0x00009b01 +#define A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff +#define A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0 +static inline uint32_t A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val) +{ + return ((val) << A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__MASK; +} +#define A6XX_PC_VS_OUT_CNTL_PSIZE 0x00000100 +#define A6XX_PC_VS_OUT_CNTL_LAYER 0x00000200 +#define 
A6XX_PC_VS_OUT_CNTL_VIEW 0x00000400 +#define A6XX_PC_VS_OUT_CNTL_PRIMITIVE_ID 0x00000800 +#define A6XX_PC_VS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000 +#define A6XX_PC_VS_OUT_CNTL_CLIP_MASK__SHIFT 16 +static inline uint32_t A6XX_PC_VS_OUT_CNTL_CLIP_MASK(uint32_t val) +{ + return ((val) << A6XX_PC_VS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_VS_OUT_CNTL_CLIP_MASK__MASK; +} + +#define REG_A6XX_PC_GS_OUT_CNTL 0x00009b02 +#define A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff +#define A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0 +static inline uint32_t A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val) +{ + return ((val) << A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__MASK; +} +#define A6XX_PC_GS_OUT_CNTL_PSIZE 0x00000100 +#define A6XX_PC_GS_OUT_CNTL_LAYER 0x00000200 +#define A6XX_PC_GS_OUT_CNTL_VIEW 0x00000400 +#define A6XX_PC_GS_OUT_CNTL_PRIMITIVE_ID 0x00000800 +#define A6XX_PC_GS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000 +#define A6XX_PC_GS_OUT_CNTL_CLIP_MASK__SHIFT 16 +static inline uint32_t A6XX_PC_GS_OUT_CNTL_CLIP_MASK(uint32_t val) +{ + return ((val) << A6XX_PC_GS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_GS_OUT_CNTL_CLIP_MASK__MASK; +} + +#define REG_A6XX_PC_HS_OUT_CNTL 0x00009b03 +#define A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff +#define A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0 +static inline uint32_t A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val) +{ + return ((val) << A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC__MASK; +} +#define A6XX_PC_HS_OUT_CNTL_PSIZE 0x00000100 +#define A6XX_PC_HS_OUT_CNTL_LAYER 0x00000200 +#define A6XX_PC_HS_OUT_CNTL_VIEW 0x00000400 +#define A6XX_PC_HS_OUT_CNTL_PRIMITIVE_ID 0x00000800 +#define A6XX_PC_HS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000 +#define A6XX_PC_HS_OUT_CNTL_CLIP_MASK__SHIFT 16 +static inline uint32_t A6XX_PC_HS_OUT_CNTL_CLIP_MASK(uint32_t val) +{ + return ((val) << A6XX_PC_HS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_HS_OUT_CNTL_CLIP_MASK__MASK; +} + +#define REG_A6XX_PC_DS_OUT_CNTL 0x00009b04 +#define A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff +#define A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0 +static inline uint32_t A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val) +{ + return ((val) << A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__MASK; +} +#define A6XX_PC_DS_OUT_CNTL_PSIZE 0x00000100 +#define A6XX_PC_DS_OUT_CNTL_LAYER 0x00000200 +#define A6XX_PC_DS_OUT_CNTL_VIEW 0x00000400 +#define A6XX_PC_DS_OUT_CNTL_PRIMITIVE_ID 0x00000800 +#define A6XX_PC_DS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000 +#define A6XX_PC_DS_OUT_CNTL_CLIP_MASK__SHIFT 16 +static inline uint32_t A6XX_PC_DS_OUT_CNTL_CLIP_MASK(uint32_t val) +{ + return ((val) << A6XX_PC_DS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_DS_OUT_CNTL_CLIP_MASK__MASK; +} + +#define REG_A6XX_PC_PRIMITIVE_CNTL_5 0x00009b05 +#define A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__MASK 0x000000ff +#define A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__SHIFT 0 +static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT(uint32_t val) +{ + return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__MASK; +} +#define A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__MASK 0x00007c00 +#define A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__SHIFT 10 +static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS(uint32_t val) +{ + return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__MASK; +} +#define A6XX_PC_PRIMITIVE_CNTL_5_LINELENGTHEN 0x00008000 +#define 
A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__MASK 0x00030000 +#define A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__SHIFT 16 +static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT(enum a6xx_tess_output val) +{ + return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__MASK; +} +#define A6XX_PC_PRIMITIVE_CNTL_5_UNK18__MASK 0x00040000 +#define A6XX_PC_PRIMITIVE_CNTL_5_UNK18__SHIFT 18 +static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_UNK18(uint32_t val) +{ + return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_UNK18__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_UNK18__MASK; +} + +#define REG_A6XX_PC_PRIMITIVE_CNTL_6 0x00009b06 +#define A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__MASK 0x000007ff +#define A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__SHIFT 0 +static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC(uint32_t val) +{ + return ((val) << A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__MASK; +} + +#define REG_A6XX_PC_MULTIVIEW_CNTL 0x00009b07 +#define A6XX_PC_MULTIVIEW_CNTL_ENABLE 0x00000001 +#define A6XX_PC_MULTIVIEW_CNTL_DISABLEMULTIPOS 0x00000002 +#define A6XX_PC_MULTIVIEW_CNTL_VIEWS__MASK 0x0000007c +#define A6XX_PC_MULTIVIEW_CNTL_VIEWS__SHIFT 2 +static inline uint32_t A6XX_PC_MULTIVIEW_CNTL_VIEWS(uint32_t val) +{ + return ((val) << A6XX_PC_MULTIVIEW_CNTL_VIEWS__SHIFT) & A6XX_PC_MULTIVIEW_CNTL_VIEWS__MASK; +} + +#define REG_A6XX_PC_MULTIVIEW_MASK 0x00009b08 + +#define REG_A6XX_PC_2D_EVENT_CMD 0x00009c00 +#define A6XX_PC_2D_EVENT_CMD_EVENT__MASK 0x0000007f +#define A6XX_PC_2D_EVENT_CMD_EVENT__SHIFT 0 +static inline uint32_t A6XX_PC_2D_EVENT_CMD_EVENT(enum vgt_event_type val) +{ + return ((val) << A6XX_PC_2D_EVENT_CMD_EVENT__SHIFT) & A6XX_PC_2D_EVENT_CMD_EVENT__MASK; +} +#define A6XX_PC_2D_EVENT_CMD_STATE_ID__MASK 0x0000ff00 +#define A6XX_PC_2D_EVENT_CMD_STATE_ID__SHIFT 8 +static inline uint32_t A6XX_PC_2D_EVENT_CMD_STATE_ID(uint32_t val) +{ + return ((val) << A6XX_PC_2D_EVENT_CMD_STATE_ID__SHIFT) & A6XX_PC_2D_EVENT_CMD_STATE_ID__MASK; +} + +#define REG_A6XX_PC_DBG_ECO_CNTL 0x00009e00 + +#define REG_A6XX_PC_ADDR_MODE_CNTL 0x00009e01 + +#define REG_A6XX_PC_DRAW_INDX_BASE 0x00009e04 + +#define REG_A6XX_PC_DRAW_FIRST_INDX 0x00009e06 + +#define REG_A6XX_PC_DRAW_MAX_INDICES 0x00009e07 + +#define REG_A6XX_PC_TESSFACTOR_ADDR 0x00009e08 +#define A6XX_PC_TESSFACTOR_ADDR__MASK 0xffffffff +#define A6XX_PC_TESSFACTOR_ADDR__SHIFT 0 +static inline uint32_t A6XX_PC_TESSFACTOR_ADDR(uint32_t val) +{ + return ((val) << A6XX_PC_TESSFACTOR_ADDR__SHIFT) & A6XX_PC_TESSFACTOR_ADDR__MASK; +} + +#define REG_A6XX_PC_DRAW_INITIATOR 0x00009e0b +#define A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f +#define A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0 +static inline uint32_t A6XX_PC_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val) +{ + return ((val) << A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__MASK; +} +#define A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0 +#define A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6 +static inline uint32_t A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val) +{ + return ((val) << A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__MASK; +} +#define A6XX_PC_DRAW_INITIATOR_VIS_CULL__MASK 0x00000300 +#define A6XX_PC_DRAW_INITIATOR_VIS_CULL__SHIFT 8 +static inline uint32_t A6XX_PC_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val) +{ + return ((val) << A6XX_PC_DRAW_INITIATOR_VIS_CULL__SHIFT) & A6XX_PC_DRAW_INITIATOR_VIS_CULL__MASK; +} +#define 
A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000c00 +#define A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__SHIFT 10 +static inline uint32_t A6XX_PC_DRAW_INITIATOR_INDEX_SIZE(enum a4xx_index_size val) +{ + return ((val) << A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__MASK; +} +#define A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__MASK 0x00003000 +#define A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__SHIFT 12 +static inline uint32_t A6XX_PC_DRAW_INITIATOR_PATCH_TYPE(enum a6xx_patch_type val) +{ + return ((val) << A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__SHIFT) & A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__MASK; +} +#define A6XX_PC_DRAW_INITIATOR_GS_ENABLE 0x00010000 +#define A6XX_PC_DRAW_INITIATOR_TESS_ENABLE 0x00020000 + +#define REG_A6XX_PC_DRAW_NUM_INSTANCES 0x00009e0c + +#define REG_A6XX_PC_DRAW_NUM_INDICES 0x00009e0d + +#define REG_A6XX_PC_VSTREAM_CONTROL 0x00009e11 +#define A6XX_PC_VSTREAM_CONTROL_UNK0__MASK 0x0000ffff +#define A6XX_PC_VSTREAM_CONTROL_UNK0__SHIFT 0 +static inline uint32_t A6XX_PC_VSTREAM_CONTROL_UNK0(uint32_t val) +{ + return ((val) << A6XX_PC_VSTREAM_CONTROL_UNK0__SHIFT) & A6XX_PC_VSTREAM_CONTROL_UNK0__MASK; +} +#define A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__MASK 0x003f0000 +#define A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__SHIFT 16 +static inline uint32_t A6XX_PC_VSTREAM_CONTROL_VSC_SIZE(uint32_t val) +{ + return ((val) << A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__SHIFT) & A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__MASK; +} +#define A6XX_PC_VSTREAM_CONTROL_VSC_N__MASK 0x07c00000 +#define A6XX_PC_VSTREAM_CONTROL_VSC_N__SHIFT 22 +static inline uint32_t A6XX_PC_VSTREAM_CONTROL_VSC_N(uint32_t val) +{ + return ((val) << A6XX_PC_VSTREAM_CONTROL_VSC_N__SHIFT) & A6XX_PC_VSTREAM_CONTROL_VSC_N__MASK; +} + +#define REG_A6XX_PC_BIN_PRIM_STRM 0x00009e12 +#define A6XX_PC_BIN_PRIM_STRM__MASK 0xffffffff +#define A6XX_PC_BIN_PRIM_STRM__SHIFT 0 +static inline uint32_t A6XX_PC_BIN_PRIM_STRM(uint32_t val) +{ + return ((val) << A6XX_PC_BIN_PRIM_STRM__SHIFT) & A6XX_PC_BIN_PRIM_STRM__MASK; +} + +#define REG_A6XX_PC_BIN_DRAW_STRM 0x00009e14 +#define A6XX_PC_BIN_DRAW_STRM__MASK 0xffffffff +#define A6XX_PC_BIN_DRAW_STRM__SHIFT 0 +static inline uint32_t A6XX_PC_BIN_DRAW_STRM(uint32_t val) +{ + return ((val) << A6XX_PC_BIN_DRAW_STRM__SHIFT) & A6XX_PC_BIN_DRAW_STRM__MASK; +} + +#define REG_A6XX_PC_VISIBILITY_OVERRIDE 0x00009e1c +#define A6XX_PC_VISIBILITY_OVERRIDE_OVERRIDE 0x00000001 + +static inline uint32_t REG_A6XX_PC_PERFCTR_PC_SEL(uint32_t i0) { return 0x00009e34 + 0x1*i0; } + +#define REG_A6XX_PC_UNKNOWN_9E72 0x00009e72 + +#define REG_A6XX_VFD_CONTROL_0 0x0000a000 +#define A6XX_VFD_CONTROL_0_FETCH_CNT__MASK 0x0000003f +#define A6XX_VFD_CONTROL_0_FETCH_CNT__SHIFT 0 +static inline uint32_t A6XX_VFD_CONTROL_0_FETCH_CNT(uint32_t val) +{ + return ((val) << A6XX_VFD_CONTROL_0_FETCH_CNT__SHIFT) & A6XX_VFD_CONTROL_0_FETCH_CNT__MASK; +} +#define A6XX_VFD_CONTROL_0_DECODE_CNT__MASK 0x00003f00 +#define A6XX_VFD_CONTROL_0_DECODE_CNT__SHIFT 8 +static inline uint32_t A6XX_VFD_CONTROL_0_DECODE_CNT(uint32_t val) +{ + return ((val) << A6XX_VFD_CONTROL_0_DECODE_CNT__SHIFT) & A6XX_VFD_CONTROL_0_DECODE_CNT__MASK; +} + +#define REG_A6XX_VFD_CONTROL_1 0x0000a001 +#define A6XX_VFD_CONTROL_1_REGID4VTX__MASK 0x000000ff +#define A6XX_VFD_CONTROL_1_REGID4VTX__SHIFT 0 +static inline uint32_t A6XX_VFD_CONTROL_1_REGID4VTX(uint32_t val) +{ + return ((val) << A6XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A6XX_VFD_CONTROL_1_REGID4VTX__MASK; +} +#define A6XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00 +#define A6XX_VFD_CONTROL_1_REGID4INST__SHIFT 8 +static inline uint32_t 
A6XX_VFD_CONTROL_1_REGID4INST(uint32_t val) +{ + return ((val) << A6XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A6XX_VFD_CONTROL_1_REGID4INST__MASK; +} +#define A6XX_VFD_CONTROL_1_REGID4PRIMID__MASK 0x00ff0000 +#define A6XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT 16 +static inline uint32_t A6XX_VFD_CONTROL_1_REGID4PRIMID(uint32_t val) +{ + return ((val) << A6XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT) & A6XX_VFD_CONTROL_1_REGID4PRIMID__MASK; +} +#define A6XX_VFD_CONTROL_1_REGID4VIEWID__MASK 0xff000000 +#define A6XX_VFD_CONTROL_1_REGID4VIEWID__SHIFT 24 +static inline uint32_t A6XX_VFD_CONTROL_1_REGID4VIEWID(uint32_t val) +{ + return ((val) << A6XX_VFD_CONTROL_1_REGID4VIEWID__SHIFT) & A6XX_VFD_CONTROL_1_REGID4VIEWID__MASK; +} + +#define REG_A6XX_VFD_CONTROL_2 0x0000a002 +#define A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID__MASK 0x000000ff +#define A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID__SHIFT 0 +static inline uint32_t A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID(uint32_t val) +{ + return ((val) << A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID__SHIFT) & A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID__MASK; +} +#define A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__MASK 0x0000ff00 +#define A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__SHIFT 8 +static inline uint32_t A6XX_VFD_CONTROL_2_REGID_INVOCATIONID(uint32_t val) +{ + return ((val) << A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__SHIFT) & A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__MASK; +} + +#define REG_A6XX_VFD_CONTROL_3 0x0000a003 +#define A6XX_VFD_CONTROL_3_REGID_DSPRIMID__MASK 0x000000ff +#define A6XX_VFD_CONTROL_3_REGID_DSPRIMID__SHIFT 0 +static inline uint32_t A6XX_VFD_CONTROL_3_REGID_DSPRIMID(uint32_t val) +{ + return ((val) << A6XX_VFD_CONTROL_3_REGID_DSPRIMID__SHIFT) & A6XX_VFD_CONTROL_3_REGID_DSPRIMID__MASK; +} +#define A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID__MASK 0x0000ff00 +#define A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID__SHIFT 8 +static inline uint32_t A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID(uint32_t val) +{ + return ((val) << A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID__SHIFT) & A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID__MASK; +} +#define A6XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000 +#define A6XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16 +static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val) +{ + return ((val) << A6XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A6XX_VFD_CONTROL_3_REGID_TESSX__MASK; +} +#define A6XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000 +#define A6XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24 +static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val) +{ + return ((val) << A6XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A6XX_VFD_CONTROL_3_REGID_TESSY__MASK; +} + +#define REG_A6XX_VFD_CONTROL_4 0x0000a004 +#define A6XX_VFD_CONTROL_4_UNK0__MASK 0x000000ff +#define A6XX_VFD_CONTROL_4_UNK0__SHIFT 0 +static inline uint32_t A6XX_VFD_CONTROL_4_UNK0(uint32_t val) +{ + return ((val) << A6XX_VFD_CONTROL_4_UNK0__SHIFT) & A6XX_VFD_CONTROL_4_UNK0__MASK; +} + +#define REG_A6XX_VFD_CONTROL_5 0x0000a005 +#define A6XX_VFD_CONTROL_5_REGID_GSHEADER__MASK 0x000000ff +#define A6XX_VFD_CONTROL_5_REGID_GSHEADER__SHIFT 0 +static inline uint32_t A6XX_VFD_CONTROL_5_REGID_GSHEADER(uint32_t val) +{ + return ((val) << A6XX_VFD_CONTROL_5_REGID_GSHEADER__SHIFT) & A6XX_VFD_CONTROL_5_REGID_GSHEADER__MASK; +} +#define A6XX_VFD_CONTROL_5_UNK8__MASK 0x0000ff00 +#define A6XX_VFD_CONTROL_5_UNK8__SHIFT 8 +static inline uint32_t A6XX_VFD_CONTROL_5_UNK8(uint32_t val) +{ + return ((val) << A6XX_VFD_CONTROL_5_UNK8__SHIFT) & A6XX_VFD_CONTROL_5_UNK8__MASK; +} + +#define REG_A6XX_VFD_CONTROL_6 0x0000a006 +#define 
A6XX_VFD_CONTROL_6_PRIMID_PASSTHRU 0x00000001 + +#define REG_A6XX_VFD_MODE_CNTL 0x0000a007 +#define A6XX_VFD_MODE_CNTL_RENDER_MODE__MASK 0x00000007 +#define A6XX_VFD_MODE_CNTL_RENDER_MODE__SHIFT 0 +static inline uint32_t A6XX_VFD_MODE_CNTL_RENDER_MODE(enum a6xx_render_mode val) +{ + return ((val) << A6XX_VFD_MODE_CNTL_RENDER_MODE__SHIFT) & A6XX_VFD_MODE_CNTL_RENDER_MODE__MASK; +} + +#define REG_A6XX_VFD_MULTIVIEW_CNTL 0x0000a008 +#define A6XX_VFD_MULTIVIEW_CNTL_ENABLE 0x00000001 +#define A6XX_VFD_MULTIVIEW_CNTL_DISABLEMULTIPOS 0x00000002 +#define A6XX_VFD_MULTIVIEW_CNTL_VIEWS__MASK 0x0000007c +#define A6XX_VFD_MULTIVIEW_CNTL_VIEWS__SHIFT 2 +static inline uint32_t A6XX_VFD_MULTIVIEW_CNTL_VIEWS(uint32_t val) +{ + return ((val) << A6XX_VFD_MULTIVIEW_CNTL_VIEWS__SHIFT) & A6XX_VFD_MULTIVIEW_CNTL_VIEWS__MASK; +} + +#define REG_A6XX_VFD_ADD_OFFSET 0x0000a009 +#define A6XX_VFD_ADD_OFFSET_VERTEX 0x00000001 +#define A6XX_VFD_ADD_OFFSET_INSTANCE 0x00000002 + +#define REG_A6XX_VFD_INDEX_OFFSET 0x0000a00e + +#define REG_A6XX_VFD_INSTANCE_START_OFFSET 0x0000a00f + +static inline uint32_t REG_A6XX_VFD_FETCH(uint32_t i0) { return 0x0000a010 + 0x4*i0; } + +static inline uint32_t REG_A6XX_VFD_FETCH_BASE(uint32_t i0) { return 0x0000a010 + 0x4*i0; } +#define A6XX_VFD_FETCH_BASE__MASK 0xffffffff +#define A6XX_VFD_FETCH_BASE__SHIFT 0 +static inline uint32_t A6XX_VFD_FETCH_BASE(uint32_t val) +{ + return ((val) << A6XX_VFD_FETCH_BASE__SHIFT) & A6XX_VFD_FETCH_BASE__MASK; +} + +static inline uint32_t REG_A6XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000a012 + 0x4*i0; } + +static inline uint32_t REG_A6XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000a013 + 0x4*i0; } + +static inline uint32_t REG_A6XX_VFD_DECODE(uint32_t i0) { return 0x0000a090 + 0x2*i0; } + +static inline uint32_t REG_A6XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000a090 + 0x2*i0; } +#define A6XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f +#define A6XX_VFD_DECODE_INSTR_IDX__SHIFT 0 +static inline uint32_t A6XX_VFD_DECODE_INSTR_IDX(uint32_t val) +{ + return ((val) << A6XX_VFD_DECODE_INSTR_IDX__SHIFT) & A6XX_VFD_DECODE_INSTR_IDX__MASK; +} +#define A6XX_VFD_DECODE_INSTR_OFFSET__MASK 0x0001ffe0 +#define A6XX_VFD_DECODE_INSTR_OFFSET__SHIFT 5 +static inline uint32_t A6XX_VFD_DECODE_INSTR_OFFSET(uint32_t val) +{ + return ((val) << A6XX_VFD_DECODE_INSTR_OFFSET__SHIFT) & A6XX_VFD_DECODE_INSTR_OFFSET__MASK; +} +#define A6XX_VFD_DECODE_INSTR_INSTANCED 0x00020000 +#define A6XX_VFD_DECODE_INSTR_FORMAT__MASK 0x0ff00000 +#define A6XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20 +static inline uint32_t A6XX_VFD_DECODE_INSTR_FORMAT(enum a6xx_format val) +{ + return ((val) << A6XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A6XX_VFD_DECODE_INSTR_FORMAT__MASK; +} +#define A6XX_VFD_DECODE_INSTR_SWAP__MASK 0x30000000 +#define A6XX_VFD_DECODE_INSTR_SWAP__SHIFT 28 +static inline uint32_t A6XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val) +{ + return ((val) << A6XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A6XX_VFD_DECODE_INSTR_SWAP__MASK; +} +#define A6XX_VFD_DECODE_INSTR_UNK30 0x40000000 +#define A6XX_VFD_DECODE_INSTR_FLOAT 0x80000000 + +static inline uint32_t REG_A6XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000a091 + 0x2*i0; } + +static inline uint32_t REG_A6XX_VFD_DEST_CNTL(uint32_t i0) { return 0x0000a0d0 + 0x1*i0; } + +static inline uint32_t REG_A6XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000a0d0 + 0x1*i0; } +#define A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f +#define A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT 0 +static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t 
val)
+{
+	return ((val) << A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK;
+}
+#define A6XX_VFD_DEST_CNTL_INSTR_REGID__MASK 0x00000ff0
+#define A6XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT 4
+static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
+{
+	return ((val) << A6XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A6XX_VFD_DEST_CNTL_INSTR_REGID__MASK;
+}
+
+#define REG_A6XX_VFD_POWER_CNTL 0x0000a0f8
+
+#define REG_A6XX_VFD_ADDR_MODE_CNTL 0x0000a601
+
+static inline uint32_t REG_A6XX_VFD_PERFCTR_VFD_SEL(uint32_t i0) { return 0x0000a610 + 0x1*i0; }
+
+#define REG_A6XX_SP_VS_CTRL_REG0 0x0000a800
+#define A6XX_SP_VS_CTRL_REG0_MERGEDREGS 0x00100000
+#define A6XX_SP_VS_CTRL_REG0_UNK21 0x00200000
+#define A6XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+	return ((val) << A6XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+	return ((val) << A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+	return ((val) << A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_UNK13 0x00002000
+#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+	return ((val) << A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A6XX_SP_VS_BRANCH_COND 0x0000a801
+
+#define REG_A6XX_SP_VS_PRIMITIVE_CNTL 0x0000a802
+#define A6XX_SP_VS_PRIMITIVE_CNTL_OUT__MASK 0x0000003f
+#define A6XX_SP_VS_PRIMITIVE_CNTL_OUT__SHIFT 0
+static inline uint32_t A6XX_SP_VS_PRIMITIVE_CNTL_OUT(uint32_t val)
+{
+	return ((val) << A6XX_SP_VS_PRIMITIVE_CNTL_OUT__SHIFT) & A6XX_SP_VS_PRIMITIVE_CNTL_OUT__MASK;
+}
+#define A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__MASK 0x00003fc0
+#define A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT 6
+static inline uint32_t A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID(uint32_t val)
+{
+	return ((val) << A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT) & A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_VS_OUT(uint32_t i0) { return 0x0000a803 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000a803 + 0x1*i0; }
+#define A6XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A6XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+	return ((val) << A6XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A6XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A6XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A6XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+	return ((val) << A6XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A6XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A6XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A6XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+	return ((val) << A6XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A6XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A6XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A6XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+	return ((val) << A6XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_VS_VPC_DST(uint32_t i0) { return 0x0000a813 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000a813 + 0x1*i0; }
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+	return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+	return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+	return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+	return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A6XX_SP_VS_OBJ_FIRST_EXEC_OFFSET 0x0000a81b
+
+#define REG_A6XX_SP_VS_OBJ_START 0x0000a81c
+#define A6XX_SP_VS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_VS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_VS_OBJ_START(uint32_t val)
+{
+	return ((val) << A6XX_SP_VS_OBJ_START__SHIFT) & A6XX_SP_VS_OBJ_START__MASK;
+}
+
+#define REG_A6XX_SP_VS_PVT_MEM_PARAM 0x0000a81e
+#define A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+	return ((val >> 9) << A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+	return ((val) << A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_VS_PVT_MEM_ADDR 0x0000a81f
+#define A6XX_SP_VS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_VS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_VS_PVT_MEM_ADDR(uint32_t val)
+{
+	return ((val) << A6XX_SP_VS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_VS_PVT_MEM_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_VS_PVT_MEM_SIZE 0x0000a821
+#define A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+	return ((val >> 12) << A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_VS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
+
+#define REG_A6XX_SP_VS_TEX_COUNT 0x0000a822
+
+#define REG_A6XX_SP_VS_CONFIG 0x0000a823
+#define A6XX_SP_VS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_VS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_VS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_VS_CONFIG_BINDLESS_UBO 0x00000008
+#define A6XX_SP_VS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_VS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_VS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_VS_CONFIG_NTEX(uint32_t val)
+{
+	return ((val) << A6XX_SP_VS_CONFIG_NTEX__SHIFT) & A6XX_SP_VS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_VS_CONFIG_NSAMP__MASK 0x003e0000
+#define A6XX_SP_VS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_VS_CONFIG_NSAMP(uint32_t val)
+{
+	return ((val) << A6XX_SP_VS_CONFIG_NSAMP__SHIFT) & A6XX_SP_VS_CONFIG_NSAMP__MASK;
+}
+#define A6XX_SP_VS_CONFIG_NIBO__MASK 0x1fc00000
+#define A6XX_SP_VS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_VS_CONFIG_NIBO(uint32_t val)
+{
+	return ((val) << A6XX_SP_VS_CONFIG_NIBO__SHIFT) & A6XX_SP_VS_CONFIG_NIBO__MASK;
+}
+
+#define REG_A6XX_SP_VS_INSTRLEN 0x0000a824
+
+#define REG_A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET 0x0000a825
+#define A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
+{
+	return ((val >> 11) << A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_SP_HS_CTRL_REG0 0x0000a830
+#define A6XX_SP_HS_CTRL_REG0_UNK20 0x00100000
+#define A6XX_SP_HS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_HS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+	return ((val) << A6XX_SP_HS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_HS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+	return ((val) << A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+	return ((val) << A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_UNK13 0x00002000
+#define A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+	return ((val) << A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A6XX_SP_HS_WAVE_INPUT_SIZE 0x0000a831
+
+#define REG_A6XX_SP_HS_BRANCH_COND 0x0000a832
+
+#define REG_A6XX_SP_HS_OBJ_FIRST_EXEC_OFFSET 0x0000a833
+
+#define REG_A6XX_SP_HS_OBJ_START 0x0000a834
+#define A6XX_SP_HS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_HS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_HS_OBJ_START(uint32_t val)
+{
+	return ((val) << A6XX_SP_HS_OBJ_START__SHIFT) & A6XX_SP_HS_OBJ_START__MASK;
+}
+
+#define REG_A6XX_SP_HS_PVT_MEM_PARAM 0x0000a836
+#define A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+	return ((val >> 9) << A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+	return ((val) << A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_HS_PVT_MEM_ADDR 0x0000a837
+#define A6XX_SP_HS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_HS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_HS_PVT_MEM_ADDR(uint32_t val)
+{
+	return ((val) << A6XX_SP_HS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_HS_PVT_MEM_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_HS_PVT_MEM_SIZE 0x0000a839
+#define A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+	return ((val >> 12) << A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_HS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
+
+#define REG_A6XX_SP_HS_TEX_COUNT 0x0000a83a
+
+#define REG_A6XX_SP_HS_CONFIG 0x0000a83b
+#define A6XX_SP_HS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_HS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_HS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_HS_CONFIG_BINDLESS_UBO 0x00000008
+#define A6XX_SP_HS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_HS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_HS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_HS_CONFIG_NTEX(uint32_t val)
+{
+	return ((val) << A6XX_SP_HS_CONFIG_NTEX__SHIFT) & A6XX_SP_HS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_HS_CONFIG_NSAMP__MASK 0x003e0000
+#define A6XX_SP_HS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_HS_CONFIG_NSAMP(uint32_t val)
+{
+	return ((val) << A6XX_SP_HS_CONFIG_NSAMP__SHIFT) & A6XX_SP_HS_CONFIG_NSAMP__MASK;
+}
+#define A6XX_SP_HS_CONFIG_NIBO__MASK 0x1fc00000
+#define A6XX_SP_HS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_HS_CONFIG_NIBO(uint32_t val)
+{
+	return ((val) << A6XX_SP_HS_CONFIG_NIBO__SHIFT) & A6XX_SP_HS_CONFIG_NIBO__MASK;
+}
+
+#define REG_A6XX_SP_HS_INSTRLEN 0x0000a83c
+
+#define REG_A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET 0x0000a83d
+#define A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
+{
+	return ((val >> 11) << A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_SP_DS_CTRL_REG0 0x0000a840
+#define A6XX_SP_DS_CTRL_REG0_MERGEDREGS 0x00100000
+#define A6XX_SP_DS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_DS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+	return ((val) << A6XX_SP_DS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_DS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+	return ((val) << A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+	return ((val) << A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_UNK13 0x00002000
+#define A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+	return ((val) << A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A6XX_SP_DS_BRANCH_COND 0x0000a841
+
+#define REG_A6XX_SP_DS_PRIMITIVE_CNTL 0x0000a842
+#define A6XX_SP_DS_PRIMITIVE_CNTL_OUT__MASK 0x0000003f
+#define A6XX_SP_DS_PRIMITIVE_CNTL_OUT__SHIFT 0
+static inline uint32_t A6XX_SP_DS_PRIMITIVE_CNTL_OUT(uint32_t val)
+{
+	return ((val) << A6XX_SP_DS_PRIMITIVE_CNTL_OUT__SHIFT) & A6XX_SP_DS_PRIMITIVE_CNTL_OUT__MASK;
+}
+#define A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__MASK 0x00003fc0
+#define A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT 6
+static inline uint32_t A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID(uint32_t val)
+{
+	return ((val) << A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT) & A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_DS_OUT(uint32_t i0) { return 0x0000a843 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_DS_OUT_REG(uint32_t i0) { return 0x0000a843 + 0x1*i0; }
+#define A6XX_SP_DS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A6XX_SP_DS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_DS_OUT_REG_A_REGID(uint32_t val)
+{
+	return ((val) << A6XX_SP_DS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_DS_OUT_REG_A_REGID__MASK;
+}
+#define A6XX_SP_DS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A6XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A6XX_SP_DS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+	return ((val) << A6XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_DS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A6XX_SP_DS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A6XX_SP_DS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A6XX_SP_DS_OUT_REG_B_REGID(uint32_t val)
+{
+	return ((val) << A6XX_SP_DS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_DS_OUT_REG_B_REGID__MASK;
+}
+#define A6XX_SP_DS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A6XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A6XX_SP_DS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+	return ((val) << A6XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_DS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_DS_VPC_DST(uint32_t i0) { return 0x0000a853 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_DS_VPC_DST_REG(uint32_t i0) { return 0x0000a853 + 0x1*i0; }
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+	return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+	return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+	return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+	return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A6XX_SP_DS_OBJ_FIRST_EXEC_OFFSET 0x0000a85b
+
+#define REG_A6XX_SP_DS_OBJ_START 0x0000a85c
+#define A6XX_SP_DS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_DS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_DS_OBJ_START(uint32_t val)
+{
+	return ((val) << A6XX_SP_DS_OBJ_START__SHIFT) & A6XX_SP_DS_OBJ_START__MASK;
+}
+
+#define REG_A6XX_SP_DS_PVT_MEM_PARAM 0x0000a85e
+#define A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+	return ((val >> 9) << A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+	return ((val) << A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_DS_PVT_MEM_ADDR 0x0000a85f
+#define A6XX_SP_DS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_DS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_DS_PVT_MEM_ADDR(uint32_t val)
+{
+	return ((val) << A6XX_SP_DS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_DS_PVT_MEM_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_DS_PVT_MEM_SIZE 0x0000a861
+#define A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+	return ((val >> 12) << A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_DS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
+
+#define REG_A6XX_SP_DS_TEX_COUNT 0x0000a862
+
+#define REG_A6XX_SP_DS_CONFIG 0x0000a863
+#define A6XX_SP_DS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_DS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_DS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_DS_CONFIG_BINDLESS_UBO 0x00000008
+#define A6XX_SP_DS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_DS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_DS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_DS_CONFIG_NTEX(uint32_t val)
+{
+	return ((val) << A6XX_SP_DS_CONFIG_NTEX__SHIFT) & A6XX_SP_DS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_DS_CONFIG_NSAMP__MASK 0x003e0000
+#define A6XX_SP_DS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_DS_CONFIG_NSAMP(uint32_t val)
+{
+	return ((val) << A6XX_SP_DS_CONFIG_NSAMP__SHIFT) & A6XX_SP_DS_CONFIG_NSAMP__MASK;
+}
+#define A6XX_SP_DS_CONFIG_NIBO__MASK 0x1fc00000
+#define A6XX_SP_DS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_DS_CONFIG_NIBO(uint32_t val)
+{
+	return ((val) << A6XX_SP_DS_CONFIG_NIBO__SHIFT) & A6XX_SP_DS_CONFIG_NIBO__MASK;
+}
+
+#define REG_A6XX_SP_DS_INSTRLEN 0x0000a864
+
+#define REG_A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET 0x0000a865
+#define A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
+{
+	return ((val >> 11) << A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+}
+
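/*
 * Editor's usage sketch, not part of the generated header: every field
 * helper above packs a value as ((val << NAME__SHIFT) & NAME__MASK), so a
 * full register word is composed by OR-ing field helpers and single-bit
 * flags, then writing it through one of the driver's MMIO accessors.
 * A hypothetical write enabling the DS stage with four textures and two
 * samplers might look like this (values invented for illustration):
 *
 *	uint32_t cfg = A6XX_SP_DS_CONFIG_ENABLED |
 *		       A6XX_SP_DS_CONFIG_NTEX(4) |
 *		       A6XX_SP_DS_CONFIG_NSAMP(2);
 *	gpu_write(gpu, REG_A6XX_SP_DS_CONFIG, cfg);
 *
 * Note that the masking silently truncates out-of-range values rather
 * than flagging them, so callers must keep inputs within field width.
 */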
+#define REG_A6XX_SP_GS_CTRL_REG0 0x0000a870
+#define A6XX_SP_GS_CTRL_REG0_UNK20 0x00100000
+#define A6XX_SP_GS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_GS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+	return ((val) << A6XX_SP_GS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_GS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+	return ((val) << A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+	return ((val) << A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_UNK13 0x00002000
+#define A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+	return ((val) << A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A6XX_SP_GS_PRIM_SIZE 0x0000a871
+
+#define REG_A6XX_SP_GS_BRANCH_COND 0x0000a872
+
+#define REG_A6XX_SP_GS_PRIMITIVE_CNTL 0x0000a873
+#define A6XX_SP_GS_PRIMITIVE_CNTL_OUT__MASK 0x0000003f
+#define A6XX_SP_GS_PRIMITIVE_CNTL_OUT__SHIFT 0
+static inline uint32_t A6XX_SP_GS_PRIMITIVE_CNTL_OUT(uint32_t val)
+{
+	return ((val) << A6XX_SP_GS_PRIMITIVE_CNTL_OUT__SHIFT) & A6XX_SP_GS_PRIMITIVE_CNTL_OUT__MASK;
+}
+#define A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__MASK 0x00003fc0
+#define A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT 6
+static inline uint32_t A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID(uint32_t val)
+{
+	return ((val) << A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT) & A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_GS_OUT(uint32_t i0) { return 0x0000a874 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_GS_OUT_REG(uint32_t i0) { return 0x0000a874 + 0x1*i0; }
+#define A6XX_SP_GS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A6XX_SP_GS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_GS_OUT_REG_A_REGID(uint32_t val)
+{
+	return ((val) << A6XX_SP_GS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_GS_OUT_REG_A_REGID__MASK;
+}
+#define A6XX_SP_GS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A6XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A6XX_SP_GS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+	return ((val) << A6XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_GS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A6XX_SP_GS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A6XX_SP_GS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A6XX_SP_GS_OUT_REG_B_REGID(uint32_t val)
+{
+	return ((val) << A6XX_SP_GS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_GS_OUT_REG_B_REGID__MASK;
+}
+#define A6XX_SP_GS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A6XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A6XX_SP_GS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+	return ((val) << A6XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_GS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_GS_VPC_DST(uint32_t i0) { return 0x0000a884 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_GS_VPC_DST_REG(uint32_t i0) { return 0x0000a884 + 0x1*i0; }
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+	return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+	return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+	return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+	return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A6XX_SP_GS_OBJ_FIRST_EXEC_OFFSET 0x0000a88c
+
+#define REG_A6XX_SP_GS_OBJ_START 0x0000a88d
+#define A6XX_SP_GS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_GS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_GS_OBJ_START(uint32_t val)
+{
+	return ((val) << A6XX_SP_GS_OBJ_START__SHIFT) & A6XX_SP_GS_OBJ_START__MASK;
+}
+
+#define REG_A6XX_SP_GS_PVT_MEM_PARAM 0x0000a88f
+#define A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+	return ((val >> 9) << A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+	return ((val) << A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_GS_PVT_MEM_ADDR 0x0000a890
+#define A6XX_SP_GS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_GS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_GS_PVT_MEM_ADDR(uint32_t val)
+{
+	return ((val) << A6XX_SP_GS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_GS_PVT_MEM_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_GS_PVT_MEM_SIZE 0x0000a892
+#define A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+	return ((val >> 12) << A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_GS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
+
+#define REG_A6XX_SP_GS_TEX_COUNT 0x0000a893
+
+#define REG_A6XX_SP_GS_CONFIG 0x0000a894
+#define A6XX_SP_GS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_GS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_GS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_GS_CONFIG_BINDLESS_UBO 0x00000008
+#define A6XX_SP_GS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_GS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_GS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_GS_CONFIG_NTEX(uint32_t val)
+{
+	return ((val) << A6XX_SP_GS_CONFIG_NTEX__SHIFT) & A6XX_SP_GS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_GS_CONFIG_NSAMP__MASK 0x003e0000
+#define A6XX_SP_GS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_GS_CONFIG_NSAMP(uint32_t val)
+{
+	return ((val) << A6XX_SP_GS_CONFIG_NSAMP__SHIFT) & A6XX_SP_GS_CONFIG_NSAMP__MASK;
+}
+#define A6XX_SP_GS_CONFIG_NIBO__MASK 0x1fc00000
+#define A6XX_SP_GS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_GS_CONFIG_NIBO(uint32_t val)
+{
+	return ((val) << A6XX_SP_GS_CONFIG_NIBO__SHIFT) & A6XX_SP_GS_CONFIG_NIBO__MASK;
+}
+
+#define REG_A6XX_SP_GS_INSTRLEN 0x0000a895
+
+#define REG_A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET 0x0000a896
+#define A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
+{
+	return ((val >> 11) << A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_SP_VS_TEX_SAMP 0x0000a8a0
+#define A6XX_SP_VS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_VS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_VS_TEX_SAMP(uint32_t val)
+{
+	return ((val) << A6XX_SP_VS_TEX_SAMP__SHIFT) & A6XX_SP_VS_TEX_SAMP__MASK;
+}
+
+#define REG_A6XX_SP_HS_TEX_SAMP 0x0000a8a2
+#define A6XX_SP_HS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_HS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_HS_TEX_SAMP(uint32_t val)
+{
+	return ((val) << A6XX_SP_HS_TEX_SAMP__SHIFT) & A6XX_SP_HS_TEX_SAMP__MASK;
+}
+
+#define REG_A6XX_SP_DS_TEX_SAMP 0x0000a8a4
+#define A6XX_SP_DS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_DS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_DS_TEX_SAMP(uint32_t val)
+{
+	return ((val) << A6XX_SP_DS_TEX_SAMP__SHIFT) & A6XX_SP_DS_TEX_SAMP__MASK;
+}
+
+#define REG_A6XX_SP_GS_TEX_SAMP 0x0000a8a6
+#define A6XX_SP_GS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_GS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_GS_TEX_SAMP(uint32_t val)
+{
+	return ((val) << A6XX_SP_GS_TEX_SAMP__SHIFT) & A6XX_SP_GS_TEX_SAMP__MASK;
+}
+
+#define REG_A6XX_SP_VS_TEX_CONST 0x0000a8a8
+#define A6XX_SP_VS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_VS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_VS_TEX_CONST(uint32_t val)
+{
+	return ((val) << A6XX_SP_VS_TEX_CONST__SHIFT) & A6XX_SP_VS_TEX_CONST__MASK;
+}
+
+#define REG_A6XX_SP_HS_TEX_CONST 0x0000a8aa
+#define A6XX_SP_HS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_HS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_HS_TEX_CONST(uint32_t val)
+{
+	return ((val) << A6XX_SP_HS_TEX_CONST__SHIFT) & A6XX_SP_HS_TEX_CONST__MASK;
+}
+
+#define REG_A6XX_SP_DS_TEX_CONST 0x0000a8ac
+#define A6XX_SP_DS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_DS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_DS_TEX_CONST(uint32_t val)
+{
+	return ((val) << A6XX_SP_DS_TEX_CONST__SHIFT) & A6XX_SP_DS_TEX_CONST__MASK;
+}
+
+#define REG_A6XX_SP_GS_TEX_CONST 0x0000a8ae
+#define A6XX_SP_GS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_GS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_GS_TEX_CONST(uint32_t val)
+{
+	return ((val) << A6XX_SP_GS_TEX_CONST__SHIFT) & A6XX_SP_GS_TEX_CONST__MASK;
+}
+
+#define REG_A6XX_SP_FS_CTRL_REG0 0x0000a980
+#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a6xx_threadsize val)
+{
+	return ((val) << A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_UNK21 0x00200000
+#define A6XX_SP_FS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_FS_CTRL_REG0_DIFF_FINE 0x00800000
+#define A6XX_SP_FS_CTRL_REG0_UNK24 0x01000000
+#define A6XX_SP_FS_CTRL_REG0_UNK25 0x02000000
+#define A6XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_FS_CTRL_REG0_UNK27__MASK 0x18000000
+#define A6XX_SP_FS_CTRL_REG0_UNK27__SHIFT 27
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_UNK27(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_CTRL_REG0_UNK27__SHIFT) & A6XX_SP_FS_CTRL_REG0_UNK27__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_MERGEDREGS 0x80000000
+#define A6XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+	return ((val) << A6XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_UNK13 0x00002000
+#define A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A6XX_SP_FS_BRANCH_COND 0x0000a981
+
+#define REG_A6XX_SP_FS_OBJ_FIRST_EXEC_OFFSET 0x0000a982
+
+#define REG_A6XX_SP_FS_OBJ_START 0x0000a983
+#define A6XX_SP_FS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_FS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_FS_OBJ_START(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_OBJ_START__SHIFT) & A6XX_SP_FS_OBJ_START__MASK;
+}
+
+#define REG_A6XX_SP_FS_PVT_MEM_PARAM 0x0000a985
+#define A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+	return ((val >> 9) << A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_FS_PVT_MEM_ADDR 0x0000a986
+#define A6XX_SP_FS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_FS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PVT_MEM_ADDR(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_FS_PVT_MEM_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_FS_PVT_MEM_SIZE 0x0000a988
+#define A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+	return ((val >> 12) << A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_FS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
+
+#define REG_A6XX_SP_BLEND_CNTL 0x0000a989
+#define A6XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A6XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A6XX_SP_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+	return ((val) << A6XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A6XX_SP_BLEND_CNTL_UNK8 0x00000100
+#define A6XX_SP_BLEND_CNTL_DUAL_COLOR_IN_ENABLE 0x00000200
+#define A6XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
+
+#define REG_A6XX_SP_SRGB_CNTL 0x0000a98a
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT0 0x00000001
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT1 0x00000002
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT2 0x00000004
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT3 0x00000008
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT4 0x00000010
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT5 0x00000020
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT6 0x00000040
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT7 0x00000080
+
+#define REG_A6XX_SP_FS_RENDER_COMPONENTS 0x0000a98b
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT0__MASK 0x0000000f
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT0(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT0__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT0__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT1__MASK 0x000000f0
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT1(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT1__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT1__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT2__MASK 0x00000f00
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT2(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT2__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT2__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT3__MASK 0x0000f000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT3(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT3__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT3__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT4__MASK 0x000f0000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT4(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT4__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT4__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT5__MASK 0x00f00000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT5(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT5__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT5__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT6__MASK 0x0f000000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT6(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT6__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT6__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT7__MASK 0xf0000000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT7(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT7__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT7__MASK;
+}
+
+#define REG_A6XX_SP_FS_OUTPUT_CNTL0 0x0000a98c
+#define A6XX_SP_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE 0x00000001
+#define A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__MASK 0x0000ff00
+#define A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__SHIFT 8
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__MASK;
+}
+#define A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__MASK 0x00ff0000
+#define A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__SHIFT 16
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__MASK;
+}
+#define A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__MASK 0xff000000
+#define A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__SHIFT 24
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__MASK;
+}
+
+#define REG_A6XX_SP_FS_OUTPUT_CNTL1 0x0000a98d
+#define A6XX_SP_FS_OUTPUT_CNTL1_MRT__MASK 0x0000000f
+#define A6XX_SP_FS_OUTPUT_CNTL1_MRT__SHIFT 0
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL1_MRT(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_OUTPUT_CNTL1_MRT__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL1_MRT__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
+#define A6XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff
+#define A6XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_REG_REGID__MASK;
+}
+#define A6XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100
+
+static inline uint32_t REG_A6XX_SP_FS_MRT(uint32_t i0) { return 0x0000a996 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000a996 + 0x1*i0; }
+#define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a6xx_format val)
+{
+	return ((val) << A6XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
+}
+#define A6XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100
+#define A6XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200
+#define A6XX_SP_FS_MRT_REG_UNK10 0x00000400
+
+#define REG_A6XX_SP_FS_PREFETCH_CNTL 0x0000a99e
+#define A6XX_SP_FS_PREFETCH_CNTL_COUNT__MASK 0x00000007
+#define A6XX_SP_FS_PREFETCH_CNTL_COUNT__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_COUNT(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_PREFETCH_CNTL_COUNT__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_COUNT__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CNTL_UNK3 0x00000008
+#define A6XX_SP_FS_PREFETCH_CNTL_UNK4__MASK 0x00000ff0
+#define A6XX_SP_FS_PREFETCH_CNTL_UNK4__SHIFT 4
+static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_UNK4(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_PREFETCH_CNTL_UNK4__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_UNK4__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CNTL_UNK12__MASK 0x00007000
+#define A6XX_SP_FS_PREFETCH_CNTL_UNK12__SHIFT 12
+static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_UNK12(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_PREFETCH_CNTL_UNK12__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_UNK12__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_FS_PREFETCH(uint32_t i0) { return 0x0000a99f + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_PREFETCH_CMD(uint32_t i0) { return 0x0000a99f + 0x1*i0; }
+#define A6XX_SP_FS_PREFETCH_CMD_SRC__MASK 0x0000007f
+#define A6XX_SP_FS_PREFETCH_CMD_SRC__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_SRC(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_PREFETCH_CMD_SRC__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_SRC__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__MASK 0x00000780
+#define A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__SHIFT 7
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_SAMP_ID(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_TEX_ID__MASK 0x0000f800
+#define A6XX_SP_FS_PREFETCH_CMD_TEX_ID__SHIFT 11
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_TEX_ID(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_PREFETCH_CMD_TEX_ID__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_TEX_ID__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_DST__MASK 0x003f0000
+#define A6XX_SP_FS_PREFETCH_CMD_DST__SHIFT 16
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_DST(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_PREFETCH_CMD_DST__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_DST__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_WRMASK__MASK 0x03c00000
+#define A6XX_SP_FS_PREFETCH_CMD_WRMASK__SHIFT 22
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_WRMASK(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_PREFETCH_CMD_WRMASK__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_WRMASK__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_HALF 0x04000000
+#define A6XX_SP_FS_PREFETCH_CMD_CMD__MASK 0xf8000000
+#define A6XX_SP_FS_PREFETCH_CMD_CMD__SHIFT 27
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_CMD(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_PREFETCH_CMD_CMD__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_CMD__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_FS_BINDLESS_PREFETCH(uint32_t i0) { return 0x0000a9a3 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_BINDLESS_PREFETCH_CMD(uint32_t i0) { return 0x0000a9a3 + 0x1*i0; }
+#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__MASK 0x0000ffff
+#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__SHIFT 0
+static inline uint32_t A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__SHIFT) & A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__MASK;
+}
+#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__MASK 0xffff0000
+#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__SHIFT 16
+static inline uint32_t A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__SHIFT) & A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__MASK;
+}
+
+#define REG_A6XX_SP_FS_TEX_COUNT 0x0000a9a7
+
+#define REG_A6XX_SP_UNKNOWN_A9A8 0x0000a9a8
+
+#define REG_A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET 0x0000a9a9
+#define A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
+{
+	return ((val >> 11) << A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_SP_CS_CTRL_REG0 0x0000a9b0
+#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADSIZE(enum a6xx_threadsize val)
+{
+	return ((val) << A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_UNK21 0x00200000
+#define A6XX_SP_CS_CTRL_REG0_UNK22 0x00400000
+#define A6XX_SP_CS_CTRL_REG0_SEPARATEPROLOG 0x00800000
+#define A6XX_SP_CS_CTRL_REG0_MERGEDREGS 0x80000000
+#define A6XX_SP_CS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+	return ((val) << A6XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_CS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+	return ((val) << A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+	return ((val) << A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_UNK13 0x00002000
+#define A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+	return ((val) << A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A6XX_SP_CS_UNKNOWN_A9B1 0x0000a9b1
+#define A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__MASK 0x0000001f
+#define A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__SHIFT 0
+static inline uint32_t A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE(uint32_t val)
+{
+	return ((val) << A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__SHIFT) & A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__MASK;
+}
+#define A6XX_SP_CS_UNKNOWN_A9B1_UNK5 0x00000020
+#define A6XX_SP_CS_UNKNOWN_A9B1_UNK6 0x00000040
+
+#define REG_A6XX_SP_CS_BRANCH_COND 0x0000a9b2
+
+#define REG_A6XX_SP_CS_OBJ_FIRST_EXEC_OFFSET 0x0000a9b3
+
+#define REG_A6XX_SP_CS_OBJ_START 0x0000a9b4
+#define A6XX_SP_CS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_CS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_CS_OBJ_START(uint32_t val)
+{
+	return ((val) << A6XX_SP_CS_OBJ_START__SHIFT) & A6XX_SP_CS_OBJ_START__MASK;
+}
+
+#define REG_A6XX_SP_CS_PVT_MEM_PARAM 0x0000a9b6
+#define A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+	return ((val >> 9) << A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+	return ((val) << A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_CS_PVT_MEM_ADDR 0x0000a9b7
+#define A6XX_SP_CS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_CS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_CS_PVT_MEM_ADDR(uint32_t val)
+{
+	return ((val) << A6XX_SP_CS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_CS_PVT_MEM_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_CS_PVT_MEM_SIZE 0x0000a9b9
+#define A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+	return ((val >> 12) << A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_CS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
+
+#define REG_A6XX_SP_CS_TEX_COUNT 0x0000a9ba
+
+#define REG_A6XX_SP_CS_CONFIG 0x0000a9bb
+#define A6XX_SP_CS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_CS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_CS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_CS_CONFIG_BINDLESS_UBO 0x00000008
+#define A6XX_SP_CS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_CS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_CS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_CS_CONFIG_NTEX(uint32_t val)
+{
+	return ((val) << A6XX_SP_CS_CONFIG_NTEX__SHIFT) & A6XX_SP_CS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_CS_CONFIG_NSAMP__MASK 0x003e0000
+#define A6XX_SP_CS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_CS_CONFIG_NSAMP(uint32_t val)
+{
+	return ((val) << A6XX_SP_CS_CONFIG_NSAMP__SHIFT) & A6XX_SP_CS_CONFIG_NSAMP__MASK;
+}
+#define A6XX_SP_CS_CONFIG_NIBO__MASK 0x1fc00000
+#define A6XX_SP_CS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_CS_CONFIG_NIBO(uint32_t val)
+{
+	return ((val) << A6XX_SP_CS_CONFIG_NIBO__SHIFT) & A6XX_SP_CS_CONFIG_NIBO__MASK;
+}
+
+#define REG_A6XX_SP_CS_INSTRLEN 0x0000a9bc
+
+#define REG_A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET 0x0000a9bd
+#define A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
+{
+	return ((val >> 11) << A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_SP_CS_CNTL_0 0x0000a9c2
+#define A6XX_SP_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff
+#define A6XX_SP_CS_CNTL_0_WGIDCONSTID__SHIFT 0
+static inline uint32_t A6XX_SP_CS_CNTL_0_WGIDCONSTID(uint32_t val)
+{
+	return ((val) << A6XX_SP_CS_CNTL_0_WGIDCONSTID__SHIFT) & A6XX_SP_CS_CNTL_0_WGIDCONSTID__MASK;
+}
+#define A6XX_SP_CS_CNTL_0_WGSIZECONSTID__MASK 0x0000ff00
+#define A6XX_SP_CS_CNTL_0_WGSIZECONSTID__SHIFT 8
+static inline uint32_t A6XX_SP_CS_CNTL_0_WGSIZECONSTID(uint32_t val)
+{
+	return ((val) << A6XX_SP_CS_CNTL_0_WGSIZECONSTID__SHIFT) & A6XX_SP_CS_CNTL_0_WGSIZECONSTID__MASK;
+}
+#define A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID__MASK 0x00ff0000
+#define A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID__SHIFT 16
+static inline uint32_t A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID(uint32_t val)
+{
+	return ((val) << A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID__SHIFT) & A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID__MASK;
+}
+#define A6XX_SP_CS_CNTL_0_LOCALIDREGID__MASK 0xff000000
+#define A6XX_SP_CS_CNTL_0_LOCALIDREGID__SHIFT 24
+static inline uint32_t A6XX_SP_CS_CNTL_0_LOCALIDREGID(uint32_t val)
+{
+	return ((val) << A6XX_SP_CS_CNTL_0_LOCALIDREGID__SHIFT) & A6XX_SP_CS_CNTL_0_LOCALIDREGID__MASK;
+}
+
+#define REG_A6XX_SP_CS_CNTL_1 0x0000a9c3
+#define A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__MASK 0x000000ff
+#define A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT 0
+static inline uint32_t A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID(uint32_t val)
+{
+	return ((val) << A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT) & A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__MASK;
+}
+#define A6XX_SP_CS_CNTL_1_SINGLE_SP_CORE 0x00000100
+#define A6XX_SP_CS_CNTL_1_THREADSIZE__MASK 0x00000200
+#define A6XX_SP_CS_CNTL_1_THREADSIZE__SHIFT 9
+static inline uint32_t A6XX_SP_CS_CNTL_1_THREADSIZE(enum a6xx_threadsize val)
+{
+	return ((val) << A6XX_SP_CS_CNTL_1_THREADSIZE__SHIFT) & A6XX_SP_CS_CNTL_1_THREADSIZE__MASK;
+}
+#define A6XX_SP_CS_CNTL_1_THREADSIZE_SCALAR 0x00000400
+
+#define REG_A6XX_SP_FS_TEX_SAMP 0x0000a9e0
+#define A6XX_SP_FS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_FS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_FS_TEX_SAMP(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_TEX_SAMP__SHIFT) & A6XX_SP_FS_TEX_SAMP__MASK;
+}
+
+#define REG_A6XX_SP_CS_TEX_SAMP 0x0000a9e2
+#define A6XX_SP_CS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_CS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_CS_TEX_SAMP(uint32_t val)
+{
+	return ((val) << A6XX_SP_CS_TEX_SAMP__SHIFT) & A6XX_SP_CS_TEX_SAMP__MASK;
+}
+
+#define REG_A6XX_SP_FS_TEX_CONST 0x0000a9e4
+#define A6XX_SP_FS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_FS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_FS_TEX_CONST(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_TEX_CONST__SHIFT) & A6XX_SP_FS_TEX_CONST__MASK;
+}
+
+#define REG_A6XX_SP_CS_TEX_CONST 0x0000a9e6
+#define A6XX_SP_CS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_CS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_CS_TEX_CONST(uint32_t val)
+{
+	return ((val) << A6XX_SP_CS_TEX_CONST__SHIFT) & A6XX_SP_CS_TEX_CONST__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_CS_BINDLESS_BASE(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_SP_CS_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; }
+
+#define REG_A6XX_SP_CS_IBO 0x0000a9f2
+#define A6XX_SP_CS_IBO__MASK 0xffffffff
+#define A6XX_SP_CS_IBO__SHIFT 0
+static inline uint32_t A6XX_SP_CS_IBO(uint32_t val)
+{
+	return ((val) << A6XX_SP_CS_IBO__SHIFT) & A6XX_SP_CS_IBO__MASK;
+}
+
+#define REG_A6XX_SP_CS_IBO_COUNT 0x0000aa00
+
+#define REG_A6XX_SP_MODE_CONTROL 0x0000ab00
+#define A6XX_SP_MODE_CONTROL_CONSTANT_DEMOTION_ENABLE 0x00000001
+#define A6XX_SP_MODE_CONTROL_ISAMMODE__MASK 0x00000006
+#define A6XX_SP_MODE_CONTROL_ISAMMODE__SHIFT 1
+static inline uint32_t A6XX_SP_MODE_CONTROL_ISAMMODE(enum a6xx_isam_mode val)
+{
+	return ((val) << A6XX_SP_MODE_CONTROL_ISAMMODE__SHIFT) & A6XX_SP_MODE_CONTROL_ISAMMODE__MASK;
+}
+#define A6XX_SP_MODE_CONTROL_SHARED_CONSTS_ENABLE 0x00000008
+
+#define REG_A6XX_SP_FS_CONFIG 0x0000ab04
+#define A6XX_SP_FS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_FS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_FS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_FS_CONFIG_BINDLESS_UBO 0x00000008
+#define A6XX_SP_FS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_FS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_FS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_FS_CONFIG_NTEX(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_CONFIG_NTEX__SHIFT) & A6XX_SP_FS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_FS_CONFIG_NSAMP__MASK 0x003e0000
+#define A6XX_SP_FS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_CONFIG_NSAMP__SHIFT) & A6XX_SP_FS_CONFIG_NSAMP__MASK;
+}
+#define A6XX_SP_FS_CONFIG_NIBO__MASK 0x1fc00000
+#define A6XX_SP_FS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_FS_CONFIG_NIBO(uint32_t val)
+{
+	return ((val) << A6XX_SP_FS_CONFIG_NIBO__SHIFT) & A6XX_SP_FS_CONFIG_NIBO__MASK;
+}
+
+#define REG_A6XX_SP_FS_INSTRLEN 0x0000ab05
+
+static inline uint32_t REG_A6XX_SP_BINDLESS_BASE(uint32_t i0) { return 0x0000ab10 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_SP_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000ab10 + 0x2*i0; }
+
+#define REG_A6XX_SP_IBO 0x0000ab1a
+#define A6XX_SP_IBO__MASK 0xffffffff
+#define A6XX_SP_IBO__SHIFT 0
+static inline uint32_t A6XX_SP_IBO(uint32_t val)
+{
+	return ((val) << A6XX_SP_IBO__SHIFT) & A6XX_SP_IBO__MASK;
+}
+
+#define REG_A6XX_SP_IBO_COUNT 0x0000ab20
+
+#define REG_A6XX_SP_2D_DST_FORMAT 0x0000acc0
+#define A6XX_SP_2D_DST_FORMAT_NORM 0x00000001
+#define A6XX_SP_2D_DST_FORMAT_SINT 0x00000002
+#define A6XX_SP_2D_DST_FORMAT_UINT 0x00000004
+#define A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__MASK 0x000007f8
+#define A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__SHIFT 3
+static inline uint32_t A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT(enum a6xx_format val)
+{
+	return ((val) << A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__SHIFT) & A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__MASK;
+}
+#define A6XX_SP_2D_DST_FORMAT_SRGB 0x00000800
+#define A6XX_SP_2D_DST_FORMAT_MASK__MASK 0x0000f000
+#define A6XX_SP_2D_DST_FORMAT_MASK__SHIFT 12
+static inline uint32_t A6XX_SP_2D_DST_FORMAT_MASK(uint32_t val)
+{
+	return ((val) << A6XX_SP_2D_DST_FORMAT_MASK__SHIFT) & A6XX_SP_2D_DST_FORMAT_MASK__MASK;
+}
+
+#define REG_A6XX_SP_UNKNOWN_AE00 0x0000ae00
+
+#define REG_A6XX_SP_ADDR_MODE_CNTL 0x0000ae01
+
+#define REG_A6XX_SP_NC_MODE_CNTL 0x0000ae02
+
+#define REG_A6XX_SP_CHICKEN_BITS 0x0000ae03
+
+#define REG_A6XX_SP_FLOAT_CNTL 0x0000ae04
+#define A6XX_SP_FLOAT_CNTL_F16_NO_INF 0x00000008
+
+#define REG_A6XX_SP_PERFCTR_ENABLE 0x0000ae0f
+#define A6XX_SP_PERFCTR_ENABLE_VS 0x00000001
+#define A6XX_SP_PERFCTR_ENABLE_HS 0x00000002
+#define A6XX_SP_PERFCTR_ENABLE_DS 0x00000004
+#define A6XX_SP_PERFCTR_ENABLE_GS 0x00000008
+#define A6XX_SP_PERFCTR_ENABLE_FS 0x00000010
+#define A6XX_SP_PERFCTR_ENABLE_CS 0x00000020
+
+static inline uint32_t REG_A6XX_SP_PERFCTR_SP_SEL(uint32_t i0) { return 0x0000ae10 + 0x1*i0; }
+
+#define REG_A6XX_SP_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE 0x0000be22
+
+#define REG_A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR 0x0000b180
+#define A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR__MASK 0xffffffff
+#define A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(uint32_t val)
+{
+	return ((val) << A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR__SHIFT) & A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_UNKNOWN_B182 0x0000b182
+
+#define REG_A6XX_SP_UNKNOWN_B183 0x0000b183
+
+#define REG_A6XX_SP_UNKNOWN_B190 0x0000b190
+
+#define REG_A6XX_SP_UNKNOWN_B191 0x0000b191
+
+#define REG_A6XX_SP_TP_RAS_MSAA_CNTL 0x0000b300
+#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+	return ((val) << A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__MASK 0x0000000c
+#define A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__SHIFT 2
+static inline uint32_t A6XX_SP_TP_RAS_MSAA_CNTL_UNK2(uint32_t val)
+{
+	return ((val) << A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__SHIFT) & A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__MASK;
+}
+
+#define REG_A6XX_SP_TP_DEST_MSAA_CNTL 0x0000b301
+#define A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+	return ((val) << A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR 0x0000b302
+#define A6XX_SP_TP_BORDER_COLOR_BASE_ADDR__MASK 0xffffffff
+#define A6XX_SP_TP_BORDER_COLOR_BASE_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(uint32_t val)
+{
+	return ((val) << A6XX_SP_TP_BORDER_COLOR_BASE_ADDR__SHIFT) & A6XX_SP_TP_BORDER_COLOR_BASE_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_TP_SAMPLE_CONFIG 0x0000b304
+#define A6XX_SP_TP_SAMPLE_CONFIG_UNK0 0x00000001
+#define A6XX_SP_TP_SAMPLE_CONFIG_LOCATION_ENABLE 0x00000002
+
+#define REG_A6XX_SP_TP_SAMPLE_LOCATION_0 0x0000b305
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK 0x0000000f
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT 0
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK 0x000000f0
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT 4
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK 0x00000f00
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT 8
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK 0x0000f000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT 12
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK 0x000f0000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT 16
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK 0x00f00000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT 20
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK 0x0f000000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT 24
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK 0xf0000000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT 28
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK;
+}
+
+#define REG_A6XX_SP_TP_SAMPLE_LOCATION_1 0x0000b306
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK 0x0000000f
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT 0
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK 0x000000f0
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT 4
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK 0x00000f00
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT 8
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK 0x0000f000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT 12
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK 0x000f0000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT 16
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK 0x00f00000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT 20
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK 0x0f000000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT 24
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK 0xf0000000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT 28
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val)
+{
+	return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
+}
+
+#define REG_A6XX_SP_TP_WINDOW_OFFSET 0x0000b307
+#define A6XX_SP_TP_WINDOW_OFFSET_X__MASK 0x00003fff
+#define A6XX_SP_TP_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_X(uint32_t val)
+{
+	return ((val) << A6XX_SP_TP_WINDOW_OFFSET_X__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_SP_TP_WINDOW_OFFSET_Y__MASK 0x3fff0000
+#define A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_Y(uint32_t val)
+{
+	return ((val) << A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A6XX_SP_TP_MODE_CNTL 0x0000b309
+#define A6XX_SP_TP_MODE_CNTL_ISAMMODE__MASK 0x00000003
+#define A6XX_SP_TP_MODE_CNTL_ISAMMODE__SHIFT 0
+static inline uint32_t A6XX_SP_TP_MODE_CNTL_ISAMMODE(enum a6xx_isam_mode val)
+{
+	return ((val) << A6XX_SP_TP_MODE_CNTL_ISAMMODE__SHIFT) & A6XX_SP_TP_MODE_CNTL_ISAMMODE__MASK;
+}
+#define A6XX_SP_TP_MODE_CNTL_UNK3__MASK 0x000000fc
+#define A6XX_SP_TP_MODE_CNTL_UNK3__SHIFT 2
+static inline uint32_t A6XX_SP_TP_MODE_CNTL_UNK3(uint32_t val)
+{
+	return ((val) << A6XX_SP_TP_MODE_CNTL_UNK3__SHIFT) & A6XX_SP_TP_MODE_CNTL_UNK3__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC_INFO 0x0000b4c0
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT(enum a6xx_format val)
+{
+	return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
+#define A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_TILE_MODE(enum a6xx_tile_mode val)
+{
+	return ((val) << A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+	return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_FLAGS 0x00001000
+#define A6XX_SP_PS_2D_SRC_INFO_SRGB 0x00002000
+#define A6XX_SP_PS_2D_SRC_INFO_SAMPLES__MASK 0x0000c000
+#define A6XX_SP_PS_2D_SRC_INFO_SAMPLES__SHIFT 14
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_SAMPLES(enum a3xx_msaa_samples val)
+{
+	return ((val) << A6XX_SP_PS_2D_SRC_INFO_SAMPLES__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_SAMPLES__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_FILTER 0x00010000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK17 0x00020000
+#define A6XX_SP_PS_2D_SRC_INFO_SAMPLES_AVERAGE 0x00040000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK19 0x00080000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK20 0x00100000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK21 0x00200000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK22 0x00400000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK23__MASK 0x07800000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK23__SHIFT 23
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_UNK23(uint32_t val)
+{
+	return ((val) << A6XX_SP_PS_2D_SRC_INFO_UNK23__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_UNK23__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_UNK28 0x10000000
+
+#define REG_A6XX_SP_PS_2D_SRC_SIZE 0x0000b4c1
+#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK 0x00007fff
+#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_WIDTH(uint32_t val)
+{
+	return ((val) << A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK 0x3fff8000
+#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT 15
+static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_HEIGHT(uint32_t val)
+{
+	return ((val) << A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC 0x0000b4c2
+#define A6XX_SP_PS_2D_SRC__MASK 0xffffffff
+#define A6XX_SP_PS_2D_SRC__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC(uint32_t val)
+{
+	return ((val) << A6XX_SP_PS_2D_SRC__SHIFT) & A6XX_SP_PS_2D_SRC__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC_PITCH 0x0000b4c4
+#define A6XX_SP_PS_2D_SRC_PITCH_UNK0__MASK 0x000001ff
+#define A6XX_SP_PS_2D_SRC_PITCH_UNK0__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_UNK0(uint32_t val)
+{
+	return ((val) << A6XX_SP_PS_2D_SRC_PITCH_UNK0__SHIFT) & A6XX_SP_PS_2D_SRC_PITCH_UNK0__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK 0x00fffe00
+#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT 9
+static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC_PLANE1 0x0000b4c5
+#define A6XX_SP_PS_2D_SRC_PLANE1__MASK 0xffffffff
+#define A6XX_SP_PS_2D_SRC_PLANE1__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_PLANE1(uint32_t val)
+{
+	return ((val) << A6XX_SP_PS_2D_SRC_PLANE1__SHIFT) & A6XX_SP_PS_2D_SRC_PLANE1__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC_PLANE_PITCH 0x0000b4c7
+#define A6XX_SP_PS_2D_SRC_PLANE_PITCH__MASK 0x00000fff
+#define A6XX_SP_PS_2D_SRC_PLANE_PITCH__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_PLANE_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A6XX_SP_PS_2D_SRC_PLANE_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PLANE_PITCH__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC_PLANE2 0x0000b4c8
+#define A6XX_SP_PS_2D_SRC_PLANE2__MASK 0xffffffff
+#define A6XX_SP_PS_2D_SRC_PLANE2__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_PLANE2(uint32_t val)
+{
+	return ((val) << A6XX_SP_PS_2D_SRC_PLANE2__SHIFT) & A6XX_SP_PS_2D_SRC_PLANE2__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC_FLAGS 0x0000b4ca
+#define A6XX_SP_PS_2D_SRC_FLAGS__MASK 0xffffffff
+#define A6XX_SP_PS_2D_SRC_FLAGS__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_FLAGS(uint32_t val)
+{
+	return ((val) << A6XX_SP_PS_2D_SRC_FLAGS__SHIFT) & A6XX_SP_PS_2D_SRC_FLAGS__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC_FLAGS_PITCH 0x0000b4cc
+#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH__MASK 0x000000ff
+#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_FLAGS_PITCH(uint32_t val)
+{
+	return ((val >> 6) << A6XX_SP_PS_2D_SRC_FLAGS_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_FLAGS_PITCH__MASK;
+}
+
+#define REG_A6XX_SP_PS_UNKNOWN_B4CD 0x0000b4cd
+
+#define REG_A6XX_SP_PS_UNKNOWN_B4CE 0x0000b4ce
+
+#define REG_A6XX_SP_PS_UNKNOWN_B4CF 0x0000b4cf
+
+#define REG_A6XX_SP_PS_UNKNOWN_B4D0 0x0000b4d0
+
+#define REG_A6XX_SP_WINDOW_OFFSET 0x0000b4d1
+#define A6XX_SP_WINDOW_OFFSET_X__MASK 0x00003fff
+#define A6XX_SP_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_SP_WINDOW_OFFSET_X(uint32_t val)
+{
+	return ((val) << A6XX_SP_WINDOW_OFFSET_X__SHIFT) & A6XX_SP_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_SP_WINDOW_OFFSET_Y__MASK 0x3fff0000
+#define A6XX_SP_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_SP_WINDOW_OFFSET_Y(uint32_t val)
+{
+	return ((val) << A6XX_SP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A6XX_TPL1_DBG_ECO_CNTL 0x0000b600
+
+#define REG_A6XX_TPL1_ADDR_MODE_CNTL 0x0000b601
+
+#define REG_A6XX_TPL1_UNKNOWN_B602 0x0000b602
+
+#define REG_A6XX_TPL1_NC_MODE_CNTL 0x0000b604
+#define A6XX_TPL1_NC_MODE_CNTL_MODE 0x00000001
+#define A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__MASK 0x00000006
+#define A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__SHIFT 1
+static inline uint32_t A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT(uint32_t val)
+{
+	return ((val) << A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__SHIFT) & A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__MASK;
+}
+#define A6XX_TPL1_NC_MODE_CNTL_MIN_ACCESS_LENGTH 0x00000008
+#define A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__MASK 0x00000010
+#define A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__SHIFT 4
+static inline uint32_t A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT(uint32_t val)
+{
+	return ((val) << A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__SHIFT) & A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__MASK;
+}
+#define A6XX_TPL1_NC_MODE_CNTL_UNK6__MASK 0x000000c0
+#define A6XX_TPL1_NC_MODE_CNTL_UNK6__SHIFT 6
+static inline uint32_t A6XX_TPL1_NC_MODE_CNTL_UNK6(uint32_t val)
+{
+	return ((val) << A6XX_TPL1_NC_MODE_CNTL_UNK6__SHIFT) & A6XX_TPL1_NC_MODE_CNTL_UNK6__MASK;
+}
+
+#define REG_A6XX_TPL1_UNKNOWN_B605 0x0000b605
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0 0x0000b608
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1 0x0000b609
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2 0x0000b60a
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3 0x0000b60b
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4 0x0000b60c
+
+static inline uint32_t REG_A6XX_TPL1_PERFCTR_TP_SEL(uint32_t
i0) { return 0x0000b610 + 0x1*i0; } + +#define REG_A6XX_HLSQ_VS_CNTL 0x0000b800 +#define A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK 0x000000ff +#define A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT 0 +static inline uint32_t A6XX_HLSQ_VS_CNTL_CONSTLEN(uint32_t val) +{ + return ((val >> 2) << A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK; +} +#define A6XX_HLSQ_VS_CNTL_ENABLED 0x00000100 + +#define REG_A6XX_HLSQ_HS_CNTL 0x0000b801 +#define A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK 0x000000ff +#define A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT 0 +static inline uint32_t A6XX_HLSQ_HS_CNTL_CONSTLEN(uint32_t val) +{ + return ((val >> 2) << A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK; +} +#define A6XX_HLSQ_HS_CNTL_ENABLED 0x00000100 + +#define REG_A6XX_HLSQ_DS_CNTL 0x0000b802 +#define A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK 0x000000ff +#define A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT 0 +static inline uint32_t A6XX_HLSQ_DS_CNTL_CONSTLEN(uint32_t val) +{ + return ((val >> 2) << A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK; +} +#define A6XX_HLSQ_DS_CNTL_ENABLED 0x00000100 + +#define REG_A6XX_HLSQ_GS_CNTL 0x0000b803 +#define A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK 0x000000ff +#define A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT 0 +static inline uint32_t A6XX_HLSQ_GS_CNTL_CONSTLEN(uint32_t val) +{ + return ((val >> 2) << A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK; +} +#define A6XX_HLSQ_GS_CNTL_ENABLED 0x00000100 + +#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_CMD 0x0000b820 + +#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR 0x0000b821 +#define A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR__MASK 0xffffffff +#define A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR__SHIFT 0 +static inline uint32_t A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR(uint32_t val) +{ + return ((val) << A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR__SHIFT) & A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR__MASK; +} + +#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_DATA 0x0000b823 + +#define REG_A6XX_HLSQ_FS_CNTL_0 0x0000b980 +#define A6XX_HLSQ_FS_CNTL_0_THREADSIZE__MASK 0x00000001 +#define A6XX_HLSQ_FS_CNTL_0_THREADSIZE__SHIFT 0 +static inline uint32_t A6XX_HLSQ_FS_CNTL_0_THREADSIZE(enum a6xx_threadsize val) +{ + return ((val) << A6XX_HLSQ_FS_CNTL_0_THREADSIZE__SHIFT) & A6XX_HLSQ_FS_CNTL_0_THREADSIZE__MASK; +} +#define A6XX_HLSQ_FS_CNTL_0_VARYINGS 0x00000002 +#define A6XX_HLSQ_FS_CNTL_0_UNK2__MASK 0x00000ffc +#define A6XX_HLSQ_FS_CNTL_0_UNK2__SHIFT 2 +static inline uint32_t A6XX_HLSQ_FS_CNTL_0_UNK2(uint32_t val) +{ + return ((val) << A6XX_HLSQ_FS_CNTL_0_UNK2__SHIFT) & A6XX_HLSQ_FS_CNTL_0_UNK2__MASK; +} + +#define REG_A6XX_HLSQ_UNKNOWN_B981 0x0000b981 + +#define REG_A6XX_HLSQ_CONTROL_1_REG 0x0000b982 + +#define REG_A6XX_HLSQ_CONTROL_2_REG 0x0000b983 +#define A6XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff +#define A6XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0 +static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK; +} +#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00 +#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8 +static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK; +} +#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000 +#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16 +static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val) +{ + return ((val) << 
A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK; +} +#define A6XX_HLSQ_CONTROL_2_REG_SIZE__MASK 0xff000000 +#define A6XX_HLSQ_CONTROL_2_REG_SIZE__SHIFT 24 +static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SIZE(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CONTROL_2_REG_SIZE__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SIZE__MASK; +} + +#define REG_A6XX_HLSQ_CONTROL_3_REG 0x0000b984 +#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff +#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0 +static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK; +} +#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00 +#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8 +static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK; +} +#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000 +#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16 +static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK; +} +#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000 +#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24 +static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK; +} + +#define REG_A6XX_HLSQ_CONTROL_4_REG 0x0000b985 +#define A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff +#define A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0 +static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK; +} +#define A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK 0x0000ff00 +#define A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT 8 +static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK; +} +#define A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000 +#define A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16 +static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK; +} +#define A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000 +#define A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24 +static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK; +} + +#define REG_A6XX_HLSQ_CONTROL_5_REG 0x0000b986 +#define A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK 0x000000ff +#define A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT 0 +static inline uint32_t A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT) & A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK; +} +#define A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK 0x0000ff00 +#define 
A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT 8 +static inline uint32_t A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT) & A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK; +} + +#define REG_A6XX_HLSQ_CS_CNTL 0x0000b987 +#define A6XX_HLSQ_CS_CNTL_CONSTLEN__MASK 0x000000ff +#define A6XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT 0 +static inline uint32_t A6XX_HLSQ_CS_CNTL_CONSTLEN(uint32_t val) +{ + return ((val >> 2) << A6XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_CS_CNTL_CONSTLEN__MASK; +} +#define A6XX_HLSQ_CS_CNTL_ENABLED 0x00000100 + +#define REG_A6XX_HLSQ_CS_NDRANGE_0 0x0000b990 +#define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK 0x00000003 +#define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT 0 +static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK; +} +#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc +#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT 2 +static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK; +} +#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000 +#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT 12 +static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK; +} +#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000 +#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT 22 +static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK; +} + +#define REG_A6XX_HLSQ_CS_NDRANGE_1 0x0000b991 +#define A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK 0xffffffff +#define A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT 0 +static inline uint32_t A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT) & A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK; +} + +#define REG_A6XX_HLSQ_CS_NDRANGE_2 0x0000b992 +#define A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK 0xffffffff +#define A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT 0 +static inline uint32_t A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT) & A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK; +} + +#define REG_A6XX_HLSQ_CS_NDRANGE_3 0x0000b993 +#define A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK 0xffffffff +#define A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT 0 +static inline uint32_t A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT) & A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK; +} + +#define REG_A6XX_HLSQ_CS_NDRANGE_4 0x0000b994 +#define A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK 0xffffffff +#define A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT 0 +static inline uint32_t A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT) & A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK; +} + +#define REG_A6XX_HLSQ_CS_NDRANGE_5 0x0000b995 +#define A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK 0xffffffff +#define A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT 0 +static inline uint32_t A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(uint32_t val) +{ + return ((val) << 
A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT) & A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK; +} + +#define REG_A6XX_HLSQ_CS_NDRANGE_6 0x0000b996 +#define A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK 0xffffffff +#define A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT 0 +static inline uint32_t A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT) & A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK; +} + +#define REG_A6XX_HLSQ_CS_CNTL_0 0x0000b997 +#define A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff +#define A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT 0 +static inline uint32_t A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK; +} +#define A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__MASK 0x0000ff00 +#define A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__SHIFT 8 +static inline uint32_t A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__MASK; +} +#define A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__MASK 0x00ff0000 +#define A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__SHIFT 16 +static inline uint32_t A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__MASK; +} +#define A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK 0xff000000 +#define A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT 24 +static inline uint32_t A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK; +} + +#define REG_A6XX_HLSQ_CS_CNTL_1 0x0000b998 +#define A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__MASK 0x000000ff +#define A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT 0 +static inline uint32_t A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID(uint32_t val) +{ + return ((val) << A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT) & A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__MASK; +} +#define A6XX_HLSQ_CS_CNTL_1_SINGLE_SP_CORE 0x00000100 +#define A6XX_HLSQ_CS_CNTL_1_THREADSIZE__MASK 0x00000200 +#define A6XX_HLSQ_CS_CNTL_1_THREADSIZE__SHIFT 9 +static inline uint32_t A6XX_HLSQ_CS_CNTL_1_THREADSIZE(enum a6xx_threadsize val) +{ + return ((val) << A6XX_HLSQ_CS_CNTL_1_THREADSIZE__SHIFT) & A6XX_HLSQ_CS_CNTL_1_THREADSIZE__MASK; +} +#define A6XX_HLSQ_CS_CNTL_1_THREADSIZE_SCALAR 0x00000400 + +#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_X 0x0000b999 + +#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000b99a + +#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000b99b + +#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_CMD 0x0000b9a0 + +#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR 0x0000b9a1 +#define A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR__MASK 0xffffffff +#define A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR__SHIFT 0 +static inline uint32_t A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR(uint32_t val) +{ + return ((val) << A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR__SHIFT) & A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR__MASK; +} + +#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_DATA 0x0000b9a3 + +static inline uint32_t REG_A6XX_HLSQ_CS_BINDLESS_BASE(uint32_t i0) { return 0x0000b9c0 + 0x2*i0; } + +static inline uint32_t REG_A6XX_HLSQ_CS_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000b9c0 + 0x2*i0; } + +#define REG_A6XX_HLSQ_CS_UNKNOWN_B9D0 0x0000b9d0 +#define A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE__MASK 0x0000001f +#define A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE__SHIFT 0 +static inline uint32_t A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE(uint32_t val) +{ + 
return ((val) << A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE__SHIFT) & A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE__MASK; +} +#define A6XX_HLSQ_CS_UNKNOWN_B9D0_UNK5 0x00000020 +#define A6XX_HLSQ_CS_UNKNOWN_B9D0_UNK6 0x00000040 + +#define REG_A6XX_HLSQ_DRAW_CMD 0x0000bb00 +#define A6XX_HLSQ_DRAW_CMD_STATE_ID__MASK 0x000000ff +#define A6XX_HLSQ_DRAW_CMD_STATE_ID__SHIFT 0 +static inline uint32_t A6XX_HLSQ_DRAW_CMD_STATE_ID(uint32_t val) +{ + return ((val) << A6XX_HLSQ_DRAW_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_DRAW_CMD_STATE_ID__MASK; +} + +#define REG_A6XX_HLSQ_DISPATCH_CMD 0x0000bb01 +#define A6XX_HLSQ_DISPATCH_CMD_STATE_ID__MASK 0x000000ff +#define A6XX_HLSQ_DISPATCH_CMD_STATE_ID__SHIFT 0 +static inline uint32_t A6XX_HLSQ_DISPATCH_CMD_STATE_ID(uint32_t val) +{ + return ((val) << A6XX_HLSQ_DISPATCH_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_DISPATCH_CMD_STATE_ID__MASK; +} + +#define REG_A6XX_HLSQ_EVENT_CMD 0x0000bb02 +#define A6XX_HLSQ_EVENT_CMD_STATE_ID__MASK 0x00ff0000 +#define A6XX_HLSQ_EVENT_CMD_STATE_ID__SHIFT 16 +static inline uint32_t A6XX_HLSQ_EVENT_CMD_STATE_ID(uint32_t val) +{ + return ((val) << A6XX_HLSQ_EVENT_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_EVENT_CMD_STATE_ID__MASK; +} +#define A6XX_HLSQ_EVENT_CMD_EVENT__MASK 0x0000007f +#define A6XX_HLSQ_EVENT_CMD_EVENT__SHIFT 0 +static inline uint32_t A6XX_HLSQ_EVENT_CMD_EVENT(enum vgt_event_type val) +{ + return ((val) << A6XX_HLSQ_EVENT_CMD_EVENT__SHIFT) & A6XX_HLSQ_EVENT_CMD_EVENT__MASK; +} + +#define REG_A6XX_HLSQ_INVALIDATE_CMD 0x0000bb08 +#define A6XX_HLSQ_INVALIDATE_CMD_VS_STATE 0x00000001 +#define A6XX_HLSQ_INVALIDATE_CMD_HS_STATE 0x00000002 +#define A6XX_HLSQ_INVALIDATE_CMD_DS_STATE 0x00000004 +#define A6XX_HLSQ_INVALIDATE_CMD_GS_STATE 0x00000008 +#define A6XX_HLSQ_INVALIDATE_CMD_FS_STATE 0x00000010 +#define A6XX_HLSQ_INVALIDATE_CMD_CS_STATE 0x00000020 +#define A6XX_HLSQ_INVALIDATE_CMD_CS_IBO 0x00000040 +#define A6XX_HLSQ_INVALIDATE_CMD_GFX_IBO 0x00000080 +#define A6XX_HLSQ_INVALIDATE_CMD_CS_SHARED_CONST 0x00080000 +#define A6XX_HLSQ_INVALIDATE_CMD_GFX_SHARED_CONST 0x00000100 +#define A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__MASK 0x00003e00 +#define A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__SHIFT 9 +static inline uint32_t A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS(uint32_t val) +{ + return ((val) << A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__SHIFT) & A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__MASK; +} +#define A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__MASK 0x0007c000 +#define A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__SHIFT 14 +static inline uint32_t A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS(uint32_t val) +{ + return ((val) << A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__SHIFT) & A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__MASK; +} + +#define REG_A6XX_HLSQ_FS_CNTL 0x0000bb10 +#define A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK 0x000000ff +#define A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT 0 +static inline uint32_t A6XX_HLSQ_FS_CNTL_CONSTLEN(uint32_t val) +{ + return ((val >> 2) << A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK; +} +#define A6XX_HLSQ_FS_CNTL_ENABLED 0x00000100 + +#define REG_A6XX_HLSQ_SHARED_CONSTS 0x0000bb11 +#define A6XX_HLSQ_SHARED_CONSTS_ENABLE 0x00000001 + +static inline uint32_t REG_A6XX_HLSQ_BINDLESS_BASE(uint32_t i0) { return 0x0000bb20 + 0x2*i0; } + +static inline uint32_t REG_A6XX_HLSQ_BINDLESS_BASE_ADDR(uint32_t i0) { return 0x0000bb20 + 0x2*i0; } + +#define REG_A6XX_HLSQ_2D_EVENT_CMD 0x0000bd80 +#define A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__MASK 0x0000ff00 +#define A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__SHIFT 8 +static inline uint32_t A6XX_HLSQ_2D_EVENT_CMD_STATE_ID(uint32_t val) +{ + 
return ((val) << A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__MASK; +} +#define A6XX_HLSQ_2D_EVENT_CMD_EVENT__MASK 0x0000007f +#define A6XX_HLSQ_2D_EVENT_CMD_EVENT__SHIFT 0 +static inline uint32_t A6XX_HLSQ_2D_EVENT_CMD_EVENT(enum vgt_event_type val) +{ + return ((val) << A6XX_HLSQ_2D_EVENT_CMD_EVENT__SHIFT) & A6XX_HLSQ_2D_EVENT_CMD_EVENT__MASK; +} + +#define REG_A6XX_HLSQ_UNKNOWN_BE00 0x0000be00 + +#define REG_A6XX_HLSQ_UNKNOWN_BE01 0x0000be01 + +#define REG_A6XX_HLSQ_UNKNOWN_BE04 0x0000be04 + +#define REG_A6XX_HLSQ_ADDR_MODE_CNTL 0x0000be05 + +#define REG_A6XX_HLSQ_UNKNOWN_BE08 0x0000be08 + +static inline uint32_t REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL(uint32_t i0) { return 0x0000be10 + 0x1*i0; } + +#define REG_A6XX_HLSQ_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE 0x0000be22 + +#define REG_A6XX_CP_EVENT_START 0x0000d600 +#define A6XX_CP_EVENT_START_STATE_ID__MASK 0x000000ff +#define A6XX_CP_EVENT_START_STATE_ID__SHIFT 0 +static inline uint32_t A6XX_CP_EVENT_START_STATE_ID(uint32_t val) +{ + return ((val) << A6XX_CP_EVENT_START_STATE_ID__SHIFT) & A6XX_CP_EVENT_START_STATE_ID__MASK; +} + +#define REG_A6XX_CP_EVENT_END 0x0000d601 +#define A6XX_CP_EVENT_END_STATE_ID__MASK 0x000000ff +#define A6XX_CP_EVENT_END_STATE_ID__SHIFT 0 +static inline uint32_t A6XX_CP_EVENT_END_STATE_ID(uint32_t val) +{ + return ((val) << A6XX_CP_EVENT_END_STATE_ID__SHIFT) & A6XX_CP_EVENT_END_STATE_ID__MASK; +} + +#define REG_A6XX_CP_2D_EVENT_START 0x0000d700 +#define A6XX_CP_2D_EVENT_START_STATE_ID__MASK 0x000000ff +#define A6XX_CP_2D_EVENT_START_STATE_ID__SHIFT 0 +static inline uint32_t A6XX_CP_2D_EVENT_START_STATE_ID(uint32_t val) +{ + return ((val) << A6XX_CP_2D_EVENT_START_STATE_ID__SHIFT) & A6XX_CP_2D_EVENT_START_STATE_ID__MASK; +} + +#define REG_A6XX_CP_2D_EVENT_END 0x0000d701 +#define A6XX_CP_2D_EVENT_END_STATE_ID__MASK 0x000000ff +#define A6XX_CP_2D_EVENT_END_STATE_ID__SHIFT 0 +static inline uint32_t A6XX_CP_2D_EVENT_END_STATE_ID(uint32_t val) +{ + return ((val) << A6XX_CP_2D_EVENT_END_STATE_ID__SHIFT) & A6XX_CP_2D_EVENT_END_STATE_ID__MASK; +} + +#define REG_A6XX_TEX_SAMP_0 0x00000000 +#define A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001 +#define A6XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006 +#define A6XX_TEX_SAMP_0_XY_MAG__SHIFT 1 +static inline uint32_t A6XX_TEX_SAMP_0_XY_MAG(enum a6xx_tex_filter val) +{ + return ((val) << A6XX_TEX_SAMP_0_XY_MAG__SHIFT) & A6XX_TEX_SAMP_0_XY_MAG__MASK; +} +#define A6XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018 +#define A6XX_TEX_SAMP_0_XY_MIN__SHIFT 3 +static inline uint32_t A6XX_TEX_SAMP_0_XY_MIN(enum a6xx_tex_filter val) +{ + return ((val) << A6XX_TEX_SAMP_0_XY_MIN__SHIFT) & A6XX_TEX_SAMP_0_XY_MIN__MASK; +} +#define A6XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0 +#define A6XX_TEX_SAMP_0_WRAP_S__SHIFT 5 +static inline uint32_t A6XX_TEX_SAMP_0_WRAP_S(enum a6xx_tex_clamp val) +{ + return ((val) << A6XX_TEX_SAMP_0_WRAP_S__SHIFT) & A6XX_TEX_SAMP_0_WRAP_S__MASK; +} +#define A6XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700 +#define A6XX_TEX_SAMP_0_WRAP_T__SHIFT 8 +static inline uint32_t A6XX_TEX_SAMP_0_WRAP_T(enum a6xx_tex_clamp val) +{ + return ((val) << A6XX_TEX_SAMP_0_WRAP_T__SHIFT) & A6XX_TEX_SAMP_0_WRAP_T__MASK; +} +#define A6XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800 +#define A6XX_TEX_SAMP_0_WRAP_R__SHIFT 11 +static inline uint32_t A6XX_TEX_SAMP_0_WRAP_R(enum a6xx_tex_clamp val) +{ + return ((val) << A6XX_TEX_SAMP_0_WRAP_R__SHIFT) & A6XX_TEX_SAMP_0_WRAP_R__MASK; +} +#define A6XX_TEX_SAMP_0_ANISO__MASK 0x0001c000 +#define A6XX_TEX_SAMP_0_ANISO__SHIFT 14 +static inline uint32_t 
A6XX_TEX_SAMP_0_ANISO(enum a6xx_tex_aniso val) +{ + return ((val) << A6XX_TEX_SAMP_0_ANISO__SHIFT) & A6XX_TEX_SAMP_0_ANISO__MASK; +} +#define A6XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000 +#define A6XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19 +static inline uint32_t A6XX_TEX_SAMP_0_LOD_BIAS(float val) +{ + return ((((int32_t)(val * 256.0))) << A6XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A6XX_TEX_SAMP_0_LOD_BIAS__MASK; +} + +#define REG_A6XX_TEX_SAMP_1 0x00000001 +#define A6XX_TEX_SAMP_1_CLAMPENABLE 0x00000001 +#define A6XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e +#define A6XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1 +static inline uint32_t A6XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val) +{ + return ((val) << A6XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A6XX_TEX_SAMP_1_COMPARE_FUNC__MASK; +} +#define A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010 +#define A6XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020 +#define A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040 +#define A6XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00 +#define A6XX_TEX_SAMP_1_MAX_LOD__SHIFT 8 +static inline uint32_t A6XX_TEX_SAMP_1_MAX_LOD(float val) +{ + return ((((uint32_t)(val * 256.0))) << A6XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A6XX_TEX_SAMP_1_MAX_LOD__MASK; +} +#define A6XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000 +#define A6XX_TEX_SAMP_1_MIN_LOD__SHIFT 20 +static inline uint32_t A6XX_TEX_SAMP_1_MIN_LOD(float val) +{ + return ((((uint32_t)(val * 256.0))) << A6XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A6XX_TEX_SAMP_1_MIN_LOD__MASK; +} + +#define REG_A6XX_TEX_SAMP_2 0x00000002 +#define A6XX_TEX_SAMP_2_REDUCTION_MODE__MASK 0x00000003 +#define A6XX_TEX_SAMP_2_REDUCTION_MODE__SHIFT 0 +static inline uint32_t A6XX_TEX_SAMP_2_REDUCTION_MODE(enum a6xx_reduction_mode val) +{ + return ((val) << A6XX_TEX_SAMP_2_REDUCTION_MODE__SHIFT) & A6XX_TEX_SAMP_2_REDUCTION_MODE__MASK; +} +#define A6XX_TEX_SAMP_2_CHROMA_LINEAR 0x00000020 +#define A6XX_TEX_SAMP_2_BCOLOR__MASK 0xffffff80 +#define A6XX_TEX_SAMP_2_BCOLOR__SHIFT 7 +static inline uint32_t A6XX_TEX_SAMP_2_BCOLOR(uint32_t val) +{ + return ((val) << A6XX_TEX_SAMP_2_BCOLOR__SHIFT) & A6XX_TEX_SAMP_2_BCOLOR__MASK; +} + +#define REG_A6XX_TEX_SAMP_3 0x00000003 + +#define REG_A6XX_TEX_CONST_0 0x00000000 +#define A6XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003 +#define A6XX_TEX_CONST_0_TILE_MODE__SHIFT 0 +static inline uint32_t A6XX_TEX_CONST_0_TILE_MODE(enum a6xx_tile_mode val) +{ + return ((val) << A6XX_TEX_CONST_0_TILE_MODE__SHIFT) & A6XX_TEX_CONST_0_TILE_MODE__MASK; +} +#define A6XX_TEX_CONST_0_SRGB 0x00000004 +#define A6XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070 +#define A6XX_TEX_CONST_0_SWIZ_X__SHIFT 4 +static inline uint32_t A6XX_TEX_CONST_0_SWIZ_X(enum a6xx_tex_swiz val) +{ + return ((val) << A6XX_TEX_CONST_0_SWIZ_X__SHIFT) & A6XX_TEX_CONST_0_SWIZ_X__MASK; +} +#define A6XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380 +#define A6XX_TEX_CONST_0_SWIZ_Y__SHIFT 7 +static inline uint32_t A6XX_TEX_CONST_0_SWIZ_Y(enum a6xx_tex_swiz val) +{ + return ((val) << A6XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A6XX_TEX_CONST_0_SWIZ_Y__MASK; +} +#define A6XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00 +#define A6XX_TEX_CONST_0_SWIZ_Z__SHIFT 10 +static inline uint32_t A6XX_TEX_CONST_0_SWIZ_Z(enum a6xx_tex_swiz val) +{ + return ((val) << A6XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A6XX_TEX_CONST_0_SWIZ_Z__MASK; +} +#define A6XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000 +#define A6XX_TEX_CONST_0_SWIZ_W__SHIFT 13 +static inline uint32_t A6XX_TEX_CONST_0_SWIZ_W(enum a6xx_tex_swiz val) +{ + return ((val) << A6XX_TEX_CONST_0_SWIZ_W__SHIFT) & A6XX_TEX_CONST_0_SWIZ_W__MASK; +} +#define A6XX_TEX_CONST_0_MIPLVLS__MASK 
0x000f0000 +#define A6XX_TEX_CONST_0_MIPLVLS__SHIFT 16 +static inline uint32_t A6XX_TEX_CONST_0_MIPLVLS(uint32_t val) +{ + return ((val) << A6XX_TEX_CONST_0_MIPLVLS__SHIFT) & A6XX_TEX_CONST_0_MIPLVLS__MASK; +} +#define A6XX_TEX_CONST_0_CHROMA_MIDPOINT_X 0x00010000 +#define A6XX_TEX_CONST_0_CHROMA_MIDPOINT_Y 0x00040000 +#define A6XX_TEX_CONST_0_SAMPLES__MASK 0x00300000 +#define A6XX_TEX_CONST_0_SAMPLES__SHIFT 20 +static inline uint32_t A6XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val) +{ + return ((val) << A6XX_TEX_CONST_0_SAMPLES__SHIFT) & A6XX_TEX_CONST_0_SAMPLES__MASK; +} +#define A6XX_TEX_CONST_0_FMT__MASK 0x3fc00000 +#define A6XX_TEX_CONST_0_FMT__SHIFT 22 +static inline uint32_t A6XX_TEX_CONST_0_FMT(enum a6xx_format val) +{ + return ((val) << A6XX_TEX_CONST_0_FMT__SHIFT) & A6XX_TEX_CONST_0_FMT__MASK; +} +#define A6XX_TEX_CONST_0_SWAP__MASK 0xc0000000 +#define A6XX_TEX_CONST_0_SWAP__SHIFT 30 +static inline uint32_t A6XX_TEX_CONST_0_SWAP(enum a3xx_color_swap val) +{ + return ((val) << A6XX_TEX_CONST_0_SWAP__SHIFT) & A6XX_TEX_CONST_0_SWAP__MASK; +} + +#define REG_A6XX_TEX_CONST_1 0x00000001 +#define A6XX_TEX_CONST_1_WIDTH__MASK 0x00007fff +#define A6XX_TEX_CONST_1_WIDTH__SHIFT 0 +static inline uint32_t A6XX_TEX_CONST_1_WIDTH(uint32_t val) +{ + return ((val) << A6XX_TEX_CONST_1_WIDTH__SHIFT) & A6XX_TEX_CONST_1_WIDTH__MASK; +} +#define A6XX_TEX_CONST_1_HEIGHT__MASK 0x3fff8000 +#define A6XX_TEX_CONST_1_HEIGHT__SHIFT 15 +static inline uint32_t A6XX_TEX_CONST_1_HEIGHT(uint32_t val) +{ + return ((val) << A6XX_TEX_CONST_1_HEIGHT__SHIFT) & A6XX_TEX_CONST_1_HEIGHT__MASK; +} + +#define REG_A6XX_TEX_CONST_2 0x00000002 +#define A6XX_TEX_CONST_2_BUFFER 0x00000010 +#define A6XX_TEX_CONST_2_PITCHALIGN__MASK 0x0000000f +#define A6XX_TEX_CONST_2_PITCHALIGN__SHIFT 0 +static inline uint32_t A6XX_TEX_CONST_2_PITCHALIGN(uint32_t val) +{ + return ((val) << A6XX_TEX_CONST_2_PITCHALIGN__SHIFT) & A6XX_TEX_CONST_2_PITCHALIGN__MASK; +} +#define A6XX_TEX_CONST_2_PITCH__MASK 0x1fffff80 +#define A6XX_TEX_CONST_2_PITCH__SHIFT 7 +static inline uint32_t A6XX_TEX_CONST_2_PITCH(uint32_t val) +{ + return ((val) << A6XX_TEX_CONST_2_PITCH__SHIFT) & A6XX_TEX_CONST_2_PITCH__MASK; +} +#define A6XX_TEX_CONST_2_TYPE__MASK 0xe0000000 +#define A6XX_TEX_CONST_2_TYPE__SHIFT 29 +static inline uint32_t A6XX_TEX_CONST_2_TYPE(enum a6xx_tex_type val) +{ + return ((val) << A6XX_TEX_CONST_2_TYPE__SHIFT) & A6XX_TEX_CONST_2_TYPE__MASK; +} + +#define REG_A6XX_TEX_CONST_3 0x00000003 +#define A6XX_TEX_CONST_3_ARRAY_PITCH__MASK 0x00003fff +#define A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT 0 +static inline uint32_t A6XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val) +{ + return ((val >> 12) << A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A6XX_TEX_CONST_3_ARRAY_PITCH__MASK; +} +#define A6XX_TEX_CONST_3_MIN_LAYERSZ__MASK 0x07800000 +#define A6XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT 23 +static inline uint32_t A6XX_TEX_CONST_3_MIN_LAYERSZ(uint32_t val) +{ + return ((val >> 12) << A6XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT) & A6XX_TEX_CONST_3_MIN_LAYERSZ__MASK; +} +#define A6XX_TEX_CONST_3_TILE_ALL 0x08000000 +#define A6XX_TEX_CONST_3_FLAG 0x10000000 + +#define REG_A6XX_TEX_CONST_4 0x00000004 +#define A6XX_TEX_CONST_4_BASE_LO__MASK 0xffffffe0 +#define A6XX_TEX_CONST_4_BASE_LO__SHIFT 5 +static inline uint32_t A6XX_TEX_CONST_4_BASE_LO(uint32_t val) +{ + return ((val >> 5) << A6XX_TEX_CONST_4_BASE_LO__SHIFT) & A6XX_TEX_CONST_4_BASE_LO__MASK; +} + +#define REG_A6XX_TEX_CONST_5 0x00000005 +#define A6XX_TEX_CONST_5_BASE_HI__MASK 0x0001ffff +#define A6XX_TEX_CONST_5_BASE_HI__SHIFT 0 +static 
inline uint32_t A6XX_TEX_CONST_5_BASE_HI(uint32_t val) +{ + return ((val) << A6XX_TEX_CONST_5_BASE_HI__SHIFT) & A6XX_TEX_CONST_5_BASE_HI__MASK; +} +#define A6XX_TEX_CONST_5_DEPTH__MASK 0x3ffe0000 +#define A6XX_TEX_CONST_5_DEPTH__SHIFT 17 +static inline uint32_t A6XX_TEX_CONST_5_DEPTH(uint32_t val) +{ + return ((val) << A6XX_TEX_CONST_5_DEPTH__SHIFT) & A6XX_TEX_CONST_5_DEPTH__MASK; +} + +#define REG_A6XX_TEX_CONST_6 0x00000006 +#define A6XX_TEX_CONST_6_PLANE_PITCH__MASK 0xffffff00 +#define A6XX_TEX_CONST_6_PLANE_PITCH__SHIFT 8 +static inline uint32_t A6XX_TEX_CONST_6_PLANE_PITCH(uint32_t val) +{ + return ((val) << A6XX_TEX_CONST_6_PLANE_PITCH__SHIFT) & A6XX_TEX_CONST_6_PLANE_PITCH__MASK; +} + +#define REG_A6XX_TEX_CONST_7 0x00000007 +#define A6XX_TEX_CONST_7_FLAG_LO__MASK 0xffffffe0 +#define A6XX_TEX_CONST_7_FLAG_LO__SHIFT 5 +static inline uint32_t A6XX_TEX_CONST_7_FLAG_LO(uint32_t val) +{ + return ((val >> 5) << A6XX_TEX_CONST_7_FLAG_LO__SHIFT) & A6XX_TEX_CONST_7_FLAG_LO__MASK; +} + +#define REG_A6XX_TEX_CONST_8 0x00000008 +#define A6XX_TEX_CONST_8_FLAG_HI__MASK 0x0001ffff +#define A6XX_TEX_CONST_8_FLAG_HI__SHIFT 0 +static inline uint32_t A6XX_TEX_CONST_8_FLAG_HI(uint32_t val) +{ + return ((val) << A6XX_TEX_CONST_8_FLAG_HI__SHIFT) & A6XX_TEX_CONST_8_FLAG_HI__MASK; +} + +#define REG_A6XX_TEX_CONST_9 0x00000009 +#define A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__MASK 0x0001ffff +#define A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__SHIFT 0 +static inline uint32_t A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH(uint32_t val) +{ + return ((val >> 4) << A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__MASK; +} + +#define REG_A6XX_TEX_CONST_10 0x0000000a +#define A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__MASK 0x0000007f +#define A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__SHIFT 0 +static inline uint32_t A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH(uint32_t val) +{ + return ((val >> 6) << A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__MASK; +} +#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__MASK 0x00000f00 +#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__SHIFT 8 +static inline uint32_t A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW(uint32_t val) +{ + return ((val) << A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__MASK; +} +#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__MASK 0x0000f000 +#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__SHIFT 12 +static inline uint32_t A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH(uint32_t val) +{ + return ((val) << A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__MASK; +} + +#define REG_A6XX_TEX_CONST_11 0x0000000b + +#define REG_A6XX_TEX_CONST_12 0x0000000c + +#define REG_A6XX_TEX_CONST_13 0x0000000d + +#define REG_A6XX_TEX_CONST_14 0x0000000e + +#define REG_A6XX_TEX_CONST_15 0x0000000f + +#define REG_A6XX_UBO_0 0x00000000 +#define A6XX_UBO_0_BASE_LO__MASK 0xffffffff +#define A6XX_UBO_0_BASE_LO__SHIFT 0 +static inline uint32_t A6XX_UBO_0_BASE_LO(uint32_t val) +{ + return ((val) << A6XX_UBO_0_BASE_LO__SHIFT) & A6XX_UBO_0_BASE_LO__MASK; +} + +#define REG_A6XX_UBO_1 0x00000001 +#define A6XX_UBO_1_BASE_HI__MASK 0x0001ffff +#define A6XX_UBO_1_BASE_HI__SHIFT 0 +static inline uint32_t A6XX_UBO_1_BASE_HI(uint32_t val) +{ + return ((val) << A6XX_UBO_1_BASE_HI__SHIFT) & A6XX_UBO_1_BASE_HI__MASK; +} +#define A6XX_UBO_1_SIZE__MASK 0xfffe0000 +#define A6XX_UBO_1_SIZE__SHIFT 17 +static inline uint32_t A6XX_UBO_1_SIZE(uint32_t val) +{ + return ((val) << A6XX_UBO_1_SIZE__SHIFT) & A6XX_UBO_1_SIZE__MASK; +} 
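Every field helper in this generated header follows the same pattern: shift the value into position, then mask it, so a full register word is built by OR-ing the per-field packers together. A few helpers pre-scale their argument first (A6XX_TEX_CONST_3_ARRAY_PITCH above shifts right by 12, i.e. it takes a byte pitch and stores it in 4096-byte units). A minimal sketch of a caller, assuming the definitions above are in scope; the pack_* wrappers and their arguments are illustrative only, not part of this patch:

	#include <stdint.h>

	/* A6XX_TEX_CONST_1: WIDTH occupies bits [14:0] and HEIGHT bits
	 * [29:15], so the two packed fields are simply OR'd into one dword. */
	static uint32_t pack_tex_const_1(uint32_t width, uint32_t height)
	{
		return A6XX_TEX_CONST_1_WIDTH(width) |
		       A6XX_TEX_CONST_1_HEIGHT(height);
	}

	/* Pre-scaled field: the helper drops the low 12 bits, so the caller
	 * passes the array pitch in bytes and it must be 4096-aligned. */
	static uint32_t pack_tex_const_3(uint32_t array_pitch_bytes)
	{
		return A6XX_TEX_CONST_3_ARRAY_PITCH(array_pitch_bytes);
	}

Out-of-range values are silently truncated by the trailing "& MASK", which is why the masks and shifts are kept visible next to each packer.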
+ +#define REG_A6XX_PDC_GPU_ENABLE_PDC 0x00001140 + +#define REG_A6XX_PDC_GPU_SEQ_START_ADDR 0x00001148 + +#define REG_A6XX_PDC_GPU_TCS0_CONTROL 0x00001540 + +#define REG_A6XX_PDC_GPU_TCS0_CMD_ENABLE_BANK 0x00001541 + +#define REG_A6XX_PDC_GPU_TCS0_CMD_WAIT_FOR_CMPL_BANK 0x00001542 + +#define REG_A6XX_PDC_GPU_TCS0_CMD0_MSGID 0x00001543 + +#define REG_A6XX_PDC_GPU_TCS0_CMD0_ADDR 0x00001544 + +#define REG_A6XX_PDC_GPU_TCS0_CMD0_DATA 0x00001545 + +#define REG_A6XX_PDC_GPU_TCS1_CONTROL 0x00001572 + +#define REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK 0x00001573 + +#define REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK 0x00001574 + +#define REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID 0x00001575 + +#define REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR 0x00001576 + +#define REG_A6XX_PDC_GPU_TCS1_CMD0_DATA 0x00001577 + +#define REG_A6XX_PDC_GPU_TCS2_CONTROL 0x000015a4 + +#define REG_A6XX_PDC_GPU_TCS2_CMD_ENABLE_BANK 0x000015a5 + +#define REG_A6XX_PDC_GPU_TCS2_CMD_WAIT_FOR_CMPL_BANK 0x000015a6 + +#define REG_A6XX_PDC_GPU_TCS2_CMD0_MSGID 0x000015a7 + +#define REG_A6XX_PDC_GPU_TCS2_CMD0_ADDR 0x000015a8 + +#define REG_A6XX_PDC_GPU_TCS2_CMD0_DATA 0x000015a9 + +#define REG_A6XX_PDC_GPU_TCS3_CONTROL 0x000015d6 + +#define REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK 0x000015d7 + +#define REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK 0x000015d8 + +#define REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID 0x000015d9 + +#define REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR 0x000015da + +#define REG_A6XX_PDC_GPU_TCS3_CMD0_DATA 0x000015db + +#define REG_A6XX_PDC_GPU_SEQ_MEM_0 0x00000000 + +#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A 0x00000000 +#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__MASK 0x000000ff +#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT 0 +static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(uint32_t val) +{ + return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__MASK; +} +#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK 0x0000ff00 +#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT 8 +static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(uint32_t val) +{ + return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK; +} + +#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B 0x00000001 + +#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C 0x00000002 + +#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D 0x00000003 + +#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT 0x00000004 +#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f +#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0 +static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val) +{ + return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK; +} +#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000 +#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12 +static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val) +{ + return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK; +} +#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000 +#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28 +static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val) +{ + return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK; +} + +#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM 0x00000005 +#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000 +#define 
A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24 +static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val) +{ + return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK; +} + +#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0 0x00000008 + +#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1 0x00000009 + +#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2 0x0000000a + +#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3 0x0000000b + +#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0 0x0000000c + +#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1 0x0000000d + +#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2 0x0000000e + +#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3 0x0000000f + +#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0 0x00000010 +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0 +static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val) +{ + return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK; +} +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0 +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4 +static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val) +{ + return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK; +} +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00 +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8 +static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val) +{ + return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK; +} +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000 +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12 +static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val) +{ + return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK; +} +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000 +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16 +static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val) +{ + return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK; +} +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000 +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20 +static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val) +{ + return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK; +} +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000 +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24 +static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val) +{ + return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK; +} +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000 +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28 +static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val) +{ + return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK; +} + +#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1 0x00000011 +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0 +static inline uint32_t 
A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val) +{ + return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK; +} +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0 +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4 +static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val) +{ + return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK; +} +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00 +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8 +static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val) +{ + return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK; +} +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000 +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12 +static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val) +{ + return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK; +} +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000 +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16 +static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val) +{ + return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK; +} +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000 +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20 +static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val) +{ + return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK; +} +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000 +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24 +static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val) +{ + return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK; +} +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000 +#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28 +static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val) +{ + return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK; +} + +#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0000002f + +#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000030 + +#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0 0x00000001 + +#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1 0x00000002 + + +#endif /* A6XX_XML */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c new file mode 100644 index 000000000..870252bef --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -0,0 +1,1658 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. 
 */
+
+#include <linux/clk.h>
+#include <linux/interconnect.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <soc/qcom/cmd-db.h>
+#include <drm/drm_gem.h>
+
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.xml.h"
+#include "msm_gem.h"
+#include "msm_gpu_trace.h"
+#include "msm_mmu.h"
+
+static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
+{
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+	struct msm_gpu *gpu = &adreno_gpu->base;
+
+	/* FIXME: add a banner here */
+	gmu->hung = true;
+
+	/* Turn off the hangcheck timer while we are resetting */
+	del_timer(&gpu->hangcheck_timer);
+
+	/* Queue the GPU handler because we need to treat this as a recovery */
+	kthread_queue_work(gpu->worker, &gpu->recover_work);
+}
+
+static irqreturn_t a6xx_gmu_irq(int irq, void *data)
+{
+	struct a6xx_gmu *gmu = data;
+	u32 status;
+
+	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
+	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);
+
+	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
+		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
+
+		a6xx_gmu_fault(gmu);
+	}
+
+	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
+		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");
+
+	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
+		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
+			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t a6xx_hfi_irq(int irq, void *data)
+{
+	struct a6xx_gmu *gmu = data;
+	u32 status;
+
+	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
+	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);
+
+	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
+		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
+
+		a6xx_gmu_fault(gmu);
+	}
+
+	return IRQ_HANDLED;
+}
+
+bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
+{
+	u32 val;
+
+	/* This can be called from gpu state code so make sure GMU is valid */
+	if (!gmu->initialized)
+		return false;
+
+	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+
+	return !(val &
+		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
+		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
+}
+
+/* Check to see if the GX rail is still powered */
+bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
+{
+	u32 val;
+
+	/* This can be called from gpu state code so make sure GMU is valid */
+	if (!gmu->initialized)
+		return false;
+
+	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+
+	return !(val &
+		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
+		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
+}
+
+void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
+		       bool suspended)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+	u32 perf_index;
+	unsigned long gpu_freq;
+	int ret = 0;
+
+	gpu_freq = dev_pm_opp_get_freq(opp);
+
+	if (gpu_freq == gmu->freq)
+		return;
+
+	for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
+		if (gpu_freq == gmu->gpu_freqs[perf_index])
+			break;
+
+	gmu->current_perf_index = perf_index;
+	gmu->freq = gmu->gpu_freqs[perf_index];
+
+	trace_msm_gmu_freq_change(gmu->freq, perf_index);
+
+	/*
+	 * This can get called from devfreq while the hardware is idle. Don't
+	 * bring up the power if it isn't already active. All we're doing here
+	 * is updating the frequency so that when we come back online we're at
+	 * the right rate.
+	 */
+	if (suspended)
+		return;
+
+	if (!gmu->legacy) {
+		a6xx_hfi_set_freq(gmu, perf_index);
+		dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
+		return;
+	}
+
+	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
+
+	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
+			((3 & 0xf) << 28) | perf_index);
+
+	/*
+	 * Send an invalid index as a vote for the bus bandwidth and let the
+	 * firmware decide on the right vote
+	 */
+	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);
+
+	/* Set and clear the OOB for DCVS to trigger the GMU */
+	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
+	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);
+
+	ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
+	if (ret)
+		dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
+
+	dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
+}
+
+unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+
+	return gmu->freq;
+}
+
+static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
+{
+	u32 val;
+	int local = gmu->idle_level;
+
+	/* SPTP and IFPC both report as IFPC */
+	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
+		local = GMU_IDLE_STATE_IFPC;
+
+	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+
+	if (val == local) {
+		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
+			!a6xx_gmu_gx_is_on(gmu))
+			return true;
+	}
+
+	return false;
+}
+
+/* Wait for the GMU to get to its most idle state */
+int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
+{
+	return spin_until(a6xx_gmu_check_idle_level(gmu));
+}
+
+static int a6xx_gmu_start(struct a6xx_gmu *gmu)
+{
+	int ret;
+	u32 val;
+	u32 mask, reset_val;
+
+	val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
+	if (val <= 0x20010004) {
+		mask = 0xffffffff;
+		reset_val = 0xbabeface;
+	} else {
+		mask = 0x1ff;
+		reset_val = 0x100;
+	}
+
+	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
+
+	/* Set the log wptr index
+	 * note: downstream saves the value in poweroff and restores it here
+	 */
+	gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
+
+	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
+
+	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
+		(val & mask) == reset_val, 100, 10000);
+
+	if (ret)
+		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
+
+	return ret;
+}
+
+static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
+{
+	u32 val;
+	int ret;
+
+	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);
+
+	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
+		val & 1, 100, 10000);
+	if (ret)
+		DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");
+
+	return ret;
+}
+
+struct a6xx_gmu_oob_bits {
+	int set, ack, set_new, ack_new, clear, clear_new;
+	const char *name;
+};
+
+/* These are the interrupt / ack bits for each OOB request that are set
+ * in a6xx_gmu_set_oob and a6xx_gmu_clear_oob
+ */
+static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
+	[GMU_OOB_GPU_SET] = {
+		.name = "GPU_SET",
+		.set = 16,
+		.ack = 24,
+		.set_new = 30,
+		.ack_new = 31,
+		.clear = 24,
+		.clear_new = 31,
+	},
+
+	[GMU_OOB_PERFCOUNTER_SET] = {
+		.name = "PERFCOUNTER",
+		.set = 17,
+		.ack = 25,
+		.set_new = 28,
+		.ack_new = 30,
+		.clear = 25,
+		.clear_new = 29,
+	},
+
+	[GMU_OOB_BOOT_SLUMBER] = {
+		.name = "BOOT_SLUMBER",
+		.set = 22,
+		.ack = 30,
+		.clear = 30,
+	},
+
+	[GMU_OOB_DCVS_SET] = {
+		.name = "GPU_DCVS",
+		.set = 23,
+		.ack = 31,
+		.clear = 31,
+	},
+};
+
+/* Trigger an OOB (out of band) request to the GMU */
+int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+{
+	int ret;
+	u32 val;
+	int request, ack;
+
+	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+
+	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
+		return -EINVAL;
+
+	if (gmu->legacy) {
+		request = a6xx_gmu_oob_bits[state].set;
+		ack = a6xx_gmu_oob_bits[state].ack;
+	} else {
+		request = a6xx_gmu_oob_bits[state].set_new;
+		ack = a6xx_gmu_oob_bits[state].ack_new;
+		if (!request || !ack) {
+			DRM_DEV_ERROR(gmu->dev,
+				"Invalid non-legacy GMU request %s\n",
+				a6xx_gmu_oob_bits[state].name);
+			return -EINVAL;
+		}
+	}
+
+	/* Trigger the requested OOB operation */
+	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
+
+	/* Wait for the acknowledge interrupt */
+	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
+		val & (1 << ack), 100, 10000);
+
+	if (ret)
+		DRM_DEV_ERROR(gmu->dev,
+			"Timeout waiting for GMU OOB set %s: 0x%x\n",
+				a6xx_gmu_oob_bits[state].name,
+				gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
+
+	/* Clear the acknowledge interrupt */
+	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);
+
+	return ret;
+}
+
+/* Clear a pending OOB state in the GMU */
+void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+{
+	int bit;
+
+	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+
+	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
+		return;
+
+	if (gmu->legacy)
+		bit = a6xx_gmu_oob_bits[state].clear;
+	else
+		bit = a6xx_gmu_oob_bits[state].clear_new;
+
+	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
+}
+
+/* Enable CPU control of SPTP power collapse */
+static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
+{
+	int ret;
+	u32 val;
+
+	if (!gmu->legacy)
+		return 0;
+
+	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
+
+	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
+		(val & 0x38) == 0x28, 1, 100);
+
+	if (ret) {
+		DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
+			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
+	}
+
+	return 0;
+}
+
+/* Disable CPU control of SPTP power collapse */
+static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
+{
+	u32 val;
+	int ret;
+
+	if (!gmu->legacy)
+		return;
+
+	/* Make sure retention is on */
+	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));
+
+	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);
+
+	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
+		(val & 0x04), 100, 10000);
+
+	if (ret)
+		DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
+			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
+}
+
+/* Let the GMU know we are starting a boot sequence */
+static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
+{
+	u32 vote;
+
+	/* Let the GMU know we are getting ready for boot */
+	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);
+
+	/* Choose the "default" power level as the highest available */
+	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];
+
+	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
+	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);
+
+	/* Let the GMU know the boot sequence has started */
+	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+}
+
+/* Let the GMU know that we are about to go into slumber */
+static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
+{
+	int ret;
+
+	/* Disable the power counter so the GMU isn't busy */
+	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
+
+	/* Disable SPTP_PC if the CPU is responsible for it */
+	if
(gmu->idle_level < GMU_IDLE_STATE_SPTP) + a6xx_sptprac_disable(gmu); + + if (!gmu->legacy) { + ret = a6xx_hfi_send_prep_slumber(gmu); + goto out; + } + + /* Tell the GMU to get ready to slumber */ + gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1); + + ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER); + a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER); + + if (!ret) { + /* Check to see if the GMU really did slumber */ + if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE) + != 0x0f) { + DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n"); + ret = -ETIMEDOUT; + } + } + +out: + /* Put fence into allow mode */ + gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0); + return ret; +} + +static int a6xx_rpmh_start(struct a6xx_gmu *gmu) +{ + int ret; + u32 val; + + gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1); + /* Wait for the register to finish posting */ + wmb(); + + ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val, + val & (1 << 1), 100, 10000); + if (ret) { + DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n"); + return ret; + } + + ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val, + !val, 100, 10000); + + if (ret) { + DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n"); + return ret; + } + + gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); + + /* Set up CX GMU counter 0 to count busy ticks */ + gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000); + gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20); + + /* Enable the power counter */ + gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1); + return 0; +} + +static void a6xx_rpmh_stop(struct a6xx_gmu *gmu) +{ + int ret; + u32 val; + + gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1); + + ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, + val, val & (1 << 16), 100, 10000); + if (ret) + DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n"); + + gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); +} + +static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value) +{ + msm_writel(value, ptr + (offset << 2)); +} + +static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev, + const char *name); + +static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) +{ + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct platform_device *pdev = to_platform_device(gmu->dev); + void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc"); + void __iomem *seqptr = NULL; + uint32_t pdc_address_offset; + bool pdc_in_aop = false; + + if (IS_ERR(pdcptr)) + goto err; + + if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu)) + pdc_in_aop = true; + else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu)) + pdc_address_offset = 0x30090; + else if (adreno_is_a619(adreno_gpu)) + pdc_address_offset = 0x300a0; + else + pdc_address_offset = 0x30080; + + if (!pdc_in_aop) { + seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq"); + if (IS_ERR(seqptr)) + goto err; + } + + /* Disable SDE clock gating */ + gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24)); + + /* Setup RSC PDC handshake for sleep and wakeup */ + gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1); + gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0); + gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0); + gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0); + gmu_write_rscc(gmu, 
REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0); + gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000); + gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0); + gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0); + gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520); + gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510); + gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514); + + /* Load RSC sequencer uCode for sleep and wakeup */ + if (adreno_is_a650_family(adreno_gpu)) { + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0); + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab); + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581); + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2); + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad); + } else { + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0); + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7); + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1); + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2); + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8); + } + + if (pdc_in_aop) + goto setup_pdc; + + /* Load PDC sequencer uCode for power up and power down sequence */ + pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1); + pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2); + pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0); + pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284); + pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc); + + /* Set TCS commands used by PDC sequence for low power modes */ + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0); + + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0); + + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2); + + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000); + if (adreno_is_a618(adreno_gpu) || adreno_is_a619(adreno_gpu) || + adreno_is_a650_family(adreno_gpu)) + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2); + else + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3); + + /* Setup GPU PDC */ +setup_pdc: + pdc_write(pdcptr, 
REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001); + + /* ensure no writes happen before the uCode is fully written */ + wmb(); + +err: + if (!IS_ERR_OR_NULL(pdcptr)) + iounmap(pdcptr); + if (!IS_ERR_OR_NULL(seqptr)) + iounmap(seqptr); +} + +/* + * The lowest 16 bits of this value are the number of XO clock cycles for main + * hysteresis which is set at 0x1680 cycles (300 us). The higher 16 bits are + * for the shorter hysteresis that happens after main - this is 0xa (.5 us) + */ + +#define GMU_PWR_COL_HYST 0x000a1680 + +/* Set up the idle state for the GMU */ +static void a6xx_gmu_power_config(struct a6xx_gmu *gmu) +{ + /* Disable GMU WB/RB buffer */ + gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1); + gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1); + gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1); + + gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400); + + switch (gmu->idle_level) { + case GMU_IDLE_STATE_IFPC: + gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST, + GMU_PWR_COL_HYST); + gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0, + A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE | + A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE); + fallthrough; + case GMU_IDLE_STATE_SPTP: + gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST, + GMU_PWR_COL_HYST); + gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0, + A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE | + A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE); + } + + /* Enable RPMh GPU client */ + gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0, + A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE | + A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE | + A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE | + A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE | + A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE | + A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE); +} + +struct block_header { + u32 addr; + u32 size; + u32 type; + u32 value; + u32 data[]; +}; + +/* this should be a general kernel helper */ +static int in_range(u32 addr, u32 start, u32 size) +{ + return addr >= start && addr < start + size; +} + +static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk) +{ + if (!in_range(blk->addr, bo->iova, bo->size)) + return false; + + memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size); + return true; +} + +static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu) +{ + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU]; + const struct block_header *blk; + u32 reg_offset; + + u32 itcm_base = 0x00000000; + u32 dtcm_base = 0x00040000; + + if (adreno_is_a650_family(adreno_gpu)) + dtcm_base = 0x10004000; + + if (gmu->legacy) { + /* Sanity check the size of the firmware that was loaded */ + if (fw_image->size > 0x8000) { + DRM_DEV_ERROR(gmu->dev, + "GMU firmware is bigger than the available region\n"); + return -EINVAL; + } + + gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START, + (u32*) fw_image->data, fw_image->size); + return 0; + } + + + for (blk = (const struct block_header *) fw_image->data; + (const u8*) blk < fw_image->data + fw_image->size; + blk = (const struct block_header *) &blk->data[blk->size >> 2]) { + if (blk->size == 0) + continue; + + if (in_range(blk->addr, itcm_base, SZ_16K)) { + reg_offset = (blk->addr - itcm_base) >> 2; + gmu_write_bulk(gmu, + REG_A6XX_GMU_CM3_ITCM_START + reg_offset, + blk->data, blk->size); + } else if (in_range(blk->addr, dtcm_base, SZ_16K)) { + 
reg_offset = (blk->addr - dtcm_base) >> 2; + gmu_write_bulk(gmu, + REG_A6XX_GMU_CM3_DTCM_START + reg_offset, + blk->data, blk->size); + } else if (!fw_block_mem(&gmu->icache, blk) && + !fw_block_mem(&gmu->dcache, blk) && + !fw_block_mem(&gmu->dummy, blk)) { + DRM_DEV_ERROR(gmu->dev, + "failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n", + blk->addr, blk->size, blk->data[0]); + } + } + + return 0; +} + +static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) +{ + static bool rpmh_init; + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + int ret; + u32 chipid; + + if (adreno_is_a650_family(adreno_gpu)) { + gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1); + gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1); + } + + if (state == GMU_WARM_BOOT) { + ret = a6xx_rpmh_start(gmu); + if (ret) + return ret; + } else { + if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU], + "GMU firmware is not loaded\n")) + return -ENOENT; + + /* Turn on register retention */ + gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1); + + /* We only need to load the RPMh microcode once */ + if (!rpmh_init) { + a6xx_gmu_rpmh_init(gmu); + rpmh_init = true; + } else { + ret = a6xx_rpmh_start(gmu); + if (ret) + return ret; + } + + ret = a6xx_gmu_fw_load(gmu); + if (ret) + return ret; + } + + gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0); + gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02); + + /* Write the iova of the HFI table */ + gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova); + gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1); + + gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0, + (1 << 31) | (0xa << 18) | (0xa0)); + + chipid = adreno_gpu->rev.core << 24; + chipid |= adreno_gpu->rev.major << 16; + chipid |= adreno_gpu->rev.minor << 12; + chipid |= adreno_gpu->rev.patchid << 8; + + gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid); + + gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG, + gmu->log.iova | (gmu->log.size / SZ_4K - 1)); + + /* Set up the lowest idle level on the GMU */ + a6xx_gmu_power_config(gmu); + + ret = a6xx_gmu_start(gmu); + if (ret) + return ret; + + if (gmu->legacy) { + ret = a6xx_gmu_gfx_rail_on(gmu); + if (ret) + return ret; + } + + /* Enable SPTP_PC if the CPU is responsible for it */ + if (gmu->idle_level < GMU_IDLE_STATE_SPTP) { + ret = a6xx_sptprac_enable(gmu); + if (ret) + return ret; + } + + ret = a6xx_gmu_hfi_start(gmu); + if (ret) + return ret; + + /* FIXME: Do we need this wmb() here? 
*/ + wmb(); + + return 0; +} + +#define A6XX_HFI_IRQ_MASK \ + (A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) + +#define A6XX_GMU_IRQ_MASK \ + (A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \ + A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \ + A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR) + +static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu) +{ + disable_irq(gmu->gmu_irq); + disable_irq(gmu->hfi_irq); + + gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0); + gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0); +} + +static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu) +{ + u32 val; + + /* Make sure there are no outstanding RPMh votes */ + gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val, + (val & 1), 100, 10000); + gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val, + (val & 1), 100, 10000); + gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val, + (val & 1), 100, 10000); + gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val, + (val & 1), 100, 1000); +} + +#define GBIF_CLIENT_HALT_MASK BIT(0) +#define GBIF_ARB_HALT_MASK BIT(1) + +static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, + bool gx_off) +{ + struct msm_gpu *gpu = &adreno_gpu->base; + + if (!a6xx_has_gbif(adreno_gpu)) { + gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf); + spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & + 0xf) == 0xf); + gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); + + return; + } + + if (gx_off) { + /* Halt the gx side of GBIF */ + gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1); + spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1); + } + + /* Halt new client requests on GBIF */ + gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK); + spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & + (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK); + + /* Halt all AXI requests on GBIF */ + gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK); + spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & + (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK); + + /* The GBIF halt needs to be explicitly cleared */ + gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); +} + +/* Force the GMU off in case it isn't responsive */ +static void a6xx_gmu_force_off(struct a6xx_gmu *gmu) +{ + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + + /* Flush all the queues */ + a6xx_hfi_stop(gmu); + + /* Stop the interrupts */ + a6xx_gmu_irq_disable(gmu); + + /* Force off SPTP in case the GMU is managing it */ + a6xx_sptprac_disable(gmu); + + /* Make sure there are no outstanding RPMh votes */ + a6xx_gmu_rpmh_off(gmu); + + /* Halt the gmu cm3 core */ + gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); + + a6xx_bus_clear_pending_transactions(adreno_gpu, true); + + /* Reset GPU core blocks */ + gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, 1); + udelay(100); +} + +static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu) +{ + struct dev_pm_opp *gpu_opp; + unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; + + gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); + if (IS_ERR(gpu_opp)) + return; + + gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */ + a6xx_gmu_set_freq(gpu, gpu_opp, false); + dev_pm_opp_put(gpu_opp); +} + +static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu) +{ + struct dev_pm_opp *gpu_opp; + unsigned long gpu_freq = 
gmu->gpu_freqs[gmu->current_perf_index]; + + gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); + if (IS_ERR(gpu_opp)) + return; + + dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp); + dev_pm_opp_put(gpu_opp); +} + +int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) +{ + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + int status, ret; + + if (WARN(!gmu->initialized, "The GMU is not set up yet\n")) + return 0; + + gmu->hung = false; + + /* Turn on the resources */ + pm_runtime_get_sync(gmu->dev); + + /* + * "enable" the GX power domain which won't actually do anything but it + * will make sure that the refcounting is correct in case we need to + * bring down the GX after a GMU failure + */ + if (!IS_ERR_OR_NULL(gmu->gxpd)) + pm_runtime_get_sync(gmu->gxpd); + + /* Use a known rate to bring up the GMU */ + clk_set_rate(gmu->core_clk, 200000000); + clk_set_rate(gmu->hub_clk, 150000000); + ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); + if (ret) { + pm_runtime_put(gmu->gxpd); + pm_runtime_put(gmu->dev); + return ret; + } + + /* Set the bus quota to a reasonable value for boot */ + a6xx_gmu_set_initial_bw(gpu, gmu); + + /* Enable the GMU interrupt */ + gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0); + gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK); + enable_irq(gmu->gmu_irq); + + /* Check to see if we are doing a cold or warm boot */ + status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ? + GMU_WARM_BOOT : GMU_COLD_BOOT; + + /* + * Warm boot path does not work on newer GPUs + * Presumably this is because icache/dcache regions must be restored + */ + if (!gmu->legacy) + status = GMU_COLD_BOOT; + + ret = a6xx_gmu_fw_start(gmu, status); + if (ret) + goto out; + + ret = a6xx_hfi_start(gmu, status); + if (ret) + goto out; + + /* + * Turn on the GMU firmware fault interrupt after we know the boot + * sequence is successful + */ + gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0); + gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK); + enable_irq(gmu->hfi_irq); + + /* Set the GPU to the current freq */ + a6xx_gmu_set_initial_freq(gpu, gmu); + +out: + /* On failure, shut down the GMU to leave it in a good state */ + if (ret) { + disable_irq(gmu->gmu_irq); + a6xx_rpmh_stop(gmu); + pm_runtime_put(gmu->gxpd); + pm_runtime_put(gmu->dev); + } + + return ret; +} + +bool a6xx_gmu_isidle(struct a6xx_gmu *gmu) +{ + u32 reg; + + if (!gmu->initialized) + return true; + + reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS); + + if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB) + return false; + + return true; +} + +/* Gracefully try to shut down the GMU and by extension the GPU */ +static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) +{ + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + u32 val; + + /* + * The GMU may still be in slumber unless the GPU started so check and + * skip putting it back into slumber if so + */ + val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); + + if (val != 0xf) { + int ret = a6xx_gmu_wait_for_idle(gmu); + + /* If the GMU isn't responding assume it is hung */ + if (ret) { + a6xx_gmu_force_off(gmu); + return; + } + + a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung); + + /* tell the GMU we want to slumber */ + ret = a6xx_gmu_notify_slumber(gmu); + if (ret) { + a6xx_gmu_force_off(gmu); + return; + } + + ret = 
gmu_poll_timeout(gmu,
+			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
+			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
+			100, 10000);
+
+		/*
+		 * Let the user know we failed to slumber but don't worry too
+		 * much because we are powering down anyway
+		 */
+
+		if (ret)
+			DRM_DEV_ERROR(gmu->dev,
+				"Unable to slumber GMU: status = 0x%x/0x%x\n",
+				gmu_read(gmu,
+					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
+				gmu_read(gmu,
+					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
+	}
+
+	/* Turn off HFI */
+	a6xx_hfi_stop(gmu);
+
+	/* Stop the interrupts and mask the hardware */
+	a6xx_gmu_irq_disable(gmu);
+
+	/* Tell RPMh to power off the GPU */
+	a6xx_rpmh_stop(gmu);
+}
+
+
+int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
+{
+	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+	struct msm_gpu *gpu = &a6xx_gpu->base.base;
+
+	if (!pm_runtime_active(gmu->dev))
+		return 0;
+
+	/*
+	 * Force the GMU off if we detected a hang, otherwise try to shut it
+	 * down gracefully
+	 */
+	if (gmu->hung)
+		a6xx_gmu_force_off(gmu);
+	else
+		a6xx_gmu_shutdown(gmu);
+
+	/* Remove the bus vote */
+	dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
+
+	/*
+	 * Make sure the GX domain is off before turning off the GMU (CX)
+	 * domain. Usually the GMU does this but only if the shutdown sequence
+	 * was successful
+	 */
+	if (!IS_ERR_OR_NULL(gmu->gxpd))
+		pm_runtime_put_sync(gmu->gxpd);
+
+	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
+
+	pm_runtime_put_sync(gmu->dev);
+
+	return 0;
+}
+
+static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
+{
+	msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace);
+	msm_gem_kernel_put(gmu->debug.obj, gmu->aspace);
+	msm_gem_kernel_put(gmu->icache.obj, gmu->aspace);
+	msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace);
+	msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace);
+	msm_gem_kernel_put(gmu->log.obj, gmu->aspace);
+
+	gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
+	msm_gem_address_space_put(gmu->aspace);
+}
+
+static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
+		size_t size, u64 iova, const char *name)
+{
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct drm_device *dev = a6xx_gpu->base.base.dev;
+	uint32_t flags = MSM_BO_WC;
+	u64 range_start, range_end;
+	int ret;
+
+	size = PAGE_ALIGN(size);
+	if (!iova) {
+		/* no fixed address - use GMU's uncached range */
+		range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */
+		range_end = 0x80000000;
+	} else {
+		/* range for fixed address */
+		range_start = iova;
+		range_end = iova + size;
+		/* use IOMMU_PRIV for icache/dcache */
+		flags |= MSM_BO_MAP_PRIV;
+	}
+
+	bo->obj = msm_gem_new(dev, size, flags);
+	if (IS_ERR(bo->obj))
+		return PTR_ERR(bo->obj);
+
+	ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
+		range_start, range_end);
+	if (ret) {
+		drm_gem_object_put(bo->obj);
+		return ret;
+	}
+
+	bo->virt = msm_gem_get_vaddr(bo->obj);
+	bo->size = size;
+
+	msm_gem_object_set_name(bo->obj, name);
+
+	return 0;
+}
+
+static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
+{
+	struct iommu_domain *domain;
+	struct msm_mmu *mmu;
+
+	domain = iommu_domain_alloc(&platform_bus_type);
+	if (!domain)
+		return -ENODEV;
+
+	mmu = msm_iommu_new(gmu->dev, domain);
+	gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
+	if (IS_ERR(gmu->aspace)) {
+		iommu_domain_free(domain);
+		return PTR_ERR(gmu->aspace);
+	}
+
+	return 0;
+}
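A short aside on the allocator above: a6xx_gmu_memory_alloc() has two placement modes, and the following standalone sketch (the struct and function names are invented for illustration, not driver code) restates the range selection it performs. Fixed-iova BOs such as the icache/dcache images are pinned exactly at their address and mapped privileged, while everything else lands in the GMU's uncached window between 0x60000000 and 0x80000000, skipping the dummy page at its base:

#include <stdbool.h>
#include <stdint.h>

struct gmu_alloc_range {
	uint64_t start, end;
	bool priv;
};

/* Illustrative only: mirrors the range logic in a6xx_gmu_memory_alloc() */
static struct gmu_alloc_range gmu_pick_range(uint64_t iova, uint64_t size)
{
	struct gmu_alloc_range r;

	if (!iova) {
		/* no fixed address: GMU uncached window, skipping the dummy page */
		r.start = 0x60000000ull + 4096;
		r.end = 0x80000000ull;
		r.priv = false;
	} else {
		/* fixed address: pin exactly at [iova, iova + size) */
		r.start = iova;
		r.end = iova + size;
		r.priv = true;	/* mapped with IOMMU_PRIV, i.e. MSM_BO_MAP_PRIV */
	}
	return r;
}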
+
+/* Return the 'arc-level' for the given frequency */
+static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
+		unsigned long freq)
+{
+	struct dev_pm_opp *opp;
+	unsigned int val;
+
+	if (!freq)
+		return 0;
+
+	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
+	if (IS_ERR(opp))
+		return 0;
+
+	val = dev_pm_opp_get_level(opp);
+
+	dev_pm_opp_put(opp);
+
+	return val;
+}
+
+static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
+		unsigned long *freqs, int freqs_count, const char *id)
+{
+	int i, j;
+	const u16 *pri, *sec;
+	size_t pri_count, sec_count;
+
+	pri = cmd_db_read_aux_data(id, &pri_count);
+	if (IS_ERR(pri))
+		return PTR_ERR(pri);
+	/*
+	 * The data comes back as an array of unsigned shorts so adjust the
+	 * count accordingly
+	 */
+	pri_count >>= 1;
+	if (!pri_count)
+		return -EINVAL;
+
+	sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
+	if (IS_ERR(sec))
+		return PTR_ERR(sec);
+
+	sec_count >>= 1;
+	if (!sec_count)
+		return -EINVAL;
+
+	/* Construct a vote for each frequency */
+	for (i = 0; i < freqs_count; i++) {
+		u8 pindex = 0, sindex = 0;
+		unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);
+
+		/* Get the primary index that matches the arc level */
+		for (j = 0; j < pri_count; j++) {
+			if (pri[j] >= level) {
+				pindex = j;
+				break;
+			}
+		}
+
+		if (j == pri_count) {
+			DRM_DEV_ERROR(dev,
+				"Level %u not found in the RPMh list\n",
+				level);
+			DRM_DEV_ERROR(dev, "Available levels:\n");
+			for (j = 0; j < pri_count; j++)
+				DRM_DEV_ERROR(dev, " %u\n", pri[j]);
+
+			return -EINVAL;
+		}
+
+		/*
+		 * Look for a level in the secondary list that matches. If
+		 * nothing fits, use the maximum non-zero vote
+		 */
+
+		for (j = 0; j < sec_count; j++) {
+			if (sec[j] >= level) {
+				sindex = j;
+				break;
+			} else if (sec[j]) {
+				sindex = j;
+			}
+		}
+
+		/* Construct the vote */
+		votes[i] = ((pri[pindex] & 0xffff) << 16) |
+			(sindex << 8) | pindex;
+	}
+
+	return 0;
+}
+
+/*
+ * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
+ * to construct the list of votes on the CPU and send it over.
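 *
 * A hedged worked example of the packing above (the level values here are
 * hypothetical; real ones come from cmd_db): with pri[] = {0, 48, 64, 128}
 * and sec[] = {0, 64, 128}, a GPU frequency whose OPP level is 64 picks
 * pindex = 2 (first pri[j] >= 64) and sindex = 1 (first non-zero sec[j]
 * that is >= 64), so votes[i] = (64 << 16) | (1 << 8) | 2 = 0x00400102.
 *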
Query the RPMh + * voltage levels and build the votes + */ + +static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu) +{ + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + int ret; + + /* Build the GX votes */ + ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes, + gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl"); + + /* Build the CX votes */ + ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes, + gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl"); + + return ret; +} + +static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs, + u32 size) +{ + int count = dev_pm_opp_get_opp_count(dev); + struct dev_pm_opp *opp; + int i, index = 0; + unsigned long freq = 1; + + /* + * The OPP table doesn't contain the "off" frequency level so we need to + * add 1 to the table size to account for it + */ + + if (WARN(count + 1 > size, + "The GMU frequency table is being truncated\n")) + count = size - 1; + + /* Set the "off" frequency */ + freqs[index++] = 0; + + for (i = 0; i < count; i++) { + opp = dev_pm_opp_find_freq_ceil(dev, &freq); + if (IS_ERR(opp)) + break; + + dev_pm_opp_put(opp); + freqs[index++] = freq++; + } + + return index; +} + +static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu) +{ + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + + int ret = 0; + + /* + * The GMU handles its own frequency switching so build a list of + * available frequencies to send during initialization + */ + ret = devm_pm_opp_of_add_table(gmu->dev); + if (ret) { + DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n"); + return ret; + } + + gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev, + gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs)); + + /* + * The GMU also handles GPU frequency switching so build a list + * from the GPU OPP table + */ + gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev, + gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs)); + + gmu->current_perf_index = gmu->nr_gpu_freqs - 1; + + /* Build the list of RPMh votes that we'll send to the GMU */ + return a6xx_gmu_rpmh_votes_init(gmu); +} + +static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu) +{ + int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks); + + if (ret < 1) + return ret; + + gmu->nr_clocks = ret; + + gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks, + gmu->nr_clocks, "gmu"); + + gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks, + gmu->nr_clocks, "hub"); + + return 0; +} + +static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev, + const char *name) +{ + void __iomem *ret; + struct resource *res = platform_get_resource_byname(pdev, + IORESOURCE_MEM, name); + + if (!res) { + DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name); + return ERR_PTR(-EINVAL); + } + + ret = ioremap(res->start, resource_size(res)); + if (!ret) { + DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name); + return ERR_PTR(-EINVAL); + } + + return ret; +} + +static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev, + const char *name, irq_handler_t handler) +{ + int irq, ret; + + irq = platform_get_irq_byname(pdev, name); + + ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu); + if (ret) { + DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n", + name, ret); + return ret; + } + + 
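	/*
	 * Note: request_irq() leaves the line enabled, so it is masked again
	 * right away below; a6xx_gmu_resume() re-enables the GMU and HFI
	 * interrupts only once the firmware boot has progressed far enough.
	 */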
disable_irq(irq);
+
+	return irq;
+}
+
+void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
+{
+	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+	struct platform_device *pdev = to_platform_device(gmu->dev);
+
+	if (!gmu->initialized)
+		return;
+
+	pm_runtime_force_suspend(gmu->dev);
+
+	if (!IS_ERR_OR_NULL(gmu->gxpd)) {
+		pm_runtime_disable(gmu->gxpd);
+		dev_pm_domain_detach(gmu->gxpd, false);
+	}
+
+	iounmap(gmu->mmio);
+	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
+		iounmap(gmu->rscc);
+	gmu->mmio = NULL;
+	gmu->rscc = NULL;
+
+	a6xx_gmu_memory_free(gmu);
+
+	free_irq(gmu->gmu_irq, gmu);
+	free_irq(gmu->hfi_irq, gmu);
+
+	/* Drop reference taken in of_find_device_by_node */
+	put_device(gmu->dev);
+
+	gmu->initialized = false;
+}
+
+int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
+{
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+	struct platform_device *pdev = of_find_device_by_node(node);
+	int ret;
+
+	if (!pdev)
+		return -ENODEV;
+
+	mutex_init(&gmu->lock);
+
+	gmu->dev = &pdev->dev;
+
+	of_dma_configure(gmu->dev, node, true);
+
+	/* For now, don't do anything fancy until we get our feet under us */
+	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
+
+	pm_runtime_enable(gmu->dev);
+
+	/* Get the list of clocks */
+	ret = a6xx_gmu_clocks_probe(gmu);
+	if (ret)
+		goto err_put_device;
+
+	ret = a6xx_gmu_memory_probe(gmu);
+	if (ret)
+		goto err_put_device;
+
+
+	/* A660 now requires handling "prealloc requests" in GMU firmware
+	 * For now just hardcode allocations based on the known firmware.
+	 * note: there is no indication that these correspond to "dummy" or
+	 * "debug" regions, but this "guess" allows reusing these BOs which
+	 * are otherwise unused by a660.
+	 */
+	gmu->dummy.size = SZ_4K;
+	if (adreno_is_a660_family(adreno_gpu)) {
+		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
+			0x60400000, "debug");
+		if (ret)
+			goto err_memory;
+
+		gmu->dummy.size = SZ_8K;
+	}
+
+	/* Allocate memory for the GMU dummy page */
+	ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size,
+		0x60000000, "dummy");
+	if (ret)
+		goto err_memory;
+
+	/* Note that a650 family also includes a660 family: */
+	if (adreno_is_a650_family(adreno_gpu)) {
+		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
+			SZ_16M - SZ_16K, 0x04000, "icache");
+		if (ret)
+			goto err_memory;
+		/*
+		 * NOTE: when porting legacy ("pre-650-family") GPUs you may be
+		 * tempted to add a condition to allocate icache/dcache here, as
+		 * per downstream code flow, but it may not actually be
+		 * necessary. If you omit this step and you don't get random
+		 * pagefaults, you are likely good to go without this!
+		 */
+	} else if (adreno_is_a640_family(adreno_gpu)) {
+		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
+			SZ_256K - SZ_16K, 0x04000, "icache");
+		if (ret)
+			goto err_memory;
+
+		ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
+			SZ_256K - SZ_16K, 0x44000, "dcache");
+		if (ret)
+			goto err_memory;
+	} else if (adreno_is_a630(adreno_gpu) || adreno_is_a615_family(adreno_gpu)) {
+		/* HFI v1, has sptprac */
+		gmu->legacy = true;
+
+		/* Allocate memory for the GMU debug region */
+		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug");
+		if (ret)
+			goto err_memory;
+	}
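To keep the fixed addresses straight, here is a compact summary of the fixed-iova allocations above; the array itself is illustrative only (it is not part of the driver) and simply collects the iovas, sizes, and conditions used in this function:

/* Illustrative summary of the fixed-iova GMU BOs set up in a6xx_gmu_init() */
static const struct {
	const char *name;
	unsigned int iova;
	const char *when;
} gmu_fixed_bos[] = {
	{ "dummy",  0x60000000, "always (SZ_8K on a660 family, else SZ_4K)" },
	{ "debug",  0x60400000, "a660 family prealloc (SZ_4K * 7)" },
	{ "icache", 0x00004000, "a650 family (SZ_16M - SZ_16K) or a640 family (SZ_256K - SZ_16K)" },
	{ "dcache", 0x00044000, "a640 family only (SZ_256K - SZ_16K)" },
};

The remaining BOs (hfi, log, and the a630/a615 debug region, allocated just below) pass iova = 0 and therefore float inside the uncached 0x60000000 window.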
+
+	/* Allocate memory for the HFI queues */
+	ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi");
+	if (ret)
+		goto err_memory;
+
+	/* Allocate memory for the GMU log region */
+	ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0, "log");
+	if (ret)
+		goto err_memory;
+
+	/* Map the GMU registers */
+	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
+	if (IS_ERR(gmu->mmio)) {
+		ret = PTR_ERR(gmu->mmio);
+		goto err_memory;
+	}
+
+	if (adreno_is_a650_family(adreno_gpu)) {
+		gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
+		if (IS_ERR(gmu->rscc))
+			goto err_mmio;
+	} else {
+		gmu->rscc = gmu->mmio + 0x23000;
+	}
+
+	/* Get the HFI and GMU interrupts */
+	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
+	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
+
+	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
+		goto err_mmio;
+
+	/*
+	 * Get a link to the GX power domain to reset the GPU in case of GMU
+	 * crash
+	 */
+	gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
+
+	/* Get the power levels for the GMU and GPU */
+	a6xx_gmu_pwrlevels_probe(gmu);
+
+	/* Set up the HFI queues */
+	a6xx_hfi_init(gmu);
+
+	gmu->initialized = true;
+
+	return 0;
+
+err_mmio:
+	iounmap(gmu->mmio);
+	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
+		iounmap(gmu->rscc);
+	free_irq(gmu->gmu_irq, gmu);
+	free_irq(gmu->hfi_irq, gmu);
+
+	ret = -ENODEV;
+
+err_memory:
+	a6xx_gmu_memory_free(gmu);
+err_put_device:
+	/* Drop reference taken in of_find_device_by_node */
+	put_device(gmu->dev);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
new file mode 100644
index 000000000..e034935b3
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
+
+#ifndef _A6XX_GMU_H_
+#define _A6XX_GMU_H_
+
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include "msm_drv.h"
+#include "a6xx_hfi.h"
+
+struct a6xx_gmu_bo {
+	struct drm_gem_object *obj;
+	void *virt;
+	size_t size;
+	u64 iova;
+};
+
+/*
+ * These define the different GMU wake up options - these define how both the
+ * CPU and the GMU bring up the hardware
+ */
+
+/* The GMU has already been booted and the retention registers are active */
+#define GMU_WARM_BOOT 0
+
+/* the GMU is coming up for the first time or back from a power collapse */
+#define GMU_COLD_BOOT 1
+
+/*
+ * These define the level of control that the GMU has - the higher the number
+ * the more things that the GMU hardware controls on its own.
+ */
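Since these idle levels drive a6xx_gmu_check_idle_level() earlier in this patch, here is a one-function sketch (illustrative only; the helper name is invented, and it uses the GMU_IDLE_STATE_* defines that follow) of the RPMH_POWER_STATE value the driver actually polls for:

/*
 * Illustrative helper: the REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE value
 * waited on in a6xx_gmu_check_idle_level(). SPTP has no distinct encoding
 * there, so it is checked as IFPC.
 */
static int a6xx_gmu_expected_power_state(int idle_level)
{
	if (idle_level == GMU_IDLE_STATE_SPTP)
		return GMU_IDLE_STATE_IFPC;	/* SPTP and IFPC both report as IFPC */
	return idle_level;
}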
+
+/* The GMU does not do any idle state management */
+#define GMU_IDLE_STATE_ACTIVE 0
+
+/* The GMU manages SPTP power collapse */
+#define GMU_IDLE_STATE_SPTP 2
+
+/* The GMU does automatic IFPC (intra-frame power collapse) */
+#define GMU_IDLE_STATE_IFPC 3
+
+struct a6xx_gmu {
+	struct device *dev;
+
+	/* For serializing communication with the GMU: */
+	struct mutex lock;
+
+	struct msm_gem_address_space *aspace;
+
+	void * __iomem mmio;
+	void * __iomem rscc;
+
+	int hfi_irq;
+	int gmu_irq;
+
+	struct device *gxpd;
+
+	int idle_level;
+
+	struct a6xx_gmu_bo hfi;
+	struct a6xx_gmu_bo debug;
+	struct a6xx_gmu_bo icache;
+	struct a6xx_gmu_bo dcache;
+	struct a6xx_gmu_bo dummy;
+	struct a6xx_gmu_bo log;
+
+	int nr_clocks;
+	struct clk_bulk_data *clocks;
+	struct clk *core_clk;
+	struct clk *hub_clk;
+
+	/* current performance index set externally */
+	int current_perf_index;
+
+	int nr_gpu_freqs;
+	unsigned long gpu_freqs[16];
+	u32 gx_arc_votes[16];
+
+	int nr_gmu_freqs;
+	unsigned long gmu_freqs[4];
+	u32 cx_arc_votes[4];
+
+	unsigned long freq;
+
+	struct a6xx_hfi_queue queues[2];
+
+	bool initialized;
+	bool hung;
+	bool legacy; /* a618 or a630 */
+};
+
+static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
+{
+	return msm_readl(gmu->mmio + (offset << 2));
+}
+
+static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
+{
+	msm_writel(value, gmu->mmio + (offset << 2));
+}
+
+static inline void
+gmu_write_bulk(struct a6xx_gmu *gmu, u32 offset, const u32 *data, u32 size)
+{
+	memcpy_toio(gmu->mmio + (offset << 2), data, size);
+	wmb();
+}
+
+static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
+{
+	u32 val = gmu_read(gmu, reg);
+
+	val &= ~mask;
+
+	gmu_write(gmu, reg, val | or);
+}
+
+static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
+{
+	u64 val;
+
+	val = (u64) msm_readl(gmu->mmio + (lo << 2));
+	val |= ((u64) msm_readl(gmu->mmio + (hi << 2)) << 32);
+
+	return val;
+}
+
+#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
+	readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
+		interval, timeout)
+
+static inline u32 gmu_read_rscc(struct a6xx_gmu *gmu, u32 offset)
+{
+	return msm_readl(gmu->rscc + (offset << 2));
+}
+
+static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value)
+{
+	msm_writel(value, gmu->rscc + (offset << 2));
+}
+
+#define gmu_poll_timeout_rscc(gmu, addr, val, cond, interval, timeout) \
+	readl_poll_timeout((gmu)->rscc + ((addr) << 2), val, cond, \
+		interval, timeout)
+
+/*
+ * These are the available OOB (out of band requests) to the GMU where "out of
+ * band" means that the CPU talks to the GMU directly and not through HFI.
+ * Normally this works by writing an ITCM/DTCM register and then triggering an
+ * interrupt (the "request" bit) and waiting for an acknowledgment (the "ack"
+ * bit). The state is cleared by writing the "clear" bit to the GMU interrupt.
+ *
+ * These are used to force the GMU/GPU to stay on during a critical sequence or
+ * for hardware workarounds.
+ */
+
+enum a6xx_gmu_oob_state {
+	/*
+	 * Let the GMU know that a boot or slumber operation has started.
The value in + * REG_A6XX_GMU_BOOT_SLUMBER_OPTION lets the GMU know which operation we are + * doing + */ + GMU_OOB_BOOT_SLUMBER = 0, + /* + * Let the GMU know to not turn off any GPU registers while the CPU is in a + * critical section + */ + GMU_OOB_GPU_SET, + /* + * Set a new power level for the GPU when the CPU is doing frequency scaling + */ + GMU_OOB_DCVS_SET, + /* + * Used to keep the GPU on for CPU-side reads of performance counters. + */ + GMU_OOB_PERFCOUNTER_SET, +}; + +void a6xx_hfi_init(struct a6xx_gmu *gmu); +int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state); +void a6xx_hfi_stop(struct a6xx_gmu *gmu); +int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu); +int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index); + +bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu); +bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu); + +#endif diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h new file mode 100644 index 000000000..4a3230978 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h @@ -0,0 +1,483 @@ +#ifndef A6XX_GMU_XML +#define A6XX_GMU_XML + +/* Autogenerated file, DO NOT EDIT manually! + +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://github.com/freedreno/envytools/ +git clone https://github.com/freedreno/envytools.git + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56) + +Copyright (C) 2013-2021 by the following authors: +- Rob Clark (robclark) +- Ilia Mirkin (imirkin) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial 
+portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ + + +#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__MASK 0x00800000 +#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__SHIFT 23 +static inline uint32_t A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB(uint32_t val) +{ + return ((val) << A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__SHIFT) & A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__MASK; +} +#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__MASK 0x40000000 +#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__SHIFT 30 +static inline uint32_t A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB(uint32_t val) +{ + return ((val) << A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__SHIFT) & A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__MASK; +} +#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__MASK 0x00400000 +#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__SHIFT 22 +static inline uint32_t A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK(uint32_t val) +{ + return ((val) << A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__SHIFT) & A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__MASK; +} +#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__MASK 0x40000000 +#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__SHIFT 30 +static inline uint32_t A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK(uint32_t val) +{ + return ((val) << A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__MASK; +} +#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__MASK 0x40000000 +#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__SHIFT 30 +static inline uint32_t A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK(uint32_t val) +{ + return ((val) << A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__MASK; +} +#define A6XX_GMU_OOB_DCVS_SET_MASK__MASK 0x00800000 +#define A6XX_GMU_OOB_DCVS_SET_MASK__SHIFT 23 +static inline uint32_t A6XX_GMU_OOB_DCVS_SET_MASK(uint32_t val) +{ + return ((val) << A6XX_GMU_OOB_DCVS_SET_MASK__SHIFT) & A6XX_GMU_OOB_DCVS_SET_MASK__MASK; +} +#define A6XX_GMU_OOB_DCVS_CHECK_MASK__MASK 0x80000000 +#define A6XX_GMU_OOB_DCVS_CHECK_MASK__SHIFT 31 +static inline uint32_t A6XX_GMU_OOB_DCVS_CHECK_MASK(uint32_t val) +{ + return ((val) << A6XX_GMU_OOB_DCVS_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_DCVS_CHECK_MASK__MASK; +} +#define A6XX_GMU_OOB_DCVS_CLEAR_MASK__MASK 0x80000000 +#define A6XX_GMU_OOB_DCVS_CLEAR_MASK__SHIFT 31 +static inline uint32_t A6XX_GMU_OOB_DCVS_CLEAR_MASK(uint32_t val) +{ + return ((val) << A6XX_GMU_OOB_DCVS_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_DCVS_CLEAR_MASK__MASK; +} +#define A6XX_GMU_OOB_GPU_SET_MASK__MASK 0x00040000 +#define A6XX_GMU_OOB_GPU_SET_MASK__SHIFT 18 +static inline uint32_t A6XX_GMU_OOB_GPU_SET_MASK(uint32_t val) +{ + return ((val) << A6XX_GMU_OOB_GPU_SET_MASK__SHIFT) & A6XX_GMU_OOB_GPU_SET_MASK__MASK; +} +#define A6XX_GMU_OOB_GPU_CHECK_MASK__MASK 0x04000000 +#define A6XX_GMU_OOB_GPU_CHECK_MASK__SHIFT 26 +static inline uint32_t A6XX_GMU_OOB_GPU_CHECK_MASK(uint32_t val) +{ + return ((val) << A6XX_GMU_OOB_GPU_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_GPU_CHECK_MASK__MASK; +} +#define A6XX_GMU_OOB_GPU_CLEAR_MASK__MASK 0x04000000 +#define A6XX_GMU_OOB_GPU_CLEAR_MASK__SHIFT 26 +static inline 
uint32_t A6XX_GMU_OOB_GPU_CLEAR_MASK(uint32_t val) +{ + return ((val) << A6XX_GMU_OOB_GPU_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_GPU_CLEAR_MASK__MASK; +} +#define A6XX_GMU_OOB_PERFCNTR_SET_MASK__MASK 0x00020000 +#define A6XX_GMU_OOB_PERFCNTR_SET_MASK__SHIFT 17 +static inline uint32_t A6XX_GMU_OOB_PERFCNTR_SET_MASK(uint32_t val) +{ + return ((val) << A6XX_GMU_OOB_PERFCNTR_SET_MASK__SHIFT) & A6XX_GMU_OOB_PERFCNTR_SET_MASK__MASK; +} +#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__MASK 0x02000000 +#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__SHIFT 25 +static inline uint32_t A6XX_GMU_OOB_PERFCNTR_CHECK_MASK(uint32_t val) +{ + return ((val) << A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__MASK; +} +#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__MASK 0x02000000 +#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__SHIFT 25 +static inline uint32_t A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK(uint32_t val) +{ + return ((val) << A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__MASK; +} +#define A6XX_HFI_IRQ_MSGQ_MASK 0x00000001 +#define A6XX_HFI_IRQ_DSGQ_MASK__MASK 0x00000002 +#define A6XX_HFI_IRQ_DSGQ_MASK__SHIFT 1 +static inline uint32_t A6XX_HFI_IRQ_DSGQ_MASK(uint32_t val) +{ + return ((val) << A6XX_HFI_IRQ_DSGQ_MASK__SHIFT) & A6XX_HFI_IRQ_DSGQ_MASK__MASK; +} +#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK__MASK 0x00000004 +#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK__SHIFT 2 +static inline uint32_t A6XX_HFI_IRQ_BLOCKED_MSG_MASK(uint32_t val) +{ + return ((val) << A6XX_HFI_IRQ_BLOCKED_MSG_MASK__SHIFT) & A6XX_HFI_IRQ_BLOCKED_MSG_MASK__MASK; +} +#define A6XX_HFI_IRQ_CM3_FAULT_MASK__MASK 0x00800000 +#define A6XX_HFI_IRQ_CM3_FAULT_MASK__SHIFT 23 +static inline uint32_t A6XX_HFI_IRQ_CM3_FAULT_MASK(uint32_t val) +{ + return ((val) << A6XX_HFI_IRQ_CM3_FAULT_MASK__SHIFT) & A6XX_HFI_IRQ_CM3_FAULT_MASK__MASK; +} +#define A6XX_HFI_IRQ_GMU_ERR_MASK__MASK 0x007f0000 +#define A6XX_HFI_IRQ_GMU_ERR_MASK__SHIFT 16 +static inline uint32_t A6XX_HFI_IRQ_GMU_ERR_MASK(uint32_t val) +{ + return ((val) << A6XX_HFI_IRQ_GMU_ERR_MASK__SHIFT) & A6XX_HFI_IRQ_GMU_ERR_MASK__MASK; +} +#define A6XX_HFI_IRQ_OOB_MASK__MASK 0xff000000 +#define A6XX_HFI_IRQ_OOB_MASK__SHIFT 24 +static inline uint32_t A6XX_HFI_IRQ_OOB_MASK(uint32_t val) +{ + return ((val) << A6XX_HFI_IRQ_OOB_MASK__SHIFT) & A6XX_HFI_IRQ_OOB_MASK__MASK; +} +#define A6XX_HFI_H2F_IRQ_MASK_BIT 0x00000001 +#define REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL 0x00000080 + +#define REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL 0x00000081 + +#define REG_A6XX_GMU_CM3_ITCM_START 0x00000c00 + +#define REG_A6XX_GMU_CM3_DTCM_START 0x00001c00 + +#define REG_A6XX_GMU_NMI_CONTROL_STATUS 0x000023f0 + +#define REG_A6XX_GMU_BOOT_SLUMBER_OPTION 0x000023f8 + +#define REG_A6XX_GMU_GX_VOTE_IDX 0x000023f9 + +#define REG_A6XX_GMU_MX_VOTE_IDX 0x000023fa + +#define REG_A6XX_GMU_DCVS_ACK_OPTION 0x000023fc + +#define REG_A6XX_GMU_DCVS_PERF_SETTING 0x000023fd + +#define REG_A6XX_GMU_DCVS_BW_SETTING 0x000023fe + +#define REG_A6XX_GMU_DCVS_RETURN 0x000023ff + +#define REG_A6XX_GMU_ICACHE_CONFIG 0x00004c00 + +#define REG_A6XX_GMU_DCACHE_CONFIG 0x00004c01 + +#define REG_A6XX_GMU_SYS_BUS_CONFIG 0x00004c0f + +#define REG_A6XX_GMU_CM3_SYSRESET 0x00005000 + +#define REG_A6XX_GMU_CM3_BOOT_CONFIG 0x00005001 + +#define REG_A6XX_GMU_CM3_FW_BUSY 0x0000501a + +#define REG_A6XX_GMU_CM3_FW_INIT_RESULT 0x0000501c + +#define REG_A6XX_GMU_CM3_CFG 0x0000502d + +#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE 0x00005040 + +#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0 0x00005041 + +#define 
REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1 0x00005042 + +#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L 0x00005044 + +#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H 0x00005045 + +#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L 0x00005046 + +#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H 0x00005047 + +#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L 0x00005048 + +#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H 0x00005049 + +#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L 0x0000504a + +#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H 0x0000504b + +#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L 0x0000504c + +#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H 0x0000504d + +#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L 0x0000504e + +#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H 0x0000504f + +#define REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL 0x000050c0 +#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE 0x00000001 +#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE 0x00000002 +#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE 0x00000004 +#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__MASK 0x00003c00 +#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__SHIFT 10 +static inline uint32_t A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS(uint32_t val) +{ + return ((val) << A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__SHIFT) & A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__MASK; +} +#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__MASK 0xffffc000 +#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__SHIFT 14 +static inline uint32_t A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH(uint32_t val) +{ + return ((val) << A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__SHIFT) & A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__MASK; +} + +#define REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST 0x000050c1 + +#define REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST 0x000050c2 + +#define REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS 0x000050d0 +#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_OFF 0x00000001 +#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_ON 0x00000002 +#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF 0x00000004 +#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_ON 0x00000008 +#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF 0x00000010 +#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GMU_UP_POWER_STATE 0x00000020 +#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF 0x00000040 +#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF 0x00000080 + +#define REG_A6XX_GMU_GPU_NAP_CTRL 0x000050e4 +#define A6XX_GMU_GPU_NAP_CTRL_HW_NAP_ENABLE 0x00000001 +#define A6XX_GMU_GPU_NAP_CTRL_SID__MASK 0x000001f0 +#define A6XX_GMU_GPU_NAP_CTRL_SID__SHIFT 4 +static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val) +{ + return ((val) << A6XX_GMU_GPU_NAP_CTRL_SID__SHIFT) & A6XX_GMU_GPU_NAP_CTRL_SID__MASK; +} + +#define REG_A6XX_GMU_RPMH_CTRL 0x000050e8 +#define A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE 0x00000001 +#define A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE 0x00000010 +#define A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE 0x00000100 +#define A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE 0x00000200 +#define A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE 0x00000400 +#define A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE 0x00000800 +#define A6XX_GMU_RPMH_CTRL_DDR_MIN_VOTE_ENABLE 0x00001000 +#define A6XX_GMU_RPMH_CTRL_MX_MIN_VOTE_ENABLE 0x00002000 +#define A6XX_GMU_RPMH_CTRL_CX_MIN_VOTE_ENABLE 0x00004000 +#define 
A6XX_GMU_RPMH_CTRL_GFX_MIN_VOTE_ENABLE 0x00008000 + +#define REG_A6XX_GMU_RPMH_HYST_CTRL 0x000050e9 + +#define REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE 0x000050ec + +#define REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF 0x000050f0 + +#define REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF 0x000050f1 + +#define REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG 0x00005100 + +#define REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP 0x00005101 + +#define REG_A6XX_GMU_BOOT_KMD_LM_HANDSHAKE 0x000051f0 + +#define REG_A6XX_GMU_LLM_GLM_SLEEP_CTRL 0x00005157 + +#define REG_A6XX_GMU_LLM_GLM_SLEEP_STATUS 0x00005158 + +#define REG_A6XX_GMU_ALWAYS_ON_COUNTER_L 0x00005088 + +#define REG_A6XX_GMU_ALWAYS_ON_COUNTER_H 0x00005089 + +#define REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE 0x000050c3 + +#define REG_A6XX_GMU_HFI_CTRL_STATUS 0x00005180 + +#define REG_A6XX_GMU_HFI_VERSION_INFO 0x00005181 + +#define REG_A6XX_GMU_HFI_SFR_ADDR 0x00005182 + +#define REG_A6XX_GMU_HFI_MMAP_ADDR 0x00005183 + +#define REG_A6XX_GMU_HFI_QTBL_INFO 0x00005184 + +#define REG_A6XX_GMU_HFI_QTBL_ADDR 0x00005185 + +#define REG_A6XX_GMU_HFI_CTRL_INIT 0x00005186 + +#define REG_A6XX_GMU_GMU2HOST_INTR_SET 0x00005190 + +#define REG_A6XX_GMU_GMU2HOST_INTR_CLR 0x00005191 + +#define REG_A6XX_GMU_GMU2HOST_INTR_INFO 0x00005192 +#define A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ 0x00000001 +#define A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT 0x00800000 + +#define REG_A6XX_GMU_GMU2HOST_INTR_MASK 0x00005193 + +#define REG_A6XX_GMU_HOST2GMU_INTR_SET 0x00005194 + +#define REG_A6XX_GMU_HOST2GMU_INTR_CLR 0x00005195 + +#define REG_A6XX_GMU_HOST2GMU_INTR_RAW_INFO 0x00005196 + +#define REG_A6XX_GMU_HOST2GMU_INTR_EN_0 0x00005197 + +#define REG_A6XX_GMU_HOST2GMU_INTR_EN_1 0x00005198 + +#define REG_A6XX_GMU_HOST2GMU_INTR_EN_2 0x00005199 + +#define REG_A6XX_GMU_HOST2GMU_INTR_EN_3 0x0000519a + +#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_0 0x0000519b + +#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_1 0x0000519c + +#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_2 0x0000519d + +#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_3 0x0000519e + +#define REG_A6XX_GMU_GENERAL_1 0x000051c6 + +#define REG_A6XX_GMU_GENERAL_7 0x000051cc + +#define REG_A6XX_GMU_ISENSE_CTRL 0x0000515d + +#define REG_A6XX_GPU_CS_ENABLE_REG 0x00008920 + +#define REG_A6XX_GPU_GMU_CX_GMU_ISENSE_CTRL 0x0000515d + +#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL3 0x00008578 + +#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL2 0x00008558 + +#define REG_A6XX_GPU_CS_A_SENSOR_CTRL_0 0x00008580 + +#define REG_A6XX_GPU_CS_A_SENSOR_CTRL_2 0x00027ada + +#define REG_A6XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000881a + +#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x00008957 + +#define REG_A6XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000881a + +#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x0000881d + +#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x0000881f + +#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x00008821 + +#define REG_A6XX_GPU_CS_AMP_CALIBRATION_DONE 0x00008965 + +#define REG_A6XX_GPU_CS_AMP_PERIOD_CTRL 0x0000896d + +#define REG_A6XX_GPU_CS_AMP_CALIBRATION_DONE 0x00008965 + +#define REG_A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD 0x0000514d + +#define REG_A6XX_GMU_AO_INTERRUPT_EN 0x00009303 + +#define REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR 0x00009304 + +#define REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS 0x00009305 +#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE 0x00000001 +#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_RSCC_COMP 0x00000002 +#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_VDROOP 0x00000004 +#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR 0x00000008 +#define 
A6XX_GMU_AO_HOST_INTERRUPT_STATUS_DBD_WAKEUP 0x00000010 +#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR 0x00000020 + +#define REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK 0x00009306 + +#define REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL 0x00009309 + +#define REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL 0x0000930a + +#define REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL 0x0000930b + +#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS 0x0000930c +#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB 0x00800000 + +#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2 0x0000930d + +#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK 0x0000930e + +#define REG_A6XX_GMU_AO_AHB_FENCE_CTRL 0x00009310 + +#define REG_A6XX_GMU_AHB_FENCE_STATUS 0x00009313 + +#define REG_A6XX_GMU_RBBM_INT_UNMASKED_STATUS 0x00009315 + +#define REG_A6XX_GMU_AO_SPARE_CNTL 0x00009316 + +#define REG_A6XX_GMU_RSCC_CONTROL_REQ 0x00009307 + +#define REG_A6XX_GMU_RSCC_CONTROL_ACK 0x00009308 + +#define REG_A6XX_GMU_AHB_FENCE_RANGE_0 0x00009311 + +#define REG_A6XX_GMU_AHB_FENCE_RANGE_1 0x00009312 + +#define REG_A6XX_GPU_CC_GX_GDSCR 0x00009c03 + +#define REG_A6XX_GPU_CC_GX_DOMAIN_MISC 0x00009d42 + +#define REG_A6XX_GPU_CPR_FSM_CTL 0x0000c001 + +#define REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0 0x00000004 + +#define REG_A6XX_RSCC_PDC_SEQ_START_ADDR 0x00000008 + +#define REG_A6XX_RSCC_PDC_MATCH_VALUE_LO 0x00000009 + +#define REG_A6XX_RSCC_PDC_MATCH_VALUE_HI 0x0000000a + +#define REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0 0x0000000b + +#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR 0x0000000d + +#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA 0x0000000e + +#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0 0x00000082 + +#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0 0x00000083 + +#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0 0x00000089 + +#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0 0x0000008c + +#define REG_A6XX_RSCC_OVERRIDE_START_ADDR 0x00000100 + +#define REG_A6XX_RSCC_SEQ_BUSY_DRV0 0x00000101 + +#define REG_A6XX_RSCC_SEQ_MEM_0_DRV0 0x00000180 + +#define REG_A6XX_RSCC_TCS0_DRV0_STATUS 0x00000346 + +#define REG_A6XX_RSCC_TCS1_DRV0_STATUS 0x000003ee + +#define REG_A6XX_RSCC_TCS2_DRV0_STATUS 0x00000496 + +#define REG_A6XX_RSCC_TCS3_DRV0_STATUS 0x0000053e + + +#endif /* A6XX_GMU_XML */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c new file mode 100644 index 000000000..95e73eddc --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -0,0 +1,2078 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. 
*/
+
+
+#include "msm_gem.h"
+#include "msm_mmu.h"
+#include "msm_gpu_trace.h"
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.xml.h"
+
+#include <linux/bitfield.h>
+#include <linux/devfreq.h>
+#include <linux/reset.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+
+#define GPU_PAS_ID 13
+
+static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+	/* Check that the GMU is idle */
+	if (!a6xx_gmu_isidle(&a6xx_gpu->gmu))
+		return false;
+
+	/* Check that the CX master is idle */
+	if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
+			~A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER)
+		return false;
+
+	return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
+		A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);
+}
+
+static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+	/* wait for CP to drain ringbuffer: */
+	if (!adreno_idle(gpu, ring))
+		return false;
+
+	if (spin_until(_a6xx_check_idle(gpu))) {
+		DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
+			gpu->name, __builtin_return_address(0),
+			gpu_read(gpu, REG_A6XX_RBBM_STATUS),
+			gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS),
+			gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
+			gpu_read(gpu, REG_A6XX_CP_RB_WPTR));
+		return false;
+	}
+
+	return true;
+}
+
+static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+	/* Expanded APRIV doesn't need to issue the WHERE_AM_I opcode */
+	if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) {
+		OUT_PKT7(ring, CP_WHERE_AM_I, 2);
+		OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
+		OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));
+	}
+}
+
+static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+	uint32_t wptr;
+	unsigned long flags;
+
+	update_shadow_rptr(gpu, ring);
+
+	spin_lock_irqsave(&ring->preempt_lock, flags);
+
+	/* Copy the shadow to the actual register */
+	ring->cur = ring->next;
+
+	/* Make sure to wrap wptr if we need to */
+	wptr = get_wptr(ring);
+
+	spin_unlock_irqrestore(&ring->preempt_lock, flags);
+
+	/* Make sure everything is posted before making a decision */
+	mb();
+
+	gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+}
+
+static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
+		u64 iova)
+{
+	OUT_PKT7(ring, CP_REG_TO_MEM, 3);
+	OUT_RING(ring, CP_REG_TO_MEM_0_REG(counter) |
+		CP_REG_TO_MEM_0_CNT(2) |
+		CP_REG_TO_MEM_0_64B);
+	OUT_RING(ring, lower_32_bits(iova));
+	OUT_RING(ring, upper_32_bits(iova));
+}
+
+static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
+		struct msm_ringbuffer *ring, struct msm_file_private *ctx)
+{
+	bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
+	phys_addr_t ttbr;
+	u32 asid;
+	u64 memptr = rbmemptr(ring, ttbr0);
+
+	if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno)
+		return;
+
+	if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
+		return;
+
+	if (!sysprof) {
+		/* Turn off protected mode to write to special registers */
+		OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+		OUT_RING(ring, 0);
+
+		OUT_PKT4(ring, REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
+		OUT_RING(ring, 1);
+	}
+
+	/* Execute the table update */
+	OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 4);
+	OUT_RING(ring, CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(lower_32_bits(ttbr)));
+
+	OUT_RING(ring,
+		CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(upper_32_bits(ttbr)) |
+		CP_SMMU_TABLE_UPDATE_1_ASID(asid));
+	OUT_RING(ring, CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(0));
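+	/*
+	 * Dword by dword, the CP_SMMU_TABLE_UPDATE payload assembled here is:
+	 * the low TTBR0 bits; the high TTBR0 bits together with the ASID;
+	 * CONTEXTIDR; and CONTEXTBANK (the last two are left at 0). The CP
+	 * applies the new pagetable in-band from the ring, so no CPU-side
+	 * SMMU detach/attach is needed when switching between process
+	 * address spaces.
+	 */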
OUT_RING(ring, CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(0)); + + /* + * Write the new TTBR0 to the memstore. This is good for debugging. + */ + OUT_PKT7(ring, CP_MEM_WRITE, 4); + OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr))); + OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr))); + OUT_RING(ring, lower_32_bits(ttbr)); + OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr)); + + /* + * And finally, trigger a uche flush to be sure there isn't anything + * lingering in that part of the GPU + */ + + OUT_PKT7(ring, CP_EVENT_WRITE, 1); + OUT_RING(ring, CACHE_INVALIDATE); + + if (!sysprof) { + /* + * Wait for SRAM clear after the pgtable update, so the + * two can happen in parallel: + */ + OUT_PKT7(ring, CP_WAIT_REG_MEM, 6); + OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ)); + OUT_RING(ring, CP_WAIT_REG_MEM_1_POLL_ADDR_LO( + REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS)); + OUT_RING(ring, CP_WAIT_REG_MEM_2_POLL_ADDR_HI(0)); + OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(0x1)); + OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(0x1)); + OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0)); + + /* Re-enable protected mode: */ + OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); + OUT_RING(ring, 1); + } +} + +static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) +{ + unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT; + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct msm_ringbuffer *ring = submit->ring; + unsigned int i, ibs = 0; + + a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx); + + get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0), + rbmemptr_stats(ring, index, cpcycles_start)); + + /* + * For PM4 the GMU register offsets are calculated from the base of the + * GPU registers so we need to add 0x1a800 to the register value on A630 + * to get the right value from PM4. + */ + get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO, + rbmemptr_stats(ring, index, alwayson_start)); + + /* Invalidate CCU depth and color */ + OUT_PKT7(ring, CP_EVENT_WRITE, 1); + OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_DEPTH)); + + OUT_PKT7(ring, CP_EVENT_WRITE, 1); + OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_COLOR)); + + /* Submit the commands */ + for (i = 0; i < submit->nr_cmds; i++) { + switch (submit->cmd[i].type) { + case MSM_SUBMIT_CMD_IB_TARGET_BUF: + break; + case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: + if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) + break; + fallthrough; + case MSM_SUBMIT_CMD_BUF: + OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); + OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); + OUT_RING(ring, upper_32_bits(submit->cmd[i].iova)); + OUT_RING(ring, submit->cmd[i].size); + ibs++; + break; + } + + /* + * Periodically update shadow-wptr if needed, so that we + * can see partial progress of submits with large # of + * cmds.. otherwise we could needlessly stall waiting for + * ringbuffer state, simply due to looking at a shadow + * rptr value that has not been updated + */ + if ((ibs % 32) == 0) + update_shadow_rptr(gpu, ring); + } + + get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0), + rbmemptr_stats(ring, index, cpcycles_end)); + get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO, + rbmemptr_stats(ring, index, alwayson_end)); + + /* Write the fence to the scratch register */ + OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1); + OUT_RING(ring, submit->seqno); + + /* + * Execute a CACHE_FLUSH_TS event. 
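+	 * The value written is submit->seqno, at the ring's fence memptr; the
+	 * CP_CACHE_FLUSH_TS interrupt it raises is what ultimately drives
+	 * msm_gpu_retire() from a6xx_irq() further down.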
This will ensure that the + * timestamp is written to the memory and then triggers the interrupt + */ + OUT_PKT7(ring, CP_EVENT_WRITE, 4); + OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) | + CP_EVENT_WRITE_0_IRQ); + OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence))); + OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence))); + OUT_RING(ring, submit->seqno); + + trace_msm_gpu_submit_flush(submit, + gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO)); + + a6xx_flush(gpu, ring); +} + +/* For a615 family (a615, a616, a618 and a619) */ +const struct adreno_reglist a615_hwcg[] = { + {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220}, + {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080}, + {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF}, + {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222}, + {REG_A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222}, + {REG_A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222}, + {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777}, + {REG_A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777}, + {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111}, + {REG_A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111}, + {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222}, + {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004}, + {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002}, + {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002020}, + {REG_A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220}, + {REG_A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220}, + {REG_A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220}, + {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00}, + {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040F00}, + {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040F00}, + {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040F00}, + {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022}, + {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555}, + {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011}, + {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044}, + {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222}, + {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222}, + {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004}, + {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000}, + {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200}, + {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002}, + {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222}, + {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111}, + 
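+	/*
+	 * Each adreno_reglist entry pairs a clock-gating register (the
+	 * CNTL/DELAY/HYST variants for each hardware block) with the value
+	 * used when gating is enabled; the empty terminating entry is how
+	 * a6xx_set_hwcg() below knows where to stop (it loops while
+	 * reg->offset is non-zero).
+	 */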
{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}, + {}, +}; + +const struct adreno_reglist a630_hwcg[] = { + {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL_SP3, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220}, + {REG_A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220}, + {REG_A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220}, + {REG_A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220}, + {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080}, + {REG_A6XX_RBBM_CLOCK_DELAY_SP1, 0x00000080}, + {REG_A6XX_RBBM_CLOCK_DELAY_SP2, 0x00000080}, + {REG_A6XX_RBBM_CLOCK_DELAY_SP3, 0x00000080}, + {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf}, + {REG_A6XX_RBBM_CLOCK_HYST_SP1, 0x0000f3cf}, + {REG_A6XX_RBBM_CLOCK_HYST_SP2, 0x0000f3cf}, + {REG_A6XX_RBBM_CLOCK_HYST_SP3, 0x0000f3cf}, + {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222}, + {REG_A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222}, + {REG_A6XX_RBBM_CLOCK_CNTL_TP2, 0x02222222}, + {REG_A6XX_RBBM_CLOCK_CNTL_TP3, 0x02222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222}, + {REG_A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222}, + {REG_A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222}, + {REG_A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222}, + {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST3_TP2, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST3_TP3, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777}, + {REG_A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777}, + {REG_A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777}, + {REG_A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777}, + {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111}, + {REG_A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111}, + {REG_A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111}, + {REG_A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111}, + {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222}, + {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004}, + {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002}, + {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222}, + 
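+	/*
+	 * Note the per-SKU difference: this a630 list spells out all four
+	 * SP/TP/RB instances explicitly, while the a640/a650/a660 lists that
+	 * follow only program instance 0 of each block.
+	 */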
{REG_A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220}, + {REG_A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220}, + {REG_A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220}, + {REG_A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220}, + {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00}, + {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040f00}, + {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040f00}, + {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040f00}, + {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022}, + {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555}, + {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011}, + {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044}, + {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222}, + {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222}, + {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004}, + {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000}, + {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200}, + {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002}, + {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222}, + {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111}, + {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}, + {}, +}; + +const struct adreno_reglist a640_hwcg[] = { + {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220}, + {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080}, + {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF}, + {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222}, + {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111}, + {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777}, + {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222}, + {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220}, + {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00}, + {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05222022}, + {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555}, + {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011}, + {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044}, + {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222}, + {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222}, + {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002}, + {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000}, + {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200}, + {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004}, + {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222}, + {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111}, + {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 
0x22222222}, + {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004}, + {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002}, + {REG_A6XX_RBBM_ISDB_CNT, 0x00000182}, + {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000}, + {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222}, + {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111}, + {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}, + {}, +}; + +const struct adreno_reglist a650_hwcg[] = { + {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220}, + {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080}, + {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF}, + {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222}, + {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111}, + {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777}, + {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222}, + {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220}, + {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00}, + {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022}, + {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555}, + {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011}, + {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044}, + {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222}, + {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222}, + {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002}, + {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000}, + {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200}, + {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004}, + {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222}, + {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111}, + {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777}, + {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004}, + {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002}, + {REG_A6XX_RBBM_ISDB_CNT, 0x00000182}, + {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000}, + {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222}, + {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111}, + {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}, + {}, +}; + +const struct adreno_reglist a660_hwcg[] = { + {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220}, + {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080}, + {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF}, + {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222}, + {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111}, + {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111}, + {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777}, + {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777}, + 
{REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777}, + {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222}, + {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220}, + {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00}, + {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022}, + {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555}, + {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011}, + {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044}, + {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222}, + {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222}, + {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002}, + {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000}, + {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}, + {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200}, + {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004}, + {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222}, + {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111}, + {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222}, + {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004}, + {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002}, + {REG_A6XX_RBBM_ISDB_CNT, 0x00000182}, + {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000}, + {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000}, + {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222}, + {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111}, + {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}, + {}, +}; + +static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + const struct adreno_reglist *reg; + unsigned int i; + u32 val, clock_cntl_on; + + if (!adreno_gpu->info->hwcg) + return; + + if (adreno_is_a630(adreno_gpu)) + clock_cntl_on = 0x8aa8aa02; + else + clock_cntl_on = 0x8aa8aa82; + + val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL); + + /* Don't re-program the registers if they are already correct */ + if ((!state && !val) || (state && (val == clock_cntl_on))) + return; + + /* Disable SP clock before programming HWCG registers */ + gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0); + + for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++) + gpu_write(gpu, reg->offset, state ? reg->value : 0); + + /* Enable SP clock */ + gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1); + + gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? 
clock_cntl_on : 0); +} + +/* For a615, a616, a618, a619, a630, a640 and a680 */ +static const u32 a6xx_protect[] = { + A6XX_PROTECT_RDONLY(0x00000, 0x04ff), + A6XX_PROTECT_RDONLY(0x00501, 0x0005), + A6XX_PROTECT_RDONLY(0x0050b, 0x02f4), + A6XX_PROTECT_NORDWR(0x0050e, 0x0000), + A6XX_PROTECT_NORDWR(0x00510, 0x0000), + A6XX_PROTECT_NORDWR(0x00534, 0x0000), + A6XX_PROTECT_NORDWR(0x00800, 0x0082), + A6XX_PROTECT_NORDWR(0x008a0, 0x0008), + A6XX_PROTECT_NORDWR(0x008ab, 0x0024), + A6XX_PROTECT_RDONLY(0x008de, 0x00ae), + A6XX_PROTECT_NORDWR(0x00900, 0x004d), + A6XX_PROTECT_NORDWR(0x0098d, 0x0272), + A6XX_PROTECT_NORDWR(0x00e00, 0x0001), + A6XX_PROTECT_NORDWR(0x00e03, 0x000c), + A6XX_PROTECT_NORDWR(0x03c00, 0x00c3), + A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff), + A6XX_PROTECT_NORDWR(0x08630, 0x01cf), + A6XX_PROTECT_NORDWR(0x08e00, 0x0000), + A6XX_PROTECT_NORDWR(0x08e08, 0x0000), + A6XX_PROTECT_NORDWR(0x08e50, 0x001f), + A6XX_PROTECT_NORDWR(0x09624, 0x01db), + A6XX_PROTECT_NORDWR(0x09e70, 0x0001), + A6XX_PROTECT_NORDWR(0x09e78, 0x0187), + A6XX_PROTECT_NORDWR(0x0a630, 0x01cf), + A6XX_PROTECT_NORDWR(0x0ae02, 0x0000), + A6XX_PROTECT_NORDWR(0x0ae50, 0x032f), + A6XX_PROTECT_NORDWR(0x0b604, 0x0000), + A6XX_PROTECT_NORDWR(0x0be02, 0x0001), + A6XX_PROTECT_NORDWR(0x0be20, 0x17df), + A6XX_PROTECT_NORDWR(0x0f000, 0x0bff), + A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff), + A6XX_PROTECT_NORDWR(0x11c00, 0x0000), /* note: infinite range */ +}; + +/* These are for a620 and a650 */ +static const u32 a650_protect[] = { + A6XX_PROTECT_RDONLY(0x00000, 0x04ff), + A6XX_PROTECT_RDONLY(0x00501, 0x0005), + A6XX_PROTECT_RDONLY(0x0050b, 0x02f4), + A6XX_PROTECT_NORDWR(0x0050e, 0x0000), + A6XX_PROTECT_NORDWR(0x00510, 0x0000), + A6XX_PROTECT_NORDWR(0x00534, 0x0000), + A6XX_PROTECT_NORDWR(0x00800, 0x0082), + A6XX_PROTECT_NORDWR(0x008a0, 0x0008), + A6XX_PROTECT_NORDWR(0x008ab, 0x0024), + A6XX_PROTECT_RDONLY(0x008de, 0x00ae), + A6XX_PROTECT_NORDWR(0x00900, 0x004d), + A6XX_PROTECT_NORDWR(0x0098d, 0x0272), + A6XX_PROTECT_NORDWR(0x00e00, 0x0001), + A6XX_PROTECT_NORDWR(0x00e03, 0x000c), + A6XX_PROTECT_NORDWR(0x03c00, 0x00c3), + A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff), + A6XX_PROTECT_NORDWR(0x08630, 0x01cf), + A6XX_PROTECT_NORDWR(0x08e00, 0x0000), + A6XX_PROTECT_NORDWR(0x08e08, 0x0000), + A6XX_PROTECT_NORDWR(0x08e50, 0x001f), + A6XX_PROTECT_NORDWR(0x08e80, 0x027f), + A6XX_PROTECT_NORDWR(0x09624, 0x01db), + A6XX_PROTECT_NORDWR(0x09e60, 0x0011), + A6XX_PROTECT_NORDWR(0x09e78, 0x0187), + A6XX_PROTECT_NORDWR(0x0a630, 0x01cf), + A6XX_PROTECT_NORDWR(0x0ae02, 0x0000), + A6XX_PROTECT_NORDWR(0x0ae50, 0x032f), + A6XX_PROTECT_NORDWR(0x0b604, 0x0000), + A6XX_PROTECT_NORDWR(0x0b608, 0x0007), + A6XX_PROTECT_NORDWR(0x0be02, 0x0001), + A6XX_PROTECT_NORDWR(0x0be20, 0x17df), + A6XX_PROTECT_NORDWR(0x0f000, 0x0bff), + A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff), + A6XX_PROTECT_NORDWR(0x18400, 0x1fff), + A6XX_PROTECT_NORDWR(0x1a800, 0x1fff), + A6XX_PROTECT_NORDWR(0x1f400, 0x0443), + A6XX_PROTECT_RDONLY(0x1f844, 0x007b), + A6XX_PROTECT_NORDWR(0x1f887, 0x001b), + A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */ +}; + +/* These are for a635 and a660 */ +static const u32 a660_protect[] = { + A6XX_PROTECT_RDONLY(0x00000, 0x04ff), + A6XX_PROTECT_RDONLY(0x00501, 0x0005), + A6XX_PROTECT_RDONLY(0x0050b, 0x02f4), + A6XX_PROTECT_NORDWR(0x0050e, 0x0000), + A6XX_PROTECT_NORDWR(0x00510, 0x0000), + A6XX_PROTECT_NORDWR(0x00534, 0x0000), + A6XX_PROTECT_NORDWR(0x00800, 0x0082), + A6XX_PROTECT_NORDWR(0x008a0, 0x0008), + A6XX_PROTECT_NORDWR(0x008ab, 0x0024), + 
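+	/*
+	 * Assuming the usual A6XX_PROTECT_* encoding (an 18-bit base register
+	 * offset, a 14-bit span length, and BIT(31) meaning "no read or
+	 * write"), an entry expands roughly as:
+	 *
+	 *   A6XX_PROTECT_NORDWR(0x00510, 0x0000)
+	 *       == BIT(31) | (0x0000 << 18) | 0x00510
+	 *
+	 * i.e. a zero length still protects a single dword. The "infinite
+	 * range" entries at the end rely on CP_PROTECT_CNTL BIT(3), set in
+	 * a6xx_set_cp_protect(), which extends the last span to the end of
+	 * the register space.
+	 */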
A6XX_PROTECT_RDONLY(0x008de, 0x00ae), + A6XX_PROTECT_NORDWR(0x00900, 0x004d), + A6XX_PROTECT_NORDWR(0x0098d, 0x0272), + A6XX_PROTECT_NORDWR(0x00e00, 0x0001), + A6XX_PROTECT_NORDWR(0x00e03, 0x000c), + A6XX_PROTECT_NORDWR(0x03c00, 0x00c3), + A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff), + A6XX_PROTECT_NORDWR(0x08630, 0x01cf), + A6XX_PROTECT_NORDWR(0x08e00, 0x0000), + A6XX_PROTECT_NORDWR(0x08e08, 0x0000), + A6XX_PROTECT_NORDWR(0x08e50, 0x001f), + A6XX_PROTECT_NORDWR(0x08e80, 0x027f), + A6XX_PROTECT_NORDWR(0x09624, 0x01db), + A6XX_PROTECT_NORDWR(0x09e60, 0x0011), + A6XX_PROTECT_NORDWR(0x09e78, 0x0187), + A6XX_PROTECT_NORDWR(0x0a630, 0x01cf), + A6XX_PROTECT_NORDWR(0x0ae02, 0x0000), + A6XX_PROTECT_NORDWR(0x0ae50, 0x012f), + A6XX_PROTECT_NORDWR(0x0b604, 0x0000), + A6XX_PROTECT_NORDWR(0x0b608, 0x0006), + A6XX_PROTECT_NORDWR(0x0be02, 0x0001), + A6XX_PROTECT_NORDWR(0x0be20, 0x015f), + A6XX_PROTECT_NORDWR(0x0d000, 0x05ff), + A6XX_PROTECT_NORDWR(0x0f000, 0x0bff), + A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff), + A6XX_PROTECT_NORDWR(0x18400, 0x1fff), + A6XX_PROTECT_NORDWR(0x1a400, 0x1fff), + A6XX_PROTECT_NORDWR(0x1f400, 0x0443), + A6XX_PROTECT_RDONLY(0x1f844, 0x007b), + A6XX_PROTECT_NORDWR(0x1f860, 0x0000), + A6XX_PROTECT_NORDWR(0x1f887, 0x001b), + A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */ +}; + +static void a6xx_set_cp_protect(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + const u32 *regs = a6xx_protect; + unsigned i, count, count_max; + + if (adreno_is_a650(adreno_gpu)) { + regs = a650_protect; + count = ARRAY_SIZE(a650_protect); + count_max = 48; + BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48); + } else if (adreno_is_a660_family(adreno_gpu)) { + regs = a660_protect; + count = ARRAY_SIZE(a660_protect); + count_max = 48; + BUILD_BUG_ON(ARRAY_SIZE(a660_protect) > 48); + } else { + regs = a6xx_protect; + count = ARRAY_SIZE(a6xx_protect); + count_max = 32; + BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32); + } + + /* + * Enable access protection to privileged registers, fault on an access + * protect violation and select the last span to protect from the start + * address all the way to the end of the register address space + */ + gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, BIT(0) | BIT(1) | BIT(3)); + + for (i = 0; i < count - 1; i++) + gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]); + /* last CP_PROTECT to have "infinite" length on the last entry */ + gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]); +} + +static void a6xx_set_ubwc_config(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + u32 lower_bit = 2; + u32 amsbc = 0; + u32 rgb565_predicator = 0; + u32 uavflagprd_inv = 0; + + /* a618 is using the hw default values */ + if (adreno_is_a618(adreno_gpu)) + return; + + if (adreno_is_a640_family(adreno_gpu)) + amsbc = 1; + + if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) { + /* TODO: get ddr type from bootloader and use 2 for LPDDR4 */ + lower_bit = 3; + amsbc = 1; + rgb565_predicator = 1; + uavflagprd_inv = 2; + } + + if (adreno_is_7c3(adreno_gpu)) { + lower_bit = 1; + amsbc = 1; + rgb565_predicator = 1; + uavflagprd_inv = 2; + } + + gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, + rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1); + gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1); + gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, + uavflagprd_inv << 4 | lower_bit << 1); + gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, lower_bit << 21); +} + +static int a6xx_cp_init(struct msm_gpu *gpu) +{ + struct msm_ringbuffer *ring 
= gpu->rb[0]; + + OUT_PKT7(ring, CP_ME_INIT, 8); + + OUT_RING(ring, 0x0000002f); + + /* Enable multiple hardware contexts */ + OUT_RING(ring, 0x00000003); + + /* Enable error detection */ + OUT_RING(ring, 0x20000000); + + /* Don't enable header dump */ + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + + /* No workarounds enabled */ + OUT_RING(ring, 0x00000000); + + /* Pad rest of the cmds with 0's */ + OUT_RING(ring, 0x00000000); + OUT_RING(ring, 0x00000000); + + a6xx_flush(gpu, ring); + return a6xx_idle(gpu, ring) ? 0 : -EINVAL; +} + +/* + * Check that the microcode version is new enough to include several key + * security fixes. Return true if the ucode is safe. + */ +static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu, + struct drm_gem_object *obj) +{ + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + const char *sqe_name = adreno_gpu->info->fw[ADRENO_FW_SQE]; + u32 *buf = msm_gem_get_vaddr(obj); + bool ret = false; + + if (IS_ERR(buf)) + return false; + + /* + * Targets up to a640 (a618, a630 and a640) need to check for a + * microcode version that is patched to support the whereami opcode or + * one that is new enough to include it by default. + * + * a650 tier targets don't need whereami but still need to be + * equal to or newer than 0.95 for other security fixes + * + * a660 targets have all the critical security fixes from the start + */ + if (!strcmp(sqe_name, "a630_sqe.fw")) { + /* + * If the lowest nibble is 0xa that is an indication that this + * microcode has been patched. The actual version is in dword + * [3] but we only care about the patchlevel which is the lowest + * nibble of dword [3] + * + * Otherwise check that the firmware is greater than or equal + * to 1.90 which was the first version that had this fix built + * in + */ + if ((((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) || + (buf[0] & 0xfff) >= 0x190) { + a6xx_gpu->has_whereami = true; + ret = true; + goto out; + } + + DRM_DEV_ERROR(&gpu->pdev->dev, + "a630 SQE ucode is too old. Have version %x need at least %x\n", + buf[0] & 0xfff, 0x190); + } else if (!strcmp(sqe_name, "a650_sqe.fw")) { + if ((buf[0] & 0xfff) >= 0x095) { + ret = true; + goto out; + } + + DRM_DEV_ERROR(&gpu->pdev->dev, + "a650 SQE ucode is too old. 
Have version %x need at least %x\n",
+			buf[0] & 0xfff, 0x095);
+	} else if (!strcmp(sqe_name, "a660_sqe.fw")) {
+		ret = true;
+	} else {
+		DRM_DEV_ERROR(&gpu->pdev->dev,
+			"unknown GPU, add it to a6xx_ucode_check_version()!!\n");
+	}
+out:
+	msm_gem_put_vaddr(obj);
+	return ret;
+}
+
+static int a6xx_ucode_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+	if (!a6xx_gpu->sqe_bo) {
+		a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu,
+			adreno_gpu->fw[ADRENO_FW_SQE], &a6xx_gpu->sqe_iova);
+
+		if (IS_ERR(a6xx_gpu->sqe_bo)) {
+			int ret = PTR_ERR(a6xx_gpu->sqe_bo);
+
+			a6xx_gpu->sqe_bo = NULL;
+			DRM_DEV_ERROR(&gpu->pdev->dev,
+				"Could not allocate SQE ucode: %d\n", ret);
+
+			return ret;
+		}
+
+		msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
+		if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) {
+			msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+			drm_gem_object_put(a6xx_gpu->sqe_bo);
+
+			a6xx_gpu->sqe_bo = NULL;
+			return -EPERM;
+		}
+	}
+
+	gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova);
+
+	return 0;
+}
+
+static int a6xx_zap_shader_init(struct msm_gpu *gpu)
+{
+	static bool loaded;
+	int ret;
+
+	if (loaded)
+		return 0;
+
+	ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
+
+	loaded = !ret;
+	return ret;
+}
+
+#define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
+	A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
+	A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+	A6XX_RBBM_INT_0_MASK_CP_IB2 | \
+	A6XX_RBBM_INT_0_MASK_CP_IB1 | \
+	A6XX_RBBM_INT_0_MASK_CP_RB | \
+	A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+	A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
+	A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
+	A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+	A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
+
+static int hw_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+	int ret;
+
+	/* Make sure the GMU keeps the GPU on while we set it up */
+	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+
+	/* Clear GBIF halt in case GX domain was not collapsed */
+	if (a6xx_has_gbif(adreno_gpu))
+		gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0);
+
+	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
+
+	/*
+	 * Disable the trusted memory range - we don't actually support secure
+	 * memory rendering at this point in time and we don't want to block
+	 * off part of the virtual memory space.
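+	 * Both the trusted base and the trusted size are simply written as
+	 * zero below, so no window is left marked as trusted.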
+ */ + gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 0x00000000); + gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000); + + /* Turn on 64 bit addressing for all blocks */ + gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1); + gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1); + gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1); + gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1); + gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1); + gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1); + gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1); + gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1); + gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1); + gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1); + gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1); + gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1); + + /* enable hardware clockgating */ + a6xx_set_hwcg(gpu, true); + + /* VBIF/GBIF start*/ + if (adreno_is_a640_family(adreno_gpu) || + adreno_is_a650_family(adreno_gpu)) { + gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620); + gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620); + gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620); + gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620); + gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620); + gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3); + } else { + gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3); + } + + if (adreno_is_a630(adreno_gpu)) + gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009); + + /* Make all blocks contribute to the GPU BUSY perf counter */ + gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff); + + /* Disable L2 bypass in the UCHE */ + gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0); + gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff); + gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_LO, 0xfffff000); + gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff); + gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000); + gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff); + + if (!adreno_is_a650_family(adreno_gpu)) { + /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */ + gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000); + + gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO, + 0x00100000 + adreno_gpu->gmem - 1); + } + + gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804); + gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4); + + if (adreno_is_a640_family(adreno_gpu) || + adreno_is_a650_family(adreno_gpu)) + gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140); + else + gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0); + gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c); + + if (adreno_is_a660_family(adreno_gpu)) + gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020); + + /* Setting the mem pool size */ + gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128); + + /* Setting the primFifo thresholds default values, + * and vccCacheSkipDis=1 bit (0x200) for A640 and newer + */ + if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) + gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200); + else if (adreno_is_a640_family(adreno_gpu) || adreno_is_7c3(adreno_gpu)) + gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200); + else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) + gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200); + else + gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000); + + /* Set the AHB default slave response to "ERROR" */ + gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 
0x1);
+
+	/* Turn on performance counters */
+	gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);
+
+	/* Select CP0 to always count cycles */
+	gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL(0), PERF_CP_ALWAYS_COUNT);
+
+	a6xx_set_ubwc_config(gpu);
+
+	/* Enable fault detection */
+	gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
+		(1 << 30) | 0x1fffff);
+
+	gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
+
+	/* Set weights for bicubic filtering */
+	if (adreno_is_a650_family(adreno_gpu)) {
+		gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0);
+		gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
+			0x3fe05ff4);
+		gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2,
+			0x3fa0ebee);
+		gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3,
+			0x3f5193ed);
+		gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4,
+			0x3f0243f0);
+	}
+
+	/* Protect registers from the CP */
+	a6xx_set_cp_protect(gpu);
+
+	if (adreno_is_a660_family(adreno_gpu)) {
+		gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1);
+		gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0);
+	}
+
+	/* Set dualQ + disable afull for A660 GPU */
+	if (adreno_is_a660(adreno_gpu))
+		gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
+
+	/* Enable expanded apriv for targets that support it */
+	if (gpu->hw_apriv) {
+		gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
+			(1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
+	}
+
+	/* Enable interrupts */
+	gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
+
+	ret = adreno_hw_init(gpu);
+	if (ret)
+		goto out;
+
+	ret = a6xx_ucode_init(gpu);
+	if (ret)
+		goto out;
+
+	/* Set the ringbuffer address */
+	gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova);
+
+	/* Targets that support extended APRIV can use the RPTR shadow from
+	 * hardware but all the other ones need to disable the feature. Targets
+	 * that support the WHERE_AM_I opcode can use that instead
+	 */
+	if (adreno_gpu->base.hw_apriv)
+		gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT);
+	else
+		gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
+			MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+	/*
+	 * Expanded APRIV and targets that support WHERE_AM_I both need a
+	 * privileged buffer to store the RPTR shadow
+	 */
+
+	if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) {
+		if (!a6xx_gpu->shadow_bo) {
+			a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
+				sizeof(u32) * gpu->nr_rings,
+				MSM_BO_WC | MSM_BO_MAP_PRIV,
+				gpu->aspace, &a6xx_gpu->shadow_bo,
+				&a6xx_gpu->shadow_iova);
+
+			if (IS_ERR(a6xx_gpu->shadow))
+				return PTR_ERR(a6xx_gpu->shadow);
+
+			msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow");
+		}
+
+		gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO,
+			shadowptr(a6xx_gpu, gpu->rb[0]));
+	}
+
+	/* Always come up on rb 0 */
+	a6xx_gpu->cur_ring = gpu->rb[0];
+
+	gpu->cur_ctx_seqno = 0;
+
+	/* Enable the SQE to start the CP engine */
+	gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
+
+	ret = a6xx_cp_init(gpu);
+	if (ret)
+		goto out;
+
+	/*
+	 * Try to load a zap shader into the secure world. If successful
+	 * we can use the CP to switch out of secure mode. If not then we
+	 * have no recourse but to try to switch ourselves out manually. If we
+	 * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
+	 * be blocked and a permissions violation will soon follow.
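+	 * (The zap shader is signed firmware that the secure world
+	 * authenticates through the PAS interface - GPU_PAS_ID above - via
+	 * adreno_zap_shader_load(); on production-fused parts that is
+	 * typically the only supported way out of secure mode.)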
+	 */
+	ret = a6xx_zap_shader_init(gpu);
+	if (!ret) {
+		OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
+		OUT_RING(gpu->rb[0], 0x00000000);
+
+		a6xx_flush(gpu, gpu->rb[0]);
+		if (!a6xx_idle(gpu, gpu->rb[0]))
+			return -EINVAL;
+	} else if (ret == -ENODEV) {
+		/*
+		 * This device does not use zap shader (but print a warning
+		 * just in case someone got their dt wrong.. hopefully they
+		 * have a debug UART to realize the error of their ways...
+		 * if you mess this up you are about to crash horribly)
+		 */
+		dev_warn_once(gpu->dev->dev,
+			"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
+		gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+		ret = 0;
+	} else {
+		return ret;
+	}
+
+out:
+	/*
+	 * Tell the GMU that we are done touching the GPU and it can start power
+	 * management
+	 */
+	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+
+	if (a6xx_gpu->gmu.legacy) {
+		/* Take the GMU out of its special boot mode */
+		a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
+	}
+
+	return ret;
+}
+
+static int a6xx_hw_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+	int ret;
+
+	mutex_lock(&a6xx_gpu->gmu.lock);
+	ret = hw_init(gpu);
+	mutex_unlock(&a6xx_gpu->gmu.lock);
+
+	return ret;
+}
+
+static void a6xx_dump(struct msm_gpu *gpu)
+{
+	DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n",
+		gpu_read(gpu, REG_A6XX_RBBM_STATUS));
+	adreno_dump(gpu);
+}
+
+#define VBIF_RESET_ACK_TIMEOUT	100
+#define VBIF_RESET_ACK_MASK	0x00f0
+
+static void a6xx_recover(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+	int i, active_submits;
+
+	adreno_dump_info(gpu);
+
+	for (i = 0; i < 8; i++)
+		DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
+			gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
+
+	if (hang_debug)
+		a6xx_dump(gpu);
+
+	/*
+	 * To handle recovery specific sequences during the rpm suspend we are
+	 * about to trigger
+	 */
+	a6xx_gpu->hung = true;
+
+	/* Halt SQE first */
+	gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
+
+	/*
+	 * Turn off keep alive that might have been enabled by the hang
+	 * interrupt
+	 */
+	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
+
+	pm_runtime_dont_use_autosuspend(&gpu->pdev->dev);
+
+	/* active_submit won't change until we make a submission */
+	mutex_lock(&gpu->active_lock);
+	active_submits = gpu->active_submits;
+
+	/*
+	 * Temporarily clear active_submits count to silence a WARN() in the
+	 * runtime suspend cb
+	 */
+	gpu->active_submits = 0;
+
+	/* Drop the rpm refcount from active submits */
+	if (active_submits)
+		pm_runtime_put(&gpu->pdev->dev);
+
+	/* And the final one from recover worker */
+	pm_runtime_put_sync(&gpu->pdev->dev);
+
+	/* Call into gpucc driver to poll for cx gdsc collapse */
+	reset_control_reset(gpu->cx_collapse);
+
+	pm_runtime_use_autosuspend(&gpu->pdev->dev);
+
+	if (active_submits)
+		pm_runtime_get(&gpu->pdev->dev);
+
+	pm_runtime_get_sync(&gpu->pdev->dev);
+
+	gpu->active_submits = active_submits;
+	mutex_unlock(&gpu->active_lock);
+
+	msm_gpu_hw_init(gpu);
+	a6xx_gpu->hung = false;
+}
+
+static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid)
+{
+	static const char *uche_clients[7] = {
+		"VFD", "SP", "VSC", "VPC", "HLSQ", "PC", "LRZ",
+	};
+	u32 val;
+
+	if (mid < 1 || mid > 3)
+		return "UNKNOWN";
+
+	/*
+	 * The source of the data depends on the mid ID read from FSYNR1
+ * and the client ID read from the UCHE block + */ + val = gpu_read(gpu, REG_A6XX_UCHE_CLIENT_PF); + + /* mid = 3 is most precise and refers to only one block per client */ + if (mid == 3) + return uche_clients[val & 7]; + + /* For mid=2 the source is TP or VFD except when the client id is 0 */ + if (mid == 2) + return ((val & 7) == 0) ? "TP" : "TP|VFD"; + + /* For mid=1 just return "UCHE" as a catchall for everything else */ + return "UCHE"; +} + +static const char *a6xx_fault_block(struct msm_gpu *gpu, u32 id) +{ + if (id == 0) + return "CP"; + else if (id == 4) + return "CCU"; + else if (id == 6) + return "CDP Prefetch"; + + return a6xx_uche_fault_block(gpu, id); +} + +#define ARM_SMMU_FSR_TF BIT(1) +#define ARM_SMMU_FSR_PF BIT(3) +#define ARM_SMMU_FSR_EF BIT(4) + +static int a6xx_fault_handler(void *arg, unsigned long iova, int flags, void *data) +{ + struct msm_gpu *gpu = arg; + struct adreno_smmu_fault_info *info = data; + const char *type = "UNKNOWN"; + const char *block; + bool do_devcoredump = info && !READ_ONCE(gpu->crashstate); + + /* + * If we aren't going to be resuming later from fault_worker, then do + * it now. + */ + if (!do_devcoredump) { + gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu); + } + + /* + * Print a default message if we couldn't get the data from the + * adreno-smmu-priv + */ + if (!info) { + pr_warn_ratelimited("*** gpu fault: iova=%.16lx flags=%d (%u,%u,%u,%u)\n", + iova, flags, + gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)), + gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)), + gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)), + gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7))); + + return 0; + } + + if (info->fsr & ARM_SMMU_FSR_TF) + type = "TRANSLATION"; + else if (info->fsr & ARM_SMMU_FSR_PF) + type = "PERMISSION"; + else if (info->fsr & ARM_SMMU_FSR_EF) + type = "EXTERNAL"; + + block = a6xx_fault_block(gpu, info->fsynr1 & 0xff); + + pr_warn_ratelimited("*** gpu fault: ttbr0=%.16llx iova=%.16lx dir=%s type=%s source=%s (%u,%u,%u,%u)\n", + info->ttbr0, iova, + flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ", + type, block, + gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)), + gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)), + gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)), + gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7))); + + if (do_devcoredump) { + /* Turn off the hangcheck timer to keep it from bothering us */ + del_timer(&gpu->hangcheck_timer); + + gpu->fault_info.ttbr0 = info->ttbr0; + gpu->fault_info.iova = iova; + gpu->fault_info.flags = flags; + gpu->fault_info.type = type; + gpu->fault_info.block = block; + + kthread_queue_work(gpu->worker, &gpu->fault_work); + } + + return 0; +} + +static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu) +{ + u32 status = gpu_read(gpu, REG_A6XX_CP_INTERRUPT_STATUS); + + if (status & A6XX_CP_INT_CP_OPCODE_ERROR) { + u32 val; + + gpu_write(gpu, REG_A6XX_CP_SQE_STAT_ADDR, 1); + val = gpu_read(gpu, REG_A6XX_CP_SQE_STAT_DATA); + dev_err_ratelimited(&gpu->pdev->dev, + "CP | opcode error | possible opcode=0x%8.8X\n", + val); + } + + if (status & A6XX_CP_INT_CP_UCODE_ERROR) + dev_err_ratelimited(&gpu->pdev->dev, + "CP ucode error interrupt\n"); + + if (status & A6XX_CP_INT_CP_HW_FAULT_ERROR) + dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n", + gpu_read(gpu, REG_A6XX_CP_HW_FAULT)); + + if (status & A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) { + u32 val = gpu_read(gpu, REG_A6XX_CP_PROTECT_STATUS); + + dev_err_ratelimited(&gpu->pdev->dev, + "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n", + val & (1 << 20) ? 
"READ" : "WRITE", + (val & 0x3ffff), val); + } + + if (status & A6XX_CP_INT_CP_AHB_ERROR) + dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n"); + + if (status & A6XX_CP_INT_CP_VSD_PARITY_ERROR) + dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n"); + + if (status & A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR) + dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n"); + +} + +static void a6xx_fault_detect_irq(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); + + /* + * If stalled on SMMU fault, we could trip the GPU's hang detection, + * but the fault handler will trigger the devcore dump, and we want + * to otherwise resume normally rather than killing the submit, so + * just bail. + */ + if (gpu_read(gpu, REG_A6XX_RBBM_STATUS3) & A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT) + return; + + /* + * Force the GPU to stay on until after we finish + * collecting information + */ + gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1); + + DRM_DEV_ERROR(&gpu->pdev->dev, + "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n", + ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0, + gpu_read(gpu, REG_A6XX_RBBM_STATUS), + gpu_read(gpu, REG_A6XX_CP_RB_RPTR), + gpu_read(gpu, REG_A6XX_CP_RB_WPTR), + gpu_read64(gpu, REG_A6XX_CP_IB1_BASE), + gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE), + gpu_read64(gpu, REG_A6XX_CP_IB2_BASE), + gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE)); + + /* Turn off the hangcheck timer to keep it from bothering us */ + del_timer(&gpu->hangcheck_timer); + + kthread_queue_work(gpu->worker, &gpu->recover_work); +} + +static irqreturn_t a6xx_irq(struct msm_gpu *gpu) +{ + struct msm_drm_private *priv = gpu->dev->dev_private; + u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS); + + gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status); + + if (priv->disable_err_irq) + status &= A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS; + + if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT) + a6xx_fault_detect_irq(gpu); + + if (status & A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR) + dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n"); + + if (status & A6XX_RBBM_INT_0_MASK_CP_HW_ERROR) + a6xx_cp_hw_err_irq(gpu); + + if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW) + dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n"); + + if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW) + dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n"); + + if (status & A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS) + dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n"); + + if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) + msm_gpu_retire(gpu); + + return IRQ_HANDLED; +} + +static void a6xx_llc_rmw(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 mask, u32 or) +{ + return msm_rmw(a6xx_gpu->llc_mmio + (reg << 2), mask, or); +} + +static void a6xx_llc_write(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 value) +{ + msm_writel(value, a6xx_gpu->llc_mmio + (reg << 2)); +} + +static void a6xx_llc_deactivate(struct a6xx_gpu *a6xx_gpu) +{ + llcc_slice_deactivate(a6xx_gpu->llc_slice); + llcc_slice_deactivate(a6xx_gpu->htw_llc_slice); +} + +static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu) +{ + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + u32 cntl1_regval = 0; + + if (IS_ERR(a6xx_gpu->llc_mmio)) + return; + + if 
(!llcc_slice_activate(a6xx_gpu->llc_slice)) { + u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice); + + gpu_scid &= 0x1f; + cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) | + (gpu_scid << 15) | (gpu_scid << 20); + + /* On A660, the SCID programming for UCHE traffic is done in + * A6XX_GBIF_SCACHE_CNTL0[14:10] + */ + if (adreno_is_a660_family(adreno_gpu)) + gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) | + (1 << 8), (gpu_scid << 10) | (1 << 8)); + } + + /* + * For targets with a MMU500, activate the slice but don't program the + * register. The XBL will take care of that. + */ + if (!llcc_slice_activate(a6xx_gpu->htw_llc_slice)) { + if (!a6xx_gpu->have_mmu500) { + u32 gpuhtw_scid = llcc_get_slice_id(a6xx_gpu->htw_llc_slice); + + gpuhtw_scid &= 0x1f; + cntl1_regval |= FIELD_PREP(GENMASK(29, 25), gpuhtw_scid); + } + } + + if (!cntl1_regval) + return; + + /* + * Program the slice IDs for the various GPU blocks and GPU MMU + * pagetables + */ + if (!a6xx_gpu->have_mmu500) { + a6xx_llc_write(a6xx_gpu, + REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval); + + /* + * Program cacheability overrides to not allocate cache + * lines on a write miss + */ + a6xx_llc_rmw(a6xx_gpu, + REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03); + return; + } + + gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval); +} + +static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu) +{ + llcc_slice_putd(a6xx_gpu->llc_slice); + llcc_slice_putd(a6xx_gpu->htw_llc_slice); +} + +static void a6xx_llc_slices_init(struct platform_device *pdev, + struct a6xx_gpu *a6xx_gpu) +{ + struct device_node *phandle; + + /* + * There is a different programming path for targets with an mmu500 + * attached, so detect if that is the case + */ + phandle = of_parse_phandle(pdev->dev.of_node, "iommus", 0); + a6xx_gpu->have_mmu500 = (phandle && + of_device_is_compatible(phandle, "arm,mmu-500")); + of_node_put(phandle); + + if (a6xx_gpu->have_mmu500) + a6xx_gpu->llc_mmio = NULL; + else + a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem"); + + a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU); + a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW); + + if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice)) + a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL); +} + +static int a6xx_pm_resume(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + int ret; + + gpu->needs_hw_init = true; + + trace_msm_gpu_resume(0); + + mutex_lock(&a6xx_gpu->gmu.lock); + ret = a6xx_gmu_resume(a6xx_gpu); + mutex_unlock(&a6xx_gpu->gmu.lock); + if (ret) + return ret; + + msm_devfreq_resume(gpu); + + a6xx_llc_activate(a6xx_gpu); + + return 0; +} + +static int a6xx_pm_suspend(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + int i, ret; + + trace_msm_gpu_suspend(0); + + a6xx_llc_deactivate(a6xx_gpu); + + msm_devfreq_suspend(gpu); + + mutex_lock(&a6xx_gpu->gmu.lock); + ret = a6xx_gmu_stop(a6xx_gpu); + mutex_unlock(&a6xx_gpu->gmu.lock); + if (ret) + return ret; + + if (a6xx_gpu->shadow_bo) + for (i = 0; i < gpu->nr_rings; i++) + a6xx_gpu->shadow[i] = 0; + + gpu->suspend_count++; + + return 0; +} + +static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + mutex_lock(&a6xx_gpu->gmu.lock); + + /* Force the GPU power on so we 
can read this register */ + a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); + + *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO); + + a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); + + mutex_unlock(&a6xx_gpu->gmu.lock); + + return 0; +} + +static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + return a6xx_gpu->cur_ring; +} + +static void a6xx_destroy(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + if (a6xx_gpu->sqe_bo) { + msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace); + drm_gem_object_put(a6xx_gpu->sqe_bo); + } + + if (a6xx_gpu->shadow_bo) { + msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace); + drm_gem_object_put(a6xx_gpu->shadow_bo); + } + + a6xx_llc_slices_destroy(a6xx_gpu); + + a6xx_gmu_remove(a6xx_gpu); + + adreno_gpu_cleanup(adreno_gpu); + + kfree(a6xx_gpu); +} + +static u64 a6xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + u64 busy_cycles; + + /* 19.2MHz */ + *out_sample_rate = 19200000; + + busy_cycles = gmu_read64(&a6xx_gpu->gmu, + REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L, + REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H); + + return busy_cycles; +} + +static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp, + bool suspended) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + mutex_lock(&a6xx_gpu->gmu.lock); + a6xx_gmu_set_freq(gpu, opp, suspended); + mutex_unlock(&a6xx_gpu->gmu.lock); +} + +static struct msm_gem_address_space * +a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct iommu_domain *iommu; + struct msm_mmu *mmu; + struct msm_gem_address_space *aspace; + u64 start, size; + + iommu = iommu_domain_alloc(&platform_bus_type); + if (!iommu) + return NULL; + + /* + * This allows GPU to set the bus attributes required to use system + * cache on behalf of the iommu page table walker. + */ + if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice)) + adreno_set_llc_attributes(iommu); + + mmu = msm_iommu_new(&pdev->dev, iommu); + if (IS_ERR(mmu)) { + iommu_domain_free(iommu); + return ERR_CAST(mmu); + } + + /* + * Use the aperture start or SZ_16M, whichever is greater. 
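+	 * (The start address is additionally masked to 49 bits below,
+	 * GENMASK_ULL(48, 0), which appears to match the VA span the SMMU
+	 * pagetables can cover.)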
This will + * ensure that we align with the allocated pagetable range while still + * allowing room in the lower 32 bits for GMEM and whatnot + */ + start = max_t(u64, SZ_16M, iommu->geometry.aperture_start); + size = iommu->geometry.aperture_end - start + 1; + + aspace = msm_gem_address_space_create(mmu, "gpu", + start & GENMASK_ULL(48, 0), size); + + if (IS_ERR(aspace) && !IS_ERR(mmu)) + mmu->funcs->destroy(mmu); + + return aspace; +} + +static struct msm_gem_address_space * +a6xx_create_private_address_space(struct msm_gpu *gpu) +{ + struct msm_mmu *mmu; + + mmu = msm_iommu_pagetable_create(gpu->aspace->mmu); + + if (IS_ERR(mmu)) + return ERR_CAST(mmu); + + return msm_gem_address_space_create(mmu, + "gpu", 0x100000000ULL, + adreno_private_address_space_size(gpu)); +} + +static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) + return a6xx_gpu->shadow[ring->id]; + + return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR); +} + +static bool a6xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + struct msm_cp_state cp_state = { + .ib1_base = gpu_read64(gpu, REG_A6XX_CP_IB1_BASE), + .ib2_base = gpu_read64(gpu, REG_A6XX_CP_IB2_BASE), + .ib1_rem = gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE), + .ib2_rem = gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE), + }; + bool progress; + + /* + * Adjust the remaining data to account for what has already been + * fetched from memory, but not yet consumed by the SQE. + * + * This is not *technically* correct, the amount buffered could + * exceed the IB size due to hw prefetching ahead, but: + * + * (1) We aren't trying to find the exact position, just whether + * progress has been made + * (2) The CP_REG_TO_MEM at the end of a submit should be enough + * to prevent prefetching into an unrelated submit. (And + * either way, at some point the ROQ will be full.) + */ + cp_state.ib1_rem += gpu_read(gpu, REG_A6XX_CP_CSQ_IB1_STAT) >> 16; + cp_state.ib2_rem += gpu_read(gpu, REG_A6XX_CP_CSQ_IB2_STAT) >> 16; + + progress = !!memcmp(&cp_state, &ring->last_cp_state, sizeof(cp_state)); + + ring->last_cp_state = cp_state; + + return progress; +} + +static u32 a618_get_speed_bin(u32 fuse) +{ + if (fuse == 0) + return 0; + else if (fuse == 169) + return 1; + else if (fuse == 174) + return 2; + + return UINT_MAX; +} + +static u32 a619_get_speed_bin(u32 fuse) +{ + if (fuse == 0) + return 0; + else if (fuse == 120) + return 4; + else if (fuse == 138) + return 3; + else if (fuse == 169) + return 2; + else if (fuse == 180) + return 1; + + return UINT_MAX; +} + +static u32 adreno_7c3_get_speed_bin(u32 fuse) +{ + if (fuse == 0) + return 0; + else if (fuse == 117) + return 0; + else if (fuse == 190) + return 1; + + return UINT_MAX; +} + +static u32 fuse_to_supp_hw(struct device *dev, struct adreno_rev rev, u32 fuse) +{ + u32 val = UINT_MAX; + + if (adreno_cmp_rev(ADRENO_REV(6, 1, 8, ANY_ID), rev)) + val = a618_get_speed_bin(fuse); + + if (adreno_cmp_rev(ADRENO_REV(6, 1, 9, ANY_ID), rev)) + val = a619_get_speed_bin(fuse); + + if (adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), rev)) + val = adreno_7c3_get_speed_bin(fuse); + + if (val == UINT_MAX) { + DRM_DEV_ERROR(dev, + "missing support for speed-bin: %u. 
Some OPPs may not be supported by hardware\n", + fuse); + return UINT_MAX; + } + + return (1 << val); +} + +static int a6xx_set_supported_hw(struct device *dev, struct adreno_rev rev) +{ + u32 supp_hw; + u32 speedbin; + int ret; + + ret = adreno_read_speedbin(dev, &speedbin); + /* + * -ENOENT means that the platform doesn't support speedbin which is + * fine + */ + if (ret == -ENOENT) { + return 0; + } else if (ret) { + dev_err_probe(dev, ret, + "failed to read speed-bin. Some OPPs may not be supported by hardware\n"); + return ret; + } + + supp_hw = fuse_to_supp_hw(dev, rev, speedbin); + + ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1); + if (ret) + return ret; + + return 0; +} + +static const struct adreno_gpu_funcs funcs = { + .base = { + .get_param = adreno_get_param, + .set_param = adreno_set_param, + .hw_init = a6xx_hw_init, + .pm_suspend = a6xx_pm_suspend, + .pm_resume = a6xx_pm_resume, + .recover = a6xx_recover, + .submit = a6xx_submit, + .active_ring = a6xx_active_ring, + .irq = a6xx_irq, + .destroy = a6xx_destroy, +#if defined(CONFIG_DRM_MSM_GPU_STATE) + .show = a6xx_show, +#endif + .gpu_busy = a6xx_gpu_busy, + .gpu_get_freq = a6xx_gmu_get_freq, + .gpu_set_freq = a6xx_gpu_set_freq, +#if defined(CONFIG_DRM_MSM_GPU_STATE) + .gpu_state_get = a6xx_gpu_state_get, + .gpu_state_put = a6xx_gpu_state_put, +#endif + .create_address_space = a6xx_create_address_space, + .create_private_address_space = a6xx_create_private_address_space, + .get_rptr = a6xx_get_rptr, + .progress = a6xx_progress, + }, + .get_timestamp = a6xx_get_timestamp, +}; + +struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + struct platform_device *pdev = priv->gpu_pdev; + struct adreno_platform_config *config = pdev->dev.platform_data; + const struct adreno_info *info; + struct device_node *node; + struct a6xx_gpu *a6xx_gpu; + struct adreno_gpu *adreno_gpu; + struct msm_gpu *gpu; + int ret; + + a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL); + if (!a6xx_gpu) + return ERR_PTR(-ENOMEM); + + adreno_gpu = &a6xx_gpu->base; + gpu = &adreno_gpu->base; + + adreno_gpu->registers = NULL; + + /* + * We need to know the platform type before calling into adreno_gpu_init + * so that the hw_apriv flag can be correctly set. Snoop into the info + * and grab the revision number + */ + info = adreno_info(config->rev); + + if (info && (info->revn == 650 || info->revn == 660 || + adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), info->rev))) + adreno_gpu->base.hw_apriv = true; + + /* + * For now only clamp to idle freq for devices where this is known not + * to cause power supply issues: + */ + if (info && (info->revn == 618)) + gpu->clamp_to_idle = true; + + a6xx_llc_slices_init(pdev, a6xx_gpu); + + ret = a6xx_set_supported_hw(&pdev->dev, config->rev); + if (ret) { + a6xx_destroy(&(a6xx_gpu->base.base)); + return ERR_PTR(ret); + } + + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); + if (ret) { + a6xx_destroy(&(a6xx_gpu->base.base)); + return ERR_PTR(ret); + } + + /* Check if there is a GMU phandle and set it up */ + node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0); + + /* FIXME: How do we gracefully handle this? 
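+	 * One gentler option (a sketch only, not what the driver does) would
+	 * be to fail the probe instead of crashing the kernel:
+	 *
+	 *	if (!node) {
+	 *		a6xx_destroy(&a6xx_gpu->base.base);
+	 *		return ERR_PTR(-ENODEV);
+	 *	}
+	 *
+	 * For now every supported a6xx has a GMU, so a missing "qcom,gmu"
+	 * phandle means the devicetree is broken and we simply BUG().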
*/ + BUG_ON(!node); + + ret = a6xx_gmu_init(a6xx_gpu, node); + of_node_put(node); + if (ret) { + a6xx_destroy(&(a6xx_gpu->base.base)); + return ERR_PTR(ret); + } + + if (gpu->aspace) + msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, + a6xx_fault_handler); + + return gpu; +} diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h new file mode 100644 index 000000000..eea2e60ce --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. */ + +#ifndef __A6XX_GPU_H__ +#define __A6XX_GPU_H__ + + +#include "adreno_gpu.h" +#include "a6xx.xml.h" + +#include "a6xx_gmu.h" + +extern bool hang_debug; + +struct a6xx_gpu { + struct adreno_gpu base; + + struct drm_gem_object *sqe_bo; + uint64_t sqe_iova; + + struct msm_ringbuffer *cur_ring; + + struct a6xx_gmu gmu; + + struct drm_gem_object *shadow_bo; + uint64_t shadow_iova; + uint32_t *shadow; + + bool has_whereami; + + void __iomem *llc_mmio; + void *llc_slice; + void *htw_llc_slice; + bool have_mmu500; + bool hung; +}; + +#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base) + +/* + * Given a register and a count, return a value to program into + * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len + * registers starting at _reg. + */ +#define A6XX_PROTECT_NORDWR(_reg, _len) \ + ((1 << 31) | \ + (((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF)) + +/* + * Same as above, but allow reads over the range. For areas of mixed use (such + * as performance counters) this allows us to protect a much larger range with a + * single register + */ +#define A6XX_PROTECT_RDONLY(_reg, _len) \ + ((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF)) + +static inline bool a6xx_has_gbif(struct adreno_gpu *gpu) +{ + if(adreno_is_a630(gpu)) + return false; + + return true; +} + +#define shadowptr(_a6xx_gpu, _ring) ((_a6xx_gpu)->shadow_iova + \ + ((_ring)->id * sizeof(uint32_t))) + +int a6xx_gmu_resume(struct a6xx_gpu *gpu); +int a6xx_gmu_stop(struct a6xx_gpu *gpu); + +int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu); + +bool a6xx_gmu_isidle(struct a6xx_gmu *gmu); + +int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state); +void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state); + +int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node); +void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu); + +void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp, + bool suspended); +unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu); + +void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state, + struct drm_printer *p); + +struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu); +int a6xx_gpu_state_put(struct msm_gpu_state *state); + +#endif /* __A6XX_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c new file mode 100644 index 000000000..a023d5f96 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -0,0 +1,1342 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 
 */

+#include <linux/ascii85.h>
+#include "msm_gem.h"
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.h"
+#include "a6xx_gpu_state.h"
+#include "a6xx_gmu.xml.h"
+
+struct a6xx_gpu_state_obj {
+	const void *handle;
+	u32 *data;
+};
+
+struct a6xx_gpu_state {
+	struct msm_gpu_state base;
+
+	struct a6xx_gpu_state_obj *gmu_registers;
+	int nr_gmu_registers;
+
+	struct a6xx_gpu_state_obj *registers;
+	int nr_registers;
+
+	struct a6xx_gpu_state_obj *shaders;
+	int nr_shaders;
+
+	struct a6xx_gpu_state_obj *clusters;
+	int nr_clusters;
+
+	struct a6xx_gpu_state_obj *dbgahb_clusters;
+	int nr_dbgahb_clusters;
+
+	struct a6xx_gpu_state_obj *indexed_regs;
+	int nr_indexed_regs;
+
+	struct a6xx_gpu_state_obj *debugbus;
+	int nr_debugbus;
+
+	struct a6xx_gpu_state_obj *vbif_debugbus;
+
+	struct a6xx_gpu_state_obj *cx_debugbus;
+	int nr_cx_debugbus;
+
+	struct msm_gpu_state_bo *gmu_log;
+	struct msm_gpu_state_bo *gmu_hfi;
+	struct msm_gpu_state_bo *gmu_debug;
+
+	s32 hfi_queue_history[2][HFI_HISTORY_SZ];
+
+	struct list_head objs;
+
+	bool gpu_initialized;
+};
+
+static inline int CRASHDUMP_WRITE(u64 *in, u32 reg, u32 val)
+{
+	in[0] = val;
+	in[1] = (((u64) reg) << 44 | (1 << 21) | 1);
+
+	return 2;
+}
+
+static inline int CRASHDUMP_READ(u64 *in, u32 reg, u32 dwords, u64 target)
+{
+	in[0] = target;
+	in[1] = (((u64) reg) << 44 | dwords);
+
+	return 2;
+}
+
+static inline int CRASHDUMP_FINI(u64 *in)
+{
+	in[0] = 0;
+	in[1] = 0;
+
+	return 2;
+}
+
+struct a6xx_crashdumper {
+	void *ptr;
+	struct drm_gem_object *bo;
+	u64 iova;
+};
+
+struct a6xx_state_memobj {
+	struct list_head node;
+	unsigned long long data[];
+};
+
+static void *state_kcalloc(struct a6xx_gpu_state *a6xx_state, int nr, size_t objsize)
+{
+	struct a6xx_state_memobj *obj =
+		kvzalloc((nr * objsize) + sizeof(*obj), GFP_KERNEL);
+
+	if (!obj)
+		return NULL;
+
+	list_add_tail(&obj->node, &a6xx_state->objs);
+	return &obj->data;
+}
+
+static void *state_kmemdup(struct a6xx_gpu_state *a6xx_state, void *src,
+		size_t size)
+{
+	void *dst = state_kcalloc(a6xx_state, 1, size);
+
+	if (dst)
+		memcpy(dst, src, size);
+	return dst;
+}
+
+/*
+ * Allocate 1MB for the crashdumper scratch region - 8k for the script and
+ * the rest for the data
+ */
+#define A6XX_CD_DATA_OFFSET	8192
+#define A6XX_CD_DATA_SIZE	(SZ_1M - 8192)
+
+static int a6xx_crashdumper_init(struct msm_gpu *gpu,
+		struct a6xx_crashdumper *dumper)
+{
+	dumper->ptr = msm_gem_kernel_new(gpu->dev,
+		SZ_1M, MSM_BO_WC, gpu->aspace,
+		&dumper->bo, &dumper->iova);
+
+	if (!IS_ERR(dumper->ptr))
+		msm_gem_object_set_name(dumper->bo, "crashdump");
+
+	return PTR_ERR_OR_ZERO(dumper->ptr);
+}
+
+static int a6xx_crashdumper_run(struct msm_gpu *gpu,
+		struct a6xx_crashdumper *dumper)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+	u32 val;
+	int ret;
+
+	if (IS_ERR_OR_NULL(dumper->ptr))
+		return -EINVAL;
+
+	if (!a6xx_gmu_sptprac_is_on(&a6xx_gpu->gmu))
+		return -EINVAL;
+
+	/* Make sure all pending memory writes are posted */
+	wmb();
+
+	gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE_LO, dumper->iova);
+
+	gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);
+
+	ret = gpu_poll_timeout(gpu, REG_A6XX_CP_CRASH_DUMP_STATUS, val,
+		val & 0x02, 100, 10000);
+
+	gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 0);
+
+	return ret;
+}
+
+/* read a value from the GX debug bus */
+static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
+		u32 *data)
+{
+	u32 reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block); + + gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_A, reg); + gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_B, reg); + gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_C, reg); + gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_D, reg); + + /* Wait 1 us to make sure the data is flowing */ + udelay(1); + + data[0] = gpu_read(gpu, REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2); + data[1] = gpu_read(gpu, REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1); + + return 2; +} + +#define cxdbg_write(ptr, offset, val) \ + msm_writel((val), (ptr) + ((offset) << 2)) + +#define cxdbg_read(ptr, offset) \ + msm_readl((ptr) + ((offset) << 2)) + +/* read a value from the CX debug bus */ +static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset, + u32 *data) +{ + u32 reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) | + A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block); + + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg); + + /* Wait 1 us to make sure the data is flowing */ + udelay(1); + + data[0] = cxdbg_read(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2); + data[1] = cxdbg_read(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1); + + return 2; +} + +/* Read a chunk of data from the VBIF debug bus */ +static int vbif_debugbus_read(struct msm_gpu *gpu, u32 ctrl0, u32 ctrl1, + u32 reg, int count, u32 *data) +{ + int i; + + gpu_write(gpu, ctrl0, reg); + + for (i = 0; i < count; i++) { + gpu_write(gpu, ctrl1, i); + data[i] = gpu_read(gpu, REG_A6XX_VBIF_TEST_BUS_OUT); + } + + return count; +} + +#define AXI_ARB_BLOCKS 2 +#define XIN_AXI_BLOCKS 5 +#define XIN_CORE_BLOCKS 4 + +#define VBIF_DEBUGBUS_BLOCK_SIZE \ + ((16 * AXI_ARB_BLOCKS) + \ + (18 * XIN_AXI_BLOCKS) + \ + (12 * XIN_CORE_BLOCKS)) + +static void a6xx_get_vbif_debugbus_block(struct msm_gpu *gpu, + struct a6xx_gpu_state *a6xx_state, + struct a6xx_gpu_state_obj *obj) +{ + u32 clk, *ptr; + int i; + + obj->data = state_kcalloc(a6xx_state, VBIF_DEBUGBUS_BLOCK_SIZE, + sizeof(u32)); + if (!obj->data) + return; + + obj->handle = NULL; + + /* Get the current clock setting */ + clk = gpu_read(gpu, REG_A6XX_VBIF_CLKON); + + /* Force on the bus so we can read it */ + gpu_write(gpu, REG_A6XX_VBIF_CLKON, + clk | A6XX_VBIF_CLKON_FORCE_ON_TESTBUS); + + /* We will read from BUS2 first, so disable BUS1 */ + gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS1_CTRL0, 0); + + /* Enable the VBIF bus for reading */ + gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS_OUT_CTRL, 1); + + ptr = obj->data; + + for (i = 0; i < AXI_ARB_BLOCKS; i++) + ptr += vbif_debugbus_read(gpu, + REG_A6XX_VBIF_TEST_BUS2_CTRL0, + REG_A6XX_VBIF_TEST_BUS2_CTRL1, + 1 << (i + 16), 16, ptr); + + for (i = 0; i < XIN_AXI_BLOCKS; i++) + ptr += vbif_debugbus_read(gpu, + REG_A6XX_VBIF_TEST_BUS2_CTRL0, + REG_A6XX_VBIF_TEST_BUS2_CTRL1, + 1 << i, 18, ptr); + + /* Stop BUS2 so we can turn on BUS1 */ + gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS2_CTRL0, 0); + + for (i = 0; i < XIN_CORE_BLOCKS; i++) + ptr += vbif_debugbus_read(gpu, + REG_A6XX_VBIF_TEST_BUS1_CTRL0, + REG_A6XX_VBIF_TEST_BUS1_CTRL1, + 1 << i, 12, ptr); + + /* Restore the VBIF clock setting */ + gpu_write(gpu, REG_A6XX_VBIF_CLKON, clk); +} + +static void a6xx_get_debugbus_block(struct msm_gpu *gpu, + struct a6xx_gpu_state *a6xx_state, + const struct a6xx_debugbus_block *block, + struct a6xx_gpu_state_obj *obj) +{ + int i; + u32 *ptr; + + obj->data = 
state_kcalloc(a6xx_state, block->count, sizeof(u64)); + if (!obj->data) + return; + + obj->handle = block; + + for (ptr = obj->data, i = 0; i < block->count; i++) + ptr += debugbus_read(gpu, block->id, i, ptr); +} + +static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg, + struct a6xx_gpu_state *a6xx_state, + const struct a6xx_debugbus_block *block, + struct a6xx_gpu_state_obj *obj) +{ + int i; + u32 *ptr; + + obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64)); + if (!obj->data) + return; + + obj->handle = block; + + for (ptr = obj->data, i = 0; i < block->count; i++) + ptr += cx_debugbus_read(cxdbg, block->id, i, ptr); +} + +static void a6xx_get_debugbus(struct msm_gpu *gpu, + struct a6xx_gpu_state *a6xx_state) +{ + struct resource *res; + void __iomem *cxdbg = NULL; + int nr_debugbus_blocks; + + /* Set up the GX debug bus */ + + gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT, + A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf)); + + gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM, + A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf)); + + gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0); + gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0); + gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0); + gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0); + + gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0, 0x76543210); + gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1, 0xFEDCBA98); + + gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0); + gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0); + gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0); + gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0); + + /* Set up the CX debug bus - it lives elsewhere in the system so do a + * temporary ioremap for the registers + */ + res = platform_get_resource_byname(gpu->pdev, IORESOURCE_MEM, + "cx_dbgc"); + + if (res) + cxdbg = ioremap(res->start, resource_size(res)); + + if (cxdbg) { + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT, + A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf)); + + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM, + A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf)); + + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0); + + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0, + 0x76543210); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1, + 0xFEDCBA98); + + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0); + } + + nr_debugbus_blocks = ARRAY_SIZE(a6xx_debugbus_blocks) + + (a6xx_has_gbif(to_adreno_gpu(gpu)) ? 1 : 0); + + a6xx_state->debugbus = state_kcalloc(a6xx_state, nr_debugbus_blocks, + sizeof(*a6xx_state->debugbus)); + + if (a6xx_state->debugbus) { + int i; + + for (i = 0; i < ARRAY_SIZE(a6xx_debugbus_blocks); i++) + a6xx_get_debugbus_block(gpu, + a6xx_state, + &a6xx_debugbus_blocks[i], + &a6xx_state->debugbus[i]); + + a6xx_state->nr_debugbus = ARRAY_SIZE(a6xx_debugbus_blocks); + + /* + * GBIF has same debugbus as of other GPU blocks, fall back to + * default path if GPU uses GBIF, also GBIF uses exactly same + * ID as of VBIF. 
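+	 * (This is why a6xx_gbif_debugbus_block in a6xx_gpu_state.h is
+	 * declared with the A6XX_DBGBUS_VBIF id.)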
+ */ + if (a6xx_has_gbif(to_adreno_gpu(gpu))) { + a6xx_get_debugbus_block(gpu, a6xx_state, + &a6xx_gbif_debugbus_block, + &a6xx_state->debugbus[i]); + + a6xx_state->nr_debugbus += 1; + } + } + + /* Dump the VBIF debugbus on applicable targets */ + if (!a6xx_has_gbif(to_adreno_gpu(gpu))) { + a6xx_state->vbif_debugbus = + state_kcalloc(a6xx_state, 1, + sizeof(*a6xx_state->vbif_debugbus)); + + if (a6xx_state->vbif_debugbus) + a6xx_get_vbif_debugbus_block(gpu, a6xx_state, + a6xx_state->vbif_debugbus); + } + + if (cxdbg) { + a6xx_state->cx_debugbus = + state_kcalloc(a6xx_state, + ARRAY_SIZE(a6xx_cx_debugbus_blocks), + sizeof(*a6xx_state->cx_debugbus)); + + if (a6xx_state->cx_debugbus) { + int i; + + for (i = 0; i < ARRAY_SIZE(a6xx_cx_debugbus_blocks); i++) + a6xx_get_cx_debugbus_block(cxdbg, + a6xx_state, + &a6xx_cx_debugbus_blocks[i], + &a6xx_state->cx_debugbus[i]); + + a6xx_state->nr_cx_debugbus = + ARRAY_SIZE(a6xx_cx_debugbus_blocks); + } + + iounmap(cxdbg); + } +} + +#define RANGE(reg, a) ((reg)[(a) + 1] - (reg)[(a)] + 1) + +/* Read a data cluster from behind the AHB aperture */ +static void a6xx_get_dbgahb_cluster(struct msm_gpu *gpu, + struct a6xx_gpu_state *a6xx_state, + const struct a6xx_dbgahb_cluster *dbgahb, + struct a6xx_gpu_state_obj *obj, + struct a6xx_crashdumper *dumper) +{ + u64 *in = dumper->ptr; + u64 out = dumper->iova + A6XX_CD_DATA_OFFSET; + size_t datasize; + int i, regcount = 0; + + for (i = 0; i < A6XX_NUM_CONTEXTS; i++) { + int j; + + in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL, + (dbgahb->statetype + i * 2) << 8); + + for (j = 0; j < dbgahb->count; j += 2) { + int count = RANGE(dbgahb->registers, j); + u32 offset = REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE + + dbgahb->registers[j] - (dbgahb->base >> 2); + + in += CRASHDUMP_READ(in, offset, count, out); + + out += count * sizeof(u32); + + if (i == 0) + regcount += count; + } + } + + CRASHDUMP_FINI(in); + + datasize = regcount * A6XX_NUM_CONTEXTS * sizeof(u32); + + if (WARN_ON(datasize > A6XX_CD_DATA_SIZE)) + return; + + if (a6xx_crashdumper_run(gpu, dumper)) + return; + + obj->handle = dbgahb; + obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET, + datasize); +} + +static void a6xx_get_dbgahb_clusters(struct msm_gpu *gpu, + struct a6xx_gpu_state *a6xx_state, + struct a6xx_crashdumper *dumper) +{ + int i; + + a6xx_state->dbgahb_clusters = state_kcalloc(a6xx_state, + ARRAY_SIZE(a6xx_dbgahb_clusters), + sizeof(*a6xx_state->dbgahb_clusters)); + + if (!a6xx_state->dbgahb_clusters) + return; + + a6xx_state->nr_dbgahb_clusters = ARRAY_SIZE(a6xx_dbgahb_clusters); + + for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_clusters); i++) + a6xx_get_dbgahb_cluster(gpu, a6xx_state, + &a6xx_dbgahb_clusters[i], + &a6xx_state->dbgahb_clusters[i], dumper); +} + +/* Read a data cluster from the CP aperture with the crashdumper */ +static void a6xx_get_cluster(struct msm_gpu *gpu, + struct a6xx_gpu_state *a6xx_state, + const struct a6xx_cluster *cluster, + struct a6xx_gpu_state_obj *obj, + struct a6xx_crashdumper *dumper) +{ + u64 *in = dumper->ptr; + u64 out = dumper->iova + A6XX_CD_DATA_OFFSET; + size_t datasize; + int i, regcount = 0; + + /* Some clusters need a selector register to be programmed too */ + if (cluster->sel_reg) + in += CRASHDUMP_WRITE(in, cluster->sel_reg, cluster->sel_val); + + for (i = 0; i < A6XX_NUM_CONTEXTS; i++) { + int j; + + in += CRASHDUMP_WRITE(in, REG_A6XX_CP_APERTURE_CNTL_CD, + (cluster->id << 8) | (i << 4) | i); + + for (j = 0; j < cluster->count; j += 2) { + int count = RANGE(cluster->registers, 
j); + + in += CRASHDUMP_READ(in, cluster->registers[j], + count, out); + + out += count * sizeof(u32); + + if (i == 0) + regcount += count; + } + } + + CRASHDUMP_FINI(in); + + datasize = regcount * A6XX_NUM_CONTEXTS * sizeof(u32); + + if (WARN_ON(datasize > A6XX_CD_DATA_SIZE)) + return; + + if (a6xx_crashdumper_run(gpu, dumper)) + return; + + obj->handle = cluster; + obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET, + datasize); +} + +static void a6xx_get_clusters(struct msm_gpu *gpu, + struct a6xx_gpu_state *a6xx_state, + struct a6xx_crashdumper *dumper) +{ + int i; + + a6xx_state->clusters = state_kcalloc(a6xx_state, + ARRAY_SIZE(a6xx_clusters), sizeof(*a6xx_state->clusters)); + + if (!a6xx_state->clusters) + return; + + a6xx_state->nr_clusters = ARRAY_SIZE(a6xx_clusters); + + for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) + a6xx_get_cluster(gpu, a6xx_state, &a6xx_clusters[i], + &a6xx_state->clusters[i], dumper); +} + +/* Read a shader / debug block from the HLSQ aperture with the crashdumper */ +static void a6xx_get_shader_block(struct msm_gpu *gpu, + struct a6xx_gpu_state *a6xx_state, + const struct a6xx_shader_block *block, + struct a6xx_gpu_state_obj *obj, + struct a6xx_crashdumper *dumper) +{ + u64 *in = dumper->ptr; + size_t datasize = block->size * A6XX_NUM_SHADER_BANKS * sizeof(u32); + int i; + + if (WARN_ON(datasize > A6XX_CD_DATA_SIZE)) + return; + + for (i = 0; i < A6XX_NUM_SHADER_BANKS; i++) { + in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL, + (block->type << 8) | i); + + in += CRASHDUMP_READ(in, REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE, + block->size, dumper->iova + A6XX_CD_DATA_OFFSET); + } + + CRASHDUMP_FINI(in); + + if (a6xx_crashdumper_run(gpu, dumper)) + return; + + obj->handle = block; + obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET, + datasize); +} + +static void a6xx_get_shaders(struct msm_gpu *gpu, + struct a6xx_gpu_state *a6xx_state, + struct a6xx_crashdumper *dumper) +{ + int i; + + a6xx_state->shaders = state_kcalloc(a6xx_state, + ARRAY_SIZE(a6xx_shader_blocks), sizeof(*a6xx_state->shaders)); + + if (!a6xx_state->shaders) + return; + + a6xx_state->nr_shaders = ARRAY_SIZE(a6xx_shader_blocks); + + for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) + a6xx_get_shader_block(gpu, a6xx_state, &a6xx_shader_blocks[i], + &a6xx_state->shaders[i], dumper); +} + +/* Read registers from behind the HLSQ aperture with the crashdumper */ +static void a6xx_get_crashdumper_hlsq_registers(struct msm_gpu *gpu, + struct a6xx_gpu_state *a6xx_state, + const struct a6xx_registers *regs, + struct a6xx_gpu_state_obj *obj, + struct a6xx_crashdumper *dumper) + +{ + u64 *in = dumper->ptr; + u64 out = dumper->iova + A6XX_CD_DATA_OFFSET; + int i, regcount = 0; + + in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL, regs->val1); + + for (i = 0; i < regs->count; i += 2) { + u32 count = RANGE(regs->registers, i); + u32 offset = REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE + + regs->registers[i] - (regs->val0 >> 2); + + in += CRASHDUMP_READ(in, offset, count, out); + + out += count * sizeof(u32); + regcount += count; + } + + CRASHDUMP_FINI(in); + + if (WARN_ON((regcount * sizeof(u32)) > A6XX_CD_DATA_SIZE)) + return; + + if (a6xx_crashdumper_run(gpu, dumper)) + return; + + obj->handle = regs; + obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET, + regcount * sizeof(u32)); +} + +/* Read a block of registers using the crashdumper */ +static void a6xx_get_crashdumper_registers(struct msm_gpu *gpu, + struct a6xx_gpu_state *a6xx_state, + 
const struct a6xx_registers *regs, + struct a6xx_gpu_state_obj *obj, + struct a6xx_crashdumper *dumper) + +{ + u64 *in = dumper->ptr; + u64 out = dumper->iova + A6XX_CD_DATA_OFFSET; + int i, regcount = 0; + + /* Some blocks might need to program a selector register first */ + if (regs->val0) + in += CRASHDUMP_WRITE(in, regs->val0, regs->val1); + + for (i = 0; i < regs->count; i += 2) { + u32 count = RANGE(regs->registers, i); + + in += CRASHDUMP_READ(in, regs->registers[i], count, out); + + out += count * sizeof(u32); + regcount += count; + } + + CRASHDUMP_FINI(in); + + if (WARN_ON((regcount * sizeof(u32)) > A6XX_CD_DATA_SIZE)) + return; + + if (a6xx_crashdumper_run(gpu, dumper)) + return; + + obj->handle = regs; + obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET, + regcount * sizeof(u32)); +} + +/* Read a block of registers via AHB */ +static void a6xx_get_ahb_gpu_registers(struct msm_gpu *gpu, + struct a6xx_gpu_state *a6xx_state, + const struct a6xx_registers *regs, + struct a6xx_gpu_state_obj *obj) +{ + int i, regcount = 0, index = 0; + + for (i = 0; i < regs->count; i += 2) + regcount += RANGE(regs->registers, i); + + obj->handle = (const void *) regs; + obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32)); + if (!obj->data) + return; + + for (i = 0; i < regs->count; i += 2) { + u32 count = RANGE(regs->registers, i); + int j; + + for (j = 0; j < count; j++) + obj->data[index++] = gpu_read(gpu, + regs->registers[i] + j); + } +} + +/* Read a block of GMU registers */ +static void _a6xx_get_gmu_registers(struct msm_gpu *gpu, + struct a6xx_gpu_state *a6xx_state, + const struct a6xx_registers *regs, + struct a6xx_gpu_state_obj *obj, + bool rscc) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + int i, regcount = 0, index = 0; + + for (i = 0; i < regs->count; i += 2) + regcount += RANGE(regs->registers, i); + + obj->handle = (const void *) regs; + obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32)); + if (!obj->data) + return; + + for (i = 0; i < regs->count; i += 2) { + u32 count = RANGE(regs->registers, i); + int j; + + for (j = 0; j < count; j++) { + u32 offset = regs->registers[i] + j; + u32 val; + + if (rscc) + val = gmu_read_rscc(gmu, offset); + else + val = gmu_read(gmu, offset); + + obj->data[index++] = val; + } + } +} + +static void a6xx_get_gmu_registers(struct msm_gpu *gpu, + struct a6xx_gpu_state *a6xx_state) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + a6xx_state->gmu_registers = state_kcalloc(a6xx_state, + 3, sizeof(*a6xx_state->gmu_registers)); + + if (!a6xx_state->gmu_registers) + return; + + a6xx_state->nr_gmu_registers = 3; + + /* Get the CX GMU registers from AHB */ + _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0], + &a6xx_state->gmu_registers[0], false); + _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[1], + &a6xx_state->gmu_registers[1], true); + + if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu)) + return; + + /* Set the fence to ALLOW mode so we can access the registers */ + gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0); + + _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[2], + &a6xx_state->gmu_registers[2], false); +} + +static struct msm_gpu_state_bo *a6xx_snapshot_gmu_bo( + struct a6xx_gpu_state *a6xx_state, struct a6xx_gmu_bo *bo) +{ + struct msm_gpu_state_bo *snapshot; + + if (!bo->size) + return NULL; + + snapshot = 
state_kcalloc(a6xx_state, 1, sizeof(*snapshot)); + if (!snapshot) + return NULL; + + snapshot->iova = bo->iova; + snapshot->size = bo->size; + snapshot->data = kvzalloc(snapshot->size, GFP_KERNEL); + if (!snapshot->data) + return NULL; + + memcpy(snapshot->data, bo->virt, bo->size); + + return snapshot; +} + +static void a6xx_snapshot_gmu_hfi_history(struct msm_gpu *gpu, + struct a6xx_gpu_state *a6xx_state) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + unsigned i, j; + + BUILD_BUG_ON(ARRAY_SIZE(gmu->queues) != ARRAY_SIZE(a6xx_state->hfi_queue_history)); + + for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) { + struct a6xx_hfi_queue *queue = &gmu->queues[i]; + for (j = 0; j < HFI_HISTORY_SZ; j++) { + unsigned idx = (j + queue->history_idx) % HFI_HISTORY_SZ; + a6xx_state->hfi_queue_history[i][j] = queue->history[idx]; + } + } +} + +#define A6XX_GBIF_REGLIST_SIZE 1 +static void a6xx_get_registers(struct msm_gpu *gpu, + struct a6xx_gpu_state *a6xx_state, + struct a6xx_crashdumper *dumper) +{ + int i, count = ARRAY_SIZE(a6xx_ahb_reglist) + + ARRAY_SIZE(a6xx_reglist) + + ARRAY_SIZE(a6xx_hlsq_reglist) + A6XX_GBIF_REGLIST_SIZE; + int index = 0; + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + + a6xx_state->registers = state_kcalloc(a6xx_state, + count, sizeof(*a6xx_state->registers)); + + if (!a6xx_state->registers) + return; + + a6xx_state->nr_registers = count; + + for (i = 0; i < ARRAY_SIZE(a6xx_ahb_reglist); i++) + a6xx_get_ahb_gpu_registers(gpu, + a6xx_state, &a6xx_ahb_reglist[i], + &a6xx_state->registers[index++]); + + if (a6xx_has_gbif(adreno_gpu)) + a6xx_get_ahb_gpu_registers(gpu, + a6xx_state, &a6xx_gbif_reglist, + &a6xx_state->registers[index++]); + else + a6xx_get_ahb_gpu_registers(gpu, + a6xx_state, &a6xx_vbif_reglist, + &a6xx_state->registers[index++]); + if (!dumper) { + /* + * We can't use the crashdumper when the SMMU is stalled, + * because the GPU has no memory access until we resume + * translation (but we don't want to do that until after + * we have captured as much useful GPU state as possible). 
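+	 * The AHB fallback (a6xx_get_ahb_gpu_registers()) reads each register
+	 * individually with gpu_read() and so needs no GPU memory access.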
+ * So instead collect registers via the CPU: + */ + for (i = 0; i < ARRAY_SIZE(a6xx_reglist); i++) + a6xx_get_ahb_gpu_registers(gpu, + a6xx_state, &a6xx_reglist[i], + &a6xx_state->registers[index++]); + return; + } + + for (i = 0; i < ARRAY_SIZE(a6xx_reglist); i++) + a6xx_get_crashdumper_registers(gpu, + a6xx_state, &a6xx_reglist[i], + &a6xx_state->registers[index++], + dumper); + + for (i = 0; i < ARRAY_SIZE(a6xx_hlsq_reglist); i++) + a6xx_get_crashdumper_hlsq_registers(gpu, + a6xx_state, &a6xx_hlsq_reglist[i], + &a6xx_state->registers[index++], + dumper); +} + +/* Read a block of data from an indexed register pair */ +static void a6xx_get_indexed_regs(struct msm_gpu *gpu, + struct a6xx_gpu_state *a6xx_state, + const struct a6xx_indexed_registers *indexed, + struct a6xx_gpu_state_obj *obj) +{ + int i; + + obj->handle = (const void *) indexed; + obj->data = state_kcalloc(a6xx_state, indexed->count, sizeof(u32)); + if (!obj->data) + return; + + /* All the indexed banks start at address 0 */ + gpu_write(gpu, indexed->addr, 0); + + /* Read the data - each read increments the internal address by 1 */ + for (i = 0; i < indexed->count; i++) + obj->data[i] = gpu_read(gpu, indexed->data); +} + +static void a6xx_get_indexed_registers(struct msm_gpu *gpu, + struct a6xx_gpu_state *a6xx_state) +{ + u32 mempool_size; + int count = ARRAY_SIZE(a6xx_indexed_reglist) + 1; + int i; + + a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count, + sizeof(*a6xx_state->indexed_regs)); + if (!a6xx_state->indexed_regs) + return; + + for (i = 0; i < ARRAY_SIZE(a6xx_indexed_reglist); i++) + a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_indexed_reglist[i], + &a6xx_state->indexed_regs[i]); + + /* Set the CP mempool size to 0 to stabilize it while dumping */ + mempool_size = gpu_read(gpu, REG_A6XX_CP_MEM_POOL_SIZE); + gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 0); + + /* Get the contents of the CP mempool */ + a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_cp_mempool_indexed, + &a6xx_state->indexed_regs[i]); + + /* + * Offset 0x2000 in the mempool is the size - copy the saved size over + * so the data is consistent + */ + a6xx_state->indexed_regs[i].data[0x2000] = mempool_size; + + /* Restore the size in the hardware */ + gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, mempool_size); + + a6xx_state->nr_indexed_regs = count; +} + +struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu) +{ + struct a6xx_crashdumper _dumper = { 0 }, *dumper = NULL; + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + struct a6xx_gpu_state *a6xx_state = kzalloc(sizeof(*a6xx_state), + GFP_KERNEL); + bool stalled = !!(gpu_read(gpu, REG_A6XX_RBBM_STATUS3) & + A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT); + + if (!a6xx_state) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&a6xx_state->objs); + + /* Get the generic state from the adreno core */ + adreno_gpu_state_get(gpu, &a6xx_state->base); + + a6xx_get_gmu_registers(gpu, a6xx_state); + + a6xx_state->gmu_log = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.log); + a6xx_state->gmu_hfi = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.hfi); + a6xx_state->gmu_debug = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.debug); + + a6xx_snapshot_gmu_hfi_history(gpu, a6xx_state); + + /* If GX isn't on the rest of the data isn't going to be accessible */ + if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu)) + return &a6xx_state->base; + + /* Get the banks of indexed registers */ + a6xx_get_indexed_registers(gpu, a6xx_state); + + /* + * Try to initialize the 
crashdumper, if we are not dumping state + * with the SMMU stalled. The crashdumper needs memory access to + * write out GPU state, so we need to skip this when the SMMU is + * stalled in response to an iova fault + */ + if (!stalled && !gpu->needs_hw_init && + !a6xx_crashdumper_init(gpu, &_dumper)) { + dumper = &_dumper; + } + + a6xx_get_registers(gpu, a6xx_state, dumper); + + if (dumper) { + a6xx_get_shaders(gpu, a6xx_state, dumper); + a6xx_get_clusters(gpu, a6xx_state, dumper); + a6xx_get_dbgahb_clusters(gpu, a6xx_state, dumper); + + msm_gem_kernel_put(dumper->bo, gpu->aspace); + } + + if (snapshot_debugbus) + a6xx_get_debugbus(gpu, a6xx_state); + + a6xx_state->gpu_initialized = !gpu->needs_hw_init; + + return &a6xx_state->base; +} + +static void a6xx_gpu_state_destroy(struct kref *kref) +{ + struct a6xx_state_memobj *obj, *tmp; + struct msm_gpu_state *state = container_of(kref, + struct msm_gpu_state, ref); + struct a6xx_gpu_state *a6xx_state = container_of(state, + struct a6xx_gpu_state, base); + + if (a6xx_state->gmu_log) + kvfree(a6xx_state->gmu_log->data); + + if (a6xx_state->gmu_hfi) + kvfree(a6xx_state->gmu_hfi->data); + + if (a6xx_state->gmu_debug) + kvfree(a6xx_state->gmu_debug->data); + + list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node) { + list_del(&obj->node); + kvfree(obj); + } + + adreno_gpu_state_destroy(state); + kfree(a6xx_state); +} + +int a6xx_gpu_state_put(struct msm_gpu_state *state) +{ + if (IS_ERR_OR_NULL(state)) + return 1; + + return kref_put(&state->ref, a6xx_gpu_state_destroy); +} + +static void a6xx_show_registers(const u32 *registers, u32 *data, size_t count, + struct drm_printer *p) +{ + int i, index = 0; + + if (!data) + return; + + for (i = 0; i < count; i += 2) { + u32 count = RANGE(registers, i); + u32 offset = registers[i]; + int j; + + for (j = 0; j < count; index++, offset++, j++) { + if (data[index] == 0xdeafbead) + continue; + + drm_printf(p, " - { offset: 0x%06x, value: 0x%08x }\n", + offset << 2, data[index]); + } + } +} + +static void print_ascii85(struct drm_printer *p, size_t len, u32 *data) +{ + char out[ASCII85_BUFSZ]; + long i, l, datalen = 0; + + for (i = 0; i < len >> 2; i++) { + if (data[i]) + datalen = (i + 1) << 2; + } + + if (datalen == 0) + return; + + drm_puts(p, " data: !!ascii85 |\n"); + drm_puts(p, " "); + + + l = ascii85_encode_len(datalen); + + for (i = 0; i < l; i++) + drm_puts(p, ascii85_encode(data[i], out)); + + drm_puts(p, "\n"); +} + +static void print_name(struct drm_printer *p, const char *fmt, const char *name) +{ + drm_puts(p, fmt); + drm_puts(p, name); + drm_puts(p, "\n"); +} + +static void a6xx_show_shader(struct a6xx_gpu_state_obj *obj, + struct drm_printer *p) +{ + const struct a6xx_shader_block *block = obj->handle; + int i; + + if (!obj->handle) + return; + + print_name(p, " - type: ", block->name); + + for (i = 0; i < A6XX_NUM_SHADER_BANKS; i++) { + drm_printf(p, " - bank: %d\n", i); + drm_printf(p, " size: %d\n", block->size); + + if (!obj->data) + continue; + + print_ascii85(p, block->size << 2, + obj->data + (block->size * i)); + } +} + +static void a6xx_show_cluster_data(const u32 *registers, int size, u32 *data, + struct drm_printer *p) +{ + int ctx, index = 0; + + for (ctx = 0; ctx < A6XX_NUM_CONTEXTS; ctx++) { + int j; + + drm_printf(p, " - context: %d\n", ctx); + + for (j = 0; j < size; j += 2) { + u32 count = RANGE(registers, j); + u32 offset = registers[j]; + int k; + + for (k = 0; k < count; index++, offset++, k++) { + if (data[index] == 0xdeafbead) + continue; + + drm_printf(p, " - { 
offset: 0x%06x, value: 0x%08x }\n", + offset << 2, data[index]); + } + } + } +} + +static void a6xx_show_dbgahb_cluster(struct a6xx_gpu_state_obj *obj, + struct drm_printer *p) +{ + const struct a6xx_dbgahb_cluster *dbgahb = obj->handle; + + if (dbgahb) { + print_name(p, " - cluster-name: ", dbgahb->name); + a6xx_show_cluster_data(dbgahb->registers, dbgahb->count, + obj->data, p); + } +} + +static void a6xx_show_cluster(struct a6xx_gpu_state_obj *obj, + struct drm_printer *p) +{ + const struct a6xx_cluster *cluster = obj->handle; + + if (cluster) { + print_name(p, " - cluster-name: ", cluster->name); + a6xx_show_cluster_data(cluster->registers, cluster->count, + obj->data, p); + } +} + +static void a6xx_show_indexed_regs(struct a6xx_gpu_state_obj *obj, + struct drm_printer *p) +{ + const struct a6xx_indexed_registers *indexed = obj->handle; + + if (!indexed) + return; + + print_name(p, " - regs-name: ", indexed->name); + drm_printf(p, " dwords: %d\n", indexed->count); + + print_ascii85(p, indexed->count << 2, obj->data); +} + +static void a6xx_show_debugbus_block(const struct a6xx_debugbus_block *block, + u32 *data, struct drm_printer *p) +{ + if (block) { + print_name(p, " - debugbus-block: ", block->name); + + /* + * count for regular debugbus data is in quadwords, + * but print the size in dwords for consistency + */ + drm_printf(p, " count: %d\n", block->count << 1); + + print_ascii85(p, block->count << 3, data); + } +} + +static void a6xx_show_debugbus(struct a6xx_gpu_state *a6xx_state, + struct drm_printer *p) +{ + int i; + + for (i = 0; i < a6xx_state->nr_debugbus; i++) { + struct a6xx_gpu_state_obj *obj = &a6xx_state->debugbus[i]; + + a6xx_show_debugbus_block(obj->handle, obj->data, p); + } + + if (a6xx_state->vbif_debugbus) { + struct a6xx_gpu_state_obj *obj = a6xx_state->vbif_debugbus; + + drm_puts(p, " - debugbus-block: A6XX_DBGBUS_VBIF\n"); + drm_printf(p, " count: %d\n", VBIF_DEBUGBUS_BLOCK_SIZE); + + /* vbif debugbus data is in dwords. Confusing, huh? 
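+	 * (Regular debugbus blocks return two dwords per selected offset, so
+	 * their counts are kept in quadwords; vbif_debugbus_read() returns
+	 * one dword per read, so this count is already in dwords.)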
*/ + print_ascii85(p, VBIF_DEBUGBUS_BLOCK_SIZE << 2, obj->data); + } + + for (i = 0; i < a6xx_state->nr_cx_debugbus; i++) { + struct a6xx_gpu_state_obj *obj = &a6xx_state->cx_debugbus[i]; + + a6xx_show_debugbus_block(obj->handle, obj->data, p); + } +} + +void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state, + struct drm_printer *p) +{ + struct a6xx_gpu_state *a6xx_state = container_of(state, + struct a6xx_gpu_state, base); + int i; + + if (IS_ERR_OR_NULL(state)) + return; + + drm_printf(p, "gpu-initialized: %d\n", a6xx_state->gpu_initialized); + + adreno_show(gpu, state, p); + + drm_puts(p, "gmu-log:\n"); + if (a6xx_state->gmu_log) { + struct msm_gpu_state_bo *gmu_log = a6xx_state->gmu_log; + + drm_printf(p, " iova: 0x%016llx\n", gmu_log->iova); + drm_printf(p, " size: %zu\n", gmu_log->size); + adreno_show_object(p, &gmu_log->data, gmu_log->size, + &gmu_log->encoded); + } + + drm_puts(p, "gmu-hfi:\n"); + if (a6xx_state->gmu_hfi) { + struct msm_gpu_state_bo *gmu_hfi = a6xx_state->gmu_hfi; + unsigned i, j; + + drm_printf(p, " iova: 0x%016llx\n", gmu_hfi->iova); + drm_printf(p, " size: %zu\n", gmu_hfi->size); + for (i = 0; i < ARRAY_SIZE(a6xx_state->hfi_queue_history); i++) { + drm_printf(p, " queue-history[%u]:", i); + for (j = 0; j < HFI_HISTORY_SZ; j++) { + drm_printf(p, " %d", a6xx_state->hfi_queue_history[i][j]); + } + drm_printf(p, "\n"); + } + adreno_show_object(p, &gmu_hfi->data, gmu_hfi->size, + &gmu_hfi->encoded); + } + + drm_puts(p, "gmu-debug:\n"); + if (a6xx_state->gmu_debug) { + struct msm_gpu_state_bo *gmu_debug = a6xx_state->gmu_debug; + + drm_printf(p, " iova: 0x%016llx\n", gmu_debug->iova); + drm_printf(p, " size: %zu\n", gmu_debug->size); + adreno_show_object(p, &gmu_debug->data, gmu_debug->size, + &gmu_debug->encoded); + } + + drm_puts(p, "registers:\n"); + for (i = 0; i < a6xx_state->nr_registers; i++) { + struct a6xx_gpu_state_obj *obj = &a6xx_state->registers[i]; + const struct a6xx_registers *regs = obj->handle; + + if (!obj->handle) + continue; + + a6xx_show_registers(regs->registers, obj->data, regs->count, p); + } + + drm_puts(p, "registers-gmu:\n"); + for (i = 0; i < a6xx_state->nr_gmu_registers; i++) { + struct a6xx_gpu_state_obj *obj = &a6xx_state->gmu_registers[i]; + const struct a6xx_registers *regs = obj->handle; + + if (!obj->handle) + continue; + + a6xx_show_registers(regs->registers, obj->data, regs->count, p); + } + + drm_puts(p, "indexed-registers:\n"); + for (i = 0; i < a6xx_state->nr_indexed_regs; i++) + a6xx_show_indexed_regs(&a6xx_state->indexed_regs[i], p); + + drm_puts(p, "shader-blocks:\n"); + for (i = 0; i < a6xx_state->nr_shaders; i++) + a6xx_show_shader(&a6xx_state->shaders[i], p); + + drm_puts(p, "clusters:\n"); + for (i = 0; i < a6xx_state->nr_clusters; i++) + a6xx_show_cluster(&a6xx_state->clusters[i], p); + + for (i = 0; i < a6xx_state->nr_dbgahb_clusters; i++) + a6xx_show_dbgahb_cluster(&a6xx_state->dbgahb_clusters[i], p); + + drm_puts(p, "debugbus:\n"); + a6xx_show_debugbus(a6xx_state, p); +} diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h new file mode 100644 index 000000000..3bd2065a9 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h @@ -0,0 +1,446 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 
*/ + +#ifndef _A6XX_CRASH_DUMP_H_ +#define _A6XX_CRASH_DUMP_H_ + +#include "a6xx.xml.h" + +#define A6XX_NUM_CONTEXTS 2 +#define A6XX_NUM_SHADER_BANKS 3 + +static const u32 a6xx_gras_cluster[] = { + 0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809d, 0x80a0, 0x80a6, + 0x80af, 0x80f1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110, + 0x8400, 0x840b, +}; + +static const u32 a6xx_ps_cluster_rac[] = { + 0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881e, 0x8820, 0x8865, + 0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898, + 0x88c0, 0x88c1, 0x88d0, 0x88e3, 0x8900, 0x890c, 0x890f, 0x891a, + 0x8c00, 0x8c01, 0x8c08, 0x8c10, 0x8c17, 0x8c1f, 0x8c26, 0x8c33, +}; + +static const u32 a6xx_ps_cluster_rbp[] = { + 0x88f0, 0x88f3, 0x890d, 0x890e, 0x8927, 0x8928, 0x8bf0, 0x8bf1, + 0x8c02, 0x8c07, 0x8c11, 0x8c16, 0x8c20, 0x8c25, +}; + +static const u32 a6xx_ps_cluster[] = { + 0x9200, 0x9216, 0x9218, 0x9236, 0x9300, 0x9306, +}; + +static const u32 a6xx_fe_cluster[] = { + 0x9300, 0x9306, 0x9800, 0x9806, 0x9b00, 0x9b07, 0xa000, 0xa009, + 0xa00e, 0xa0ef, 0xa0f8, 0xa0f8, +}; + +static const u32 a6xx_pc_vs_cluster[] = { + 0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9b00, 0x9b07, +}; + +#define CLUSTER_FE 0 +#define CLUSTER_SP_VS 1 +#define CLUSTER_PC_VS 2 +#define CLUSTER_GRAS 3 +#define CLUSTER_SP_PS 4 +#define CLUSTER_PS 5 + +#define CLUSTER(_id, _reg, _sel_reg, _sel_val) \ + { .id = _id, .name = #_id,\ + .registers = _reg, \ + .count = ARRAY_SIZE(_reg), \ + .sel_reg = _sel_reg, .sel_val = _sel_val } + +static const struct a6xx_cluster { + u32 id; + const char *name; + const u32 *registers; + size_t count; + u32 sel_reg; + u32 sel_val; +} a6xx_clusters[] = { + CLUSTER(CLUSTER_GRAS, a6xx_gras_cluster, 0, 0), + CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rac, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x0), + CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rbp, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x9), + CLUSTER(CLUSTER_PS, a6xx_ps_cluster, 0, 0), + CLUSTER(CLUSTER_FE, a6xx_fe_cluster, 0, 0), + CLUSTER(CLUSTER_PC_VS, a6xx_pc_vs_cluster, 0, 0), +}; + +static const u32 a6xx_sp_vs_hlsq_cluster[] = { + 0xb800, 0xb803, 0xb820, 0xb822, +}; + +static const u32 a6xx_sp_vs_sp_cluster[] = { + 0xa800, 0xa824, 0xa830, 0xa83c, 0xa840, 0xa864, 0xa870, 0xa895, + 0xa8a0, 0xa8af, 0xa8c0, 0xa8c3, +}; + +static const u32 a6xx_hlsq_duplicate_cluster[] = { + 0xbb10, 0xbb11, 0xbb20, 0xbb29, +}; + +static const u32 a6xx_hlsq_2d_duplicate_cluster[] = { + 0xbd80, 0xbd80, +}; + +static const u32 a6xx_sp_duplicate_cluster[] = { + 0xab00, 0xab00, 0xab04, 0xab05, 0xab10, 0xab1b, 0xab20, 0xab20, +}; + +static const u32 a6xx_tp_duplicate_cluster[] = { + 0xb300, 0xb307, 0xb309, 0xb309, 0xb380, 0xb382, +}; + +static const u32 a6xx_sp_ps_hlsq_cluster[] = { + 0xb980, 0xb980, 0xb982, 0xb987, 0xb990, 0xb99b, 0xb9a0, 0xb9a2, + 0xb9c0, 0xb9c9, +}; + +static const u32 a6xx_sp_ps_hlsq_2d_cluster[] = { + 0xbd80, 0xbd80, +}; + +static const u32 a6xx_sp_ps_sp_cluster[] = { + 0xa980, 0xa9a8, 0xa9b0, 0xa9bc, 0xa9d0, 0xa9d3, 0xa9e0, 0xa9f3, + 0xaa00, 0xaa00, 0xaa30, 0xaa31, +}; + +static const u32 a6xx_sp_ps_sp_2d_cluster[] = { + 0xacc0, 0xacc0, +}; + +static const u32 a6xx_sp_ps_tp_cluster[] = { + 0xb180, 0xb183, 0xb190, 0xb191, +}; + +static const u32 a6xx_sp_ps_tp_2d_cluster[] = { + 0xb4c0, 0xb4d1, +}; + +#define CLUSTER_DBGAHB(_id, _base, _type, _reg) \ + { .name = #_id, .statetype = _type, .base = _base, \ + .registers = _reg, .count = ARRAY_SIZE(_reg) } + +static const struct a6xx_dbgahb_cluster { + const char *name; + u32 statetype; + u32 base; + const u32 
*registers; + size_t count; +} a6xx_dbgahb_clusters[] = { + CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002e000, 0x41, a6xx_sp_vs_hlsq_cluster), + CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002a000, 0x21, a6xx_sp_vs_sp_cluster), + CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002e000, 0x41, a6xx_hlsq_duplicate_cluster), + CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002f000, 0x45, a6xx_hlsq_2d_duplicate_cluster), + CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002a000, 0x21, a6xx_sp_duplicate_cluster), + CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002c000, 0x1, a6xx_tp_duplicate_cluster), + CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002e000, 0x42, a6xx_sp_ps_hlsq_cluster), + CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002f000, 0x46, a6xx_sp_ps_hlsq_2d_cluster), + CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002a000, 0x22, a6xx_sp_ps_sp_cluster), + CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002b000, 0x26, a6xx_sp_ps_sp_2d_cluster), + CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002c000, 0x2, a6xx_sp_ps_tp_cluster), + CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002d000, 0x6, a6xx_sp_ps_tp_2d_cluster), + CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002e000, 0x42, a6xx_hlsq_duplicate_cluster), + CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002a000, 0x22, a6xx_sp_duplicate_cluster), + CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002c000, 0x2, a6xx_tp_duplicate_cluster), +}; + +static const u32 a6xx_hlsq_registers[] = { + 0xbe00, 0xbe01, 0xbe04, 0xbe05, 0xbe08, 0xbe09, 0xbe10, 0xbe15, + 0xbe20, 0xbe23, +}; + +static const u32 a6xx_sp_registers[] = { + 0xae00, 0xae04, 0xae0c, 0xae0c, 0xae0f, 0xae2b, 0xae30, 0xae32, + 0xae35, 0xae35, 0xae3a, 0xae3f, 0xae50, 0xae52, +}; + +static const u32 a6xx_tp_registers[] = { + 0xb600, 0xb601, 0xb604, 0xb605, 0xb610, 0xb61b, 0xb620, 0xb623, +}; + +struct a6xx_registers { + const u32 *registers; + size_t count; + u32 val0; + u32 val1; +}; + +#define HLSQ_DBG_REGS(_base, _type, _array) \ + { .val0 = _base, .val1 = _type, .registers = _array, \ + .count = ARRAY_SIZE(_array), } + +static const struct a6xx_registers a6xx_hlsq_reglist[] = { + HLSQ_DBG_REGS(0x0002F800, 0x40, a6xx_hlsq_registers), + HLSQ_DBG_REGS(0x0002B800, 0x20, a6xx_sp_registers), + HLSQ_DBG_REGS(0x0002D800, 0x0, a6xx_tp_registers), +}; + +#define SHADER(_type, _size) \ + { .type = _type, .name = #_type, .size = _size } + +static const struct a6xx_shader_block { + const char *name; + u32 type; + u32 size; +} a6xx_shader_blocks[] = { + SHADER(A6XX_TP0_TMO_DATA, 0x200), + SHADER(A6XX_TP0_SMO_DATA, 0x80), + SHADER(A6XX_TP0_MIPMAP_BASE_DATA, 0x3c0), + SHADER(A6XX_TP1_TMO_DATA, 0x200), + SHADER(A6XX_TP1_SMO_DATA, 0x80), + SHADER(A6XX_TP1_MIPMAP_BASE_DATA, 0x3c0), + SHADER(A6XX_SP_INST_DATA, 0x800), + SHADER(A6XX_SP_LB_0_DATA, 0x800), + SHADER(A6XX_SP_LB_1_DATA, 0x800), + SHADER(A6XX_SP_LB_2_DATA, 0x800), + SHADER(A6XX_SP_LB_3_DATA, 0x800), + SHADER(A6XX_SP_LB_4_DATA, 0x800), + SHADER(A6XX_SP_LB_5_DATA, 0x200), + SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x800), + SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280), + SHADER(A6XX_SP_UAV_DATA, 0x80), + SHADER(A6XX_SP_INST_TAG, 0x80), + SHADER(A6XX_SP_CB_BINDLESS_TAG, 0x80), + SHADER(A6XX_SP_TMO_UMO_TAG, 0x80), + SHADER(A6XX_SP_SMO_TAG, 0x80), + SHADER(A6XX_SP_STATE_DATA, 0x3f), + SHADER(A6XX_HLSQ_CHUNK_CVS_RAM, 0x1c0), + SHADER(A6XX_HLSQ_CHUNK_CPS_RAM, 0x280), + SHADER(A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40), + SHADER(A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40), + SHADER(A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4), + SHADER(A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4), + SHADER(A6XX_HLSQ_CVS_MISC_RAM, 0x1c0), + SHADER(A6XX_HLSQ_CPS_MISC_RAM, 0x580), + SHADER(A6XX_HLSQ_INST_RAM, 0x800), + SHADER(A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800), + SHADER(A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800), + 
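+	/* sizes are in dwords per shader bank; the *_TAG entries below are
+	 * the much smaller tag RAMs for the data RAMs captured above
+	 */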
SHADER(A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8), + SHADER(A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4), + SHADER(A6XX_HLSQ_INST_RAM_TAG, 0x80), + SHADER(A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xc), + SHADER(A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10), + SHADER(A6XX_HLSQ_PWR_REST_RAM, 0x28), + SHADER(A6XX_HLSQ_PWR_REST_TAG, 0x14), + SHADER(A6XX_HLSQ_DATAPATH_META, 0x40), + SHADER(A6XX_HLSQ_FRONTEND_META, 0x40), + SHADER(A6XX_HLSQ_INDIRECT_META, 0x40), +}; + +static const u32 a6xx_rb_rac_registers[] = { + 0x8e04, 0x8e05, 0x8e07, 0x8e08, 0x8e10, 0x8e1c, 0x8e20, 0x8e25, + 0x8e28, 0x8e28, 0x8e2c, 0x8e2f, 0x8e50, 0x8e52, +}; + +static const u32 a6xx_rb_rbp_registers[] = { + 0x8e01, 0x8e01, 0x8e0c, 0x8e0c, 0x8e3b, 0x8e3e, 0x8e40, 0x8e43, + 0x8e53, 0x8e5f, 0x8e70, 0x8e77, +}; + +static const u32 a6xx_registers[] = { + /* RBBM */ + 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001b, + 0x001e, 0x0032, 0x0038, 0x003c, 0x0042, 0x0042, 0x0044, 0x0044, + 0x0047, 0x0047, 0x0056, 0x0056, 0x00ad, 0x00ae, 0x00b0, 0x00fb, + 0x0100, 0x011d, 0x0200, 0x020d, 0x0218, 0x023d, 0x0400, 0x04f9, + 0x0500, 0x0500, 0x0505, 0x050b, 0x050e, 0x0511, 0x0533, 0x0533, + 0x0540, 0x0555, + /* CP */ + 0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824, + 0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084f, 0x086f, + 0x0880, 0x088a, 0x08a0, 0x08ab, 0x08c0, 0x08c4, 0x08d0, 0x08dd, + 0x08f0, 0x08f3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093e, + 0x0942, 0x094d, 0x0980, 0x0984, 0x098d, 0x0996, 0x0998, 0x099e, + 0x09a0, 0x09a6, 0x09a8, 0x09ae, 0x09b0, 0x09b1, 0x09c2, 0x09c8, + 0x0a00, 0x0a03, + /* VSC */ + 0x0c00, 0x0c04, 0x0c06, 0x0c06, 0x0c10, 0x0cd9, 0x0e00, 0x0e0e, + /* UCHE */ + 0x0e10, 0x0e13, 0x0e17, 0x0e19, 0x0e1c, 0x0e2b, 0x0e30, 0x0e32, + 0x0e38, 0x0e39, + /* GRAS */ + 0x8600, 0x8601, 0x8610, 0x861b, 0x8620, 0x8620, 0x8628, 0x862b, + 0x8630, 0x8637, + /* VPC */ + 0x9600, 0x9604, 0x9624, 0x9637, + /* PC */ + 0x9e00, 0x9e01, 0x9e03, 0x9e0e, 0x9e11, 0x9e16, 0x9e19, 0x9e19, + 0x9e1c, 0x9e1c, 0x9e20, 0x9e23, 0x9e30, 0x9e31, 0x9e34, 0x9e34, + 0x9e70, 0x9e72, 0x9e78, 0x9e79, 0x9e80, 0x9fff, + /* VFD */ + 0xa600, 0xa601, 0xa603, 0xa603, 0xa60a, 0xa60a, 0xa610, 0xa617, + 0xa630, 0xa630, +}; + +#define REGS(_array, _sel_reg, _sel_val) \ + { .registers = _array, .count = ARRAY_SIZE(_array), \ + .val0 = _sel_reg, .val1 = _sel_val } + +static const struct a6xx_registers a6xx_reglist[] = { + REGS(a6xx_registers, 0, 0), + REGS(a6xx_rb_rac_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0), + REGS(a6xx_rb_rbp_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 9), +}; + +static const u32 a6xx_ahb_registers[] = { + /* RBBM_STATUS - RBBM_STATUS3 */ + 0x210, 0x213, + /* CP_STATUS_1 */ + 0x825, 0x825, +}; + +static const u32 a6xx_vbif_registers[] = { + 0x3000, 0x3007, 0x300c, 0x3014, 0x3018, 0x302d, 0x3030, 0x3031, + 0x3034, 0x3036, 0x303c, 0x303d, 0x3040, 0x3040, 0x3042, 0x3042, + 0x3049, 0x3049, 0x3058, 0x3058, 0x305a, 0x3061, 0x3064, 0x3068, + 0x306c, 0x306d, 0x3080, 0x3088, 0x308b, 0x308c, 0x3090, 0x3094, + 0x3098, 0x3098, 0x309c, 0x309c, 0x30c0, 0x30c0, 0x30c8, 0x30c8, + 0x30d0, 0x30d0, 0x30d8, 0x30d8, 0x30e0, 0x30e0, 0x3100, 0x3100, + 0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120, + 0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154, + 0x3156, 0x3156, 0x3158, 0x3158, 0x315a, 0x315a, 0x315c, 0x315c, + 0x315e, 0x315e, 0x3160, 0x3160, 0x3162, 0x3162, 0x340c, 0x340c, + 0x3410, 0x3410, 0x3800, 0x3801, +}; + +static const u32 a6xx_gbif_registers[] = { + 0x3C00, 0X3C0B, 0X3C40, 0X3C47, 0X3CC0, 0X3CD1, 0xE3A, 0xE3A, 
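+	/* each pair is an inclusive [start, end] range consumed via RANGE();
+	 * e.g. the pair { 0x3c00, 0x3c0b } covers 0x3c0b - 0x3c00 + 1 = 12
+	 * registers
+	 */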
+}; + +static const struct a6xx_registers a6xx_ahb_reglist[] = { + REGS(a6xx_ahb_registers, 0, 0), +}; + +static const struct a6xx_registers a6xx_vbif_reglist = + REGS(a6xx_vbif_registers, 0, 0); + +static const struct a6xx_registers a6xx_gbif_reglist = + REGS(a6xx_gbif_registers, 0, 0); + +static const u32 a6xx_gmu_gx_registers[] = { + /* GMU GX */ + 0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b, + 0x001e, 0x001e, 0x0020, 0x0023, 0x0026, 0x0026, 0x0028, 0x002b, + 0x002e, 0x002e, 0x0030, 0x0033, 0x0036, 0x0036, 0x0038, 0x003b, + 0x003e, 0x003e, 0x0040, 0x0043, 0x0046, 0x0046, 0x0080, 0x0084, + 0x0100, 0x012b, 0x0140, 0x0140, +}; + +static const u32 a6xx_gmu_cx_registers[] = { + /* GMU CX */ + 0x4c00, 0x4c07, 0x4c10, 0x4c12, 0x4d00, 0x4d00, 0x4d07, 0x4d0a, + 0x5000, 0x5004, 0x5007, 0x5008, 0x500b, 0x500c, 0x500f, 0x501c, + 0x5024, 0x502a, 0x502d, 0x5030, 0x5040, 0x5053, 0x5087, 0x5089, + 0x50a0, 0x50a2, 0x50a4, 0x50af, 0x50c0, 0x50c3, 0x50d0, 0x50d0, + 0x50e4, 0x50e4, 0x50e8, 0x50ec, 0x5100, 0x5103, 0x5140, 0x5140, + 0x5142, 0x5144, 0x514c, 0x514d, 0x514f, 0x5151, 0x5154, 0x5154, + 0x5157, 0x5158, 0x515d, 0x515d, 0x5162, 0x5162, 0x5164, 0x5165, + 0x5180, 0x5186, 0x5190, 0x519e, 0x51c0, 0x51c0, 0x51c5, 0x51cc, + 0x51e0, 0x51e2, 0x51f0, 0x51f0, 0x5200, 0x5201, + /* GMU AO */ + 0x9300, 0x9316, 0x9400, 0x9400, + /* GPU CC */ + 0x9800, 0x9812, 0x9840, 0x9852, 0x9c00, 0x9c04, 0x9c07, 0x9c0b, + 0x9c15, 0x9c1c, 0x9c1e, 0x9c2d, 0x9c3c, 0x9c3d, 0x9c3f, 0x9c40, + 0x9c42, 0x9c49, 0x9c58, 0x9c5a, 0x9d40, 0x9d5e, 0xa000, 0xa002, + 0xa400, 0xa402, 0xac00, 0xac02, 0xb000, 0xb002, 0xb400, 0xb402, + 0xb800, 0xb802, + /* GPU CC ACD */ + 0xbc00, 0xbc16, 0xbc20, 0xbc27, +}; + +static const u32 a6xx_gmu_cx_rscc_registers[] = { + /* GPU RSCC */ + 0x008c, 0x008c, 0x0101, 0x0102, 0x0340, 0x0342, 0x0344, 0x0347, + 0x034c, 0x0387, 0x03ec, 0x03ef, 0x03f4, 0x042f, 0x0494, 0x0497, + 0x049c, 0x04d7, 0x053c, 0x053f, 0x0544, 0x057f, +}; + +static const struct a6xx_registers a6xx_gmu_reglist[] = { + REGS(a6xx_gmu_cx_registers, 0, 0), + REGS(a6xx_gmu_cx_rscc_registers, 0, 0), + REGS(a6xx_gmu_gx_registers, 0, 0), +}; + +static const struct a6xx_indexed_registers { + const char *name; + u32 addr; + u32 data; + u32 count; +} a6xx_indexed_reglist[] = { + { "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR, + REG_A6XX_CP_SQE_STAT_DATA, 0x33 }, + { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR, + REG_A6XX_CP_DRAW_STATE_DATA, 0x100 }, + { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR, + REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x6000 }, + { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR, + REG_A6XX_CP_ROQ_DBG_DATA, 0x400 }, +}; + +static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = { + "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR, + REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060, +}; + +#define DEBUGBUS(_id, _count) { .id = _id, .name = #_id, .count = _count } + +static const struct a6xx_debugbus_block { + const char *name; + u32 id; + u32 count; +} a6xx_debugbus_blocks[] = { + DEBUGBUS(A6XX_DBGBUS_CP, 0x100), + DEBUGBUS(A6XX_DBGBUS_RBBM, 0x100), + DEBUGBUS(A6XX_DBGBUS_HLSQ, 0x100), + DEBUGBUS(A6XX_DBGBUS_UCHE, 0x100), + DEBUGBUS(A6XX_DBGBUS_DPM, 0x100), + DEBUGBUS(A6XX_DBGBUS_TESS, 0x100), + DEBUGBUS(A6XX_DBGBUS_PC, 0x100), + DEBUGBUS(A6XX_DBGBUS_VFDP, 0x100), + DEBUGBUS(A6XX_DBGBUS_VPC, 0x100), + DEBUGBUS(A6XX_DBGBUS_TSE, 0x100), + DEBUGBUS(A6XX_DBGBUS_RAS, 0x100), + DEBUGBUS(A6XX_DBGBUS_VSC, 0x100), + DEBUGBUS(A6XX_DBGBUS_COM, 0x100), + DEBUGBUS(A6XX_DBGBUS_LRZ, 0x100), + DEBUGBUS(A6XX_DBGBUS_A2D, 0x100), + 
DEBUGBUS(A6XX_DBGBUS_CCUFCHE, 0x100), + DEBUGBUS(A6XX_DBGBUS_RBP, 0x100), + DEBUGBUS(A6XX_DBGBUS_DCS, 0x100), + DEBUGBUS(A6XX_DBGBUS_DBGC, 0x100), + DEBUGBUS(A6XX_DBGBUS_GMU_GX, 0x100), + DEBUGBUS(A6XX_DBGBUS_TPFCHE, 0x100), + DEBUGBUS(A6XX_DBGBUS_GPC, 0x100), + DEBUGBUS(A6XX_DBGBUS_LARC, 0x100), + DEBUGBUS(A6XX_DBGBUS_HLSQ_SPTP, 0x100), + DEBUGBUS(A6XX_DBGBUS_RB_0, 0x100), + DEBUGBUS(A6XX_DBGBUS_RB_1, 0x100), + DEBUGBUS(A6XX_DBGBUS_UCHE_WRAPPER, 0x100), + DEBUGBUS(A6XX_DBGBUS_CCU_0, 0x100), + DEBUGBUS(A6XX_DBGBUS_CCU_1, 0x100), + DEBUGBUS(A6XX_DBGBUS_VFD_0, 0x100), + DEBUGBUS(A6XX_DBGBUS_VFD_1, 0x100), + DEBUGBUS(A6XX_DBGBUS_VFD_2, 0x100), + DEBUGBUS(A6XX_DBGBUS_VFD_3, 0x100), + DEBUGBUS(A6XX_DBGBUS_SP_0, 0x100), + DEBUGBUS(A6XX_DBGBUS_SP_1, 0x100), + DEBUGBUS(A6XX_DBGBUS_TPL1_0, 0x100), + DEBUGBUS(A6XX_DBGBUS_TPL1_1, 0x100), + DEBUGBUS(A6XX_DBGBUS_TPL1_2, 0x100), + DEBUGBUS(A6XX_DBGBUS_TPL1_3, 0x100), +}; + +static const struct a6xx_debugbus_block a6xx_gbif_debugbus_block = + DEBUGBUS(A6XX_DBGBUS_VBIF, 0x100); + +static const struct a6xx_debugbus_block a6xx_cx_debugbus_blocks[] = { + DEBUGBUS(A6XX_DBGBUS_GMU_CX, 0x100), + DEBUGBUS(A6XX_DBGBUS_CX, 0x100), +}; + +#endif diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c new file mode 100644 index 000000000..2cc83e049 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c @@ -0,0 +1,734 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */ + +#include <linux/completion.h> +#include <linux/circ_buf.h> +#include <linux/list.h> + +#include "a6xx_gmu.h" +#include "a6xx_gmu.xml.h" +#include "a6xx_gpu.h" + +#define HFI_MSG_ID(val) [val] = #val + +static const char * const a6xx_hfi_msg_id[] = { + HFI_MSG_ID(HFI_H2F_MSG_INIT), + HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION), + HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE), + HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE), + HFI_MSG_ID(HFI_H2F_MSG_TEST), + HFI_MSG_ID(HFI_H2F_MSG_START), + HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START), + HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE), + HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER), +}; + +static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu, + struct a6xx_hfi_queue *queue, u32 *data, u32 dwords) +{ + struct a6xx_hfi_queue_header *header = queue->header; + u32 i, hdr, index = header->read_index; + + if (header->read_index == header->write_index) { + header->rx_request = 1; + return 0; + } + + hdr = queue->data[index]; + + queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index; + + /* + * If we are to assume that the GMU firmware is in fact a rational actor + * and is programmed to not send us a larger response than we expect + * then we can also assume that if the header size is unexpectedly large + * that it is due to memory corruption and/or hardware failure. In this + * case the only reasonable course of action is to BUG() to help harden + * the failure. 
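+	 *
+	 * As a worked example of the size check below (using the
+	 * HFI_HEADER_* macros from a6xx_hfi.h): a header dword of
+	 * 0x00300504 decodes as id 0x04, size 0x05 dwords and seqnum 0x003,
+	 * so any header whose size field exceeds the caller's buffer
+	 * triggers the BUG_ON().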
+ */ + + BUG_ON(HFI_HEADER_SIZE(hdr) > dwords); + + for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) { + data[i] = queue->data[index]; + index = (index + 1) % header->size; + } + + if (!gmu->legacy) + index = ALIGN(index, 4) % header->size; + + header->read_index = index; + return HFI_HEADER_SIZE(hdr); +} + +static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu, + struct a6xx_hfi_queue *queue, u32 *data, u32 dwords) +{ + struct a6xx_hfi_queue_header *header = queue->header; + u32 i, space, index = header->write_index; + + spin_lock(&queue->lock); + + space = CIRC_SPACE(header->write_index, header->read_index, + header->size); + if (space < dwords) { + header->dropped++; + spin_unlock(&queue->lock); + return -ENOSPC; + } + + queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index; + + for (i = 0; i < dwords; i++) { + queue->data[index] = data[i]; + index = (index + 1) % header->size; + } + + /* Cookify any unused data at the end of the write buffer */ + if (!gmu->legacy) { + for (; index % 4; index = (index + 1) % header->size) + queue->data[index] = 0xfafafafa; + } + + header->write_index = index; + spin_unlock(&queue->lock); + + gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01); + return 0; +} + +static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum, + u32 *payload, u32 payload_size) +{ + struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE]; + u32 val; + int ret; + + /* Wait for a response */ + ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val, + val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000); + + if (ret) { + DRM_DEV_ERROR(gmu->dev, + "Message %s id %d timed out waiting for response\n", + a6xx_hfi_msg_id[id], seqnum); + return -ETIMEDOUT; + } + + /* Clear the interrupt */ + gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, + A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ); + + for (;;) { + struct a6xx_hfi_msg_response resp; + + /* Get the next packet */ + ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp, + sizeof(resp) >> 2); + + /* If the queue is empty our response never made it */ + if (!ret) { + DRM_DEV_ERROR(gmu->dev, + "The HFI response queue is unexpectedly empty\n"); + + return -ENOENT; + } + + if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) { + struct a6xx_hfi_msg_error *error = + (struct a6xx_hfi_msg_error *) &resp; + + DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n", + error->code); + continue; + } + + if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) { + DRM_DEV_ERROR(gmu->dev, + "Unexpected message id %d on the response queue\n", + HFI_HEADER_SEQNUM(resp.ret_header)); + continue; + } + + if (resp.error) { + DRM_DEV_ERROR(gmu->dev, + "Message %s id %d returned error %d\n", + a6xx_hfi_msg_id[id], seqnum, resp.error); + return -EINVAL; + } + + /* All is well, copy over the buffer */ + if (payload && payload_size) + memcpy(payload, resp.payload, + min_t(u32, payload_size, sizeof(resp.payload))); + + return 0; + } +} + +static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id, + void *data, u32 size, u32 *payload, u32 payload_size) +{ + struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE]; + int ret, dwords = size >> 2; + u32 seqnum; + + seqnum = atomic_inc_return(&queue->seqnum) % 0xfff; + + /* First dword of the message is the message header - fill it in */ + *((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) | + (dwords << 8) | id; + + ret = a6xx_hfi_queue_write(gmu, queue, data, dwords); + if (ret) { + DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n", + a6xx_hfi_msg_id[id], seqnum); + return ret; + } + 
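+	/*
+	 * (A rough worked example of the CIRC_SPACE() check in
+	 * a6xx_hfi_queue_write() above: with size = 1024 (SZ_4K >> 2),
+	 * write_index = 10 and read_index = 4, CIRC_SPACE(10, 4, 1024)
+	 * evaluates to (4 - 10 - 1) & 1023 = 1017 free dwords; one slot
+	 * always stays unused so a full queue can be told apart from an
+	 * empty one.)
+	 */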
+ return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size); +} + +static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state) +{ + struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 }; + + msg.dbg_buffer_addr = (u32) gmu->debug.iova; + msg.dbg_buffer_size = (u32) gmu->debug.size; + msg.boot_state = boot_state; + + return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg), + NULL, 0); +} + +static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version) +{ + struct a6xx_hfi_msg_fw_version msg = { 0 }; + + /* Currently supporting version 1.10 */ + msg.supported_version = (1 << 28) | (1 << 19) | (1 << 17); + + return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg), + version, sizeof(*version)); +} + +static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu) +{ + struct a6xx_hfi_msg_perf_table_v1 msg = { 0 }; + int i; + + msg.num_gpu_levels = gmu->nr_gpu_freqs; + msg.num_gmu_levels = gmu->nr_gmu_freqs; + + for (i = 0; i < gmu->nr_gpu_freqs; i++) { + msg.gx_votes[i].vote = gmu->gx_arc_votes[i]; + msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000; + } + + for (i = 0; i < gmu->nr_gmu_freqs; i++) { + msg.cx_votes[i].vote = gmu->cx_arc_votes[i]; + msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000; + } + + return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg), + NULL, 0); +} + +static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu) +{ + struct a6xx_hfi_msg_perf_table msg = { 0 }; + int i; + + msg.num_gpu_levels = gmu->nr_gpu_freqs; + msg.num_gmu_levels = gmu->nr_gmu_freqs; + + for (i = 0; i < gmu->nr_gpu_freqs; i++) { + msg.gx_votes[i].vote = gmu->gx_arc_votes[i]; + msg.gx_votes[i].acd = 0xffffffff; + msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000; + } + + for (i = 0; i < gmu->nr_gmu_freqs; i++) { + msg.cx_votes[i].vote = gmu->cx_arc_votes[i]; + msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000; + } + + return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg), + NULL, 0); +} + +static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) +{ + /* Send a single "off" entry since the 618 GMU doesn't do bus scaling */ + msg->bw_level_num = 1; + + msg->ddr_cmds_num = 3; + msg->ddr_wait_bitmask = 0x01; + + msg->ddr_cmds_addrs[0] = 0x50000; + msg->ddr_cmds_addrs[1] = 0x5003c; + msg->ddr_cmds_addrs[2] = 0x5000c; + + msg->ddr_cmds_data[0][0] = 0x40000000; + msg->ddr_cmds_data[0][1] = 0x40000000; + msg->ddr_cmds_data[0][2] = 0x40000000; + + /* + * These are the CX (CNOC) votes - these are used by the GMU but the + * votes are known and fixed for the target + */ + msg->cnoc_cmds_num = 1; + msg->cnoc_wait_bitmask = 0x01; + + msg->cnoc_cmds_addrs[0] = 0x5007c; + msg->cnoc_cmds_data[0][0] = 0x40000000; + msg->cnoc_cmds_data[1][0] = 0x60000001; +} + +static void a619_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) +{ + msg->bw_level_num = 13; + + msg->ddr_cmds_num = 3; + msg->ddr_wait_bitmask = 0x0; + + msg->ddr_cmds_addrs[0] = 0x50000; + msg->ddr_cmds_addrs[1] = 0x50004; + msg->ddr_cmds_addrs[2] = 0x50080; + + msg->ddr_cmds_data[0][0] = 0x40000000; + msg->ddr_cmds_data[0][1] = 0x40000000; + msg->ddr_cmds_data[0][2] = 0x40000000; + msg->ddr_cmds_data[1][0] = 0x6000030c; + msg->ddr_cmds_data[1][1] = 0x600000db; + msg->ddr_cmds_data[1][2] = 0x60000008; + msg->ddr_cmds_data[2][0] = 0x60000618; + msg->ddr_cmds_data[2][1] = 0x600001b6; + msg->ddr_cmds_data[2][2] = 0x60000008; + msg->ddr_cmds_data[3][0] = 0x60000925; + msg->ddr_cmds_data[3][1] = 0x60000291; + msg->ddr_cmds_data[3][2] = 0x60000008; + msg->ddr_cmds_data[4][0] = 
0x60000dc1; + msg->ddr_cmds_data[4][1] = 0x600003dc; + msg->ddr_cmds_data[4][2] = 0x60000008; + msg->ddr_cmds_data[5][0] = 0x600010ad; + msg->ddr_cmds_data[5][1] = 0x600004ae; + msg->ddr_cmds_data[5][2] = 0x60000008; + msg->ddr_cmds_data[6][0] = 0x600014c3; + msg->ddr_cmds_data[6][1] = 0x600005d4; + msg->ddr_cmds_data[6][2] = 0x60000008; + msg->ddr_cmds_data[7][0] = 0x6000176a; + msg->ddr_cmds_data[7][1] = 0x60000693; + msg->ddr_cmds_data[7][2] = 0x60000008; + msg->ddr_cmds_data[8][0] = 0x60001f01; + msg->ddr_cmds_data[8][1] = 0x600008b5; + msg->ddr_cmds_data[8][2] = 0x60000008; + msg->ddr_cmds_data[9][0] = 0x60002940; + msg->ddr_cmds_data[9][1] = 0x60000b95; + msg->ddr_cmds_data[9][2] = 0x60000008; + msg->ddr_cmds_data[10][0] = 0x60002f68; + msg->ddr_cmds_data[10][1] = 0x60000d50; + msg->ddr_cmds_data[10][2] = 0x60000008; + msg->ddr_cmds_data[11][0] = 0x60003700; + msg->ddr_cmds_data[11][1] = 0x60000f71; + msg->ddr_cmds_data[11][2] = 0x60000008; + msg->ddr_cmds_data[12][0] = 0x60003fce; + msg->ddr_cmds_data[12][1] = 0x600011ea; + msg->ddr_cmds_data[12][2] = 0x60000008; + + msg->cnoc_cmds_num = 1; + msg->cnoc_wait_bitmask = 0x0; + + msg->cnoc_cmds_addrs[0] = 0x50054; + + msg->cnoc_cmds_data[0][0] = 0x40000000; +} + +static void a640_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) +{ + /* + * Send a single "off" entry just to get things running + * TODO: bus scaling + */ + msg->bw_level_num = 1; + + msg->ddr_cmds_num = 3; + msg->ddr_wait_bitmask = 0x01; + + msg->ddr_cmds_addrs[0] = 0x50000; + msg->ddr_cmds_addrs[1] = 0x5003c; + msg->ddr_cmds_addrs[2] = 0x5000c; + + msg->ddr_cmds_data[0][0] = 0x40000000; + msg->ddr_cmds_data[0][1] = 0x40000000; + msg->ddr_cmds_data[0][2] = 0x40000000; + + /* + * These are the CX (CNOC) votes - these are used by the GMU but the + * votes are known and fixed for the target + */ + msg->cnoc_cmds_num = 3; + msg->cnoc_wait_bitmask = 0x01; + + msg->cnoc_cmds_addrs[0] = 0x50034; + msg->cnoc_cmds_addrs[1] = 0x5007c; + msg->cnoc_cmds_addrs[2] = 0x5004c; + + msg->cnoc_cmds_data[0][0] = 0x40000000; + msg->cnoc_cmds_data[0][1] = 0x00000000; + msg->cnoc_cmds_data[0][2] = 0x40000000; + + msg->cnoc_cmds_data[1][0] = 0x60000001; + msg->cnoc_cmds_data[1][1] = 0x20000001; + msg->cnoc_cmds_data[1][2] = 0x60000001; +} + +static void a650_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) +{ + /* + * Send a single "off" entry just to get things running + * TODO: bus scaling + */ + msg->bw_level_num = 1; + + msg->ddr_cmds_num = 3; + msg->ddr_wait_bitmask = 0x01; + + msg->ddr_cmds_addrs[0] = 0x50000; + msg->ddr_cmds_addrs[1] = 0x50004; + msg->ddr_cmds_addrs[2] = 0x5007c; + + msg->ddr_cmds_data[0][0] = 0x40000000; + msg->ddr_cmds_data[0][1] = 0x40000000; + msg->ddr_cmds_data[0][2] = 0x40000000; + + /* + * These are the CX (CNOC) votes - these are used by the GMU but the + * votes are known and fixed for the target + */ + msg->cnoc_cmds_num = 1; + msg->cnoc_wait_bitmask = 0x01; + + msg->cnoc_cmds_addrs[0] = 0x500a4; + msg->cnoc_cmds_data[0][0] = 0x40000000; + msg->cnoc_cmds_data[1][0] = 0x60000001; +} + +static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) +{ + /* + * Send a single "off" entry just to get things running + * TODO: bus scaling + */ + msg->bw_level_num = 1; + + msg->ddr_cmds_num = 3; + msg->ddr_wait_bitmask = 0x01; + + msg->ddr_cmds_addrs[0] = 0x50004; + msg->ddr_cmds_addrs[1] = 0x500a0; + msg->ddr_cmds_addrs[2] = 0x50000; + + msg->ddr_cmds_data[0][0] = 0x40000000; + msg->ddr_cmds_data[0][1] = 0x40000000; + msg->ddr_cmds_data[0][2] = 0x40000000; + + /* + 
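+	 * (A sketch of how the GMU is understood to consume these tables:
+	 * for a given bandwidth level n it writes ddr_cmds_data[n][i] to
+	 * the RPMh command register at ddr_cmds_addrs[i] for each of the
+	 * ddr_cmds_num commands, waiting for completion on the bits set in
+	 * ddr_wait_bitmask; the cnoc_* fields below work the same way.)
+	 *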
* These are the CX (CNOC) votes - these are used by the GMU but the + * votes are known and fixed for the target + */ + msg->cnoc_cmds_num = 1; + msg->cnoc_wait_bitmask = 0x01; + + msg->cnoc_cmds_addrs[0] = 0x50070; + msg->cnoc_cmds_data[0][0] = 0x40000000; + msg->cnoc_cmds_data[1][0] = 0x60000001; +} + +static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) +{ + /* + * Send a single "off" entry just to get things running + * TODO: bus scaling + */ + msg->bw_level_num = 1; + + msg->ddr_cmds_num = 3; + msg->ddr_wait_bitmask = 0x07; + + msg->ddr_cmds_addrs[0] = 0x50004; + msg->ddr_cmds_addrs[1] = 0x50000; + msg->ddr_cmds_addrs[2] = 0x50088; + + msg->ddr_cmds_data[0][0] = 0x40000000; + msg->ddr_cmds_data[0][1] = 0x40000000; + msg->ddr_cmds_data[0][2] = 0x40000000; + + /* + * These are the CX (CNOC) votes - these are used by the GMU but the + * votes are known and fixed for the target + */ + msg->cnoc_cmds_num = 1; + msg->cnoc_wait_bitmask = 0x01; + + msg->cnoc_cmds_addrs[0] = 0x5006c; + msg->cnoc_cmds_data[0][0] = 0x40000000; + msg->cnoc_cmds_data[1][0] = 0x60000001; +} +static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg) +{ + /* Send a single "off" entry since the 630 GMU doesn't do bus scaling */ + msg->bw_level_num = 1; + + msg->ddr_cmds_num = 3; + msg->ddr_wait_bitmask = 0x07; + + msg->ddr_cmds_addrs[0] = 0x50000; + msg->ddr_cmds_addrs[1] = 0x5005c; + msg->ddr_cmds_addrs[2] = 0x5000c; + + msg->ddr_cmds_data[0][0] = 0x40000000; + msg->ddr_cmds_data[0][1] = 0x40000000; + msg->ddr_cmds_data[0][2] = 0x40000000; + + /* + * These are the CX (CNOC) votes. This is used but the values for the + * sdm845 GMU are known and fixed so we can hard code them. + */ + + msg->cnoc_cmds_num = 3; + msg->cnoc_wait_bitmask = 0x05; + + msg->cnoc_cmds_addrs[0] = 0x50034; + msg->cnoc_cmds_addrs[1] = 0x5007c; + msg->cnoc_cmds_addrs[2] = 0x5004c; + + msg->cnoc_cmds_data[0][0] = 0x40000000; + msg->cnoc_cmds_data[0][1] = 0x00000000; + msg->cnoc_cmds_data[0][2] = 0x40000000; + + msg->cnoc_cmds_data[1][0] = 0x60000001; + msg->cnoc_cmds_data[1][1] = 0x20000001; + msg->cnoc_cmds_data[1][2] = 0x60000001; +} + + +static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu) +{ + struct a6xx_hfi_msg_bw_table msg = { 0 }; + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + + if (adreno_is_a618(adreno_gpu)) + a618_build_bw_table(&msg); + else if (adreno_is_a619(adreno_gpu)) + a619_build_bw_table(&msg); + else if (adreno_is_a640_family(adreno_gpu)) + a640_build_bw_table(&msg); + else if (adreno_is_a650(adreno_gpu)) + a650_build_bw_table(&msg); + else if (adreno_is_7c3(adreno_gpu)) + adreno_7c3_build_bw_table(&msg); + else if (adreno_is_a660(adreno_gpu)) + a660_build_bw_table(&msg); + else + a6xx_build_bw_table(&msg); + + return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg), + NULL, 0); +} + +static int a6xx_hfi_send_test(struct a6xx_gmu *gmu) +{ + struct a6xx_hfi_msg_test msg = { 0 }; + + return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg), + NULL, 0); +} + +static int a6xx_hfi_send_start(struct a6xx_gmu *gmu) +{ + struct a6xx_hfi_msg_start msg = { 0 }; + + return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_START, &msg, sizeof(msg), + NULL, 0); +} + +static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu) +{ + struct a6xx_hfi_msg_core_fw_start msg = { 0 }; + + return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_CORE_FW_START, &msg, + sizeof(msg), NULL, 0); +} + +int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int 
index) +{ + struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 }; + + msg.ack_type = 1; /* blocking */ + msg.freq = index; + msg.bw = 0; /* TODO: bus scaling */ + + return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg, + sizeof(msg), NULL, 0); +} + +int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu) +{ + struct a6xx_hfi_prep_slumber_cmd msg = { 0 }; + + /* TODO: should freq and bw fields be non-zero? */ + + return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PREPARE_SLUMBER, &msg, + sizeof(msg), NULL, 0); +} + +static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state) +{ + int ret; + + ret = a6xx_hfi_send_gmu_init(gmu, boot_state); + if (ret) + return ret; + + ret = a6xx_hfi_get_fw_version(gmu, NULL); + if (ret) + return ret; + + /* + * We have to exchange version numbers per the sequence, but at this + * point the kernel driver doesn't need to know the exact version of + * the GMU firmware + */ + + ret = a6xx_hfi_send_perf_table_v1(gmu); + if (ret) + return ret; + + ret = a6xx_hfi_send_bw_table(gmu); + if (ret) + return ret; + + /* + * Let the GMU know that there won't be any more HFI messages until next + * boot + */ + a6xx_hfi_send_test(gmu); + + return 0; +} + +int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state) +{ + int ret; + + if (gmu->legacy) + return a6xx_hfi_start_v1(gmu, boot_state); + + ret = a6xx_hfi_send_perf_table(gmu); + if (ret) + return ret; + + ret = a6xx_hfi_send_bw_table(gmu); + if (ret) + return ret; + + ret = a6xx_hfi_send_core_fw_start(gmu); + if (ret) + return ret; + + /* + * The downstream driver sends this in its "a6xx_hw_init" equivalent, + * but there seems to be no harm in sending it here + */ + ret = a6xx_hfi_send_start(gmu); + if (ret) + return ret; + + return 0; +} + +void a6xx_hfi_stop(struct a6xx_gmu *gmu) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) { + struct a6xx_hfi_queue *queue = &gmu->queues[i]; + + if (!queue->header) + continue; + + if (queue->header->read_index != queue->header->write_index) + DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i); + + queue->header->read_index = 0; + queue->header->write_index = 0; + + memset(&queue->history, 0xff, sizeof(queue->history)); + queue->history_idx = 0; + } +} + +static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue, + struct a6xx_hfi_queue_header *header, void *virt, u64 iova, + u32 id) +{ + spin_lock_init(&queue->lock); + queue->header = header; + queue->data = virt; + atomic_set(&queue->seqnum, 0); + + memset(&queue->history, 0xff, sizeof(queue->history)); + queue->history_idx = 0; + + /* Set up the shared memory header */ + header->iova = iova; + header->type = 10 << 8 | id; + header->status = 1; + header->size = SZ_4K >> 2; + header->msg_size = 0; + header->dropped = 0; + header->rx_watermark = 1; + header->tx_watermark = 1; + header->rx_request = 1; + header->tx_request = 0; + header->read_index = 0; + header->write_index = 0; +} + +void a6xx_hfi_init(struct a6xx_gmu *gmu) +{ + struct a6xx_gmu_bo *hfi = &gmu->hfi; + struct a6xx_hfi_queue_table_header *table = hfi->virt; + struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table); + u64 offset; + int table_size; + + /* + * The table size is the size of the table header plus all of the queue + * headers + */ + table_size = sizeof(*table); + table_size += (ARRAY_SIZE(gmu->queues) * + sizeof(struct a6xx_hfi_queue_header)); + + table->version = 0; + table->size = table_size; + /* First queue header is located immediately after the table header */ + table->qhdr0_offset = sizeof(*table) >> 2; + 
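+	/*
+	 * (Resulting layout of the shared HFI buffer, as a sketch: the
+	 * 6-dword table header - hence qhdr0_offset = 6 - and the two
+	 * 12-dword queue headers sit at the start of the first page, the
+	 * command queue data occupies the page at +4K and the response
+	 * queue the page at +8K, matching the offsets programmed below.)
+	 */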
table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2; + table->num_queues = ARRAY_SIZE(gmu->queues); + table->active_queues = ARRAY_SIZE(gmu->queues); + + /* Command queue */ + offset = SZ_4K; + a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset, + hfi->iova + offset, 0); + + /* GMU response queue */ + offset += SZ_4K; + a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset, + hfi->iova + offset, gmu->legacy ? 4 : 1); +} diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h new file mode 100644 index 000000000..528110169 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h @@ -0,0 +1,184 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */ + +#ifndef _A6XX_HFI_H_ +#define _A6XX_HFI_H_ + +struct a6xx_hfi_queue_table_header { + u32 version; + u32 size; /* Size of the queue table in dwords */ + u32 qhdr0_offset; /* Offset of the first queue header */ + u32 qhdr_size; /* Size of the queue headers */ + u32 num_queues; /* Number of total queues */ + u32 active_queues; /* Number of active queues */ +}; + +struct a6xx_hfi_queue_header { + u32 status; + u32 iova; + u32 type; + u32 size; + u32 msg_size; + u32 dropped; + u32 rx_watermark; + u32 tx_watermark; + u32 rx_request; + u32 tx_request; + u32 read_index; + u32 write_index; +}; + +struct a6xx_hfi_queue { + struct a6xx_hfi_queue_header *header; + spinlock_t lock; + u32 *data; + atomic_t seqnum; + + /* + * Tracking for the start index of the last N messages in the + * queue, for the benefit of devcore dump / crashdec (since + * parsing in the reverse direction to decode the last N + * messages is difficult to do and would rely on heuristics + * which are not guaranteed to be correct) + */ +#define HFI_HISTORY_SZ 8 + s32 history[HFI_HISTORY_SZ]; + u8 history_idx; +}; + +/* This is the outgoing queue to the GMU */ +#define HFI_COMMAND_QUEUE 0 + +/* This is the incoming response queue from the GMU */ +#define HFI_RESPONSE_QUEUE 1 + +#define HFI_HEADER_ID(msg) ((msg) & 0xff) +#define HFI_HEADER_SIZE(msg) (((msg) >> 8) & 0xff) +#define HFI_HEADER_SEQNUM(msg) (((msg) >> 20) & 0xfff) + +/* FIXME: Do we need this or can we use ARRAY_SIZE? 
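+ * (Probably: a6xx_hfi_wait_for_ack() already copies at most
+ * min(payload_size, sizeof(resp.payload)) bytes, so ARRAY_SIZE() of
+ * the payload field at the use sites would be equivalent.)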
*/ +#define HFI_RESPONSE_PAYLOAD_SIZE 16 + +/* HFI message types */ + +#define HFI_MSG_CMD 0 +#define HFI_MSG_ACK 1 +#define HFI_MSG_ACK_V1 2 + +#define HFI_F2H_MSG_ACK 126 + +struct a6xx_hfi_msg_response { + u32 header; + u32 ret_header; + u32 error; + u32 payload[HFI_RESPONSE_PAYLOAD_SIZE]; +}; + +#define HFI_F2H_MSG_ERROR 100 + +struct a6xx_hfi_msg_error { + u32 header; + u32 code; + u32 payload[2]; +}; + +#define HFI_H2F_MSG_INIT 0 + +struct a6xx_hfi_msg_gmu_init_cmd { + u32 header; + u32 seg_id; + u32 dbg_buffer_addr; + u32 dbg_buffer_size; + u32 boot_state; +}; + +#define HFI_H2F_MSG_FW_VERSION 1 + +struct a6xx_hfi_msg_fw_version { + u32 header; + u32 supported_version; +}; + +#define HFI_H2F_MSG_PERF_TABLE 4 + +struct perf_level { + u32 vote; + u32 freq; +}; + +struct perf_gx_level { + u32 vote; + u32 acd; + u32 freq; +}; + +struct a6xx_hfi_msg_perf_table_v1 { + u32 header; + u32 num_gpu_levels; + u32 num_gmu_levels; + + struct perf_level gx_votes[16]; + struct perf_level cx_votes[4]; +}; + +struct a6xx_hfi_msg_perf_table { + u32 header; + u32 num_gpu_levels; + u32 num_gmu_levels; + + struct perf_gx_level gx_votes[16]; + struct perf_level cx_votes[4]; +}; + +#define HFI_H2F_MSG_BW_TABLE 3 + +struct a6xx_hfi_msg_bw_table { + u32 header; + u32 bw_level_num; + u32 cnoc_cmds_num; + u32 ddr_cmds_num; + u32 cnoc_wait_bitmask; + u32 ddr_wait_bitmask; + u32 cnoc_cmds_addrs[6]; + u32 cnoc_cmds_data[2][6]; + u32 ddr_cmds_addrs[8]; + u32 ddr_cmds_data[16][8]; +}; + +#define HFI_H2F_MSG_TEST 5 + +struct a6xx_hfi_msg_test { + u32 header; +}; + +#define HFI_H2F_MSG_START 10 + +struct a6xx_hfi_msg_start { + u32 header; +}; + +#define HFI_H2F_MSG_CORE_FW_START 14 + +struct a6xx_hfi_msg_core_fw_start { + u32 header; + u32 handle; +}; + +#define HFI_H2F_MSG_GX_BW_PERF_VOTE 30 + +struct a6xx_hfi_gx_bw_perf_vote_cmd { + u32 header; + u32 ack_type; + u32 freq; + u32 bw; +}; + +#define HFI_H2F_MSG_PREPARE_SLUMBER 33 + +struct a6xx_hfi_prep_slumber_cmd { + u32 header; + u32 bw; + u32 freq; +}; + +#endif diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h new file mode 100644 index 000000000..abb037ccc --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h @@ -0,0 +1,685 @@ +#ifndef ADRENO_COMMON_XML +#define ADRENO_COMMON_XML + +/* Autogenerated file, DO NOT EDIT manually! 
+ +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://github.com/freedreno/envytools/ +git clone https://github.com/freedreno/envytools.git + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56) + +Copyright (C) 2013-2021 by the following authors: +- Rob Clark (robclark) +- Ilia Mirkin (imirkin) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + + +enum chip { + A2XX = 0, + A3XX = 0, + A4XX = 0, + A5XX = 0, + A6XX = 0, +}; + +enum adreno_pa_su_sc_draw { + PC_DRAW_POINTS = 0, + PC_DRAW_LINES = 1, + PC_DRAW_TRIANGLES = 2, +}; + +enum adreno_compare_func { + FUNC_NEVER = 0, + FUNC_LESS = 1, + FUNC_EQUAL = 2, + FUNC_LEQUAL = 3, + FUNC_GREATER = 4, + FUNC_NOTEQUAL = 5, + FUNC_GEQUAL = 6, + FUNC_ALWAYS = 7, +}; + +enum adreno_stencil_op { + STENCIL_KEEP = 0, + STENCIL_ZERO = 1, + STENCIL_REPLACE = 2, + STENCIL_INCR_CLAMP = 3, + STENCIL_DECR_CLAMP = 4, + STENCIL_INVERT = 5, + STENCIL_INCR_WRAP = 6, + STENCIL_DECR_WRAP = 7, +}; + +enum adreno_rb_blend_factor { + FACTOR_ZERO = 0, + FACTOR_ONE = 1, + FACTOR_SRC_COLOR = 4, + FACTOR_ONE_MINUS_SRC_COLOR = 5, + FACTOR_SRC_ALPHA = 6, + FACTOR_ONE_MINUS_SRC_ALPHA = 7, + FACTOR_DST_COLOR = 8, + FACTOR_ONE_MINUS_DST_COLOR = 9, + FACTOR_DST_ALPHA = 10, + FACTOR_ONE_MINUS_DST_ALPHA = 11, + FACTOR_CONSTANT_COLOR = 12, + FACTOR_ONE_MINUS_CONSTANT_COLOR = 13, + FACTOR_CONSTANT_ALPHA = 14, + FACTOR_ONE_MINUS_CONSTANT_ALPHA = 15, + FACTOR_SRC_ALPHA_SATURATE = 16, + FACTOR_SRC1_COLOR = 20, + FACTOR_ONE_MINUS_SRC1_COLOR = 21, + FACTOR_SRC1_ALPHA = 22, + FACTOR_ONE_MINUS_SRC1_ALPHA = 23, +}; + +enum adreno_rb_surface_endian { + ENDIAN_NONE = 0, + ENDIAN_8IN16 = 1, + ENDIAN_8IN32 = 2, + ENDIAN_16IN32 = 3, + ENDIAN_8IN64 = 4, + ENDIAN_8IN128 = 5, +}; + +enum adreno_rb_dither_mode { + DITHER_DISABLE = 0, + DITHER_ALWAYS = 1, + DITHER_IF_ALPHA_OFF = 2, +}; + +enum adreno_rb_depth_format { + DEPTHX_16 = 0, + DEPTHX_24_8 = 1, + DEPTHX_32 = 2, +}; + +enum adreno_rb_copy_control_mode { + RB_COPY_RESOLVE = 1, + RB_COPY_CLEAR = 2, + RB_COPY_DEPTH_STENCIL = 5, +}; + +enum a3xx_rop_code { + ROP_CLEAR = 0, + ROP_NOR = 1, + ROP_AND_INVERTED = 2, + ROP_COPY_INVERTED = 3, + ROP_AND_REVERSE = 4, + ROP_INVERT = 5, + ROP_NAND = 7, + ROP_AND = 8, + ROP_EQUIV = 9, + ROP_NOOP = 10, + ROP_OR_INVERTED = 11, + ROP_OR_REVERSE = 13, + ROP_OR = 14, + ROP_SET = 15, +}; + +enum a3xx_render_mode { + RB_RENDERING_PASS = 0, + RB_TILING_PASS = 1, + RB_RESOLVE_PASS = 2, + RB_COMPUTE_PASS = 3, +}; + +enum a3xx_msaa_samples { + MSAA_ONE = 0, + MSAA_TWO = 1, + MSAA_FOUR = 2, + MSAA_EIGHT = 3, +}; + +enum a3xx_threadmode { + MULTI = 0, + SINGLE = 1, +}; + +enum a3xx_instrbuffermode { + CACHE = 0, + BUFFER = 1, +}; + +enum a3xx_threadsize { + TWO_QUADS = 0, + FOUR_QUADS = 1, +}; + +enum a3xx_color_swap { + WZYX = 0, + WXYZ = 1, + ZYXW = 2, + XYZW = 3, +}; + +enum a3xx_rb_blend_opcode { + BLEND_DST_PLUS_SRC = 0, + BLEND_SRC_MINUS_DST = 1, + BLEND_DST_MINUS_SRC = 2, + BLEND_MIN_DST_SRC = 3, + BLEND_MAX_DST_SRC = 4, +}; + +enum a4xx_tess_spacing { + EQUAL_SPACING = 0, + ODD_SPACING = 2, + EVEN_SPACING = 3, +}; + +enum a5xx_address_mode { + ADDR_32B = 0, + ADDR_64B = 1, +}; + +enum a5xx_line_mode { + BRESENHAM = 0, + RECTANGULAR = 1, +}; + +#define REG_AXXX_CP_RB_BASE 0x000001c0 + +#define REG_AXXX_CP_RB_CNTL 0x000001c1 +#define AXXX_CP_RB_CNTL_BUFSZ__MASK 0x0000003f +#define AXXX_CP_RB_CNTL_BUFSZ__SHIFT 0 +static inline uint32_t AXXX_CP_RB_CNTL_BUFSZ(uint32_t val) +{ + return ((val) << AXXX_CP_RB_CNTL_BUFSZ__SHIFT) & AXXX_CP_RB_CNTL_BUFSZ__MASK; +} +#define AXXX_CP_RB_CNTL_BLKSZ__MASK 0x00003f00 +#define AXXX_CP_RB_CNTL_BLKSZ__SHIFT 8 +static inline uint32_t AXXX_CP_RB_CNTL_BLKSZ(uint32_t val) +{ + return ((val) << AXXX_CP_RB_CNTL_BLKSZ__SHIFT) & AXXX_CP_RB_CNTL_BLKSZ__MASK; +} +#define AXXX_CP_RB_CNTL_BUF_SWAP__MASK 0x00030000 +#define AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT 16 +static inline uint32_t AXXX_CP_RB_CNTL_BUF_SWAP(uint32_t val) +{ + return 
((val) << AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT) & AXXX_CP_RB_CNTL_BUF_SWAP__MASK; +} +#define AXXX_CP_RB_CNTL_POLL_EN 0x00100000 +#define AXXX_CP_RB_CNTL_NO_UPDATE 0x08000000 +#define AXXX_CP_RB_CNTL_RPTR_WR_EN 0x80000000 + +#define REG_AXXX_CP_RB_RPTR_ADDR 0x000001c3 +#define AXXX_CP_RB_RPTR_ADDR_SWAP__MASK 0x00000003 +#define AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT 0 +static inline uint32_t AXXX_CP_RB_RPTR_ADDR_SWAP(uint32_t val) +{ + return ((val) << AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT) & AXXX_CP_RB_RPTR_ADDR_SWAP__MASK; +} +#define AXXX_CP_RB_RPTR_ADDR_ADDR__MASK 0xfffffffc +#define AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT 2 +static inline uint32_t AXXX_CP_RB_RPTR_ADDR_ADDR(uint32_t val) +{ + return ((val >> 2) << AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT) & AXXX_CP_RB_RPTR_ADDR_ADDR__MASK; +} + +#define REG_AXXX_CP_RB_RPTR 0x000001c4 + +#define REG_AXXX_CP_RB_WPTR 0x000001c5 + +#define REG_AXXX_CP_RB_WPTR_DELAY 0x000001c6 + +#define REG_AXXX_CP_RB_RPTR_WR 0x000001c7 + +#define REG_AXXX_CP_RB_WPTR_BASE 0x000001c8 + +#define REG_AXXX_CP_QUEUE_THRESHOLDS 0x000001d5 +#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK 0x0000000f +#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT 0 +static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(uint32_t val) +{ + return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK; +} +#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK 0x00000f00 +#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT 8 +static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(uint32_t val) +{ + return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK; +} +#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK 0x000f0000 +#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT 16 +static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(uint32_t val) +{ + return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK; +} + +#define REG_AXXX_CP_MEQ_THRESHOLDS 0x000001d6 +#define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK 0x001f0000 +#define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT 16 +static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_MEQ_END(uint32_t val) +{ + return ((val) << AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK; +} +#define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK 0x1f000000 +#define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT 24 +static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_ROQ_END(uint32_t val) +{ + return ((val) << AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK; +} + +#define REG_AXXX_CP_CSQ_AVAIL 0x000001d7 +#define AXXX_CP_CSQ_AVAIL_RING__MASK 0x0000007f +#define AXXX_CP_CSQ_AVAIL_RING__SHIFT 0 +static inline uint32_t AXXX_CP_CSQ_AVAIL_RING(uint32_t val) +{ + return ((val) << AXXX_CP_CSQ_AVAIL_RING__SHIFT) & AXXX_CP_CSQ_AVAIL_RING__MASK; +} +#define AXXX_CP_CSQ_AVAIL_IB1__MASK 0x00007f00 +#define AXXX_CP_CSQ_AVAIL_IB1__SHIFT 8 +static inline uint32_t AXXX_CP_CSQ_AVAIL_IB1(uint32_t val) +{ + return ((val) << AXXX_CP_CSQ_AVAIL_IB1__SHIFT) & AXXX_CP_CSQ_AVAIL_IB1__MASK; +} +#define AXXX_CP_CSQ_AVAIL_IB2__MASK 0x007f0000 +#define AXXX_CP_CSQ_AVAIL_IB2__SHIFT 16 +static inline uint32_t AXXX_CP_CSQ_AVAIL_IB2(uint32_t val) +{ + return ((val) << AXXX_CP_CSQ_AVAIL_IB2__SHIFT) & AXXX_CP_CSQ_AVAIL_IB2__MASK; +} + +#define REG_AXXX_CP_STQ_AVAIL 0x000001d8 +#define AXXX_CP_STQ_AVAIL_ST__MASK 0x0000007f +#define AXXX_CP_STQ_AVAIL_ST__SHIFT 0 +static inline uint32_t AXXX_CP_STQ_AVAIL_ST(uint32_t val) 
+{ + return ((val) << AXXX_CP_STQ_AVAIL_ST__SHIFT) & AXXX_CP_STQ_AVAIL_ST__MASK; +} + +#define REG_AXXX_CP_MEQ_AVAIL 0x000001d9 +#define AXXX_CP_MEQ_AVAIL_MEQ__MASK 0x0000001f +#define AXXX_CP_MEQ_AVAIL_MEQ__SHIFT 0 +static inline uint32_t AXXX_CP_MEQ_AVAIL_MEQ(uint32_t val) +{ + return ((val) << AXXX_CP_MEQ_AVAIL_MEQ__SHIFT) & AXXX_CP_MEQ_AVAIL_MEQ__MASK; +} + +#define REG_AXXX_SCRATCH_UMSK 0x000001dc +#define AXXX_SCRATCH_UMSK_UMSK__MASK 0x000000ff +#define AXXX_SCRATCH_UMSK_UMSK__SHIFT 0 +static inline uint32_t AXXX_SCRATCH_UMSK_UMSK(uint32_t val) +{ + return ((val) << AXXX_SCRATCH_UMSK_UMSK__SHIFT) & AXXX_SCRATCH_UMSK_UMSK__MASK; +} +#define AXXX_SCRATCH_UMSK_SWAP__MASK 0x00030000 +#define AXXX_SCRATCH_UMSK_SWAP__SHIFT 16 +static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val) +{ + return ((val) << AXXX_SCRATCH_UMSK_SWAP__SHIFT) & AXXX_SCRATCH_UMSK_SWAP__MASK; +} + +#define REG_AXXX_SCRATCH_ADDR 0x000001dd + +#define REG_AXXX_CP_ME_RDADDR 0x000001ea + +#define REG_AXXX_CP_STATE_DEBUG_INDEX 0x000001ec + +#define REG_AXXX_CP_STATE_DEBUG_DATA 0x000001ed + +#define REG_AXXX_CP_INT_CNTL 0x000001f2 +#define AXXX_CP_INT_CNTL_SW_INT_MASK 0x00080000 +#define AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK 0x00800000 +#define AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK 0x01000000 +#define AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK 0x02000000 +#define AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK 0x04000000 +#define AXXX_CP_INT_CNTL_IB_ERROR_MASK 0x08000000 +#define AXXX_CP_INT_CNTL_IB2_INT_MASK 0x20000000 +#define AXXX_CP_INT_CNTL_IB1_INT_MASK 0x40000000 +#define AXXX_CP_INT_CNTL_RB_INT_MASK 0x80000000 + +#define REG_AXXX_CP_INT_STATUS 0x000001f3 + +#define REG_AXXX_CP_INT_ACK 0x000001f4 + +#define REG_AXXX_CP_ME_CNTL 0x000001f6 +#define AXXX_CP_ME_CNTL_BUSY 0x20000000 +#define AXXX_CP_ME_CNTL_HALT 0x10000000 + +#define REG_AXXX_CP_ME_STATUS 0x000001f7 + +#define REG_AXXX_CP_ME_RAM_WADDR 0x000001f8 + +#define REG_AXXX_CP_ME_RAM_RADDR 0x000001f9 + +#define REG_AXXX_CP_ME_RAM_DATA 0x000001fa + +#define REG_AXXX_CP_DEBUG 0x000001fc +#define AXXX_CP_DEBUG_PREDICATE_DISABLE 0x00800000 +#define AXXX_CP_DEBUG_PROG_END_PTR_ENABLE 0x01000000 +#define AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE 0x02000000 +#define AXXX_CP_DEBUG_PREFETCH_PASS_NOPS 0x04000000 +#define AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE 0x08000000 +#define AXXX_CP_DEBUG_PREFETCH_MATCH_DISABLE 0x10000000 +#define AXXX_CP_DEBUG_SIMPLE_ME_FLOW_CONTROL 0x40000000 +#define AXXX_CP_DEBUG_MIU_WRITE_PACK_DISABLE 0x80000000 + +#define REG_AXXX_CP_CSQ_RB_STAT 0x000001fd +#define AXXX_CP_CSQ_RB_STAT_RPTR__MASK 0x0000007f +#define AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT 0 +static inline uint32_t AXXX_CP_CSQ_RB_STAT_RPTR(uint32_t val) +{ + return ((val) << AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_RPTR__MASK; +} +#define AXXX_CP_CSQ_RB_STAT_WPTR__MASK 0x007f0000 +#define AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT 16 +static inline uint32_t AXXX_CP_CSQ_RB_STAT_WPTR(uint32_t val) +{ + return ((val) << AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_WPTR__MASK; +} + +#define REG_AXXX_CP_CSQ_IB1_STAT 0x000001fe +#define AXXX_CP_CSQ_IB1_STAT_RPTR__MASK 0x0000007f +#define AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT 0 +static inline uint32_t AXXX_CP_CSQ_IB1_STAT_RPTR(uint32_t val) +{ + return ((val) << AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_RPTR__MASK; +} +#define AXXX_CP_CSQ_IB1_STAT_WPTR__MASK 0x007f0000 +#define AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT 16 +static inline uint32_t AXXX_CP_CSQ_IB1_STAT_WPTR(uint32_t val) +{ + return ((val) << AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT) & 
AXXX_CP_CSQ_IB1_STAT_WPTR__MASK; +} + +#define REG_AXXX_CP_CSQ_IB2_STAT 0x000001ff +#define AXXX_CP_CSQ_IB2_STAT_RPTR__MASK 0x0000007f +#define AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT 0 +static inline uint32_t AXXX_CP_CSQ_IB2_STAT_RPTR(uint32_t val) +{ + return ((val) << AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_RPTR__MASK; +} +#define AXXX_CP_CSQ_IB2_STAT_WPTR__MASK 0x007f0000 +#define AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT 16 +static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val) +{ + return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK; +} + +#define REG_AXXX_CP_NON_PREFETCH_CNTRS 0x00000440 + +#define REG_AXXX_CP_STQ_ST_STAT 0x00000443 + +#define REG_AXXX_CP_ST_BASE 0x0000044d + +#define REG_AXXX_CP_ST_BUFSZ 0x0000044e + +#define REG_AXXX_CP_MEQ_STAT 0x0000044f + +#define REG_AXXX_CP_MIU_TAG_STAT 0x00000452 + +#define REG_AXXX_CP_BIN_MASK_LO 0x00000454 + +#define REG_AXXX_CP_BIN_MASK_HI 0x00000455 + +#define REG_AXXX_CP_BIN_SELECT_LO 0x00000456 + +#define REG_AXXX_CP_BIN_SELECT_HI 0x00000457 + +#define REG_AXXX_CP_IB1_BASE 0x00000458 + +#define REG_AXXX_CP_IB1_BUFSZ 0x00000459 + +#define REG_AXXX_CP_IB2_BASE 0x0000045a + +#define REG_AXXX_CP_IB2_BUFSZ 0x0000045b + +#define REG_AXXX_CP_STAT 0x0000047f +#define AXXX_CP_STAT_CP_BUSY__MASK 0x80000000 +#define AXXX_CP_STAT_CP_BUSY__SHIFT 31 +static inline uint32_t AXXX_CP_STAT_CP_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_CP_BUSY__SHIFT) & AXXX_CP_STAT_CP_BUSY__MASK; +} +#define AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__MASK 0x40000000 +#define AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__SHIFT 30 +static inline uint32_t AXXX_CP_STAT_VS_EVENT_FIFO_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__MASK; +} +#define AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__MASK 0x20000000 +#define AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__SHIFT 29 +static inline uint32_t AXXX_CP_STAT_PS_EVENT_FIFO_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__MASK; +} +#define AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__MASK 0x10000000 +#define AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__SHIFT 28 +static inline uint32_t AXXX_CP_STAT_CF_EVENT_FIFO_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__MASK; +} +#define AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__MASK 0x08000000 +#define AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__SHIFT 27 +static inline uint32_t AXXX_CP_STAT_RB_EVENT_FIFO_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__MASK; +} +#define AXXX_CP_STAT_ME_BUSY__MASK 0x04000000 +#define AXXX_CP_STAT_ME_BUSY__SHIFT 26 +static inline uint32_t AXXX_CP_STAT_ME_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_ME_BUSY__SHIFT) & AXXX_CP_STAT_ME_BUSY__MASK; +} +#define AXXX_CP_STAT_MIU_WR_C_BUSY__MASK 0x02000000 +#define AXXX_CP_STAT_MIU_WR_C_BUSY__SHIFT 25 +static inline uint32_t AXXX_CP_STAT_MIU_WR_C_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_MIU_WR_C_BUSY__SHIFT) & AXXX_CP_STAT_MIU_WR_C_BUSY__MASK; +} +#define AXXX_CP_STAT_CP_3D_BUSY__MASK 0x00800000 +#define AXXX_CP_STAT_CP_3D_BUSY__SHIFT 23 +static inline uint32_t AXXX_CP_STAT_CP_3D_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_CP_3D_BUSY__SHIFT) & AXXX_CP_STAT_CP_3D_BUSY__MASK; +} +#define AXXX_CP_STAT_CP_NRT_BUSY__MASK 0x00400000 +#define AXXX_CP_STAT_CP_NRT_BUSY__SHIFT 22 +static inline uint32_t AXXX_CP_STAT_CP_NRT_BUSY(uint32_t val) +{ + return ((val) << 
AXXX_CP_STAT_CP_NRT_BUSY__SHIFT) & AXXX_CP_STAT_CP_NRT_BUSY__MASK; +} +#define AXXX_CP_STAT_RBIU_SCRATCH_BUSY__MASK 0x00200000 +#define AXXX_CP_STAT_RBIU_SCRATCH_BUSY__SHIFT 21 +static inline uint32_t AXXX_CP_STAT_RBIU_SCRATCH_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_RBIU_SCRATCH_BUSY__SHIFT) & AXXX_CP_STAT_RBIU_SCRATCH_BUSY__MASK; +} +#define AXXX_CP_STAT_RCIU_ME_BUSY__MASK 0x00100000 +#define AXXX_CP_STAT_RCIU_ME_BUSY__SHIFT 20 +static inline uint32_t AXXX_CP_STAT_RCIU_ME_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_RCIU_ME_BUSY__SHIFT) & AXXX_CP_STAT_RCIU_ME_BUSY__MASK; +} +#define AXXX_CP_STAT_RCIU_PFP_BUSY__MASK 0x00080000 +#define AXXX_CP_STAT_RCIU_PFP_BUSY__SHIFT 19 +static inline uint32_t AXXX_CP_STAT_RCIU_PFP_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_RCIU_PFP_BUSY__SHIFT) & AXXX_CP_STAT_RCIU_PFP_BUSY__MASK; +} +#define AXXX_CP_STAT_MEQ_RING_BUSY__MASK 0x00040000 +#define AXXX_CP_STAT_MEQ_RING_BUSY__SHIFT 18 +static inline uint32_t AXXX_CP_STAT_MEQ_RING_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_MEQ_RING_BUSY__SHIFT) & AXXX_CP_STAT_MEQ_RING_BUSY__MASK; +} +#define AXXX_CP_STAT_PFP_BUSY__MASK 0x00020000 +#define AXXX_CP_STAT_PFP_BUSY__SHIFT 17 +static inline uint32_t AXXX_CP_STAT_PFP_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_PFP_BUSY__SHIFT) & AXXX_CP_STAT_PFP_BUSY__MASK; +} +#define AXXX_CP_STAT_ST_QUEUE_BUSY__MASK 0x00010000 +#define AXXX_CP_STAT_ST_QUEUE_BUSY__SHIFT 16 +static inline uint32_t AXXX_CP_STAT_ST_QUEUE_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_ST_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_ST_QUEUE_BUSY__MASK; +} +#define AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__MASK 0x00002000 +#define AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__SHIFT 13 +static inline uint32_t AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__MASK; +} +#define AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__MASK 0x00001000 +#define AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__SHIFT 12 +static inline uint32_t AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__MASK; +} +#define AXXX_CP_STAT_RING_QUEUE_BUSY__MASK 0x00000800 +#define AXXX_CP_STAT_RING_QUEUE_BUSY__SHIFT 11 +static inline uint32_t AXXX_CP_STAT_RING_QUEUE_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_RING_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_RING_QUEUE_BUSY__MASK; +} +#define AXXX_CP_STAT_CSF_BUSY__MASK 0x00000400 +#define AXXX_CP_STAT_CSF_BUSY__SHIFT 10 +static inline uint32_t AXXX_CP_STAT_CSF_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_CSF_BUSY__SHIFT) & AXXX_CP_STAT_CSF_BUSY__MASK; +} +#define AXXX_CP_STAT_CSF_ST_BUSY__MASK 0x00000200 +#define AXXX_CP_STAT_CSF_ST_BUSY__SHIFT 9 +static inline uint32_t AXXX_CP_STAT_CSF_ST_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_CSF_ST_BUSY__SHIFT) & AXXX_CP_STAT_CSF_ST_BUSY__MASK; +} +#define AXXX_CP_STAT_EVENT_BUSY__MASK 0x00000100 +#define AXXX_CP_STAT_EVENT_BUSY__SHIFT 8 +static inline uint32_t AXXX_CP_STAT_EVENT_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_EVENT_BUSY__SHIFT) & AXXX_CP_STAT_EVENT_BUSY__MASK; +} +#define AXXX_CP_STAT_CSF_INDIRECT2_BUSY__MASK 0x00000080 +#define AXXX_CP_STAT_CSF_INDIRECT2_BUSY__SHIFT 7 +static inline uint32_t AXXX_CP_STAT_CSF_INDIRECT2_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_CSF_INDIRECT2_BUSY__SHIFT) & AXXX_CP_STAT_CSF_INDIRECT2_BUSY__MASK; +} +#define AXXX_CP_STAT_CSF_INDIRECTS_BUSY__MASK 0x00000040 
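+/*
+ * (All of these generated helpers follow the same pattern - shift the
+ * value into position, then mask it so it cannot spill into
+ * neighbouring fields. E.g. for the field defined just below:
+ * AXXX_CP_STAT_CSF_INDIRECTS_BUSY(1) == (1 << 6) & 0x00000040 == 0x40.)
+ */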
+#define AXXX_CP_STAT_CSF_INDIRECTS_BUSY__SHIFT 6 +static inline uint32_t AXXX_CP_STAT_CSF_INDIRECTS_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_CSF_INDIRECTS_BUSY__SHIFT) & AXXX_CP_STAT_CSF_INDIRECTS_BUSY__MASK; +} +#define AXXX_CP_STAT_CSF_RING_BUSY__MASK 0x00000020 +#define AXXX_CP_STAT_CSF_RING_BUSY__SHIFT 5 +static inline uint32_t AXXX_CP_STAT_CSF_RING_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_CSF_RING_BUSY__SHIFT) & AXXX_CP_STAT_CSF_RING_BUSY__MASK; +} +#define AXXX_CP_STAT_RCIU_BUSY__MASK 0x00000010 +#define AXXX_CP_STAT_RCIU_BUSY__SHIFT 4 +static inline uint32_t AXXX_CP_STAT_RCIU_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_RCIU_BUSY__SHIFT) & AXXX_CP_STAT_RCIU_BUSY__MASK; +} +#define AXXX_CP_STAT_RBIU_BUSY__MASK 0x00000008 +#define AXXX_CP_STAT_RBIU_BUSY__SHIFT 3 +static inline uint32_t AXXX_CP_STAT_RBIU_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_RBIU_BUSY__SHIFT) & AXXX_CP_STAT_RBIU_BUSY__MASK; +} +#define AXXX_CP_STAT_MIU_RD_RETURN_BUSY__MASK 0x00000004 +#define AXXX_CP_STAT_MIU_RD_RETURN_BUSY__SHIFT 2 +static inline uint32_t AXXX_CP_STAT_MIU_RD_RETURN_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_MIU_RD_RETURN_BUSY__SHIFT) & AXXX_CP_STAT_MIU_RD_RETURN_BUSY__MASK; +} +#define AXXX_CP_STAT_MIU_RD_REQ_BUSY__MASK 0x00000002 +#define AXXX_CP_STAT_MIU_RD_REQ_BUSY__SHIFT 1 +static inline uint32_t AXXX_CP_STAT_MIU_RD_REQ_BUSY(uint32_t val) +{ + return ((val) << AXXX_CP_STAT_MIU_RD_REQ_BUSY__SHIFT) & AXXX_CP_STAT_MIU_RD_REQ_BUSY__MASK; +} +#define AXXX_CP_STAT_MIU_WR_BUSY 0x00000001 + +#define REG_AXXX_CP_SCRATCH_REG0 0x00000578 + +#define REG_AXXX_CP_SCRATCH_REG1 0x00000579 + +#define REG_AXXX_CP_SCRATCH_REG2 0x0000057a + +#define REG_AXXX_CP_SCRATCH_REG3 0x0000057b + +#define REG_AXXX_CP_SCRATCH_REG4 0x0000057c + +#define REG_AXXX_CP_SCRATCH_REG5 0x0000057d + +#define REG_AXXX_CP_SCRATCH_REG6 0x0000057e + +#define REG_AXXX_CP_SCRATCH_REG7 0x0000057f + +#define REG_AXXX_CP_ME_VS_EVENT_SRC 0x00000600 + +#define REG_AXXX_CP_ME_VS_EVENT_ADDR 0x00000601 + +#define REG_AXXX_CP_ME_VS_EVENT_DATA 0x00000602 + +#define REG_AXXX_CP_ME_VS_EVENT_ADDR_SWM 0x00000603 + +#define REG_AXXX_CP_ME_VS_EVENT_DATA_SWM 0x00000604 + +#define REG_AXXX_CP_ME_PS_EVENT_SRC 0x00000605 + +#define REG_AXXX_CP_ME_PS_EVENT_ADDR 0x00000606 + +#define REG_AXXX_CP_ME_PS_EVENT_DATA 0x00000607 + +#define REG_AXXX_CP_ME_PS_EVENT_ADDR_SWM 0x00000608 + +#define REG_AXXX_CP_ME_PS_EVENT_DATA_SWM 0x00000609 + +#define REG_AXXX_CP_ME_CF_EVENT_SRC 0x0000060a + +#define REG_AXXX_CP_ME_CF_EVENT_ADDR 0x0000060b + +#define REG_AXXX_CP_ME_CF_EVENT_DATA 0x0000060c + +#define REG_AXXX_CP_ME_NRT_ADDR 0x0000060d + +#define REG_AXXX_CP_ME_NRT_DATA 0x0000060e + +#define REG_AXXX_CP_ME_VS_FETCH_DONE_SRC 0x00000612 + +#define REG_AXXX_CP_ME_VS_FETCH_DONE_ADDR 0x00000613 + +#define REG_AXXX_CP_ME_VS_FETCH_DONE_DATA 0x00000614 + + +#endif /* ADRENO_COMMON_XML */ diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c new file mode 100644 index 000000000..ed1e0c650 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -0,0 +1,746 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013-2014 Red Hat + * Author: Rob Clark + * + * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved. 
+ */ + +#include "adreno_gpu.h" + +bool hang_debug = false; +MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)"); +module_param_named(hang_debug, hang_debug, bool, 0600); + +bool snapshot_debugbus = false; +MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)"); +module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600); + +bool allow_vram_carveout = false; +MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU"); +module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600); + +static const struct adreno_info gpulist[] = { + { + .rev = ADRENO_REV(2, 0, 0, 0), + .revn = 200, + .name = "A200", + .fw = { + [ADRENO_FW_PM4] = "yamato_pm4.fw", + [ADRENO_FW_PFP] = "yamato_pfp.fw", + }, + .gmem = SZ_256K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a2xx_gpu_init, + }, { /* a200 on i.mx51 has only 128kib gmem */ + .rev = ADRENO_REV(2, 0, 0, 1), + .revn = 201, + .name = "A200", + .fw = { + [ADRENO_FW_PM4] = "yamato_pm4.fw", + [ADRENO_FW_PFP] = "yamato_pfp.fw", + }, + .gmem = SZ_128K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a2xx_gpu_init, + }, { + .rev = ADRENO_REV(2, 2, 0, ANY_ID), + .revn = 220, + .name = "A220", + .fw = { + [ADRENO_FW_PM4] = "leia_pm4_470.fw", + [ADRENO_FW_PFP] = "leia_pfp_470.fw", + }, + .gmem = SZ_512K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a2xx_gpu_init, + }, { + .rev = ADRENO_REV(3, 0, 5, ANY_ID), + .revn = 305, + .name = "A305", + .fw = { + [ADRENO_FW_PM4] = "a300_pm4.fw", + [ADRENO_FW_PFP] = "a300_pfp.fw", + }, + .gmem = SZ_256K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a3xx_gpu_init, + }, { + .rev = ADRENO_REV(3, 0, 6, 0), + .revn = 307, /* because a305c is revn==306 */ + .name = "A306", + .fw = { + [ADRENO_FW_PM4] = "a300_pm4.fw", + [ADRENO_FW_PFP] = "a300_pfp.fw", + }, + .gmem = SZ_128K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a3xx_gpu_init, + }, { + .rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID), + .revn = 320, + .name = "A320", + .fw = { + [ADRENO_FW_PM4] = "a300_pm4.fw", + [ADRENO_FW_PFP] = "a300_pfp.fw", + }, + .gmem = SZ_512K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a3xx_gpu_init, + }, { + .rev = ADRENO_REV(3, 3, 0, ANY_ID), + .revn = 330, + .name = "A330", + .fw = { + [ADRENO_FW_PM4] = "a330_pm4.fw", + [ADRENO_FW_PFP] = "a330_pfp.fw", + }, + .gmem = SZ_1M, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a3xx_gpu_init, + }, { + .rev = ADRENO_REV(4, 0, 5, ANY_ID), + .revn = 405, + .name = "A405", + .fw = { + [ADRENO_FW_PM4] = "a420_pm4.fw", + [ADRENO_FW_PFP] = "a420_pfp.fw", + }, + .gmem = SZ_256K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a4xx_gpu_init, + }, { + .rev = ADRENO_REV(4, 2, 0, ANY_ID), + .revn = 420, + .name = "A420", + .fw = { + [ADRENO_FW_PM4] = "a420_pm4.fw", + [ADRENO_FW_PFP] = "a420_pfp.fw", + }, + .gmem = (SZ_1M + SZ_512K), + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a4xx_gpu_init, + }, { + .rev = ADRENO_REV(4, 3, 0, ANY_ID), + .revn = 430, + .name = "A430", + .fw = { + [ADRENO_FW_PM4] = "a420_pm4.fw", + [ADRENO_FW_PFP] = "a420_pfp.fw", + }, + .gmem = (SZ_1M + SZ_512K), + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a4xx_gpu_init, + }, { + .rev = ADRENO_REV(5, 0, 6, ANY_ID), + .revn = 506, + .name = "A506", + .fw = { + [ADRENO_FW_PM4] = "a530_pm4.fw", + [ADRENO_FW_PFP] = "a530_pfp.fw", + }, + .gmem = (SZ_128K + SZ_8K), + /* + * Increase inactive period to 250 to avoid bouncing + * the GDSC 
which appears to make it grumpy + */ + .inactive_period = 250, + .quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI | + ADRENO_QUIRK_LMLOADKILL_DISABLE, + .init = a5xx_gpu_init, + .zapfw = "a506_zap.mdt", + }, { + .rev = ADRENO_REV(5, 0, 8, ANY_ID), + .revn = 508, + .name = "A508", + .fw = { + [ADRENO_FW_PM4] = "a530_pm4.fw", + [ADRENO_FW_PFP] = "a530_pfp.fw", + }, + .gmem = (SZ_128K + SZ_8K), + /* + * Increase inactive period to 250 to avoid bouncing + * the GDSC which appears to make it grumpy + */ + .inactive_period = 250, + .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE, + .init = a5xx_gpu_init, + .zapfw = "a508_zap.mdt", + }, { + .rev = ADRENO_REV(5, 0, 9, ANY_ID), + .revn = 509, + .name = "A509", + .fw = { + [ADRENO_FW_PM4] = "a530_pm4.fw", + [ADRENO_FW_PFP] = "a530_pfp.fw", + }, + .gmem = (SZ_256K + SZ_16K), + /* + * Increase inactive period to 250 to avoid bouncing + * the GDSC which appears to make it grumpy + */ + .inactive_period = 250, + .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE, + .init = a5xx_gpu_init, + /* Adreno 509 uses the same ZAP as 512 */ + .zapfw = "a512_zap.mdt", + }, { + .rev = ADRENO_REV(5, 1, 0, ANY_ID), + .revn = 510, + .name = "A510", + .fw = { + [ADRENO_FW_PM4] = "a530_pm4.fw", + [ADRENO_FW_PFP] = "a530_pfp.fw", + }, + .gmem = SZ_256K, + /* + * Increase inactive period to 250 to avoid bouncing + * the GDSC which appears to make it grumpy + */ + .inactive_period = 250, + .init = a5xx_gpu_init, + }, { + .rev = ADRENO_REV(5, 1, 2, ANY_ID), + .revn = 512, + .name = "A512", + .fw = { + [ADRENO_FW_PM4] = "a530_pm4.fw", + [ADRENO_FW_PFP] = "a530_pfp.fw", + }, + .gmem = (SZ_256K + SZ_16K), + /* + * Increase inactive period to 250 to avoid bouncing + * the GDSC which appears to make it grumpy + */ + .inactive_period = 250, + .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE, + .init = a5xx_gpu_init, + .zapfw = "a512_zap.mdt", + }, { + .rev = ADRENO_REV(5, 3, 0, 2), + .revn = 530, + .name = "A530", + .fw = { + [ADRENO_FW_PM4] = "a530_pm4.fw", + [ADRENO_FW_PFP] = "a530_pfp.fw", + [ADRENO_FW_GPMU] = "a530v3_gpmu.fw2", + }, + .gmem = SZ_1M, + /* + * Increase inactive period to 250 to avoid bouncing + * the GDSC which appears to make it grumpy + */ + .inactive_period = 250, + .quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI | + ADRENO_QUIRK_FAULT_DETECT_MASK, + .init = a5xx_gpu_init, + .zapfw = "a530_zap.mdt", + }, { + .rev = ADRENO_REV(5, 4, 0, ANY_ID), + .revn = 540, + .name = "A540", + .fw = { + [ADRENO_FW_PM4] = "a530_pm4.fw", + [ADRENO_FW_PFP] = "a530_pfp.fw", + [ADRENO_FW_GPMU] = "a540_gpmu.fw2", + }, + .gmem = SZ_1M, + /* + * Increase inactive period to 250 to avoid bouncing + * the GDSC which appears to make it grumpy + */ + .inactive_period = 250, + .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE, + .init = a5xx_gpu_init, + .zapfw = "a540_zap.mdt", + }, { + .rev = ADRENO_REV(6, 1, 8, ANY_ID), + .revn = 618, + .name = "A618", + .fw = { + [ADRENO_FW_SQE] = "a630_sqe.fw", + [ADRENO_FW_GMU] = "a630_gmu.bin", + }, + .gmem = SZ_512K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a6xx_gpu_init, + }, { + .rev = ADRENO_REV(6, 1, 9, ANY_ID), + .revn = 619, + .name = "A619", + .fw = { + [ADRENO_FW_SQE] = "a630_sqe.fw", + [ADRENO_FW_GMU] = "a619_gmu.bin", + }, + .gmem = SZ_512K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a6xx_gpu_init, + .zapfw = "a615_zap.mdt", + .hwcg = a615_hwcg, + }, { + .rev = ADRENO_REV(6, 3, 0, ANY_ID), + .revn = 630, + .name = "A630", + .fw = { + [ADRENO_FW_SQE] = "a630_sqe.fw", + [ADRENO_FW_GMU] = "a630_gmu.bin", + }, + .gmem = SZ_1M, + .inactive_period = 
DRM_MSM_INACTIVE_PERIOD, + .init = a6xx_gpu_init, + .zapfw = "a630_zap.mdt", + .hwcg = a630_hwcg, + }, { + .rev = ADRENO_REV(6, 4, 0, ANY_ID), + .revn = 640, + .name = "A640", + .fw = { + [ADRENO_FW_SQE] = "a630_sqe.fw", + [ADRENO_FW_GMU] = "a640_gmu.bin", + }, + .gmem = SZ_1M, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a6xx_gpu_init, + .zapfw = "a640_zap.mdt", + .hwcg = a640_hwcg, + }, { + .rev = ADRENO_REV(6, 5, 0, ANY_ID), + .revn = 650, + .name = "A650", + .fw = { + [ADRENO_FW_SQE] = "a650_sqe.fw", + [ADRENO_FW_GMU] = "a650_gmu.bin", + }, + .gmem = SZ_1M + SZ_128K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a6xx_gpu_init, + .zapfw = "a650_zap.mdt", + .hwcg = a650_hwcg, + .address_space_size = SZ_16G, + }, { + .rev = ADRENO_REV(6, 6, 0, ANY_ID), + .revn = 660, + .name = "A660", + .fw = { + [ADRENO_FW_SQE] = "a660_sqe.fw", + [ADRENO_FW_GMU] = "a660_gmu.bin", + }, + .gmem = SZ_1M + SZ_512K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a6xx_gpu_init, + .zapfw = "a660_zap.mdt", + .hwcg = a660_hwcg, + .address_space_size = SZ_16G, + }, { + .rev = ADRENO_REV(6, 3, 5, ANY_ID), + .fw = { + [ADRENO_FW_SQE] = "a660_sqe.fw", + [ADRENO_FW_GMU] = "a660_gmu.bin", + }, + .gmem = SZ_512K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a6xx_gpu_init, + .hwcg = a660_hwcg, + .address_space_size = SZ_16G, + }, { + .rev = ADRENO_REV(6, 8, 0, ANY_ID), + .revn = 680, + .name = "A680", + .fw = { + [ADRENO_FW_SQE] = "a630_sqe.fw", + [ADRENO_FW_GMU] = "a640_gmu.bin", + }, + .gmem = SZ_2M, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a6xx_gpu_init, + .zapfw = "a640_zap.mdt", + .hwcg = a640_hwcg, + }, +}; + +MODULE_FIRMWARE("qcom/a300_pm4.fw"); +MODULE_FIRMWARE("qcom/a300_pfp.fw"); +MODULE_FIRMWARE("qcom/a330_pm4.fw"); +MODULE_FIRMWARE("qcom/a330_pfp.fw"); +MODULE_FIRMWARE("qcom/a420_pm4.fw"); +MODULE_FIRMWARE("qcom/a420_pfp.fw"); +MODULE_FIRMWARE("qcom/a530_pm4.fw"); +MODULE_FIRMWARE("qcom/a530_pfp.fw"); +MODULE_FIRMWARE("qcom/a530v3_gpmu.fw2"); +MODULE_FIRMWARE("qcom/a530_zap.mdt"); +MODULE_FIRMWARE("qcom/a530_zap.b00"); +MODULE_FIRMWARE("qcom/a530_zap.b01"); +MODULE_FIRMWARE("qcom/a530_zap.b02"); +MODULE_FIRMWARE("qcom/a619_gmu.bin"); +MODULE_FIRMWARE("qcom/a630_sqe.fw"); +MODULE_FIRMWARE("qcom/a630_gmu.bin"); +MODULE_FIRMWARE("qcom/a630_zap.mbn"); + +static inline bool _rev_match(uint8_t entry, uint8_t id) +{ + return (entry == ANY_ID) || (entry == id); +} + +bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2) +{ + + return _rev_match(rev1.core, rev2.core) && + _rev_match(rev1.major, rev2.major) && + _rev_match(rev1.minor, rev2.minor) && + _rev_match(rev1.patchid, rev2.patchid); +} + +const struct adreno_info *adreno_info(struct adreno_rev rev) +{ + int i; + + /* identify gpu: */ + for (i = 0; i < ARRAY_SIZE(gpulist); i++) { + const struct adreno_info *info = &gpulist[i]; + if (adreno_cmp_rev(info->rev, rev)) + return info; + } + + return NULL; +} + +struct msm_gpu *adreno_load_gpu(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + struct platform_device *pdev = priv->gpu_pdev; + struct msm_gpu *gpu = NULL; + struct adreno_gpu *adreno_gpu; + int ret; + + if (pdev) + gpu = dev_to_gpu(&pdev->dev); + + if (!gpu) { + dev_err_once(dev->dev, "no GPU device was found\n"); + return NULL; + } + + adreno_gpu = to_adreno_gpu(gpu); + + /* + * The number one reason for HW init to fail is if the firmware isn't + * loaded yet. 
Try that first and don't bother continuing on + * otherwise + */ + + ret = adreno_load_fw(adreno_gpu); + if (ret) + return NULL; + + /* + * Now that we have firmware loaded, and are ready to begin + * booting the gpu, go ahead and enable runpm: + */ + pm_runtime_enable(&pdev->dev); + + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); + DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret); + goto err_disable_rpm; + } + + mutex_lock(&gpu->lock); + ret = msm_gpu_hw_init(gpu); + mutex_unlock(&gpu->lock); + if (ret) { + DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret); + goto err_put_rpm; + } + + pm_runtime_put_autosuspend(&pdev->dev); + +#ifdef CONFIG_DEBUG_FS + if (gpu->funcs->debugfs_init) { + gpu->funcs->debugfs_init(gpu, dev->primary); + gpu->funcs->debugfs_init(gpu, dev->render); + } +#endif + + return gpu; + +err_put_rpm: + pm_runtime_put_sync_suspend(&pdev->dev); +err_disable_rpm: + pm_runtime_disable(&pdev->dev); + + return NULL; +} + +static int find_chipid(struct device *dev, struct adreno_rev *rev) +{ + struct device_node *node = dev->of_node; + const char *compat; + int ret; + u32 chipid; + + /* first search the compat strings for qcom,adreno-XYZ.W: */ + ret = of_property_read_string_index(node, "compatible", 0, &compat); + if (ret == 0) { + unsigned int r, patch; + + if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2 || + sscanf(compat, "amd,imageon-%u.%u", &r, &patch) == 2) { + rev->core = r / 100; + r %= 100; + rev->major = r / 10; + r %= 10; + rev->minor = r; + rev->patchid = patch; + + return 0; + } + } + + /* and if that fails, fall back to legacy "qcom,chipid" property: */ + ret = of_property_read_u32(node, "qcom,chipid", &chipid); + if (ret) { + DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret); + return ret; + } + + rev->core = (chipid >> 24) & 0xff; + rev->major = (chipid >> 16) & 0xff; + rev->minor = (chipid >> 8) & 0xff; + rev->patchid = (chipid & 0xff); + + dev_warn(dev, "Using legacy qcom,chipid binding!\n"); + dev_warn(dev, "Use compatible qcom,adreno-%u%u%u.%u instead.\n", + rev->core, rev->major, rev->minor, rev->patchid); + + return 0; +} + +static int adreno_bind(struct device *dev, struct device *master, void *data) +{ + static struct adreno_platform_config config = {}; + const struct adreno_info *info; + struct msm_drm_private *priv = dev_get_drvdata(master); + struct drm_device *drm = priv->dev; + struct msm_gpu *gpu; + int ret; + + ret = find_chipid(dev, &config.rev); + if (ret) + return ret; + + dev->platform_data = &config; + priv->gpu_pdev = to_platform_device(dev); + + info = adreno_info(config.rev); + + if (!info) { + dev_warn(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n", + config.rev.core, config.rev.major, + config.rev.minor, config.rev.patchid); + return -ENXIO; + } + + DBG("Found GPU: %u.%u.%u.%u", config.rev.core, config.rev.major, + config.rev.minor, config.rev.patchid); + + priv->is_a2xx = config.rev.core == 2; + priv->has_cached_coherent = config.rev.core >= 6; + + gpu = info->init(drm); + if (IS_ERR(gpu)) { + dev_warn(drm->dev, "failed to load adreno gpu\n"); + return PTR_ERR(gpu); + } + + return 0; +} + +static int adreno_system_suspend(struct device *dev); +static void adreno_unbind(struct device *dev, struct device *master, + void *data) +{ + struct msm_drm_private *priv = dev_get_drvdata(master); + struct msm_gpu *gpu = dev_to_gpu(dev); + + if (pm_runtime_enabled(dev)) + WARN_ON_ONCE(adreno_system_suspend(dev)); + gpu->funcs->destroy(gpu); + + priv->gpu_pdev = 
NULL; +} + +static const struct component_ops a3xx_ops = { + .bind = adreno_bind, + .unbind = adreno_unbind, +}; + +static void adreno_device_register_headless(void) +{ + /* on imx5, we don't have a top-level mdp/dpu node + * this creates a dummy node for the driver for that case + */ + struct platform_device_info dummy_info = { + .parent = NULL, + .name = "msm", + .id = -1, + .res = NULL, + .num_res = 0, + .data = NULL, + .size_data = 0, + .dma_mask = ~0, + }; + platform_device_register_full(&dummy_info); +} + +static int adreno_probe(struct platform_device *pdev) +{ + + int ret; + + ret = component_add(&pdev->dev, &a3xx_ops); + if (ret) + return ret; + + if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon")) + adreno_device_register_headless(); + + return 0; +} + +static int adreno_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &a3xx_ops); + return 0; +} + +static void adreno_shutdown(struct platform_device *pdev) +{ + WARN_ON_ONCE(adreno_system_suspend(&pdev->dev)); +} + +static const struct of_device_id dt_match[] = { + { .compatible = "qcom,adreno" }, + { .compatible = "qcom,adreno-3xx" }, + /* for compatibility with imx5 gpu: */ + { .compatible = "amd,imageon" }, + /* for backwards compat w/ downstream kgsl DT files: */ + { .compatible = "qcom,kgsl-3d0" }, + {} +}; + +static int adreno_runtime_resume(struct device *dev) +{ + struct msm_gpu *gpu = dev_to_gpu(dev); + + return gpu->funcs->pm_resume(gpu); +} + +static int adreno_runtime_suspend(struct device *dev) +{ + struct msm_gpu *gpu = dev_to_gpu(dev); + + /* + * We should be holding a runpm ref, which will prevent + * runtime suspend. In the system suspend path, we've + * already waited for active jobs to complete. + */ + WARN_ON_ONCE(gpu->active_submits); + + return gpu->funcs->pm_suspend(gpu); +} + +static void suspend_scheduler(struct msm_gpu *gpu) +{ + int i; + + /* + * Shut down the scheduler before we force suspend, so that + * suspend isn't racing with scheduler kthread feeding us + * more work. + * + * Note, we just want to park the thread, and let any jobs + * that are already on the hw queue complete normally, as + * opposed to the drm_sched_stop() path used for handling + * faulting/timed-out jobs. We can't really cancel any jobs + * already on the hw queue without racing with the GPU. 
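+	 *
+	 * (kthread_park() does not return until the thread has actually
+	 * reached its parked state, so once this loop finishes no
+	 * scheduler thread can feed new jobs to the hardware.)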
+ */
+	for (i = 0; i < gpu->nr_rings; i++) {
+		struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
+		kthread_park(sched->thread);
+	}
+}
+
+static void resume_scheduler(struct msm_gpu *gpu)
+{
+	int i;
+
+	for (i = 0; i < gpu->nr_rings; i++) {
+		struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
+		kthread_unpark(sched->thread);
+	}
+}
+
+static int adreno_system_suspend(struct device *dev)
+{
+	struct msm_gpu *gpu = dev_to_gpu(dev);
+	int remaining, ret;
+
+	if (!gpu)
+		return 0;
+
+	suspend_scheduler(gpu);
+
+	remaining = wait_event_timeout(gpu->retire_event,
+				       gpu->active_submits == 0,
+				       msecs_to_jiffies(1000));
+	if (remaining == 0) {
+		dev_err(dev, "Timeout waiting for GPU to suspend\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ret = pm_runtime_force_suspend(dev);
+out:
+	if (ret)
+		resume_scheduler(gpu);
+
+	return ret;
+}
+
+static int adreno_system_resume(struct device *dev)
+{
+	struct msm_gpu *gpu = dev_to_gpu(dev);
+
+	if (!gpu)
+		return 0;
+
+	resume_scheduler(gpu);
+	return pm_runtime_force_resume(dev);
+}
+
+static const struct dev_pm_ops adreno_pm_ops = {
+	SYSTEM_SLEEP_PM_OPS(adreno_system_suspend, adreno_system_resume)
+	RUNTIME_PM_OPS(adreno_runtime_suspend, adreno_runtime_resume, NULL)
+};
+
+static struct platform_driver adreno_driver = {
+	.probe = adreno_probe,
+	.remove = adreno_remove,
+	.shutdown = adreno_shutdown,
+	.driver = {
+		.name = "adreno",
+		.of_match_table = dt_match,
+		.pm = &adreno_pm_ops,
+	},
+};
+
+void __init adreno_register(void)
+{
+	platform_driver_register(&adreno_driver);
+}
+
+void __exit adreno_unregister(void)
+{
+	platform_driver_unregister(&adreno_driver);
+}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
new file mode 100644
index 000000000..dfd4eec21
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -0,0 +1,1095 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/ascii85.h>
+#include <linux/interconnect.h>
+#include <linux/qcom_scm.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/mdt_loader.h>
+#include <linux/nvmem-consumer.h>
+#include <soc/qcom/ocmem.h>
+#include "adreno_gpu.h"
+#include "a6xx_gpu.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+static u64 address_space_size = 0;
+MODULE_PARM_DESC(address_space_size, "Override for size of processes private GPU address space");
+module_param(address_space_size, ullong, 0600);
+
+static bool zap_available = true;
+
+static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
+		u32 pasid)
+{
+	struct device *dev = &gpu->pdev->dev;
+	const struct firmware *fw;
+	const char *signed_fwname = NULL;
+	struct device_node *np, *mem_np;
+	struct resource r;
+	phys_addr_t mem_phys;
+	ssize_t mem_size;
+	void *mem_region = NULL;
+	int ret;
+
+	if (!IS_ENABLED(CONFIG_ARCH_QCOM)) {
+		zap_available = false;
+		return -EINVAL;
+	}
+
+	np = of_get_child_by_name(dev->of_node, "zap-shader");
+	if (!np) {
+		zap_available = false;
+		return -ENODEV;
+	}
+
+	mem_np = of_parse_phandle(np, "memory-region", 0);
+	of_node_put(np);
+	if (!mem_np) {
+		zap_available = false;
+		return -EINVAL;
+	}
+
+	ret = of_address_to_resource(mem_np, 0, &r);
+	of_node_put(mem_np);
+	if (ret)
+		return ret;
+
+	mem_phys = r.start;
+
+	/*
+	 * Check for a firmware-name property.  This is the new scheme
+	 * to handle firmware that may be signed with device specific
+	 * keys, allowing us to have a different zap fw path for different
+	 * devices.
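+	 *
+	 * (A purely illustrative sketch of such a node - the label and
+	 * firmware path here are hypothetical:
+	 *
+	 *    zap-shader {
+	 *        memory-region = <&gpu_zap_mem>;
+	 *        firmware-name = "qcom/myboard/a630_zap.mbn";
+	 *    };)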
+ * + * If the firmware-name property is found, we bypass the + * adreno_request_fw() mechanism, because we don't need to handle + * the /lib/firmware/qcom/... vs /lib/firmware/... case. + * + * If the firmware-name property is not found, for backwards + * compatibility we fall back to the fwname from the gpulist + * table. + */ + of_property_read_string_index(np, "firmware-name", 0, &signed_fwname); + if (signed_fwname) { + fwname = signed_fwname; + ret = request_firmware_direct(&fw, fwname, gpu->dev->dev); + if (ret) + fw = ERR_PTR(ret); + } else if (fwname) { + /* Request the MDT file from the default location: */ + fw = adreno_request_fw(to_adreno_gpu(gpu), fwname); + } else { + /* + * For new targets, we require the firmware-name property, + * if a zap-shader is required, rather than falling back + * to a firmware name specified in gpulist. + * + * Because the firmware is signed with a (potentially) + * device specific key, having the name come from gpulist + * was a bad idea, and is only provided for backwards + * compatibility for older targets. + */ + return -ENODEV; + } + + if (IS_ERR(fw)) { + DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname); + return PTR_ERR(fw); + } + + /* Figure out how much memory we need */ + mem_size = qcom_mdt_get_size(fw); + if (mem_size < 0) { + ret = mem_size; + goto out; + } + + if (mem_size > resource_size(&r)) { + DRM_DEV_ERROR(dev, + "memory region is too small to load the MDT\n"); + ret = -E2BIG; + goto out; + } + + /* Allocate memory for the firmware image */ + mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC); + if (!mem_region) { + ret = -ENOMEM; + goto out; + } + + /* + * Load the rest of the MDT + * + * Note that we could be dealing with two different paths, since + * with upstream linux-firmware it would be in a qcom/ subdir.. + * adreno_request_fw() handles this, but qcom_mdt_load() does + * not. 
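+	 * (qcom_mdt_load() also derives the names of the split image
+	 * segments - <fwname>.b00, <fwname>.b01, etc. - from the name it
+	 * is given, so it must match where the MDT was actually found.)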
But since we've already gotten through adreno_request_fw() + * we know which of the two cases it is: + */ + if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) { + ret = qcom_mdt_load(dev, fw, fwname, pasid, + mem_region, mem_phys, mem_size, NULL); + } else { + char *newname; + + newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname); + + ret = qcom_mdt_load(dev, fw, newname, pasid, + mem_region, mem_phys, mem_size, NULL); + kfree(newname); + } + if (ret) + goto out; + + /* Send the image to the secure world */ + ret = qcom_scm_pas_auth_and_reset(pasid); + + /* + * If the scm call returns -EOPNOTSUPP we assume that this target + * doesn't need/support the zap shader so quietly fail + */ + if (ret == -EOPNOTSUPP) + zap_available = false; + else if (ret) + DRM_DEV_ERROR(dev, "Unable to authorize the image\n"); + +out: + if (mem_region) + memunmap(mem_region); + + release_firmware(fw); + + return ret; +} + +int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct platform_device *pdev = gpu->pdev; + + /* Short cut if we determine the zap shader isn't available/needed */ + if (!zap_available) + return -ENODEV; + + /* We need SCM to be able to load the firmware */ + if (!qcom_scm_is_available()) { + DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n"); + return -EPROBE_DEFER; + } + + return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid); +} + +void adreno_set_llc_attributes(struct iommu_domain *iommu) +{ + iommu_set_pgtable_quirks(iommu, IO_PGTABLE_QUIRK_ARM_OUTER_WBWA); +} + +struct msm_gem_address_space * +adreno_iommu_create_address_space(struct msm_gpu *gpu, + struct platform_device *pdev) +{ + struct iommu_domain *iommu; + struct msm_mmu *mmu; + struct msm_gem_address_space *aspace; + u64 start, size; + + iommu = iommu_domain_alloc(&platform_bus_type); + if (!iommu) + return NULL; + + mmu = msm_iommu_new(&pdev->dev, iommu); + if (IS_ERR(mmu)) { + iommu_domain_free(iommu); + return ERR_CAST(mmu); + } + + /* + * Use the aperture start or SZ_16M, whichever is greater. This will + * ensure that we align with the allocated pagetable range while still + * allowing room in the lower 32 bits for GMEM and whatnot + */ + start = max_t(u64, SZ_16M, iommu->geometry.aperture_start); + size = iommu->geometry.aperture_end - start + 1; + + aspace = msm_gem_address_space_create(mmu, "gpu", + start & GENMASK_ULL(48, 0), size); + + if (IS_ERR(aspace) && !IS_ERR(mmu)) + mmu->funcs->destroy(mmu); + + return aspace; +} + +u64 adreno_private_address_space_size(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + + if (address_space_size) + return address_space_size; + + if (adreno_gpu->info->address_space_size) + return adreno_gpu->info->address_space_size; + + return SZ_4G; +} + +int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx, + uint32_t param, uint64_t *value, uint32_t *len) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + + /* No pointer params yet */ + if (*len != 0) + return -EINVAL; + + switch (param) { + case MSM_PARAM_GPU_ID: + *value = adreno_gpu->info->revn; + return 0; + case MSM_PARAM_GMEM_SIZE: + *value = adreno_gpu->gmem; + return 0; + case MSM_PARAM_GMEM_BASE: + *value = !adreno_is_a650_family(adreno_gpu) ? 
0x100000 : 0; + return 0; + case MSM_PARAM_CHIP_ID: + *value = (uint64_t)adreno_gpu->rev.patchid | + ((uint64_t)adreno_gpu->rev.minor << 8) | + ((uint64_t)adreno_gpu->rev.major << 16) | + ((uint64_t)adreno_gpu->rev.core << 24); + if (!adreno_gpu->info->revn) + *value |= ((uint64_t) adreno_gpu->speedbin) << 32; + return 0; + case MSM_PARAM_MAX_FREQ: + *value = adreno_gpu->base.fast_rate; + return 0; + case MSM_PARAM_TIMESTAMP: + if (adreno_gpu->funcs->get_timestamp) { + int ret; + + pm_runtime_get_sync(&gpu->pdev->dev); + ret = adreno_gpu->funcs->get_timestamp(gpu, value); + pm_runtime_put_autosuspend(&gpu->pdev->dev); + + return ret; + } + return -EINVAL; + case MSM_PARAM_PRIORITIES: + *value = gpu->nr_rings * NR_SCHED_PRIORITIES; + return 0; + case MSM_PARAM_PP_PGTABLE: + *value = 0; + return 0; + case MSM_PARAM_FAULTS: + if (ctx->aspace) + *value = gpu->global_faults + ctx->aspace->faults; + else + *value = gpu->global_faults; + return 0; + case MSM_PARAM_SUSPENDS: + *value = gpu->suspend_count; + return 0; + case MSM_PARAM_VA_START: + if (ctx->aspace == gpu->aspace) + return -EINVAL; + *value = ctx->aspace->va_start; + return 0; + case MSM_PARAM_VA_SIZE: + if (ctx->aspace == gpu->aspace) + return -EINVAL; + *value = ctx->aspace->va_size; + return 0; + default: + DBG("%s: invalid param: %u", gpu->name, param); + return -EINVAL; + } +} + +int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx, + uint32_t param, uint64_t value, uint32_t len) +{ + switch (param) { + case MSM_PARAM_COMM: + case MSM_PARAM_CMDLINE: + /* kstrdup_quotable_cmdline() limits to PAGE_SIZE, so + * that should be a reasonable upper bound + */ + if (len > PAGE_SIZE) + return -EINVAL; + break; + default: + if (len != 0) + return -EINVAL; + } + + switch (param) { + case MSM_PARAM_COMM: + case MSM_PARAM_CMDLINE: { + char *str, **paramp; + + str = kmalloc(len + 1, GFP_KERNEL); + if (!str) + return -ENOMEM; + + if (copy_from_user(str, u64_to_user_ptr(value), len)) { + kfree(str); + return -EFAULT; + } + + /* Ensure string is null terminated: */ + str[len] = '\0'; + + mutex_lock(&gpu->lock); + + if (param == MSM_PARAM_COMM) { + paramp = &ctx->comm; + } else { + paramp = &ctx->cmdline; + } + + kfree(*paramp); + *paramp = str; + + mutex_unlock(&gpu->lock); + + return 0; + } + case MSM_PARAM_SYSPROF: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + return msm_file_private_set_sysprof(ctx, gpu, value); + default: + DBG("%s: invalid param: %u", gpu->name, param); + return -EINVAL; + } +} + +const struct firmware * +adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname) +{ + struct drm_device *drm = adreno_gpu->base.dev; + const struct firmware *fw = NULL; + char *newname; + int ret; + + newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname); + if (!newname) + return ERR_PTR(-ENOMEM); + + /* + * Try first to load from qcom/$fwfile using a direct load (to avoid + * a potential timeout waiting for usermode helper) + */ + if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) || + (adreno_gpu->fwloc == FW_LOCATION_NEW)) { + + ret = request_firmware_direct(&fw, newname, drm->dev); + if (!ret) { + DRM_DEV_INFO(drm->dev, "loaded %s from new location\n", + newname); + adreno_gpu->fwloc = FW_LOCATION_NEW; + goto out; + } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) { + DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n", + newname, ret); + fw = ERR_PTR(ret); + goto out; + } + } + + /* + * Then try the legacy location without qcom/ prefix + */ + if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) || + (adreno_gpu->fwloc 
== FW_LOCATION_LEGACY)) { + + ret = request_firmware_direct(&fw, fwname, drm->dev); + if (!ret) { + DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n", + newname); + adreno_gpu->fwloc = FW_LOCATION_LEGACY; + goto out; + } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) { + DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n", + fwname, ret); + fw = ERR_PTR(ret); + goto out; + } + } + + /* + * Finally fall back to request_firmware() for cases where the + * usermode helper is needed (I think mainly android) + */ + if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) || + (adreno_gpu->fwloc == FW_LOCATION_HELPER)) { + + ret = request_firmware(&fw, newname, drm->dev); + if (!ret) { + DRM_DEV_INFO(drm->dev, "loaded %s with helper\n", + newname); + adreno_gpu->fwloc = FW_LOCATION_HELPER; + goto out; + } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) { + DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n", + newname, ret); + fw = ERR_PTR(ret); + goto out; + } + } + + DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname); + fw = ERR_PTR(-ENOENT); +out: + kfree(newname); + return fw; +} + +int adreno_load_fw(struct adreno_gpu *adreno_gpu) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) { + const struct firmware *fw; + + if (!adreno_gpu->info->fw[i]) + continue; + + /* Skip if the firmware has already been loaded */ + if (adreno_gpu->fw[i]) + continue; + + fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->fw[i]); + if (IS_ERR(fw)) + return PTR_ERR(fw); + + adreno_gpu->fw[i] = fw; + } + + return 0; +} + +struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu, + const struct firmware *fw, u64 *iova) +{ + struct drm_gem_object *bo; + void *ptr; + + ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4, + MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova); + + if (IS_ERR(ptr)) + return ERR_CAST(ptr); + + memcpy(ptr, &fw->data[4], fw->size - 4); + + msm_gem_put_vaddr(bo); + + return bo; +} + +int adreno_hw_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + int ret, i; + + VERB("%s", gpu->name); + + ret = adreno_load_fw(adreno_gpu); + if (ret) + return ret; + + for (i = 0; i < gpu->nr_rings; i++) { + struct msm_ringbuffer *ring = gpu->rb[i]; + + if (!ring) + continue; + + ring->cur = ring->start; + ring->next = ring->start; + ring->memptrs->rptr = 0; + + /* Detect and clean up an impossible fence, ie. if GPU managed + * to scribble something invalid, we don't want that to confuse + * us into mistakingly believing that submits have completed. + */ + if (fence_before(ring->fctx->last_fence, ring->memptrs->fence)) { + ring->memptrs->fence = ring->fctx->last_fence; + } + } + + return 0; +} + +/* Use this helper to read rptr, since a430 doesn't update rptr in memory */ +static uint32_t get_rptr(struct adreno_gpu *adreno_gpu, + struct msm_ringbuffer *ring) +{ + struct msm_gpu *gpu = &adreno_gpu->base; + + return gpu->funcs->get_rptr(gpu, ring); +} + +struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu) +{ + return gpu->rb[0]; +} + +void adreno_recover(struct msm_gpu *gpu) +{ + struct drm_device *dev = gpu->dev; + int ret; + + // XXX pm-runtime?? we *need* the device to be off after this + // so maybe continuing to call ->pm_suspend/resume() is better? + + gpu->funcs->pm_suspend(gpu); + gpu->funcs->pm_resume(gpu); + + ret = msm_gpu_hw_init(gpu); + if (ret) { + DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret); + /* hmm, oh well? 
*/ + } +} + +void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg) +{ + uint32_t wptr; + + /* Copy the shadow to the actual register */ + ring->cur = ring->next; + + /* + * Mask wptr value that we calculate to fit in the HW range. This is + * to account for the possibility that the last command fit exactly into + * the ringbuffer and rb->next hasn't wrapped to zero yet + */ + wptr = get_wptr(ring); + + /* ensure writes to ringbuffer have hit system memory: */ + mb(); + + gpu_write(gpu, reg, wptr); +} + +bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + uint32_t wptr = get_wptr(ring); + + /* wait for CP to drain ringbuffer: */ + if (!spin_until(get_rptr(adreno_gpu, ring) == wptr)) + return true; + + /* TODO maybe we need to reset GPU here to recover from hang? */ + DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n", + gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr); + + return false; +} + +int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + int i, count = 0; + + WARN_ON(!mutex_is_locked(&gpu->lock)); + + kref_init(&state->ref); + + ktime_get_real_ts64(&state->time); + + for (i = 0; i < gpu->nr_rings; i++) { + int size = 0, j; + + state->ring[i].fence = gpu->rb[i]->memptrs->fence; + state->ring[i].iova = gpu->rb[i]->iova; + state->ring[i].seqno = gpu->rb[i]->fctx->last_fence; + state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]); + state->ring[i].wptr = get_wptr(gpu->rb[i]); + + /* Copy at least 'wptr' dwords of the data */ + size = state->ring[i].wptr; + + /* After wptr find the last non zero dword to save space */ + for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++) + if (gpu->rb[i]->start[j]) + size = j + 1; + + if (size) { + state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL); + if (state->ring[i].data) { + memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2); + state->ring[i].data_size = size << 2; + } + } + } + + /* Some targets prefer to collect their own registers */ + if (!adreno_gpu->registers) + return 0; + + /* Count the number of registers */ + for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) + count += adreno_gpu->registers[i + 1] - + adreno_gpu->registers[i] + 1; + + state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL); + if (state->registers) { + int pos = 0; + + for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) { + u32 start = adreno_gpu->registers[i]; + u32 end = adreno_gpu->registers[i + 1]; + u32 addr; + + for (addr = start; addr <= end; addr++) { + state->registers[pos++] = addr; + state->registers[pos++] = gpu_read(gpu, addr); + } + } + + state->nr_registers = count; + } + + return 0; +} + +void adreno_gpu_state_destroy(struct msm_gpu_state *state) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(state->ring); i++) + kvfree(state->ring[i].data); + + for (i = 0; state->bos && i < state->nr_bos; i++) + kvfree(state->bos[i].data); + + kfree(state->bos); + kfree(state->comm); + kfree(state->cmd); + kfree(state->registers); +} + +static void adreno_gpu_state_kref_destroy(struct kref *kref) +{ + struct msm_gpu_state *state = container_of(kref, + struct msm_gpu_state, ref); + + adreno_gpu_state_destroy(state); + kfree(state); +} + +int adreno_gpu_state_put(struct msm_gpu_state *state) +{ + if (IS_ERR_OR_NULL(state)) + return 1; + + return kref_put(&state->ref, adreno_gpu_state_kref_destroy); +} + +#if defined(CONFIG_DEBUG_FS) || 
defined(CONFIG_DEV_COREDUMP) + +static char *adreno_gpu_ascii85_encode(u32 *src, size_t len) +{ + void *buf; + size_t buf_itr = 0, buffer_size; + char out[ASCII85_BUFSZ]; + long l; + int i; + + if (!src || !len) + return NULL; + + l = ascii85_encode_len(len); + + /* + * Ascii85 outputs either a 5 byte string or a 1 byte string. So we + * account for the worst case of 5 bytes per dword plus the 1 for '\0' + */ + buffer_size = (l * 5) + 1; + + buf = kvmalloc(buffer_size, GFP_KERNEL); + if (!buf) + return NULL; + + for (i = 0; i < l; i++) + buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s", + ascii85_encode(src[i], out)); + + return buf; +} + +/* len is expected to be in bytes + * + * WARNING: *ptr should be allocated with kvmalloc or friends. It can be free'd + * with kvfree() and replaced with a newly kvmalloc'd buffer on the first call + * when the unencoded raw data is encoded + */ +void adreno_show_object(struct drm_printer *p, void **ptr, int len, + bool *encoded) +{ + if (!*ptr || !len) + return; + + if (!*encoded) { + long datalen, i; + u32 *buf = *ptr; + + /* + * Only dump the non-zero part of the buffer - rarely will + * any data completely fill the entire allocated size of + * the buffer. + */ + for (datalen = 0, i = 0; i < len >> 2; i++) + if (buf[i]) + datalen = ((i + 1) << 2); + + /* + * If we reach here, then the originally captured binary buffer + * will be replaced with the ascii85 encoded string + */ + *ptr = adreno_gpu_ascii85_encode(buf, datalen); + + kvfree(buf); + + *encoded = true; + } + + if (!*ptr) + return; + + drm_puts(p, " data: !!ascii85 |\n"); + drm_puts(p, " "); + + drm_puts(p, *ptr); + + drm_puts(p, "\n"); +} + +void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state, + struct drm_printer *p) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + int i; + + if (IS_ERR_OR_NULL(state)) + return; + + drm_printf(p, "revision: %d (%d.%d.%d.%d)\n", + adreno_gpu->info->revn, adreno_gpu->rev.core, + adreno_gpu->rev.major, adreno_gpu->rev.minor, + adreno_gpu->rev.patchid); + /* + * If this is state collected due to iova fault, so fault related info + * + * TTBR0 would not be zero, so this is a good way to distinguish + */ + if (state->fault_info.ttbr0) { + const struct msm_gpu_fault_info *info = &state->fault_info; + + drm_puts(p, "fault-info:\n"); + drm_printf(p, " - ttbr0=%.16llx\n", info->ttbr0); + drm_printf(p, " - iova=%.16lx\n", info->iova); + drm_printf(p, " - dir=%s\n", info->flags & IOMMU_FAULT_WRITE ? 
"WRITE" : "READ"); + drm_printf(p, " - type=%s\n", info->type); + drm_printf(p, " - source=%s\n", info->block); + } + + drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status); + + drm_puts(p, "ringbuffer:\n"); + + for (i = 0; i < gpu->nr_rings; i++) { + drm_printf(p, " - id: %d\n", i); + drm_printf(p, " iova: 0x%016llx\n", state->ring[i].iova); + drm_printf(p, " last-fence: %u\n", state->ring[i].seqno); + drm_printf(p, " retired-fence: %u\n", state->ring[i].fence); + drm_printf(p, " rptr: %u\n", state->ring[i].rptr); + drm_printf(p, " wptr: %u\n", state->ring[i].wptr); + drm_printf(p, " size: %u\n", MSM_GPU_RINGBUFFER_SZ); + + adreno_show_object(p, &state->ring[i].data, + state->ring[i].data_size, &state->ring[i].encoded); + } + + if (state->bos) { + drm_puts(p, "bos:\n"); + + for (i = 0; i < state->nr_bos; i++) { + drm_printf(p, " - iova: 0x%016llx\n", + state->bos[i].iova); + drm_printf(p, " size: %zd\n", state->bos[i].size); + drm_printf(p, " name: %-32s\n", state->bos[i].name); + + adreno_show_object(p, &state->bos[i].data, + state->bos[i].size, &state->bos[i].encoded); + } + } + + if (state->nr_registers) { + drm_puts(p, "registers:\n"); + + for (i = 0; i < state->nr_registers; i++) { + drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n", + state->registers[i * 2] << 2, + state->registers[(i * 2) + 1]); + } + } +} +#endif + +/* Dump common gpu status and scratch registers on any hang, to make + * the hangcheck logs more useful. The scratch registers seem always + * safe to read when GPU has hung (unlike some other regs, depending + * on how the GPU hung), and they are useful to match up to cmdstream + * dumps when debugging hangs: + */ +void adreno_dump_info(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + int i; + + printk("revision: %d (%d.%d.%d.%d)\n", + adreno_gpu->info->revn, adreno_gpu->rev.core, + adreno_gpu->rev.major, adreno_gpu->rev.minor, + adreno_gpu->rev.patchid); + + for (i = 0; i < gpu->nr_rings; i++) { + struct msm_ringbuffer *ring = gpu->rb[i]; + + printk("rb %d: fence: %d/%d\n", i, + ring->memptrs->fence, + ring->fctx->last_fence); + + printk("rptr: %d\n", get_rptr(adreno_gpu, ring)); + printk("rb wptr: %d\n", get_wptr(ring)); + } +} + +/* would be nice to not have to duplicate the _show() stuff with printk(): */ +void adreno_dump(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + int i; + + if (!adreno_gpu->registers) + return; + + /* dump these out in a form that can be parsed by demsm: */ + printk("IO:region %s 00000000 00020000\n", gpu->name); + for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) { + uint32_t start = adreno_gpu->registers[i]; + uint32_t end = adreno_gpu->registers[i+1]; + uint32_t addr; + + for (addr = start; addr <= end; addr++) { + uint32_t val = gpu_read(gpu, addr); + printk("IO:R %08x %08x\n", addr<<2, val); + } + } +} + +static uint32_t ring_freewords(struct msm_ringbuffer *ring) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu); + uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2; + /* Use ring->next to calculate free size */ + uint32_t wptr = ring->next - ring->start; + uint32_t rptr = get_rptr(adreno_gpu, ring); + return (rptr + (size - 1) - wptr) % size; +} + +void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords) +{ + if (spin_until(ring_freewords(ring) >= ndwords)) + DRM_DEV_ERROR(ring->gpu->dev->dev, + "timeout waiting for space in ringbuffer %d\n", + ring->id); +} + +/* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */ 
+static int adreno_get_legacy_pwrlevels(struct device *dev) +{ + struct device_node *child, *node; + int ret; + + node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels"); + if (!node) { + DRM_DEV_DEBUG(dev, "Could not find the GPU powerlevels\n"); + return -ENXIO; + } + + for_each_child_of_node(node, child) { + unsigned int val; + + ret = of_property_read_u32(child, "qcom,gpu-freq", &val); + if (ret) + continue; + + /* + * Skip the intentionally bogus clock value found at the bottom + * of most legacy frequency tables + */ + if (val != 27000000) + dev_pm_opp_add(dev, val, 0); + } + + of_node_put(node); + + return 0; +} + +static void adreno_get_pwrlevels(struct device *dev, + struct msm_gpu *gpu) +{ + unsigned long freq = ULONG_MAX; + struct dev_pm_opp *opp; + int ret; + + gpu->fast_rate = 0; + + /* You down with OPP? */ + if (!of_find_property(dev->of_node, "operating-points-v2", NULL)) + ret = adreno_get_legacy_pwrlevels(dev); + else { + ret = devm_pm_opp_of_add_table(dev); + if (ret) + DRM_DEV_ERROR(dev, "Unable to set the OPP table\n"); + } + + if (!ret) { + /* Find the fastest defined rate */ + opp = dev_pm_opp_find_freq_floor(dev, &freq); + if (!IS_ERR(opp)) { + gpu->fast_rate = freq; + dev_pm_opp_put(opp); + } + } + + if (!gpu->fast_rate) { + dev_warn(dev, + "Could not find a clock rate. Using a reasonable default\n"); + /* Pick a suitably safe clock speed for any target */ + gpu->fast_rate = 200000000; + } + + DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate); +} + +int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu, + struct adreno_ocmem *adreno_ocmem) +{ + struct ocmem_buf *ocmem_hdl; + struct ocmem *ocmem; + + ocmem = of_get_ocmem(dev); + if (IS_ERR(ocmem)) { + if (PTR_ERR(ocmem) == -ENODEV) { + /* + * Return success since either the ocmem property was + * not specified in device tree, or ocmem support is + * not compiled into the kernel. 
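+		 * (adreno_gpu_ocmem_cleanup() is likewise a no-op when
+		 * nothing was allocated, so both cases unwind safely.)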
+ */ + return 0; + } + + return PTR_ERR(ocmem); + } + + ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->gmem); + if (IS_ERR(ocmem_hdl)) + return PTR_ERR(ocmem_hdl); + + adreno_ocmem->ocmem = ocmem; + adreno_ocmem->base = ocmem_hdl->addr; + adreno_ocmem->hdl = ocmem_hdl; + adreno_gpu->gmem = ocmem_hdl->len; + + return 0; +} + +void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *adreno_ocmem) +{ + if (adreno_ocmem && adreno_ocmem->base) + ocmem_free(adreno_ocmem->ocmem, OCMEM_GRAPHICS, + adreno_ocmem->hdl); +} + +int adreno_read_speedbin(struct device *dev, u32 *speedbin) +{ + return nvmem_cell_read_variable_le_u32(dev, "speed_bin", speedbin); +} + +int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, + struct adreno_gpu *adreno_gpu, + const struct adreno_gpu_funcs *funcs, int nr_rings) +{ + struct device *dev = &pdev->dev; + struct adreno_platform_config *config = dev->platform_data; + struct msm_gpu_config adreno_gpu_config = { 0 }; + struct msm_gpu *gpu = &adreno_gpu->base; + struct adreno_rev *rev = &config->rev; + const char *gpu_name; + u32 speedbin; + + adreno_gpu->funcs = funcs; + adreno_gpu->info = adreno_info(config->rev); + adreno_gpu->gmem = adreno_gpu->info->gmem; + adreno_gpu->revn = adreno_gpu->info->revn; + adreno_gpu->rev = *rev; + + if (adreno_read_speedbin(dev, &speedbin) || !speedbin) + speedbin = 0xffff; + adreno_gpu->speedbin = (uint16_t) (0xffff & speedbin); + + gpu_name = adreno_gpu->info->name; + if (!gpu_name) { + gpu_name = devm_kasprintf(dev, GFP_KERNEL, "%d.%d.%d.%d", + rev->core, rev->major, rev->minor, + rev->patchid); + if (!gpu_name) + return -ENOMEM; + } + + adreno_gpu_config.ioname = "kgsl_3d0_reg_memory"; + + adreno_gpu_config.nr_rings = nr_rings; + + adreno_get_pwrlevels(dev, gpu); + + pm_runtime_set_autosuspend_delay(dev, + adreno_gpu->info->inactive_period); + pm_runtime_use_autosuspend(dev); + + return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base, + gpu_name, &adreno_gpu_config); +} + +void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) +{ + struct msm_gpu *gpu = &adreno_gpu->base; + struct msm_drm_private *priv = gpu->dev ? gpu->dev->dev_private : NULL; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) + release_firmware(adreno_gpu->fw[i]); + + if (priv && pm_runtime_enabled(&priv->gpu_pdev->dev)) + pm_runtime_disable(&priv->gpu_pdev->dev); + + msm_gpu_cleanup(&adreno_gpu->base); +} diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h new file mode 100644 index 000000000..3d78efb06 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -0,0 +1,443 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + * + * Copyright (c) 2014,2017, 2019 The Linux Foundation. All rights reserved. 
+ */
+
+#ifndef __ADRENO_GPU_H__
+#define __ADRENO_GPU_H__
+
+#include <linux/firmware.h>
+#include <linux/iopoll.h>
+
+#include "msm_gpu.h"
+
+#include "adreno_common.xml.h"
+#include "adreno_pm4.xml.h"
+
+extern bool snapshot_debugbus;
+extern bool allow_vram_carveout;
+
+enum {
+	ADRENO_FW_PM4 = 0,
+	ADRENO_FW_SQE = 0, /* a6xx */
+	ADRENO_FW_PFP = 1,
+	ADRENO_FW_GMU = 1, /* a6xx */
+	ADRENO_FW_GPMU = 2,
+	ADRENO_FW_MAX,
+};
+
+#define ADRENO_QUIRK_TWO_PASS_USE_WFI		BIT(0)
+#define ADRENO_QUIRK_FAULT_DETECT_MASK		BIT(1)
+#define ADRENO_QUIRK_LMLOADKILL_DISABLE		BIT(2)
+
+struct adreno_rev {
+	uint8_t  core;
+	uint8_t  major;
+	uint8_t  minor;
+	uint8_t  patchid;
+};
+
+#define ANY_ID 0xff
+
+#define ADRENO_REV(core, major, minor, patchid) \
+	((struct adreno_rev){ core, major, minor, patchid })
+
+struct adreno_gpu_funcs {
+	struct msm_gpu_funcs base;
+	int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
+};
+
+struct adreno_reglist {
+	u32 offset;
+	u32 value;
+};
+
+extern const struct adreno_reglist a615_hwcg[], a630_hwcg[], a640_hwcg[], a650_hwcg[], a660_hwcg[];
+
+struct adreno_info {
+	struct adreno_rev rev;
+	uint32_t revn;
+	const char *name;
+	const char *fw[ADRENO_FW_MAX];
+	uint32_t gmem;
+	u64 quirks;
+	struct msm_gpu *(*init)(struct drm_device *dev);
+	const char *zapfw;
+	u32 inactive_period;
+	const struct adreno_reglist *hwcg;
+	u64 address_space_size;
+};
+
+const struct adreno_info *adreno_info(struct adreno_rev rev);
+
+struct adreno_gpu {
+	struct msm_gpu base;
+	struct adreno_rev rev;
+	const struct adreno_info *info;
+	uint32_t gmem;  /* actual gmem size */
+	uint32_t revn;  /* numeric revision name */
+	uint16_t speedbin;
+	const struct adreno_gpu_funcs *funcs;
+
+	/* interesting register offsets to dump: */
+	const unsigned int *registers;
+
+	/*
+	 * Are we loading fw from legacy path?  Prior to addition
+	 * of gpu firmware to linux-firmware, the fw files were
+	 * placed in toplevel firmware directory, following qcom's
+	 * android kernel.  But linux-firmware preferred they be
+	 * placed in a 'qcom' subdirectory.
+	 *
+	 * For backwards compatibility, we try first to load from
+	 * the new path, using request_firmware_direct() to avoid
+	 * any potential timeout waiting for usermode helper, then
+	 * fall back to the old path (with direct load).  And
+	 * finally fall back to request_firmware() with the new
+	 * path to allow the usermode helper.
+	 */
+	enum {
+		FW_LOCATION_UNKNOWN = 0,
+		FW_LOCATION_NEW,     /* /lib/firmware/qcom/$fwfile */
+		FW_LOCATION_LEGACY,  /* /lib/firmware/$fwfile */
+		FW_LOCATION_HELPER,
+	} fwloc;
+
+	/* firmware: */
+	const struct firmware *fw[ADRENO_FW_MAX];
+
+	/*
+	 * Register offsets are different between some GPUs.
+	 * GPU specific offsets will be exported by GPU specific
+	 * code (a3xx_gpu.c) and stored in this common location.
+	 */
+	const unsigned int *reg_offsets;
+};
+#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
+
+struct adreno_ocmem {
+	struct ocmem *ocmem;
+	unsigned long base;
+	void *hdl;
+};
+
+/* platform config data (i.e.
from DT, or pdata) */ +struct adreno_platform_config { + struct adreno_rev rev; +}; + +#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000) + +#define spin_until(X) ({ \ + int __ret = -ETIMEDOUT; \ + unsigned long __t = jiffies + ADRENO_IDLE_TIMEOUT; \ + do { \ + if (X) { \ + __ret = 0; \ + break; \ + } \ + } while (time_before(jiffies, __t)); \ + __ret; \ +}) + +bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2); + +static inline bool adreno_is_a2xx(struct adreno_gpu *gpu) +{ + return (gpu->revn < 300); +} + +static inline bool adreno_is_a20x(struct adreno_gpu *gpu) +{ + return (gpu->revn < 210); +} + +static inline bool adreno_is_a225(struct adreno_gpu *gpu) +{ + return gpu->revn == 225; +} + +static inline bool adreno_is_a305(struct adreno_gpu *gpu) +{ + return gpu->revn == 305; +} + +static inline bool adreno_is_a306(struct adreno_gpu *gpu) +{ + /* yes, 307, because a305c is 306 */ + return gpu->revn == 307; +} + +static inline bool adreno_is_a320(struct adreno_gpu *gpu) +{ + return gpu->revn == 320; +} + +static inline bool adreno_is_a330(struct adreno_gpu *gpu) +{ + return gpu->revn == 330; +} + +static inline bool adreno_is_a330v2(struct adreno_gpu *gpu) +{ + return adreno_is_a330(gpu) && (gpu->rev.patchid > 0); +} + +static inline int adreno_is_a405(struct adreno_gpu *gpu) +{ + return gpu->revn == 405; +} + +static inline int adreno_is_a420(struct adreno_gpu *gpu) +{ + return gpu->revn == 420; +} + +static inline int adreno_is_a430(struct adreno_gpu *gpu) +{ + return gpu->revn == 430; +} + +static inline int adreno_is_a506(struct adreno_gpu *gpu) +{ + return gpu->revn == 506; +} + +static inline int adreno_is_a508(struct adreno_gpu *gpu) +{ + return gpu->revn == 508; +} + +static inline int adreno_is_a509(struct adreno_gpu *gpu) +{ + return gpu->revn == 509; +} + +static inline int adreno_is_a510(struct adreno_gpu *gpu) +{ + return gpu->revn == 510; +} + +static inline int adreno_is_a512(struct adreno_gpu *gpu) +{ + return gpu->revn == 512; +} + +static inline int adreno_is_a530(struct adreno_gpu *gpu) +{ + return gpu->revn == 530; +} + +static inline int adreno_is_a540(struct adreno_gpu *gpu) +{ + return gpu->revn == 540; +} + +static inline int adreno_is_a618(struct adreno_gpu *gpu) +{ + return gpu->revn == 618; +} + +static inline int adreno_is_a619(struct adreno_gpu *gpu) +{ + return gpu->revn == 619; +} + +static inline int adreno_is_a630(struct adreno_gpu *gpu) +{ + return gpu->revn == 630; +} + +static inline int adreno_is_a640_family(struct adreno_gpu *gpu) +{ + return (gpu->revn == 640) || (gpu->revn == 680); +} + +static inline int adreno_is_a650(struct adreno_gpu *gpu) +{ + return gpu->revn == 650; +} + +static inline int adreno_is_7c3(struct adreno_gpu *gpu) +{ + /* The order of args is important here to handle ANY_ID correctly */ + return adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), gpu->rev); +} + +static inline int adreno_is_a660(struct adreno_gpu *gpu) +{ + return gpu->revn == 660; +} + +/* check for a615, a616, a618, a619 or any derivatives */ +static inline int adreno_is_a615_family(struct adreno_gpu *gpu) +{ + return gpu->revn == 615 || gpu->revn == 616 || gpu->revn == 618 || gpu->revn == 619; +} + +static inline int adreno_is_a660_family(struct adreno_gpu *gpu) +{ + return adreno_is_a660(gpu) || adreno_is_7c3(gpu); +} + +/* check for a650, a660, or any derivatives */ +static inline int adreno_is_a650_family(struct adreno_gpu *gpu) +{ + return gpu->revn == 650 || gpu->revn == 620 || adreno_is_a660_family(gpu); +} + +u64 
adreno_private_address_space_size(struct msm_gpu *gpu); +int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx, + uint32_t param, uint64_t *value, uint32_t *len); +int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx, + uint32_t param, uint64_t value, uint32_t len); +const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu, + const char *fwname); +struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu, + const struct firmware *fw, u64 *iova); +int adreno_hw_init(struct msm_gpu *gpu); +void adreno_recover(struct msm_gpu *gpu); +void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg); +bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring); +#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP) +void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state, + struct drm_printer *p); +#endif +void adreno_dump_info(struct msm_gpu *gpu); +void adreno_dump(struct msm_gpu *gpu); +void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords); +struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu); + +int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu, + struct adreno_ocmem *ocmem); +void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *ocmem); + +int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, + struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs, + int nr_rings); +void adreno_gpu_cleanup(struct adreno_gpu *gpu); +int adreno_load_fw(struct adreno_gpu *adreno_gpu); + +void adreno_gpu_state_destroy(struct msm_gpu_state *state); + +int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state); +int adreno_gpu_state_put(struct msm_gpu_state *state); +void adreno_show_object(struct drm_printer *p, void **ptr, int len, + bool *encoded); + +/* + * Common helper function to initialize the default address space for arm-smmu + * attached targets + */ +struct msm_gem_address_space * +adreno_iommu_create_address_space(struct msm_gpu *gpu, + struct platform_device *pdev); + +void adreno_set_llc_attributes(struct iommu_domain *iommu); + +int adreno_read_speedbin(struct device *dev, u32 *speedbin); + +/* + * For a5xx and a6xx targets load the zap shader that is used to pull the GPU + * out of secure mode + */ +int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid); + +/* ringbuffer helpers (the parts that are adreno specific) */ + +static inline void +OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt) +{ + adreno_wait_ring(ring, cnt+1); + OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF)); +} + +/* no-op packet: */ +static inline void +OUT_PKT2(struct msm_ringbuffer *ring) +{ + adreno_wait_ring(ring, 1); + OUT_RING(ring, CP_TYPE2_PKT); +} + +static inline void +OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt) +{ + adreno_wait_ring(ring, cnt+1); + OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8)); +} + +static inline u32 PM4_PARITY(u32 val) +{ + return (0x9669 >> (0xF & (val ^ + (val >> 4) ^ (val >> 8) ^ (val >> 12) ^ + (val >> 16) ^ ((val) >> 20) ^ (val >> 24) ^ + (val >> 28)))) & 1; +} + +/* Maximum number of values that can be executed for one opcode */ +#define TYPE4_MAX_PAYLOAD 127 + +#define PKT4(_reg, _cnt) \ + (CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \ + (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27)) + +static inline void +OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt) +{ + 
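+	/* The type4 header encodes the payload size and the start register,
+	 * each protected by a parity bit computed by PM4_PARITY() above. */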
+	adreno_wait_ring(ring, cnt + 1);
+	OUT_RING(ring, PKT4(regindx, cnt));
+}
+
+static inline void
+OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
+{
+	adreno_wait_ring(ring, cnt + 1);
+	OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
+		((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
+}
+
+struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);
+
+static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
+{
+	return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
+}
+
+/*
+ * Given a register and a count, return a value to program into
+ * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
+ * registers starting at _reg.
+ *
+ * The register base needs to be a multiple of the length. If it is not, the
+ * hardware will quietly mask off the bits for you and shift the size. For
+ * example, if you intend the protection to start at 0x07 for a length of 4
+ * (0x07-0x0A) the hardware will actually protect (0x04-0x07) which might
+ * expose registers you intended to protect!
+ */
+#define ADRENO_PROTECT_RW(_reg, _len) \
+	((1 << 30) | (1 << 29) | \
+	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
+/*
+ * Same as above, but allow reads over the range. For areas of mixed use (such
+ * as performance counters) this allows us to protect a much larger range with
+ * a single register.
+ */
+#define ADRENO_PROTECT_RDONLY(_reg, _len) \
+	((1 << 29) | \
+	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
+
+#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
+	readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
+		interval, timeout)
+
+#endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
new file mode 100644
index 000000000..7aecf920f
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -0,0 +1,2365 @@
+#ifndef ADRENO_PM4_XML
+#define ADRENO_PM4_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+ +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://github.com/freedreno/envytools/ +git clone https://github.com/freedreno/envytools.git + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a2xx.xml ( 90810 bytes, from 2021-06-21 15:24:24) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 14609 bytes, from 2021-11-24 23:05:10) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 69086 bytes, from 2022-03-03 16:41:33) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2021-11-24 23:05:10) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113358 bytes, from 2022-01-31 23:06:21) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149512 bytes, from 2022-01-31 23:06:21) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx.xml ( 184954 bytes, from 2022-03-03 16:41:33) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11331 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 6038 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2924 bytes, from 2021-07-22 15:21:56) + +Copyright (C) 2013-2022 by the following authors: +- Rob Clark (robclark) +- Ilia Mirkin (imirkin) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + + +enum vgt_event_type { + VS_DEALLOC = 0, + PS_DEALLOC = 1, + VS_DONE_TS = 2, + PS_DONE_TS = 3, + CACHE_FLUSH_TS = 4, + CONTEXT_DONE = 5, + CACHE_FLUSH = 6, + VIZQUERY_START = 7, + HLSQ_FLUSH = 7, + VIZQUERY_END = 8, + SC_WAIT_WC = 9, + WRITE_PRIMITIVE_COUNTS = 9, + START_PRIMITIVE_CTRS = 11, + STOP_PRIMITIVE_CTRS = 12, + RST_PIX_CNT = 13, + RST_VTX_CNT = 14, + TILE_FLUSH = 15, + STAT_EVENT = 16, + CACHE_FLUSH_AND_INV_TS_EVENT = 20, + ZPASS_DONE = 21, + CACHE_FLUSH_AND_INV_EVENT = 22, + RB_DONE_TS = 22, + PERFCOUNTER_START = 23, + PERFCOUNTER_STOP = 24, + VS_FETCH_DONE = 27, + FACENESS_FLUSH = 28, + WT_DONE_TS = 8, + FLUSH_SO_0 = 17, + FLUSH_SO_1 = 18, + FLUSH_SO_2 = 19, + FLUSH_SO_3 = 20, + PC_CCU_INVALIDATE_DEPTH = 24, + PC_CCU_INVALIDATE_COLOR = 25, + PC_CCU_RESOLVE_TS = 26, + PC_CCU_FLUSH_DEPTH_TS = 28, + PC_CCU_FLUSH_COLOR_TS = 29, + BLIT = 30, + UNK_25 = 37, + LRZ_FLUSH = 38, + BLIT_OP_FILL_2D = 39, + BLIT_OP_COPY_2D = 40, + BLIT_OP_SCALE_2D = 42, + CONTEXT_DONE_2D = 43, + UNK_2C = 44, + UNK_2D = 45, + CACHE_INVALIDATE = 49, +}; + +enum pc_di_primtype { + DI_PT_NONE = 0, + DI_PT_POINTLIST_PSIZE = 1, + DI_PT_LINELIST = 2, + DI_PT_LINESTRIP = 3, + DI_PT_TRILIST = 4, + DI_PT_TRIFAN = 5, + DI_PT_TRISTRIP = 6, + DI_PT_LINELOOP = 7, + DI_PT_RECTLIST = 8, + DI_PT_POINTLIST = 9, + DI_PT_LINE_ADJ = 10, + DI_PT_LINESTRIP_ADJ = 11, + DI_PT_TRI_ADJ = 12, + DI_PT_TRISTRIP_ADJ = 13, + DI_PT_PATCHES0 = 31, + DI_PT_PATCHES1 = 32, + DI_PT_PATCHES2 = 33, + DI_PT_PATCHES3 = 34, + DI_PT_PATCHES4 = 35, + DI_PT_PATCHES5 = 36, + DI_PT_PATCHES6 = 37, + DI_PT_PATCHES7 = 38, + DI_PT_PATCHES8 = 39, + DI_PT_PATCHES9 = 40, + DI_PT_PATCHES10 = 41, + DI_PT_PATCHES11 = 42, + DI_PT_PATCHES12 = 43, + DI_PT_PATCHES13 = 44, + DI_PT_PATCHES14 = 45, + DI_PT_PATCHES15 = 46, + DI_PT_PATCHES16 = 47, + DI_PT_PATCHES17 = 48, + DI_PT_PATCHES18 = 49, + DI_PT_PATCHES19 = 50, + DI_PT_PATCHES20 = 51, + DI_PT_PATCHES21 = 52, + DI_PT_PATCHES22 = 53, + DI_PT_PATCHES23 = 54, + DI_PT_PATCHES24 = 55, + DI_PT_PATCHES25 = 56, + DI_PT_PATCHES26 = 57, + DI_PT_PATCHES27 = 58, + DI_PT_PATCHES28 = 59, + DI_PT_PATCHES29 = 60, + DI_PT_PATCHES30 = 61, + DI_PT_PATCHES31 = 62, +}; + +enum pc_di_src_sel { + DI_SRC_SEL_DMA = 0, + DI_SRC_SEL_IMMEDIATE = 1, + DI_SRC_SEL_AUTO_INDEX = 2, + DI_SRC_SEL_AUTO_XFB = 3, +}; + +enum pc_di_face_cull_sel { + DI_FACE_CULL_NONE = 0, + DI_FACE_CULL_FETCH = 1, + DI_FACE_BACKFACE_CULL = 2, + DI_FACE_FRONTFACE_CULL = 3, +}; + +enum pc_di_index_size { + INDEX_SIZE_IGN = 0, + INDEX_SIZE_16_BIT = 0, + INDEX_SIZE_32_BIT = 1, + INDEX_SIZE_8_BIT = 2, + INDEX_SIZE_INVALID = 0, +}; + +enum pc_di_vis_cull_mode { + IGNORE_VISIBILITY = 0, + USE_VISIBILITY = 1, +}; + +enum adreno_pm4_packet_type { + CP_TYPE0_PKT = 0, + CP_TYPE1_PKT = 0x40000000, + CP_TYPE2_PKT = 0x80000000, + CP_TYPE3_PKT = 0xc0000000, + CP_TYPE4_PKT = 0x40000000, + CP_TYPE7_PKT = 0x70000000, +}; + +enum adreno_pm4_type3_packets { + CP_ME_INIT = 72, + CP_NOP = 16, + CP_PREEMPT_ENABLE = 28, + CP_PREEMPT_TOKEN = 30, + CP_INDIRECT_BUFFER = 63, + CP_INDIRECT_BUFFER_CHAIN = 87, + CP_INDIRECT_BUFFER_PFD = 55, + CP_WAIT_FOR_IDLE = 38, + CP_WAIT_REG_MEM = 60, + CP_WAIT_REG_EQ = 82, + CP_WAIT_REG_GTE = 83, + CP_WAIT_UNTIL_READ = 92, + CP_WAIT_IB_PFD_COMPLETE = 93, + CP_REG_RMW = 33, + CP_SET_BIN_DATA = 47, + CP_SET_BIN_DATA5 = 47, + CP_REG_TO_MEM = 62, + CP_MEM_WRITE = 61, + CP_MEM_WRITE_CNTR = 79, + CP_COND_EXEC = 68, + CP_COND_WRITE = 69, + CP_COND_WRITE5 = 69, + CP_EVENT_WRITE = 70, + CP_EVENT_WRITE_SHD = 88, + CP_EVENT_WRITE_CFL = 89, + CP_EVENT_WRITE_ZPD = 91, 
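+	/* Note that several opcode values below are reused across GPU
+	 * generations (e.g. CP_IM_STORE and CP_BLIT are both 44); which
+	 * name applies depends on the target the packet is sent to. */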
+ CP_RUN_OPENCL = 49, + CP_DRAW_INDX = 34, + CP_DRAW_INDX_2 = 54, + CP_DRAW_INDX_BIN = 52, + CP_DRAW_INDX_2_BIN = 53, + CP_VIZ_QUERY = 35, + CP_SET_STATE = 37, + CP_SET_CONSTANT = 45, + CP_IM_LOAD = 39, + CP_IM_LOAD_IMMEDIATE = 43, + CP_LOAD_CONSTANT_CONTEXT = 46, + CP_INVALIDATE_STATE = 59, + CP_SET_SHADER_BASES = 74, + CP_SET_BIN_MASK = 80, + CP_SET_BIN_SELECT = 81, + CP_CONTEXT_UPDATE = 94, + CP_INTERRUPT = 64, + CP_IM_STORE = 44, + CP_SET_DRAW_INIT_FLAGS = 75, + CP_SET_PROTECTED_MODE = 95, + CP_BOOTSTRAP_UCODE = 111, + CP_LOAD_STATE = 48, + CP_LOAD_STATE4 = 48, + CP_COND_INDIRECT_BUFFER_PFE = 58, + CP_COND_INDIRECT_BUFFER_PFD = 50, + CP_INDIRECT_BUFFER_PFE = 63, + CP_SET_BIN = 76, + CP_TEST_TWO_MEMS = 113, + CP_REG_WR_NO_CTXT = 120, + CP_RECORD_PFP_TIMESTAMP = 17, + CP_SET_SECURE_MODE = 102, + CP_WAIT_FOR_ME = 19, + CP_SET_DRAW_STATE = 67, + CP_DRAW_INDX_OFFSET = 56, + CP_DRAW_INDIRECT = 40, + CP_DRAW_INDX_INDIRECT = 41, + CP_DRAW_INDIRECT_MULTI = 42, + CP_DRAW_AUTO = 36, + CP_DRAW_PRED_ENABLE_GLOBAL = 25, + CP_DRAW_PRED_ENABLE_LOCAL = 26, + CP_DRAW_PRED_SET = 78, + CP_WIDE_REG_WRITE = 116, + CP_SCRATCH_TO_REG = 77, + CP_REG_TO_SCRATCH = 74, + CP_WAIT_MEM_WRITES = 18, + CP_COND_REG_EXEC = 71, + CP_MEM_TO_REG = 66, + CP_EXEC_CS_INDIRECT = 65, + CP_EXEC_CS = 51, + CP_PERFCOUNTER_ACTION = 80, + CP_SMMU_TABLE_UPDATE = 83, + CP_SET_MARKER = 101, + CP_SET_PSEUDO_REG = 86, + CP_CONTEXT_REG_BUNCH = 92, + CP_YIELD_ENABLE = 28, + CP_SKIP_IB2_ENABLE_GLOBAL = 29, + CP_SKIP_IB2_ENABLE_LOCAL = 35, + CP_SET_SUBDRAW_SIZE = 53, + CP_WHERE_AM_I = 98, + CP_SET_VISIBILITY_OVERRIDE = 100, + CP_PREEMPT_ENABLE_GLOBAL = 105, + CP_PREEMPT_ENABLE_LOCAL = 106, + CP_CONTEXT_SWITCH_YIELD = 107, + CP_SET_RENDER_MODE = 108, + CP_COMPUTE_CHECKPOINT = 110, + CP_MEM_TO_MEM = 115, + CP_BLIT = 44, + CP_REG_TEST = 57, + CP_SET_MODE = 99, + CP_LOAD_STATE6_GEOM = 50, + CP_LOAD_STATE6_FRAG = 52, + CP_LOAD_STATE6 = 54, + IN_IB_PREFETCH_END = 23, + IN_SUBBLK_PREFETCH = 31, + IN_INSTR_PREFETCH = 32, + IN_INSTR_MATCH = 71, + IN_CONST_PREFETCH = 73, + IN_INCR_UPDT_STATE = 85, + IN_INCR_UPDT_CONST = 86, + IN_INCR_UPDT_INSTR = 87, + PKT4 = 4, + CP_SCRATCH_WRITE = 76, + CP_REG_TO_MEM_OFFSET_MEM = 116, + CP_REG_TO_MEM_OFFSET_REG = 114, + CP_WAIT_MEM_GTE = 20, + CP_WAIT_TWO_REGS = 112, + CP_MEMCPY = 117, + CP_SET_BIN_DATA5_OFFSET = 46, + CP_SET_CTXSWITCH_IB = 85, + CP_REG_WRITE = 109, + CP_START_BIN = 80, + CP_END_BIN = 81, +}; + +enum adreno_state_block { + SB_VERT_TEX = 0, + SB_VERT_MIPADDR = 1, + SB_FRAG_TEX = 2, + SB_FRAG_MIPADDR = 3, + SB_VERT_SHADER = 4, + SB_GEOM_SHADER = 5, + SB_FRAG_SHADER = 6, + SB_COMPUTE_SHADER = 7, +}; + +enum adreno_state_type { + ST_SHADER = 0, + ST_CONSTANTS = 1, +}; + +enum adreno_state_src { + SS_DIRECT = 0, + SS_INVALID_ALL_IC = 2, + SS_INVALID_PART_IC = 3, + SS_INDIRECT = 4, + SS_INDIRECT_TCM = 5, + SS_INDIRECT_STM = 6, +}; + +enum a4xx_state_block { + SB4_VS_TEX = 0, + SB4_HS_TEX = 1, + SB4_DS_TEX = 2, + SB4_GS_TEX = 3, + SB4_FS_TEX = 4, + SB4_CS_TEX = 5, + SB4_VS_SHADER = 8, + SB4_HS_SHADER = 9, + SB4_DS_SHADER = 10, + SB4_GS_SHADER = 11, + SB4_FS_SHADER = 12, + SB4_CS_SHADER = 13, + SB4_SSBO = 14, + SB4_CS_SSBO = 15, +}; + +enum a4xx_state_type { + ST4_SHADER = 0, + ST4_CONSTANTS = 1, + ST4_UBO = 2, +}; + +enum a4xx_state_src { + SS4_DIRECT = 0, + SS4_INDIRECT = 2, +}; + +enum a6xx_state_block { + SB6_VS_TEX = 0, + SB6_HS_TEX = 1, + SB6_DS_TEX = 2, + SB6_GS_TEX = 3, + SB6_FS_TEX = 4, + SB6_CS_TEX = 5, + SB6_VS_SHADER = 8, + SB6_HS_SHADER = 9, + SB6_DS_SHADER = 10, + SB6_GS_SHADER = 11, + 
SB6_FS_SHADER = 12, + SB6_CS_SHADER = 13, + SB6_IBO = 14, + SB6_CS_IBO = 15, +}; + +enum a6xx_state_type { + ST6_SHADER = 0, + ST6_CONSTANTS = 1, + ST6_UBO = 2, + ST6_IBO = 3, +}; + +enum a6xx_state_src { + SS6_DIRECT = 0, + SS6_BINDLESS = 1, + SS6_INDIRECT = 2, + SS6_UBO = 3, +}; + +enum a4xx_index_size { + INDEX4_SIZE_8_BIT = 0, + INDEX4_SIZE_16_BIT = 1, + INDEX4_SIZE_32_BIT = 2, +}; + +enum a6xx_patch_type { + TESS_QUADS = 0, + TESS_TRIANGLES = 1, + TESS_ISOLINES = 2, +}; + +enum a6xx_draw_indirect_opcode { + INDIRECT_OP_NORMAL = 2, + INDIRECT_OP_INDEXED = 4, + INDIRECT_OP_INDIRECT_COUNT = 6, + INDIRECT_OP_INDIRECT_COUNT_INDEXED = 7, +}; + +enum cp_draw_pred_src { + PRED_SRC_MEM = 5, +}; + +enum cp_draw_pred_test { + NE_0_PASS = 0, + EQ_0_PASS = 1, +}; + +enum cp_cond_function { + WRITE_ALWAYS = 0, + WRITE_LT = 1, + WRITE_LE = 2, + WRITE_EQ = 3, + WRITE_NE = 4, + WRITE_GE = 5, + WRITE_GT = 6, +}; + +enum render_mode_cmd { + BYPASS = 1, + BINNING = 2, + GMEM = 3, + BLIT2D = 5, + BLIT2DSCALE = 7, + END2D = 8, +}; + +enum cp_blit_cmd { + BLIT_OP_FILL = 0, + BLIT_OP_COPY = 1, + BLIT_OP_SCALE = 3, +}; + +enum a6xx_marker { + RM6_BYPASS = 1, + RM6_BINNING = 2, + RM6_GMEM = 4, + RM6_ENDVIS = 5, + RM6_RESOLVE = 6, + RM6_YIELD = 7, + RM6_COMPUTE = 8, + RM6_BLIT2DSCALE = 12, + RM6_IB1LIST_START = 13, + RM6_IB1LIST_END = 14, + RM6_IFPC_ENABLE = 256, + RM6_IFPC_DISABLE = 257, +}; + +enum pseudo_reg { + SMMU_INFO = 0, + NON_SECURE_SAVE_ADDR = 1, + SECURE_SAVE_ADDR = 2, + NON_PRIV_SAVE_ADDR = 3, + COUNTER = 4, +}; + +enum compare_mode { + PRED_TEST = 1, + REG_COMPARE = 2, + RENDER_MODE = 3, +}; + +enum ctxswitch_ib { + RESTORE_IB = 0, + YIELD_RESTORE_IB = 1, + SAVE_IB = 2, + RB_SAVE_IB = 3, +}; + +enum reg_tracker { + TRACK_CNTL_REG = 1, + TRACK_RENDER_CNTL = 2, + UNK_EVENT_WRITE = 4, +}; + +#define REG_CP_LOAD_STATE_0 0x00000000 +#define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff +#define CP_LOAD_STATE_0_DST_OFF__SHIFT 0 +static inline uint32_t CP_LOAD_STATE_0_DST_OFF(uint32_t val) +{ + return ((val) << CP_LOAD_STATE_0_DST_OFF__SHIFT) & CP_LOAD_STATE_0_DST_OFF__MASK; +} +#define CP_LOAD_STATE_0_STATE_SRC__MASK 0x00070000 +#define CP_LOAD_STATE_0_STATE_SRC__SHIFT 16 +static inline uint32_t CP_LOAD_STATE_0_STATE_SRC(enum adreno_state_src val) +{ + return ((val) << CP_LOAD_STATE_0_STATE_SRC__SHIFT) & CP_LOAD_STATE_0_STATE_SRC__MASK; +} +#define CP_LOAD_STATE_0_STATE_BLOCK__MASK 0x00380000 +#define CP_LOAD_STATE_0_STATE_BLOCK__SHIFT 19 +static inline uint32_t CP_LOAD_STATE_0_STATE_BLOCK(enum adreno_state_block val) +{ + return ((val) << CP_LOAD_STATE_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE_0_STATE_BLOCK__MASK; +} +#define CP_LOAD_STATE_0_NUM_UNIT__MASK 0xffc00000 +#define CP_LOAD_STATE_0_NUM_UNIT__SHIFT 22 +static inline uint32_t CP_LOAD_STATE_0_NUM_UNIT(uint32_t val) +{ + return ((val) << CP_LOAD_STATE_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE_0_NUM_UNIT__MASK; +} + +#define REG_CP_LOAD_STATE_1 0x00000001 +#define CP_LOAD_STATE_1_STATE_TYPE__MASK 0x00000003 +#define CP_LOAD_STATE_1_STATE_TYPE__SHIFT 0 +static inline uint32_t CP_LOAD_STATE_1_STATE_TYPE(enum adreno_state_type val) +{ + return ((val) << CP_LOAD_STATE_1_STATE_TYPE__SHIFT) & CP_LOAD_STATE_1_STATE_TYPE__MASK; +} +#define CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK 0xfffffffc +#define CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT 2 +static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val) +{ + return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK; +} + +#define REG_CP_LOAD_STATE4_0 0x00000000 +#define 
CP_LOAD_STATE4_0_DST_OFF__MASK 0x00003fff +#define CP_LOAD_STATE4_0_DST_OFF__SHIFT 0 +static inline uint32_t CP_LOAD_STATE4_0_DST_OFF(uint32_t val) +{ + return ((val) << CP_LOAD_STATE4_0_DST_OFF__SHIFT) & CP_LOAD_STATE4_0_DST_OFF__MASK; +} +#define CP_LOAD_STATE4_0_STATE_SRC__MASK 0x00030000 +#define CP_LOAD_STATE4_0_STATE_SRC__SHIFT 16 +static inline uint32_t CP_LOAD_STATE4_0_STATE_SRC(enum a4xx_state_src val) +{ + return ((val) << CP_LOAD_STATE4_0_STATE_SRC__SHIFT) & CP_LOAD_STATE4_0_STATE_SRC__MASK; +} +#define CP_LOAD_STATE4_0_STATE_BLOCK__MASK 0x003c0000 +#define CP_LOAD_STATE4_0_STATE_BLOCK__SHIFT 18 +static inline uint32_t CP_LOAD_STATE4_0_STATE_BLOCK(enum a4xx_state_block val) +{ + return ((val) << CP_LOAD_STATE4_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE4_0_STATE_BLOCK__MASK; +} +#define CP_LOAD_STATE4_0_NUM_UNIT__MASK 0xffc00000 +#define CP_LOAD_STATE4_0_NUM_UNIT__SHIFT 22 +static inline uint32_t CP_LOAD_STATE4_0_NUM_UNIT(uint32_t val) +{ + return ((val) << CP_LOAD_STATE4_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE4_0_NUM_UNIT__MASK; +} + +#define REG_CP_LOAD_STATE4_1 0x00000001 +#define CP_LOAD_STATE4_1_STATE_TYPE__MASK 0x00000003 +#define CP_LOAD_STATE4_1_STATE_TYPE__SHIFT 0 +static inline uint32_t CP_LOAD_STATE4_1_STATE_TYPE(enum a4xx_state_type val) +{ + return ((val) << CP_LOAD_STATE4_1_STATE_TYPE__SHIFT) & CP_LOAD_STATE4_1_STATE_TYPE__MASK; +} +#define CP_LOAD_STATE4_1_EXT_SRC_ADDR__MASK 0xfffffffc +#define CP_LOAD_STATE4_1_EXT_SRC_ADDR__SHIFT 2 +static inline uint32_t CP_LOAD_STATE4_1_EXT_SRC_ADDR(uint32_t val) +{ + return ((val >> 2) << CP_LOAD_STATE4_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE4_1_EXT_SRC_ADDR__MASK; +} + +#define REG_CP_LOAD_STATE4_2 0x00000002 +#define CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__MASK 0xffffffff +#define CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__SHIFT 0 +static inline uint32_t CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI(uint32_t val) +{ + return ((val) << CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__MASK; +} + +#define REG_CP_LOAD_STATE6_0 0x00000000 +#define CP_LOAD_STATE6_0_DST_OFF__MASK 0x00003fff +#define CP_LOAD_STATE6_0_DST_OFF__SHIFT 0 +static inline uint32_t CP_LOAD_STATE6_0_DST_OFF(uint32_t val) +{ + return ((val) << CP_LOAD_STATE6_0_DST_OFF__SHIFT) & CP_LOAD_STATE6_0_DST_OFF__MASK; +} +#define CP_LOAD_STATE6_0_STATE_TYPE__MASK 0x0000c000 +#define CP_LOAD_STATE6_0_STATE_TYPE__SHIFT 14 +static inline uint32_t CP_LOAD_STATE6_0_STATE_TYPE(enum a6xx_state_type val) +{ + return ((val) << CP_LOAD_STATE6_0_STATE_TYPE__SHIFT) & CP_LOAD_STATE6_0_STATE_TYPE__MASK; +} +#define CP_LOAD_STATE6_0_STATE_SRC__MASK 0x00030000 +#define CP_LOAD_STATE6_0_STATE_SRC__SHIFT 16 +static inline uint32_t CP_LOAD_STATE6_0_STATE_SRC(enum a6xx_state_src val) +{ + return ((val) << CP_LOAD_STATE6_0_STATE_SRC__SHIFT) & CP_LOAD_STATE6_0_STATE_SRC__MASK; +} +#define CP_LOAD_STATE6_0_STATE_BLOCK__MASK 0x003c0000 +#define CP_LOAD_STATE6_0_STATE_BLOCK__SHIFT 18 +static inline uint32_t CP_LOAD_STATE6_0_STATE_BLOCK(enum a6xx_state_block val) +{ + return ((val) << CP_LOAD_STATE6_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE6_0_STATE_BLOCK__MASK; +} +#define CP_LOAD_STATE6_0_NUM_UNIT__MASK 0xffc00000 +#define CP_LOAD_STATE6_0_NUM_UNIT__SHIFT 22 +static inline uint32_t CP_LOAD_STATE6_0_NUM_UNIT(uint32_t val) +{ + return ((val) << CP_LOAD_STATE6_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE6_0_NUM_UNIT__MASK; +} + +#define REG_CP_LOAD_STATE6_1 0x00000001 +#define CP_LOAD_STATE6_1_EXT_SRC_ADDR__MASK 0xfffffffc +#define CP_LOAD_STATE6_1_EXT_SRC_ADDR__SHIFT 2 +static inline uint32_t 
CP_LOAD_STATE6_1_EXT_SRC_ADDR(uint32_t val) +{ + return ((val >> 2) << CP_LOAD_STATE6_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE6_1_EXT_SRC_ADDR__MASK; +} + +#define REG_CP_LOAD_STATE6_2 0x00000002 +#define CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__MASK 0xffffffff +#define CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__SHIFT 0 +static inline uint32_t CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(uint32_t val) +{ + return ((val) << CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__MASK; +} + +#define REG_CP_LOAD_STATE6_EXT_SRC_ADDR 0x00000001 + +#define REG_CP_DRAW_INDX_0 0x00000000 +#define CP_DRAW_INDX_0_VIZ_QUERY__MASK 0xffffffff +#define CP_DRAW_INDX_0_VIZ_QUERY__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_0_VIZ_QUERY(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_0_VIZ_QUERY__SHIFT) & CP_DRAW_INDX_0_VIZ_QUERY__MASK; +} + +#define REG_CP_DRAW_INDX_1 0x00000001 +#define CP_DRAW_INDX_1_PRIM_TYPE__MASK 0x0000003f +#define CP_DRAW_INDX_1_PRIM_TYPE__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_1_PRIM_TYPE(enum pc_di_primtype val) +{ + return ((val) << CP_DRAW_INDX_1_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_1_PRIM_TYPE__MASK; +} +#define CP_DRAW_INDX_1_SOURCE_SELECT__MASK 0x000000c0 +#define CP_DRAW_INDX_1_SOURCE_SELECT__SHIFT 6 +static inline uint32_t CP_DRAW_INDX_1_SOURCE_SELECT(enum pc_di_src_sel val) +{ + return ((val) << CP_DRAW_INDX_1_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_1_SOURCE_SELECT__MASK; +} +#define CP_DRAW_INDX_1_VIS_CULL__MASK 0x00000600 +#define CP_DRAW_INDX_1_VIS_CULL__SHIFT 9 +static inline uint32_t CP_DRAW_INDX_1_VIS_CULL(enum pc_di_vis_cull_mode val) +{ + return ((val) << CP_DRAW_INDX_1_VIS_CULL__SHIFT) & CP_DRAW_INDX_1_VIS_CULL__MASK; +} +#define CP_DRAW_INDX_1_INDEX_SIZE__MASK 0x00000800 +#define CP_DRAW_INDX_1_INDEX_SIZE__SHIFT 11 +static inline uint32_t CP_DRAW_INDX_1_INDEX_SIZE(enum pc_di_index_size val) +{ + return ((val) << CP_DRAW_INDX_1_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_1_INDEX_SIZE__MASK; +} +#define CP_DRAW_INDX_1_NOT_EOP 0x00001000 +#define CP_DRAW_INDX_1_SMALL_INDEX 0x00002000 +#define CP_DRAW_INDX_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000 +#define CP_DRAW_INDX_1_NUM_INSTANCES__MASK 0xff000000 +#define CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT 24 +static inline uint32_t CP_DRAW_INDX_1_NUM_INSTANCES(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_1_NUM_INSTANCES__MASK; +} + +#define REG_CP_DRAW_INDX_2 0x00000002 +#define CP_DRAW_INDX_2_NUM_INDICES__MASK 0xffffffff +#define CP_DRAW_INDX_2_NUM_INDICES__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_2_NUM_INDICES(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_NUM_INDICES__MASK; +} + +#define REG_CP_DRAW_INDX_3 0x00000003 +#define CP_DRAW_INDX_3_INDX_BASE__MASK 0xffffffff +#define CP_DRAW_INDX_3_INDX_BASE__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_3_INDX_BASE(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_3_INDX_BASE__SHIFT) & CP_DRAW_INDX_3_INDX_BASE__MASK; +} + +#define REG_CP_DRAW_INDX_4 0x00000004 +#define CP_DRAW_INDX_4_INDX_SIZE__MASK 0xffffffff +#define CP_DRAW_INDX_4_INDX_SIZE__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_4_INDX_SIZE(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_4_INDX_SIZE__SHIFT) & CP_DRAW_INDX_4_INDX_SIZE__MASK; +} + +#define REG_CP_DRAW_INDX_2_0 0x00000000 +#define CP_DRAW_INDX_2_0_VIZ_QUERY__MASK 0xffffffff +#define CP_DRAW_INDX_2_0_VIZ_QUERY__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_2_0_VIZ_QUERY(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_2_0_VIZ_QUERY__SHIFT) & CP_DRAW_INDX_2_0_VIZ_QUERY__MASK; +} + +#define 
REG_CP_DRAW_INDX_2_1 0x00000001 +#define CP_DRAW_INDX_2_1_PRIM_TYPE__MASK 0x0000003f +#define CP_DRAW_INDX_2_1_PRIM_TYPE__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_2_1_PRIM_TYPE(enum pc_di_primtype val) +{ + return ((val) << CP_DRAW_INDX_2_1_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_2_1_PRIM_TYPE__MASK; +} +#define CP_DRAW_INDX_2_1_SOURCE_SELECT__MASK 0x000000c0 +#define CP_DRAW_INDX_2_1_SOURCE_SELECT__SHIFT 6 +static inline uint32_t CP_DRAW_INDX_2_1_SOURCE_SELECT(enum pc_di_src_sel val) +{ + return ((val) << CP_DRAW_INDX_2_1_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_2_1_SOURCE_SELECT__MASK; +} +#define CP_DRAW_INDX_2_1_VIS_CULL__MASK 0x00000600 +#define CP_DRAW_INDX_2_1_VIS_CULL__SHIFT 9 +static inline uint32_t CP_DRAW_INDX_2_1_VIS_CULL(enum pc_di_vis_cull_mode val) +{ + return ((val) << CP_DRAW_INDX_2_1_VIS_CULL__SHIFT) & CP_DRAW_INDX_2_1_VIS_CULL__MASK; +} +#define CP_DRAW_INDX_2_1_INDEX_SIZE__MASK 0x00000800 +#define CP_DRAW_INDX_2_1_INDEX_SIZE__SHIFT 11 +static inline uint32_t CP_DRAW_INDX_2_1_INDEX_SIZE(enum pc_di_index_size val) +{ + return ((val) << CP_DRAW_INDX_2_1_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_2_1_INDEX_SIZE__MASK; +} +#define CP_DRAW_INDX_2_1_NOT_EOP 0x00001000 +#define CP_DRAW_INDX_2_1_SMALL_INDEX 0x00002000 +#define CP_DRAW_INDX_2_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000 +#define CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK 0xff000000 +#define CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT 24 +static inline uint32_t CP_DRAW_INDX_2_1_NUM_INSTANCES(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK; +} + +#define REG_CP_DRAW_INDX_2_2 0x00000002 +#define CP_DRAW_INDX_2_2_NUM_INDICES__MASK 0xffffffff +#define CP_DRAW_INDX_2_2_NUM_INDICES__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_2_2_NUM_INDICES(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_2_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_2_NUM_INDICES__MASK; +} + +#define REG_CP_DRAW_INDX_OFFSET_0 0x00000000 +#define CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK 0x0000003f +#define CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(enum pc_di_primtype val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK; +} +#define CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK 0x000000c0 +#define CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT 6 +static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK; +} +#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK 0x00000300 +#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT 8 +static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK; +} +#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000c00 +#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 10 +static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK; +} +#define CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__MASK 0x00003000 +#define CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__SHIFT 12 +static inline uint32_t CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(enum a6xx_patch_type val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__SHIFT) & CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__MASK; +} +#define CP_DRAW_INDX_OFFSET_0_GS_ENABLE 0x00010000 +#define CP_DRAW_INDX_OFFSET_0_TESS_ENABLE 0x00020000 + 
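+/*
+ * Illustrative sketch (not part of the generated header): each
+ * *_MASK/*_SHIFT pair and its inline packer above contribute one field,
+ * and a full PM4 dword is built by OR'ing the helpers together. A
+ * hypothetical indexed triangle-list draw, for example, could assemble
+ * its first CP_DRAW_INDX_OFFSET dword as:
+ *
+ *	uint32_t dword0 = CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(DI_PT_TRILIST) |
+ *			  CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
+ *			  CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY) |
+ *			  CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(INDEX4_SIZE_16_BIT);
+ *
+ * Each helper shifts the value into position and then masks it, so an
+ * out-of-range input is truncated rather than corrupting adjacent fields.
+ */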
+#define REG_CP_DRAW_INDX_OFFSET_1 0x00000001 +#define CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK 0xffffffff +#define CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK; +} + +#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002 +#define CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK 0xffffffff +#define CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_OFFSET_2_NUM_INDICES(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK; +} + +#define REG_CP_DRAW_INDX_OFFSET_3 0x00000003 +#define CP_DRAW_INDX_OFFSET_3_FIRST_INDX__MASK 0xffffffff +#define CP_DRAW_INDX_OFFSET_3_FIRST_INDX__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_OFFSET_3_FIRST_INDX(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_3_FIRST_INDX__SHIFT) & CP_DRAW_INDX_OFFSET_3_FIRST_INDX__MASK; +} + + +#define REG_CP_DRAW_INDX_OFFSET_4 0x00000004 +#define CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__MASK 0xffffffff +#define CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__SHIFT) & CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__MASK; +} + +#define REG_CP_DRAW_INDX_OFFSET_5 0x00000005 +#define CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__MASK 0xffffffff +#define CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__MASK; +} + +#define REG_CP_DRAW_INDX_OFFSET_INDX_BASE 0x00000004 + +#define REG_CP_DRAW_INDX_OFFSET_6 0x00000006 +#define CP_DRAW_INDX_OFFSET_6_MAX_INDICES__MASK 0xffffffff +#define CP_DRAW_INDX_OFFSET_6_MAX_INDICES__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_OFFSET_6_MAX_INDICES(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_6_MAX_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_6_MAX_INDICES__MASK; +} + +#define REG_CP_DRAW_INDX_OFFSET_4 0x00000004 +#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK 0xffffffff +#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_OFFSET_4_INDX_BASE(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK; +} + +#define REG_CP_DRAW_INDX_OFFSET_5 0x00000005 +#define CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK 0xffffffff +#define CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT 0 +static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_SIZE(uint32_t val) +{ + return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK; +} + +#define REG_A4XX_CP_DRAW_INDIRECT_0 0x00000000 +#define A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__MASK 0x0000003f +#define A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__SHIFT 0 +static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE(enum pc_di_primtype val) +{ + return ((val) << A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__MASK; +} +#define A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__MASK 0x000000c0 +#define A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__SHIFT 6 +static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT(enum pc_di_src_sel val) +{ + return ((val) << A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__MASK; +} +#define A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__MASK 0x00000300 +#define 
A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__SHIFT 8 +static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_VIS_CULL(enum pc_di_vis_cull_mode val) +{ + return ((val) << A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__MASK; +} +#define A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__MASK 0x00000c00 +#define A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__SHIFT 10 +static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE(enum a4xx_index_size val) +{ + return ((val) << A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__MASK; +} +#define A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__MASK 0x00003000 +#define A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__SHIFT 12 +static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE(enum a6xx_patch_type val) +{ + return ((val) << A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__MASK; +} +#define A4XX_CP_DRAW_INDIRECT_0_GS_ENABLE 0x00010000 +#define A4XX_CP_DRAW_INDIRECT_0_TESS_ENABLE 0x00020000 + + +#define REG_A4XX_CP_DRAW_INDIRECT_1 0x00000001 +#define A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK 0xffffffff +#define A4XX_CP_DRAW_INDIRECT_1_INDIRECT__SHIFT 0 +static inline uint32_t A4XX_CP_DRAW_INDIRECT_1_INDIRECT(uint32_t val) +{ + return ((val) << A4XX_CP_DRAW_INDIRECT_1_INDIRECT__SHIFT) & A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK; +} + + +#define REG_A5XX_CP_DRAW_INDIRECT_1 0x00000001 +#define A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__MASK 0xffffffff +#define A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__SHIFT 0 +static inline uint32_t A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO(uint32_t val) +{ + return ((val) << A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__SHIFT) & A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__MASK; +} + +#define REG_A5XX_CP_DRAW_INDIRECT_2 0x00000002 +#define A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK 0xffffffff +#define A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT 0 +static inline uint32_t A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI(uint32_t val) +{ + return ((val) << A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT) & A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK; +} + +#define REG_A5XX_CP_DRAW_INDIRECT_INDIRECT 0x00000001 + +#define REG_A4XX_CP_DRAW_INDX_INDIRECT_0 0x00000000 +#define A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__MASK 0x0000003f +#define A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__SHIFT 0 +static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE(enum pc_di_primtype val) +{ + return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__MASK; +} +#define A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__MASK 0x000000c0 +#define A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__SHIFT 6 +static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT(enum pc_di_src_sel val) +{ + return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__MASK; +} +#define A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__MASK 0x00000300 +#define A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__SHIFT 8 +static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL(enum pc_di_vis_cull_mode val) +{ + return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__MASK; +} +#define A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__MASK 0x00000c00 +#define A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__SHIFT 10 +static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE(enum a4xx_index_size val) +{ + return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__MASK; +} +#define A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__MASK 0x00003000 +#define 
A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__SHIFT 12 +static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE(enum a6xx_patch_type val) +{ + return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__MASK; +} +#define A4XX_CP_DRAW_INDX_INDIRECT_0_GS_ENABLE 0x00010000 +#define A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_ENABLE 0x00020000 + + +#define REG_A4XX_CP_DRAW_INDX_INDIRECT_1 0x00000001 +#define A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__MASK 0xffffffff +#define A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__SHIFT 0 +static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE(uint32_t val) +{ + return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__MASK; +} + +#define REG_A4XX_CP_DRAW_INDX_INDIRECT_2 0x00000002 +#define A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__MASK 0xffffffff +#define A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__SHIFT 0 +static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE(uint32_t val) +{ + return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__MASK; +} + +#define REG_A4XX_CP_DRAW_INDX_INDIRECT_3 0x00000003 +#define A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__MASK 0xffffffff +#define A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__SHIFT 0 +static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT(uint32_t val) +{ + return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__MASK; +} + + +#define REG_A5XX_CP_DRAW_INDX_INDIRECT_1 0x00000001 +#define A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__MASK 0xffffffff +#define A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__SHIFT 0 +static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO(uint32_t val) +{ + return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__MASK; +} + +#define REG_A5XX_CP_DRAW_INDX_INDIRECT_2 0x00000002 +#define A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__MASK 0xffffffff +#define A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__SHIFT 0 +static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI(uint32_t val) +{ + return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__MASK; +} + +#define REG_A5XX_CP_DRAW_INDX_INDIRECT_INDX_BASE 0x00000001 + +#define REG_A5XX_CP_DRAW_INDX_INDIRECT_3 0x00000003 +#define A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__MASK 0xffffffff +#define A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__SHIFT 0 +static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(uint32_t val) +{ + return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__MASK; +} + +#define REG_A5XX_CP_DRAW_INDX_INDIRECT_4 0x00000004 +#define A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__MASK 0xffffffff +#define A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__SHIFT 0 +static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO(uint32_t val) +{ + return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__MASK; +} + +#define REG_A5XX_CP_DRAW_INDX_INDIRECT_5 0x00000005 +#define A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__MASK 0xffffffff +#define A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__SHIFT 0 +static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI(uint32_t val) +{ + return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__MASK; +} + +#define REG_A5XX_CP_DRAW_INDX_INDIRECT_INDIRECT 
0x00000004 + +#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_0 0x00000000 +#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__MASK 0x0000003f +#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__SHIFT 0 +static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE(enum pc_di_primtype val) +{ + return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__MASK; +} +#define A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__MASK 0x000000c0 +#define A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__SHIFT 6 +static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT(enum pc_di_src_sel val) +{ + return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__MASK; +} +#define A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__MASK 0x00000300 +#define A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__SHIFT 8 +static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL(enum pc_di_vis_cull_mode val) +{ + return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__MASK; +} +#define A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__MASK 0x00000c00 +#define A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__SHIFT 10 +static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE(enum a4xx_index_size val) +{ + return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__MASK; +} +#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__MASK 0x00003000 +#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__SHIFT 12 +static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE(enum a6xx_patch_type val) +{ + return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__MASK; +} +#define A6XX_CP_DRAW_INDIRECT_MULTI_0_GS_ENABLE 0x00010000 +#define A6XX_CP_DRAW_INDIRECT_MULTI_0_TESS_ENABLE 0x00020000 + +#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_1 0x00000001 +#define A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__MASK 0x0000000f +#define A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__SHIFT 0 +static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(enum a6xx_draw_indirect_opcode val) +{ + return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__MASK; +} +#define A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__MASK 0x003fff00 +#define A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__SHIFT 8 +static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(uint32_t val) +{ + return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__MASK; +} + +#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_DRAW_COUNT 0x00000002 + + +#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_INDIRECT 0x00000003 + +#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_STRIDE 0x00000005 + + +#define REG_CP_DRAW_INDIRECT_MULTI_INDEX_INDEXED 0x00000003 + +#define REG_CP_DRAW_INDIRECT_MULTI_MAX_INDICES_INDEXED 0x00000005 + +#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_INDEXED 0x00000006 + +#define REG_CP_DRAW_INDIRECT_MULTI_STRIDE_INDEXED 0x00000008 + + +#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_INDIRECT 0x00000003 + +#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_COUNT_INDIRECT 0x00000005 + +#define REG_CP_DRAW_INDIRECT_MULTI_STRIDE_INDIRECT 0x00000007 + + +#define REG_CP_DRAW_INDIRECT_MULTI_INDEX_INDIRECT_INDEXED 0x00000003 + +#define REG_CP_DRAW_INDIRECT_MULTI_MAX_INDICES_INDIRECT_INDEXED 0x00000005 + +#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_INDIRECT_INDEXED 0x00000006 + +#define 
REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_COUNT_INDIRECT_INDEXED 0x00000008 + +#define REG_CP_DRAW_INDIRECT_MULTI_STRIDE_INDIRECT_INDEXED 0x0000000a + +#define REG_CP_DRAW_PRED_ENABLE_GLOBAL_0 0x00000000 +#define CP_DRAW_PRED_ENABLE_GLOBAL_0_ENABLE 0x00000001 + +#define REG_CP_DRAW_PRED_ENABLE_LOCAL_0 0x00000000 +#define CP_DRAW_PRED_ENABLE_LOCAL_0_ENABLE 0x00000001 + +#define REG_CP_DRAW_PRED_SET_0 0x00000000 +#define CP_DRAW_PRED_SET_0_SRC__MASK 0x000000f0 +#define CP_DRAW_PRED_SET_0_SRC__SHIFT 4 +static inline uint32_t CP_DRAW_PRED_SET_0_SRC(enum cp_draw_pred_src val) +{ + return ((val) << CP_DRAW_PRED_SET_0_SRC__SHIFT) & CP_DRAW_PRED_SET_0_SRC__MASK; +} +#define CP_DRAW_PRED_SET_0_TEST__MASK 0x00000100 +#define CP_DRAW_PRED_SET_0_TEST__SHIFT 8 +static inline uint32_t CP_DRAW_PRED_SET_0_TEST(enum cp_draw_pred_test val) +{ + return ((val) << CP_DRAW_PRED_SET_0_TEST__SHIFT) & CP_DRAW_PRED_SET_0_TEST__MASK; +} + +#define REG_CP_DRAW_PRED_SET_MEM_ADDR 0x00000001 + +static inline uint32_t REG_CP_SET_DRAW_STATE_(uint32_t i0) { return 0x00000000 + 0x3*i0; } + +static inline uint32_t REG_CP_SET_DRAW_STATE__0(uint32_t i0) { return 0x00000000 + 0x3*i0; } +#define CP_SET_DRAW_STATE__0_COUNT__MASK 0x0000ffff +#define CP_SET_DRAW_STATE__0_COUNT__SHIFT 0 +static inline uint32_t CP_SET_DRAW_STATE__0_COUNT(uint32_t val) +{ + return ((val) << CP_SET_DRAW_STATE__0_COUNT__SHIFT) & CP_SET_DRAW_STATE__0_COUNT__MASK; +} +#define CP_SET_DRAW_STATE__0_DIRTY 0x00010000 +#define CP_SET_DRAW_STATE__0_DISABLE 0x00020000 +#define CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS 0x00040000 +#define CP_SET_DRAW_STATE__0_LOAD_IMMED 0x00080000 +#define CP_SET_DRAW_STATE__0_BINNING 0x00100000 +#define CP_SET_DRAW_STATE__0_GMEM 0x00200000 +#define CP_SET_DRAW_STATE__0_SYSMEM 0x00400000 +#define CP_SET_DRAW_STATE__0_GROUP_ID__MASK 0x1f000000 +#define CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT 24 +static inline uint32_t CP_SET_DRAW_STATE__0_GROUP_ID(uint32_t val) +{ + return ((val) << CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT) & CP_SET_DRAW_STATE__0_GROUP_ID__MASK; +} + +static inline uint32_t REG_CP_SET_DRAW_STATE__1(uint32_t i0) { return 0x00000001 + 0x3*i0; } +#define CP_SET_DRAW_STATE__1_ADDR_LO__MASK 0xffffffff +#define CP_SET_DRAW_STATE__1_ADDR_LO__SHIFT 0 +static inline uint32_t CP_SET_DRAW_STATE__1_ADDR_LO(uint32_t val) +{ + return ((val) << CP_SET_DRAW_STATE__1_ADDR_LO__SHIFT) & CP_SET_DRAW_STATE__1_ADDR_LO__MASK; +} + +static inline uint32_t REG_CP_SET_DRAW_STATE__2(uint32_t i0) { return 0x00000002 + 0x3*i0; } +#define CP_SET_DRAW_STATE__2_ADDR_HI__MASK 0xffffffff +#define CP_SET_DRAW_STATE__2_ADDR_HI__SHIFT 0 +static inline uint32_t CP_SET_DRAW_STATE__2_ADDR_HI(uint32_t val) +{ + return ((val) << CP_SET_DRAW_STATE__2_ADDR_HI__SHIFT) & CP_SET_DRAW_STATE__2_ADDR_HI__MASK; +} + +#define REG_CP_SET_BIN_0 0x00000000 + +#define REG_CP_SET_BIN_1 0x00000001 +#define CP_SET_BIN_1_X1__MASK 0x0000ffff +#define CP_SET_BIN_1_X1__SHIFT 0 +static inline uint32_t CP_SET_BIN_1_X1(uint32_t val) +{ + return ((val) << CP_SET_BIN_1_X1__SHIFT) & CP_SET_BIN_1_X1__MASK; +} +#define CP_SET_BIN_1_Y1__MASK 0xffff0000 +#define CP_SET_BIN_1_Y1__SHIFT 16 +static inline uint32_t CP_SET_BIN_1_Y1(uint32_t val) +{ + return ((val) << CP_SET_BIN_1_Y1__SHIFT) & CP_SET_BIN_1_Y1__MASK; +} + +#define REG_CP_SET_BIN_2 0x00000002 +#define CP_SET_BIN_2_X2__MASK 0x0000ffff +#define CP_SET_BIN_2_X2__SHIFT 0 +static inline uint32_t CP_SET_BIN_2_X2(uint32_t val) +{ + return ((val) << CP_SET_BIN_2_X2__SHIFT) & CP_SET_BIN_2_X2__MASK; +} +#define CP_SET_BIN_2_Y2__MASK 0xffff0000 
+#define CP_SET_BIN_2_Y2__SHIFT 16 +static inline uint32_t CP_SET_BIN_2_Y2(uint32_t val) +{ + return ((val) << CP_SET_BIN_2_Y2__SHIFT) & CP_SET_BIN_2_Y2__MASK; +} + +#define REG_CP_SET_BIN_DATA_0 0x00000000 +#define CP_SET_BIN_DATA_0_BIN_DATA_ADDR__MASK 0xffffffff +#define CP_SET_BIN_DATA_0_BIN_DATA_ADDR__SHIFT 0 +static inline uint32_t CP_SET_BIN_DATA_0_BIN_DATA_ADDR(uint32_t val) +{ + return ((val) << CP_SET_BIN_DATA_0_BIN_DATA_ADDR__SHIFT) & CP_SET_BIN_DATA_0_BIN_DATA_ADDR__MASK; +} + +#define REG_CP_SET_BIN_DATA_1 0x00000001 +#define CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK 0xffffffff +#define CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT 0 +static inline uint32_t CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS(uint32_t val) +{ + return ((val) << CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT) & CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK; +} + +#define REG_CP_SET_BIN_DATA5_0 0x00000000 +#define CP_SET_BIN_DATA5_0_VSC_SIZE__MASK 0x003f0000 +#define CP_SET_BIN_DATA5_0_VSC_SIZE__SHIFT 16 +static inline uint32_t CP_SET_BIN_DATA5_0_VSC_SIZE(uint32_t val) +{ + return ((val) << CP_SET_BIN_DATA5_0_VSC_SIZE__SHIFT) & CP_SET_BIN_DATA5_0_VSC_SIZE__MASK; +} +#define CP_SET_BIN_DATA5_0_VSC_N__MASK 0x07c00000 +#define CP_SET_BIN_DATA5_0_VSC_N__SHIFT 22 +static inline uint32_t CP_SET_BIN_DATA5_0_VSC_N(uint32_t val) +{ + return ((val) << CP_SET_BIN_DATA5_0_VSC_N__SHIFT) & CP_SET_BIN_DATA5_0_VSC_N__MASK; +} + +#define REG_CP_SET_BIN_DATA5_1 0x00000001 +#define CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__MASK 0xffffffff +#define CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__SHIFT 0 +static inline uint32_t CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO(uint32_t val) +{ + return ((val) << CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__SHIFT) & CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__MASK; +} + +#define REG_CP_SET_BIN_DATA5_2 0x00000002 +#define CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__MASK 0xffffffff +#define CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__SHIFT 0 +static inline uint32_t CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI(uint32_t val) +{ + return ((val) << CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__SHIFT) & CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__MASK; +} + +#define REG_CP_SET_BIN_DATA5_3 0x00000003 +#define CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__MASK 0xffffffff +#define CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__SHIFT 0 +static inline uint32_t CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO(uint32_t val) +{ + return ((val) << CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__SHIFT) & CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__MASK; +} + +#define REG_CP_SET_BIN_DATA5_4 0x00000004 +#define CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__MASK 0xffffffff +#define CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__SHIFT 0 +static inline uint32_t CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI(uint32_t val) +{ + return ((val) << CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__SHIFT) & CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__MASK; +} + +#define REG_CP_SET_BIN_DATA5_5 0x00000005 +#define CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__MASK 0xffffffff +#define CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__SHIFT 0 +static inline uint32_t CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO(uint32_t val) +{ + return ((val) << CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__SHIFT) & CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__MASK; +} + +#define REG_CP_SET_BIN_DATA5_6 0x00000006 +#define CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__MASK 0xffffffff +#define CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__SHIFT 0 +static inline uint32_t CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI(uint32_t val) +{ + return ((val) << CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__SHIFT) & CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__MASK; +} + +#define 
REG_CP_SET_BIN_DATA5_OFFSET_0 0x00000000 +#define CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__MASK 0x003f0000 +#define CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__SHIFT 16 +static inline uint32_t CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE(uint32_t val) +{ + return ((val) << CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__SHIFT) & CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__MASK; +} +#define CP_SET_BIN_DATA5_OFFSET_0_VSC_N__MASK 0x07c00000 +#define CP_SET_BIN_DATA5_OFFSET_0_VSC_N__SHIFT 22 +static inline uint32_t CP_SET_BIN_DATA5_OFFSET_0_VSC_N(uint32_t val) +{ + return ((val) << CP_SET_BIN_DATA5_OFFSET_0_VSC_N__SHIFT) & CP_SET_BIN_DATA5_OFFSET_0_VSC_N__MASK; +} + +#define REG_CP_SET_BIN_DATA5_OFFSET_1 0x00000001 +#define CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__MASK 0xffffffff +#define CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__SHIFT 0 +static inline uint32_t CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET(uint32_t val) +{ + return ((val) << CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__SHIFT) & CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__MASK; +} + +#define REG_CP_SET_BIN_DATA5_OFFSET_2 0x00000002 +#define CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__MASK 0xffffffff +#define CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__SHIFT 0 +static inline uint32_t CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET(uint32_t val) +{ + return ((val) << CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__SHIFT) & CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__MASK; +} + +#define REG_CP_SET_BIN_DATA5_OFFSET_3 0x00000003 +#define CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__MASK 0xffffffff +#define CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__SHIFT 0 +static inline uint32_t CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET(uint32_t val) +{ + return ((val) << CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__SHIFT) & CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__MASK; +} + +#define REG_CP_REG_RMW_0 0x00000000 +#define CP_REG_RMW_0_DST_REG__MASK 0x0003ffff +#define CP_REG_RMW_0_DST_REG__SHIFT 0 +static inline uint32_t CP_REG_RMW_0_DST_REG(uint32_t val) +{ + return ((val) << CP_REG_RMW_0_DST_REG__SHIFT) & CP_REG_RMW_0_DST_REG__MASK; +} +#define CP_REG_RMW_0_ROTATE__MASK 0x1f000000 +#define CP_REG_RMW_0_ROTATE__SHIFT 24 +static inline uint32_t CP_REG_RMW_0_ROTATE(uint32_t val) +{ + return ((val) << CP_REG_RMW_0_ROTATE__SHIFT) & CP_REG_RMW_0_ROTATE__MASK; +} +#define CP_REG_RMW_0_SRC1_ADD 0x20000000 +#define CP_REG_RMW_0_SRC1_IS_REG 0x40000000 +#define CP_REG_RMW_0_SRC0_IS_REG 0x80000000 + +#define REG_CP_REG_RMW_1 0x00000001 +#define CP_REG_RMW_1_SRC0__MASK 0xffffffff +#define CP_REG_RMW_1_SRC0__SHIFT 0 +static inline uint32_t CP_REG_RMW_1_SRC0(uint32_t val) +{ + return ((val) << CP_REG_RMW_1_SRC0__SHIFT) & CP_REG_RMW_1_SRC0__MASK; +} + +#define REG_CP_REG_RMW_2 0x00000002 +#define CP_REG_RMW_2_SRC1__MASK 0xffffffff +#define CP_REG_RMW_2_SRC1__SHIFT 0 +static inline uint32_t CP_REG_RMW_2_SRC1(uint32_t val) +{ + return ((val) << CP_REG_RMW_2_SRC1__SHIFT) & CP_REG_RMW_2_SRC1__MASK; +} + +#define REG_CP_REG_TO_MEM_0 0x00000000 +#define CP_REG_TO_MEM_0_REG__MASK 0x0003ffff +#define CP_REG_TO_MEM_0_REG__SHIFT 0 +static inline uint32_t CP_REG_TO_MEM_0_REG(uint32_t val) +{ + return ((val) << CP_REG_TO_MEM_0_REG__SHIFT) & CP_REG_TO_MEM_0_REG__MASK; +} +#define CP_REG_TO_MEM_0_CNT__MASK 0x3ffc0000 +#define CP_REG_TO_MEM_0_CNT__SHIFT 18 +static inline uint32_t CP_REG_TO_MEM_0_CNT(uint32_t val) +{ + return ((val) << CP_REG_TO_MEM_0_CNT__SHIFT) & CP_REG_TO_MEM_0_CNT__MASK; +} +#define CP_REG_TO_MEM_0_64B 0x40000000 +#define CP_REG_TO_MEM_0_ACCUMULATE 0x80000000 + +#define REG_CP_REG_TO_MEM_1 0x00000001 +#define 
CP_REG_TO_MEM_1_DEST__MASK 0xffffffff +#define CP_REG_TO_MEM_1_DEST__SHIFT 0 +static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val) +{ + return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK; +} + +#define REG_CP_REG_TO_MEM_2 0x00000002 +#define CP_REG_TO_MEM_2_DEST_HI__MASK 0xffffffff +#define CP_REG_TO_MEM_2_DEST_HI__SHIFT 0 +static inline uint32_t CP_REG_TO_MEM_2_DEST_HI(uint32_t val) +{ + return ((val) << CP_REG_TO_MEM_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_2_DEST_HI__MASK; +} + +#define REG_CP_REG_TO_MEM_OFFSET_REG_0 0x00000000 +#define CP_REG_TO_MEM_OFFSET_REG_0_REG__MASK 0x0003ffff +#define CP_REG_TO_MEM_OFFSET_REG_0_REG__SHIFT 0 +static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_0_REG(uint32_t val) +{ + return ((val) << CP_REG_TO_MEM_OFFSET_REG_0_REG__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_0_REG__MASK; +} +#define CP_REG_TO_MEM_OFFSET_REG_0_CNT__MASK 0x3ffc0000 +#define CP_REG_TO_MEM_OFFSET_REG_0_CNT__SHIFT 18 +static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_0_CNT(uint32_t val) +{ + return ((val) << CP_REG_TO_MEM_OFFSET_REG_0_CNT__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_0_CNT__MASK; +} +#define CP_REG_TO_MEM_OFFSET_REG_0_64B 0x40000000 +#define CP_REG_TO_MEM_OFFSET_REG_0_ACCUMULATE 0x80000000 + +#define REG_CP_REG_TO_MEM_OFFSET_REG_1 0x00000001 +#define CP_REG_TO_MEM_OFFSET_REG_1_DEST__MASK 0xffffffff +#define CP_REG_TO_MEM_OFFSET_REG_1_DEST__SHIFT 0 +static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_1_DEST(uint32_t val) +{ + return ((val) << CP_REG_TO_MEM_OFFSET_REG_1_DEST__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_1_DEST__MASK; +} + +#define REG_CP_REG_TO_MEM_OFFSET_REG_2 0x00000002 +#define CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__MASK 0xffffffff +#define CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__SHIFT 0 +static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI(uint32_t val) +{ + return ((val) << CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__MASK; +} + +#define REG_CP_REG_TO_MEM_OFFSET_REG_3 0x00000003 +#define CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__MASK 0x0003ffff +#define CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__SHIFT 0 +static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0(uint32_t val) +{ + return ((val) << CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__MASK; +} +#define CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0_SCRATCH 0x00080000 + +#define REG_CP_REG_TO_MEM_OFFSET_MEM_0 0x00000000 +#define CP_REG_TO_MEM_OFFSET_MEM_0_REG__MASK 0x0003ffff +#define CP_REG_TO_MEM_OFFSET_MEM_0_REG__SHIFT 0 +static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_0_REG(uint32_t val) +{ + return ((val) << CP_REG_TO_MEM_OFFSET_MEM_0_REG__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_0_REG__MASK; +} +#define CP_REG_TO_MEM_OFFSET_MEM_0_CNT__MASK 0x3ffc0000 +#define CP_REG_TO_MEM_OFFSET_MEM_0_CNT__SHIFT 18 +static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_0_CNT(uint32_t val) +{ + return ((val) << CP_REG_TO_MEM_OFFSET_MEM_0_CNT__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_0_CNT__MASK; +} +#define CP_REG_TO_MEM_OFFSET_MEM_0_64B 0x40000000 +#define CP_REG_TO_MEM_OFFSET_MEM_0_ACCUMULATE 0x80000000 + +#define REG_CP_REG_TO_MEM_OFFSET_MEM_1 0x00000001 +#define CP_REG_TO_MEM_OFFSET_MEM_1_DEST__MASK 0xffffffff +#define CP_REG_TO_MEM_OFFSET_MEM_1_DEST__SHIFT 0 +static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_1_DEST(uint32_t val) +{ + return ((val) << CP_REG_TO_MEM_OFFSET_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_1_DEST__MASK; +} + +#define REG_CP_REG_TO_MEM_OFFSET_MEM_2 0x00000002 +#define CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__MASK 0xffffffff +#define CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__SHIFT 
0 +static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI(uint32_t val) +{ + return ((val) << CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__MASK; +} + +#define REG_CP_REG_TO_MEM_OFFSET_MEM_3 0x00000003 +#define CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__MASK 0xffffffff +#define CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__SHIFT 0 +static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO(uint32_t val) +{ + return ((val) << CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__MASK; +} + +#define REG_CP_REG_TO_MEM_OFFSET_MEM_4 0x00000004 +#define CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__MASK 0xffffffff +#define CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__SHIFT 0 +static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI(uint32_t val) +{ + return ((val) << CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__MASK; +} + +#define REG_CP_MEM_TO_REG_0 0x00000000 +#define CP_MEM_TO_REG_0_REG__MASK 0x0003ffff +#define CP_MEM_TO_REG_0_REG__SHIFT 0 +static inline uint32_t CP_MEM_TO_REG_0_REG(uint32_t val) +{ + return ((val) << CP_MEM_TO_REG_0_REG__SHIFT) & CP_MEM_TO_REG_0_REG__MASK; +} +#define CP_MEM_TO_REG_0_CNT__MASK 0x3ff80000 +#define CP_MEM_TO_REG_0_CNT__SHIFT 19 +static inline uint32_t CP_MEM_TO_REG_0_CNT(uint32_t val) +{ + return ((val) << CP_MEM_TO_REG_0_CNT__SHIFT) & CP_MEM_TO_REG_0_CNT__MASK; +} +#define CP_MEM_TO_REG_0_SHIFT_BY_2 0x40000000 +#define CP_MEM_TO_REG_0_UNK31 0x80000000 + +#define REG_CP_MEM_TO_REG_1 0x00000001 +#define CP_MEM_TO_REG_1_SRC__MASK 0xffffffff +#define CP_MEM_TO_REG_1_SRC__SHIFT 0 +static inline uint32_t CP_MEM_TO_REG_1_SRC(uint32_t val) +{ + return ((val) << CP_MEM_TO_REG_1_SRC__SHIFT) & CP_MEM_TO_REG_1_SRC__MASK; +} + +#define REG_CP_MEM_TO_REG_2 0x00000002 +#define CP_MEM_TO_REG_2_SRC_HI__MASK 0xffffffff +#define CP_MEM_TO_REG_2_SRC_HI__SHIFT 0 +static inline uint32_t CP_MEM_TO_REG_2_SRC_HI(uint32_t val) +{ + return ((val) << CP_MEM_TO_REG_2_SRC_HI__SHIFT) & CP_MEM_TO_REG_2_SRC_HI__MASK; +} + +#define REG_CP_MEM_TO_MEM_0 0x00000000 +#define CP_MEM_TO_MEM_0_NEG_A 0x00000001 +#define CP_MEM_TO_MEM_0_NEG_B 0x00000002 +#define CP_MEM_TO_MEM_0_NEG_C 0x00000004 +#define CP_MEM_TO_MEM_0_DOUBLE 0x20000000 +#define CP_MEM_TO_MEM_0_WAIT_FOR_MEM_WRITES 0x40000000 +#define CP_MEM_TO_MEM_0_UNK31 0x80000000 + +#define REG_CP_MEMCPY_0 0x00000000 +#define CP_MEMCPY_0_DWORDS__MASK 0xffffffff +#define CP_MEMCPY_0_DWORDS__SHIFT 0 +static inline uint32_t CP_MEMCPY_0_DWORDS(uint32_t val) +{ + return ((val) << CP_MEMCPY_0_DWORDS__SHIFT) & CP_MEMCPY_0_DWORDS__MASK; +} + +#define REG_CP_MEMCPY_1 0x00000001 +#define CP_MEMCPY_1_SRC_LO__MASK 0xffffffff +#define CP_MEMCPY_1_SRC_LO__SHIFT 0 +static inline uint32_t CP_MEMCPY_1_SRC_LO(uint32_t val) +{ + return ((val) << CP_MEMCPY_1_SRC_LO__SHIFT) & CP_MEMCPY_1_SRC_LO__MASK; +} + +#define REG_CP_MEMCPY_2 0x00000002 +#define CP_MEMCPY_2_SRC_HI__MASK 0xffffffff +#define CP_MEMCPY_2_SRC_HI__SHIFT 0 +static inline uint32_t CP_MEMCPY_2_SRC_HI(uint32_t val) +{ + return ((val) << CP_MEMCPY_2_SRC_HI__SHIFT) & CP_MEMCPY_2_SRC_HI__MASK; +} + +#define REG_CP_MEMCPY_3 0x00000003 +#define CP_MEMCPY_3_DST_LO__MASK 0xffffffff +#define CP_MEMCPY_3_DST_LO__SHIFT 0 +static inline uint32_t CP_MEMCPY_3_DST_LO(uint32_t val) +{ + return ((val) << CP_MEMCPY_3_DST_LO__SHIFT) & CP_MEMCPY_3_DST_LO__MASK; +} + +#define REG_CP_MEMCPY_4 0x00000004 +#define CP_MEMCPY_4_DST_HI__MASK 0xffffffff +#define CP_MEMCPY_4_DST_HI__SHIFT 0 +static inline uint32_t CP_MEMCPY_4_DST_HI(uint32_t val) +{ 
+ return ((val) << CP_MEMCPY_4_DST_HI__SHIFT) & CP_MEMCPY_4_DST_HI__MASK; +} + +#define REG_CP_REG_TO_SCRATCH_0 0x00000000 +#define CP_REG_TO_SCRATCH_0_REG__MASK 0x0003ffff +#define CP_REG_TO_SCRATCH_0_REG__SHIFT 0 +static inline uint32_t CP_REG_TO_SCRATCH_0_REG(uint32_t val) +{ + return ((val) << CP_REG_TO_SCRATCH_0_REG__SHIFT) & CP_REG_TO_SCRATCH_0_REG__MASK; +} +#define CP_REG_TO_SCRATCH_0_SCRATCH__MASK 0x00700000 +#define CP_REG_TO_SCRATCH_0_SCRATCH__SHIFT 20 +static inline uint32_t CP_REG_TO_SCRATCH_0_SCRATCH(uint32_t val) +{ + return ((val) << CP_REG_TO_SCRATCH_0_SCRATCH__SHIFT) & CP_REG_TO_SCRATCH_0_SCRATCH__MASK; +} +#define CP_REG_TO_SCRATCH_0_CNT__MASK 0x07000000 +#define CP_REG_TO_SCRATCH_0_CNT__SHIFT 24 +static inline uint32_t CP_REG_TO_SCRATCH_0_CNT(uint32_t val) +{ + return ((val) << CP_REG_TO_SCRATCH_0_CNT__SHIFT) & CP_REG_TO_SCRATCH_0_CNT__MASK; +} + +#define REG_CP_SCRATCH_TO_REG_0 0x00000000 +#define CP_SCRATCH_TO_REG_0_REG__MASK 0x0003ffff +#define CP_SCRATCH_TO_REG_0_REG__SHIFT 0 +static inline uint32_t CP_SCRATCH_TO_REG_0_REG(uint32_t val) +{ + return ((val) << CP_SCRATCH_TO_REG_0_REG__SHIFT) & CP_SCRATCH_TO_REG_0_REG__MASK; +} +#define CP_SCRATCH_TO_REG_0_UNK18 0x00040000 +#define CP_SCRATCH_TO_REG_0_SCRATCH__MASK 0x00700000 +#define CP_SCRATCH_TO_REG_0_SCRATCH__SHIFT 20 +static inline uint32_t CP_SCRATCH_TO_REG_0_SCRATCH(uint32_t val) +{ + return ((val) << CP_SCRATCH_TO_REG_0_SCRATCH__SHIFT) & CP_SCRATCH_TO_REG_0_SCRATCH__MASK; +} +#define CP_SCRATCH_TO_REG_0_CNT__MASK 0x07000000 +#define CP_SCRATCH_TO_REG_0_CNT__SHIFT 24 +static inline uint32_t CP_SCRATCH_TO_REG_0_CNT(uint32_t val) +{ + return ((val) << CP_SCRATCH_TO_REG_0_CNT__SHIFT) & CP_SCRATCH_TO_REG_0_CNT__MASK; +} + +#define REG_CP_SCRATCH_WRITE_0 0x00000000 +#define CP_SCRATCH_WRITE_0_SCRATCH__MASK 0x00700000 +#define CP_SCRATCH_WRITE_0_SCRATCH__SHIFT 20 +static inline uint32_t CP_SCRATCH_WRITE_0_SCRATCH(uint32_t val) +{ + return ((val) << CP_SCRATCH_WRITE_0_SCRATCH__SHIFT) & CP_SCRATCH_WRITE_0_SCRATCH__MASK; +} + +#define REG_CP_MEM_WRITE_0 0x00000000 +#define CP_MEM_WRITE_0_ADDR_LO__MASK 0xffffffff +#define CP_MEM_WRITE_0_ADDR_LO__SHIFT 0 +static inline uint32_t CP_MEM_WRITE_0_ADDR_LO(uint32_t val) +{ + return ((val) << CP_MEM_WRITE_0_ADDR_LO__SHIFT) & CP_MEM_WRITE_0_ADDR_LO__MASK; +} + +#define REG_CP_MEM_WRITE_1 0x00000001 +#define CP_MEM_WRITE_1_ADDR_HI__MASK 0xffffffff +#define CP_MEM_WRITE_1_ADDR_HI__SHIFT 0 +static inline uint32_t CP_MEM_WRITE_1_ADDR_HI(uint32_t val) +{ + return ((val) << CP_MEM_WRITE_1_ADDR_HI__SHIFT) & CP_MEM_WRITE_1_ADDR_HI__MASK; +} + +#define REG_CP_COND_WRITE_0 0x00000000 +#define CP_COND_WRITE_0_FUNCTION__MASK 0x00000007 +#define CP_COND_WRITE_0_FUNCTION__SHIFT 0 +static inline uint32_t CP_COND_WRITE_0_FUNCTION(enum cp_cond_function val) +{ + return ((val) << CP_COND_WRITE_0_FUNCTION__SHIFT) & CP_COND_WRITE_0_FUNCTION__MASK; +} +#define CP_COND_WRITE_0_POLL_MEMORY 0x00000010 +#define CP_COND_WRITE_0_WRITE_MEMORY 0x00000100 + +#define REG_CP_COND_WRITE_1 0x00000001 +#define CP_COND_WRITE_1_POLL_ADDR__MASK 0xffffffff +#define CP_COND_WRITE_1_POLL_ADDR__SHIFT 0 +static inline uint32_t CP_COND_WRITE_1_POLL_ADDR(uint32_t val) +{ + return ((val) << CP_COND_WRITE_1_POLL_ADDR__SHIFT) & CP_COND_WRITE_1_POLL_ADDR__MASK; +} + +#define REG_CP_COND_WRITE_2 0x00000002 +#define CP_COND_WRITE_2_REF__MASK 0xffffffff +#define CP_COND_WRITE_2_REF__SHIFT 0 +static inline uint32_t CP_COND_WRITE_2_REF(uint32_t val) +{ + return ((val) << CP_COND_WRITE_2_REF__SHIFT) & CP_COND_WRITE_2_REF__MASK; +} + 
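+/*
+ * Illustrative sketch (not part of the generated header): 64-bit GPU
+ * addresses are split across consecutive LO/HI dwords throughout these
+ * packets. Using the CP_MEM_WRITE helpers defined above, a hypothetical
+ * iova would be emitted as:
+ *
+ *	uint32_t lo = CP_MEM_WRITE_0_ADDR_LO((uint32_t)iova);
+ *	uint32_t hi = CP_MEM_WRITE_1_ADDR_HI((uint32_t)(iova >> 32));
+ *
+ * The LO/HI masks are all-ones with a zero shift, so these helpers are
+ * pass-throughs that mainly keep the generated packing code uniform.
+ */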
+#define REG_CP_COND_WRITE_3 0x00000003 +#define CP_COND_WRITE_3_MASK__MASK 0xffffffff +#define CP_COND_WRITE_3_MASK__SHIFT 0 +static inline uint32_t CP_COND_WRITE_3_MASK(uint32_t val) +{ + return ((val) << CP_COND_WRITE_3_MASK__SHIFT) & CP_COND_WRITE_3_MASK__MASK; +} + +#define REG_CP_COND_WRITE_4 0x00000004 +#define CP_COND_WRITE_4_WRITE_ADDR__MASK 0xffffffff +#define CP_COND_WRITE_4_WRITE_ADDR__SHIFT 0 +static inline uint32_t CP_COND_WRITE_4_WRITE_ADDR(uint32_t val) +{ + return ((val) << CP_COND_WRITE_4_WRITE_ADDR__SHIFT) & CP_COND_WRITE_4_WRITE_ADDR__MASK; +} + +#define REG_CP_COND_WRITE_5 0x00000005 +#define CP_COND_WRITE_5_WRITE_DATA__MASK 0xffffffff +#define CP_COND_WRITE_5_WRITE_DATA__SHIFT 0 +static inline uint32_t CP_COND_WRITE_5_WRITE_DATA(uint32_t val) +{ + return ((val) << CP_COND_WRITE_5_WRITE_DATA__SHIFT) & CP_COND_WRITE_5_WRITE_DATA__MASK; +} + +#define REG_CP_COND_WRITE5_0 0x00000000 +#define CP_COND_WRITE5_0_FUNCTION__MASK 0x00000007 +#define CP_COND_WRITE5_0_FUNCTION__SHIFT 0 +static inline uint32_t CP_COND_WRITE5_0_FUNCTION(enum cp_cond_function val) +{ + return ((val) << CP_COND_WRITE5_0_FUNCTION__SHIFT) & CP_COND_WRITE5_0_FUNCTION__MASK; +} +#define CP_COND_WRITE5_0_SIGNED_COMPARE 0x00000008 +#define CP_COND_WRITE5_0_POLL_MEMORY 0x00000010 +#define CP_COND_WRITE5_0_POLL_SCRATCH 0x00000020 +#define CP_COND_WRITE5_0_WRITE_MEMORY 0x00000100 + +#define REG_CP_COND_WRITE5_1 0x00000001 +#define CP_COND_WRITE5_1_POLL_ADDR_LO__MASK 0xffffffff +#define CP_COND_WRITE5_1_POLL_ADDR_LO__SHIFT 0 +static inline uint32_t CP_COND_WRITE5_1_POLL_ADDR_LO(uint32_t val) +{ + return ((val) << CP_COND_WRITE5_1_POLL_ADDR_LO__SHIFT) & CP_COND_WRITE5_1_POLL_ADDR_LO__MASK; +} + +#define REG_CP_COND_WRITE5_2 0x00000002 +#define CP_COND_WRITE5_2_POLL_ADDR_HI__MASK 0xffffffff +#define CP_COND_WRITE5_2_POLL_ADDR_HI__SHIFT 0 +static inline uint32_t CP_COND_WRITE5_2_POLL_ADDR_HI(uint32_t val) +{ + return ((val) << CP_COND_WRITE5_2_POLL_ADDR_HI__SHIFT) & CP_COND_WRITE5_2_POLL_ADDR_HI__MASK; +} + +#define REG_CP_COND_WRITE5_3 0x00000003 +#define CP_COND_WRITE5_3_REF__MASK 0xffffffff +#define CP_COND_WRITE5_3_REF__SHIFT 0 +static inline uint32_t CP_COND_WRITE5_3_REF(uint32_t val) +{ + return ((val) << CP_COND_WRITE5_3_REF__SHIFT) & CP_COND_WRITE5_3_REF__MASK; +} + +#define REG_CP_COND_WRITE5_4 0x00000004 +#define CP_COND_WRITE5_4_MASK__MASK 0xffffffff +#define CP_COND_WRITE5_4_MASK__SHIFT 0 +static inline uint32_t CP_COND_WRITE5_4_MASK(uint32_t val) +{ + return ((val) << CP_COND_WRITE5_4_MASK__SHIFT) & CP_COND_WRITE5_4_MASK__MASK; +} + +#define REG_CP_COND_WRITE5_5 0x00000005 +#define CP_COND_WRITE5_5_WRITE_ADDR_LO__MASK 0xffffffff +#define CP_COND_WRITE5_5_WRITE_ADDR_LO__SHIFT 0 +static inline uint32_t CP_COND_WRITE5_5_WRITE_ADDR_LO(uint32_t val) +{ + return ((val) << CP_COND_WRITE5_5_WRITE_ADDR_LO__SHIFT) & CP_COND_WRITE5_5_WRITE_ADDR_LO__MASK; +} + +#define REG_CP_COND_WRITE5_6 0x00000006 +#define CP_COND_WRITE5_6_WRITE_ADDR_HI__MASK 0xffffffff +#define CP_COND_WRITE5_6_WRITE_ADDR_HI__SHIFT 0 +static inline uint32_t CP_COND_WRITE5_6_WRITE_ADDR_HI(uint32_t val) +{ + return ((val) << CP_COND_WRITE5_6_WRITE_ADDR_HI__SHIFT) & CP_COND_WRITE5_6_WRITE_ADDR_HI__MASK; +} + +#define REG_CP_COND_WRITE5_7 0x00000007 +#define CP_COND_WRITE5_7_WRITE_DATA__MASK 0xffffffff +#define CP_COND_WRITE5_7_WRITE_DATA__SHIFT 0 +static inline uint32_t CP_COND_WRITE5_7_WRITE_DATA(uint32_t val) +{ + return ((val) << CP_COND_WRITE5_7_WRITE_DATA__SHIFT) & CP_COND_WRITE5_7_WRITE_DATA__MASK; +} + +#define REG_CP_WAIT_MEM_GTE_0 
0x00000000 +#define CP_WAIT_MEM_GTE_0_RESERVED__MASK 0xffffffff +#define CP_WAIT_MEM_GTE_0_RESERVED__SHIFT 0 +static inline uint32_t CP_WAIT_MEM_GTE_0_RESERVED(uint32_t val) +{ + return ((val) << CP_WAIT_MEM_GTE_0_RESERVED__SHIFT) & CP_WAIT_MEM_GTE_0_RESERVED__MASK; +} + +#define REG_CP_WAIT_MEM_GTE_1 0x00000001 +#define CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__MASK 0xffffffff +#define CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__SHIFT 0 +static inline uint32_t CP_WAIT_MEM_GTE_1_POLL_ADDR_LO(uint32_t val) +{ + return ((val) << CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__SHIFT) & CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__MASK; +} + +#define REG_CP_WAIT_MEM_GTE_2 0x00000002 +#define CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__MASK 0xffffffff +#define CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__SHIFT 0 +static inline uint32_t CP_WAIT_MEM_GTE_2_POLL_ADDR_HI(uint32_t val) +{ + return ((val) << CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__SHIFT) & CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__MASK; +} + +#define REG_CP_WAIT_MEM_GTE_3 0x00000003 +#define CP_WAIT_MEM_GTE_3_REF__MASK 0xffffffff +#define CP_WAIT_MEM_GTE_3_REF__SHIFT 0 +static inline uint32_t CP_WAIT_MEM_GTE_3_REF(uint32_t val) +{ + return ((val) << CP_WAIT_MEM_GTE_3_REF__SHIFT) & CP_WAIT_MEM_GTE_3_REF__MASK; +} + +#define REG_CP_WAIT_REG_MEM_0 0x00000000 +#define CP_WAIT_REG_MEM_0_FUNCTION__MASK 0x00000007 +#define CP_WAIT_REG_MEM_0_FUNCTION__SHIFT 0 +static inline uint32_t CP_WAIT_REG_MEM_0_FUNCTION(enum cp_cond_function val) +{ + return ((val) << CP_WAIT_REG_MEM_0_FUNCTION__SHIFT) & CP_WAIT_REG_MEM_0_FUNCTION__MASK; +} +#define CP_WAIT_REG_MEM_0_SIGNED_COMPARE 0x00000008 +#define CP_WAIT_REG_MEM_0_POLL_MEMORY 0x00000010 +#define CP_WAIT_REG_MEM_0_POLL_SCRATCH 0x00000020 +#define CP_WAIT_REG_MEM_0_WRITE_MEMORY 0x00000100 + +#define REG_CP_WAIT_REG_MEM_1 0x00000001 +#define CP_WAIT_REG_MEM_1_POLL_ADDR_LO__MASK 0xffffffff +#define CP_WAIT_REG_MEM_1_POLL_ADDR_LO__SHIFT 0 +static inline uint32_t CP_WAIT_REG_MEM_1_POLL_ADDR_LO(uint32_t val) +{ + return ((val) << CP_WAIT_REG_MEM_1_POLL_ADDR_LO__SHIFT) & CP_WAIT_REG_MEM_1_POLL_ADDR_LO__MASK; +} + +#define REG_CP_WAIT_REG_MEM_2 0x00000002 +#define CP_WAIT_REG_MEM_2_POLL_ADDR_HI__MASK 0xffffffff +#define CP_WAIT_REG_MEM_2_POLL_ADDR_HI__SHIFT 0 +static inline uint32_t CP_WAIT_REG_MEM_2_POLL_ADDR_HI(uint32_t val) +{ + return ((val) << CP_WAIT_REG_MEM_2_POLL_ADDR_HI__SHIFT) & CP_WAIT_REG_MEM_2_POLL_ADDR_HI__MASK; +} + +#define REG_CP_WAIT_REG_MEM_3 0x00000003 +#define CP_WAIT_REG_MEM_3_REF__MASK 0xffffffff +#define CP_WAIT_REG_MEM_3_REF__SHIFT 0 +static inline uint32_t CP_WAIT_REG_MEM_3_REF(uint32_t val) +{ + return ((val) << CP_WAIT_REG_MEM_3_REF__SHIFT) & CP_WAIT_REG_MEM_3_REF__MASK; +} + +#define REG_CP_WAIT_REG_MEM_4 0x00000004 +#define CP_WAIT_REG_MEM_4_MASK__MASK 0xffffffff +#define CP_WAIT_REG_MEM_4_MASK__SHIFT 0 +static inline uint32_t CP_WAIT_REG_MEM_4_MASK(uint32_t val) +{ + return ((val) << CP_WAIT_REG_MEM_4_MASK__SHIFT) & CP_WAIT_REG_MEM_4_MASK__MASK; +} + +#define REG_CP_WAIT_REG_MEM_5 0x00000005 +#define CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__MASK 0xffffffff +#define CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__SHIFT 0 +static inline uint32_t CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(uint32_t val) +{ + return ((val) << CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__SHIFT) & CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__MASK; +} + +#define REG_CP_WAIT_TWO_REGS_0 0x00000000 +#define CP_WAIT_TWO_REGS_0_REG0__MASK 0x0003ffff +#define CP_WAIT_TWO_REGS_0_REG0__SHIFT 0 +static inline uint32_t CP_WAIT_TWO_REGS_0_REG0(uint32_t val) +{ + return ((val) << CP_WAIT_TWO_REGS_0_REG0__SHIFT) & 
CP_WAIT_TWO_REGS_0_REG0__MASK; +} + +#define REG_CP_WAIT_TWO_REGS_1 0x00000001 +#define CP_WAIT_TWO_REGS_1_REG1__MASK 0x0003ffff +#define CP_WAIT_TWO_REGS_1_REG1__SHIFT 0 +static inline uint32_t CP_WAIT_TWO_REGS_1_REG1(uint32_t val) +{ + return ((val) << CP_WAIT_TWO_REGS_1_REG1__SHIFT) & CP_WAIT_TWO_REGS_1_REG1__MASK; +} + +#define REG_CP_WAIT_TWO_REGS_2 0x00000002 +#define CP_WAIT_TWO_REGS_2_REF__MASK 0xffffffff +#define CP_WAIT_TWO_REGS_2_REF__SHIFT 0 +static inline uint32_t CP_WAIT_TWO_REGS_2_REF(uint32_t val) +{ + return ((val) << CP_WAIT_TWO_REGS_2_REF__SHIFT) & CP_WAIT_TWO_REGS_2_REF__MASK; +} + +#define REG_CP_DISPATCH_COMPUTE_0 0x00000000 + +#define REG_CP_DISPATCH_COMPUTE_1 0x00000001 +#define CP_DISPATCH_COMPUTE_1_X__MASK 0xffffffff +#define CP_DISPATCH_COMPUTE_1_X__SHIFT 0 +static inline uint32_t CP_DISPATCH_COMPUTE_1_X(uint32_t val) +{ + return ((val) << CP_DISPATCH_COMPUTE_1_X__SHIFT) & CP_DISPATCH_COMPUTE_1_X__MASK; +} + +#define REG_CP_DISPATCH_COMPUTE_2 0x00000002 +#define CP_DISPATCH_COMPUTE_2_Y__MASK 0xffffffff +#define CP_DISPATCH_COMPUTE_2_Y__SHIFT 0 +static inline uint32_t CP_DISPATCH_COMPUTE_2_Y(uint32_t val) +{ + return ((val) << CP_DISPATCH_COMPUTE_2_Y__SHIFT) & CP_DISPATCH_COMPUTE_2_Y__MASK; +} + +#define REG_CP_DISPATCH_COMPUTE_3 0x00000003 +#define CP_DISPATCH_COMPUTE_3_Z__MASK 0xffffffff +#define CP_DISPATCH_COMPUTE_3_Z__SHIFT 0 +static inline uint32_t CP_DISPATCH_COMPUTE_3_Z(uint32_t val) +{ + return ((val) << CP_DISPATCH_COMPUTE_3_Z__SHIFT) & CP_DISPATCH_COMPUTE_3_Z__MASK; +} + +#define REG_CP_SET_RENDER_MODE_0 0x00000000 +#define CP_SET_RENDER_MODE_0_MODE__MASK 0x000001ff +#define CP_SET_RENDER_MODE_0_MODE__SHIFT 0 +static inline uint32_t CP_SET_RENDER_MODE_0_MODE(enum render_mode_cmd val) +{ + return ((val) << CP_SET_RENDER_MODE_0_MODE__SHIFT) & CP_SET_RENDER_MODE_0_MODE__MASK; +} + +#define REG_CP_SET_RENDER_MODE_1 0x00000001 +#define CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK 0xffffffff +#define CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT 0 +static inline uint32_t CP_SET_RENDER_MODE_1_ADDR_0_LO(uint32_t val) +{ + return ((val) << CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT) & CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK; +} + +#define REG_CP_SET_RENDER_MODE_2 0x00000002 +#define CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK 0xffffffff +#define CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT 0 +static inline uint32_t CP_SET_RENDER_MODE_2_ADDR_0_HI(uint32_t val) +{ + return ((val) << CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT) & CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK; +} + +#define REG_CP_SET_RENDER_MODE_3 0x00000003 +#define CP_SET_RENDER_MODE_3_VSC_ENABLE 0x00000008 +#define CP_SET_RENDER_MODE_3_GMEM_ENABLE 0x00000010 + +#define REG_CP_SET_RENDER_MODE_4 0x00000004 + +#define REG_CP_SET_RENDER_MODE_5 0x00000005 +#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK 0xffffffff +#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT 0 +static inline uint32_t CP_SET_RENDER_MODE_5_ADDR_1_LEN(uint32_t val) +{ + return ((val) << CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT) & CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK; +} + +#define REG_CP_SET_RENDER_MODE_6 0x00000006 +#define CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK 0xffffffff +#define CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT 0 +static inline uint32_t CP_SET_RENDER_MODE_6_ADDR_1_LO(uint32_t val) +{ + return ((val) << CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT) & CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK; +} + +#define REG_CP_SET_RENDER_MODE_7 0x00000007 +#define CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK 0xffffffff +#define CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT 0 +static inline uint32_t 
CP_SET_RENDER_MODE_7_ADDR_1_HI(uint32_t val) +{ + return ((val) << CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT) & CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK; +} + +#define REG_CP_COMPUTE_CHECKPOINT_0 0x00000000 +#define CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__MASK 0xffffffff +#define CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__SHIFT 0 +static inline uint32_t CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO(uint32_t val) +{ + return ((val) << CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__SHIFT) & CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__MASK; +} + +#define REG_CP_COMPUTE_CHECKPOINT_1 0x00000001 +#define CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__MASK 0xffffffff +#define CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__SHIFT 0 +static inline uint32_t CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI(uint32_t val) +{ + return ((val) << CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__SHIFT) & CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__MASK; +} + +#define REG_CP_COMPUTE_CHECKPOINT_2 0x00000002 + +#define REG_CP_COMPUTE_CHECKPOINT_3 0x00000003 +#define CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__MASK 0xffffffff +#define CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__SHIFT 0 +static inline uint32_t CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN(uint32_t val) +{ + return ((val) << CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__SHIFT) & CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__MASK; +} + +#define REG_CP_COMPUTE_CHECKPOINT_4 0x00000004 + +#define REG_CP_COMPUTE_CHECKPOINT_5 0x00000005 +#define CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__MASK 0xffffffff +#define CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__SHIFT 0 +static inline uint32_t CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO(uint32_t val) +{ + return ((val) << CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__SHIFT) & CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__MASK; +} + +#define REG_CP_COMPUTE_CHECKPOINT_6 0x00000006 +#define CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__MASK 0xffffffff +#define CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__SHIFT 0 +static inline uint32_t CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI(uint32_t val) +{ + return ((val) << CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__SHIFT) & CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__MASK; +} + +#define REG_CP_COMPUTE_CHECKPOINT_7 0x00000007 + +#define REG_CP_PERFCOUNTER_ACTION_0 0x00000000 + +#define REG_CP_PERFCOUNTER_ACTION_1 0x00000001 +#define CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__MASK 0xffffffff +#define CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__SHIFT 0 +static inline uint32_t CP_PERFCOUNTER_ACTION_1_ADDR_0_LO(uint32_t val) +{ + return ((val) << CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__SHIFT) & CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__MASK; +} + +#define REG_CP_PERFCOUNTER_ACTION_2 0x00000002 +#define CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__MASK 0xffffffff +#define CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__SHIFT 0 +static inline uint32_t CP_PERFCOUNTER_ACTION_2_ADDR_0_HI(uint32_t val) +{ + return ((val) << CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__SHIFT) & CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__MASK; +} + +#define REG_CP_EVENT_WRITE_0 0x00000000 +#define CP_EVENT_WRITE_0_EVENT__MASK 0x000000ff +#define CP_EVENT_WRITE_0_EVENT__SHIFT 0 +static inline uint32_t CP_EVENT_WRITE_0_EVENT(enum vgt_event_type val) +{ + return ((val) << CP_EVENT_WRITE_0_EVENT__SHIFT) & CP_EVENT_WRITE_0_EVENT__MASK; +} +#define CP_EVENT_WRITE_0_TIMESTAMP 0x40000000 +#define CP_EVENT_WRITE_0_IRQ 0x80000000 + +#define REG_CP_EVENT_WRITE_1 0x00000001 +#define CP_EVENT_WRITE_1_ADDR_0_LO__MASK 0xffffffff +#define CP_EVENT_WRITE_1_ADDR_0_LO__SHIFT 0 +static inline uint32_t CP_EVENT_WRITE_1_ADDR_0_LO(uint32_t val) +{ + return ((val) << CP_EVENT_WRITE_1_ADDR_0_LO__SHIFT) & CP_EVENT_WRITE_1_ADDR_0_LO__MASK; +} + +#define REG_CP_EVENT_WRITE_2 0x00000002 +#define CP_EVENT_WRITE_2_ADDR_0_HI__MASK 0xffffffff +#define 
CP_EVENT_WRITE_2_ADDR_0_HI__SHIFT 0 +static inline uint32_t CP_EVENT_WRITE_2_ADDR_0_HI(uint32_t val) +{ + return ((val) << CP_EVENT_WRITE_2_ADDR_0_HI__SHIFT) & CP_EVENT_WRITE_2_ADDR_0_HI__MASK; +} + +#define REG_CP_EVENT_WRITE_3 0x00000003 + +#define REG_CP_BLIT_0 0x00000000 +#define CP_BLIT_0_OP__MASK 0x0000000f +#define CP_BLIT_0_OP__SHIFT 0 +static inline uint32_t CP_BLIT_0_OP(enum cp_blit_cmd val) +{ + return ((val) << CP_BLIT_0_OP__SHIFT) & CP_BLIT_0_OP__MASK; +} + +#define REG_CP_BLIT_1 0x00000001 +#define CP_BLIT_1_SRC_X1__MASK 0x00003fff +#define CP_BLIT_1_SRC_X1__SHIFT 0 +static inline uint32_t CP_BLIT_1_SRC_X1(uint32_t val) +{ + return ((val) << CP_BLIT_1_SRC_X1__SHIFT) & CP_BLIT_1_SRC_X1__MASK; +} +#define CP_BLIT_1_SRC_Y1__MASK 0x3fff0000 +#define CP_BLIT_1_SRC_Y1__SHIFT 16 +static inline uint32_t CP_BLIT_1_SRC_Y1(uint32_t val) +{ + return ((val) << CP_BLIT_1_SRC_Y1__SHIFT) & CP_BLIT_1_SRC_Y1__MASK; +} + +#define REG_CP_BLIT_2 0x00000002 +#define CP_BLIT_2_SRC_X2__MASK 0x00003fff +#define CP_BLIT_2_SRC_X2__SHIFT 0 +static inline uint32_t CP_BLIT_2_SRC_X2(uint32_t val) +{ + return ((val) << CP_BLIT_2_SRC_X2__SHIFT) & CP_BLIT_2_SRC_X2__MASK; +} +#define CP_BLIT_2_SRC_Y2__MASK 0x3fff0000 +#define CP_BLIT_2_SRC_Y2__SHIFT 16 +static inline uint32_t CP_BLIT_2_SRC_Y2(uint32_t val) +{ + return ((val) << CP_BLIT_2_SRC_Y2__SHIFT) & CP_BLIT_2_SRC_Y2__MASK; +} + +#define REG_CP_BLIT_3 0x00000003 +#define CP_BLIT_3_DST_X1__MASK 0x00003fff +#define CP_BLIT_3_DST_X1__SHIFT 0 +static inline uint32_t CP_BLIT_3_DST_X1(uint32_t val) +{ + return ((val) << CP_BLIT_3_DST_X1__SHIFT) & CP_BLIT_3_DST_X1__MASK; +} +#define CP_BLIT_3_DST_Y1__MASK 0x3fff0000 +#define CP_BLIT_3_DST_Y1__SHIFT 16 +static inline uint32_t CP_BLIT_3_DST_Y1(uint32_t val) +{ + return ((val) << CP_BLIT_3_DST_Y1__SHIFT) & CP_BLIT_3_DST_Y1__MASK; +} + +#define REG_CP_BLIT_4 0x00000004 +#define CP_BLIT_4_DST_X2__MASK 0x00003fff +#define CP_BLIT_4_DST_X2__SHIFT 0 +static inline uint32_t CP_BLIT_4_DST_X2(uint32_t val) +{ + return ((val) << CP_BLIT_4_DST_X2__SHIFT) & CP_BLIT_4_DST_X2__MASK; +} +#define CP_BLIT_4_DST_Y2__MASK 0x3fff0000 +#define CP_BLIT_4_DST_Y2__SHIFT 16 +static inline uint32_t CP_BLIT_4_DST_Y2(uint32_t val) +{ + return ((val) << CP_BLIT_4_DST_Y2__SHIFT) & CP_BLIT_4_DST_Y2__MASK; +} + +#define REG_CP_EXEC_CS_0 0x00000000 + +#define REG_CP_EXEC_CS_1 0x00000001 +#define CP_EXEC_CS_1_NGROUPS_X__MASK 0xffffffff +#define CP_EXEC_CS_1_NGROUPS_X__SHIFT 0 +static inline uint32_t CP_EXEC_CS_1_NGROUPS_X(uint32_t val) +{ + return ((val) << CP_EXEC_CS_1_NGROUPS_X__SHIFT) & CP_EXEC_CS_1_NGROUPS_X__MASK; +} + +#define REG_CP_EXEC_CS_2 0x00000002 +#define CP_EXEC_CS_2_NGROUPS_Y__MASK 0xffffffff +#define CP_EXEC_CS_2_NGROUPS_Y__SHIFT 0 +static inline uint32_t CP_EXEC_CS_2_NGROUPS_Y(uint32_t val) +{ + return ((val) << CP_EXEC_CS_2_NGROUPS_Y__SHIFT) & CP_EXEC_CS_2_NGROUPS_Y__MASK; +} + +#define REG_CP_EXEC_CS_3 0x00000003 +#define CP_EXEC_CS_3_NGROUPS_Z__MASK 0xffffffff +#define CP_EXEC_CS_3_NGROUPS_Z__SHIFT 0 +static inline uint32_t CP_EXEC_CS_3_NGROUPS_Z(uint32_t val) +{ + return ((val) << CP_EXEC_CS_3_NGROUPS_Z__SHIFT) & CP_EXEC_CS_3_NGROUPS_Z__MASK; +} + +#define REG_A4XX_CP_EXEC_CS_INDIRECT_0 0x00000000 + + +#define REG_A4XX_CP_EXEC_CS_INDIRECT_1 0x00000001 +#define A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__MASK 0xffffffff +#define A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__SHIFT 0 +static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_1_ADDR(uint32_t val) +{ + return ((val) << A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__MASK; 
+} + +#define REG_A4XX_CP_EXEC_CS_INDIRECT_2 0x00000002 +#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__MASK 0x00000ffc +#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__SHIFT 2 +static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX(uint32_t val) +{ + return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__MASK; +} +#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__MASK 0x003ff000 +#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__SHIFT 12 +static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY(uint32_t val) +{ + return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__MASK; +} +#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__MASK 0xffc00000 +#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__SHIFT 22 +static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ(uint32_t val) +{ + return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__MASK; +} + + +#define REG_A5XX_CP_EXEC_CS_INDIRECT_1 0x00000001 +#define A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__MASK 0xffffffff +#define A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__SHIFT 0 +static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO(uint32_t val) +{ + return ((val) << A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__MASK; +} + +#define REG_A5XX_CP_EXEC_CS_INDIRECT_2 0x00000002 +#define A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__MASK 0xffffffff +#define A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__SHIFT 0 +static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI(uint32_t val) +{ + return ((val) << A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__MASK; +} + +#define REG_A5XX_CP_EXEC_CS_INDIRECT_3 0x00000003 +#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__MASK 0x00000ffc +#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__SHIFT 2 +static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(uint32_t val) +{ + return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__MASK; +} +#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__MASK 0x003ff000 +#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__SHIFT 12 +static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(uint32_t val) +{ + return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__MASK; +} +#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__MASK 0xffc00000 +#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__SHIFT 22 +static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(uint32_t val) +{ + return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__MASK; +} + +#define REG_A6XX_CP_SET_MARKER_0 0x00000000 +#define A6XX_CP_SET_MARKER_0_MODE__MASK 0x000001ff +#define A6XX_CP_SET_MARKER_0_MODE__SHIFT 0 +static inline uint32_t A6XX_CP_SET_MARKER_0_MODE(enum a6xx_marker val) +{ + return ((val) << A6XX_CP_SET_MARKER_0_MODE__SHIFT) & A6XX_CP_SET_MARKER_0_MODE__MASK; +} +#define A6XX_CP_SET_MARKER_0_MARKER__MASK 0x0000000f +#define A6XX_CP_SET_MARKER_0_MARKER__SHIFT 0 +static inline uint32_t A6XX_CP_SET_MARKER_0_MARKER(enum a6xx_marker val) +{ + return ((val) << A6XX_CP_SET_MARKER_0_MARKER__SHIFT) & A6XX_CP_SET_MARKER_0_MARKER__MASK; +} + +static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG_(uint32_t i0) { return 0x00000000 + 0x3*i0; } + +static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG__0(uint32_t i0) { return 0x00000000 + 0x3*i0; } +#define A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK 0x00000007 
+#define A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT 0 +static inline uint32_t A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG(enum pseudo_reg val) +{ + return ((val) << A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT) & A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK; +} + +static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG__1(uint32_t i0) { return 0x00000001 + 0x3*i0; } +#define A6XX_CP_SET_PSEUDO_REG__1_LO__MASK 0xffffffff +#define A6XX_CP_SET_PSEUDO_REG__1_LO__SHIFT 0 +static inline uint32_t A6XX_CP_SET_PSEUDO_REG__1_LO(uint32_t val) +{ + return ((val) << A6XX_CP_SET_PSEUDO_REG__1_LO__SHIFT) & A6XX_CP_SET_PSEUDO_REG__1_LO__MASK; +} + +static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG__2(uint32_t i0) { return 0x00000002 + 0x3*i0; } +#define A6XX_CP_SET_PSEUDO_REG__2_HI__MASK 0xffffffff +#define A6XX_CP_SET_PSEUDO_REG__2_HI__SHIFT 0 +static inline uint32_t A6XX_CP_SET_PSEUDO_REG__2_HI(uint32_t val) +{ + return ((val) << A6XX_CP_SET_PSEUDO_REG__2_HI__SHIFT) & A6XX_CP_SET_PSEUDO_REG__2_HI__MASK; +} + +#define REG_A6XX_CP_REG_TEST_0 0x00000000 +#define A6XX_CP_REG_TEST_0_REG__MASK 0x0003ffff +#define A6XX_CP_REG_TEST_0_REG__SHIFT 0 +static inline uint32_t A6XX_CP_REG_TEST_0_REG(uint32_t val) +{ + return ((val) << A6XX_CP_REG_TEST_0_REG__SHIFT) & A6XX_CP_REG_TEST_0_REG__MASK; +} +#define A6XX_CP_REG_TEST_0_BIT__MASK 0x01f00000 +#define A6XX_CP_REG_TEST_0_BIT__SHIFT 20 +static inline uint32_t A6XX_CP_REG_TEST_0_BIT(uint32_t val) +{ + return ((val) << A6XX_CP_REG_TEST_0_BIT__SHIFT) & A6XX_CP_REG_TEST_0_BIT__MASK; +} +#define A6XX_CP_REG_TEST_0_WAIT_FOR_ME 0x02000000 + +#define REG_CP_COND_REG_EXEC_0 0x00000000 +#define CP_COND_REG_EXEC_0_REG0__MASK 0x0003ffff +#define CP_COND_REG_EXEC_0_REG0__SHIFT 0 +static inline uint32_t CP_COND_REG_EXEC_0_REG0(uint32_t val) +{ + return ((val) << CP_COND_REG_EXEC_0_REG0__SHIFT) & CP_COND_REG_EXEC_0_REG0__MASK; +} +#define CP_COND_REG_EXEC_0_BINNING 0x02000000 +#define CP_COND_REG_EXEC_0_GMEM 0x04000000 +#define CP_COND_REG_EXEC_0_SYSMEM 0x08000000 +#define CP_COND_REG_EXEC_0_MODE__MASK 0xf0000000 +#define CP_COND_REG_EXEC_0_MODE__SHIFT 28 +static inline uint32_t CP_COND_REG_EXEC_0_MODE(enum compare_mode val) +{ + return ((val) << CP_COND_REG_EXEC_0_MODE__SHIFT) & CP_COND_REG_EXEC_0_MODE__MASK; +} + +#define REG_CP_COND_REG_EXEC_1 0x00000001 +#define CP_COND_REG_EXEC_1_DWORDS__MASK 0xffffffff +#define CP_COND_REG_EXEC_1_DWORDS__SHIFT 0 +static inline uint32_t CP_COND_REG_EXEC_1_DWORDS(uint32_t val) +{ + return ((val) << CP_COND_REG_EXEC_1_DWORDS__SHIFT) & CP_COND_REG_EXEC_1_DWORDS__MASK; +} + +#define REG_CP_COND_EXEC_0 0x00000000 +#define CP_COND_EXEC_0_ADDR0_LO__MASK 0xffffffff +#define CP_COND_EXEC_0_ADDR0_LO__SHIFT 0 +static inline uint32_t CP_COND_EXEC_0_ADDR0_LO(uint32_t val) +{ + return ((val) << CP_COND_EXEC_0_ADDR0_LO__SHIFT) & CP_COND_EXEC_0_ADDR0_LO__MASK; +} + +#define REG_CP_COND_EXEC_1 0x00000001 +#define CP_COND_EXEC_1_ADDR0_HI__MASK 0xffffffff +#define CP_COND_EXEC_1_ADDR0_HI__SHIFT 0 +static inline uint32_t CP_COND_EXEC_1_ADDR0_HI(uint32_t val) +{ + return ((val) << CP_COND_EXEC_1_ADDR0_HI__SHIFT) & CP_COND_EXEC_1_ADDR0_HI__MASK; +} + +#define REG_CP_COND_EXEC_2 0x00000002 +#define CP_COND_EXEC_2_ADDR1_LO__MASK 0xffffffff +#define CP_COND_EXEC_2_ADDR1_LO__SHIFT 0 +static inline uint32_t CP_COND_EXEC_2_ADDR1_LO(uint32_t val) +{ + return ((val) << CP_COND_EXEC_2_ADDR1_LO__SHIFT) & CP_COND_EXEC_2_ADDR1_LO__MASK; +} + +#define REG_CP_COND_EXEC_3 0x00000003 +#define CP_COND_EXEC_3_ADDR1_HI__MASK 0xffffffff +#define CP_COND_EXEC_3_ADDR1_HI__SHIFT 0 +static inline 
uint32_t CP_COND_EXEC_3_ADDR1_HI(uint32_t val) +{ + return ((val) << CP_COND_EXEC_3_ADDR1_HI__SHIFT) & CP_COND_EXEC_3_ADDR1_HI__MASK; +} + +#define REG_CP_COND_EXEC_4 0x00000004 +#define CP_COND_EXEC_4_REF__MASK 0xffffffff +#define CP_COND_EXEC_4_REF__SHIFT 0 +static inline uint32_t CP_COND_EXEC_4_REF(uint32_t val) +{ + return ((val) << CP_COND_EXEC_4_REF__SHIFT) & CP_COND_EXEC_4_REF__MASK; +} + +#define REG_CP_COND_EXEC_5 0x00000005 +#define CP_COND_EXEC_5_DWORDS__MASK 0xffffffff +#define CP_COND_EXEC_5_DWORDS__SHIFT 0 +static inline uint32_t CP_COND_EXEC_5_DWORDS(uint32_t val) +{ + return ((val) << CP_COND_EXEC_5_DWORDS__SHIFT) & CP_COND_EXEC_5_DWORDS__MASK; +} + +#define REG_CP_SET_CTXSWITCH_IB_0 0x00000000 +#define CP_SET_CTXSWITCH_IB_0_ADDR_LO__MASK 0xffffffff +#define CP_SET_CTXSWITCH_IB_0_ADDR_LO__SHIFT 0 +static inline uint32_t CP_SET_CTXSWITCH_IB_0_ADDR_LO(uint32_t val) +{ + return ((val) << CP_SET_CTXSWITCH_IB_0_ADDR_LO__SHIFT) & CP_SET_CTXSWITCH_IB_0_ADDR_LO__MASK; +} + +#define REG_CP_SET_CTXSWITCH_IB_1 0x00000001 +#define CP_SET_CTXSWITCH_IB_1_ADDR_HI__MASK 0xffffffff +#define CP_SET_CTXSWITCH_IB_1_ADDR_HI__SHIFT 0 +static inline uint32_t CP_SET_CTXSWITCH_IB_1_ADDR_HI(uint32_t val) +{ + return ((val) << CP_SET_CTXSWITCH_IB_1_ADDR_HI__SHIFT) & CP_SET_CTXSWITCH_IB_1_ADDR_HI__MASK; +} + +#define REG_CP_SET_CTXSWITCH_IB_2 0x00000002 +#define CP_SET_CTXSWITCH_IB_2_DWORDS__MASK 0x000fffff +#define CP_SET_CTXSWITCH_IB_2_DWORDS__SHIFT 0 +static inline uint32_t CP_SET_CTXSWITCH_IB_2_DWORDS(uint32_t val) +{ + return ((val) << CP_SET_CTXSWITCH_IB_2_DWORDS__SHIFT) & CP_SET_CTXSWITCH_IB_2_DWORDS__MASK; +} +#define CP_SET_CTXSWITCH_IB_2_TYPE__MASK 0x00300000 +#define CP_SET_CTXSWITCH_IB_2_TYPE__SHIFT 20 +static inline uint32_t CP_SET_CTXSWITCH_IB_2_TYPE(enum ctxswitch_ib val) +{ + return ((val) << CP_SET_CTXSWITCH_IB_2_TYPE__SHIFT) & CP_SET_CTXSWITCH_IB_2_TYPE__MASK; +} + +#define REG_CP_REG_WRITE_0 0x00000000 +#define CP_REG_WRITE_0_TRACKER__MASK 0x00000007 +#define CP_REG_WRITE_0_TRACKER__SHIFT 0 +static inline uint32_t CP_REG_WRITE_0_TRACKER(enum reg_tracker val) +{ + return ((val) << CP_REG_WRITE_0_TRACKER__SHIFT) & CP_REG_WRITE_0_TRACKER__MASK; +} + +#define REG_CP_SMMU_TABLE_UPDATE_0 0x00000000 +#define CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__MASK 0xffffffff +#define CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__SHIFT 0 +static inline uint32_t CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(uint32_t val) +{ + return ((val) << CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__SHIFT) & CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__MASK; +} + +#define REG_CP_SMMU_TABLE_UPDATE_1 0x00000001 +#define CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__MASK 0x0000ffff +#define CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__SHIFT 0 +static inline uint32_t CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(uint32_t val) +{ + return ((val) << CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__SHIFT) & CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__MASK; +} +#define CP_SMMU_TABLE_UPDATE_1_ASID__MASK 0xffff0000 +#define CP_SMMU_TABLE_UPDATE_1_ASID__SHIFT 16 +static inline uint32_t CP_SMMU_TABLE_UPDATE_1_ASID(uint32_t val) +{ + return ((val) << CP_SMMU_TABLE_UPDATE_1_ASID__SHIFT) & CP_SMMU_TABLE_UPDATE_1_ASID__MASK; +} + +#define REG_CP_SMMU_TABLE_UPDATE_2 0x00000002 +#define CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__MASK 0xffffffff +#define CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__SHIFT 0 +static inline uint32_t CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(uint32_t val) +{ + return ((val) << CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__SHIFT) & CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__MASK; +} + +#define REG_CP_SMMU_TABLE_UPDATE_3 0x00000003 +#define 
CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__MASK		0xffffffff
+#define CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__SHIFT	0
+static inline uint32_t CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(uint32_t val)
+{
+	return ((val) << CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__SHIFT) & CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__MASK;
+}
+
+#define REG_CP_START_BIN_BIN_COUNT			0x00000000
+
+#define REG_CP_START_BIN_PREFIX_ADDR			0x00000001
+
+#define REG_CP_START_BIN_PREFIX_DWORDS			0x00000003
+
+#define REG_CP_START_BIN_BODY_DWORDS			0x00000004
+
+
+#endif /* ADRENO_PM4_XML */
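Every payload in this generated header follows the same shape: a REG_* word offset plus per-field MASK/SHIFT helpers that pack a value into its dword. A minimal sketch of how a packet is assembled from them, assuming the msm ring emitters (OUT_PKT7()/OUT_RING()) and the enum cp_cond_function value WRITE_EQ declared earlier in this header:

/* Sketch: emit a CP_WAIT_REG_MEM that polls a 64-bit memory address until
 * it equals "ref". OUT_PKT7()/OUT_RING() are the ring helpers used by the
 * adreno code elsewhere in this driver; they are assumed here. */
static void example_wait_mem_eq(struct msm_ringbuffer *ring, u64 addr, u32 ref)
{
	OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
	OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
		       CP_WAIT_REG_MEM_0_POLL_MEMORY);
	OUT_RING(ring, CP_WAIT_REG_MEM_1_POLL_ADDR_LO(lower_32_bits(addr)));
	OUT_RING(ring, CP_WAIT_REG_MEM_2_POLL_ADDR_HI(upper_32_bits(addr)));
	OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(ref));
	OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(~0));
	OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0));
}

Note how each helper masks the value after shifting, so an out-of-range field silently truncates rather than corrupting neighbouring bits.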
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
new file mode 100644
index 000000000..b5b6e7031
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DPU_CORE_IRQ_H__
+#define __DPU_CORE_IRQ_H__
+
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+
+/**
+ * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
+ * @kms: MSM KMS handle
+ * @return: none
+ */
+void dpu_core_irq_preinstall(struct msm_kms *kms);
+
+/**
+ * dpu_core_irq_uninstall - uninstall core IRQ handler
+ * @kms: MSM KMS handle
+ * @return: none
+ */
+void dpu_core_irq_uninstall(struct msm_kms *kms);
+
+/**
+ * dpu_core_irq - core IRQ handler
+ * @kms: MSM KMS handle
+ * @return: interrupt handling status
+ */
+irqreturn_t dpu_core_irq(struct msm_kms *kms);
+
+/**
+ * dpu_core_irq_read - IRQ helper function for reading IRQ status
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @return: non-zero if irq detected; otherwise no irq detected
+ */
+u32 dpu_core_irq_read(
+		struct dpu_kms *dpu_kms,
+		int irq_idx);
+
+/**
+ * dpu_core_irq_register_callback - For registering callback function on IRQ
+ *			interrupt
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @irq_cb: IRQ callback function.
+ * @irq_arg: IRQ callback argument.
+ * @return: 0 for success registering callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int dpu_core_irq_register_callback(
+		struct dpu_kms *dpu_kms,
+		int irq_idx,
+		void (*irq_cb)(void *arg, int irq_idx),
+		void *irq_arg);
+
+/**
+ * dpu_core_irq_unregister_callback - For unregistering callback function on IRQ
+ *			interrupt
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @return: 0 for success unregistering callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int dpu_core_irq_unregister_callback(
+		struct dpu_kms *dpu_kms,
+		int irq_idx);
+
+/**
+ * dpu_debugfs_core_irq_init - register core irq debugfs
+ * @dpu_kms: pointer to kms
+ * @parent: debugfs directory root
+ */
+void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+		struct dentry *parent);
+
+#endif /* __DPU_CORE_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
new file mode 100644
index 000000000..1d9d83d7b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -0,0 +1,534 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/sort.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+#include <drm/drm_atomic.h>
+
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_core_perf.h"
+
+/**
+ * enum dpu_perf_mode - performance tuning mode
+ * @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
+ * @DPU_PERF_MODE_MINIMUM: performance bounded by minimum setting
+ * @DPU_PERF_MODE_FIXED: performance bounded by fixed setting
+ * @DPU_PERF_MODE_MAX: maximum value, used for error checking
+ */
+enum dpu_perf_mode {
+	DPU_PERF_MODE_NORMAL,
+	DPU_PERF_MODE_MINIMUM,
+	DPU_PERF_MODE_FIXED,
+	DPU_PERF_MODE_MAX
+};
+
+/**
+ * _dpu_core_perf_calc_bw() - to calculate BW per crtc
+ * @kms: pointer to the dpu_kms
+ * @crtc: pointer to a crtc
+ * Return: returns aggregated BW for all planes in crtc.
+ */
+static u64 _dpu_core_perf_calc_bw(struct dpu_kms *kms,
+		struct drm_crtc *crtc)
+{
+	struct drm_plane *plane;
+	struct dpu_plane_state *pstate;
+	u64 crtc_plane_bw = 0;
+	u32 bw_factor;
+
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		pstate = to_dpu_plane_state(plane->state);
+		if (!pstate)
+			continue;
+
+		crtc_plane_bw += pstate->plane_fetch_bw;
+	}
+
+	bw_factor = kms->catalog->perf->bw_inefficiency_factor;
+	if (bw_factor) {
+		crtc_plane_bw *= bw_factor;
+		do_div(crtc_plane_bw, 100);
+	}
+
+	return crtc_plane_bw;
+}
+
+/**
+ * _dpu_core_perf_calc_clk() - to calculate clock per crtc
+ * @kms: pointer to the dpu_kms
+ * @crtc: pointer to a crtc
+ * @state: pointer to a crtc state
+ * Return: returns max clk for all planes in crtc.
+ */
+static u64 _dpu_core_perf_calc_clk(struct dpu_kms *kms,
+		struct drm_crtc *crtc, struct drm_crtc_state *state)
+{
+	struct drm_plane *plane;
+	struct dpu_plane_state *pstate;
+	struct drm_display_mode *mode;
+	u64 crtc_clk;
+	u32 clk_factor;
+
+	mode = &state->adjusted_mode;
+
+	crtc_clk = mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode);
+
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		pstate = to_dpu_plane_state(plane->state);
+		if (!pstate)
+			continue;
+
+		crtc_clk = max(pstate->plane_clk, crtc_clk);
+	}
+
+	clk_factor = kms->catalog->perf->clk_inefficiency_factor;
+	if (clk_factor) {
+		crtc_clk *= clk_factor;
+		do_div(crtc_clk, 100);
+	}
+
+	return crtc_clk;
+}
+
+static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+{
+	struct msm_drm_private *priv;
+	priv = crtc->dev->dev_private;
+	return to_dpu_kms(priv->kms);
+}
+
+static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
+		struct drm_crtc *crtc,
+		struct drm_crtc_state *state,
+		struct dpu_core_perf_params *perf)
+{
+	if (!kms || !kms->catalog || !crtc || !state || !perf) {
+		DPU_ERROR("invalid parameters\n");
+		return;
+	}
+
+	memset(perf, 0, sizeof(struct dpu_core_perf_params));
+
+	if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
+		perf->bw_ctl = 0;
+		perf->max_per_pipe_ib = 0;
+		perf->core_clk_rate = 0;
+	} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
+		perf->bw_ctl = kms->perf.fix_core_ab_vote;
+		perf->max_per_pipe_ib = kms->perf.fix_core_ib_vote;
+		perf->core_clk_rate = kms->perf.fix_core_clk_rate;
+	} else {
+		perf->bw_ctl = _dpu_core_perf_calc_bw(kms, crtc);
+		perf->max_per_pipe_ib = kms->catalog->perf->min_dram_ib;
+		perf->core_clk_rate = _dpu_core_perf_calc_clk(kms, crtc, state);
+	}
+
+	DRM_DEBUG_ATOMIC(
+		"crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu\n",
+			crtc->base.id, perf->core_clk_rate,
+			perf->max_per_pipe_ib, perf->bw_ctl);
+}
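The two helpers above aggregate per-plane fetch bandwidth and take the worst-case plane clock, then pad the result by a catalog-driven inefficiency percentage. A worked instance with assumed numbers:

/* Worked instance of the padding applied above (values illustrative):
 * with an aggregated plane fetch bandwidth of 1000000000 bytes/s and a
 * catalog bw_inefficiency_factor of 120, the vote becomes
 * 1000000000 * 120 / 100 = 1200000000 bytes/s. The clock path applies
 * the same percent scaling via clk_inefficiency_factor. */
static u64 example_padded_bw(void)
{
	u64 bw = 1000000000ULL;
	u32 bw_factor = 120;	/* assumed catalog value, in percent */

	bw *= bw_factor;
	do_div(bw, 100);	/* bw == 1200000000 */
	return bw;
}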
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	u32 bw, threshold;
+	u64 bw_sum_of_intfs = 0;
+	enum dpu_crtc_client_type curr_client_type;
+	struct dpu_crtc_state *dpu_cstate;
+	struct drm_crtc *tmp_crtc;
+	struct dpu_kms *kms;
+
+	if (!crtc || !state) {
+		DPU_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+
+	kms = _dpu_crtc_get_kms(crtc);
+	if (!kms->catalog) {
+		DPU_ERROR("invalid parameters\n");
+		return 0;
+	}
+
+	/* we only need bandwidth check on real-time clients (interfaces) */
+	if (dpu_crtc_get_client_type(crtc) == NRT_CLIENT)
+		return 0;
+
+	dpu_cstate = to_dpu_crtc_state(state);
+
+	/* obtain new values */
+	_dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
+
+	bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl;
+	curr_client_type = dpu_crtc_get_client_type(crtc);
+
+	drm_for_each_crtc(tmp_crtc, crtc->dev) {
+		if (tmp_crtc->enabled &&
+		    (dpu_crtc_get_client_type(tmp_crtc) ==
+				curr_client_type) && (tmp_crtc != crtc)) {
+			struct dpu_crtc_state *tmp_cstate =
+				to_dpu_crtc_state(tmp_crtc->state);
+
+			DRM_DEBUG_ATOMIC("crtc:%d bw:%llu ctrl:%d\n",
+				tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl,
+				tmp_cstate->bw_control);
+
+			bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
+		}
+
+		/* convert bandwidth to kb */
+		bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
+		DRM_DEBUG_ATOMIC("calculated bandwidth=%uk\n", bw);
+
+		threshold = kms->catalog->perf->max_bw_high;
+
+		DRM_DEBUG_ATOMIC("final threshold bw limit = %d\n", threshold);
+
+		if (!threshold) {
+			DPU_ERROR("no bandwidth limits specified\n");
+			return -E2BIG;
+		} else if (bw > threshold) {
+			DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
+					threshold);
+			return -E2BIG;
+		}
+	}
+
+	return 0;
+}
+
+static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
+		struct drm_crtc *crtc)
+{
+	struct dpu_core_perf_params perf = { 0 };
+	enum dpu_crtc_client_type curr_client_type
+					= dpu_crtc_get_client_type(crtc);
+	struct drm_crtc *tmp_crtc;
+	struct dpu_crtc_state *dpu_cstate;
+	int i, ret = 0;
+	u64 avg_bw;
+
+	drm_for_each_crtc(tmp_crtc, crtc->dev) {
+		if (tmp_crtc->enabled &&
+			curr_client_type ==
+				dpu_crtc_get_client_type(tmp_crtc)) {
+			dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
+
+			perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
+					dpu_cstate->new_perf.max_per_pipe_ib);
+
+			perf.bw_ctl += dpu_cstate->new_perf.bw_ctl;
+
+			DRM_DEBUG_ATOMIC("crtc=%d bw=%llu paths:%d\n",
+				tmp_crtc->base.id,
+				dpu_cstate->new_perf.bw_ctl, kms->num_paths);
+		}
+	}
+
+	if (!kms->num_paths)
+		return 0;
+
+	avg_bw = perf.bw_ctl;
+	do_div(avg_bw, (kms->num_paths * 1000)); /*Bps_to_icc*/
+
+	for (i = 0; i < kms->num_paths; i++)
+		icc_set_bw(kms->path[i], avg_bw, perf.max_per_pipe_ib);
+
+	return ret;
+}
+
+/**
+ * dpu_core_perf_crtc_release_bw() - request zero bandwidth
+ * @crtc: pointer to a crtc
+ *
+ * Function checks a state variable for the crtc. If all pending commit
+ * requests are done, meaning no more bandwidth is needed, the bandwidth
+ * request is released.
+ */ +void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc) +{ + struct dpu_crtc *dpu_crtc; + struct dpu_kms *kms; + + if (!crtc) { + DPU_ERROR("invalid crtc\n"); + return; + } + + kms = _dpu_crtc_get_kms(crtc); + if (!kms->catalog) { + DPU_ERROR("invalid kms\n"); + return; + } + + dpu_crtc = to_dpu_crtc(crtc); + + if (atomic_dec_return(&kms->bandwidth_ref) > 0) + return; + + /* Release the bandwidth */ + if (kms->perf.enable_bw_release) { + trace_dpu_cmd_release_bw(crtc->base.id); + DRM_DEBUG_ATOMIC("Release BW crtc=%d\n", crtc->base.id); + dpu_crtc->cur_perf.bw_ctl = 0; + _dpu_core_perf_crtc_update_bus(kms, crtc); + } +} + +static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms) +{ + u64 clk_rate = kms->perf.perf_tune.min_core_clk; + struct drm_crtc *crtc; + struct dpu_crtc_state *dpu_cstate; + + drm_for_each_crtc(crtc, kms->dev) { + if (crtc->enabled) { + dpu_cstate = to_dpu_crtc_state(crtc->state); + clk_rate = max(dpu_cstate->new_perf.core_clk_rate, + clk_rate); + clk_rate = clk_round_rate(kms->perf.core_clk, + clk_rate); + } + } + + if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) + clk_rate = kms->perf.fix_core_clk_rate; + + DRM_DEBUG_ATOMIC("clk:%llu\n", clk_rate); + + return clk_rate; +} + +int dpu_core_perf_crtc_update(struct drm_crtc *crtc, + int params_changed, bool stop_req) +{ + struct dpu_core_perf_params *new, *old; + bool update_bus = false, update_clk = false; + u64 clk_rate = 0; + struct dpu_crtc *dpu_crtc; + struct dpu_crtc_state *dpu_cstate; + struct dpu_kms *kms; + int ret; + + if (!crtc) { + DPU_ERROR("invalid crtc\n"); + return -EINVAL; + } + + kms = _dpu_crtc_get_kms(crtc); + if (!kms->catalog) { + DPU_ERROR("invalid kms\n"); + return -EINVAL; + } + + dpu_crtc = to_dpu_crtc(crtc); + dpu_cstate = to_dpu_crtc_state(crtc->state); + + DRM_DEBUG_ATOMIC("crtc:%d stop_req:%d core_clk:%llu\n", + crtc->base.id, stop_req, kms->perf.core_clk_rate); + + old = &dpu_crtc->cur_perf; + new = &dpu_cstate->new_perf; + + if (crtc->enabled && !stop_req) { + /* + * cases for bus bandwidth update. + * 1. new bandwidth vote - "ab or ib vote" is higher + * than current vote for update request. + * 2. new bandwidth vote - "ab or ib vote" is lower + * than current vote at end of commit or stop. + */ + if ((params_changed && ((new->bw_ctl > old->bw_ctl) || + (new->max_per_pipe_ib > old->max_per_pipe_ib))) || + (!params_changed && ((new->bw_ctl < old->bw_ctl) || + (new->max_per_pipe_ib < old->max_per_pipe_ib)))) { + DRM_DEBUG_ATOMIC("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n", + crtc->base.id, params_changed, + new->bw_ctl, old->bw_ctl); + old->bw_ctl = new->bw_ctl; + old->max_per_pipe_ib = new->max_per_pipe_ib; + update_bus = true; + } + + if ((params_changed && + (new->core_clk_rate > old->core_clk_rate)) || + (!params_changed && + (new->core_clk_rate < old->core_clk_rate))) { + old->core_clk_rate = new->core_clk_rate; + update_clk = true; + } + } else { + DRM_DEBUG_ATOMIC("crtc=%d disable\n", crtc->base.id); + memset(old, 0, sizeof(*old)); + update_bus = true; + update_clk = true; + } + + trace_dpu_perf_crtc_update(crtc->base.id, new->bw_ctl, + new->core_clk_rate, stop_req, update_bus, update_clk); + + if (update_bus) { + ret = _dpu_core_perf_crtc_update_bus(kms, crtc); + if (ret) { + DPU_ERROR("crtc-%d: failed to update bus bw vote\n", + crtc->base.id); + return ret; + } + } + + /* + * Update the clock after bandwidth vote to ensure + * bandwidth is available before clock rate is increased. 
+ */ + if (update_clk) { + clk_rate = _dpu_core_perf_get_core_clk_rate(kms); + + trace_dpu_core_perf_update_clk(kms->dev, stop_req, clk_rate); + + clk_rate = min(clk_rate, kms->perf.max_core_clk_rate); + ret = dev_pm_opp_set_rate(&kms->pdev->dev, clk_rate); + if (ret) { + DPU_ERROR("failed to set core clock rate %llu\n", clk_rate); + return ret; + } + + kms->perf.core_clk_rate = clk_rate; + DRM_DEBUG_ATOMIC("update clk rate = %lld HZ\n", clk_rate); + } + return 0; +} + +#ifdef CONFIG_DEBUG_FS + +static ssize_t _dpu_core_perf_mode_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct dpu_core_perf *perf = file->private_data; + const struct dpu_perf_cfg *cfg = perf->catalog->perf; + u32 perf_mode = 0; + int ret; + + ret = kstrtouint_from_user(user_buf, count, 0, &perf_mode); + if (ret) + return ret; + + if (perf_mode >= DPU_PERF_MODE_MAX) + return -EINVAL; + + if (perf_mode == DPU_PERF_MODE_FIXED) { + DRM_INFO("fix performance mode\n"); + } else if (perf_mode == DPU_PERF_MODE_MINIMUM) { + /* run the driver with max clk and BW vote */ + perf->perf_tune.min_core_clk = perf->max_core_clk_rate; + perf->perf_tune.min_bus_vote = + (u64) cfg->max_bw_high * 1000; + DRM_INFO("minimum performance mode\n"); + } else if (perf_mode == DPU_PERF_MODE_NORMAL) { + /* reset the perf tune params to 0 */ + perf->perf_tune.min_core_clk = 0; + perf->perf_tune.min_bus_vote = 0; + DRM_INFO("normal performance mode\n"); + } + perf->perf_tune.mode = perf_mode; + + return count; +} + +static ssize_t _dpu_core_perf_mode_read(struct file *file, + char __user *buff, size_t count, loff_t *ppos) +{ + struct dpu_core_perf *perf = file->private_data; + int len; + char buf[128]; + + len = scnprintf(buf, sizeof(buf), + "mode %d min_mdp_clk %llu min_bus_vote %llu\n", + perf->perf_tune.mode, + perf->perf_tune.min_core_clk, + perf->perf_tune.min_bus_vote); + + return simple_read_from_buffer(buff, count, ppos, buf, len); +} + +static const struct file_operations dpu_core_perf_mode_fops = { + .open = simple_open, + .read = _dpu_core_perf_mode_read, + .write = _dpu_core_perf_mode_write, +}; + +int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent) +{ + struct dpu_core_perf *perf = &dpu_kms->perf; + const struct dpu_mdss_cfg *catalog = perf->catalog; + struct dentry *entry; + + entry = debugfs_create_dir("core_perf", parent); + + debugfs_create_u64("max_core_clk_rate", 0600, entry, + &perf->max_core_clk_rate); + debugfs_create_u64("core_clk_rate", 0600, entry, + &perf->core_clk_rate); + debugfs_create_u32("enable_bw_release", 0600, entry, + (u32 *)&perf->enable_bw_release); + debugfs_create_u32("threshold_low", 0600, entry, + (u32 *)&catalog->perf->max_bw_low); + debugfs_create_u32("threshold_high", 0600, entry, + (u32 *)&catalog->perf->max_bw_high); + debugfs_create_u32("min_core_ib", 0600, entry, + (u32 *)&catalog->perf->min_core_ib); + debugfs_create_u32("min_llcc_ib", 0600, entry, + (u32 *)&catalog->perf->min_llcc_ib); + debugfs_create_u32("min_dram_ib", 0600, entry, + (u32 *)&catalog->perf->min_dram_ib); + debugfs_create_file("perf_mode", 0600, entry, + (u32 *)perf, &dpu_core_perf_mode_fops); + debugfs_create_u64("fix_core_clk_rate", 0600, entry, + &perf->fix_core_clk_rate); + debugfs_create_u64("fix_core_ib_vote", 0600, entry, + &perf->fix_core_ib_vote); + debugfs_create_u64("fix_core_ab_vote", 0600, entry, + &perf->fix_core_ab_vote); + + return 0; +} +#endif + +void dpu_core_perf_destroy(struct dpu_core_perf *perf) +{ + if (!perf) { + DPU_ERROR("invalid 
parameters\n");
+		return;
+	}
+
+	perf->max_core_clk_rate = 0;
+	perf->core_clk = NULL;
+	perf->catalog = NULL;
+	perf->dev = NULL;
+}
+
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+		struct drm_device *dev,
+		const struct dpu_mdss_cfg *catalog,
+		struct clk *core_clk)
+{
+	perf->dev = dev;
+	perf->catalog = catalog;
+	perf->core_clk = core_clk;
+
+	perf->max_core_clk_rate = clk_get_rate(core_clk);
+	if (!perf->max_core_clk_rate) {
+		DPU_DEBUG("optional max core clk rate, use default\n");
+		perf->max_core_clk_rate = DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
new file mode 100644
index 000000000..29bb8ee2b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_CORE_PERF_H_
+#define _DPU_CORE_PERF_H_
+
+#include <linux/types.h>
+#include <linux/dcache.h>
+#include <linux/mutex.h>
+#include <drm/drm_crtc.h>
+
+#include "dpu_hw_catalog.h"
+
+#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE	412500000
+
+/**
+ * struct dpu_core_perf_params - definition of performance parameters
+ * @max_per_pipe_ib: maximum instantaneous bandwidth request
+ * @bw_ctl: arbitrated bandwidth request
+ * @core_clk_rate: core clock rate request
+ */
+struct dpu_core_perf_params {
+	u64 max_per_pipe_ib;
+	u64 bw_ctl;
+	u64 core_clk_rate;
+};
+
+/**
+ * struct dpu_core_perf_tune - definition of performance tuning control
+ * @mode: performance mode
+ * @min_core_clk: minimum core clock
+ * @min_bus_vote: minimum bus vote
+ */
+struct dpu_core_perf_tune {
+	u32 mode;
+	u64 min_core_clk;
+	u64 min_bus_vote;
+};
+
+/**
+ * struct dpu_core_perf - definition of core performance context
+ * @dev: Pointer to drm device
+ * @debugfs_root: top level debug folder
+ * @catalog: Pointer to catalog configuration
+ * @core_clk: Pointer to the core clock
+ * @core_clk_rate: current core clock rate
+ * @max_core_clk_rate: maximum allowable core clock rate
+ * @perf_tune: debug control for performance tuning
+ * @enable_bw_release: debug control for bandwidth release
+ * @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
+ * @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
+ * @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
+ */
+struct dpu_core_perf {
+	struct drm_device *dev;
+	struct dentry *debugfs_root;
+	const struct dpu_mdss_cfg *catalog;
+	struct clk *core_clk;
+	u64 core_clk_rate;
+	u64 max_core_clk_rate;
+	struct dpu_core_perf_tune perf_tune;
+	u32 enable_bw_release;
+	u64 fix_core_clk_rate;
+	u64 fix_core_ib_vote;
+	u64 fix_core_ab_vote;
+};
+
+/**
+ * dpu_core_perf_crtc_check - validate performance of the given crtc state
+ * @crtc: Pointer to crtc
+ * @state: Pointer to new crtc state
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+		struct drm_crtc_state *state);
+
+/**
+ * dpu_core_perf_crtc_update - update performance of the given crtc
+ * @crtc: Pointer to crtc
+ * @params_changed: true if crtc parameters are modified
+ * @stop_req: true if this is a stop request
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+		int params_changed, bool stop_req);
+
+/**
+ * dpu_core_perf_crtc_release_bw - release bandwidth of the given crtc
+ * @crtc: Pointer to crtc
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc);
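A sketch of how these entry points are meant to be paired by a CRTC implementation (the caller-side names here are illustrative, not part of this patch):

/* Validate the vote in atomic_check, then raise it in the commit path
 * before the frame is kicked off. */
static int example_crtc_atomic_check(struct drm_crtc *crtc,
				     struct drm_crtc_state *state)
{
	/* fails with -E2BIG if the aggregate bandwidth exceeds the catalog cap */
	return dpu_core_perf_crtc_check(crtc, state);
}

static void example_crtc_atomic_flush(struct drm_crtc *crtc)
{
	/* params_changed = 1: raise bus/clock votes before kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);
}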
+/**
+ * dpu_core_perf_destroy - destroy the given core performance context
+ * @perf: Pointer to core performance context
+ */
+void dpu_core_perf_destroy(struct dpu_core_perf *perf);
+
+/**
+ * dpu_core_perf_init - initialize the given core performance context
+ * @perf: Pointer to core performance context
+ * @dev: Pointer to drm device
+ * @catalog: Pointer to catalog
+ * @core_clk: pointer to core clock
+ */
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+		struct drm_device *dev,
+		const struct dpu_mdss_cfg *catalog,
+		struct clk *core_clk);
+
+struct dpu_kms;
+
+/**
+ * dpu_core_perf_debugfs_init - initialize debugfs for core performance context
+ * @dpu_kms: Pointer to the dpu_kms struct
+ * @parent: Pointer to parent debugfs
+ */
+int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent);
+
+#endif /* _DPU_CORE_PERF_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
new file mode 100644
index 000000000..5a5821e59
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -0,0 +1,1626 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/sort.h>
+#include <linux/debugfs.h>
+#include <linux/ktime.h>
+#include <linux/bits.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_flip_work.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_vblank.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_dspp.h"
+#include "dpu_crtc.h"
+#include "dpu_plane.h"
+#include "dpu_encoder.h"
+#include "dpu_vbif.h"
+#include "dpu_core_perf.h"
+#include "dpu_trace.h"
+
+/* layer mixer index on dpu_crtc */
+#define LEFT_MIXER 0
+#define RIGHT_MIXER 1
+
+/* timeout in ms waiting for frame done */
+#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60
+
+#define CONVERT_S3_15(val) \
+	(((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))
+
+static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+{
+	struct msm_drm_private *priv = crtc->dev->dev_private;
+
+	return to_dpu_kms(priv->kms);
+}
+
+static void dpu_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+	if (!crtc)
+		return;
+
+	drm_crtc_cleanup(crtc);
+	kfree(dpu_crtc);
+}
+
+static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_encoder *encoder;
+
+	drm_for_each_encoder(encoder, dev)
+		if (encoder->crtc == crtc)
+			return encoder;
+
+	return NULL;
+}
+
+static enum dpu_crtc_crc_source dpu_crtc_parse_crc_source(const char *src_name)
+{
+	if (!src_name ||
+	    !strcmp(src_name, "none"))
+		return DPU_CRTC_CRC_SOURCE_NONE;
+	if (!strcmp(src_name, "auto") ||
+	    !strcmp(src_name, "lm"))
+		return DPU_CRTC_CRC_SOURCE_LAYER_MIXER;
+	if (!strcmp(src_name, "encoder"))
+		return DPU_CRTC_CRC_SOURCE_ENCODER;
+
+	return DPU_CRTC_CRC_SOURCE_INVALID;
+}
+
+static int dpu_crtc_verify_crc_source(struct drm_crtc *crtc,
+		const char *src_name, size_t *values_cnt)
+{
+	enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
+	struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);
+
+	if (source < 0) {
+		DRM_DEBUG_DRIVER("Invalid source %s for CRTC%d\n", src_name, crtc->index);
+		return -EINVAL;
+	}
+
+	if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER) {
+		*values_cnt = crtc_state->num_mixers;
+	} else if (source ==
DPU_CRTC_CRC_SOURCE_ENCODER) { + struct drm_encoder *drm_enc; + + *values_cnt = 0; + + drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask) + *values_cnt += dpu_encoder_get_crc_values_cnt(drm_enc); + } + + return 0; +} + +static void dpu_crtc_setup_lm_misr(struct dpu_crtc_state *crtc_state) +{ + struct dpu_crtc_mixer *m; + int i; + + for (i = 0; i < crtc_state->num_mixers; ++i) { + m = &crtc_state->mixers[i]; + + if (!m->hw_lm || !m->hw_lm->ops.setup_misr) + continue; + + /* Calculate MISR over 1 frame */ + m->hw_lm->ops.setup_misr(m->hw_lm); + } +} + +static void dpu_crtc_setup_encoder_misr(struct drm_crtc *crtc) +{ + struct drm_encoder *drm_enc; + + drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask) + dpu_encoder_setup_misr(drm_enc); +} + +static int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) +{ + enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name); + enum dpu_crtc_crc_source current_source; + struct dpu_crtc_state *crtc_state; + struct drm_device *drm_dev = crtc->dev; + + bool was_enabled; + bool enable = false; + int ret = 0; + + if (source < 0) { + DRM_DEBUG_DRIVER("Invalid CRC source %s for CRTC%d\n", src_name, crtc->index); + return -EINVAL; + } + + ret = drm_modeset_lock(&crtc->mutex, NULL); + + if (ret) + return ret; + + enable = (source != DPU_CRTC_CRC_SOURCE_NONE); + crtc_state = to_dpu_crtc_state(crtc->state); + + spin_lock_irq(&drm_dev->event_lock); + current_source = crtc_state->crc_source; + spin_unlock_irq(&drm_dev->event_lock); + + was_enabled = (current_source != DPU_CRTC_CRC_SOURCE_NONE); + + if (!was_enabled && enable) { + ret = drm_crtc_vblank_get(crtc); + + if (ret) + goto cleanup; + + } else if (was_enabled && !enable) { + drm_crtc_vblank_put(crtc); + } + + spin_lock_irq(&drm_dev->event_lock); + crtc_state->crc_source = source; + spin_unlock_irq(&drm_dev->event_lock); + + crtc_state->crc_frame_skip_count = 0; + + if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER) + dpu_crtc_setup_lm_misr(crtc_state); + else if (source == DPU_CRTC_CRC_SOURCE_ENCODER) + dpu_crtc_setup_encoder_misr(crtc); + else + ret = -EINVAL; + +cleanup: + drm_modeset_unlock(&crtc->mutex); + + return ret; +} + +static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc) +{ + struct drm_encoder *encoder = get_encoder_from_crtc(crtc); + if (!encoder) { + DRM_ERROR("no encoder found for crtc %d\n", crtc->index); + return 0; + } + + return dpu_encoder_get_vsync_count(encoder); +} + +static int dpu_crtc_get_lm_crc(struct drm_crtc *crtc, + struct dpu_crtc_state *crtc_state) +{ + struct dpu_crtc_mixer *m; + u32 crcs[CRTC_DUAL_MIXERS]; + + int rc = 0; + int i; + + BUILD_BUG_ON(ARRAY_SIZE(crcs) != ARRAY_SIZE(crtc_state->mixers)); + + for (i = 0; i < crtc_state->num_mixers; ++i) { + + m = &crtc_state->mixers[i]; + + if (!m->hw_lm || !m->hw_lm->ops.collect_misr) + continue; + + rc = m->hw_lm->ops.collect_misr(m->hw_lm, &crcs[i]); + + if (rc) { + if (rc != -ENODATA) + DRM_DEBUG_DRIVER("MISR read failed\n"); + return rc; + } + } + + return drm_crtc_add_crc_entry(crtc, true, + drm_crtc_accurate_vblank_count(crtc), crcs); +} + +static int dpu_crtc_get_encoder_crc(struct drm_crtc *crtc) +{ + struct drm_encoder *drm_enc; + int rc, pos = 0; + u32 crcs[INTF_MAX]; + + drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask) { + rc = dpu_encoder_get_crc(drm_enc, crcs, pos); + if (rc < 0) { + if (rc != -ENODATA) + DRM_DEBUG_DRIVER("MISR read failed\n"); + + return rc; + } + + pos += rc; + } + + return 
drm_crtc_add_crc_entry(crtc, true, + drm_crtc_accurate_vblank_count(crtc), crcs); +} + +static int dpu_crtc_get_crc(struct drm_crtc *crtc) +{ + struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state); + + /* Skip first 2 frames in case of "uncooked" CRCs */ + if (crtc_state->crc_frame_skip_count < 2) { + crtc_state->crc_frame_skip_count++; + return 0; + } + + if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER) + return dpu_crtc_get_lm_crc(crtc, crtc_state); + else if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_ENCODER) + return dpu_crtc_get_encoder_crc(crtc); + + return -EINVAL; +} + +static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc, + bool in_vblank_irq, + int *vpos, int *hpos, + ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode) +{ + unsigned int pipe = crtc->index; + struct drm_encoder *encoder; + int line, vsw, vbp, vactive_start, vactive_end, vfp_end; + + encoder = get_encoder_from_crtc(crtc); + if (!encoder) { + DRM_ERROR("no encoder found for crtc %d\n", pipe); + return false; + } + + vsw = mode->crtc_vsync_end - mode->crtc_vsync_start; + vbp = mode->crtc_vtotal - mode->crtc_vsync_end; + + /* + * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at + * the end of VFP. Translate the porch values relative to the line + * counter positions. + */ + + vactive_start = vsw + vbp + 1; + vactive_end = vactive_start + mode->crtc_vdisplay; + + /* last scan line before VSYNC */ + vfp_end = mode->crtc_vtotal; + + if (stime) + *stime = ktime_get(); + + line = dpu_encoder_get_linecount(encoder); + + if (line < vactive_start) + line -= vactive_start; + else if (line > vactive_end) + line = line - vfp_end - vactive_start; + else + line -= vactive_start; + + *vpos = line; + *hpos = 0; + + if (etime) + *etime = ktime_get(); + + return true; +} + +static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer, + struct dpu_plane_state *pstate, struct dpu_format *format) +{ + struct dpu_hw_mixer *lm = mixer->hw_lm; + uint32_t blend_op; + uint32_t fg_alpha, bg_alpha; + + fg_alpha = pstate->base.alpha >> 8; + bg_alpha = 0xff - fg_alpha; + + /* default to opaque blending */ + if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE || + !format->alpha_enable) { + blend_op = DPU_BLEND_FG_ALPHA_FG_CONST | + DPU_BLEND_BG_ALPHA_BG_CONST; + } else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) { + blend_op = DPU_BLEND_FG_ALPHA_FG_CONST | + DPU_BLEND_BG_ALPHA_FG_PIXEL; + if (fg_alpha != 0xff) { + bg_alpha = fg_alpha; + blend_op |= DPU_BLEND_BG_MOD_ALPHA | + DPU_BLEND_BG_INV_MOD_ALPHA; + } else { + blend_op |= DPU_BLEND_BG_INV_ALPHA; + } + } else { + /* coverage blending */ + blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL | + DPU_BLEND_BG_ALPHA_FG_PIXEL; + if (fg_alpha != 0xff) { + bg_alpha = fg_alpha; + blend_op |= DPU_BLEND_FG_MOD_ALPHA | + DPU_BLEND_FG_INV_MOD_ALPHA | + DPU_BLEND_BG_MOD_ALPHA | + DPU_BLEND_BG_INV_MOD_ALPHA; + } else { + blend_op |= DPU_BLEND_BG_INV_ALPHA; + } + } + + lm->ops.setup_blend_config(lm, pstate->stage, + fg_alpha, bg_alpha, blend_op); + + DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n", + &format->base.pixel_format, format->alpha_enable, blend_op); +} + +static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc) +{ + struct dpu_crtc_state *crtc_state; + int lm_idx, lm_horiz_position; + + crtc_state = to_dpu_crtc_state(crtc->state); + + lm_horiz_position = 0; + for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) { + const struct drm_rect *lm_roi = 
&crtc_state->lm_bounds[lm_idx]; + struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm; + struct dpu_hw_mixer_cfg cfg; + + if (!lm_roi || !drm_rect_visible(lm_roi)) + continue; + + cfg.out_width = drm_rect_width(lm_roi); + cfg.out_height = drm_rect_height(lm_roi); + cfg.right_mixer = lm_horiz_position++; + cfg.flags = 0; + hw_lm->ops.setup_mixer_out(hw_lm, &cfg); + } +} + +static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, + struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer, + struct dpu_hw_stage_cfg *stage_cfg) +{ + struct drm_plane *plane; + struct drm_framebuffer *fb; + struct drm_plane_state *state; + struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); + struct dpu_plane_state *pstate = NULL; + struct dpu_format *format; + struct dpu_hw_ctl *ctl = mixer->lm_ctl; + + uint32_t stage_idx, lm_idx; + int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 }; + bool bg_alpha_enable = false; + DECLARE_BITMAP(fetch_active, SSPP_MAX); + + memset(fetch_active, 0, sizeof(fetch_active)); + drm_atomic_crtc_for_each_plane(plane, crtc) { + enum dpu_sspp sspp_idx; + + state = plane->state; + if (!state) + continue; + + if (!state->visible) + continue; + + pstate = to_dpu_plane_state(state); + fb = state->fb; + + sspp_idx = dpu_plane_pipe(plane); + set_bit(sspp_idx, fetch_active); + + DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d\n", + crtc->base.id, + pstate->stage, + plane->base.id, + sspp_idx - SSPP_VIG0, + state->fb ? state->fb->base.id : -1); + + format = to_dpu_format(msm_framebuffer_format(pstate->base.fb)); + + if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable) + bg_alpha_enable = true; + + stage_idx = zpos_cnt[pstate->stage]++; + stage_cfg->stage[pstate->stage][stage_idx] = + sspp_idx; + stage_cfg->multirect_index[pstate->stage][stage_idx] = + pstate->multirect_index; + + trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane), + state, pstate, stage_idx, + sspp_idx - SSPP_VIG0, + format->base.pixel_format, + fb ? 
fb->modifier : 0);
+
+		/* blend config update */
+		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
+			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
+						pstate, format);
+
+			mixer[lm_idx].lm_ctl->ops.update_pending_flush_sspp(mixer[lm_idx].lm_ctl,
+									    sspp_idx);
+
+			if (bg_alpha_enable && !format->alpha_enable)
+				mixer[lm_idx].mixer_op_mode = 0;
+			else
+				mixer[lm_idx].mixer_op_mode |=
+						1 << pstate->stage;
+		}
+	}
+
+	if (ctl->ops.set_active_pipes)
+		ctl->ops.set_active_pipes(ctl, fetch_active);
+
+	_dpu_crtc_program_lm_output_roi(crtc);
+}
+
+/**
+ * _dpu_crtc_blend_setup - configure crtc mixers
+ * @crtc: Pointer to drm crtc structure
+ */
+static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
+{
+	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
+	struct dpu_crtc_mixer *mixer = cstate->mixers;
+	struct dpu_hw_ctl *ctl;
+	struct dpu_hw_mixer *lm;
+	struct dpu_hw_stage_cfg stage_cfg;
+	int i;
+
+	DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);
+
+	for (i = 0; i < cstate->num_mixers; i++) {
+		mixer[i].mixer_op_mode = 0;
+		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
+			mixer[i].lm_ctl->ops.clear_all_blendstages(
+					mixer[i].lm_ctl);
+	}
+
+	/* initialize stage cfg */
+	memset(&stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
+
+	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, &stage_cfg);
+
+	for (i = 0; i < cstate->num_mixers; i++) {
+		ctl = mixer[i].lm_ctl;
+		lm = mixer[i].hw_lm;
+
+		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
+
+		/* stage config flush mask */
+		ctl->ops.update_pending_flush_mixer(ctl,
+			mixer[i].hw_lm->idx);
+
+		DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d\n",
+			mixer[i].hw_lm->idx - LM_0,
+			mixer[i].mixer_op_mode,
+			ctl->idx - CTL_0);
+
+		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
+			&stage_cfg);
+	}
+}
+
+/**
+ * _dpu_crtc_complete_flip - signal pending page_flip events
+ * Any pending vblank events are added to the vblank_event_list
+ * so that the next vblank interrupt shall signal them.
+ * However PAGE_FLIP events are not handled through the vblank_event_list.
+ * This API signals any pending PAGE_FLIP events requested through
+ * DRM_IOCTL_MODE_PAGE_FLIP that are cached in the dpu_crtc->event.
+ * @crtc: Pointer to drm crtc structure
+ */
+static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
+{
+	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (dpu_crtc->event) {
+		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
+			      dpu_crtc->event);
+		trace_dpu_crtc_complete_flip(DRMID(crtc));
+		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
+		dpu_crtc->event = NULL;
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+
+	/*
+	 * TODO: This function is called from dpu debugfs and as part of atomic
+	 * check. When called from debugfs, the crtc->mutex must be held to
+	 * read crtc->state. However reading crtc->state from atomic check isn't
+	 * allowed (unless you have a good reason, a big comment, and a deep
+	 * understanding of how the atomic/modeset locks work (<- and this is
+	 * probably not possible)). So we'll keep the WARN_ON here for now, but
+	 * really we need to figure out a better way to track our operating mode
+	 */
+	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
+	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+		return dpu_encoder_get_intf_mode(encoder);
+
+	return INTF_MODE_NONE;
+}
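For a concrete reading of the blend rules configured above, here is the blend_op that _dpu_crtc_setup_blend_cfg() derives for a premultiplied plane with a mid-range plane alpha (the input value is illustrative; the DPU_BLEND_* flags come from the dpu_hw_* headers added elsewhere in this patch):

/* Plane-alpha property is 16-bit, so 0x8000 >> 8 = 0x80. Since fg_alpha
 * != 0xff, the FG term uses the constant alpha while the BG term uses
 * the FG pixel alpha, additionally modulated by the plane alpha. */
static uint32_t example_premulti_blend_op(void)
{
	uint32_t fg_alpha = 0x8000 >> 8;	/* = 0x80 */
	uint32_t blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
			    DPU_BLEND_BG_ALPHA_FG_PIXEL;

	if (fg_alpha != 0xff)
		blend_op |= DPU_BLEND_BG_MOD_ALPHA |
			    DPU_BLEND_BG_INV_MOD_ALPHA;

	return blend_op;
}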
+void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
+{
+	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+	/* keep statistics on vblank callback - with auto reset via debugfs */
+	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
+		dpu_crtc->vblank_cb_time = ktime_get();
+	else
+		dpu_crtc->vblank_cb_count++;
+
+	dpu_crtc_get_crc(crtc);
+
+	drm_crtc_handle_vblank(crtc);
+	trace_dpu_crtc_vblank_cb(DRMID(crtc));
+}
+
+static void dpu_crtc_frame_event_work(struct kthread_work *work)
+{
+	struct dpu_crtc_frame_event *fevent = container_of(work,
+			struct dpu_crtc_frame_event, work);
+	struct drm_crtc *crtc = fevent->crtc;
+	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+	unsigned long flags;
+	bool frame_done = false;
+
+	DPU_ATRACE_BEGIN("crtc_frame_event");
+
+	DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
+			ktime_to_ns(fevent->ts));
+
+	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
+				| DPU_ENCODER_FRAME_EVENT_ERROR
+				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+
+		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
+			/* ignore vblank when not pending */
+		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
+			/* release bandwidth and other resources */
+			trace_dpu_crtc_frame_event_done(DRMID(crtc),
+							fevent->event);
+			dpu_core_perf_crtc_release_bw(crtc);
+		} else {
+			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
+								fevent->event);
+		}
+
+		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
+					| DPU_ENCODER_FRAME_EVENT_ERROR))
+			frame_done = true;
+	}
+
+	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
+		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
+				crtc->base.id, ktime_to_ns(fevent->ts));
+
+	if (frame_done)
+		complete_all(&dpu_crtc->frame_done_comp);
+
+	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
+	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
+	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
+	DPU_ATRACE_END("crtc_frame_event");
+}
+
+/*
+ * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
+ * registers this API to encoder for all frame event callbacks like
+ * frame_error, frame_done, idle_timeout, etc. Encoder may call different events
+ * from different contexts - IRQ, user thread, commit_thread, etc. Each event
+ * should be carefully reviewed and should be processed in proper task context
+ * to avoid scheduling delay or properly manage the irq context's bottom half
+ * processing.
+ */
+static void dpu_crtc_frame_event_cb(void *data, u32 event)
+{
+	struct drm_crtc *crtc = (struct drm_crtc *)data;
+	struct dpu_crtc *dpu_crtc;
+	struct msm_drm_private *priv;
+	struct dpu_crtc_frame_event *fevent;
+	unsigned long flags;
+	u32 crtc_id;
+
+	/* Nothing to do on idle event */
+	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
+		return;
+
+	dpu_crtc = to_dpu_crtc(crtc);
+	priv = crtc->dev->dev_private;
+	crtc_id = drm_crtc_index(crtc);
+
+	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);
+
+	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
+	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
+			struct dpu_crtc_frame_event, list);
+	if (fevent)
+		list_del_init(&fevent->list);
+	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
+
+	if (!fevent) {
+		DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
+		return;
+	}
+
+	fevent->event = event;
+	fevent->crtc = crtc;
+	fevent->ts = ktime_get();
+	kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
+}
+
+void dpu_crtc_complete_commit(struct drm_crtc *crtc)
+{
+	trace_dpu_crtc_complete_commit(DRMID(crtc));
+	dpu_core_perf_crtc_update(crtc, 0, false);
+	_dpu_crtc_complete_flip(crtc);
+}
+
+static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
+	struct drm_display_mode *adj_mode = &state->adjusted_mode;
+	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
+	int i;
+
+	for (i = 0; i < cstate->num_mixers; i++) {
+		struct drm_rect *r = &cstate->lm_bounds[i];
+		r->x1 = crtc_split_width * i;
+		r->y1 = 0;
+		r->x2 = r->x1 + crtc_split_width;
+		r->y2 = adj_mode->vdisplay;
+
+		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
+	}
+}
+
+static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
+		struct dpu_hw_pcc_cfg *cfg)
+{
+	struct drm_color_ctm *ctm;
+
+	memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));
+
+	ctm = (struct drm_color_ctm *)state->ctm->data;
+
+	if (!ctm)
+		return;
+
+	cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
+	cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
+	cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);
+
+	cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
+	cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
+	cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);
+
+	cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
+	cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
+	cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
+}
+
+static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
+{
+	struct drm_crtc_state *state = crtc->state;
+	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
+	struct dpu_crtc_mixer *mixer = cstate->mixers;
+	struct dpu_hw_pcc_cfg cfg;
+	struct dpu_hw_ctl *ctl;
+	struct dpu_hw_dspp *dspp;
+	int i;
+
+
+	if (!state->color_mgmt_changed)
+		return;
+
+	for (i = 0; i < cstate->num_mixers; i++) {
+		ctl = mixer[i].lm_ctl;
+		dspp = mixer[i].hw_dspp;
+
+		if (!dspp || !dspp->ops.setup_pcc)
+			continue;
+
+		if (!state->ctm) {
+			dspp->ops.setup_pcc(dspp, NULL);
+		} else {
+			_dpu_crtc_get_pcc_coeff(state, &cfg);
+			dspp->ops.setup_pcc(dspp, &cfg);
+		}
+
+		/* stage config flush mask */
+		ctl->ops.update_pending_flush_dspp(ctl,
+			mixer[i].hw_dspp->idx);
+	}
+}
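A quick sanity check of the fixed-point conversion used by _dpu_crtc_get_pcc_coeff() above, using the standard drm_color_ctm encoding of the identity coefficient:

/* The CTM's S31.32 sign-magnitude value for 1.0 is 1ULL << 32; masking
 * off the sign bit and dropping 17 fraction bits leaves 1 << 15 = 0x8000,
 * i.e. 1.0 in the 18-bit x.15 format the PCC block consumes. */
static u64 example_identity_coeff(void)
{
	u64 one = 1ULL << 32;		/* 1.0 in drm_color_ctm fixed point */

	return CONVERT_S3_15(one);	/* == 0x8000 */
}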
+ DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id); + + _dpu_crtc_setup_lm_bounds(crtc, crtc->state); + + /* encoder will trigger pending mask now */ + drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) + dpu_encoder_trigger_kickoff_pending(encoder); + + /* + * If no mixers have been allocated in dpu_crtc_atomic_check(), + * it means we are trying to flush a CRTC whose state is disabled: + * nothing else needs to be done. + */ + if (unlikely(!cstate->num_mixers)) + return; + + _dpu_crtc_blend_setup(crtc); + + _dpu_crtc_setup_cp_blocks(crtc); + + /* + * PP_DONE irq is only used by command mode for now. + * It is better to request pending before FLUSH and START trigger + * to make sure no pp_done irq missed. + * This is safe because no pp_done will happen before SW trigger + * in command mode. + */ +} + +static void dpu_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct dpu_crtc *dpu_crtc; + struct drm_device *dev; + struct drm_plane *plane; + struct msm_drm_private *priv; + unsigned long flags; + struct dpu_crtc_state *cstate; + + if (!crtc->state->enable) { + DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n", + crtc->base.id, crtc->state->enable); + return; + } + + DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id); + + dpu_crtc = to_dpu_crtc(crtc); + cstate = to_dpu_crtc_state(crtc->state); + dev = crtc->dev; + priv = dev->dev_private; + + if (crtc->index >= ARRAY_SIZE(priv->event_thread)) { + DPU_ERROR("invalid crtc index[%d]\n", crtc->index); + return; + } + + WARN_ON(dpu_crtc->event); + spin_lock_irqsave(&dev->event_lock, flags); + dpu_crtc->event = crtc->state->event; + crtc->state->event = NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); + + /* + * If no mixers has been allocated in dpu_crtc_atomic_check(), + * it means we are trying to flush a CRTC whose state is disabled: + * nothing else needs to be done. + */ + if (unlikely(!cstate->num_mixers)) + return; + + /* update performance setting before crtc kickoff */ + dpu_core_perf_crtc_update(crtc, 1, false); + + /* + * Final plane updates: Give each plane a chance to complete all + * required writes/flushing before crtc's "flush + * everything" call below. 
+ */ + drm_atomic_crtc_for_each_plane(plane, crtc) { + if (dpu_crtc->smmu_state.transition_error) + dpu_plane_set_error(plane, true); + dpu_plane_flush(plane); + } + + /* Kickoff will be scheduled by outer layer */ +} + +/** + * dpu_crtc_destroy_state - state destroy hook + * @crtc: drm CRTC + * @state: CRTC state object to release + */ +static void dpu_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); + + DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id); + + __drm_atomic_helper_crtc_destroy_state(state); + + kfree(cstate); +} + +static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc) +{ + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + int ret, rc = 0; + + if (!atomic_read(&dpu_crtc->frame_pending)) { + DRM_DEBUG_ATOMIC("no frames pending\n"); + return 0; + } + + DPU_ATRACE_BEGIN("frame done completion wait"); + ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp, + msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS)); + if (!ret) { + DRM_ERROR("frame done wait timed out, ret:%d\n", ret); + rc = -ETIMEDOUT; + } + DPU_ATRACE_END("frame done completion wait"); + + return rc; +} + +void dpu_crtc_commit_kickoff(struct drm_crtc *crtc) +{ + struct drm_encoder *encoder; + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); + struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); + + /* + * If no mixers have been allocated in dpu_crtc_atomic_check(), + * it means we are trying to start a CRTC whose state is disabled: + * nothing else needs to be done. + */ + if (unlikely(!cstate->num_mixers)) + return; + + DPU_ATRACE_BEGIN("crtc_commit"); + + drm_for_each_encoder_mask(encoder, crtc->dev, + crtc->state->encoder_mask) { + if (!dpu_encoder_is_valid_for_commit(encoder)) { + DRM_DEBUG_ATOMIC("invalid FB not kicking off crtc\n"); + goto end; + } + } + /* + * Encoder will flush/start now, unless it has a tx pending. If so, it + * may delay and flush at an irq event (e.g.
ppdone) + */ + drm_for_each_encoder_mask(encoder, crtc->dev, + crtc->state->encoder_mask) + dpu_encoder_prepare_for_kickoff(encoder); + + if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) { + /* acquire bandwidth and other resources */ + DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id); + } else + DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id); + + dpu_crtc->play_count++; + + dpu_vbif_clear_errors(dpu_kms); + + drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) + dpu_encoder_kickoff(encoder); + + reinit_completion(&dpu_crtc->frame_done_comp); + +end: + DPU_ATRACE_END("crtc_commit"); +} + +static void dpu_crtc_reset(struct drm_crtc *crtc) +{ + struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL); + + if (crtc->state) + dpu_crtc_destroy_state(crtc, crtc->state); + + if (cstate) + __drm_atomic_helper_crtc_reset(crtc, &cstate->base); + else + __drm_atomic_helper_crtc_reset(crtc, NULL); +} + +/** + * dpu_crtc_duplicate_state - state duplicate hook + * @crtc: Pointer to drm crtc structure + */ +static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc) +{ + struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state); + + cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL); + if (!cstate) { + DPU_ERROR("failed to allocate state\n"); + return NULL; + } + + /* duplicate base helper */ + __drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base); + + return &cstate->base; +} + +static void dpu_crtc_atomic_print_state(struct drm_printer *p, + const struct drm_crtc_state *state) +{ + const struct dpu_crtc_state *cstate = to_dpu_crtc_state(state); + int i; + + for (i = 0; i < cstate->num_mixers; i++) { + drm_printf(p, "\tlm[%d]=%d\n", i, cstate->mixers[i].hw_lm->idx - LM_0); + drm_printf(p, "\tctl[%d]=%d\n", i, cstate->mixers[i].lm_ctl->idx - CTL_0); + if (cstate->mixers[i].hw_dspp) + drm_printf(p, "\tdspp[%d]=%d\n", i, cstate->mixers[i].hw_dspp->idx - DSPP_0); + } +} + +static void dpu_crtc_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, + crtc); + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); + struct drm_encoder *encoder; + unsigned long flags; + bool release_bandwidth = false; + + DRM_DEBUG_KMS("crtc%d\n", crtc->base.id); + + /* Disable/save vblank irq handling */ + drm_crtc_vblank_off(crtc); + + drm_for_each_encoder_mask(encoder, crtc->dev, + old_crtc_state->encoder_mask) { + /* in video mode, we hold an extra bandwidth reference + * as we cannot drop bandwidth at frame-done if any + * crtc is being used in video mode. 
+ */ + if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO) + release_bandwidth = true; + dpu_encoder_assign_crtc(encoder, NULL); + } + + /* wait for frame_event_done completion */ + if (_dpu_crtc_wait_for_frame_done(crtc)) + DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n", + crtc->base.id, + atomic_read(&dpu_crtc->frame_pending)); + + trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc); + dpu_crtc->enabled = false; + + if (atomic_read(&dpu_crtc->frame_pending)) { + trace_dpu_crtc_disable_frame_pending(DRMID(crtc), + atomic_read(&dpu_crtc->frame_pending)); + if (release_bandwidth) + dpu_core_perf_crtc_release_bw(crtc); + atomic_set(&dpu_crtc->frame_pending, 0); + } + + dpu_core_perf_crtc_update(crtc, 0, true); + + drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) + dpu_encoder_register_frame_event_callback(encoder, NULL, NULL); + + memset(cstate->mixers, 0, sizeof(cstate->mixers)); + cstate->num_mixers = 0; + + /* disable clk & bw control until clk & bw properties are set */ + cstate->bw_control = false; + cstate->bw_split_vote = false; + + if (crtc->state->event && !crtc->state->active) { + spin_lock_irqsave(&crtc->dev->event_lock, flags); + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + } + + pm_runtime_put_sync(crtc->dev->dev); +} + +static void dpu_crtc_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + struct drm_encoder *encoder; + bool request_bandwidth = false; + + pm_runtime_get_sync(crtc->dev->dev); + + DRM_DEBUG_KMS("crtc%d\n", crtc->base.id); + + drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) { + /* in video mode, we hold an extra bandwidth reference + * as we cannot drop bandwidth at frame-done if any + * crtc is being used in video mode. 
+ */ + if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO) + request_bandwidth = true; + dpu_encoder_register_frame_event_callback(encoder, + dpu_crtc_frame_event_cb, (void *)crtc); + } + + if (request_bandwidth) + atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref); + + trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc); + dpu_crtc->enabled = true; + + drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) + dpu_encoder_assign_crtc(encoder, crtc); + + /* Enable/restore vblank irq handling */ + drm_crtc_vblank_on(crtc); +} + +struct plane_state { + struct dpu_plane_state *dpu_pstate; + const struct drm_plane_state *drm_pstate; + int stage; + u32 pipe_id; +}; + +static bool dpu_crtc_needs_dirtyfb(struct drm_crtc_state *cstate) +{ + struct drm_crtc *crtc = cstate->crtc; + struct drm_encoder *encoder; + + drm_for_each_encoder_mask (encoder, crtc->dev, cstate->encoder_mask) { + if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_CMD) { + return true; + } + } + + return false; +} + +static int dpu_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state); + struct plane_state *pstates; + + const struct drm_plane_state *pstate; + struct drm_plane *plane; + struct drm_display_mode *mode; + + int cnt = 0, rc = 0, mixer_width = 0, i, z_pos; + + struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2]; + int multirect_count = 0; + const struct drm_plane_state *pipe_staged[SSPP_MAX]; + int left_zpos_cnt = 0, right_zpos_cnt = 0; + struct drm_rect crtc_rect = { 0 }; + bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state); + + pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL); + if (!pstates) + return -ENOMEM; + + if (!crtc_state->enable || !crtc_state->active) { + DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n", + crtc->base.id, crtc_state->enable, + crtc_state->active); + memset(&cstate->new_perf, 0, sizeof(cstate->new_perf)); + goto end; + } + + mode = &crtc_state->adjusted_mode; + DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name); + + /* force a full mode set if active state changed */ + if (crtc_state->active_changed) + crtc_state->mode_changed = true; + + memset(pipe_staged, 0, sizeof(pipe_staged)); + + if (cstate->num_mixers) { + mixer_width = mode->hdisplay / cstate->num_mixers; + + _dpu_crtc_setup_lm_bounds(crtc, crtc_state); + } + + crtc_rect.x2 = mode->hdisplay; + crtc_rect.y2 = mode->vdisplay; + + /* get plane state for all drm planes associated with crtc state */ + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { + struct dpu_plane_state *dpu_pstate = to_dpu_plane_state(pstate); + struct drm_rect dst, clip = crtc_rect; + + if (IS_ERR_OR_NULL(pstate)) { + rc = PTR_ERR(pstate); + DPU_ERROR("%s: failed to get plane%d state, %d\n", + dpu_crtc->name, plane->base.id, rc); + goto end; + } + if (cnt >= DPU_STAGE_MAX * 4) + continue; + + if (!pstate->visible) + continue; + + pstates[cnt].dpu_pstate = dpu_pstate; + pstates[cnt].drm_pstate = pstate; + pstates[cnt].stage = pstate->normalized_zpos; + pstates[cnt].pipe_id = dpu_plane_pipe(plane); + + dpu_pstate->needs_dirtyfb = needs_dirtyfb; + + if (pipe_staged[pstates[cnt].pipe_id]) { + multirect_plane[multirect_count].r0 = + pipe_staged[pstates[cnt].pipe_id]; + multirect_plane[multirect_count].r1 = pstate; + multirect_count++; + + 
pipe_staged[pstates[cnt].pipe_id] = NULL; + } else { + pipe_staged[pstates[cnt].pipe_id] = pstate; + } + + cnt++; + + dst = drm_plane_state_dest(pstate); + if (!drm_rect_intersect(&clip, &dst)) { + DPU_ERROR("invalid vertical/horizontal destination\n"); + DPU_ERROR("display: " DRM_RECT_FMT " plane: " + DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect), + DRM_RECT_ARG(&dst)); + rc = -E2BIG; + goto end; + } + } + + for (i = 1; i < SSPP_MAX; i++) { + if (pipe_staged[i]) + dpu_plane_clear_multirect(pipe_staged[i]); + } + + z_pos = -1; + for (i = 0; i < cnt; i++) { + /* reset counts at every new blend stage */ + if (pstates[i].stage != z_pos) { + left_zpos_cnt = 0; + right_zpos_cnt = 0; + z_pos = pstates[i].stage; + } + + /* verify z_pos setting before using it */ + if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) { + DPU_ERROR("> %d plane stages assigned\n", + DPU_STAGE_MAX - DPU_STAGE_0); + rc = -EINVAL; + goto end; + } else if (pstates[i].drm_pstate->crtc_x < mixer_width) { + if (left_zpos_cnt == 2) { + DPU_ERROR("> 2 planes @ stage %d on left\n", + z_pos); + rc = -EINVAL; + goto end; + } + left_zpos_cnt++; + + } else { + if (right_zpos_cnt == 2) { + DPU_ERROR("> 2 planes @ stage %d on right\n", + z_pos); + rc = -EINVAL; + goto end; + } + right_zpos_cnt++; + } + + pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0; + DRM_DEBUG_ATOMIC("%s: zpos %d\n", dpu_crtc->name, z_pos); + } + + for (i = 0; i < multirect_count; i++) { + if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) { + DPU_ERROR( + "multirect validation failed for planes (%d - %d)\n", + multirect_plane[i].r0->plane->base.id, + multirect_plane[i].r1->plane->base.id); + rc = -EINVAL; + goto end; + } + } + + atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref); + + rc = dpu_core_perf_crtc_check(crtc, crtc_state); + if (rc) { + DPU_ERROR("crtc%d failed performance check %d\n", + crtc->base.id, rc); + goto end; + } + + /* validate source split: + * use pstates sorted by stage to check planes on same stage + * we assume that all pipes are in source split so it's valid to compare + * without taking into account left/right mixer placement + */ + for (i = 1; i < cnt; i++) { + struct plane_state *prv_pstate, *cur_pstate; + struct drm_rect left_rect, right_rect; + int32_t left_pid, right_pid; + int32_t stage; + + prv_pstate = &pstates[i - 1]; + cur_pstate = &pstates[i]; + if (prv_pstate->stage != cur_pstate->stage) + continue; + + stage = cur_pstate->stage; + + left_pid = prv_pstate->dpu_pstate->base.plane->base.id; + left_rect = drm_plane_state_dest(prv_pstate->drm_pstate); + + right_pid = cur_pstate->dpu_pstate->base.plane->base.id; + right_rect = drm_plane_state_dest(cur_pstate->drm_pstate); + + if (right_rect.x1 < left_rect.x1) { + swap(left_pid, right_pid); + swap(left_rect, right_rect); + } + + /* + * - planes are enumerated in pipe-priority order such that + * planes with lower drm_id must be left-most in a shared + * blend-stage when using source split. + * - planes in source split must be contiguous in width + * - planes in source split must have same dest yoff and height + */ + if (right_pid < left_pid) { + DPU_ERROR( + "invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n", + stage, left_pid, right_pid); + rc = -EINVAL; + goto end; + } else if (right_rect.x1 != drm_rect_width(&left_rect)) { + DPU_ERROR("non-contiguous coordinates for src split.
" + "stage: %d left: " DRM_RECT_FMT " right: " + DRM_RECT_FMT "\n", stage, + DRM_RECT_ARG(&left_rect), + DRM_RECT_ARG(&right_rect)); + rc = -EINVAL; + goto end; + } else if (left_rect.y1 != right_rect.y1 || + drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) { + DPU_ERROR("source split at stage: %d. invalid " + "yoff/height: left: " DRM_RECT_FMT " right: " + DRM_RECT_FMT "\n", stage, + DRM_RECT_ARG(&left_rect), + DRM_RECT_ARG(&right_rect)); + rc = -EINVAL; + goto end; + } + } + +end: + kfree(pstates); + return rc; +} + +int dpu_crtc_vblank(struct drm_crtc *crtc, bool en) +{ + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + struct drm_encoder *enc; + + trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc); + + /* + * Normally we would iterate through encoder_mask in crtc state to find + * attached encoders. In this case, we might be disabling vblank _after_ + * encoder_mask has been cleared. + * + * Instead, we "assign" a crtc to the encoder in enable and clear it in + * disable (which is also after encoder_mask is cleared). So instead of + * using encoder mask, we'll ask the encoder to toggle itself iff it's + * currently assigned to our crtc. + * + * Note also that this function cannot be called while crtc is disabled + * since we use drm_crtc_vblank_on/off. So we don't need to worry + * about the assigned crtcs being inconsistent with the current state + * (which means no need to worry about modeset locks). + */ + list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) { + trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en, + dpu_crtc); + + dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en); + } + + return 0; +} + +#ifdef CONFIG_DEBUG_FS +static int _dpu_debugfs_status_show(struct seq_file *s, void *data) +{ + struct dpu_crtc *dpu_crtc; + struct dpu_plane_state *pstate = NULL; + struct dpu_crtc_mixer *m; + + struct drm_crtc *crtc; + struct drm_plane *plane; + struct drm_display_mode *mode; + struct drm_framebuffer *fb; + struct drm_plane_state *state; + struct dpu_crtc_state *cstate; + + int i, out_width; + + dpu_crtc = s->private; + crtc = &dpu_crtc->base; + + drm_modeset_lock_all(crtc->dev); + cstate = to_dpu_crtc_state(crtc->state); + + mode = &crtc->state->adjusted_mode; + out_width = mode->hdisplay / cstate->num_mixers; + + seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id, + mode->hdisplay, mode->vdisplay); + + seq_puts(s, "\n"); + + for (i = 0; i < cstate->num_mixers; ++i) { + m = &cstate->mixers[i]; + seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n", + m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0, + out_width, mode->vdisplay); + } + + seq_puts(s, "\n"); + + drm_atomic_crtc_for_each_plane(plane, crtc) { + pstate = to_dpu_plane_state(plane->state); + state = plane->state; + + if (!pstate || !state) + continue; + + seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id, + pstate->stage); + + if (plane->state->fb) { + fb = plane->state->fb; + + seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ", + fb->base.id, (char *) &fb->format->format, + fb->width, fb->height); + for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i) + seq_printf(s, "cpp[%d]:%u ", + i, fb->format->cpp[i]); + seq_puts(s, "\n\t"); + + seq_printf(s, "modifier:%8llu ", fb->modifier); + seq_puts(s, "\n"); + + seq_puts(s, "\t"); + for (i = 0; i < ARRAY_SIZE(fb->pitches); i++) + seq_printf(s, "pitches[%d]:%8u ", i, + fb->pitches[i]); + seq_puts(s, "\n"); + + seq_puts(s, "\t"); + for (i = 0; i < ARRAY_SIZE(fb->offsets); i++) + seq_printf(s, "offsets[%d]:%8u ", i, + 
fb->offsets[i]); + seq_puts(s, "\n"); + } + + seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n", + state->src_x, state->src_y, state->src_w, state->src_h); + + seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n", + state->crtc_x, state->crtc_y, state->crtc_w, + state->crtc_h); + seq_printf(s, "\tmultirect: mode: %d index: %d\n", + pstate->multirect_mode, pstate->multirect_index); + + seq_puts(s, "\n"); + } + if (dpu_crtc->vblank_cb_count) { + ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time); + s64 diff_ms = ktime_to_ms(diff); + s64 fps = diff_ms ? div_s64( + dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0; + + seq_printf(s, + "vblank fps:%lld count:%u total:%llums total_framecount:%llu\n", + fps, dpu_crtc->vblank_cb_count, + ktime_to_ms(diff), dpu_crtc->play_count); + + /* reset time & count for next measurement */ + dpu_crtc->vblank_cb_count = 0; + dpu_crtc->vblank_cb_time = ktime_set(0, 0); + } + + drm_modeset_unlock_all(crtc->dev); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status); + +static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v) +{ + struct drm_crtc *crtc = (struct drm_crtc *) s->private; + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + + seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc)); + seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc)); + seq_printf(s, "core_clk_rate: %llu\n", + dpu_crtc->cur_perf.core_clk_rate); + seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl); + seq_printf(s, "max_per_pipe_ib: %llu\n", + dpu_crtc->cur_perf.max_per_pipe_ib); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state); + +static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc) +{ + struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); + struct dentry *debugfs_root; + + debugfs_root = debugfs_create_dir(dpu_crtc->name, + crtc->dev->primary->debugfs_root); + + debugfs_create_file("status", 0400, + debugfs_root, + dpu_crtc, &_dpu_debugfs_status_fops); + debugfs_create_file("state", 0600, + debugfs_root, + &dpu_crtc->base, + &dpu_crtc_debugfs_state_fops); + + return 0; +} +#else +static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc) +{ + return 0; +} +#endif /* CONFIG_DEBUG_FS */ + +static int dpu_crtc_late_register(struct drm_crtc *crtc) +{ + return _dpu_crtc_init_debugfs(crtc); +} + +static const struct drm_crtc_funcs dpu_crtc_funcs = { + .set_config = drm_atomic_helper_set_config, + .destroy = dpu_crtc_destroy, + .page_flip = drm_atomic_helper_page_flip, + .reset = dpu_crtc_reset, + .atomic_duplicate_state = dpu_crtc_duplicate_state, + .atomic_destroy_state = dpu_crtc_destroy_state, + .atomic_print_state = dpu_crtc_atomic_print_state, + .late_register = dpu_crtc_late_register, + .verify_crc_source = dpu_crtc_verify_crc_source, + .set_crc_source = dpu_crtc_set_crc_source, + .enable_vblank = msm_crtc_enable_vblank, + .disable_vblank = msm_crtc_disable_vblank, + .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, + .get_vblank_counter = dpu_crtc_get_vblank_counter, +}; + +static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = { + .atomic_disable = dpu_crtc_disable, + .atomic_enable = dpu_crtc_enable, + .atomic_check = dpu_crtc_atomic_check, + .atomic_begin = dpu_crtc_atomic_begin, + .atomic_flush = dpu_crtc_atomic_flush, + .get_scanout_position = dpu_crtc_get_scanout_position, +}; + +/* initialize crtc */ +struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane, + struct drm_plane *cursor) +{ + struct msm_drm_private *priv = dev->dev_private; 
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); + struct drm_crtc *crtc = NULL; + struct dpu_crtc *dpu_crtc = NULL; + int i; + + dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL); + if (!dpu_crtc) + return ERR_PTR(-ENOMEM); + + crtc = &dpu_crtc->base; + crtc->dev = dev; + + spin_lock_init(&dpu_crtc->spin_lock); + atomic_set(&dpu_crtc->frame_pending, 0); + + init_completion(&dpu_crtc->frame_done_comp); + + INIT_LIST_HEAD(&dpu_crtc->frame_event_list); + + for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) { + INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list); + list_add(&dpu_crtc->frame_events[i].list, + &dpu_crtc->frame_event_list); + kthread_init_work(&dpu_crtc->frame_events[i].work, + dpu_crtc_frame_event_work); + } + + drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs, + NULL); + + drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs); + + if (dpu_kms->catalog->dspp_count) + drm_crtc_enable_color_mgmt(crtc, 0, true, 0); + + /* save user friendly CRTC name for later */ + snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id); + + /* initialize event handling */ + spin_lock_init(&dpu_crtc->event_lock); + + DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name); + return crtc; +} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h new file mode 100644 index 000000000..539b68b16 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h @@ -0,0 +1,303 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved. + * Copyright (C) 2013 Red Hat + * Author: Rob Clark <robdclark@gmail.com> + */ + +#ifndef _DPU_CRTC_H_ +#define _DPU_CRTC_H_ + +#include <linux/kthread.h> +#include <drm/drm_crtc.h> +#include "dpu_kms.h" +#include "dpu_core_perf.h" + +#define DPU_CRTC_NAME_SIZE 12 + +/* define the maximum number of in-flight frame events */ +#define DPU_CRTC_FRAME_EVENT_SIZE 4 + +/** + * enum dpu_crtc_client_type: crtc client type + * @RT_CLIENT: RealTime client like video/cmd mode display + * voting through apps rsc + * @NRT_CLIENT: Non-RealTime client like WB display + * voting through apps rsc + */ +enum dpu_crtc_client_type { + RT_CLIENT, + NRT_CLIENT, +}; + +/** + * enum dpu_crtc_smmu_state: smmu state + * @ATTACHED: all the context banks are attached. + * @DETACHED: all the context banks are detached. + * @ATTACH_ALL_REQ: transient state of attaching context banks. + * @DETACH_ALL_REQ: transient state of detaching context banks. + */ +enum dpu_crtc_smmu_state { + ATTACHED = 0, + DETACHED, + ATTACH_ALL_REQ, + DETACH_ALL_REQ, +}; + +/** + * enum dpu_crtc_smmu_state_transition_type: state transition type + * @NONE: no pending state transitions + * @PRE_COMMIT: state transitions should be done before processing the commit + * @POST_COMMIT: state transitions to be done after processing the commit.
+ */ +enum dpu_crtc_smmu_state_transition_type { + NONE, + PRE_COMMIT, + POST_COMMIT +}; + +/** + * struct dpu_crtc_smmu_state_data: stores the smmu state and transition type + * @state: current state of smmu context banks + * @transition_type: transition request type + * @transition_error: whether there is error while transitioning the state + */ +struct dpu_crtc_smmu_state_data { + uint32_t state; + uint32_t transition_type; + uint32_t transition_error; +}; + +/** + * enum dpu_crtc_crc_source: CRC source + * @DPU_CRTC_CRC_SOURCE_NONE: no source set + * @DPU_CRTC_CRC_SOURCE_LAYER_MIXER: CRC in layer mixer + * @DPU_CRTC_CRC_SOURCE_ENCODER: CRC in encoder + * @DPU_CRTC_CRC_SOURCE_INVALID: Invalid source + */ +enum dpu_crtc_crc_source { + DPU_CRTC_CRC_SOURCE_NONE = 0, + DPU_CRTC_CRC_SOURCE_LAYER_MIXER, + DPU_CRTC_CRC_SOURCE_ENCODER, + DPU_CRTC_CRC_SOURCE_MAX, + DPU_CRTC_CRC_SOURCE_INVALID = -1 +}; + +/** + * struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC + * @hw_lm: LM HW Driver context + * @lm_ctl: CTL Path HW driver context + * @hw_dspp: DSPP HW driver context + * @mixer_op_mode: mixer blending operation mode + */ +struct dpu_crtc_mixer { + struct dpu_hw_mixer *hw_lm; + struct dpu_hw_ctl *lm_ctl; + struct dpu_hw_dspp *hw_dspp; + u32 mixer_op_mode; +}; + +/** + * struct dpu_crtc_frame_event: stores crtc frame event for crtc processing + * @work: base work structure + * @crtc: Pointer to crtc handling this event + * @list: event list + * @ts: timestamp at queue entry + * @event: event identifier + */ +struct dpu_crtc_frame_event { + struct kthread_work work; + struct drm_crtc *crtc; + struct list_head list; + ktime_t ts; + u32 event; +}; + +/* + * Maximum number of free event structures to cache + */ +#define DPU_CRTC_MAX_EVENT_COUNT 16 + +/** + * struct dpu_crtc - virtualized CRTC data structure + * @base : Base drm crtc structure + * @name : ASCII description of this crtc + * @event : Pointer to last received drm vblank event. If there is a + * pending vblank event, this will be non-null. + * @vsync_count : Running count of received vsync events + * @vblank_cb_count : count of vblank callback since last reset + * @play_count : frame count between crtc enable and disable + * @vblank_cb_time : ktime at vblank count reset + * @enabled : whether the DPU CRTC is currently enabled. updated in the + * commit-thread, not state-swap time which is earlier, so + * safe to make decisions on during VBLANK on/off work + * @feature_list : list of color processing features supported on a crtc + * @active_list : list of color processing features that are active + * @dirty_list : list of color processing features that are dirty + * @ad_dirty: list containing ad properties that are dirty + * @ad_active: list containing ad properties that are active + * @frame_pending : Whether or not an update is pending + * @frame_events : static allocation of in-flight frame events + * @frame_event_list : available frame event list + * @spin_lock : spin lock for frame event, transaction status, etc...
+ * @frame_done_comp : for frame_event_done synchronization + * @event_lock : Spinlock around event handling code + * @cur_perf : current performance committed to clock/bandwidth driver + */ +struct dpu_crtc { + struct drm_crtc base; + char name[DPU_CRTC_NAME_SIZE]; + + struct drm_pending_vblank_event *event; + u32 vsync_count; + + u32 vblank_cb_count; + u64 play_count; + ktime_t vblank_cb_time; + bool enabled; + + struct list_head feature_list; + struct list_head active_list; + struct list_head dirty_list; + struct list_head ad_dirty; + struct list_head ad_active; + + atomic_t frame_pending; + struct dpu_crtc_frame_event frame_events[DPU_CRTC_FRAME_EVENT_SIZE]; + struct list_head frame_event_list; + spinlock_t spin_lock; + struct completion frame_done_comp; + + /* for handling internal event thread */ + spinlock_t event_lock; + + struct dpu_core_perf_params cur_perf; + + struct dpu_crtc_smmu_state_data smmu_state; +}; + +#define to_dpu_crtc(x) container_of(x, struct dpu_crtc, base) + +/** + * struct dpu_crtc_state - dpu container for atomic crtc state + * @base: Base drm crtc state structure + * @bw_control : true if bw/clk controlled by core bw/clk properties + * @bw_split_vote : true if bw controlled by llcc/dram bw properties + * @lm_bounds : LM boundaries based on current mode full resolution, no ROI. + * Origin top left of CRTC. + * @input_fence_timeout_ns : Cached input fence timeout, in ns + * @new_perf: new performance state being requested + * @num_mixers : Number of mixers in use + * @mixers : List of active mixers + * @num_ctls : Number of ctl paths in use + * @hw_ctls : List of active ctl paths + * @crc_source : CRC source + * @crc_frame_skip_count: Number of frames skipped before getting CRC + */ +struct dpu_crtc_state { + struct drm_crtc_state base; + + bool bw_control; + bool bw_split_vote; + struct drm_rect lm_bounds[CRTC_DUAL_MIXERS]; + + uint64_t input_fence_timeout_ns; + + struct dpu_core_perf_params new_perf; + + /* HW Resources reserved for the crtc */ + u32 num_mixers; + struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS]; + + u32 num_ctls; + struct dpu_hw_ctl *hw_ctls[CRTC_DUAL_MIXERS]; + + enum dpu_crtc_crc_source crc_source; + int crc_frame_skip_count; +}; + +#define to_dpu_crtc_state(x) \ + container_of(x, struct dpu_crtc_state, base) + +/** + * dpu_crtc_frame_pending - return the number of pending frames + * @crtc: Pointer to drm crtc object + */ +static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc) +{ + return crtc ?
atomic_read(&to_dpu_crtc(crtc)->frame_pending) : -EINVAL; +} + +/** + * dpu_crtc_vblank - enable or disable vblanks for this crtc + * @crtc: Pointer to drm crtc object + * @en: true to enable vblanks, false to disable + */ +int dpu_crtc_vblank(struct drm_crtc *crtc, bool en); + +/** + * dpu_crtc_vblank_callback - called on vblank irq, issues completion events + * @crtc: Pointer to drm crtc object + */ +void dpu_crtc_vblank_callback(struct drm_crtc *crtc); + +/** + * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc + * @crtc: Pointer to drm crtc object + */ +void dpu_crtc_commit_kickoff(struct drm_crtc *crtc); + +/** + * dpu_crtc_complete_commit - callback signalling completion of current commit + * @crtc: Pointer to drm crtc object + */ +void dpu_crtc_complete_commit(struct drm_crtc *crtc); + +/** + * dpu_crtc_init - create a new crtc object + * @dev: dpu device + * @plane: base plane + * @cursor: cursor plane + * @Return: new crtc object or error + */ +struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane, + struct drm_plane *cursor); + +/** + * dpu_crtc_register_custom_event - API for enabling/disabling crtc event + * @kms: Pointer to dpu_kms + * @crtc_drm: Pointer to crtc object + * @event: Event that client is interested in + * @en: Flag to enable/disable the event + */ +int dpu_crtc_register_custom_event(struct dpu_kms *kms, + struct drm_crtc *crtc_drm, u32 event, bool en); + +/** + * dpu_crtc_get_intf_mode - get interface mode of the given crtc + * @crtc: Pointer to crtc + */ +enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc); + +/** + * dpu_crtc_get_client_type - check the crtc type - rt, nrt, etc. + * @crtc: Pointer to crtc + */ +static inline enum dpu_crtc_client_type dpu_crtc_get_client_type( + struct drm_crtc *crtc) +{ + return crtc && crtc->state ? RT_CLIENT : NRT_CLIENT; +} + +#endif /* _DPU_CRTC_H_ */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c new file mode 100644 index 000000000..b0eb881f8 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -0,0 +1,2556 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013 Red Hat + * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Author: Rob Clark <robdclark@gmail.com> + */ + +#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ +#include <linux/debugfs.h> +#include <linux/kthread.h> +#include <linux/seq_file.h> + +#include <drm/drm_crtc.h> +#include <drm/drm_file.h> +#include <drm/drm_probe_helper.h> + +#include "msm_drv.h" +#include "dpu_kms.h" +#include "dpu_hwio.h" +#include "dpu_hw_catalog.h" +#include "dpu_hw_intf.h" +#include "dpu_hw_ctl.h" +#include "dpu_hw_dspp.h" +#include "dpu_hw_dsc.h" +#include "dpu_hw_merge3d.h" +#include "dpu_formats.h" +#include "dpu_encoder_phys.h" +#include "dpu_crtc.h" +#include "dpu_trace.h" +#include "dpu_core_irq.h" +#include "disp/msm_disp_snapshot.h" + +#define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\ + (e) ? (e)->base.base.id : -1, ##__VA_ARGS__) + +#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\ + (e) ?
(e)->base.base.id : -1, ##__VA_ARGS__) + +/* + * Two to anticipate panels that can do cmd/vid dynamic switching + * plan is to create all possible physical encoder types, and switch between + * them at runtime + */ +#define NUM_PHYS_ENCODER_TYPES 2 + +#define MAX_PHYS_ENCODERS_PER_VIRTUAL \ + (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES) + +#define MAX_CHANNELS_PER_ENC 2 + +#define IDLE_SHORT_TIMEOUT 1 + +#define MAX_HDISPLAY_SPLIT 1080 + +/* timeout in frames waiting for frame done */ +#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5 + +/** + * enum dpu_enc_rc_events - events for resource control state machine + * @DPU_ENC_RC_EVENT_KICKOFF: + * This event happens at NORMAL priority. + * Event that signals the start of the transfer. When this event is + * received, enable MDP/DSI core clocks. Regardless of the previous + * state, the resource should be in ON state at the end of this event. + * @DPU_ENC_RC_EVENT_FRAME_DONE: + * This event happens at INTERRUPT level. + * Event signals the end of the data transfer after the PP FRAME_DONE + * event. At the end of this event, a delayed work is scheduled to go to + * IDLE_PC state after IDLE_TIMEOUT time. + * @DPU_ENC_RC_EVENT_PRE_STOP: + * This event happens at NORMAL priority. + * This event, when received during the ON state, leave the RC STATE + * in the PRE_OFF state. It should be followed by the STOP event as + * part of encoder disable. + * If received during IDLE or OFF states, it will do nothing. + * @DPU_ENC_RC_EVENT_STOP: + * This event happens at NORMAL priority. + * When this event is received, disable all the MDP/DSI core clocks, and + * disable IRQs. It should be called from the PRE_OFF or IDLE states. + * IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing. + * PRE_OFF is expected when PRE_STOP was executed during the ON state. + * Resource state should be in OFF at the end of the event. + * @DPU_ENC_RC_EVENT_ENTER_IDLE: + * This event happens at NORMAL priority from a work item. + * Event signals that there were no frame updates for IDLE_TIMEOUT time. + * This would disable MDP/DSI core clocks and change the resource state + * to IDLE. + */ +enum dpu_enc_rc_events { + DPU_ENC_RC_EVENT_KICKOFF = 1, + DPU_ENC_RC_EVENT_FRAME_DONE, + DPU_ENC_RC_EVENT_PRE_STOP, + DPU_ENC_RC_EVENT_STOP, + DPU_ENC_RC_EVENT_ENTER_IDLE +}; + +/* + * enum dpu_enc_rc_states - states that the resource control maintains + * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state + * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state + * @DPU_ENC_RC_STATE_ON: Resource is in ON state + * @DPU_ENC_RC_STATE_MODESET: Resource is in modeset state + * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state + */ +enum dpu_enc_rc_states { + DPU_ENC_RC_STATE_OFF, + DPU_ENC_RC_STATE_PRE_OFF, + DPU_ENC_RC_STATE_ON, + DPU_ENC_RC_STATE_IDLE +}; + +/** + * struct dpu_encoder_virt - virtual encoder. Container of one or more physical + * encoders. Virtual encoder manages one "logical" display. Physical + * encoders manage one intf block, tied to a specific panel/sub-panel. + * Virtual encoder defers as much as possible to the physical encoders. + * Virtual encoder registers itself with the DRM Framework as the encoder. + * @base: drm_encoder base class for registration with DRM + * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes + * @enabled: True if the encoder is active, protected by enc_lock + * @num_phys_encs: Actual number of physical encoders contained. + * @phys_encs: Container of physical encoders managed. 
+ * @cur_master: Pointer to the current master in this mode. Optimization: + * only valid after enable, cleared at disable. + * @cur_slave: As above but for the slave encoder. + * @hw_pp: Handle to the pingpong blocks used for the display. The + * number of pingpong blocks used can differ from num_phys_encs. + * @hw_dsc: Handle to the DSC blocks used for the display. + * @dsc_mask: Bitmask of used DSC blocks. + * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped + * for partial update right-only cases, such as pingpong + * split where virtual pingpong does not generate IRQs + * @crtc: Pointer to the currently assigned crtc. Normally you + * would use crtc->state->encoder_mask to determine the + * link between encoder/crtc. However in this case we need + * to track crtc in the disable() hook which is called + * _after_ encoder_mask is cleared. + * @connector: If a mode is set, cached pointer to the active connector + * @debugfs_root: Debug file system root file node + * @enc_lock: Lock around physical encoder + * create/destroy/enable/disable + * @frame_busy_mask: Bitmask tracking which phys_encs are still + * busy processing the current command. + * Bit0 = phys_encs[0], etc. + * @crtc_frame_event_cb: callback handler for frame event + * @crtc_frame_event_cb_data: callback handler private data + * @frame_done_timeout_ms: frame done timeout in ms + * @frame_done_timer: watchdog timer for frame done event + * @vsync_event_timer: vsync timer + * @disp_info: local copy of msm_display_info struct + * @idle_pc_supported: indicate if idle power collapse is supported + * @rc_lock: resource control mutex lock to protect + * virt encoder over various state changes + * @rc_state: resource controller state + * @delayed_off_work: delayed worker to schedule disabling of + * clks and resources after IDLE_TIMEOUT time.
+ * @vsync_event_work: worker to handle vsync event for autorefresh + * @topology: topology of the display + * @idle_timeout: idle timeout duration in milliseconds + * @dsc: drm_dsc_config pointer, for DSC-enabled encoders + */ +struct dpu_encoder_virt { + struct drm_encoder base; + spinlock_t enc_spinlock; + + bool enabled; + + unsigned int num_phys_encs; + struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL]; + struct dpu_encoder_phys *cur_master; + struct dpu_encoder_phys *cur_slave; + struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC]; + struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC]; + + unsigned int dsc_mask; + + bool intfs_swapped; + + struct drm_crtc *crtc; + struct drm_connector *connector; + + struct dentry *debugfs_root; + struct mutex enc_lock; + DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL); + void (*crtc_frame_event_cb)(void *, u32 event); + void *crtc_frame_event_cb_data; + + atomic_t frame_done_timeout_ms; + struct timer_list frame_done_timer; + struct timer_list vsync_event_timer; + + struct msm_display_info disp_info; + + bool idle_pc_supported; + struct mutex rc_lock; + enum dpu_enc_rc_states rc_state; + struct delayed_work delayed_off_work; + struct kthread_work vsync_event_work; + struct msm_display_topology topology; + + u32 idle_timeout; + + bool wide_bus_en; + + /* DSC configuration */ + struct drm_dsc_config *dsc; +}; + +#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base) + +static u32 dither_matrix[DITHER_MATRIX_SZ] = { + 15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10 +}; + + +bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc) +{ + const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); + + return dpu_enc->wide_bus_en; +} + +int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc) +{ + struct dpu_encoder_virt *dpu_enc; + int i, num_intf = 0; + + dpu_enc = to_dpu_encoder_virt(drm_enc); + + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; + + if (phys->hw_intf && phys->hw_intf->ops.setup_misr + && phys->hw_intf->ops.collect_misr) + num_intf++; + } + + return num_intf; +} + +void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc) +{ + struct dpu_encoder_virt *dpu_enc; + + int i; + + dpu_enc = to_dpu_encoder_virt(drm_enc); + + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; + + if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr) + continue; + + phys->hw_intf->ops.setup_misr(phys->hw_intf); + } +} + +int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos) +{ + struct dpu_encoder_virt *dpu_enc; + + int i, rc = 0, entries_added = 0; + + if (!drm_enc->crtc) { + DRM_ERROR("no crtc found for encoder %d\n", drm_enc->index); + return -EINVAL; + } + + dpu_enc = to_dpu_encoder_virt(drm_enc); + + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; + + if (!phys->hw_intf || !phys->hw_intf->ops.collect_misr) + continue; + + rc = phys->hw_intf->ops.collect_misr(phys->hw_intf, &crcs[pos + entries_added]); + if (rc) + return rc; + entries_added++; + } + + return entries_added; +} + +static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc) +{ + struct dpu_hw_dither_cfg dither_cfg = { 0 }; + + if (!hw_pp->ops.setup_dither) + return; + + switch (bpc) { + case 6: + dither_cfg.c0_bitdepth = 6; + dither_cfg.c1_bitdepth = 6; + dither_cfg.c2_bitdepth = 6; + 
dither_cfg.c3_bitdepth = 6; + dither_cfg.temporal_en = 0; + break; + default: + hw_pp->ops.setup_dither(hw_pp, NULL); + return; + } + + memcpy(&dither_cfg.matrix, dither_matrix, + sizeof(u32) * DITHER_MATRIX_SZ); + + hw_pp->ops.setup_dither(hw_pp, &dither_cfg); +} + +static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode) +{ + switch (intf_mode) { + case INTF_MODE_VIDEO: + return "INTF_MODE_VIDEO"; + case INTF_MODE_CMD: + return "INTF_MODE_CMD"; + case INTF_MODE_WB_BLOCK: + return "INTF_MODE_WB_BLOCK"; + case INTF_MODE_WB_LINE: + return "INTF_MODE_WB_LINE"; + default: + return "INTF_MODE_UNKNOWN"; + } +} + +void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc, + enum dpu_intr_idx intr_idx) +{ + DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n", + DRMID(phys_enc->parent), + dpu_encoder_helper_get_intf_type(phys_enc->intf_mode), + phys_enc->intf_idx - INTF_0, phys_enc->wb_idx - WB_0, + phys_enc->hw_pp->idx - PINGPONG_0, intr_idx); + + if (phys_enc->parent_ops->handle_frame_done) + phys_enc->parent_ops->handle_frame_done( + phys_enc->parent, phys_enc, + DPU_ENCODER_FRAME_EVENT_ERROR); +} + +static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id, + u32 irq_idx, struct dpu_encoder_wait_info *info); + +int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, + int irq, + void (*func)(void *arg, int irq_idx), + struct dpu_encoder_wait_info *wait_info) +{ + u32 irq_status; + int ret; + + if (!wait_info) { + DPU_ERROR("invalid params\n"); + return -EINVAL; + } + /* note: do master / slave checking outside */ + + /* return EWOULDBLOCK since we know the wait isn't necessary */ + if (phys_enc->enable_state == DPU_ENC_DISABLED) { + DRM_ERROR("encoder is disabled id=%u, callback=%ps, irq=%d\n", + DRMID(phys_enc->parent), func, + irq); + return -EWOULDBLOCK; + } + + if (irq < 0) { + DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n", + DRMID(phys_enc->parent), func); + return 0; + } + + DRM_DEBUG_KMS("id=%u, callback=%ps, irq=%d, pp=%d, pending_cnt=%d\n", + DRMID(phys_enc->parent), func, + irq, phys_enc->hw_pp->idx - PINGPONG_0, + atomic_read(wait_info->atomic_cnt)); + + ret = dpu_encoder_helper_wait_event_timeout( + DRMID(phys_enc->parent), + irq, + wait_info); + + if (ret <= 0) { + irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq); + if (irq_status) { + unsigned long flags; + + DRM_DEBUG_KMS("irq not triggered id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n", + DRMID(phys_enc->parent), func, + irq, + phys_enc->hw_pp->idx - PINGPONG_0, + atomic_read(wait_info->atomic_cnt)); + local_irq_save(flags); + func(phys_enc, irq); + local_irq_restore(flags); + ret = 0; + } else { + ret = -ETIMEDOUT; + DRM_DEBUG_KMS("irq timeout id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n", + DRMID(phys_enc->parent), func, + irq, + phys_enc->hw_pp->idx - PINGPONG_0, + atomic_read(wait_info->atomic_cnt)); + } + } else { + ret = 0; + trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent), + func, irq, + phys_enc->hw_pp->idx - PINGPONG_0, + atomic_read(wait_info->atomic_cnt)); + } + + return ret; +} + +int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc) +{ + struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); + struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL; + return phys ? 
atomic_read(&phys->vsync_cnt) : 0; +} + +int dpu_encoder_get_linecount(struct drm_encoder *drm_enc) +{ + struct dpu_encoder_virt *dpu_enc; + struct dpu_encoder_phys *phys; + int linecount = 0; + + dpu_enc = to_dpu_encoder_virt(drm_enc); + phys = dpu_enc ? dpu_enc->cur_master : NULL; + + if (phys && phys->ops.get_line_count) + linecount = phys->ops.get_line_count(phys); + + return linecount; +} + +static void dpu_encoder_destroy(struct drm_encoder *drm_enc) +{ + struct dpu_encoder_virt *dpu_enc = NULL; + int i = 0; + + if (!drm_enc) { + DPU_ERROR("invalid encoder\n"); + return; + } + + dpu_enc = to_dpu_encoder_virt(drm_enc); + DPU_DEBUG_ENC(dpu_enc, "\n"); + + mutex_lock(&dpu_enc->enc_lock); + + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; + + if (phys->ops.destroy) { + phys->ops.destroy(phys); + --dpu_enc->num_phys_encs; + dpu_enc->phys_encs[i] = NULL; + } + } + + if (dpu_enc->num_phys_encs) + DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n", + dpu_enc->num_phys_encs); + dpu_enc->num_phys_encs = 0; + mutex_unlock(&dpu_enc->enc_lock); + + drm_encoder_cleanup(drm_enc); + mutex_destroy(&dpu_enc->enc_lock); +} + +void dpu_encoder_helper_split_config( + struct dpu_encoder_phys *phys_enc, + enum dpu_intf interface) +{ + struct dpu_encoder_virt *dpu_enc; + struct split_pipe_cfg cfg = { 0 }; + struct dpu_hw_mdp *hw_mdptop; + struct msm_display_info *disp_info; + + if (!phys_enc->hw_mdptop || !phys_enc->parent) { + DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL); + return; + } + + dpu_enc = to_dpu_encoder_virt(phys_enc->parent); + hw_mdptop = phys_enc->hw_mdptop; + disp_info = &dpu_enc->disp_info; + + if (disp_info->intf_type != DRM_MODE_ENCODER_DSI) + return; + + /** + * disable split modes since encoder will be operating in as the only + * encoder, either for the entire use case in the case of, for example, + * single DSI, or for this frame in the case of left/right only partial + * update. 
+ */ + if (phys_enc->split_role == ENC_ROLE_SOLO) { + if (hw_mdptop->ops.setup_split_pipe) + hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg); + return; + } + + cfg.en = true; + cfg.mode = phys_enc->intf_mode; + cfg.intf = interface; + + if (cfg.en && phys_enc->ops.needs_single_flush && + phys_enc->ops.needs_single_flush(phys_enc)) + cfg.split_flush_en = true; + + if (phys_enc->split_role == ENC_ROLE_MASTER) { + DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en); + + if (hw_mdptop->ops.setup_split_pipe) + hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg); + } +} + +bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc) +{ + struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); + int i, intf_count = 0, num_dsc = 0; + + for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++) + if (dpu_enc->phys_encs[i]) + intf_count++; + + /* See dpu_encoder_get_topology, we only support 2:2:1 topology */ + if (dpu_enc->dsc) + num_dsc = 2; + + return (num_dsc > 0) && (num_dsc > intf_count); +} + +static struct msm_display_topology dpu_encoder_get_topology( + struct dpu_encoder_virt *dpu_enc, + struct dpu_kms *dpu_kms, + struct drm_display_mode *mode) +{ + struct msm_display_topology topology = {0}; + int i, intf_count = 0; + + for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++) + if (dpu_enc->phys_encs[i]) + intf_count++; + + /* Datapath topology selection + * + * Dual display + * 2 LM, 2 INTF ( Split display using 2 interfaces) + * + * Single display + * 1 LM, 1 INTF + * 2 LM, 1 INTF (stream merge to support high resolution interfaces) + * + * Adding color blocks only to primary interface if available in + * sufficient number + */ + if (intf_count == 2) + topology.num_lm = 2; + else if (!dpu_kms->catalog->caps->has_3d_merge) + topology.num_lm = 1; + else + topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 
2 : 1; + + if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) { + if (dpu_kms->catalog->dspp && + (dpu_kms->catalog->dspp_count >= topology.num_lm)) + topology.num_dspp = topology.num_lm; + } + + topology.num_enc = 0; + topology.num_intf = intf_count; + + if (dpu_enc->dsc) { + /* In case of Display Stream Compression (DSC), we would use + * 2 encoders, 2 layer mixers and 1 interface; + * this is power-optimal and can drive up to (and including) 4k + * screens + */ + topology.num_enc = 2; + topology.num_dsc = 2; + topology.num_intf = 1; + topology.num_lm = 2; + } + + return topology; +} + +static int dpu_encoder_virt_atomic_check( + struct drm_encoder *drm_enc, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct dpu_encoder_virt *dpu_enc; + struct msm_drm_private *priv; + struct dpu_kms *dpu_kms; + struct drm_display_mode *adj_mode; + struct msm_display_topology topology; + struct dpu_global_state *global_state; + int i = 0; + int ret = 0; + + if (!drm_enc || !crtc_state || !conn_state) { + DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n", + drm_enc != NULL, crtc_state != NULL, conn_state != NULL); + return -EINVAL; + } + + dpu_enc = to_dpu_encoder_virt(drm_enc); + DPU_DEBUG_ENC(dpu_enc, "\n"); + + priv = drm_enc->dev->dev_private; + dpu_kms = to_dpu_kms(priv->kms); + adj_mode = &crtc_state->adjusted_mode; + global_state = dpu_kms_get_global_state(crtc_state->state); + if (IS_ERR(global_state)) + return PTR_ERR(global_state); + + trace_dpu_enc_atomic_check(DRMID(drm_enc)); + + /* perform atomic check on the first physical encoder (master) */ + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; + + if (phys->ops.atomic_check) + ret = phys->ops.atomic_check(phys, crtc_state, + conn_state); + if (ret) { + DPU_ERROR_ENC(dpu_enc, + "mode unsupported, phys idx %d\n", i); + break; + } + } + + topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode); + + /* Reserve dynamic resources now. */ + if (!ret) { + /* + * Release and allocate resources on every modeset + * Don't allocate when active is false.
+ */ + if (drm_atomic_crtc_needs_modeset(crtc_state)) { + dpu_rm_release(global_state, drm_enc); + + if (!crtc_state->active_changed || crtc_state->enable) + ret = dpu_rm_reserve(&dpu_kms->rm, global_state, + drm_enc, crtc_state, topology); + } + } + + trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags); + + return ret; +} + +static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc, + struct msm_display_info *disp_info) +{ + struct dpu_vsync_source_cfg vsync_cfg = { 0 }; + struct msm_drm_private *priv; + struct dpu_kms *dpu_kms; + struct dpu_hw_mdp *hw_mdptop; + struct drm_encoder *drm_enc; + int i; + + if (!dpu_enc || !disp_info) { + DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n", + dpu_enc != NULL, disp_info != NULL); + return; + } else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) { + DPU_ERROR("invalid num phys enc %d/%d\n", + dpu_enc->num_phys_encs, + (int) ARRAY_SIZE(dpu_enc->hw_pp)); + return; + } + + drm_enc = &dpu_enc->base; + /* these pointers are checked in virt_enable_helper */ + priv = drm_enc->dev->dev_private; + + dpu_kms = to_dpu_kms(priv->kms); + hw_mdptop = dpu_kms->hw_mdp; + if (!hw_mdptop) { + DPU_ERROR("invalid mdptop\n"); + return; + } + + if (hw_mdptop->ops.setup_vsync_source && + disp_info->is_cmd_mode) { + for (i = 0; i < dpu_enc->num_phys_encs; i++) + vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx; + + vsync_cfg.pp_count = dpu_enc->num_phys_encs; + if (disp_info->is_te_using_watchdog_timer) + vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0; + else + vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO; + + hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg); + } +} + +static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable) +{ + struct dpu_encoder_virt *dpu_enc; + int i; + + if (!drm_enc) { + DPU_ERROR("invalid encoder\n"); + return; + } + + dpu_enc = to_dpu_encoder_virt(drm_enc); + + DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable); + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; + + if (phys->ops.irq_control) + phys->ops.irq_control(phys, enable); + } + +} + +static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc, + bool enable) +{ + struct msm_drm_private *priv; + struct dpu_kms *dpu_kms; + struct dpu_encoder_virt *dpu_enc; + + dpu_enc = to_dpu_encoder_virt(drm_enc); + priv = drm_enc->dev->dev_private; + dpu_kms = to_dpu_kms(priv->kms); + + trace_dpu_enc_rc_helper(DRMID(drm_enc), enable); + + if (!dpu_enc->cur_master) { + DPU_ERROR("encoder master not set\n"); + return; + } + + if (enable) { + /* enable DPU core clks */ + pm_runtime_get_sync(&dpu_kms->pdev->dev); + + /* enable all the irq */ + _dpu_encoder_irq_control(drm_enc, true); + + } else { + /* disable all the irq */ + _dpu_encoder_irq_control(drm_enc, false); + + /* disable DPU core clks */ + pm_runtime_put_sync(&dpu_kms->pdev->dev); + } + +} + +static int dpu_encoder_resource_control(struct drm_encoder *drm_enc, + u32 sw_event) +{ + struct dpu_encoder_virt *dpu_enc; + struct msm_drm_private *priv; + bool is_vid_mode = false; + + if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) { + DPU_ERROR("invalid parameters\n"); + return -EINVAL; + } + dpu_enc = to_dpu_encoder_virt(drm_enc); + priv = drm_enc->dev->dev_private; + is_vid_mode = !dpu_enc->disp_info.is_cmd_mode; + + /* + * when idle_pc is not supported, process only KICKOFF, PRE_STOP and + * STOP events and return early for other events (i.e. wb display).
+ */ + if (!dpu_enc->idle_pc_supported && + (sw_event != DPU_ENC_RC_EVENT_KICKOFF && + sw_event != DPU_ENC_RC_EVENT_STOP && + sw_event != DPU_ENC_RC_EVENT_PRE_STOP)) + return 0; + + trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported, + dpu_enc->rc_state, "begin"); + + switch (sw_event) { + case DPU_ENC_RC_EVENT_KICKOFF: + /* cancel delayed off work, if any */ + if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work)) + DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n", + sw_event); + + mutex_lock(&dpu_enc->rc_lock); + + /* return if the resource control is already in ON state */ + if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) { + DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in ON state\n", + DRMID(drm_enc), sw_event); + mutex_unlock(&dpu_enc->rc_lock); + return 0; + } else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF && + dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) { + DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in state %d\n", + DRMID(drm_enc), sw_event, + dpu_enc->rc_state); + mutex_unlock(&dpu_enc->rc_lock); + return -EINVAL; + } + + if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) + _dpu_encoder_irq_control(drm_enc, true); + else + _dpu_encoder_resource_control_helper(drm_enc, true); + + dpu_enc->rc_state = DPU_ENC_RC_STATE_ON; + + trace_dpu_enc_rc(DRMID(drm_enc), sw_event, + dpu_enc->idle_pc_supported, dpu_enc->rc_state, + "kickoff"); + + mutex_unlock(&dpu_enc->rc_lock); + break; + + case DPU_ENC_RC_EVENT_FRAME_DONE: + /* + * The mutex lock is not used as this event happens in interrupt + * context. Locking is also not required as the other events, + * like KICKOFF and STOP, do a wait-for-idle before executing + * the resource_control + */ + if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) { + DRM_DEBUG_KMS("id:%d, sw_event:%d, rc:%d-unexpected\n", + DRMID(drm_enc), sw_event, + dpu_enc->rc_state); + return -EINVAL; + } + + /* + * schedule off work item only when there are no + * frames pending + */ + if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) { + DRM_DEBUG_KMS("id:%d skip schedule work\n", + DRMID(drm_enc)); + return 0; + } + + queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work, + msecs_to_jiffies(dpu_enc->idle_timeout)); + + trace_dpu_enc_rc(DRMID(drm_enc), sw_event, + dpu_enc->idle_pc_supported, dpu_enc->rc_state, + "frame done"); + break; + + case DPU_ENC_RC_EVENT_PRE_STOP: + /* cancel delayed off work, if any */ + if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work)) + DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n", + sw_event); + + mutex_lock(&dpu_enc->rc_lock); + + if (is_vid_mode && + dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) { + _dpu_encoder_irq_control(drm_enc, true); + } + /* skip if is already OFF or IDLE, resources are off already */ + else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF || + dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) { + DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n", + DRMID(drm_enc), sw_event, + dpu_enc->rc_state); + mutex_unlock(&dpu_enc->rc_lock); + return 0; + } + + dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF; + + trace_dpu_enc_rc(DRMID(drm_enc), sw_event, + dpu_enc->idle_pc_supported, dpu_enc->rc_state, + "pre stop"); + + mutex_unlock(&dpu_enc->rc_lock); + break; + + case DPU_ENC_RC_EVENT_STOP: + mutex_lock(&dpu_enc->rc_lock); + + /* return if the resource control is already in OFF state */ + if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) { + DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n", + DRMID(drm_enc), sw_event); + mutex_unlock(&dpu_enc->rc_lock); + return 0; + } else if
(dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) { + DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n", + DRMID(drm_enc), sw_event, dpu_enc->rc_state); + mutex_unlock(&dpu_enc->rc_lock); + return -EINVAL; + } + + /* + * We expect to arrive here only in the IDLE or PRE_OFF state, and + * in the IDLE state the resources are already disabled. + */ + if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF) + _dpu_encoder_resource_control_helper(drm_enc, false); + + dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF; + + trace_dpu_enc_rc(DRMID(drm_enc), sw_event, + dpu_enc->idle_pc_supported, dpu_enc->rc_state, + "stop"); + + mutex_unlock(&dpu_enc->rc_lock); + break; + + case DPU_ENC_RC_EVENT_ENTER_IDLE: + mutex_lock(&dpu_enc->rc_lock); + + if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) { + DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n", + DRMID(drm_enc), sw_event, dpu_enc->rc_state); + mutex_unlock(&dpu_enc->rc_lock); + return 0; + } + + /* + * if we are in ON but a frame was just kicked off, + * ignore the IDLE event, it's probably a stale timer event + */ + if (dpu_enc->frame_busy_mask[0]) { + DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n", + DRMID(drm_enc), sw_event, dpu_enc->rc_state); + mutex_unlock(&dpu_enc->rc_lock); + return 0; + } + + if (is_vid_mode) + _dpu_encoder_irq_control(drm_enc, false); + else + _dpu_encoder_resource_control_helper(drm_enc, false); + + dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE; + + trace_dpu_enc_rc(DRMID(drm_enc), sw_event, + dpu_enc->idle_pc_supported, dpu_enc->rc_state, + "idle"); + + mutex_unlock(&dpu_enc->rc_lock); + break; + + default: + DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc), + sw_event); + trace_dpu_enc_rc(DRMID(drm_enc), sw_event, + dpu_enc->idle_pc_supported, dpu_enc->rc_state, + "error"); + break; + } + + trace_dpu_enc_rc(DRMID(drm_enc), sw_event, + dpu_enc->idle_pc_supported, dpu_enc->rc_state, + "end"); + return 0; +} + +void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc, + struct drm_writeback_job *job) +{ + struct dpu_encoder_virt *dpu_enc; + int i; + + dpu_enc = to_dpu_encoder_virt(drm_enc); + + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; + + if (phys->ops.prepare_wb_job) + phys->ops.prepare_wb_job(phys, job); + + } +} + +void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc, + struct drm_writeback_job *job) +{ + struct dpu_encoder_virt *dpu_enc; + int i; + + dpu_enc = to_dpu_encoder_virt(drm_enc); + + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; + + if (phys->ops.cleanup_wb_job) + phys->ops.cleanup_wb_job(phys, job); + + } +} + +static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct dpu_encoder_virt *dpu_enc; + struct msm_drm_private *priv; + struct dpu_kms *dpu_kms; + struct dpu_crtc_state *cstate; + struct dpu_global_state *global_state; + struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC]; + struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC]; + struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC]; + struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL }; + struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC]; + int num_lm, num_ctl, num_pp, num_dsc; + unsigned int dsc_mask = 0; + int i; + + if (!drm_enc) { + DPU_ERROR("invalid encoder\n"); + return; + } + + dpu_enc = to_dpu_encoder_virt(drm_enc); + DPU_DEBUG_ENC(dpu_enc, "\n"); + + priv = drm_enc->dev->dev_private; + dpu_kms = to_dpu_kms(priv->kms); + +
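/* + * Resources were reserved in dpu_encoder_virt_atomic_check(), and + * atomic_mode_set() cannot fail, so resource lookup failures below + * are only logged. + */ +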
global_state = dpu_kms_get_existing_global_state(dpu_kms); + if (IS_ERR_OR_NULL(global_state)) { + DPU_ERROR("Failed to get global state\n"); + return; + } + + trace_dpu_enc_mode_set(DRMID(drm_enc)); + + /* Query resources that have been reserved in the atomic check step. */ + num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp, + ARRAY_SIZE(hw_pp)); + num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl)); + num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); + dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp, + ARRAY_SIZE(hw_dspp)); + + for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) + dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i]) + : NULL; + + if (dpu_enc->dsc) { + num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->base.id, DPU_HW_BLK_DSC, + hw_dsc, ARRAY_SIZE(hw_dsc)); + for (i = 0; i < num_dsc; i++) { + dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]); + dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0); + } + } + + dpu_enc->dsc_mask = dsc_mask; + + cstate = to_dpu_crtc_state(crtc_state); + + for (i = 0; i < num_lm; i++) { + int ctl_idx = (i < num_ctl) ? i : (num_ctl - 1); + + cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]); + cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]); + cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]); + } + + cstate->num_mixers = num_lm; + + dpu_enc->connector = conn_state->connector; + + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; + + if (!dpu_enc->hw_pp[i]) { + DPU_ERROR_ENC(dpu_enc, + "no pp block assigned at idx: %d\n", i); + return; + } + + if (!hw_ctl[i]) { + DPU_ERROR_ENC(dpu_enc, + "no ctl block assigned at idx: %d\n", i); + return; + } + + phys->hw_pp = dpu_enc->hw_pp[i]; + phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]); + + phys->cached_mode = crtc_state->adjusted_mode; + if (phys->ops.atomic_mode_set) + phys->ops.atomic_mode_set(phys, crtc_state, conn_state); + } +} + +static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) +{ + struct dpu_encoder_virt *dpu_enc = NULL; + int i; + + if (!drm_enc || !drm_enc->dev) { + DPU_ERROR("invalid parameters\n"); + return; + } + + dpu_enc = to_dpu_encoder_virt(drm_enc); + if (!dpu_enc || !dpu_enc->cur_master) { + DPU_ERROR("invalid dpu encoder/master\n"); + return; + } + + + if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_TMDS && + dpu_enc->cur_master->hw_mdptop && + dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select) + dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select( + dpu_enc->cur_master->hw_mdptop); + + _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info); + + if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI && + !WARN_ON(dpu_enc->num_phys_encs == 0)) { + unsigned int bpc = dpu_enc->connector->display_info.bpc; + for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { + if (!dpu_enc->hw_pp[i]) + continue; + _dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc); + } + } +} + +void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc) +{ + struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); + + mutex_lock(&dpu_enc->enc_lock); + + if (!dpu_enc->enabled) + goto out; + + if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore) + dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave); + if
(dpu_enc->cur_master && dpu_enc->cur_master->ops.restore) + dpu_enc->cur_master->ops.restore(dpu_enc->cur_master); + + _dpu_encoder_virt_enable_helper(drm_enc); + +out: + mutex_unlock(&dpu_enc->enc_lock); +} + +static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc) +{ + struct dpu_encoder_virt *dpu_enc = NULL; + int ret = 0; + struct drm_display_mode *cur_mode = NULL; + + dpu_enc = to_dpu_encoder_virt(drm_enc); + + mutex_lock(&dpu_enc->enc_lock); + cur_mode = &dpu_enc->base.crtc->state->adjusted_mode; + + trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay, + cur_mode->vdisplay); + + /* always enable slave encoder before master */ + if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable) + dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave); + + if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable) + dpu_enc->cur_master->ops.enable(dpu_enc->cur_master); + + ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF); + if (ret) { + DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n", + ret); + goto out; + } + + _dpu_encoder_virt_enable_helper(drm_enc); + + dpu_enc->enabled = true; + +out: + mutex_unlock(&dpu_enc->enc_lock); +} + +static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) +{ + struct dpu_encoder_virt *dpu_enc = NULL; + int i = 0; + + dpu_enc = to_dpu_encoder_virt(drm_enc); + DPU_DEBUG_ENC(dpu_enc, "\n"); + + mutex_lock(&dpu_enc->enc_lock); + dpu_enc->enabled = false; + + trace_dpu_enc_disable(DRMID(drm_enc)); + + /* wait for idle */ + dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE); + + dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP); + + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; + + if (phys->ops.disable) + phys->ops.disable(phys); + } + + + /* after phys waits for frame-done, should be no more frames pending */ + if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) { + DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id); + del_timer_sync(&dpu_enc->frame_done_timer); + } + + dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP); + + dpu_enc->connector = NULL; + + DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n"); + + mutex_unlock(&dpu_enc->enc_lock); +} + +static enum dpu_intf dpu_encoder_get_intf(const struct dpu_mdss_cfg *catalog, + enum dpu_intf_type type, u32 controller_id) +{ + int i = 0; + + if (type == INTF_WB) + return INTF_MAX; + + for (i = 0; i < catalog->intf_count; i++) { + if (catalog->intf[i].type == type + && catalog->intf[i].controller_id == controller_id) { + return catalog->intf[i].id; + } + } + + return INTF_MAX; +} + +static enum dpu_wb dpu_encoder_get_wb(const struct dpu_mdss_cfg *catalog, + enum dpu_intf_type type, u32 controller_id) +{ + int i = 0; + + if (type != INTF_WB) + return WB_MAX; + + for (i = 0; i < catalog->wb_count; i++) { + if (catalog->wb[i].id == controller_id) + return catalog->wb[i].id; + } + + return WB_MAX; +} + +static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc, + struct dpu_encoder_phys *phy_enc) +{ + struct dpu_encoder_virt *dpu_enc = NULL; + unsigned long lock_flags; + + if (!drm_enc || !phy_enc) + return; + + DPU_ATRACE_BEGIN("encoder_vblank_callback"); + dpu_enc = to_dpu_encoder_virt(drm_enc); + + atomic_inc(&phy_enc->vsync_cnt); + + spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); + if (dpu_enc->crtc) + dpu_crtc_vblank_callback(dpu_enc->crtc); + spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); + + DPU_ATRACE_END("encoder_vblank_callback"); +} + +static 
void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc, + struct dpu_encoder_phys *phy_enc) +{ + if (!phy_enc) + return; + + DPU_ATRACE_BEGIN("encoder_underrun_callback"); + atomic_inc(&phy_enc->underrun_cnt); + + /* trigger dump only on the first underrun */ + if (atomic_read(&phy_enc->underrun_cnt) == 1) + msm_disp_snapshot_state(drm_enc->dev); + + trace_dpu_enc_underrun_cb(DRMID(drm_enc), + atomic_read(&phy_enc->underrun_cnt)); + DPU_ATRACE_END("encoder_underrun_callback"); +} + +void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc) +{ + struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); + unsigned long lock_flags; + + spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); + /* crtc should always be cleared before re-assigning */ + WARN_ON(crtc && dpu_enc->crtc); + dpu_enc->crtc = crtc; + spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); +} + +void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc, + struct drm_crtc *crtc, bool enable) +{ + struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); + unsigned long lock_flags; + int i; + + trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable); + + spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); + if (dpu_enc->crtc != crtc) { + spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); + return; + } + spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); + + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; + + if (phys->ops.control_vblank_irq) + phys->ops.control_vblank_irq(phys, enable); + } +} + +void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc, + void (*frame_event_cb)(void *, u32 event), + void *frame_event_cb_data) +{ + struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); + unsigned long lock_flags; + bool enable; + + enable = frame_event_cb ? 
true : false; + + if (!drm_enc) { + DPU_ERROR("invalid encoder\n"); + return; + } + trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable); + + spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); + dpu_enc->crtc_frame_event_cb = frame_event_cb; + dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data; + spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); +} + +static void dpu_encoder_frame_done_callback( + struct drm_encoder *drm_enc, + struct dpu_encoder_phys *ready_phys, u32 event) +{ + struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); + unsigned int i; + + if (event & (DPU_ENCODER_FRAME_EVENT_DONE + | DPU_ENCODER_FRAME_EVENT_ERROR + | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) { + + if (!dpu_enc->frame_busy_mask[0]) { + /* + * suppress frame_done without waiter, + * likely autorefresh + */ + trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event, + dpu_encoder_helper_get_intf_type(ready_phys->intf_mode), + ready_phys->intf_idx, ready_phys->wb_idx); + return; + } + + /* One of the physical encoders has become idle */ + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + if (dpu_enc->phys_encs[i] == ready_phys) { + trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i, + dpu_enc->frame_busy_mask[0]); + clear_bit(i, dpu_enc->frame_busy_mask); + } + } + + if (!dpu_enc->frame_busy_mask[0]) { + atomic_set(&dpu_enc->frame_done_timeout_ms, 0); + del_timer(&dpu_enc->frame_done_timer); + + dpu_encoder_resource_control(drm_enc, + DPU_ENC_RC_EVENT_FRAME_DONE); + + if (dpu_enc->crtc_frame_event_cb) + dpu_enc->crtc_frame_event_cb( + dpu_enc->crtc_frame_event_cb_data, + event); + } + } else { + if (dpu_enc->crtc_frame_event_cb) + dpu_enc->crtc_frame_event_cb( + dpu_enc->crtc_frame_event_cb_data, event); + } +} + +static void dpu_encoder_off_work(struct work_struct *work) +{ + struct dpu_encoder_virt *dpu_enc = container_of(work, + struct dpu_encoder_virt, delayed_off_work.work); + + dpu_encoder_resource_control(&dpu_enc->base, + DPU_ENC_RC_EVENT_ENTER_IDLE); + + dpu_encoder_frame_done_callback(&dpu_enc->base, NULL, + DPU_ENCODER_FRAME_EVENT_IDLE); +} + +/** + * _dpu_encoder_trigger_flush - trigger flush for a physical encoder + * @drm_enc: Pointer to drm encoder structure + * @phys: Pointer to physical encoder structure + * @extra_flush_bits: Additional bit mask to include in flush trigger + */ +static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc, + struct dpu_encoder_phys *phys, uint32_t extra_flush_bits) +{ + struct dpu_hw_ctl *ctl; + int pending_kickoff_cnt; + u32 ret = UINT_MAX; + + if (!phys->hw_pp) { + DPU_ERROR("invalid pingpong hw\n"); + return; + } + + ctl = phys->hw_ctl; + if (!ctl->ops.trigger_flush) { + DPU_ERROR("missing trigger cb\n"); + return; + } + + pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys); + + if (extra_flush_bits && ctl->ops.update_pending_flush) + ctl->ops.update_pending_flush(ctl, extra_flush_bits); + + ctl->ops.trigger_flush(ctl); + + if (ctl->ops.get_pending_flush) + ret = ctl->ops.get_pending_flush(ctl); + + trace_dpu_enc_trigger_flush(DRMID(drm_enc), + dpu_encoder_helper_get_intf_type(phys->intf_mode), + phys->intf_idx, phys->wb_idx, + pending_kickoff_cnt, ctl->idx, + extra_flush_bits, ret); +} + +/** + * _dpu_encoder_trigger_start - trigger start for a physical encoder + * @phys: Pointer to physical encoder structure + */ +static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys) +{ + if (!phys) { + DPU_ERROR("invalid argument(s)\n"); + return; + } + + if (!phys->hw_pp) { + DPU_ERROR("invalid pingpong hw\n"); + return;
+ } + + if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED) + phys->ops.trigger_start(phys); +} + +void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc) +{ + struct dpu_hw_ctl *ctl; + + ctl = phys_enc->hw_ctl; + if (ctl->ops.trigger_start) { + ctl->ops.trigger_start(ctl); + trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx); + } +} + +static int dpu_encoder_helper_wait_event_timeout( + int32_t drm_id, + u32 irq_idx, + struct dpu_encoder_wait_info *info) +{ + int rc = 0; + s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms; + s64 jiffies = msecs_to_jiffies(info->timeout_ms); + s64 time; + + do { + rc = wait_event_timeout(*(info->wq), + atomic_read(info->atomic_cnt) == 0, jiffies); + time = ktime_to_ms(ktime_get()); + + trace_dpu_enc_wait_event_timeout(drm_id, irq_idx, rc, time, + expected_time, + atomic_read(info->atomic_cnt)); + /* If we timed out but the counter is still set and the deadline has not passed, wait again */ + } while (atomic_read(info->atomic_cnt) && (rc == 0) && + (time < expected_time)); + + return rc; +} + +static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc) +{ + struct dpu_encoder_virt *dpu_enc; + struct dpu_hw_ctl *ctl; + int rc; + struct drm_encoder *drm_enc; + + dpu_enc = to_dpu_encoder_virt(phys_enc->parent); + ctl = phys_enc->hw_ctl; + drm_enc = phys_enc->parent; + + if (!ctl->ops.reset) + return; + + DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc), + ctl->idx); + + rc = ctl->ops.reset(ctl); + if (rc) { + DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx); + msm_disp_snapshot_state(drm_enc->dev); + } + + phys_enc->enable_state = DPU_ENC_ENABLED; +} + +/** + * _dpu_encoder_kickoff_phys - handle physical encoder kickoff + * Iterate through the physical encoders and perform consolidated flush + * and/or control start triggering as needed. This is done in the virtual + * encoder rather than the individual physical ones in order to handle + * use cases that require visibility into multiple physical encoders at + * a time. + * @dpu_enc: Pointer to virtual encoder structure + */ +static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc) +{ + struct dpu_hw_ctl *ctl; + uint32_t i, pending_flush; + unsigned long lock_flags; + + pending_flush = 0x0; + + /* update pending counts and trigger kickoff ctl flush atomically */ + spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags); + + /* don't perform flush/start operations for slave encoders */ + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; + + if (phys->enable_state == DPU_ENC_DISABLED) + continue; + + ctl = phys->hw_ctl; + + /* + * This is cleared in frame_done worker, which isn't invoked + * for async commits. So don't set this for async, since it'll + * roll over to the next commit.
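+ * + * Only non-slave encoders set the frame busy bit below; encoders + * that need a single flush have their flush bits collected and + * issued through the master instead.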
+ */ + if (phys->split_role != ENC_ROLE_SLAVE) + set_bit(i, dpu_enc->frame_busy_mask); + + if (!phys->ops.needs_single_flush || + !phys->ops.needs_single_flush(phys)) + _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0); + else if (ctl->ops.get_pending_flush) + pending_flush |= ctl->ops.get_pending_flush(ctl); + } + + /* for split flush, combine pending flush masks and send to master */ + if (pending_flush && dpu_enc->cur_master) { + _dpu_encoder_trigger_flush( + &dpu_enc->base, + dpu_enc->cur_master, + pending_flush); + } + + _dpu_encoder_trigger_start(dpu_enc->cur_master); + + spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags); +} + +void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc) +{ + struct dpu_encoder_virt *dpu_enc; + struct dpu_encoder_phys *phys; + unsigned int i; + struct dpu_hw_ctl *ctl; + struct msm_display_info *disp_info; + + if (!drm_enc) { + DPU_ERROR("invalid encoder\n"); + return; + } + dpu_enc = to_dpu_encoder_virt(drm_enc); + disp_info = &dpu_enc->disp_info; + + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + phys = dpu_enc->phys_encs[i]; + + ctl = phys->hw_ctl; + if (ctl->ops.clear_pending_flush) + ctl->ops.clear_pending_flush(ctl); + + /* update only for command mode primary ctl */ + if ((phys == dpu_enc->cur_master) && + disp_info->is_cmd_mode + && ctl->ops.trigger_pending) + ctl->ops.trigger_pending(ctl); + } +} + +static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc, + struct drm_display_mode *mode) +{ + u64 pclk_rate; + u32 pclk_period; + u32 line_time; + + /* + * For linetime calculation, only operate on master encoder. + */ + if (!dpu_enc->cur_master) + return 0; + + if (!dpu_enc->cur_master->ops.get_line_count) { + DPU_ERROR("get_line_count function not defined\n"); + return 0; + } + + pclk_rate = mode->clock; /* pixel clock in kHz */ + if (pclk_rate == 0) { + DPU_ERROR("pclk is 0, cannot calculate line time\n"); + return 0; + } + + pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate); + if (pclk_period == 0) { + DPU_ERROR("pclk period is 0\n"); + return 0; + } + + /* + * Line time calculation based on Pixel clock and HTOTAL. + * Final unit is in ns. 
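+ * Since pclk_rate is in kHz, pclk_period above is in picoseconds. + * Worked example (hypothetical numbers): pclk_rate = 148500 kHz + * gives pclk_period = DIV_ROUND_UP(10^9, 148500) = 6735 ps; with + * htotal = 2200, line_time = 6735 * 2200 / 1000 = 14817 ns.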
+ */ + line_time = (pclk_period * mode->htotal) / 1000; + if (line_time == 0) { + DPU_ERROR("line time calculation is 0\n"); + return 0; + } + + DPU_DEBUG_ENC(dpu_enc, + "clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n", + pclk_rate, pclk_period, line_time); + + return line_time; +} + +int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time) +{ + struct drm_display_mode *mode; + struct dpu_encoder_virt *dpu_enc; + u32 cur_line; + u32 line_time; + u32 vtotal, time_to_vsync; + ktime_t cur_time; + + dpu_enc = to_dpu_encoder_virt(drm_enc); + + if (!drm_enc->crtc || !drm_enc->crtc->state) { + DPU_ERROR("crtc/crtc state object is NULL\n"); + return -EINVAL; + } + mode = &drm_enc->crtc->state->adjusted_mode; + + line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode); + if (!line_time) + return -EINVAL; + + cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master); + + vtotal = mode->vtotal; + if (cur_line >= vtotal) + time_to_vsync = line_time * vtotal; + else + time_to_vsync = line_time * (vtotal - cur_line); + + if (time_to_vsync == 0) { + DPU_ERROR("time to vsync should not be zero, vtotal=%d\n", + vtotal); + return -EINVAL; + } + + cur_time = ktime_get(); + *wakeup_time = ktime_add_ns(cur_time, time_to_vsync); + + DPU_DEBUG_ENC(dpu_enc, + "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n", + cur_line, vtotal, time_to_vsync, + ktime_to_ms(cur_time), + ktime_to_ms(*wakeup_time)); + return 0; +} + +static void dpu_encoder_vsync_event_handler(struct timer_list *t) +{ + struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t, + vsync_event_timer); + struct drm_encoder *drm_enc = &dpu_enc->base; + struct msm_drm_private *priv; + struct msm_drm_thread *event_thread; + + if (!drm_enc->dev || !drm_enc->crtc) { + DPU_ERROR("invalid parameters\n"); + return; + } + + priv = drm_enc->dev->dev_private; + + if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) { + DPU_ERROR("invalid crtc index\n"); + return; + } + event_thread = &priv->event_thread[drm_enc->crtc->index]; + if (!event_thread) { + DPU_ERROR("event_thread not found for crtc:%d\n", + drm_enc->crtc->index); + return; + } + + del_timer(&dpu_enc->vsync_event_timer); +} + +static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work) +{ + struct dpu_encoder_virt *dpu_enc = container_of(work, + struct dpu_encoder_virt, vsync_event_work); + ktime_t wakeup_time; + + if (dpu_encoder_vsync_time(&dpu_enc->base, &wakeup_time)) + return; + + trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time); + mod_timer(&dpu_enc->vsync_event_timer, + nsecs_to_jiffies(ktime_to_ns(wakeup_time))); +} + +static u32 +dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc, + u32 enc_ip_width) +{ + int ssm_delay, total_pixels, soft_slice_per_enc; + + soft_slice_per_enc = enc_ip_width / dsc->slice_width; + + /* + * minimum number of initial line pixels is a sum of: + * 1. sub-stream multiplexer delay (83 groups for 8bpc, + * 91 for 10 bpc) * 3 + * 2. for two soft slice cases, add extra sub-stream multiplexer * 3 + * 3. the initial xmit delay + * 4. total pipeline delay through the "lock step" of encoder (47) + * 5. 6 additional pixels as the output of the rate buffer is + * 48 bits wide + */ + ssm_delay = ((dsc->bits_per_component < 10) ? 
84 : 92); + total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47; + if (soft_slice_per_enc > 1) + total_pixels += (ssm_delay * 3); + return DIV_ROUND_UP(total_pixels, dsc->slice_width); +} + +static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc, + struct dpu_hw_pingpong *hw_pp, + struct drm_dsc_config *dsc, + u32 common_mode, + u32 initial_lines) +{ + if (hw_dsc->ops.dsc_config) + hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines); + + if (hw_dsc->ops.dsc_config_thresh) + hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc); + + if (hw_pp->ops.setup_dsc) + hw_pp->ops.setup_dsc(hw_pp); + + if (hw_pp->ops.enable_dsc) + hw_pp->ops.enable_dsc(hw_pp); +} + +static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc, + struct drm_dsc_config *dsc) +{ + /* this currently handles only the 2 LM, 2 enc, 1 DSC config */ + struct dpu_encoder_phys *enc_master = dpu_enc->cur_master; + struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC]; + struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC]; + int this_frame_slices; + int intf_ip_w, enc_ip_w; + int dsc_common_mode; + int pic_width; + u32 initial_lines; + int i; + + for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) { + hw_pp[i] = dpu_enc->hw_pp[i]; + hw_dsc[i] = dpu_enc->hw_dsc[i]; + + if (!hw_pp[i] || !hw_dsc[i]) { + DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n"); + return; + } + } + + dsc_common_mode = 0; + pic_width = dsc->pic_width; + + dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL; + if (enc_master->intf_mode == INTF_MODE_VIDEO) + dsc_common_mode |= DSC_MODE_VIDEO; + + this_frame_slices = pic_width / dsc->slice_width; + intf_ip_w = this_frame_slices * dsc->slice_width; + + /* + * dsc merge case: when using 2 encoders for the same stream, + * the number of slices needs to be the same on both encoders. + */ + enc_ip_w = intf_ip_w / 2; + initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w); + + for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) + dpu_encoder_dsc_pipe_cfg(hw_dsc[i], hw_pp[i], dsc, dsc_common_mode, initial_lines); +} + +void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc) +{ + struct dpu_encoder_virt *dpu_enc; + struct dpu_encoder_phys *phys; + bool needs_hw_reset = false; + unsigned int i; + + dpu_enc = to_dpu_encoder_virt(drm_enc); + + trace_dpu_enc_prepare_kickoff(DRMID(drm_enc)); + + /* prepare for next kickoff, may include waiting on previous kickoff */ + DPU_ATRACE_BEGIN("enc_prepare_for_kickoff"); + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + phys = dpu_enc->phys_encs[i]; + if (phys->ops.prepare_for_kickoff) + phys->ops.prepare_for_kickoff(phys); + if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET) + needs_hw_reset = true; + } + DPU_ATRACE_END("enc_prepare_for_kickoff"); + + dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF); + + /* if any phys needs reset, reset all phys, in-order */ + if (needs_hw_reset) { + trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc)); + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]); + } + } + + if (dpu_enc->dsc) + dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc); +} + +bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc) +{ + struct dpu_encoder_virt *dpu_enc; + unsigned int i; + struct dpu_encoder_phys *phys; + + dpu_enc = to_dpu_encoder_virt(drm_enc); + + if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) { + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + phys = dpu_enc->phys_encs[i]; + if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) { + DPU_DEBUG("invalid FB not
kicking off\n"); + return false; + } + } + } + + return true; +} + +void dpu_encoder_kickoff(struct drm_encoder *drm_enc) +{ + struct dpu_encoder_virt *dpu_enc; + struct dpu_encoder_phys *phys; + ktime_t wakeup_time; + unsigned long timeout_ms; + unsigned int i; + + DPU_ATRACE_BEGIN("encoder_kickoff"); + dpu_enc = to_dpu_encoder_virt(drm_enc); + + trace_dpu_enc_kickoff(DRMID(drm_enc)); + + timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 / + drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode); + + atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms); + mod_timer(&dpu_enc->frame_done_timer, + jiffies + msecs_to_jiffies(timeout_ms)); + + /* All phys encs are ready to go, trigger the kickoff */ + _dpu_encoder_kickoff_phys(dpu_enc); + + /* allow phys encs to handle any post-kickoff business */ + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + phys = dpu_enc->phys_encs[i]; + if (phys->ops.handle_post_kickoff) + phys->ops.handle_post_kickoff(phys); + } + + if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI && + !dpu_encoder_vsync_time(drm_enc, &wakeup_time)) { + trace_dpu_enc_early_kickoff(DRMID(drm_enc), + ktime_to_ms(wakeup_time)); + mod_timer(&dpu_enc->vsync_event_timer, + nsecs_to_jiffies(ktime_to_ns(wakeup_time))); + } + + DPU_ATRACE_END("encoder_kickoff"); +} + +static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc) +{ + struct dpu_hw_mixer_cfg mixer; + int i, num_lm; + struct dpu_global_state *global_state; + struct dpu_hw_blk *hw_lm[2]; + struct dpu_hw_mixer *hw_mixer[2]; + struct dpu_hw_ctl *ctl = phys_enc->hw_ctl; + + memset(&mixer, 0, sizeof(mixer)); + + /* reset all mixers for this encoder */ + if (phys_enc->hw_ctl->ops.clear_all_blendstages) + phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl); + + global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms); + + num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state, + phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); + + for (i = 0; i < num_lm; i++) { + hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]); + if (phys_enc->hw_ctl->ops.update_pending_flush_mixer) + phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx); + + /* clear all blendstages */ + if (phys_enc->hw_ctl->ops.setup_blendstage) + phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL); + } +} + +void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) +{ + struct dpu_hw_ctl *ctl = phys_enc->hw_ctl; + struct dpu_hw_intf_cfg intf_cfg = { 0 }; + int i; + struct dpu_encoder_virt *dpu_enc; + + dpu_enc = to_dpu_encoder_virt(phys_enc->parent); + + phys_enc->hw_ctl->ops.reset(ctl); + + dpu_encoder_helper_reset_mixers(phys_enc); + + /* + * TODO: move the once-only operation like CTL flush/trigger + * into dpu_encoder_virt_disable() and all operations which need + * to be done per phys encoder into the phys_disable() op. 
+ */ + if (phys_enc->hw_wb) { + /* disable the PP block */ + if (phys_enc->hw_wb->ops.bind_pingpong_blk) + phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, false, + phys_enc->hw_pp->idx); + + /* mark WB flush as pending */ + if (phys_enc->hw_ctl->ops.update_pending_flush_wb) + phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx); + } else { + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk) + phys_enc->hw_intf->ops.bind_pingpong_blk( + dpu_enc->phys_encs[i]->hw_intf, false, + dpu_enc->phys_encs[i]->hw_pp->idx); + + /* mark INTF flush as pending */ + if (phys_enc->hw_ctl->ops.update_pending_flush_intf) + phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl, + dpu_enc->phys_encs[i]->hw_intf->idx); + } + } + + /* reset the merge 3D HW block */ + if (phys_enc->hw_pp->merge_3d) { + phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, + BLEND_3D_NONE); + if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d) + phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl, + phys_enc->hw_pp->merge_3d->idx); + } + + intf_cfg.stream_sel = 0; /* Don't care value for video mode */ + intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); + + if (phys_enc->hw_intf) + intf_cfg.intf = phys_enc->hw_intf->idx; + if (phys_enc->hw_wb) + intf_cfg.wb = phys_enc->hw_wb->idx; + + if (phys_enc->hw_pp->merge_3d) + intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx; + + if (ctl->ops.reset_intf_cfg) + ctl->ops.reset_intf_cfg(ctl, &intf_cfg); + + ctl->ops.trigger_flush(ctl); + ctl->ops.trigger_start(ctl); + ctl->ops.clear_pending_flush(ctl); +} + +void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc) +{ + struct dpu_encoder_virt *dpu_enc; + struct dpu_encoder_phys *phys; + int i; + + if (!drm_enc) { + DPU_ERROR("invalid encoder\n"); + return; + } + dpu_enc = to_dpu_encoder_virt(drm_enc); + + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + phys = dpu_enc->phys_encs[i]; + if (phys->ops.prepare_commit) + phys->ops.prepare_commit(phys); + } +} + +#ifdef CONFIG_DEBUG_FS +static int _dpu_encoder_status_show(struct seq_file *s, void *data) +{ + struct dpu_encoder_virt *dpu_enc = s->private; + int i; + + mutex_lock(&dpu_enc->enc_lock); + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; + + seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d ", + phys->intf_idx - INTF_0, phys->wb_idx - WB_0, + atomic_read(&phys->vsync_cnt), + atomic_read(&phys->underrun_cnt)); + + seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode)); + } + mutex_unlock(&dpu_enc->enc_lock); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status); + +static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) +{ + struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); + int i; + + char name[DPU_NAME_SIZE]; + + if (!drm_enc->dev) { + DPU_ERROR("invalid encoder or kms\n"); + return -EINVAL; + } + + snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id); + + /* create overall sub-directory for the encoder */ + dpu_enc->debugfs_root = debugfs_create_dir(name, + drm_enc->dev->primary->debugfs_root); + + /* don't error check these */ + debugfs_create_file("status", 0600, + dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops); + + for (i = 0; i < dpu_enc->num_phys_encs; i++) + if (dpu_enc->phys_encs[i]->ops.late_register) + dpu_enc->phys_encs[i]->ops.late_register( + dpu_enc->phys_encs[i], + 
dpu_enc->debugfs_root); + + return 0; +} +#else +static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc) +{ + return 0; +} +#endif + +static int dpu_encoder_late_register(struct drm_encoder *encoder) +{ + return _dpu_encoder_init_debugfs(encoder); +} + +static void dpu_encoder_early_unregister(struct drm_encoder *encoder) +{ + struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder); + + debugfs_remove_recursive(dpu_enc->debugfs_root); +} + +static int dpu_encoder_virt_add_phys_encs( + struct msm_display_info *disp_info, + struct dpu_encoder_virt *dpu_enc, + struct dpu_enc_phys_init_params *params) +{ + struct dpu_encoder_phys *enc = NULL; + + DPU_DEBUG_ENC(dpu_enc, "\n"); + + /* + * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types + * in this function, check up-front. + */ + if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >= + ARRAY_SIZE(dpu_enc->phys_encs)) { + DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n", + dpu_enc->num_phys_encs); + return -EINVAL; + } + + + if (disp_info->intf_type == DRM_MODE_ENCODER_VIRTUAL) { + enc = dpu_encoder_phys_wb_init(params); + + if (IS_ERR(enc)) { + DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n", + PTR_ERR(enc)); + return PTR_ERR(enc); + } + + dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; + ++dpu_enc->num_phys_encs; + } else if (disp_info->is_cmd_mode) { + enc = dpu_encoder_phys_cmd_init(params); + + if (IS_ERR(enc)) { + DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n", + PTR_ERR(enc)); + return PTR_ERR(enc); + } + + dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; + ++dpu_enc->num_phys_encs; + } else { + enc = dpu_encoder_phys_vid_init(params); + + if (IS_ERR(enc)) { + DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n", + PTR_ERR(enc)); + return PTR_ERR(enc); + } + + dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc; + ++dpu_enc->num_phys_encs; + } + + if (params->split_role == ENC_ROLE_SLAVE) + dpu_enc->cur_slave = enc; + else + dpu_enc->cur_master = enc; + + return 0; +} + +static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = { + .handle_vblank_virt = dpu_encoder_vblank_callback, + .handle_underrun_virt = dpu_encoder_underrun_callback, + .handle_frame_done = dpu_encoder_frame_done_callback, +}; + +static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc, + struct dpu_kms *dpu_kms, + struct msm_display_info *disp_info) +{ + int ret = 0; + int i = 0; + enum dpu_intf_type intf_type = INTF_NONE; + struct dpu_enc_phys_init_params phys_params; + + if (!dpu_enc) { + DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL); + return -EINVAL; + } + + dpu_enc->cur_master = NULL; + + memset(&phys_params, 0, sizeof(phys_params)); + phys_params.dpu_kms = dpu_kms; + phys_params.parent = &dpu_enc->base; + phys_params.parent_ops = &dpu_encoder_parent_ops; + phys_params.enc_spinlock = &dpu_enc->enc_spinlock; + + switch (disp_info->intf_type) { + case DRM_MODE_ENCODER_DSI: + intf_type = INTF_DSI; + break; + case DRM_MODE_ENCODER_TMDS: + intf_type = INTF_DP; + break; + case DRM_MODE_ENCODER_VIRTUAL: + intf_type = INTF_WB; + break; + } + + WARN_ON(disp_info->num_of_h_tiles < 1); + + DPU_DEBUG("disp_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles); + + if (disp_info->intf_type != DRM_MODE_ENCODER_VIRTUAL) + dpu_enc->idle_pc_supported = + dpu_kms->catalog->caps->has_idle_pc; + + dpu_enc->dsc = disp_info->dsc; + + mutex_lock(&dpu_enc->enc_lock); + for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) { + /* + * Left-most tile is at index 0, content is controller id + *
h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right + * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right + */ + u32 controller_id = disp_info->h_tile_instance[i]; + + if (disp_info->num_of_h_tiles > 1) { + if (i == 0) + phys_params.split_role = ENC_ROLE_MASTER; + else + phys_params.split_role = ENC_ROLE_SLAVE; + } else { + phys_params.split_role = ENC_ROLE_SOLO; + } + + DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n", + i, controller_id, phys_params.split_role); + + phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog, + intf_type, + controller_id); + + phys_params.wb_idx = dpu_encoder_get_wb(dpu_kms->catalog, + intf_type, controller_id); + /* + * The phys_params might represent either an INTF or a WB unit, but not + * both of them at the same time. + */ + if ((phys_params.intf_idx == INTF_MAX) && + (phys_params.wb_idx == WB_MAX)) { + DPU_ERROR_ENC(dpu_enc, "could not get intf or wb: type %d, id %d\n", + intf_type, controller_id); + ret = -EINVAL; + } + + if ((phys_params.intf_idx != INTF_MAX) && + (phys_params.wb_idx != WB_MAX)) { + DPU_ERROR_ENC(dpu_enc, "both intf and wb present: type %d, id %d\n", + intf_type, controller_id); + ret = -EINVAL; + } + + if (!ret) { + ret = dpu_encoder_virt_add_phys_encs(disp_info, + dpu_enc, &phys_params); + if (ret) + DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n"); + } + } + + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; + atomic_set(&phys->vsync_cnt, 0); + atomic_set(&phys->underrun_cnt, 0); + + if (phys->intf_idx >= INTF_0 && phys->intf_idx < INTF_MAX) + phys->hw_intf = dpu_rm_get_intf(&dpu_kms->rm, phys->intf_idx); + + if (phys->wb_idx >= WB_0 && phys->wb_idx < WB_MAX) + phys->hw_wb = dpu_rm_get_wb(&dpu_kms->rm, phys->wb_idx); + + if (!phys->hw_intf && !phys->hw_wb) { + DPU_ERROR_ENC(dpu_enc, "no intf or wb block assigned at idx: %d\n", i); + ret = -EINVAL; + } + + if (phys->hw_intf && phys->hw_wb) { + DPU_ERROR_ENC(dpu_enc, + "invalid phys both intf and wb block at idx: %d\n", i); + ret = -EINVAL; + } + } + + mutex_unlock(&dpu_enc->enc_lock); + + return ret; +} + +static void dpu_encoder_frame_done_timeout(struct timer_list *t) +{ + struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t, + frame_done_timer); + struct drm_encoder *drm_enc = &dpu_enc->base; + u32 event; + + if (!drm_enc->dev) { + DPU_ERROR("invalid parameters\n"); + return; + } + + if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) { + DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n", + DRMID(drm_enc), dpu_enc->frame_busy_mask[0]); + return; + } else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) { + DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc)); + return; + } + + DPU_ERROR_ENC(dpu_enc, "frame done timeout\n"); + + event = DPU_ENCODER_FRAME_EVENT_ERROR; + trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event); + dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event); +} + +static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = { + .atomic_mode_set = dpu_encoder_virt_atomic_mode_set, + .disable = dpu_encoder_virt_disable, + .enable = dpu_encoder_virt_enable, + .atomic_check = dpu_encoder_virt_atomic_check, +}; + +static const struct drm_encoder_funcs dpu_encoder_funcs = { + .destroy = dpu_encoder_destroy, + .late_register = dpu_encoder_late_register, + .early_unregister = dpu_encoder_early_unregister, +}; + +int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc, + struct msm_display_info *disp_info) 
+{ + struct msm_drm_private *priv = dev->dev_private; + struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); + struct drm_encoder *drm_enc = NULL; + struct dpu_encoder_virt *dpu_enc = NULL; + int ret = 0; + + dpu_enc = to_dpu_encoder_virt(enc); + + ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info); + if (ret) + goto fail; + + atomic_set(&dpu_enc->frame_done_timeout_ms, 0); + timer_setup(&dpu_enc->frame_done_timer, + dpu_encoder_frame_done_timeout, 0); + + if (disp_info->intf_type == DRM_MODE_ENCODER_DSI) + timer_setup(&dpu_enc->vsync_event_timer, + dpu_encoder_vsync_event_handler, + 0); + else if (disp_info->intf_type == DRM_MODE_ENCODER_TMDS) + dpu_enc->wide_bus_en = msm_dp_wide_bus_available( + priv->dp[disp_info->h_tile_instance[0]]); + + INIT_DELAYED_WORK(&dpu_enc->delayed_off_work, + dpu_encoder_off_work); + dpu_enc->idle_timeout = IDLE_TIMEOUT; + + kthread_init_work(&dpu_enc->vsync_event_work, + dpu_encoder_vsync_event_work_handler); + + memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info)); + + DPU_DEBUG_ENC(dpu_enc, "created\n"); + + return ret; + +fail: + DPU_ERROR("failed to create encoder\n"); + if (drm_enc) + dpu_encoder_destroy(drm_enc); + + return ret; + + +} + +struct drm_encoder *dpu_encoder_init(struct drm_device *dev, + int drm_enc_mode) +{ + struct dpu_encoder_virt *dpu_enc = NULL; + int rc = 0; + + dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL); + if (!dpu_enc) + return ERR_PTR(-ENOMEM); + + + rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs, + drm_enc_mode, NULL); + if (rc) { + devm_kfree(dev->dev, dpu_enc); + return ERR_PTR(rc); + } + + drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs); + + spin_lock_init(&dpu_enc->enc_spinlock); + dpu_enc->enabled = false; + mutex_init(&dpu_enc->enc_lock); + mutex_init(&dpu_enc->rc_lock); + + return &dpu_enc->base; +} + +int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc, + enum msm_event_wait event) +{ + int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL; + struct dpu_encoder_virt *dpu_enc = NULL; + int i, ret = 0; + + if (!drm_enc) { + DPU_ERROR("invalid encoder\n"); + return -EINVAL; + } + dpu_enc = to_dpu_encoder_virt(drm_enc); + DPU_DEBUG_ENC(dpu_enc, "\n"); + + for (i = 0; i < dpu_enc->num_phys_encs; i++) { + struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i]; + + switch (event) { + case MSM_ENC_COMMIT_DONE: + fn_wait = phys->ops.wait_for_commit_done; + break; + case MSM_ENC_TX_COMPLETE: + fn_wait = phys->ops.wait_for_tx_complete; + break; + case MSM_ENC_VBLANK: + fn_wait = phys->ops.wait_for_vblank; + break; + default: + DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n", + event); + return -EINVAL; + } + + if (fn_wait) { + DPU_ATRACE_BEGIN("wait_for_completion_event"); + ret = fn_wait(phys); + DPU_ATRACE_END("wait_for_completion_event"); + if (ret) + return ret; + } + } + + return ret; +} + +enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder) +{ + struct dpu_encoder_virt *dpu_enc = NULL; + + if (!encoder) { + DPU_ERROR("invalid encoder\n"); + return INTF_MODE_NONE; + } + dpu_enc = to_dpu_encoder_virt(encoder); + + if (dpu_enc->cur_master) + return dpu_enc->cur_master->intf_mode; + + if (dpu_enc->num_phys_encs) + return dpu_enc->phys_encs[0]->intf_mode; + + return INTF_MODE_NONE; +} + +unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc) +{ + struct drm_encoder *encoder = phys_enc->parent; + struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder); + + return dpu_enc->dsc_mask; +} diff --git 
a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h new file mode 100644 index 000000000..9e7236ef3 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h @@ -0,0 +1,227 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + * Copyright (C) 2013 Red Hat + * Author: Rob Clark <robdclark@gmail.com> + */ + +#ifndef __DPU_ENCODER_H__ +#define __DPU_ENCODER_H__ + +#include <drm/drm_crtc.h> +#include "dpu_hw_mdss.h" + +#define DPU_ENCODER_FRAME_EVENT_DONE BIT(0) +#define DPU_ENCODER_FRAME_EVENT_ERROR BIT(1) +#define DPU_ENCODER_FRAME_EVENT_PANEL_DEAD BIT(2) +#define DPU_ENCODER_FRAME_EVENT_IDLE BIT(3) + +#define IDLE_TIMEOUT (66 - 16/2) + +/** + * struct msm_display_info - defines display properties + * @intf_type: DRM_MODE_ENCODER_ type + * @num_of_h_tiles: Number of horizontal tiles in case of split interface + * @h_tile_instance: Controller instance used per tile. Number of elements is + * based on num_of_h_tiles + * @is_cmd_mode: Boolean to indicate if the CMD mode is requested + * @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is + * used instead of panel TE in cmd mode panels + * @dsc: DSC configuration data for DSC-enabled displays + */ +struct msm_display_info { + int intf_type; + uint32_t num_of_h_tiles; + uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY]; + bool is_cmd_mode; + bool is_te_using_watchdog_timer; + struct drm_dsc_config *dsc; +}; + +/** + * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to + * @encoder: encoder pointer + * @crtc: crtc pointer + */ +void dpu_encoder_assign_crtc(struct drm_encoder *encoder, + struct drm_crtc *crtc); + +/** + * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if + * the encoder is assigned to the given crtc + * @encoder: encoder pointer + * @crtc: crtc pointer + * @enable: true if vblank should be enabled + */ +void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *encoder, + struct drm_crtc *crtc, bool enable); + +/** + * dpu_encoder_register_frame_event_callback - provide callback to encoder that + * will be called after the request is complete, or other events. + * @encoder: encoder pointer + * @cb: callback pointer, provide NULL to deregister + * @data: user data provided to callback + */ +void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder, + void (*cb)(void *, u32), void *data); + +/** + * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl + * path (i.e. ctl flush and start) at next appropriate time. + * Immediately: if no previous commit is outstanding. + * Delayed: Block until next trigger can be issued. + * @encoder: encoder pointer + */ +void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder); + +/** + * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous + * kickoff and trigger the ctl prepare progress for command mode display. + * @encoder: encoder pointer + */ +void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder); + +/** + * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path + * (i.e. ctl flush and start) immediately.
+ * @encoder: encoder pointer + */ +void dpu_encoder_kickoff(struct drm_encoder *encoder); + +/** + * dpu_encoder_vsync_time - get the time of the next vsync + * @drm_enc: Pointer to drm encoder structure + * @wakeup_time: pointer to the ktime_t to be filled with the next vsync time + */ +int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time); + +/** + * dpu_encoder_wait_for_event - Waits for encoder events + * @encoder: encoder pointer + * @event: event to wait for + * MSM_ENC_COMMIT_DONE - Wait for hardware to have flushed the current pending + * frames to hardware at a vblank or ctl_start + * Encoders will map this differently depending on the + * panel type. + * vid mode -> vsync_irq + * cmd mode -> ctl_start + * MSM_ENC_TX_COMPLETE - Wait for the hardware to transfer all the pixels to + * the panel. Encoders will map this differently + * depending on the panel type. + * vid mode -> vsync_irq + * cmd mode -> pp_done + * Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise + */ +int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder, + enum msm_event_wait event); + +/** + * dpu_encoder_get_intf_mode - get interface mode of the given encoder + * @encoder: Pointer to drm encoder object + */ +enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder); + +/** + * dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs + * @encoder: encoder pointer + */ +void dpu_encoder_virt_runtime_resume(struct drm_encoder *encoder); + +/** + * dpu_encoder_init - initialize virtual encoder object + * @dev: Pointer to drm device structure + * @drm_enc_mode: corresponding DRM_MODE_ENCODER_* type + * Returns: Pointer to newly created drm encoder + */ +struct drm_encoder *dpu_encoder_init( + struct drm_device *dev, + int drm_enc_mode); + +/** + * dpu_encoder_setup - setup dpu_encoder for the display probed + * @dev: Pointer to drm device structure + * @enc: Pointer to the drm_encoder + * @disp_info: Pointer to the display info + */ +int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc, + struct msm_display_info *disp_info); + +/** + * dpu_encoder_prepare_commit - prepare encoder at the very beginning of an + * atomic commit, before any registers are written + * @drm_enc: Pointer to previously created drm encoder structure + */ +void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc); + +/** + * dpu_encoder_set_idle_timeout - set the idle timeout for video + * and command mode encoders. + * @drm_enc: Pointer to previously created drm encoder structure + * @idle_timeout: idle timeout duration in milliseconds + */ +void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc, + u32 idle_timeout); +/** + * dpu_encoder_get_linecount - get interface line count for the encoder. + * @drm_enc: Pointer to previously created drm encoder structure + */ +int dpu_encoder_get_linecount(struct drm_encoder *drm_enc); + +/** + * dpu_encoder_get_vsync_count - get vsync count for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure + */ +int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc); + +bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc); + +/** + * dpu_encoder_get_crc_values_cnt - get number of physical encoders contained + * in virtual encoder that can collect CRC values + * @drm_enc: Pointer to previously created drm encoder structure + * Returns: Number of physical encoders for given drm encoder + */ +int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc); + +/** + * dpu_encoder_setup_misr - enable misr calculations + * @drm_encoder: Pointer to previously created drm encoder structure + */ +void dpu_encoder_setup_misr(const struct drm_encoder *drm_encoder); + +/** + * dpu_encoder_get_crc - get the crc value from interface blocks + * @drm_enc: Pointer to previously created drm encoder structure + * @crcs: array to fill with CRC data + * @pos: index into @crcs at which to start writing + * Returns: 0 on success, error otherwise + */ +int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos); + +/** + * dpu_encoder_use_dsc_merge - returns true if the encoder uses DSC merge topology. + * @drm_enc: Pointer to previously created drm encoder structure + */ +bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc); + +/** + * dpu_encoder_prepare_wb_job - prepare writeback job for the encoder. + * @drm_enc: Pointer to previously created drm encoder structure + * @job: Pointer to the current drm writeback job + */ +void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc, + struct drm_writeback_job *job); + +/** + * dpu_encoder_cleanup_wb_job - cleanup writeback job for the encoder. + * @drm_enc: Pointer to previously created drm encoder structure + * @job: Pointer to the current drm writeback job + */ +void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc, + struct drm_writeback_job *job); + +/** + * dpu_encoder_is_valid_for_commit - check if encoder has valid parameters for commit. + * @drm_enc: Pointer to drm encoder structure + */ +bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc); + +#endif /* __DPU_ENCODER_H__ */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h new file mode 100644 index 000000000..f2af07d87 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h @@ -0,0 +1,403 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved. + */ + +#ifndef __DPU_ENCODER_PHYS_H__ +#define __DPU_ENCODER_PHYS_H__ + +#include +#include + +#include "dpu_kms.h" +#include "dpu_hw_intf.h" +#include "dpu_hw_wb.h" +#include "dpu_hw_pingpong.h" +#include "dpu_hw_ctl.h" +#include "dpu_hw_top.h" +#include "dpu_encoder.h" +#include "dpu_crtc.h" + +#define DPU_ENCODER_NAME_MAX 16 + +/* wait for at most 2 vsyncs for the lowest refresh rate (24 Hz) */ +#define KICKOFF_TIMEOUT_MS 84 +#define KICKOFF_TIMEOUT_JIFFIES msecs_to_jiffies(KICKOFF_TIMEOUT_MS) + +/** + * enum dpu_enc_split_role - Role this physical encoder will play in a + * split-panel configuration, where one panel is master, and others slaves. + * Masters have extra responsibilities, like managing the VBLANK IRQ. + * @ENC_ROLE_SOLO: This is the one and only panel. This encoder is master. + * @ENC_ROLE_MASTER: This encoder is the master of a split panel config. + * @ENC_ROLE_SLAVE: This encoder is not the master of a split panel config.
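+ * + * Roles are assigned in dpu_encoder_setup_display(): the left-most tile + * (index 0) becomes ENC_ROLE_MASTER, further tiles become ENC_ROLE_SLAVE, + * and a single-tile display gets ENC_ROLE_SOLO.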
+ */ +enum dpu_enc_split_role { + ENC_ROLE_SOLO, + ENC_ROLE_MASTER, + ENC_ROLE_SLAVE, +}; + +/** + * enum dpu_enc_enable_state - current enabled state of the physical encoder + * @DPU_ENC_DISABLING: Encoder transitioning to disable state + * Events bounding transition are encoder type specific + * @DPU_ENC_DISABLED: Encoder is disabled + * @DPU_ENC_ENABLING: Encoder transitioning to enabled + * Events bounding transition are encoder type specific + * @DPU_ENC_ENABLED: Encoder is enabled + * @DPU_ENC_ERR_NEEDS_HW_RESET: Encoder is enabled, but requires a hw_reset + * to recover from a previous error + */ +enum dpu_enc_enable_state { + DPU_ENC_DISABLING, + DPU_ENC_DISABLED, + DPU_ENC_ENABLING, + DPU_ENC_ENABLED, + DPU_ENC_ERR_NEEDS_HW_RESET +}; + +struct dpu_encoder_phys; + +/** + * struct dpu_encoder_virt_ops - Interface the containing virtual encoder + * provides for the physical encoders to call back into. + * @handle_vblank_virt: Notify virtual encoder of vblank IRQ reception + * Note: This is called from IRQ handler context. + * @handle_underrun_virt: Notify virtual encoder of underrun IRQ reception + * Note: This is called from IRQ handler context. + * @handle_frame_done: Notify virtual encoder that this phys encoder has + * completed the last requested frame. + */ +struct dpu_encoder_virt_ops { + void (*handle_vblank_virt)(struct drm_encoder *, + struct dpu_encoder_phys *phys); + void (*handle_underrun_virt)(struct drm_encoder *, + struct dpu_encoder_phys *phys); + void (*handle_frame_done)(struct drm_encoder *, + struct dpu_encoder_phys *phys, u32 event); +}; + +/** + * struct dpu_encoder_phys_ops - Interface the physical encoders provide to + * the containing virtual encoder. + * @late_register: DRM Call. Add Userspace interfaces, debugfs. + * @prepare_commit: MSM Atomic Call, start of atomic commit sequence + * @is_master: Whether this phys_enc is the current master + * encoder. Can be switched at enable time. Based + * on split_role and current mode (CMD/VID). + * @atomic_mode_set: DRM Call. Set a DRM mode. + * This likely caches the mode, for use at enable. + * @enable: DRM Call. Enable a DRM mode. + * @disable: DRM Call. Disable mode. + * @atomic_check: DRM Call. Atomic check new DRM state. + * @destroy: DRM Call. Destroy and release resources. + * @control_vblank_irq: Register/deregister for VBLANK IRQ + * @wait_for_commit_done: Wait for hardware to have flushed the + * current pending frames to hardware + * @wait_for_tx_complete: Wait for hardware to transfer the pixels + * to the panel + * @wait_for_vblank: Wait for VBLANK, for sub-driver internal use + * @prepare_for_kickoff: Do any work necessary prior to a kickoff + * For CMD encoder, may wait for previous tx done + * @handle_post_kickoff: Do any necessary post-kickoff work + * @trigger_start: Process start event on physical encoder + * @needs_single_flush: Whether encoder slaves need to be flushed + * @irq_control: Handler to enable/disable all the encoder IRQs + * @prepare_idle_pc: phys encoder can update the vsync_enable status + * on idle power collapse prepare + * @restore: Restore all the encoder configs.
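+ * Typically invoked on pm runtime resume.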
+ * @get_line_count: Obtain current vertical line count + * @get_frame_count: Obtain current frame count + * @prepare_wb_job: Prepare the writeback job for the encoder + * @cleanup_wb_job: Clean up the writeback job for the encoder + * @is_valid_for_commit: Check whether the encoder is in a valid state to commit + */ + +struct dpu_encoder_phys_ops { + int (*late_register)(struct dpu_encoder_phys *encoder, + struct dentry *debugfs_root); + void (*prepare_commit)(struct dpu_encoder_phys *encoder); + bool (*is_master)(struct dpu_encoder_phys *encoder); + void (*atomic_mode_set)(struct dpu_encoder_phys *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state); + void (*enable)(struct dpu_encoder_phys *encoder); + void (*disable)(struct dpu_encoder_phys *encoder); + int (*atomic_check)(struct dpu_encoder_phys *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state); + void (*destroy)(struct dpu_encoder_phys *encoder); + int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable); + int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc); + int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc); + int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc); + void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc); + void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc); + void (*trigger_start)(struct dpu_encoder_phys *phys_enc); + bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc); + void (*irq_control)(struct dpu_encoder_phys *phys, bool enable); + void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc); + void (*restore)(struct dpu_encoder_phys *phys); + int (*get_line_count)(struct dpu_encoder_phys *phys); + int (*get_frame_count)(struct dpu_encoder_phys *phys); + void (*prepare_wb_job)(struct dpu_encoder_phys *phys_enc, + struct drm_writeback_job *job); + void (*cleanup_wb_job)(struct dpu_encoder_phys *phys_enc, + struct drm_writeback_job *job); + bool (*is_valid_for_commit)(struct dpu_encoder_phys *phys_enc); +}; + +/** + * enum dpu_intr_idx - dpu encoder interrupt index + * @INTR_IDX_VSYNC: Vsync interrupt for video mode panel + * @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel + * @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel + * @INTR_IDX_CTL_START: Ctl start interrupt for cmd mode panel + * @INTR_IDX_RDPTR: Readpointer done interrupt for cmd mode panel + * @INTR_IDX_WB_DONE: Writeback done interrupt for virtual connector + */ +enum dpu_intr_idx { + INTR_IDX_VSYNC, + INTR_IDX_PINGPONG, + INTR_IDX_UNDERRUN, + INTR_IDX_CTL_START, + INTR_IDX_RDPTR, + INTR_IDX_WB_DONE, + INTR_IDX_MAX, +}; + +/** + * struct dpu_encoder_phys - physical encoder that drives a single INTF block + * tied to a specific panel / sub-panel. Abstract type, sub-classed by + * phys_vid or phys_cmd for video mode or command mode encs respectively.
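+ * The struct is embedded as the first member of mode-specific wrappers such
+ * as struct dpu_encoder_phys_cmd and struct dpu_encoder_phys_wb below, which
+ * the to_dpu_encoder_phys_*() container_of() helpers recover.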
+ * @parent: Pointer to the containing virtual encoder + * @ops: Operations exposed to the virtual encoder + * @parent_ops: Callbacks exposed by the parent to the phys_enc + * @hw_mdptop: Hardware interface to the top registers + * @hw_ctl: Hardware interface to the ctl registers + * @hw_pp: Hardware interface to the ping pong registers + * @hw_intf: Hardware interface to the intf registers + * @hw_wb: Hardware interface to the wb registers + * @dpu_kms: Pointer to the dpu_kms top level + * @cached_mode: DRM mode cached at mode_set time, acted on in enable + * @split_role: Role to play in a split-panel configuration + * @intf_mode: Interface mode + * @intf_idx: Interface index on dpu hardware + * @wb_idx: Writeback index on dpu hardware + * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes + * @enable_state: Enable state tracking + * @vblank_refcount: Reference count of vblank request + * @vsync_cnt: Vsync count for the physical encoder + * @underrun_cnt: Underrun count for the physical encoder + * @pending_kickoff_cnt: Atomic counter tracking the number of kickoffs + * vs. the number of done/vblank irqs. Should hover + * between 0-2. Incremented when a new kickoff is + * scheduled. Decremented in the irq handler. + * @pending_ctlstart_cnt: Atomic counter tracking the number of ctl start + * pending. + * @pending_kickoff_wq: Wait queue for blocking until kickoff completes + * @irq: IRQ indices + */ +struct dpu_encoder_phys { + struct drm_encoder *parent; + struct dpu_encoder_phys_ops ops; + const struct dpu_encoder_virt_ops *parent_ops; + struct dpu_hw_mdp *hw_mdptop; + struct dpu_hw_ctl *hw_ctl; + struct dpu_hw_pingpong *hw_pp; + struct dpu_hw_intf *hw_intf; + struct dpu_hw_wb *hw_wb; + struct dpu_kms *dpu_kms; + struct drm_display_mode cached_mode; + enum dpu_enc_split_role split_role; + enum dpu_intf_mode intf_mode; + enum dpu_intf intf_idx; + enum dpu_wb wb_idx; + spinlock_t *enc_spinlock; + enum dpu_enc_enable_state enable_state; + atomic_t vblank_refcount; + atomic_t vsync_cnt; + atomic_t underrun_cnt; + atomic_t pending_ctlstart_cnt; + atomic_t pending_kickoff_cnt; + wait_queue_head_t pending_kickoff_wq; + int irq[INTR_IDX_MAX]; +}; + +static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys) +{ + atomic_inc_return(&phys->pending_ctlstart_cnt); + return atomic_inc_return(&phys->pending_kickoff_cnt); +} + +/** + * struct dpu_encoder_phys_wb - sub-class of dpu_encoder_phys to handle + * writeback specific operations + * @base: Baseclass physical encoder structure + * @wbirq_refcount: Reference count of writeback interrupt + * @wb_done_timeout_cnt: number of wb done irq timeout errors + * @wb_cfg: writeback block config to store fb related details + * @wb_conn: backpointer to writeback connector + * @wb_job: backpointer to current writeback job + * @dest: dpu buffer layout for current writeback output buffer + */ +struct dpu_encoder_phys_wb { + struct dpu_encoder_phys base; + atomic_t wbirq_refcount; + int wb_done_timeout_cnt; + struct dpu_hw_wb_cfg wb_cfg; + struct drm_writeback_connector *wb_conn; + struct drm_writeback_job *wb_job; + struct dpu_hw_fmt_layout dest; +}; + +/** + * struct dpu_encoder_phys_cmd - sub-class of dpu_encoder_phys to handle command + * mode specific operations + * @base: Baseclass physical encoder structure + * @stream_sel: Stream selection for multi-stream interfaces + * @serialize_wait4pp: serialize wait4pp
feature: wait for the pp_done interrupt + * after ctl_start instead of before the next frame kickoff + * @pp_timeout_report_cnt: number of pingpong done irq timeout errors + * @pending_vblank_cnt: Atomic counter tracking pending wait for VBLANK + * @pending_vblank_wq: Wait queue for blocking until VBLANK received + */ +struct dpu_encoder_phys_cmd { + struct dpu_encoder_phys base; + int stream_sel; + bool serialize_wait4pp; + int pp_timeout_report_cnt; + atomic_t pending_vblank_cnt; + wait_queue_head_t pending_vblank_wq; +}; + +/** + * struct dpu_enc_phys_init_params - initialization parameters for phys encs + * @dpu_kms: Pointer to the dpu_kms top level + * @parent: Pointer to the containing virtual encoder + * @parent_ops: Callbacks exposed by the parent to the phys_enc + * @split_role: Role to play in a split-panel configuration + * @intf_idx: Interface index this phys_enc will control + * @wb_idx: Writeback index this phys_enc will control + * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes + */ +struct dpu_enc_phys_init_params { + struct dpu_kms *dpu_kms; + struct drm_encoder *parent; + const struct dpu_encoder_virt_ops *parent_ops; + enum dpu_enc_split_role split_role; + enum dpu_intf intf_idx; + enum dpu_wb wb_idx; + spinlock_t *enc_spinlock; +}; + +/** + * struct dpu_encoder_wait_info - container for passing arguments to irq wait functions + * @wq: wait queue structure + * @atomic_cnt: wait until atomic_cnt equals zero + * @timeout_ms: timeout value in milliseconds + */ +struct dpu_encoder_wait_info { + wait_queue_head_t *wq; + atomic_t *atomic_cnt; + s64 timeout_ms; +}; + +/** + * dpu_encoder_phys_vid_init - Construct a new video mode physical encoder + * @p: Pointer to init params structure + * Return: Error code or newly allocated encoder + */ +struct dpu_encoder_phys *dpu_encoder_phys_vid_init( + struct dpu_enc_phys_init_params *p); + +/** + * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder + * @p: Pointer to init params structure + * Return: Error code or newly allocated encoder + */ +struct dpu_encoder_phys *dpu_encoder_phys_cmd_init( + struct dpu_enc_phys_init_params *p); + +/** + * dpu_encoder_phys_wb_init - initialize writeback encoder + * @p: Pointer to init params structure + * Return: Error code or newly allocated encoder + */ +struct dpu_encoder_phys *dpu_encoder_phys_wb_init( + struct dpu_enc_phys_init_params *p); + +/** + * dpu_encoder_helper_trigger_start - control start helper function + * This helper function may be optionally specified by physical + * encoders if they require ctl_start triggering. + * @phys_enc: Pointer to physical encoder structure + */ +void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc); + +static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode( + struct dpu_encoder_phys *phys_enc) +{ + struct dpu_crtc_state *dpu_cstate; + + if (!phys_enc || phys_enc->enable_state == DPU_ENC_DISABLING) + return BLEND_3D_NONE; + + dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state); + + /* Use merge_3d unless DSC MERGE topology is used */ + if (phys_enc->split_role == ENC_ROLE_SOLO && + dpu_cstate->num_mixers == CRTC_DUAL_MIXERS && + !dpu_encoder_use_dsc_merge(phys_enc->parent)) + return BLEND_3D_H_ROW_INT; + + return BLEND_3D_NONE; +} + +/** + * dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder + * This helper function is used by physical encoders to get the mask of DSC + * blocks used for this encoder.
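+ * Each set bit in the returned mask corresponds to one DSC block in use.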
+ * @phys_enc: Pointer to physical encoder structure + */ +unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc); + +/** + * dpu_encoder_helper_split_config - split display configuration helper function + * This helper function may be used by physical encoders to configure + * the split display related registers. + * @phys_enc: Pointer to physical encoder structure + * @interface: enum dpu_intf setting + */ +void dpu_encoder_helper_split_config( + struct dpu_encoder_phys *phys_enc, + enum dpu_intf interface); + +/** + * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has + * timed out, including reporting frame error event to crtc and debug dump + * @phys_enc: Pointer to physical encoder structure + * @intr_idx: Failing interrupt index + */ +void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc, + enum dpu_intr_idx intr_idx); + +/** + * dpu_encoder_helper_wait_for_irq - utility to wait on an irq. + * note: on timeout, @func is invoked directly if the interrupt turns out to + * have fired already + * @phys_enc: Pointer to physical encoder structure + * @irq: IRQ index + * @func: IRQ callback to be called in case of timeout + * @wait_info: wait info struct + * Return: 0 or -ERROR + */ +int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, + int irq, + void (*func)(void *arg, int irq_idx), + struct dpu_encoder_wait_info *wait_info); + +/** + * dpu_encoder_helper_phys_cleanup - helper to cleanup dpu pipeline + * @phys_enc: Pointer to physical encoder structure + */ +void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc); + +#endif /* __DPU_ENCODER_PHYS_H__ */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c new file mode 100644 index 000000000..ae28b2b93 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c @@ -0,0 +1,803 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015-2018, 2020-2021 The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ +#include <linux/delay.h> +#include "dpu_encoder_phys.h" +#include "dpu_hw_interrupts.h" +#include "dpu_hw_pingpong.h" +#include "dpu_core_irq.h" +#include "dpu_formats.h" +#include "dpu_trace.h" +#include "disp/msm_disp_snapshot.h" + +#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \ + (e) && (e)->base.parent ? \ + (e)->base.parent->base.id : -1, \ + (e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__) + +#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \ + (e) && (e)->base.parent ? \ + (e)->base.parent->base.id : -1, \ + (e) ? 
(e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__) + +#define to_dpu_encoder_phys_cmd(x) \ + container_of(x, struct dpu_encoder_phys_cmd, base) + +#define PP_TIMEOUT_MAX_TRIALS 10 + +/* + * Tearcheck sync start and continue thresholds are empirically found + * based on common panels. In the future, we may want to allow panels to + * override these default values. + */ +#define DEFAULT_TEARCHECK_SYNC_THRESH_START 4 +#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE 4 + +#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000 + +#define DPU_ENC_MAX_POLL_TIMEOUT_US 2000 + +static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc) +{ + return (phys_enc->split_role != ENC_ROLE_SLAVE); +} + +static void _dpu_encoder_phys_cmd_update_intf_cfg( + struct dpu_encoder_phys *phys_enc) +{ + struct dpu_encoder_phys_cmd *cmd_enc = + to_dpu_encoder_phys_cmd(phys_enc); + struct dpu_hw_ctl *ctl; + struct dpu_hw_intf_cfg intf_cfg = { 0 }; + + ctl = phys_enc->hw_ctl; + if (!ctl->ops.setup_intf_cfg) + return; + + intf_cfg.intf = phys_enc->intf_idx; + intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD; + intf_cfg.stream_sel = cmd_enc->stream_sel; + intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); + ctl->ops.setup_intf_cfg(ctl, &intf_cfg); + + /* setup which pp blk will connect to this intf */ + if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) && phys_enc->hw_intf->ops.bind_pingpong_blk) + phys_enc->hw_intf->ops.bind_pingpong_blk( + phys_enc->hw_intf, + true, + phys_enc->hw_pp->idx); +} + +static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx) +{ + struct dpu_encoder_phys *phys_enc = arg; + unsigned long lock_flags; + int new_cnt; + u32 event = DPU_ENCODER_FRAME_EVENT_DONE; + + if (!phys_enc->hw_pp) + return; + + DPU_ATRACE_BEGIN("pp_done_irq"); + /* notify all synchronous clients first, then asynchronous clients */ + if (phys_enc->parent_ops->handle_frame_done) + phys_enc->parent_ops->handle_frame_done(phys_enc->parent, + phys_enc, event); + + spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); + new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0); + spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); + + trace_dpu_enc_phys_cmd_pp_tx_done(DRMID(phys_enc->parent), + phys_enc->hw_pp->idx - PINGPONG_0, + new_cnt, event); + + /* Signal any waiting atomic commit thread */ + wake_up_all(&phys_enc->pending_kickoff_wq); + DPU_ATRACE_END("pp_done_irq"); +} + +static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx) +{ + struct dpu_encoder_phys *phys_enc = arg; + struct dpu_encoder_phys_cmd *cmd_enc; + + if (!phys_enc->hw_pp) + return; + + DPU_ATRACE_BEGIN("rd_ptr_irq"); + cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); + + if (phys_enc->parent_ops->handle_vblank_virt) + phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent, + phys_enc); + + atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0); + wake_up_all(&cmd_enc->pending_vblank_wq); + DPU_ATRACE_END("rd_ptr_irq"); +} + +static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx) +{ + struct dpu_encoder_phys *phys_enc = arg; + + DPU_ATRACE_BEGIN("ctl_start_irq"); + + atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0); + + /* Signal any waiting ctl start interrupt */ + wake_up_all(&phys_enc->pending_kickoff_wq); + DPU_ATRACE_END("ctl_start_irq"); +} + +static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx) +{ + struct dpu_encoder_phys *phys_enc = arg; + + if (phys_enc->parent_ops->handle_underrun_virt) + 
phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent, + phys_enc); +} + +static void dpu_encoder_phys_cmd_atomic_mode_set( + struct dpu_encoder_phys *phys_enc, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start; + + phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done; + + phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr; + + phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun; +} + +static int _dpu_encoder_phys_cmd_handle_ppdone_timeout( + struct dpu_encoder_phys *phys_enc) +{ + struct dpu_encoder_phys_cmd *cmd_enc = + to_dpu_encoder_phys_cmd(phys_enc); + u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR; + bool do_log = false; + struct drm_encoder *drm_enc; + + if (!phys_enc->hw_pp) + return -EINVAL; + + drm_enc = phys_enc->parent; + + cmd_enc->pp_timeout_report_cnt++; + if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) { + frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD; + do_log = true; + } else if (cmd_enc->pp_timeout_report_cnt == 1) { + do_log = true; + } + + trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(drm_enc), + phys_enc->hw_pp->idx - PINGPONG_0, + cmd_enc->pp_timeout_report_cnt, + atomic_read(&phys_enc->pending_kickoff_cnt), + frame_event); + + /* to avoid flooding, only log first time, and "dead" time */ + if (do_log) { + DRM_ERROR("id:%d pp:%d kickoff timeout %d cnt %d koff_cnt %d\n", + DRMID(drm_enc), + phys_enc->hw_pp->idx - PINGPONG_0, + phys_enc->hw_ctl->idx - CTL_0, + cmd_enc->pp_timeout_report_cnt, + atomic_read(&phys_enc->pending_kickoff_cnt)); + msm_disp_snapshot_state(drm_enc->dev); + dpu_core_irq_unregister_callback(phys_enc->dpu_kms, + phys_enc->irq[INTR_IDX_RDPTR]); + } + + atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0); + + /* request a ctl reset before the next kickoff */ + phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET; + + if (phys_enc->parent_ops->handle_frame_done) + phys_enc->parent_ops->handle_frame_done( + drm_enc, phys_enc, frame_event); + + return -ETIMEDOUT; +} + +static int _dpu_encoder_phys_cmd_wait_for_idle( + struct dpu_encoder_phys *phys_enc) +{ + struct dpu_encoder_phys_cmd *cmd_enc = + to_dpu_encoder_phys_cmd(phys_enc); + struct dpu_encoder_wait_info wait_info; + int ret; + + wait_info.wq = &phys_enc->pending_kickoff_wq; + wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt; + wait_info.timeout_ms = KICKOFF_TIMEOUT_MS; + + ret = dpu_encoder_helper_wait_for_irq(phys_enc, + phys_enc->irq[INTR_IDX_PINGPONG], + dpu_encoder_phys_cmd_pp_tx_done_irq, + &wait_info); + if (ret == -ETIMEDOUT) + _dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc); + else if (!ret) + cmd_enc->pp_timeout_report_cnt = 0; + + return ret; +} + +static int dpu_encoder_phys_cmd_control_vblank_irq( + struct dpu_encoder_phys *phys_enc, + bool enable) +{ + int ret = 0; + int refcount; + + if (!phys_enc->hw_pp) { + DPU_ERROR("invalid encoder\n"); + return -EINVAL; + } + + refcount = atomic_read(&phys_enc->vblank_refcount); + + /* Slave encoders don't report vblank */ + if (!dpu_encoder_phys_cmd_is_master(phys_enc)) + goto end; + + /* protect against negative */ + if (!enable && refcount == 0) { + ret = -EINVAL; + goto end; + } + + DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent), + phys_enc->hw_pp->idx - PINGPONG_0, + enable ? 
"true" : "false", refcount); + + if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1) + ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, + phys_enc->irq[INTR_IDX_RDPTR], + dpu_encoder_phys_cmd_pp_rd_ptr_irq, + phys_enc); + else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0) + ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, + phys_enc->irq[INTR_IDX_RDPTR]); + +end: + if (ret) { + DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n", + DRMID(phys_enc->parent), + phys_enc->hw_pp->idx - PINGPONG_0, ret, + enable ? "true" : "false", refcount); + } + + return ret; +} + +static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc, + bool enable) +{ + trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent), + phys_enc->hw_pp->idx - PINGPONG_0, + enable, atomic_read(&phys_enc->vblank_refcount)); + + if (enable) { + dpu_core_irq_register_callback(phys_enc->dpu_kms, + phys_enc->irq[INTR_IDX_PINGPONG], + dpu_encoder_phys_cmd_pp_tx_done_irq, + phys_enc); + dpu_core_irq_register_callback(phys_enc->dpu_kms, + phys_enc->irq[INTR_IDX_UNDERRUN], + dpu_encoder_phys_cmd_underrun_irq, + phys_enc); + dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true); + + if (dpu_encoder_phys_cmd_is_master(phys_enc)) + dpu_core_irq_register_callback(phys_enc->dpu_kms, + phys_enc->irq[INTR_IDX_CTL_START], + dpu_encoder_phys_cmd_ctl_start_irq, + phys_enc); + } else { + if (dpu_encoder_phys_cmd_is_master(phys_enc)) + dpu_core_irq_unregister_callback(phys_enc->dpu_kms, + phys_enc->irq[INTR_IDX_CTL_START]); + + dpu_core_irq_unregister_callback(phys_enc->dpu_kms, + phys_enc->irq[INTR_IDX_UNDERRUN]); + dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false); + dpu_core_irq_unregister_callback(phys_enc->dpu_kms, + phys_enc->irq[INTR_IDX_PINGPONG]); + } +} + +static void dpu_encoder_phys_cmd_tearcheck_config( + struct dpu_encoder_phys *phys_enc) +{ + struct dpu_encoder_phys_cmd *cmd_enc = + to_dpu_encoder_phys_cmd(phys_enc); + struct dpu_hw_tear_check tc_cfg = { 0 }; + struct drm_display_mode *mode; + bool tc_enable = true; + u32 vsync_hz; + struct dpu_kms *dpu_kms; + + if (!phys_enc->hw_pp) { + DPU_ERROR("invalid encoder\n"); + return; + } + mode = &phys_enc->cached_mode; + + DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0); + + if (!phys_enc->hw_pp->ops.setup_tearcheck || + !phys_enc->hw_pp->ops.enable_tearcheck) { + DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n"); + return; + } + + dpu_kms = phys_enc->dpu_kms; + + /* + * TE default: dsi byte clock calculated base on 70 fps; + * around 14 ms to complete a kickoff cycle if te disabled; + * vclk_line base on 60 fps; write is faster than read; + * init == start == rdptr; + * + * vsync_count is ratio of MDP VSYNC clock frequency to LCD panel + * frequency divided by the no. of rows (lines) in the LCDpanel. 
+ */ + vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync"); + if (vsync_hz <= 0) { + DPU_DEBUG_CMDENC(cmd_enc, "invalid - vsync_hz %u\n", + vsync_hz); + return; + } + + tc_cfg.vsync_count = vsync_hz / + (mode->vtotal * drm_mode_vrefresh(mode)); + + /* + * Set the sync_cfg_height to twice vtotal so that if we lose a + * TE event coming from the display TE pin we won't stall immediately + */ + tc_cfg.hw_vsync_mode = 1; + tc_cfg.sync_cfg_height = mode->vtotal * 2; + tc_cfg.vsync_init_val = mode->vdisplay; + tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START; + tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE; + tc_cfg.start_pos = mode->vdisplay; + tc_cfg.rd_ptr_irq = mode->vdisplay + 1; + + DPU_DEBUG_CMDENC(cmd_enc, + "tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n", + phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz, + mode->vtotal, drm_mode_vrefresh(mode)); + DPU_DEBUG_CMDENC(cmd_enc, + "tc %d enable %u start_pos %u rd_ptr_irq %u\n", + phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos, + tc_cfg.rd_ptr_irq); + DPU_DEBUG_CMDENC(cmd_enc, + "tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n", + phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode, + tc_cfg.vsync_count, tc_cfg.vsync_init_val); + DPU_DEBUG_CMDENC(cmd_enc, + "tc %d cfgheight %u thresh_start %u thresh_cont %u\n", + phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height, + tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue); + + phys_enc->hw_pp->ops.setup_tearcheck(phys_enc->hw_pp, &tc_cfg); + phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable); +} + +static void _dpu_encoder_phys_cmd_pingpong_config( + struct dpu_encoder_phys *phys_enc) +{ + struct dpu_encoder_phys_cmd *cmd_enc = + to_dpu_encoder_phys_cmd(phys_enc); + + if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) { + DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != NULL); + return; + } + + DPU_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n", + phys_enc->hw_pp->idx - PINGPONG_0); + drm_mode_debug_printmodeline(&phys_enc->cached_mode); + + _dpu_encoder_phys_cmd_update_intf_cfg(phys_enc); + dpu_encoder_phys_cmd_tearcheck_config(phys_enc); +} + +static bool dpu_encoder_phys_cmd_needs_single_flush( + struct dpu_encoder_phys *phys_enc) +{ + /** + * we do separate flush for each CTL and let + * CTL_START synchronize them + */ + return false; +} + +static void dpu_encoder_phys_cmd_enable_helper( + struct dpu_encoder_phys *phys_enc) +{ + struct dpu_hw_ctl *ctl; + + if (!phys_enc->hw_pp) { + DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL); + return; + } + + dpu_encoder_helper_split_config(phys_enc, phys_enc->intf_idx); + + _dpu_encoder_phys_cmd_pingpong_config(phys_enc); + + if (!dpu_encoder_phys_cmd_is_master(phys_enc)) + return; + + ctl = phys_enc->hw_ctl; + ctl->ops.update_pending_flush_intf(ctl, phys_enc->intf_idx); +} + +static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc) +{ + struct dpu_encoder_phys_cmd *cmd_enc = + to_dpu_encoder_phys_cmd(phys_enc); + + if (!phys_enc->hw_pp) { + DPU_ERROR("invalid phys encoder\n"); + return; + } + + DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0); + + if (phys_enc->enable_state == DPU_ENC_ENABLED) { + DPU_ERROR("already enabled\n"); + return; + } + + dpu_encoder_phys_cmd_enable_helper(phys_enc); + phys_enc->enable_state = DPU_ENC_ENABLED; +} + +static void _dpu_encoder_phys_cmd_connect_te( + struct dpu_encoder_phys *phys_enc, bool enable) +{ + if (!phys_enc->hw_pp || 
!phys_enc->hw_pp->ops.connect_external_te) + return; + + trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable); + phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable); +} + +static void dpu_encoder_phys_cmd_prepare_idle_pc( + struct dpu_encoder_phys *phys_enc) +{ + _dpu_encoder_phys_cmd_connect_te(phys_enc, false); +} + +static int dpu_encoder_phys_cmd_get_line_count( + struct dpu_encoder_phys *phys_enc) +{ + struct dpu_hw_pingpong *hw_pp; + + if (!phys_enc->hw_pp) + return -EINVAL; + + if (!dpu_encoder_phys_cmd_is_master(phys_enc)) + return -EINVAL; + + hw_pp = phys_enc->hw_pp; + if (!hw_pp->ops.get_line_count) + return -EINVAL; + + return hw_pp->ops.get_line_count(hw_pp); +} + +static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc) +{ + struct dpu_encoder_phys_cmd *cmd_enc = + to_dpu_encoder_phys_cmd(phys_enc); + struct dpu_hw_ctl *ctl; + + if (!phys_enc->hw_pp) { + DPU_ERROR("invalid encoder\n"); + return; + } + DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent), + phys_enc->hw_pp->idx - PINGPONG_0, + phys_enc->enable_state); + + if (phys_enc->enable_state == DPU_ENC_DISABLED) { + DPU_ERROR_CMDENC(cmd_enc, "already disabled\n"); + return; + } + + if (phys_enc->hw_pp->ops.enable_tearcheck) + phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, false); + + if (phys_enc->hw_intf->ops.bind_pingpong_blk) { + phys_enc->hw_intf->ops.bind_pingpong_blk( + phys_enc->hw_intf, + false, + phys_enc->hw_pp->idx); + + ctl = phys_enc->hw_ctl; + ctl->ops.update_pending_flush_intf(ctl, phys_enc->intf_idx); + } + + phys_enc->enable_state = DPU_ENC_DISABLED; +} + +static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc) +{ + struct dpu_encoder_phys_cmd *cmd_enc = + to_dpu_encoder_phys_cmd(phys_enc); + + kfree(cmd_enc); +} + +static void dpu_encoder_phys_cmd_prepare_for_kickoff( + struct dpu_encoder_phys *phys_enc) +{ + struct dpu_encoder_phys_cmd *cmd_enc = + to_dpu_encoder_phys_cmd(phys_enc); + int ret; + + if (!phys_enc->hw_pp) { + DPU_ERROR("invalid encoder\n"); + return; + } + DRM_DEBUG_KMS("id:%u pp:%d pending_cnt:%d\n", DRMID(phys_enc->parent), + phys_enc->hw_pp->idx - PINGPONG_0, + atomic_read(&phys_enc->pending_kickoff_cnt)); + + /* + * Mark kickoff request as outstanding. 
If more than one is + * outstanding, we have to wait for the previous kickoff to complete + */ + ret = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc); + if (ret) { + /* force pending_kickoff_cnt 0 to discard failed kickoff */ + atomic_set(&phys_enc->pending_kickoff_cnt, 0); + DRM_ERROR("failed wait_for_idle: id:%u ret:%d pp:%d\n", + DRMID(phys_enc->parent), ret, + phys_enc->hw_pp->idx - PINGPONG_0); + } + + DPU_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n", + phys_enc->hw_pp->idx - PINGPONG_0, + atomic_read(&phys_enc->pending_kickoff_cnt)); +} + +static bool dpu_encoder_phys_cmd_is_ongoing_pptx( + struct dpu_encoder_phys *phys_enc) +{ + struct dpu_hw_pp_vsync_info info; + + if (!phys_enc) + return false; + + phys_enc->hw_pp->ops.get_vsync_info(phys_enc->hw_pp, &info); + if (info.wr_ptr_line_count > 0 && + info.wr_ptr_line_count < phys_enc->cached_mode.vdisplay) + return true; + + return false; +} + +static void dpu_encoder_phys_cmd_prepare_commit( + struct dpu_encoder_phys *phys_enc) +{ + struct dpu_encoder_phys_cmd *cmd_enc = + to_dpu_encoder_phys_cmd(phys_enc); + int trial = 0; + + if (!phys_enc) + return; + if (!phys_enc->hw_pp) + return; + if (!dpu_encoder_phys_cmd_is_master(phys_enc)) + return; + + /* If autorefresh is already disabled, we have nothing to do */ + if (!phys_enc->hw_pp->ops.get_autorefresh(phys_enc->hw_pp, NULL)) + return; + + /* + * If autorefresh is enabled, disable it and make sure it is safe to + * proceed with the current frame commit/push. The sequence followed is: + * 1. Disable TE. + * 2. Disable the autorefresh config. + * 3. Poll until no frame transfer is ongoing. + * 4. Re-enable TE. + */ + _dpu_encoder_phys_cmd_connect_te(phys_enc, false); + phys_enc->hw_pp->ops.setup_autorefresh(phys_enc->hw_pp, 0, false); + + do { + udelay(DPU_ENC_MAX_POLL_TIMEOUT_US); + if ((trial * DPU_ENC_MAX_POLL_TIMEOUT_US) + > (KICKOFF_TIMEOUT_MS * USEC_PER_MSEC)) { + DPU_ERROR_CMDENC(cmd_enc, + "disable autorefresh failed\n"); + break; + } + + trial++; + } while (dpu_encoder_phys_cmd_is_ongoing_pptx(phys_enc)); + + _dpu_encoder_phys_cmd_connect_te(phys_enc, true); + + DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), + "disabled autorefresh\n"); +} + +static int _dpu_encoder_phys_cmd_wait_for_ctl_start( + struct dpu_encoder_phys *phys_enc) +{ + struct dpu_encoder_phys_cmd *cmd_enc = + to_dpu_encoder_phys_cmd(phys_enc); + struct dpu_encoder_wait_info wait_info; + int ret; + + wait_info.wq = &phys_enc->pending_kickoff_wq; + wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt; + wait_info.timeout_ms = KICKOFF_TIMEOUT_MS; + + ret = dpu_encoder_helper_wait_for_irq(phys_enc, + phys_enc->irq[INTR_IDX_CTL_START], + dpu_encoder_phys_cmd_ctl_start_irq, + &wait_info); + if (ret == -ETIMEDOUT) { + DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n"); + ret = -EINVAL; + } else if (!ret) + ret = 0; + + return ret; +} + +static int dpu_encoder_phys_cmd_wait_for_tx_complete( + struct dpu_encoder_phys *phys_enc) +{ + int rc; + + rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc); + if (rc) { + DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n", + DRMID(phys_enc->parent), rc, + phys_enc->intf_idx - INTF_0); + } + + return rc; +} + +static int dpu_encoder_phys_cmd_wait_for_commit_done( + struct dpu_encoder_phys *phys_enc) +{ + /* only required for master controller */ + if (!dpu_encoder_phys_cmd_is_master(phys_enc)) + return 0; + + if (phys_enc->hw_ctl->ops.is_started(phys_enc->hw_ctl)) + return dpu_encoder_phys_cmd_wait_for_tx_complete(phys_enc); + + return 
_dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc); +} + +static int dpu_encoder_phys_cmd_wait_for_vblank( + struct dpu_encoder_phys *phys_enc) +{ + int rc = 0; + struct dpu_encoder_phys_cmd *cmd_enc; + struct dpu_encoder_wait_info wait_info; + + cmd_enc = to_dpu_encoder_phys_cmd(phys_enc); + + /* only required for master controller */ + if (!dpu_encoder_phys_cmd_is_master(phys_enc)) + return rc; + + wait_info.wq = &cmd_enc->pending_vblank_wq; + wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt; + wait_info.timeout_ms = KICKOFF_TIMEOUT_MS; + + atomic_inc(&cmd_enc->pending_vblank_cnt); + + rc = dpu_encoder_helper_wait_for_irq(phys_enc, + phys_enc->irq[INTR_IDX_RDPTR], + dpu_encoder_phys_cmd_pp_rd_ptr_irq, + &wait_info); + + return rc; +} + +static void dpu_encoder_phys_cmd_handle_post_kickoff( + struct dpu_encoder_phys *phys_enc) +{ + /** + * re-enable external TE, either for the first time after enabling + * or if disabled for Autorefresh + */ + _dpu_encoder_phys_cmd_connect_te(phys_enc, true); +} + +static void dpu_encoder_phys_cmd_trigger_start( + struct dpu_encoder_phys *phys_enc) +{ + dpu_encoder_helper_trigger_start(phys_enc); +} + +static void dpu_encoder_phys_cmd_init_ops( + struct dpu_encoder_phys_ops *ops) +{ + ops->prepare_commit = dpu_encoder_phys_cmd_prepare_commit; + ops->is_master = dpu_encoder_phys_cmd_is_master; + ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set; + ops->enable = dpu_encoder_phys_cmd_enable; + ops->disable = dpu_encoder_phys_cmd_disable; + ops->destroy = dpu_encoder_phys_cmd_destroy; + ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq; + ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done; + ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff; + ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete; + ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank; + ops->trigger_start = dpu_encoder_phys_cmd_trigger_start; + ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush; + ops->irq_control = dpu_encoder_phys_cmd_irq_control; + ops->restore = dpu_encoder_phys_cmd_enable_helper; + ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc; + ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff; + ops->get_line_count = dpu_encoder_phys_cmd_get_line_count; +} + +struct dpu_encoder_phys *dpu_encoder_phys_cmd_init( + struct dpu_enc_phys_init_params *p) +{ + struct dpu_encoder_phys *phys_enc = NULL; + struct dpu_encoder_phys_cmd *cmd_enc = NULL; + int i, ret = 0; + + DPU_DEBUG("intf %d\n", p->intf_idx - INTF_0); + + cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL); + if (!cmd_enc) { + ret = -ENOMEM; + DPU_ERROR("failed to allocate\n"); + return ERR_PTR(ret); + } + phys_enc = &cmd_enc->base; + phys_enc->hw_mdptop = p->dpu_kms->hw_mdp; + phys_enc->intf_idx = p->intf_idx; + + dpu_encoder_phys_cmd_init_ops(&phys_enc->ops); + phys_enc->parent = p->parent; + phys_enc->parent_ops = p->parent_ops; + phys_enc->dpu_kms = p->dpu_kms; + phys_enc->split_role = p->split_role; + phys_enc->intf_mode = INTF_MODE_CMD; + phys_enc->enc_spinlock = p->enc_spinlock; + cmd_enc->stream_sel = 0; + phys_enc->enable_state = DPU_ENC_DISABLED; + for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++) + phys_enc->irq[i] = -EINVAL; + + atomic_set(&phys_enc->vblank_refcount, 0); + atomic_set(&phys_enc->pending_kickoff_cnt, 0); + atomic_set(&phys_enc->pending_ctlstart_cnt, 0); + atomic_set(&cmd_enc->pending_vblank_cnt, 0); + init_waitqueue_head(&phys_enc->pending_kickoff_wq); + 
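/* + * Note: pending_kickoff_wq is woken from both the pp_done and ctl_start + * IRQ handlers above and pairs with pending_kickoff_cnt / + * pending_ctlstart_cnt, while pending_vblank_wq is woken only from the + * rd_ptr IRQ handler and pairs with pending_vblank_cnt. + */ + 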
init_waitqueue_head(&cmd_enc->pending_vblank_wq); + + DPU_DEBUG_CMDENC(cmd_enc, "created\n"); + + return phys_enc; +} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c new file mode 100644 index 000000000..2c1464666 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -0,0 +1,716 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2015-2018, 2020-2021 The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ +#include "dpu_encoder_phys.h" +#include "dpu_hw_interrupts.h" +#include "dpu_hw_merge3d.h" +#include "dpu_core_irq.h" +#include "dpu_formats.h" +#include "dpu_trace.h" +#include "disp/msm_disp_snapshot.h" + +#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \ + (e) && (e)->parent ? \ + (e)->parent->base.id : -1, \ + (e) && (e)->hw_intf ? \ + (e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__) + +#define DPU_ERROR_VIDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \ + (e) && (e)->parent ? \ + (e)->parent->base.id : -1, \ + (e) && (e)->hw_intf ? \ + (e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__) + +#define to_dpu_encoder_phys_vid(x) \ + container_of(x, struct dpu_encoder_phys_vid, base) + +static bool dpu_encoder_phys_vid_is_master( + struct dpu_encoder_phys *phys_enc) +{ + bool ret = false; + + if (phys_enc->split_role != ENC_ROLE_SLAVE) + ret = true; + + return ret; +} + +static void drm_mode_to_intf_timing_params( + const struct dpu_encoder_phys *phys_enc, + const struct drm_display_mode *mode, + struct intf_timing_params *timing) +{ + memset(timing, 0, sizeof(*timing)); + + if ((mode->htotal < mode->hsync_end) + || (mode->hsync_start < mode->hdisplay) + || (mode->vtotal < mode->vsync_end) + || (mode->vsync_start < mode->vdisplay) + || (mode->hsync_end < mode->hsync_start) + || (mode->vsync_end < mode->vsync_start)) { + DPU_ERROR( + "invalid params - hstart:%d,hend:%d,htot:%d,hdisplay:%d\n", + mode->hsync_start, mode->hsync_end, + mode->htotal, mode->hdisplay); + DPU_ERROR("vstart:%d,vend:%d,vtot:%d,vdisplay:%d\n", + mode->vsync_start, mode->vsync_end, + mode->vtotal, mode->vdisplay); + return; + } + + /* + * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html + * Active Region Front Porch Sync Back Porch + * <-----------------><------------><-----><-----------> + * <- [hv]display ---> + * <--------- [hv]sync_start ------> + * <----------------- [hv]sync_end -------> + * <---------------------------- [hv]total -------------> + */ + timing->width = mode->hdisplay; /* active width */ + timing->height = mode->vdisplay; /* active height */ + timing->xres = timing->width; + timing->yres = timing->height; + timing->h_back_porch = mode->htotal - mode->hsync_end; + timing->h_front_porch = mode->hsync_start - mode->hdisplay; + timing->v_back_porch = mode->vtotal - mode->vsync_end; + timing->v_front_porch = mode->vsync_start - mode->vdisplay; + timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start; + timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start; + timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0; + timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0; + timing->border_clr = 0; + timing->underflow_clr = 0xff; + timing->hsync_skew = mode->hskew; + + /* DSI controller cannot handle active-low sync signals. 
*/ + if (phys_enc->hw_intf->cap->type == INTF_DSI) { + timing->hsync_polarity = 0; + timing->vsync_polarity = 0; + } + + /* for DP/eDP, shift timings to align to the bottom right */ + if (phys_enc->hw_intf->cap->type == INTF_DP) { + timing->h_back_porch += timing->h_front_porch; + timing->h_front_porch = 0; + timing->v_back_porch += timing->v_front_porch; + timing->v_front_porch = 0; + } + + timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent); + + /* + * for DP, divide the horizontal parameters by 2 when + * widebus is enabled + */ + if (phys_enc->hw_intf->cap->type == INTF_DP && timing->wide_bus_en) { + timing->width = timing->width >> 1; + timing->xres = timing->xres >> 1; + timing->h_back_porch = timing->h_back_porch >> 1; + timing->h_front_porch = timing->h_front_porch >> 1; + timing->hsync_pulse_width = timing->hsync_pulse_width >> 1; + } +} + +static u32 get_horizontal_total(const struct intf_timing_params *timing) +{ + u32 active = timing->xres; + u32 inactive = + timing->h_back_porch + timing->h_front_porch + + timing->hsync_pulse_width; + return active + inactive; +} + +static u32 get_vertical_total(const struct intf_timing_params *timing) +{ + u32 active = timing->yres; + u32 inactive = + timing->v_back_porch + timing->v_front_porch + + timing->vsync_pulse_width; + return active + inactive; +} + +/* + * programmable_fetch_get_num_lines: + * Number of fetch lines in vertical front porch + * @timing: Pointer to the intf timing information for the requested mode + * + * Returns the number of fetch lines in vertical front porch at which mdp + * can start fetching the next frame. + * + * Number of needed prefetch lines is anything that cannot be absorbed in the + * start of frame time (back porch + vsync pulse width). + * + * Some panels have very large VFP, however we only need a total number of + * lines based on the chip worst case latencies. + */ +static u32 programmable_fetch_get_num_lines( + struct dpu_encoder_phys *phys_enc, + const struct intf_timing_params *timing) +{ + u32 worst_case_needed_lines = + phys_enc->hw_intf->cap->prog_fetch_lines_worst_case; + u32 start_of_frame_lines = + timing->v_back_porch + timing->vsync_pulse_width; + u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines; + u32 actual_vfp_lines = 0; + + /* Fetch must be outside active lines, otherwise undefined. 
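+ * For example (hypothetical values): with prog_fetch_lines_worst_case = + * 24 and v_back_porch + vsync_pulse_width = 16, the remaining 8 prefetch + * lines must come out of the vertical front porch.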
*/ + if (start_of_frame_lines >= worst_case_needed_lines) { + DPU_DEBUG_VIDENC(phys_enc, + "prog fetch is not needed, large vbp+vsw\n"); + actual_vfp_lines = 0; + } else if (timing->v_front_porch < needed_vfp_lines) { + /* Warn fetch needed, but not enough porch in panel config */ + pr_warn_once + ("low vbp+vfp may lead to perf issues in some cases\n"); + DPU_DEBUG_VIDENC(phys_enc, + "less vfp than fetch req, using entire vfp\n"); + actual_vfp_lines = timing->v_front_porch; + } else { + DPU_DEBUG_VIDENC(phys_enc, "room in vfp for needed prefetch\n"); + actual_vfp_lines = needed_vfp_lines; + } + + DPU_DEBUG_VIDENC(phys_enc, + "v_front_porch %u v_back_porch %u vsync_pulse_width %u\n", + timing->v_front_porch, timing->v_back_porch, + timing->vsync_pulse_width); + DPU_DEBUG_VIDENC(phys_enc, + "wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n", + worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines); + + return actual_vfp_lines; +} + +/* + * programmable_fetch_config: Programs HW to prefetch lines by offsetting + * the start of fetch into the vertical front porch for cases where the + * vsync pulse width and vertical back porch time is insufficient + * + * Gets # of lines to pre-fetch, then calculate VSYNC counter value. + * HW layer requires VSYNC counter of first pixel of tgt VFP line. + * + * @timing: Pointer to the intf timing information for the requested mode + */ +static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc, + const struct intf_timing_params *timing) +{ + struct intf_prog_fetch f = { 0 }; + u32 vfp_fetch_lines = 0; + u32 horiz_total = 0; + u32 vert_total = 0; + u32 vfp_fetch_start_vsync_counter = 0; + unsigned long lock_flags; + + if (WARN_ON_ONCE(!phys_enc->hw_intf->ops.setup_prg_fetch)) + return; + + vfp_fetch_lines = programmable_fetch_get_num_lines(phys_enc, timing); + if (vfp_fetch_lines) { + vert_total = get_vertical_total(timing); + horiz_total = get_horizontal_total(timing); + vfp_fetch_start_vsync_counter = + (vert_total - vfp_fetch_lines) * horiz_total + 1; + f.enable = 1; + f.fetch_start = vfp_fetch_start_vsync_counter; + } + + DPU_DEBUG_VIDENC(phys_enc, + "vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n", + vfp_fetch_lines, vfp_fetch_start_vsync_counter); + + spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); + phys_enc->hw_intf->ops.setup_prg_fetch(phys_enc->hw_intf, &f); + spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); +} + +static void dpu_encoder_phys_vid_setup_timing_engine( + struct dpu_encoder_phys *phys_enc) +{ + struct drm_display_mode mode; + struct intf_timing_params timing_params = { 0 }; + const struct dpu_format *fmt = NULL; + u32 fmt_fourcc = DRM_FORMAT_RGB888; + unsigned long lock_flags; + struct dpu_hw_intf_cfg intf_cfg = { 0 }; + + if (!phys_enc->hw_ctl->ops.setup_intf_cfg) { + DPU_ERROR("invalid encoder %d\n", phys_enc != NULL); + return; + } + + mode = phys_enc->cached_mode; + if (!phys_enc->hw_intf->ops.setup_timing_gen) { + DPU_ERROR("timing engine setup is not supported\n"); + return; + } + + DPU_DEBUG_VIDENC(phys_enc, "enabling mode:\n"); + drm_mode_debug_printmodeline(&mode); + + if (phys_enc->split_role != ENC_ROLE_SOLO) { + mode.hdisplay >>= 1; + mode.htotal >>= 1; + mode.hsync_start >>= 1; + mode.hsync_end >>= 1; + + DPU_DEBUG_VIDENC(phys_enc, + "split_role %d, halve horizontal %d %d %d %d\n", + phys_enc->split_role, + mode.hdisplay, mode.htotal, + mode.hsync_start, mode.hsync_end); + } + + drm_mode_to_intf_timing_params(phys_enc, &mode, &timing_params); + + fmt = 
dpu_get_dpu_format(fmt_fourcc); + DPU_DEBUG_VIDENC(phys_enc, "fmt_fourcc 0x%X\n", fmt_fourcc); + + intf_cfg.intf = phys_enc->hw_intf->idx; + intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID; + intf_cfg.stream_sel = 0; /* Don't care value for video mode */ + intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); + if (phys_enc->hw_pp->merge_3d) + intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx; + + spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); + phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf, + &timing_params, fmt); + phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg); + + /* setup which pp blk will connect to this intf */ + if (phys_enc->hw_intf->ops.bind_pingpong_blk) + phys_enc->hw_intf->ops.bind_pingpong_blk( + phys_enc->hw_intf, + true, + phys_enc->hw_pp->idx); + + if (phys_enc->hw_pp->merge_3d) + phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, intf_cfg.mode_3d); + + spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); + + programmable_fetch_config(phys_enc, &timing_params); +} + +static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx) +{ + struct dpu_encoder_phys *phys_enc = arg; + struct dpu_hw_ctl *hw_ctl; + unsigned long lock_flags; + u32 flush_register = 0; + + hw_ctl = phys_enc->hw_ctl; + + DPU_ATRACE_BEGIN("vblank_irq"); + + if (phys_enc->parent_ops->handle_vblank_virt) + phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent, + phys_enc); + + atomic_read(&phys_enc->pending_kickoff_cnt); + + /* + * only decrement the pending flush count if we've actually flushed + * hardware. due to sw irq latency, vblank may have already happened + * so we need to double-check with hw that it accepted the flush bits + */ + spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); + if (hw_ctl->ops.get_flush_register) + flush_register = hw_ctl->ops.get_flush_register(hw_ctl); + + if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl))) + atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0); + spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); + + /* Signal any waiting atomic commit thread */ + wake_up_all(&phys_enc->pending_kickoff_wq); + + phys_enc->parent_ops->handle_frame_done(phys_enc->parent, phys_enc, + DPU_ENCODER_FRAME_EVENT_DONE); + + DPU_ATRACE_END("vblank_irq"); +} + +static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx) +{ + struct dpu_encoder_phys *phys_enc = arg; + + if (phys_enc->parent_ops->handle_underrun_virt) + phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent, + phys_enc); +} + +static bool dpu_encoder_phys_vid_needs_single_flush( + struct dpu_encoder_phys *phys_enc) +{ + return phys_enc->split_role != ENC_ROLE_SOLO; +} + +static void dpu_encoder_phys_vid_atomic_mode_set( + struct dpu_encoder_phys *phys_enc, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + phys_enc->irq[INTR_IDX_VSYNC] = phys_enc->hw_intf->cap->intr_vsync; + + phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun; +} + +static int dpu_encoder_phys_vid_control_vblank_irq( + struct dpu_encoder_phys *phys_enc, + bool enable) +{ + int ret = 0; + int refcount; + + refcount = atomic_read(&phys_enc->vblank_refcount); + + /* Slave encoders don't report vblank */ + if (!dpu_encoder_phys_vid_is_master(phys_enc)) + goto end; + + /* protect against negative */ + if (!enable && refcount == 0) { + ret = -EINVAL; + goto end; + } + + DRM_DEBUG_VBL("id:%u enable=%d/%d\n", DRMID(phys_enc->parent), enable, + 
atomic_read(&phys_enc->vblank_refcount)); + + if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1) + ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, + phys_enc->irq[INTR_IDX_VSYNC], + dpu_encoder_phys_vid_vblank_irq, + phys_enc); + else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0) + ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, + phys_enc->irq[INTR_IDX_VSYNC]); + +end: + if (ret) { + DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n", + DRMID(phys_enc->parent), + phys_enc->hw_intf->idx - INTF_0, ret, enable, + refcount); + } + return ret; +} + +static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc) +{ + struct dpu_hw_ctl *ctl; + + ctl = phys_enc->hw_ctl; + + DPU_DEBUG_VIDENC(phys_enc, "\n"); + + if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing)) + return; + + dpu_encoder_helper_split_config(phys_enc, phys_enc->hw_intf->idx); + + dpu_encoder_phys_vid_setup_timing_engine(phys_enc); + + /* + * For single flush cases (dual-ctl or pp-split), skip setting the + * flush bit for the slave intf, since both intfs use same ctl + * and HW will only flush the master. + */ + if (dpu_encoder_phys_vid_needs_single_flush(phys_enc) && + !dpu_encoder_phys_vid_is_master(phys_enc)) + goto skip_flush; + + ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx); + if (ctl->ops.update_pending_flush_merge_3d && phys_enc->hw_pp->merge_3d) + ctl->ops.update_pending_flush_merge_3d(ctl, phys_enc->hw_pp->merge_3d->idx); + +skip_flush: + DPU_DEBUG_VIDENC(phys_enc, + "update pending flush ctl %d intf %d\n", + ctl->idx - CTL_0, phys_enc->hw_intf->idx); + + atomic_set(&phys_enc->underrun_cnt, 0); + + /* ctl_flush & timing engine enable will be triggered by framework */ + if (phys_enc->enable_state == DPU_ENC_DISABLED) + phys_enc->enable_state = DPU_ENC_ENABLING; +} + +static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc) +{ + DPU_DEBUG_VIDENC(phys_enc, "\n"); + kfree(phys_enc); +} + +static int dpu_encoder_phys_vid_wait_for_vblank( + struct dpu_encoder_phys *phys_enc) +{ + struct dpu_encoder_wait_info wait_info; + int ret; + + wait_info.wq = &phys_enc->pending_kickoff_wq; + wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt; + wait_info.timeout_ms = KICKOFF_TIMEOUT_MS; + + if (!dpu_encoder_phys_vid_is_master(phys_enc)) { + return 0; + } + + /* Wait for kickoff to complete */ + ret = dpu_encoder_helper_wait_for_irq(phys_enc, + phys_enc->irq[INTR_IDX_VSYNC], + dpu_encoder_phys_vid_vblank_irq, + &wait_info); + + if (ret == -ETIMEDOUT) { + dpu_encoder_helper_report_irq_timeout(phys_enc, INTR_IDX_VSYNC); + } + + return ret; +} + +static int dpu_encoder_phys_vid_wait_for_commit_done( + struct dpu_encoder_phys *phys_enc) +{ + struct dpu_hw_ctl *hw_ctl = phys_enc->hw_ctl; + int ret; + + if (!hw_ctl) + return 0; + + ret = wait_event_timeout(phys_enc->pending_kickoff_wq, + (hw_ctl->ops.get_flush_register(hw_ctl) == 0), + msecs_to_jiffies(50)); + if (ret <= 0) { + DPU_ERROR("vblank timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void dpu_encoder_phys_vid_prepare_for_kickoff( + struct dpu_encoder_phys *phys_enc) +{ + struct dpu_hw_ctl *ctl; + int rc; + struct drm_encoder *drm_enc; + + drm_enc = phys_enc->parent; + + ctl = phys_enc->hw_ctl; + if (!ctl->ops.wait_reset_status) + return; + + /* + * hw supports hardware initiated ctl reset, so before we kickoff a new + * frame, need to check and wait for hw initiated ctl reset completion + */ + rc = ctl->ops.wait_reset_status(ctl); + if (rc) { + 
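/* + * The CTL reset did not complete in time: log it, capture a display + * snapshot for debugging, and unregister the vsync IRQ callback so a + * wedged CTL does not keep firing into a dead pipeline. + */ + 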
DPU_ERROR_VIDENC(phys_enc, "ctl %d reset failure: %d\n", + ctl->idx, rc); + msm_disp_snapshot_state(drm_enc->dev); + dpu_core_irq_unregister_callback(phys_enc->dpu_kms, + phys_enc->irq[INTR_IDX_VSYNC]); + } +} + +static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc) +{ + unsigned long lock_flags; + int ret; + + if (!phys_enc->parent || !phys_enc->parent->dev) { + DPU_ERROR("invalid encoder/device\n"); + return; + } + + if (!phys_enc->hw_intf) { + DPU_ERROR("invalid hw_intf %d hw_ctl %d\n", + phys_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL); + return; + } + + if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing)) + return; + + if (phys_enc->enable_state == DPU_ENC_DISABLED) { + DPU_ERROR("already disabled\n"); + return; + } + + spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); + phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 0); + if (dpu_encoder_phys_vid_is_master(phys_enc)) + dpu_encoder_phys_inc_pending(phys_enc); + spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); + + /* + * Wait for a vsync so we know the ENABLE=0 latched before + * the (connector) source of the vsync's gets disabled, + * otherwise we end up in a funny state if we re-enable + * before the disable latches, which can result in some of + * the settings for the new modeset (like the new + * scanout buffer) not latching properly. + */ + if (dpu_encoder_phys_vid_is_master(phys_enc)) { + ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc); + if (ret) { + atomic_set(&phys_enc->pending_kickoff_cnt, 0); + DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n", + DRMID(phys_enc->parent), + phys_enc->hw_intf->idx - INTF_0, ret); + } + } + + phys_enc->enable_state = DPU_ENC_DISABLED; +} + +static void dpu_encoder_phys_vid_handle_post_kickoff( + struct dpu_encoder_phys *phys_enc) +{ + unsigned long lock_flags; + + /* + * Video mode must flush CTL before enabling the timing engine. + * Video encoders need to turn on their interfaces now. + */ + if (phys_enc->enable_state == DPU_ENC_ENABLING) { + trace_dpu_enc_phys_vid_post_kickoff(DRMID(phys_enc->parent), + phys_enc->hw_intf->idx - INTF_0); + spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); + phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 1); + spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); + phys_enc->enable_state = DPU_ENC_ENABLED; + } +} + +static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc, + bool enable) +{ + int ret; + + trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent), + phys_enc->hw_intf->idx - INTF_0, + enable, + atomic_read(&phys_enc->vblank_refcount)); + + if (enable) { + ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true); + if (WARN_ON(ret)) + return; + + dpu_core_irq_register_callback(phys_enc->dpu_kms, + phys_enc->irq[INTR_IDX_UNDERRUN], + dpu_encoder_phys_vid_underrun_irq, + phys_enc); + } else { + dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false); + dpu_core_irq_unregister_callback(phys_enc->dpu_kms, + phys_enc->irq[INTR_IDX_UNDERRUN]); + } +} + +static int dpu_encoder_phys_vid_get_line_count( + struct dpu_encoder_phys *phys_enc) +{ + if (!dpu_encoder_phys_vid_is_master(phys_enc)) + return -EINVAL; + + if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_line_count) + return -EINVAL; + + return phys_enc->hw_intf->ops.get_line_count(phys_enc->hw_intf); +} + +static int dpu_encoder_phys_vid_get_frame_count( + struct dpu_encoder_phys *phys_enc) +{ + struct intf_status s = {0}; + u32 fetch_start = 0; + struct drm_display_mode mode = 
phys_enc->cached_mode; + + if (!dpu_encoder_phys_vid_is_master(phys_enc)) + return -EINVAL; + + if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_status) + return -EINVAL; + + phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf, &s); + + if (s.is_prog_fetch_en && s.is_en) { + fetch_start = mode.vtotal - (mode.vsync_start - mode.vdisplay); + if ((s.line_count > fetch_start) && + (s.line_count <= mode.vtotal)) + return s.frame_count + 1; + } + + return s.frame_count; +} + +static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops) +{ + ops->is_master = dpu_encoder_phys_vid_is_master; + ops->atomic_mode_set = dpu_encoder_phys_vid_atomic_mode_set; + ops->enable = dpu_encoder_phys_vid_enable; + ops->disable = dpu_encoder_phys_vid_disable; + ops->destroy = dpu_encoder_phys_vid_destroy; + ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq; + ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_commit_done; + ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank; + ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank; + ops->irq_control = dpu_encoder_phys_vid_irq_control; + ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff; + ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff; + ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush; + ops->get_line_count = dpu_encoder_phys_vid_get_line_count; + ops->get_frame_count = dpu_encoder_phys_vid_get_frame_count; +} + +struct dpu_encoder_phys *dpu_encoder_phys_vid_init( + struct dpu_enc_phys_init_params *p) +{ + struct dpu_encoder_phys *phys_enc = NULL; + int i; + + if (!p) { + DPU_ERROR("failed to create encoder due to invalid parameter\n"); + return ERR_PTR(-EINVAL); + } + + phys_enc = kzalloc(sizeof(*phys_enc), GFP_KERNEL); + if (!phys_enc) { + DPU_ERROR("failed to create encoder due to memory allocation error\n"); + return ERR_PTR(-ENOMEM); + } + + phys_enc->hw_mdptop = p->dpu_kms->hw_mdp; + phys_enc->intf_idx = p->intf_idx; + + DPU_DEBUG_VIDENC(phys_enc, "\n"); + + dpu_encoder_phys_vid_init_ops(&phys_enc->ops); + phys_enc->parent = p->parent; + phys_enc->parent_ops = p->parent_ops; + phys_enc->dpu_kms = p->dpu_kms; + phys_enc->split_role = p->split_role; + phys_enc->intf_mode = INTF_MODE_VIDEO; + phys_enc->enc_spinlock = p->enc_spinlock; + for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++) + phys_enc->irq[i] = -EINVAL; + + atomic_set(&phys_enc->vblank_refcount, 0); + atomic_set(&phys_enc->pending_kickoff_cnt, 0); + init_waitqueue_head(&phys_enc->pending_kickoff_wq); + phys_enc->enable_state = DPU_ENC_DISABLED; + + DPU_DEBUG_VIDENC(phys_enc, "created intf idx:%d\n", p->intf_idx); + + return phys_enc; +} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c new file mode 100644 index 000000000..42c7e378d --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c @@ -0,0 +1,752 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+
+#include <drm/drm_framebuffer.h>
+
+#include "dpu_encoder_phys.h"
+#include "dpu_formats.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_wb.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_merge3d.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_vbif.h"
+#include "dpu_crtc.h"
+#include "disp/msm_disp_snapshot.h"
+
+#define to_dpu_encoder_phys_wb(x) \
+ container_of(x, struct dpu_encoder_phys_wb, base)
+
+/**
+ * dpu_encoder_phys_wb_is_master - report wb always as master encoder
+ * @phys_enc: Pointer to physical encoder
+ */
+static bool dpu_encoder_phys_wb_is_master(struct dpu_encoder_phys *phys_enc)
+{
+ /* there is only one physical enc for dpu_writeback */
+ return true;
+}
+
+/**
+ * dpu_encoder_phys_wb_set_ot_limit - set OT limit for writeback interface
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_set_ot_limit(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
+ struct dpu_vbif_set_ot_params ot_params;
+
+ memset(&ot_params, 0, sizeof(ot_params));
+ ot_params.xin_id = hw_wb->caps->xin_id;
+ ot_params.num = hw_wb->idx - WB_0;
+ ot_params.width = phys_enc->cached_mode.hdisplay;
+ ot_params.height = phys_enc->cached_mode.vdisplay;
+ ot_params.is_wfd = true;
+ ot_params.frame_rate = drm_mode_vrefresh(&phys_enc->cached_mode);
+ ot_params.vbif_idx = hw_wb->caps->vbif_idx;
+ ot_params.clk_ctrl = hw_wb->caps->clk_ctrl;
+ ot_params.rd = false;
+
+ dpu_vbif_set_ot_limit(phys_enc->dpu_kms, &ot_params);
+}
+
+/**
+ * dpu_encoder_phys_wb_set_qos_remap - set QoS remapper for writeback
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_set_qos_remap(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_vbif_set_qos_params qos_params;
+
+ if (!phys_enc || !phys_enc->parent || !phys_enc->parent->crtc) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+
+ if (!phys_enc->hw_wb || !phys_enc->hw_wb->caps) {
+ DPU_ERROR("invalid writeback hardware\n");
+ return;
+ }
+
+ hw_wb = phys_enc->hw_wb;
+
+ memset(&qos_params, 0, sizeof(qos_params));
+ qos_params.vbif_idx = hw_wb->caps->vbif_idx;
+ qos_params.xin_id = hw_wb->caps->xin_id;
+ qos_params.clk_ctrl = hw_wb->caps->clk_ctrl;
+ qos_params.num = hw_wb->idx - WB_0;
+ qos_params.is_rt = false;
+
+ DPU_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d is_rt:%d\n",
+ qos_params.num,
+ qos_params.vbif_idx,
+ qos_params.xin_id, qos_params.is_rt);
+
+ dpu_vbif_set_qos_remap(phys_enc->dpu_kms, &qos_params);
+}
+
+/**
+ * dpu_encoder_phys_wb_set_qos - set QoS/danger/safe LUTs for writeback
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_set_qos(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_hw_wb_qos_cfg qos_cfg;
+ const struct dpu_mdss_cfg *catalog;
+ const struct dpu_qos_lut_tbl *qos_lut_tb;
+
+ if (!phys_enc || !phys_enc->dpu_kms || !phys_enc->dpu_kms->catalog) {
+ DPU_ERROR("invalid parameter(s)\n");
+ return;
+ }
+
+ catalog = phys_enc->dpu_kms->catalog;
+
+ hw_wb = phys_enc->hw_wb;
+
+ memset(&qos_cfg, 0, sizeof(struct dpu_hw_wb_qos_cfg));
+ qos_cfg.danger_safe_en = true;
+ qos_cfg.danger_lut =
+ catalog->perf->danger_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
+
+ qos_cfg.safe_lut = catalog->perf->safe_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
+
+ qos_lut_tb = &catalog->perf->qos_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
+ qos_cfg.creq_lut = _dpu_hw_get_qos_lut(qos_lut_tb, 0);
+
+ if (hw_wb->ops.setup_qos_lut)
+ hw_wb->ops.setup_qos_lut(hw_wb, &qos_cfg);
+}
+
+/**
+ * 
dpu_encoder_phys_wb_setup_fb - setup output framebuffer + * @phys_enc: Pointer to physical encoder + * @fb: Pointer to output framebuffer + */ +static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc, + struct drm_framebuffer *fb) +{ + struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc); + struct dpu_hw_wb *hw_wb; + struct dpu_hw_wb_cfg *wb_cfg; + struct dpu_hw_cdp_cfg cdp_cfg; + + if (!phys_enc || !phys_enc->dpu_kms || !phys_enc->dpu_kms->catalog) { + DPU_ERROR("invalid encoder\n"); + return; + } + + hw_wb = phys_enc->hw_wb; + wb_cfg = &wb_enc->wb_cfg; + + wb_cfg->intf_mode = phys_enc->intf_mode; + wb_cfg->roi.x1 = 0; + wb_cfg->roi.x2 = phys_enc->cached_mode.hdisplay; + wb_cfg->roi.y1 = 0; + wb_cfg->roi.y2 = phys_enc->cached_mode.vdisplay; + + if (hw_wb->ops.setup_roi) + hw_wb->ops.setup_roi(hw_wb, wb_cfg); + + if (hw_wb->ops.setup_outformat) + hw_wb->ops.setup_outformat(hw_wb, wb_cfg); + + if (hw_wb->ops.setup_cdp) { + memset(&cdp_cfg, 0, sizeof(struct dpu_hw_cdp_cfg)); + + cdp_cfg.enable = phys_enc->dpu_kms->catalog->perf->cdp_cfg + [DPU_PERF_CDP_USAGE_NRT].wr_enable; + cdp_cfg.ubwc_meta_enable = + DPU_FORMAT_IS_UBWC(wb_cfg->dest.format); + cdp_cfg.tile_amortize_enable = + DPU_FORMAT_IS_UBWC(wb_cfg->dest.format) || + DPU_FORMAT_IS_TILE(wb_cfg->dest.format); + cdp_cfg.preload_ahead = DPU_WB_CDP_PRELOAD_AHEAD_64; + + hw_wb->ops.setup_cdp(hw_wb, &cdp_cfg); + } + + if (hw_wb->ops.setup_outaddress) + hw_wb->ops.setup_outaddress(hw_wb, wb_cfg); +} + +/** + * dpu_encoder_phys_wb_setup_cdp - setup chroma down prefetch block + * @phys_enc:Pointer to physical encoder + */ +static void dpu_encoder_phys_wb_setup_cdp(struct dpu_encoder_phys *phys_enc) +{ + struct dpu_hw_wb *hw_wb; + struct dpu_hw_ctl *ctl; + + if (!phys_enc) { + DPU_ERROR("invalid encoder\n"); + return; + } + + hw_wb = phys_enc->hw_wb; + ctl = phys_enc->hw_ctl; + + if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) && + (phys_enc->hw_ctl && + phys_enc->hw_ctl->ops.setup_intf_cfg)) { + struct dpu_hw_intf_cfg intf_cfg = {0}; + struct dpu_hw_pingpong *hw_pp = phys_enc->hw_pp; + enum dpu_3d_blend_mode mode_3d; + + mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); + + intf_cfg.intf = DPU_NONE; + intf_cfg.wb = hw_wb->idx; + + if (mode_3d && hw_pp && hw_pp->merge_3d) + intf_cfg.merge_3d = hw_pp->merge_3d->idx; + + if (phys_enc->hw_pp->merge_3d && phys_enc->hw_pp->merge_3d->ops.setup_3d_mode) + phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, + mode_3d); + + /* setup which pp blk will connect to this wb */ + if (hw_pp && phys_enc->hw_wb->ops.bind_pingpong_blk) + phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, true, + phys_enc->hw_pp->idx); + + phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg); + } else if (phys_enc->hw_ctl && phys_enc->hw_ctl->ops.setup_intf_cfg) { + struct dpu_hw_intf_cfg intf_cfg = {0}; + + intf_cfg.intf = DPU_NONE; + intf_cfg.wb = hw_wb->idx; + intf_cfg.mode_3d = + dpu_encoder_helper_get_3d_blend_mode(phys_enc); + phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg); + } +} + +/** + * dpu_encoder_phys_wb_atomic_check - verify and fixup given atomic states + * @phys_enc: Pointer to physical encoder + * @crtc_state: Pointer to CRTC atomic state + * @conn_state: Pointer to connector atomic state + */ +static int dpu_encoder_phys_wb_atomic_check( + struct dpu_encoder_phys *phys_enc, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct drm_framebuffer *fb; + const struct 
drm_display_mode *mode = &crtc_state->mode; + + DPU_DEBUG("[atomic_check:%d, \"%s\",%d,%d]\n", + phys_enc->wb_idx, mode->name, mode->hdisplay, mode->vdisplay); + + if (!conn_state || !conn_state->connector) { + DPU_ERROR("invalid connector state\n"); + return -EINVAL; + } else if (conn_state->connector->status != + connector_status_connected) { + DPU_ERROR("connector not connected %d\n", + conn_state->connector->status); + return -EINVAL; + } + + if (!conn_state->writeback_job || !conn_state->writeback_job->fb) + return 0; + + fb = conn_state->writeback_job->fb; + + DPU_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id, + fb->width, fb->height); + + if (fb->width != mode->hdisplay) { + DPU_ERROR("invalid fb w=%d, mode w=%d\n", fb->width, + mode->hdisplay); + return -EINVAL; + } else if (fb->height != mode->vdisplay) { + DPU_ERROR("invalid fb h=%d, mode h=%d\n", fb->height, + mode->vdisplay); + return -EINVAL; + } else if (fb->width > phys_enc->hw_wb->caps->maxlinewidth) { + DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n", + fb->width, phys_enc->hw_wb->caps->maxlinewidth); + return -EINVAL; + } + + return 0; +} + + +/** + * _dpu_encoder_phys_wb_update_flush - flush hardware update + * @phys_enc: Pointer to physical encoder + */ +static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc) +{ + struct dpu_hw_wb *hw_wb; + struct dpu_hw_ctl *hw_ctl; + struct dpu_hw_pingpong *hw_pp; + u32 pending_flush = 0; + + if (!phys_enc) + return; + + hw_wb = phys_enc->hw_wb; + hw_pp = phys_enc->hw_pp; + hw_ctl = phys_enc->hw_ctl; + + DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0); + + if (!hw_ctl) { + DPU_DEBUG("[wb:%d] no ctl assigned\n", hw_wb->idx - WB_0); + return; + } + + if (hw_ctl->ops.update_pending_flush_wb) + hw_ctl->ops.update_pending_flush_wb(hw_ctl, hw_wb->idx); + + if (hw_ctl->ops.update_pending_flush_merge_3d && hw_pp && hw_pp->merge_3d) + hw_ctl->ops.update_pending_flush_merge_3d(hw_ctl, + hw_pp->merge_3d->idx); + + if (hw_ctl->ops.get_pending_flush) + pending_flush = hw_ctl->ops.get_pending_flush(hw_ctl); + + DPU_DEBUG("Pending flush mask for CTL_%d is 0x%x, WB %d\n", + hw_ctl->idx - CTL_0, pending_flush, + hw_wb->idx - WB_0); +} + +/** + * dpu_encoder_phys_wb_setup - setup writeback encoder + * @phys_enc: Pointer to physical encoder + */ +static void dpu_encoder_phys_wb_setup( + struct dpu_encoder_phys *phys_enc) +{ + struct dpu_hw_wb *hw_wb = phys_enc->hw_wb; + struct drm_display_mode mode = phys_enc->cached_mode; + struct drm_framebuffer *fb = NULL; + + DPU_DEBUG("[mode_set:%d, \"%s\",%d,%d]\n", + hw_wb->idx - WB_0, mode.name, + mode.hdisplay, mode.vdisplay); + + dpu_encoder_phys_wb_set_ot_limit(phys_enc); + + dpu_encoder_phys_wb_set_qos_remap(phys_enc); + + dpu_encoder_phys_wb_set_qos(phys_enc); + + dpu_encoder_phys_wb_setup_fb(phys_enc, fb); + + dpu_encoder_phys_wb_setup_cdp(phys_enc); + +} + +static void _dpu_encoder_phys_wb_frame_done_helper(void *arg) +{ + struct dpu_encoder_phys *phys_enc = arg; + struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc); + + struct dpu_hw_wb *hw_wb = phys_enc->hw_wb; + unsigned long lock_flags; + u32 event = DPU_ENCODER_FRAME_EVENT_DONE; + + DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0); + + if (phys_enc->parent_ops->handle_frame_done) + phys_enc->parent_ops->handle_frame_done(phys_enc->parent, + phys_enc, event); + + if (phys_enc->parent_ops->handle_vblank_virt) + phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent, + phys_enc); + + spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); + 
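+ /*
+ * Drop one pending kickoff, clamping at zero so a spurious
+ * WB_DONE interrupt cannot underflow the counter.
+ */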
+ atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ if (wb_enc->wb_conn)
+ drm_writeback_signal_completion(wb_enc->wb_conn, 0);
+
+ /* Signal any waiting atomic commit thread */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+}
+
+/**
+ * dpu_encoder_phys_wb_done_irq - writeback interrupt handler
+ * @arg: Pointer to writeback encoder
+ * @irq_idx: interrupt index
+ */
+static void dpu_encoder_phys_wb_done_irq(void *arg, int irq_idx)
+{
+ _dpu_encoder_phys_wb_frame_done_helper(arg);
+}
+
+/**
+ * dpu_encoder_phys_wb_irq_ctrl - irq control of WB
+ * @phys: Pointer to physical encoder
+ * @enable: indicates enable or disable interrupts
+ */
+static void dpu_encoder_phys_wb_irq_ctrl(
+ struct dpu_encoder_phys *phys, bool enable)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys);
+
+ if (enable && atomic_inc_return(&wb_enc->wbirq_refcount) == 1)
+ dpu_core_irq_register_callback(phys->dpu_kms,
+ phys->irq[INTR_IDX_WB_DONE], dpu_encoder_phys_wb_done_irq, phys);
+ else if (!enable &&
+ atomic_dec_return(&wb_enc->wbirq_refcount) == 0)
+ dpu_core_irq_unregister_callback(phys->dpu_kms, phys->irq[INTR_IDX_WB_DONE]);
+}
+
+static void dpu_encoder_phys_wb_atomic_mode_set(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ phys_enc->irq[INTR_IDX_WB_DONE] = phys_enc->hw_wb->caps->intr_wb_done;
+}
+
+static void _dpu_encoder_phys_wb_handle_wbdone_timeout(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+ u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
+
+ wb_enc->wb_done_timeout_cnt++;
+
+ if (wb_enc->wb_done_timeout_cnt == 1)
+ msm_disp_snapshot_state(phys_enc->parent->dev);
+
+ atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+
+ /* request a ctl reset before the next kickoff */
+ phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;
+
+ if (wb_enc->wb_conn)
+ drm_writeback_signal_completion(wb_enc->wb_conn, 0);
+
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc, frame_event);
+}
+
+/**
+ * dpu_encoder_phys_wb_wait_for_commit_done - wait until request is committed
+ * @phys_enc: Pointer to physical encoder
+ */
+static int dpu_encoder_phys_wb_wait_for_commit_done(
+ struct dpu_encoder_phys *phys_enc)
+{
+ int ret;
+ struct dpu_encoder_wait_info wait_info;
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc,
+ phys_enc->irq[INTR_IDX_WB_DONE],
+ dpu_encoder_phys_wb_done_irq, &wait_info);
+ if (ret == -ETIMEDOUT)
+ _dpu_encoder_phys_wb_handle_wbdone_timeout(phys_enc);
+ else if (!ret)
+ wb_enc->wb_done_timeout_cnt = 0;
+
+ return ret;
+}
+
+/**
+ * dpu_encoder_phys_wb_prepare_for_kickoff - pre-kickoff processing
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_prepare_for_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+ struct drm_connector *drm_conn;
+ struct drm_connector_state *state;
+
+ DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
+
+ if (!wb_enc->wb_conn || !wb_enc->wb_job) {
+ DPU_ERROR("invalid wb_conn or wb_job\n");
return; + } + + drm_conn = &wb_enc->wb_conn->base; + state = drm_conn->state; + + if (wb_enc->wb_conn && wb_enc->wb_job) + drm_writeback_queue_job(wb_enc->wb_conn, state); + + dpu_encoder_phys_wb_setup(phys_enc); + + _dpu_encoder_phys_wb_update_flush(phys_enc); +} + +/** + * dpu_encoder_phys_wb_needs_single_flush - trigger flush processing + * @phys_enc: Pointer to physical encoder + */ +static bool dpu_encoder_phys_wb_needs_single_flush(struct dpu_encoder_phys *phys_enc) +{ + DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0); + return false; +} + +/** + * dpu_encoder_phys_wb_handle_post_kickoff - post-kickoff processing + * @phys_enc: Pointer to physical encoder + */ +static void dpu_encoder_phys_wb_handle_post_kickoff( + struct dpu_encoder_phys *phys_enc) +{ + DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0); + +} + +/** + * dpu_encoder_phys_wb_enable - enable writeback encoder + * @phys_enc: Pointer to physical encoder + */ +static void dpu_encoder_phys_wb_enable(struct dpu_encoder_phys *phys_enc) +{ + DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0); + phys_enc->enable_state = DPU_ENC_ENABLED; +} +/** + * dpu_encoder_phys_wb_disable - disable writeback encoder + * @phys_enc: Pointer to physical encoder + */ +static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc) +{ + struct dpu_hw_wb *hw_wb = phys_enc->hw_wb; + struct dpu_hw_ctl *hw_ctl = phys_enc->hw_ctl; + + DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0); + + if (phys_enc->enable_state == DPU_ENC_DISABLED) { + DPU_ERROR("encoder is already disabled\n"); + return; + } + + /* reset h/w before final flush */ + if (phys_enc->hw_ctl->ops.clear_pending_flush) + phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl); + + /* + * New CTL reset sequence from 5.0 MDP onwards. + * If has_3d_merge_reset is not set, legacy reset + * sequence is executed. + * + * Legacy reset sequence has not been implemented yet. + * Any target earlier than SM8150 will need it and when + * WB support is added to those targets will need to add + * the legacy teardown sequence as well. 
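+ *
+ * Below, the new sequence is keyed off the DPU_CTL_ACTIVE_CFG
+ * feature bit, which only active-CTL (MDP 5.0 and later) targets
+ * advertise.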
+ */ + if (hw_ctl->caps->features & BIT(DPU_CTL_ACTIVE_CFG)) + dpu_encoder_helper_phys_cleanup(phys_enc); + + phys_enc->enable_state = DPU_ENC_DISABLED; +} + +/** + * dpu_encoder_phys_wb_destroy - destroy writeback encoder + * @phys_enc: Pointer to physical encoder + */ +static void dpu_encoder_phys_wb_destroy(struct dpu_encoder_phys *phys_enc) +{ + if (!phys_enc) + return; + + DPU_DEBUG("[wb:%d]\n", phys_enc->wb_idx - WB_0); + + kfree(phys_enc); +} + +static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc, + struct drm_writeback_job *job) +{ + const struct msm_format *format; + struct msm_gem_address_space *aspace; + struct dpu_hw_wb_cfg *wb_cfg; + int ret; + struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc); + + if (!job->fb) + return; + + wb_enc->wb_job = job; + wb_enc->wb_conn = job->connector; + aspace = phys_enc->dpu_kms->base.aspace; + + wb_cfg = &wb_enc->wb_cfg; + + memset(wb_cfg, 0, sizeof(struct dpu_hw_wb_cfg)); + + ret = msm_framebuffer_prepare(job->fb, aspace, false); + if (ret) { + DPU_ERROR("prep fb failed, %d\n", ret); + return; + } + + format = msm_framebuffer_format(job->fb); + + wb_cfg->dest.format = dpu_get_dpu_format_ext( + format->pixel_format, job->fb->modifier); + if (!wb_cfg->dest.format) { + /* this error should be detected during atomic_check */ + DPU_ERROR("failed to get format %x\n", format->pixel_format); + return; + } + + ret = dpu_format_populate_layout(aspace, job->fb, &wb_cfg->dest); + if (ret) { + DPU_DEBUG("failed to populate layout %d\n", ret); + return; + } + + wb_cfg->dest.width = job->fb->width; + wb_cfg->dest.height = job->fb->height; + wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes; + + if ((wb_cfg->dest.format->fetch_planes == DPU_PLANE_PLANAR) && + (wb_cfg->dest.format->element[0] == C1_B_Cb)) + swap(wb_cfg->dest.plane_addr[1], wb_cfg->dest.plane_addr[2]); + + DPU_DEBUG("[fb_offset:%8.8x,%8.8x,%8.8x,%8.8x]\n", + wb_cfg->dest.plane_addr[0], wb_cfg->dest.plane_addr[1], + wb_cfg->dest.plane_addr[2], wb_cfg->dest.plane_addr[3]); + + DPU_DEBUG("[fb_stride:%8.8x,%8.8x,%8.8x,%8.8x]\n", + wb_cfg->dest.plane_pitch[0], wb_cfg->dest.plane_pitch[1], + wb_cfg->dest.plane_pitch[2], wb_cfg->dest.plane_pitch[3]); +} + +static void dpu_encoder_phys_wb_cleanup_wb_job(struct dpu_encoder_phys *phys_enc, + struct drm_writeback_job *job) +{ + struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc); + struct msm_gem_address_space *aspace; + + if (!job->fb) + return; + + aspace = phys_enc->dpu_kms->base.aspace; + + msm_framebuffer_cleanup(job->fb, aspace, false); + wb_enc->wb_job = NULL; + wb_enc->wb_conn = NULL; +} + +static bool dpu_encoder_phys_wb_is_valid_for_commit(struct dpu_encoder_phys *phys_enc) +{ + struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc); + + if (wb_enc->wb_job) + return true; + else + return false; +} + +/** + * dpu_encoder_phys_wb_init_ops - initialize writeback operations + * @ops: Pointer to encoder operation table + */ +static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops) +{ + ops->is_master = dpu_encoder_phys_wb_is_master; + ops->atomic_mode_set = dpu_encoder_phys_wb_atomic_mode_set; + ops->enable = dpu_encoder_phys_wb_enable; + ops->disable = dpu_encoder_phys_wb_disable; + ops->destroy = dpu_encoder_phys_wb_destroy; + ops->atomic_check = dpu_encoder_phys_wb_atomic_check; + ops->wait_for_commit_done = dpu_encoder_phys_wb_wait_for_commit_done; + ops->prepare_for_kickoff = dpu_encoder_phys_wb_prepare_for_kickoff; + 
ops->handle_post_kickoff = dpu_encoder_phys_wb_handle_post_kickoff;
+ ops->needs_single_flush = dpu_encoder_phys_wb_needs_single_flush;
+ ops->trigger_start = dpu_encoder_helper_trigger_start;
+ ops->prepare_wb_job = dpu_encoder_phys_wb_prepare_wb_job;
+ ops->cleanup_wb_job = dpu_encoder_phys_wb_cleanup_wb_job;
+ ops->irq_control = dpu_encoder_phys_wb_irq_ctrl;
+ ops->is_valid_for_commit = dpu_encoder_phys_wb_is_valid_for_commit;
+}
+
+/**
+ * dpu_encoder_phys_wb_init - initialize writeback encoder
+ * @p: Pointer to init info structure with initialization params
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
+ struct dpu_enc_phys_init_params *p)
+{
+ struct dpu_encoder_phys *phys_enc = NULL;
+ struct dpu_encoder_phys_wb *wb_enc = NULL;
+ int ret = 0;
+ int i;
+
+ DPU_DEBUG("\n");
+
+ if (!p || !p->parent) {
+ DPU_ERROR("invalid params\n");
+ ret = -EINVAL;
+ goto fail_alloc;
+ }
+
+ wb_enc = kzalloc(sizeof(*wb_enc), GFP_KERNEL);
+ if (!wb_enc) {
+ DPU_ERROR("failed to allocate wb phys_enc enc\n");
+ ret = -ENOMEM;
+ goto fail_alloc;
+ }
+
+ phys_enc = &wb_enc->base;
+ phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
+ phys_enc->wb_idx = p->wb_idx;
+
+ dpu_encoder_phys_wb_init_ops(&phys_enc->ops);
+ phys_enc->parent = p->parent;
+ phys_enc->parent_ops = p->parent_ops;
+ phys_enc->dpu_kms = p->dpu_kms;
+ phys_enc->split_role = p->split_role;
+ phys_enc->intf_mode = INTF_MODE_WB_LINE;
+ phys_enc->wb_idx = p->wb_idx;
+ phys_enc->enc_spinlock = p->enc_spinlock;
+
+ atomic_set(&wb_enc->wbirq_refcount, 0);
+
+ for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
+ phys_enc->irq[i] = -EINVAL;
+
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ atomic_set(&phys_enc->vblank_refcount, 0);
+ wb_enc->wb_done_timeout_cnt = 0;
+
+ init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+
+ DPU_DEBUG("Created dpu_encoder_phys for wb %d\n",
+ phys_enc->wb_idx);
+
+ return phys_enc;
+
+fail_alloc:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
new file mode 100644
index 000000000..f436a1f34
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -0,0 +1,1043 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <uapi/drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+
+#include "msm_media_info.h"
+#include "dpu_kms.h"
+#include "dpu_formats.h"
+
+#define DPU_UBWC_META_MACRO_W_H 16
+#define DPU_UBWC_META_BLOCK_SIZE 256
+#define DPU_UBWC_PLANE_SIZE_ALIGNMENT 4096
+
+#define DPU_TILE_HEIGHT_DEFAULT 1
+#define DPU_TILE_HEIGHT_TILED 4
+#define DPU_TILE_HEIGHT_UBWC 4
+#define DPU_TILE_HEIGHT_NV12 8
+
+#define DPU_MAX_IMG_WIDTH 0x3FFF
+#define DPU_MAX_IMG_HEIGHT 0x3FFF
+
+/*
+ * DPU supported format packing, bpp, and other format
+ * information.
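+ * Each *_FMT() macro below captures, for one DRM fourcc, the
+ * component order (element[]), per-component bit depths, unpack
+ * parameters, bytes per pixel, fetch mode and plane count.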
+ * DPU currently only supports interleaved RGB formats + * UBWC support for a pixel format is indicated by the flag, + * there is additional meta data plane for such formats + */ + +#define INTERLEAVED_RGB_FMT(fmt, a, r, g, b, e0, e1, e2, e3, uc, alpha, \ +bp, flg, fm, np) \ +{ \ + .base.pixel_format = DRM_FORMAT_ ## fmt, \ + .fetch_planes = DPU_PLANE_INTERLEAVED, \ + .alpha_enable = alpha, \ + .element = { (e0), (e1), (e2), (e3) }, \ + .bits = { g, b, r, a }, \ + .chroma_sample = DPU_CHROMA_RGB, \ + .unpack_align_msb = 0, \ + .unpack_tight = 1, \ + .unpack_count = uc, \ + .bpp = bp, \ + .fetch_mode = fm, \ + .flag = {(flg)}, \ + .num_planes = np, \ + .tile_height = DPU_TILE_HEIGHT_DEFAULT \ +} + +#define INTERLEAVED_RGB_FMT_TILED(fmt, a, r, g, b, e0, e1, e2, e3, uc, \ +alpha, bp, flg, fm, np, th) \ +{ \ + .base.pixel_format = DRM_FORMAT_ ## fmt, \ + .fetch_planes = DPU_PLANE_INTERLEAVED, \ + .alpha_enable = alpha, \ + .element = { (e0), (e1), (e2), (e3) }, \ + .bits = { g, b, r, a }, \ + .chroma_sample = DPU_CHROMA_RGB, \ + .unpack_align_msb = 0, \ + .unpack_tight = 1, \ + .unpack_count = uc, \ + .bpp = bp, \ + .fetch_mode = fm, \ + .flag = {(flg)}, \ + .num_planes = np, \ + .tile_height = th \ +} + + +#define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3, \ +alpha, chroma, count, bp, flg, fm, np) \ +{ \ + .base.pixel_format = DRM_FORMAT_ ## fmt, \ + .fetch_planes = DPU_PLANE_INTERLEAVED, \ + .alpha_enable = alpha, \ + .element = { (e0), (e1), (e2), (e3)}, \ + .bits = { g, b, r, a }, \ + .chroma_sample = chroma, \ + .unpack_align_msb = 0, \ + .unpack_tight = 1, \ + .unpack_count = count, \ + .bpp = bp, \ + .fetch_mode = fm, \ + .flag = {(flg)}, \ + .num_planes = np, \ + .tile_height = DPU_TILE_HEIGHT_DEFAULT \ +} + +#define PSEUDO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np) \ +{ \ + .base.pixel_format = DRM_FORMAT_ ## fmt, \ + .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \ + .alpha_enable = false, \ + .element = { (e0), (e1), 0, 0 }, \ + .bits = { g, b, r, a }, \ + .chroma_sample = chroma, \ + .unpack_align_msb = 0, \ + .unpack_tight = 1, \ + .unpack_count = 2, \ + .bpp = 2, \ + .fetch_mode = fm, \ + .flag = {(flg)}, \ + .num_planes = np, \ + .tile_height = DPU_TILE_HEIGHT_DEFAULT \ +} + +#define PSEUDO_YUV_FMT_TILED(fmt, a, r, g, b, e0, e1, chroma, \ +flg, fm, np, th) \ +{ \ + .base.pixel_format = DRM_FORMAT_ ## fmt, \ + .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \ + .alpha_enable = false, \ + .element = { (e0), (e1), 0, 0 }, \ + .bits = { g, b, r, a }, \ + .chroma_sample = chroma, \ + .unpack_align_msb = 0, \ + .unpack_tight = 1, \ + .unpack_count = 2, \ + .bpp = 2, \ + .fetch_mode = fm, \ + .flag = {(flg)}, \ + .num_planes = np, \ + .tile_height = th \ +} + +#define PSEUDO_YUV_FMT_LOOSE(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)\ +{ \ + .base.pixel_format = DRM_FORMAT_ ## fmt, \ + .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \ + .alpha_enable = false, \ + .element = { (e0), (e1), 0, 0 }, \ + .bits = { g, b, r, a }, \ + .chroma_sample = chroma, \ + .unpack_align_msb = 1, \ + .unpack_tight = 0, \ + .unpack_count = 2, \ + .bpp = 2, \ + .fetch_mode = fm, \ + .flag = {(flg)}, \ + .num_planes = np, \ + .tile_height = DPU_TILE_HEIGHT_DEFAULT \ +} + +#define PSEUDO_YUV_FMT_LOOSE_TILED(fmt, a, r, g, b, e0, e1, chroma, \ +flg, fm, np, th) \ +{ \ + .base.pixel_format = DRM_FORMAT_ ## fmt, \ + .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \ + .alpha_enable = false, \ + .element = { (e0), (e1), 0, 0 }, \ + .bits = { g, b, r, a }, \ + .chroma_sample = chroma, \ + .unpack_align_msb = 1, \ + 
.unpack_tight = 0, \ + .unpack_count = 2, \ + .bpp = 2, \ + .fetch_mode = fm, \ + .flag = {(flg)}, \ + .num_planes = np, \ + .tile_height = th \ +} + + +#define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp, \ +flg, fm, np) \ +{ \ + .base.pixel_format = DRM_FORMAT_ ## fmt, \ + .fetch_planes = DPU_PLANE_PLANAR, \ + .alpha_enable = alpha, \ + .element = { (e0), (e1), (e2), 0 }, \ + .bits = { g, b, r, a }, \ + .chroma_sample = chroma, \ + .unpack_align_msb = 0, \ + .unpack_tight = 1, \ + .unpack_count = 1, \ + .bpp = bp, \ + .fetch_mode = fm, \ + .flag = {(flg)}, \ + .num_planes = np, \ + .tile_height = DPU_TILE_HEIGHT_DEFAULT \ +} + +/* + * struct dpu_media_color_map - maps drm format to media format + * @format: DRM base pixel format + * @color: Media API color related to DRM format + */ +struct dpu_media_color_map { + uint32_t format; + uint32_t color; +}; + +static const struct dpu_format dpu_format_map[] = { + INTERLEAVED_RGB_FMT(ARGB8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, + true, 4, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(ABGR8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + true, 4, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(XBGR8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + false, 4, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(RGBA8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, + true, 4, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(BGRA8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, + true, 4, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(BGRX8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, + false, 4, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(XRGB8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, + false, 4, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(RGBX8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, + false, 4, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(RGB888, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3, + false, 3, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(BGR888, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, + false, 3, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(RGB565, + 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3, + false, 2, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(BGR565, + 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, + false, 2, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(ARGB1555, + COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, + true, 2, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(ABGR1555, + COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + true, 2, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(RGBA5551, + COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT, + C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, + true, 2, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(BGRA5551, + COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT, + C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, + true, 2, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(XRGB1555, + 
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, + false, 2, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(XBGR1555, + COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + false, 2, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(RGBX5551, + COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT, + C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, + false, 2, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(BGRX5551, + COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT, + C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, + false, 2, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(ARGB4444, + COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, + true, 2, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(ABGR4444, + COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + true, 2, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(RGBA4444, + COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT, + C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, + true, 2, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(BGRA4444, + COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT, + C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, + true, 2, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(XRGB4444, + COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, + false, 2, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(XBGR4444, + COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + false, 2, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(RGBX4444, + COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT, + C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, + false, 2, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(BGRX4444, + COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT, + C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, + false, 2, 0, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(BGRA1010102, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, + true, 4, DPU_FORMAT_FLAG_DX, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(RGBA1010102, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, + true, 4, DPU_FORMAT_FLAG_DX, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(ABGR2101010, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + true, 4, DPU_FORMAT_FLAG_DX, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(ARGB2101010, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, + true, 4, DPU_FORMAT_FLAG_DX, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(XRGB2101010, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4, + false, 4, DPU_FORMAT_FLAG_DX, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(BGRX1010102, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4, + false, 4, DPU_FORMAT_FLAG_DX, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(XBGR2101010, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + false, 4, DPU_FORMAT_FLAG_DX, + DPU_FETCH_LINEAR, 1), + + INTERLEAVED_RGB_FMT(RGBX1010102, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4, + false, 4, DPU_FORMAT_FLAG_DX, + DPU_FETCH_LINEAR, 1), + + PSEUDO_YUV_FMT(NV12, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C2_R_Cr, 
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV, + DPU_FETCH_LINEAR, 2), + + PSEUDO_YUV_FMT(NV21, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C1_B_Cb, + DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV, + DPU_FETCH_LINEAR, 2), + + PSEUDO_YUV_FMT(NV16, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C2_R_Cr, + DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV, + DPU_FETCH_LINEAR, 2), + + PSEUDO_YUV_FMT(NV61, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C1_B_Cb, + DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV, + DPU_FETCH_LINEAR, 2), + + INTERLEAVED_YUV_FMT(VYUY, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y, + false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV, + DPU_FETCH_LINEAR, 2), + + INTERLEAVED_YUV_FMT(UYVY, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y, + false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV, + DPU_FETCH_LINEAR, 2), + + INTERLEAVED_YUV_FMT(YUYV, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr, + false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV, + DPU_FETCH_LINEAR, 2), + + INTERLEAVED_YUV_FMT(YVYU, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb, + false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV, + DPU_FETCH_LINEAR, 2), + + PLANAR_YUV_FMT(YUV420, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C1_B_Cb, C0_G_Y, + false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV, + DPU_FETCH_LINEAR, 3), + + PLANAR_YUV_FMT(YVU420, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C2_R_Cr, C0_G_Y, + false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV, + DPU_FETCH_LINEAR, 3), +}; + +/* + * UBWC formats table: + * This table holds the UBWC formats supported. + * If a compression ratio needs to be used for this or any other format, + * the data will be passed by user-space. + */ +static const struct dpu_format dpu_format_map_ubwc[] = { + INTERLEAVED_RGB_FMT_TILED(BGR565, + 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, + false, 2, DPU_FORMAT_FLAG_COMPRESSED, + DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), + + INTERLEAVED_RGB_FMT_TILED(ABGR8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + true, 4, DPU_FORMAT_FLAG_COMPRESSED, + DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), + + /* ARGB8888 and ABGR8888 purposely have the same color + * ordering. The hardware only supports ABGR8888 UBWC + * natively. 
+ */ + INTERLEAVED_RGB_FMT_TILED(ARGB8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + true, 4, DPU_FORMAT_FLAG_COMPRESSED, + DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), + + INTERLEAVED_RGB_FMT_TILED(XBGR8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + false, 4, DPU_FORMAT_FLAG_COMPRESSED, + DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), + + INTERLEAVED_RGB_FMT_TILED(XRGB8888, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + false, 4, DPU_FORMAT_FLAG_COMPRESSED, + DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), + + INTERLEAVED_RGB_FMT_TILED(ABGR2101010, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED, + DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), + + INTERLEAVED_RGB_FMT_TILED(XBGR2101010, + COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4, + true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED, + DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC), + + PSEUDO_YUV_FMT_TILED(NV12, + 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, + C1_B_Cb, C2_R_Cr, + DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV | + DPU_FORMAT_FLAG_COMPRESSED, + DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12), +}; + +/* _dpu_get_v_h_subsample_rate - Get subsample rates for all formats we support + * Note: Not using the drm_format_*_subsampling since we have formats + */ +static void _dpu_get_v_h_subsample_rate( + enum dpu_chroma_samp_type chroma_sample, + uint32_t *v_sample, + uint32_t *h_sample) +{ + if (!v_sample || !h_sample) + return; + + switch (chroma_sample) { + case DPU_CHROMA_H2V1: + *v_sample = 1; + *h_sample = 2; + break; + case DPU_CHROMA_H1V2: + *v_sample = 2; + *h_sample = 1; + break; + case DPU_CHROMA_420: + *v_sample = 2; + *h_sample = 2; + break; + default: + *v_sample = 1; + *h_sample = 1; + break; + } +} + +static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt) +{ + static const struct dpu_media_color_map dpu_media_ubwc_map[] = { + {DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC}, + {DRM_FORMAT_ARGB8888, COLOR_FMT_RGBA8888_UBWC}, + {DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC}, + {DRM_FORMAT_XRGB8888, COLOR_FMT_RGBA8888_UBWC}, + {DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC}, + {DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC}, + {DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC}, + }; + int color_fmt = -1; + int i; + + if (fmt->base.pixel_format == DRM_FORMAT_NV12) { + if (DPU_FORMAT_IS_DX(fmt)) { + if (fmt->unpack_tight) + color_fmt = COLOR_FMT_NV12_BPP10_UBWC; + else + color_fmt = COLOR_FMT_P010_UBWC; + } else + color_fmt = COLOR_FMT_NV12_UBWC; + return color_fmt; + } + + for (i = 0; i < ARRAY_SIZE(dpu_media_ubwc_map); ++i) + if (fmt->base.pixel_format == dpu_media_ubwc_map[i].format) { + color_fmt = dpu_media_ubwc_map[i].color; + break; + } + return color_fmt; +} + +static int _dpu_format_get_plane_sizes_ubwc( + const struct dpu_format *fmt, + const uint32_t width, + const uint32_t height, + struct dpu_hw_fmt_layout *layout) +{ + int i; + int color; + bool meta = DPU_FORMAT_IS_UBWC(fmt); + + memset(layout, 0, sizeof(struct dpu_hw_fmt_layout)); + layout->format = fmt; + layout->width = width; + layout->height = height; + layout->num_planes = fmt->num_planes; + + color = _dpu_format_get_media_color_ubwc(fmt); + if (color < 0) { + DRM_ERROR("UBWC format not supported for fmt: %4.4s\n", + (char *)&fmt->base.pixel_format); + return -EINVAL; + } + + if 
(DPU_FORMAT_IS_YUV(layout->format)) { + uint32_t y_sclines, uv_sclines; + uint32_t y_meta_scanlines = 0; + uint32_t uv_meta_scanlines = 0; + + layout->num_planes = 2; + layout->plane_pitch[0] = VENUS_Y_STRIDE(color, width); + y_sclines = VENUS_Y_SCANLINES(color, height); + layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] * + y_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); + + layout->plane_pitch[1] = VENUS_UV_STRIDE(color, width); + uv_sclines = VENUS_UV_SCANLINES(color, height); + layout->plane_size[1] = MSM_MEDIA_ALIGN(layout->plane_pitch[1] * + uv_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); + + if (!meta) + goto done; + + layout->num_planes += 2; + layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, width); + y_meta_scanlines = VENUS_Y_META_SCANLINES(color, height); + layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] * + y_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); + + layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, width); + uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, height); + layout->plane_size[3] = MSM_MEDIA_ALIGN(layout->plane_pitch[3] * + uv_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); + + } else { + uint32_t rgb_scanlines, rgb_meta_scanlines; + + layout->num_planes = 1; + + layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, width); + rgb_scanlines = VENUS_RGB_SCANLINES(color, height); + layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] * + rgb_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); + + if (!meta) + goto done; + layout->num_planes += 2; + layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, width); + rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, height); + layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] * + rgb_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT); + } + +done: + for (i = 0; i < DPU_MAX_PLANES; i++) + layout->total_size += layout->plane_size[i]; + + return 0; +} + +static int _dpu_format_get_plane_sizes_linear( + const struct dpu_format *fmt, + const uint32_t width, + const uint32_t height, + struct dpu_hw_fmt_layout *layout, + const uint32_t *pitches) +{ + int i; + + memset(layout, 0, sizeof(struct dpu_hw_fmt_layout)); + layout->format = fmt; + layout->width = width; + layout->height = height; + layout->num_planes = fmt->num_planes; + + /* Due to memset above, only need to set planes of interest */ + if (fmt->fetch_planes == DPU_PLANE_INTERLEAVED) { + layout->num_planes = 1; + layout->plane_size[0] = width * height * layout->format->bpp; + layout->plane_pitch[0] = width * layout->format->bpp; + } else { + uint32_t v_subsample, h_subsample; + uint32_t chroma_samp; + uint32_t bpp = 1; + + chroma_samp = fmt->chroma_sample; + _dpu_get_v_h_subsample_rate(chroma_samp, &v_subsample, + &h_subsample); + + if (width % h_subsample || height % v_subsample) { + DRM_ERROR("mismatch in subsample vs dimensions\n"); + return -EINVAL; + } + + if ((fmt->base.pixel_format == DRM_FORMAT_NV12) && + (DPU_FORMAT_IS_DX(fmt))) + bpp = 2; + layout->plane_pitch[0] = width * bpp; + layout->plane_pitch[1] = layout->plane_pitch[0] / h_subsample; + layout->plane_size[0] = layout->plane_pitch[0] * height; + layout->plane_size[1] = layout->plane_pitch[1] * + (height / v_subsample); + + if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) { + layout->num_planes = 2; + layout->plane_size[1] *= 2; + layout->plane_pitch[1] *= 2; + } else { + /* planar */ + layout->num_planes = 3; + layout->plane_size[2] = layout->plane_size[1]; + layout->plane_pitch[2] = layout->plane_pitch[1]; + } + } + + /* + * linear format: allow user allocated 
pitches if they are greater than + * the requirement. + * ubwc format: pitch values are computed uniformly across + * all the components based on ubwc specifications. + */ + for (i = 0; i < layout->num_planes && i < DPU_MAX_PLANES; ++i) { + if (pitches && layout->plane_pitch[i] < pitches[i]) + layout->plane_pitch[i] = pitches[i]; + } + + for (i = 0; i < DPU_MAX_PLANES; i++) + layout->total_size += layout->plane_size[i]; + + return 0; +} + +static int dpu_format_get_plane_sizes( + const struct dpu_format *fmt, + const uint32_t w, + const uint32_t h, + struct dpu_hw_fmt_layout *layout, + const uint32_t *pitches) +{ + if (!layout || !fmt) { + DRM_ERROR("invalid pointer\n"); + return -EINVAL; + } + + if ((w > DPU_MAX_IMG_WIDTH) || (h > DPU_MAX_IMG_HEIGHT)) { + DRM_ERROR("image dimensions outside max range\n"); + return -ERANGE; + } + + if (DPU_FORMAT_IS_UBWC(fmt) || DPU_FORMAT_IS_TILE(fmt)) + return _dpu_format_get_plane_sizes_ubwc(fmt, w, h, layout); + + return _dpu_format_get_plane_sizes_linear(fmt, w, h, layout, pitches); +} + +static int _dpu_format_populate_addrs_ubwc( + struct msm_gem_address_space *aspace, + struct drm_framebuffer *fb, + struct dpu_hw_fmt_layout *layout) +{ + uint32_t base_addr = 0; + bool meta; + + if (!fb || !layout) { + DRM_ERROR("invalid pointers\n"); + return -EINVAL; + } + + if (aspace) + base_addr = msm_framebuffer_iova(fb, aspace, 0); + if (!base_addr) { + DRM_ERROR("failed to retrieve base addr\n"); + return -EFAULT; + } + + meta = DPU_FORMAT_IS_UBWC(layout->format); + + /* Per-format logic for verifying active planes */ + if (DPU_FORMAT_IS_YUV(layout->format)) { + /************************************************/ + /* UBWC ** */ + /* buffer ** DPU PLANE */ + /* format ** */ + /************************************************/ + /* ------------------- ** -------------------- */ + /* | Y meta | ** | Y bitstream | */ + /* | data | ** | plane | */ + /* ------------------- ** -------------------- */ + /* | Y bitstream | ** | CbCr bitstream | */ + /* | data | ** | plane | */ + /* ------------------- ** -------------------- */ + /* | Cbcr metadata | ** | Y meta | */ + /* | data | ** | plane | */ + /* ------------------- ** -------------------- */ + /* | CbCr bitstream | ** | CbCr meta | */ + /* | data | ** | plane | */ + /* ------------------- ** -------------------- */ + /************************************************/ + + /* configure Y bitstream plane */ + layout->plane_addr[0] = base_addr + layout->plane_size[2]; + + /* configure CbCr bitstream plane */ + layout->plane_addr[1] = base_addr + layout->plane_size[0] + + layout->plane_size[2] + layout->plane_size[3]; + + if (!meta) + return 0; + + /* configure Y metadata plane */ + layout->plane_addr[2] = base_addr; + + /* configure CbCr metadata plane */ + layout->plane_addr[3] = base_addr + layout->plane_size[0] + + layout->plane_size[2]; + + } else { + /************************************************/ + /* UBWC ** */ + /* buffer ** DPU PLANE */ + /* format ** */ + /************************************************/ + /* ------------------- ** -------------------- */ + /* | RGB meta | ** | RGB bitstream | */ + /* | data | ** | plane | */ + /* ------------------- ** -------------------- */ + /* | RGB bitstream | ** | NONE | */ + /* | data | ** | | */ + /* ------------------- ** -------------------- */ + /* ** | RGB meta | */ + /* ** | plane | */ + /* ** -------------------- */ + /************************************************/ + + layout->plane_addr[0] = base_addr + layout->plane_size[2]; + layout->plane_addr[1] 
= 0; + + if (!meta) + return 0; + + layout->plane_addr[2] = base_addr; + layout->plane_addr[3] = 0; + } + return 0; +} + +static int _dpu_format_populate_addrs_linear( + struct msm_gem_address_space *aspace, + struct drm_framebuffer *fb, + struct dpu_hw_fmt_layout *layout) +{ + unsigned int i; + + /* Can now check the pitches given vs pitches expected */ + for (i = 0; i < layout->num_planes; ++i) { + if (layout->plane_pitch[i] > fb->pitches[i]) { + DRM_ERROR("plane %u expected pitch %u, fb %u\n", + i, layout->plane_pitch[i], fb->pitches[i]); + return -EINVAL; + } + } + + /* Populate addresses for simple formats here */ + for (i = 0; i < layout->num_planes; ++i) { + if (aspace) + layout->plane_addr[i] = + msm_framebuffer_iova(fb, aspace, i); + if (!layout->plane_addr[i]) { + DRM_ERROR("failed to retrieve base addr\n"); + return -EFAULT; + } + } + + return 0; +} + +int dpu_format_populate_layout( + struct msm_gem_address_space *aspace, + struct drm_framebuffer *fb, + struct dpu_hw_fmt_layout *layout) +{ + uint32_t plane_addr[DPU_MAX_PLANES]; + int i, ret; + + if (!fb || !layout) { + DRM_ERROR("invalid arguments\n"); + return -EINVAL; + } + + if ((fb->width > DPU_MAX_IMG_WIDTH) || + (fb->height > DPU_MAX_IMG_HEIGHT)) { + DRM_ERROR("image dimensions outside max range\n"); + return -ERANGE; + } + + layout->format = to_dpu_format(msm_framebuffer_format(fb)); + + /* Populate the plane sizes etc via get_format */ + ret = dpu_format_get_plane_sizes(layout->format, fb->width, fb->height, + layout, fb->pitches); + if (ret) + return ret; + + for (i = 0; i < DPU_MAX_PLANES; ++i) + plane_addr[i] = layout->plane_addr[i]; + + /* Populate the addresses given the fb */ + if (DPU_FORMAT_IS_UBWC(layout->format) || + DPU_FORMAT_IS_TILE(layout->format)) + ret = _dpu_format_populate_addrs_ubwc(aspace, fb, layout); + else + ret = _dpu_format_populate_addrs_linear(aspace, fb, layout); + + /* check if anything changed */ + if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr))) + ret = -EAGAIN; + + return ret; +} + +int dpu_format_check_modified_format( + const struct msm_kms *kms, + const struct msm_format *msm_fmt, + const struct drm_mode_fb_cmd2 *cmd, + struct drm_gem_object **bos) +{ + const struct drm_format_info *info; + const struct dpu_format *fmt; + struct dpu_hw_fmt_layout layout; + uint32_t bos_total_size = 0; + int ret, i; + + if (!msm_fmt || !cmd || !bos) { + DRM_ERROR("invalid arguments\n"); + return -EINVAL; + } + + fmt = to_dpu_format(msm_fmt); + info = drm_format_info(fmt->base.pixel_format); + if (!info) + return -EINVAL; + + ret = dpu_format_get_plane_sizes(fmt, cmd->width, cmd->height, + &layout, cmd->pitches); + if (ret) + return ret; + + for (i = 0; i < info->num_planes; i++) { + if (!bos[i]) { + DRM_ERROR("invalid handle for plane %d\n", i); + return -EINVAL; + } + if ((i == 0) || (bos[i] != bos[0])) + bos_total_size += bos[i]->size; + } + + if (bos_total_size < layout.total_size) { + DRM_ERROR("buffers total size too small %u expected %u\n", + bos_total_size, layout.total_size); + return -EINVAL; + } + + return 0; +} + +const struct dpu_format *dpu_get_dpu_format_ext( + const uint32_t format, + const uint64_t modifier) +{ + uint32_t i = 0; + const struct dpu_format *fmt = NULL; + const struct dpu_format *map = NULL; + ssize_t map_size = 0; + + /* + * Currently only support exactly zero or one modifier. + * All planes use the same modifier. 
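+ * A modifier of 0 selects the linear dpu_format_map table, while
+ * DRM_FORMAT_MOD_QCOM_COMPRESSED selects dpu_format_map_ubwc.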
+ */
+ DRM_DEBUG_ATOMIC("plane format modifier 0x%llX\n", modifier);
+
+ switch (modifier) {
+ case 0:
+ map = dpu_format_map;
+ map_size = ARRAY_SIZE(dpu_format_map);
+ break;
+ case DRM_FORMAT_MOD_QCOM_COMPRESSED:
+ map = dpu_format_map_ubwc;
+ map_size = ARRAY_SIZE(dpu_format_map_ubwc);
+ DRM_DEBUG_ATOMIC("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
+ (char *)&format);
+ break;
+ default:
+ DPU_ERROR("unsupported format modifier %llX\n", modifier);
+ return NULL;
+ }
+
+ for (i = 0; i < map_size; i++) {
+ if (format == map[i].base.pixel_format) {
+ fmt = &map[i];
+ break;
+ }
+ }
+
+ if (fmt == NULL)
+ DPU_ERROR("unsupported fmt: %4.4s modifier 0x%llX\n",
+ (char *)&format, modifier);
+ else
+ DRM_DEBUG_ATOMIC("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n",
+ (char *)&format, modifier,
+ DPU_FORMAT_IS_UBWC(fmt),
+ DPU_FORMAT_IS_YUV(fmt));
+
+ return fmt;
+}
+
+const struct msm_format *dpu_get_msm_format(
+ struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t modifiers)
+{
+ const struct dpu_format *fmt = dpu_get_dpu_format_ext(format,
+ modifiers);
+ if (fmt)
+ return &fmt->base;
+ return NULL;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
new file mode 100644
index 000000000..84b8b3289
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_FORMATS_H
+#define _DPU_FORMATS_H
+
+#include <drm/drm_fourcc.h>
+#include "msm_gem.h"
+#include "dpu_hw_mdss.h"
+
+/**
+ * dpu_get_dpu_format_ext() - Returns dpu format structure pointer.
+ * @format: DRM FourCC Code
+ * @modifier: format modifier from client; all planes use the same modifier
+ */
+const struct dpu_format *dpu_get_dpu_format_ext(
+ const uint32_t format,
+ const uint64_t modifier);
+
+#define dpu_get_dpu_format(f) dpu_get_dpu_format_ext(f, 0)
+
+/**
+ * dpu_find_format - validate if the pixel format is supported
+ * @format: dpu format
+ * @supported_formats: supported formats by dpu HW
+ * @num_formats: total number of formats
+ *
+ * Return: false if not valid format, true on success
+ */
+static inline bool dpu_find_format(u32 format, const u32 *supported_formats,
+ size_t num_formats)
+{
+ int i;
+
+ for (i = 0; i < num_formats; i++) {
+ /* check for valid formats supported */
+ if (format == supported_formats[i])
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * dpu_get_msm_format - get a dpu_format by its msm_format base
+ * callback function registered with the msm_kms layer
+ * @kms: kms driver
+ * @format: DRM FourCC Code
+ * @modifiers: data layout modifier
+ */
+const struct msm_format *dpu_get_msm_format(
+ struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t modifiers);
+
+/**
+ * dpu_format_check_modified_format - validate format and buffers for
+ * dpu non-standard, i.e. modified format
+ * @kms: kms driver
+ * @msm_fmt: pointer to the msm_fmt base pointer of an dpu_format
+ * @cmd: fb_cmd2 structure user request
+ * @bos: gem buffer object list
+ *
+ * Return: error code on failure, 0 on success
+ */
+int dpu_format_check_modified_format(
+ const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos);
+
+/**
+ * dpu_format_populate_layout - populate the given format layout based on
+ * mmu, fb, and format found in the fb
+ * @aspace: address space pointer
+ * @fb: framebuffer pointer
+ * @fmtl: format layout structure to populate
+ *
+ * Return: error code on failure, -EAGAIN if success but the addresses
+ * are the same as before or 0 if new addresses were populated
+ */
+int dpu_format_populate_layout(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *fmtl);
+
+#endif /*_DPU_FORMATS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
new file mode 100644
index 000000000..b2f330e99
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -0,0 +1,1959 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_kms.h"
+
+#define VIG_BASE_MASK \
+ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
+ BIT(DPU_SSPP_CDP) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define VIG_MASK \
+ (VIG_BASE_MASK | \
+ BIT(DPU_SSPP_CSC_10BIT))
+
+#define VIG_MSM8998_MASK \
+ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3))
+
+#define VIG_SDM845_MASK \
+ (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3))
+
+#define VIG_SC7180_MASK \
+ (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
+
+#define VIG_SM8250_MASK \
+ (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE))
+
+#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL))
+
+#define DMA_MSM8998_MASK \
+ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
+ BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define VIG_SC7280_MASK \
+ (VIG_SC7180_MASK | BIT(DPU_SSPP_INLINE_ROTATION))
+
+#define DMA_SDM845_MASK \
+ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
+ BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define DMA_CURSOR_SDM845_MASK \
+ (DMA_SDM845_MASK | BIT(DPU_SSPP_CURSOR))
+
+#define DMA_CURSOR_MSM8998_MASK \
+ (DMA_MSM8998_MASK | BIT(DPU_SSPP_CURSOR))
+
+#define MIXER_MSM8998_MASK \
+ (BIT(DPU_MIXER_SOURCESPLIT))
+
+#define MIXER_SDM845_MASK \
+ (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
+
+#define MIXER_SC7180_MASK \
+ (BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
+
+#define PINGPONG_SDM845_MASK BIT(DPU_PINGPONG_DITHER)
+
+#define PINGPONG_SDM845_SPLIT_MASK \
+ (PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
+
+#define CTL_SC7280_MASK \
+ (BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_FETCH_ACTIVE) | BIT(DPU_CTL_VM_CFG))
+
+#define MERGE_3D_SM8150_MASK (0)
+
+#define DSPP_MSM8998_MASK (BIT(DPU_DSPP_PCC) | BIT(DPU_DSPP_GC))
+
+#define DSPP_SC7180_MASK BIT(DPU_DSPP_PCC)
+
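+/*
+ * The *_MASK values above seed the .features bitmap of the matching
+ * catalog entry. Consumers test individual capability bits rather
+ * than comparing whole masks; both idioms already appear in the
+ * writeback encoder code earlier in this patch, e.g.:
+ *
+ *   if (hw_ctl->caps->features & BIT(DPU_CTL_ACTIVE_CFG))
+ *       dpu_encoder_helper_phys_cleanup(phys_enc);
+ *
+ *   if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features))
+ *       ...
+ */
+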
+#define INTF_SDM845_MASK (0) + +#define INTF_SC7180_MASK \ + (BIT(DPU_INTF_INPUT_CTRL) | \ + BIT(DPU_INTF_TE) | \ + BIT(DPU_INTF_STATUS_SUPPORTED) | \ + BIT(DPU_DATA_HCTL_EN)) + +#define INTF_SC7280_MASK (INTF_SC7180_MASK) + +#define IRQ_SDM845_MASK (BIT(MDP_SSPP_TOP0_INTR) | \ + BIT(MDP_SSPP_TOP0_INTR2) | \ + BIT(MDP_SSPP_TOP0_HIST_INTR) | \ + BIT(MDP_INTF0_INTR) | \ + BIT(MDP_INTF1_INTR) | \ + BIT(MDP_INTF2_INTR) | \ + BIT(MDP_INTF3_INTR) | \ + BIT(MDP_INTF4_INTR) | \ + BIT(MDP_AD4_0_INTR) | \ + BIT(MDP_AD4_1_INTR)) + +#define IRQ_SC7180_MASK (BIT(MDP_SSPP_TOP0_INTR) | \ + BIT(MDP_SSPP_TOP0_INTR2) | \ + BIT(MDP_SSPP_TOP0_HIST_INTR) | \ + BIT(MDP_INTF0_INTR) | \ + BIT(MDP_INTF1_INTR)) + +#define IRQ_SC7280_MASK (BIT(MDP_SSPP_TOP0_INTR) | \ + BIT(MDP_SSPP_TOP0_INTR2) | \ + BIT(MDP_SSPP_TOP0_HIST_INTR) | \ + BIT(MDP_INTF0_7xxx_INTR) | \ + BIT(MDP_INTF1_7xxx_INTR) | \ + BIT(MDP_INTF5_7xxx_INTR)) + +#define IRQ_SM8250_MASK (BIT(MDP_SSPP_TOP0_INTR) | \ + BIT(MDP_SSPP_TOP0_INTR2) | \ + BIT(MDP_SSPP_TOP0_HIST_INTR) | \ + BIT(MDP_INTF0_INTR) | \ + BIT(MDP_INTF1_INTR) | \ + BIT(MDP_INTF2_INTR) | \ + BIT(MDP_INTF3_INTR) | \ + BIT(MDP_INTF4_INTR)) + +#define IRQ_SC8180X_MASK (BIT(MDP_SSPP_TOP0_INTR) | \ + BIT(MDP_SSPP_TOP0_INTR2) | \ + BIT(MDP_SSPP_TOP0_HIST_INTR) | \ + BIT(MDP_INTF0_INTR) | \ + BIT(MDP_INTF1_INTR) | \ + BIT(MDP_INTF2_INTR) | \ + BIT(MDP_INTF3_INTR) | \ + BIT(MDP_INTF4_INTR) | \ + BIT(MDP_INTF5_INTR) | \ + BIT(MDP_AD4_0_INTR) | \ + BIT(MDP_AD4_1_INTR)) + +#define WB_SM8250_MASK (BIT(DPU_WB_LINE_MODE) | \ + BIT(DPU_WB_UBWC) | \ + BIT(DPU_WB_YUV_CONFIG) | \ + BIT(DPU_WB_PIPE_ALPHA) | \ + BIT(DPU_WB_XY_ROI_OFFSET) | \ + BIT(DPU_WB_QOS) | \ + BIT(DPU_WB_QOS_8LVL) | \ + BIT(DPU_WB_CDP) | \ + BIT(DPU_WB_INPUT_CTRL)) + +#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024) +#define DEFAULT_DPU_LINE_WIDTH 2048 +#define DEFAULT_DPU_OUTPUT_LINE_WIDTH 2560 + +#define MAX_HORZ_DECIMATION 4 +#define MAX_VERT_DECIMATION 4 + +#define MAX_UPSCALE_RATIO 20 +#define MAX_DOWNSCALE_RATIO 4 +#define SSPP_UNITY_SCALE 1 + +#define STRCAT(X, Y) (X Y) + +static const uint32_t plane_formats[] = { + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_RGBX5551, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_XBGR4444, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_BGRX4444, +}; + +static const uint32_t plane_formats_yuv[] = { + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_RGBX5551, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_XBGR4444, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_BGRX4444, + + DRM_FORMAT_NV12, + DRM_FORMAT_NV21, + DRM_FORMAT_NV16, + DRM_FORMAT_NV61, + DRM_FORMAT_VYUY, + DRM_FORMAT_UYVY, + 
DRM_FORMAT_YUYV, + DRM_FORMAT_YVYU, + DRM_FORMAT_YUV420, + DRM_FORMAT_YVU420, +}; + +static const u32 rotation_v2_formats[] = { + DRM_FORMAT_NV12, + /* TODO add formats after validation */ +}; + +static const uint32_t wb2_formats[] = { + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_RGB888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_RGBX5551, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_BGR565, + DRM_FORMAT_BGR888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_BGRX4444, + DRM_FORMAT_XBGR4444, +}; + +/************************************************************* + * DPU sub blocks config + *************************************************************/ +/* DPU top level caps */ +static const struct dpu_caps msm8998_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .max_mixer_blendstages = 0x7, + .qseed_type = DPU_SSPP_SCALER_QSEED3, + .smart_dma_rev = DPU_SSPP_SMART_DMA_V1, + .ubwc_version = DPU_HW_UBWC_VER_10, + .has_src_split = true, + .has_dim_layer = true, + .has_idle_pc = true, + .has_3d_merge = true, + .max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE, + .max_hdeci_exp = MAX_HORZ_DECIMATION, + .max_vdeci_exp = MAX_VERT_DECIMATION, +}; + +static const struct dpu_caps qcm2290_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .max_mixer_blendstages = 0x4, + .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, + .has_dim_layer = true, + .has_idle_pc = true, + .max_linewidth = 2160, + .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE, +}; + +static const struct dpu_caps sdm845_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .max_mixer_blendstages = 0xb, + .qseed_type = DPU_SSPP_SCALER_QSEED3, + .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, + .ubwc_version = DPU_HW_UBWC_VER_20, + .has_src_split = true, + .has_dim_layer = true, + .has_idle_pc = true, + .has_3d_merge = true, + .max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE, + .max_hdeci_exp = MAX_HORZ_DECIMATION, + .max_vdeci_exp = MAX_VERT_DECIMATION, +}; + +static const struct dpu_caps sc7180_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .max_mixer_blendstages = 0x9, + .qseed_type = DPU_SSPP_SCALER_QSEED4, + .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, + .ubwc_version = DPU_HW_UBWC_VER_20, + .has_dim_layer = true, + .has_idle_pc = true, + .max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE, +}; + +static const struct dpu_caps sm8150_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .max_mixer_blendstages = 0xb, + .qseed_type = DPU_SSPP_SCALER_QSEED3, + .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */ + .ubwc_version = DPU_HW_UBWC_VER_30, + .has_src_split = true, + .has_dim_layer = true, + .has_idle_pc = true, + .has_3d_merge = true, + .max_linewidth = 4096, + .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE, + .max_hdeci_exp = MAX_HORZ_DECIMATION, + .max_vdeci_exp = MAX_VERT_DECIMATION, +}; + +static const struct dpu_caps sc8180x_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .max_mixer_blendstages = 0xb, + 
.qseed_type = DPU_SSPP_SCALER_QSEED3, + .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */ + .ubwc_version = DPU_HW_UBWC_VER_30, + .has_src_split = true, + .has_dim_layer = true, + .has_idle_pc = true, + .has_3d_merge = true, + .max_linewidth = 4096, + .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE, + .max_hdeci_exp = MAX_HORZ_DECIMATION, + .max_vdeci_exp = MAX_VERT_DECIMATION, +}; + +static const struct dpu_caps sm8250_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .max_mixer_blendstages = 0xb, + .qseed_type = DPU_SSPP_SCALER_QSEED4, + .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */ + .ubwc_version = DPU_HW_UBWC_VER_40, + .has_src_split = true, + .has_dim_layer = true, + .has_idle_pc = true, + .has_3d_merge = true, + .max_linewidth = 4096, + .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE, +}; + +static const struct dpu_caps sc7280_dpu_caps = { + .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .max_mixer_blendstages = 0x7, + .qseed_type = DPU_SSPP_SCALER_QSEED4, + .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, + .ubwc_version = DPU_HW_UBWC_VER_30, + .has_dim_layer = true, + .has_idle_pc = true, + .max_linewidth = 2400, + .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE, +}; + +static const struct dpu_mdp_cfg msm8998_mdp[] = { + { + .name = "top_0", .id = MDP_TOP, + .base = 0x0, .len = 0x458, + .features = 0, + .highest_bank_bit = 0x2, + .clk_ctrls[DPU_CLK_CTRL_VIG0] = { + .reg_off = 0x2AC, .bit_off = 0}, + .clk_ctrls[DPU_CLK_CTRL_VIG1] = { + .reg_off = 0x2B4, .bit_off = 0}, + .clk_ctrls[DPU_CLK_CTRL_VIG2] = { + .reg_off = 0x2BC, .bit_off = 0}, + .clk_ctrls[DPU_CLK_CTRL_VIG3] = { + .reg_off = 0x2C4, .bit_off = 0}, + .clk_ctrls[DPU_CLK_CTRL_DMA0] = { + .reg_off = 0x2AC, .bit_off = 8}, + .clk_ctrls[DPU_CLK_CTRL_DMA1] = { + .reg_off = 0x2B4, .bit_off = 8}, + .clk_ctrls[DPU_CLK_CTRL_DMA2] = { + .reg_off = 0x2C4, .bit_off = 8}, + .clk_ctrls[DPU_CLK_CTRL_DMA3] = { + .reg_off = 0x2C4, .bit_off = 12}, + .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { + .reg_off = 0x3A8, .bit_off = 15}, + .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = { + .reg_off = 0x3B0, .bit_off = 15}, + }, +}; + +static const struct dpu_mdp_cfg sdm845_mdp[] = { + { + .name = "top_0", .id = MDP_TOP, + .base = 0x0, .len = 0x45C, + .features = BIT(DPU_MDP_AUDIO_SELECT), + .highest_bank_bit = 0x2, + .clk_ctrls[DPU_CLK_CTRL_VIG0] = { + .reg_off = 0x2AC, .bit_off = 0}, + .clk_ctrls[DPU_CLK_CTRL_VIG1] = { + .reg_off = 0x2B4, .bit_off = 0}, + .clk_ctrls[DPU_CLK_CTRL_VIG2] = { + .reg_off = 0x2BC, .bit_off = 0}, + .clk_ctrls[DPU_CLK_CTRL_VIG3] = { + .reg_off = 0x2C4, .bit_off = 0}, + .clk_ctrls[DPU_CLK_CTRL_DMA0] = { + .reg_off = 0x2AC, .bit_off = 8}, + .clk_ctrls[DPU_CLK_CTRL_DMA1] = { + .reg_off = 0x2B4, .bit_off = 8}, + .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { + .reg_off = 0x2BC, .bit_off = 8}, + .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = { + .reg_off = 0x2C4, .bit_off = 8}, + }, +}; + +static const struct dpu_mdp_cfg sc7180_mdp[] = { + { + .name = "top_0", .id = MDP_TOP, + .base = 0x0, .len = 0x494, + .features = 0, + .highest_bank_bit = 0x3, + .clk_ctrls[DPU_CLK_CTRL_VIG0] = { + .reg_off = 0x2AC, .bit_off = 0}, + .clk_ctrls[DPU_CLK_CTRL_DMA0] = { + .reg_off = 0x2AC, .bit_off = 8}, + .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { + .reg_off = 0x2B4, .bit_off = 8}, + .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = { + .reg_off = 0x2C4, .bit_off = 8}, + .clk_ctrls[DPU_CLK_CTRL_WB2] = { + .reg_off = 0x3B8, .bit_off = 24}, + }, +}; + +static const struct dpu_mdp_cfg sc8180x_mdp[] = { + { + .name = "top_0", .id = MDP_TOP, + .base = 0x0, .len = 0x45C, + .features = 0, + 
.highest_bank_bit = 0x3, + .clk_ctrls[DPU_CLK_CTRL_VIG0] = { + .reg_off = 0x2AC, .bit_off = 0}, + .clk_ctrls[DPU_CLK_CTRL_VIG1] = { + .reg_off = 0x2B4, .bit_off = 0}, + .clk_ctrls[DPU_CLK_CTRL_VIG2] = { + .reg_off = 0x2BC, .bit_off = 0}, + .clk_ctrls[DPU_CLK_CTRL_VIG3] = { + .reg_off = 0x2C4, .bit_off = 0}, + .clk_ctrls[DPU_CLK_CTRL_DMA0] = { + .reg_off = 0x2AC, .bit_off = 8}, + .clk_ctrls[DPU_CLK_CTRL_DMA1] = { + .reg_off = 0x2B4, .bit_off = 8}, + .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { + .reg_off = 0x2BC, .bit_off = 8}, + .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = { + .reg_off = 0x2C4, .bit_off = 8}, + }, +}; + +static const struct dpu_mdp_cfg sm8250_mdp[] = { + { + .name = "top_0", .id = MDP_TOP, + .base = 0x0, .len = 0x494, + .features = 0, + .highest_bank_bit = 0x3, /* TODO: 2 for LP_DDR4 */ + .clk_ctrls[DPU_CLK_CTRL_VIG0] = { + .reg_off = 0x2AC, .bit_off = 0}, + .clk_ctrls[DPU_CLK_CTRL_VIG1] = { + .reg_off = 0x2B4, .bit_off = 0}, + .clk_ctrls[DPU_CLK_CTRL_VIG2] = { + .reg_off = 0x2BC, .bit_off = 0}, + .clk_ctrls[DPU_CLK_CTRL_VIG3] = { + .reg_off = 0x2C4, .bit_off = 0}, + .clk_ctrls[DPU_CLK_CTRL_DMA0] = { + .reg_off = 0x2AC, .bit_off = 8}, + .clk_ctrls[DPU_CLK_CTRL_DMA1] = { + .reg_off = 0x2B4, .bit_off = 8}, + .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { + .reg_off = 0x2BC, .bit_off = 8}, + .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = { + .reg_off = 0x2C4, .bit_off = 8}, + .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = { + .reg_off = 0x2BC, .bit_off = 20}, + .clk_ctrls[DPU_CLK_CTRL_WB2] = { + .reg_off = 0x3B8, .bit_off = 24}, + }, +}; + +static const struct dpu_mdp_cfg sc7280_mdp[] = { + { + .name = "top_0", .id = MDP_TOP, + .base = 0x0, .len = 0x2014, + .highest_bank_bit = 0x1, + .clk_ctrls[DPU_CLK_CTRL_VIG0] = { + .reg_off = 0x2AC, .bit_off = 0}, + .clk_ctrls[DPU_CLK_CTRL_DMA0] = { + .reg_off = 0x2AC, .bit_off = 8}, + .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { + .reg_off = 0x2B4, .bit_off = 8}, + .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = { + .reg_off = 0x2C4, .bit_off = 8}, + }, +}; + +static const struct dpu_mdp_cfg qcm2290_mdp[] = { + { + .name = "top_0", .id = MDP_TOP, + .base = 0x0, .len = 0x494, + .features = 0, + .highest_bank_bit = 0x2, + .clk_ctrls[DPU_CLK_CTRL_VIG0] = { + .reg_off = 0x2AC, .bit_off = 0}, + .clk_ctrls[DPU_CLK_CTRL_DMA0] = { + .reg_off = 0x2AC, .bit_off = 8}, + }, +}; + +/************************************************************* + * CTL sub blocks config + *************************************************************/ +static const struct dpu_ctl_cfg msm8998_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x1000, .len = 0x94, + .features = BIT(DPU_CTL_SPLIT_DISPLAY), + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), + }, + { + .name = "ctl_1", .id = CTL_1, + .base = 0x1200, .len = 0x94, + .features = 0, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), + }, + { + .name = "ctl_2", .id = CTL_2, + .base = 0x1400, .len = 0x94, + .features = BIT(DPU_CTL_SPLIT_DISPLAY), + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11), + }, + { + .name = "ctl_3", .id = CTL_3, + .base = 0x1600, .len = 0x94, + .features = 0, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12), + }, + { + .name = "ctl_4", .id = CTL_4, + .base = 0x1800, .len = 0x94, + .features = 0, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13), + }, +}; + +static const struct dpu_ctl_cfg sdm845_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x1000, .len = 0xE4, + .features = BIT(DPU_CTL_SPLIT_DISPLAY), + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), + }, + { + .name = "ctl_1", .id = CTL_1, + .base = 0x1200, .len = 0xE4, + 
.features = BIT(DPU_CTL_SPLIT_DISPLAY), + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), + }, + { + .name = "ctl_2", .id = CTL_2, + .base = 0x1400, .len = 0xE4, + .features = 0, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11), + }, + { + .name = "ctl_3", .id = CTL_3, + .base = 0x1600, .len = 0xE4, + .features = 0, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12), + }, + { + .name = "ctl_4", .id = CTL_4, + .base = 0x1800, .len = 0xE4, + .features = 0, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13), + }, +}; + +static const struct dpu_ctl_cfg sc7180_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x1000, .len = 0x1dc, + .features = BIT(DPU_CTL_ACTIVE_CFG), + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), + }, + { + .name = "ctl_1", .id = CTL_1, + .base = 0x1200, .len = 0x1dc, + .features = BIT(DPU_CTL_ACTIVE_CFG), + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), + }, + { + .name = "ctl_2", .id = CTL_2, + .base = 0x1400, .len = 0x1dc, + .features = BIT(DPU_CTL_ACTIVE_CFG), + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11), + }, +}; + +static const struct dpu_ctl_cfg sm8150_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x1000, .len = 0x1e0, + .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY), + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), + }, + { + .name = "ctl_1", .id = CTL_1, + .base = 0x1200, .len = 0x1e0, + .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY), + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), + }, + { + .name = "ctl_2", .id = CTL_2, + .base = 0x1400, .len = 0x1e0, + .features = BIT(DPU_CTL_ACTIVE_CFG), + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11), + }, + { + .name = "ctl_3", .id = CTL_3, + .base = 0x1600, .len = 0x1e0, + .features = BIT(DPU_CTL_ACTIVE_CFG), + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12), + }, + { + .name = "ctl_4", .id = CTL_4, + .base = 0x1800, .len = 0x1e0, + .features = BIT(DPU_CTL_ACTIVE_CFG), + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13), + }, + { + .name = "ctl_5", .id = CTL_5, + .base = 0x1a00, .len = 0x1e0, + .features = BIT(DPU_CTL_ACTIVE_CFG), + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23), + }, +}; + +static const struct dpu_ctl_cfg sc7280_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x15000, .len = 0x1E8, + .features = CTL_SC7280_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), + }, + { + .name = "ctl_1", .id = CTL_1, + .base = 0x16000, .len = 0x1E8, + .features = CTL_SC7280_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), + }, + { + .name = "ctl_2", .id = CTL_2, + .base = 0x17000, .len = 0x1E8, + .features = CTL_SC7280_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11), + }, + { + .name = "ctl_3", .id = CTL_3, + .base = 0x18000, .len = 0x1E8, + .features = CTL_SC7280_MASK, + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12), + }, +}; + +static const struct dpu_ctl_cfg qcm2290_ctl[] = { + { + .name = "ctl_0", .id = CTL_0, + .base = 0x1000, .len = 0x1dc, + .features = BIT(DPU_CTL_ACTIVE_CFG), + .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), + }, +}; + +/************************************************************* + * SSPP sub blocks config + *************************************************************/ + +/* SSPP common configuration */ +#define _VIG_SBLK(num, sdma_pri, qseed_ver) \ + { \ + .maxdwnscale = MAX_DOWNSCALE_RATIO, \ + .maxupscale = MAX_UPSCALE_RATIO, \ + .smart_dma_priority = sdma_pri, \ + .src_blk = {.name = STRCAT("sspp_src_", num), \ + .id = DPU_SSPP_SRC, .base = 0x00, 
.len = 0x150,}, \ + .scaler_blk = {.name = STRCAT("sspp_scaler", num), \ + .id = qseed_ver, \ + .base = 0xa00, .len = 0xa0,}, \ + .csc_blk = {.name = STRCAT("sspp_csc", num), \ + .id = DPU_SSPP_CSC_10BIT, \ + .base = 0x1a00, .len = 0x100,}, \ + .format_list = plane_formats_yuv, \ + .num_formats = ARRAY_SIZE(plane_formats_yuv), \ + .virt_format_list = plane_formats, \ + .virt_num_formats = ARRAY_SIZE(plane_formats), \ + .rotation_cfg = NULL, \ + } + +#define _VIG_SBLK_ROT(num, sdma_pri, qseed_ver, rot_cfg) \ + { \ + .maxdwnscale = MAX_DOWNSCALE_RATIO, \ + .maxupscale = MAX_UPSCALE_RATIO, \ + .smart_dma_priority = sdma_pri, \ + .src_blk = {.name = STRCAT("sspp_src_", num), \ + .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \ + .scaler_blk = {.name = STRCAT("sspp_scaler", num), \ + .id = qseed_ver, \ + .base = 0xa00, .len = 0xa0,}, \ + .csc_blk = {.name = STRCAT("sspp_csc", num), \ + .id = DPU_SSPP_CSC_10BIT, \ + .base = 0x1a00, .len = 0x100,}, \ + .format_list = plane_formats_yuv, \ + .num_formats = ARRAY_SIZE(plane_formats_yuv), \ + .virt_format_list = plane_formats, \ + .virt_num_formats = ARRAY_SIZE(plane_formats), \ + .rotation_cfg = rot_cfg, \ + } + +#define _DMA_SBLK(num, sdma_pri) \ + { \ + .maxdwnscale = SSPP_UNITY_SCALE, \ + .maxupscale = SSPP_UNITY_SCALE, \ + .smart_dma_priority = sdma_pri, \ + .src_blk = {.name = STRCAT("sspp_src_", num), \ + .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \ + .format_list = plane_formats, \ + .num_formats = ARRAY_SIZE(plane_formats), \ + .virt_format_list = plane_formats, \ + .virt_num_formats = ARRAY_SIZE(plane_formats), \ + } + +static const struct dpu_sspp_sub_blks msm8998_vig_sblk_0 = + _VIG_SBLK("0", 0, DPU_SSPP_SCALER_QSEED3); +static const struct dpu_sspp_sub_blks msm8998_vig_sblk_1 = + _VIG_SBLK("1", 0, DPU_SSPP_SCALER_QSEED3); +static const struct dpu_sspp_sub_blks msm8998_vig_sblk_2 = + _VIG_SBLK("2", 0, DPU_SSPP_SCALER_QSEED3); +static const struct dpu_sspp_sub_blks msm8998_vig_sblk_3 = + _VIG_SBLK("3", 0, DPU_SSPP_SCALER_QSEED3); + +static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = { + .rot_maxheight = 1088, + .rot_num_formats = ARRAY_SIZE(rotation_v2_formats), + .rot_format_list = rotation_v2_formats, +}; + +static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = + _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3); +static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 = + _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3); +static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 = + _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3); +static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 = + _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3); + +static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK("8", 1); +static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2); +static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK("10", 3); +static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4); + +#define SSPP_BLK(_name, _id, _base, _features, \ + _sblk, _xinid, _type, _clkctrl) \ + { \ + .name = _name, .id = _id, \ + .base = _base, .len = 0x1c8, \ + .features = _features, \ + .sblk = &_sblk, \ + .xin_id = _xinid, \ + .type = _type, \ + .clk_ctrl = _clkctrl \ + } + +static const struct dpu_sspp_cfg msm8998_sspp[] = { + SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_MSM8998_MASK, + msm8998_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0), + SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_MSM8998_MASK, + msm8998_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1), + SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, 
VIG_MSM8998_MASK, + msm8998_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2), + SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_MSM8998_MASK, + msm8998_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3), + SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_MSM8998_MASK, + sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0), + SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_MSM8998_MASK, + sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1), + SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_MSM8998_MASK, + sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2), + SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_MSM8998_MASK, + sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3), +}; + +static const struct dpu_sspp_cfg sdm845_sspp[] = { + SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SDM845_MASK, + sdm845_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0), + SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SDM845_MASK, + sdm845_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1), + SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SDM845_MASK, + sdm845_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2), + SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SDM845_MASK, + sdm845_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3), + SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK, + sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0), + SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK, + sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1), + SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK, + sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0), + SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK, + sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1), +}; + +static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 = + _VIG_SBLK("0", 4, DPU_SSPP_SCALER_QSEED4); + +static const struct dpu_sspp_sub_blks sc7280_vig_sblk_0 = + _VIG_SBLK_ROT("0", 4, DPU_SSPP_SCALER_QSEED4, &dpu_rot_sc7280_cfg_v2); + +static const struct dpu_sspp_cfg sc7180_sspp[] = { + SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK, + sc7180_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0), + SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK, + sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0), + SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_CURSOR_SDM845_MASK, + sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0), + SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK, + sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1), +}; + +static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 = + _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED4); +static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 = + _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED4); +static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 = + _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED4); +static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 = + _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED4); + +static const struct dpu_sspp_cfg sm8250_sspp[] = { + SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK, + sm8250_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0), + SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SC7180_MASK, + sm8250_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1), + SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SC7180_MASK, + sm8250_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2), + SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SC7180_MASK, + sm8250_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3), + SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK, + sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0), + 
SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK, + sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1), + SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK, + sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0), + SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK, + sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1), +}; + +static const struct dpu_sspp_cfg sc7280_sspp[] = { + SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7280_MASK, + sc7280_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0), + SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK, + sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0), + SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_CURSOR_SDM845_MASK, + sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0), + SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK, + sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1), +}; + + +#define _VIG_SBLK_NOSCALE(num, sdma_pri) \ + { \ + .maxdwnscale = SSPP_UNITY_SCALE, \ + .maxupscale = SSPP_UNITY_SCALE, \ + .smart_dma_priority = sdma_pri, \ + .src_blk = {.name = STRCAT("sspp_src_", num), \ + .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \ + .format_list = plane_formats_yuv, \ + .num_formats = ARRAY_SIZE(plane_formats_yuv), \ + .virt_format_list = plane_formats, \ + .virt_num_formats = ARRAY_SIZE(plane_formats), \ + } + +static const struct dpu_sspp_sub_blks qcm2290_vig_sblk_0 = _VIG_SBLK_NOSCALE("0", 2); +static const struct dpu_sspp_sub_blks qcm2290_dma_sblk_0 = _DMA_SBLK("8", 1); + +static const struct dpu_sspp_cfg qcm2290_sspp[] = { + SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_QCM2290_MASK, + qcm2290_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0), + SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK, + qcm2290_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0), +}; + +/************************************************************* + * MIXER sub blocks config + *************************************************************/ + +#define LM_BLK(_name, _id, _base, _fmask, _sblk, _pp, _lmpair, _dspp) \ + { \ + .name = _name, .id = _id, \ + .base = _base, .len = 0x320, \ + .features = _fmask, \ + .sblk = _sblk, \ + .pingpong = _pp, \ + .lm_pair_mask = (1 << _lmpair), \ + .dspp = _dspp \ + } + +/* MSM8998 */ + +static const struct dpu_lm_sub_blks msm8998_lm_sblk = { + .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .maxblendstages = 7, /* excluding base layer */ + .blendstage_base = { /* offsets relative to mixer base */ + 0x20, 0x50, 0x80, 0xb0, 0x230, + 0x260, 0x290 + }, +}; + +static const struct dpu_lm_cfg msm8998_lm[] = { + LM_BLK("lm_0", LM_0, 0x44000, MIXER_MSM8998_MASK, + &msm8998_lm_sblk, PINGPONG_0, LM_2, DSPP_0), + LM_BLK("lm_1", LM_1, 0x45000, MIXER_MSM8998_MASK, + &msm8998_lm_sblk, PINGPONG_1, LM_5, DSPP_1), + LM_BLK("lm_2", LM_2, 0x46000, MIXER_MSM8998_MASK, + &msm8998_lm_sblk, PINGPONG_2, LM_0, 0), + LM_BLK("lm_3", LM_3, 0x47000, MIXER_MSM8998_MASK, + &msm8998_lm_sblk, PINGPONG_MAX, 0, 0), + LM_BLK("lm_4", LM_4, 0x48000, MIXER_MSM8998_MASK, + &msm8998_lm_sblk, PINGPONG_MAX, 0, 0), + LM_BLK("lm_5", LM_5, 0x49000, MIXER_MSM8998_MASK, + &msm8998_lm_sblk, PINGPONG_3, LM_1, 0), +}; + +/* SDM845 */ + +static const struct dpu_lm_sub_blks sdm845_lm_sblk = { + .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .maxblendstages = 11, /* excluding base layer */ + .blendstage_base = { /* offsets relative to mixer base */ + 0x20, 0x38, 0x50, 0x68, 0x80, 0x98, + 0xb0, 0xc8, 0xe0, 0xf8, 0x110 + }, +}; + +static const struct dpu_lm_cfg sdm845_lm[] = { + LM_BLK("lm_0", 
LM_0, 0x44000, MIXER_SDM845_MASK, + &sdm845_lm_sblk, PINGPONG_0, LM_1, 0), + LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK, + &sdm845_lm_sblk, PINGPONG_1, LM_0, 0), + LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK, + &sdm845_lm_sblk, PINGPONG_2, LM_5, 0), + LM_BLK("lm_3", LM_3, 0x0, MIXER_SDM845_MASK, + &sdm845_lm_sblk, PINGPONG_MAX, 0, 0), + LM_BLK("lm_4", LM_4, 0x0, MIXER_SDM845_MASK, + &sdm845_lm_sblk, PINGPONG_MAX, 0, 0), + LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK, + &sdm845_lm_sblk, PINGPONG_3, LM_2, 0), +}; + +/* SC7180 */ + +static const struct dpu_lm_sub_blks sc7180_lm_sblk = { + .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .maxblendstages = 7, /* excluding base layer */ + .blendstage_base = { /* offsets relative to mixer base */ + 0x20, 0x38, 0x50, 0x68, 0x80, 0x98, 0xb0 + }, +}; + +static const struct dpu_lm_cfg sc7180_lm[] = { + LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK, + &sc7180_lm_sblk, PINGPONG_0, LM_1, DSPP_0), + LM_BLK("lm_1", LM_1, 0x45000, MIXER_SC7180_MASK, + &sc7180_lm_sblk, PINGPONG_1, LM_0, 0), +}; + +/* SM8150 */ + +static const struct dpu_lm_cfg sm8150_lm[] = { + LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK, + &sdm845_lm_sblk, PINGPONG_0, LM_1, DSPP_0), + LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK, + &sdm845_lm_sblk, PINGPONG_1, LM_0, DSPP_1), + LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK, + &sdm845_lm_sblk, PINGPONG_2, LM_3, 0), + LM_BLK("lm_3", LM_3, 0x47000, MIXER_SDM845_MASK, + &sdm845_lm_sblk, PINGPONG_3, LM_2, 0), + LM_BLK("lm_4", LM_4, 0x48000, MIXER_SDM845_MASK, + &sdm845_lm_sblk, PINGPONG_4, LM_5, 0), + LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK, + &sdm845_lm_sblk, PINGPONG_5, LM_4, 0), +}; + +static const struct dpu_lm_cfg sc7280_lm[] = { + LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK, + &sc7180_lm_sblk, PINGPONG_0, 0, DSPP_0), + LM_BLK("lm_2", LM_2, 0x46000, MIXER_SC7180_MASK, + &sc7180_lm_sblk, PINGPONG_2, LM_3, 0), + LM_BLK("lm_3", LM_3, 0x47000, MIXER_SC7180_MASK, + &sc7180_lm_sblk, PINGPONG_3, LM_2, 0), +}; + +/* QCM2290 */ + +static const struct dpu_lm_sub_blks qcm2290_lm_sblk = { + .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .maxblendstages = 4, /* excluding base layer */ + .blendstage_base = { /* offsets relative to mixer base */ + 0x20, 0x38, 0x50, 0x68 + }, +}; + +static const struct dpu_lm_cfg qcm2290_lm[] = { + LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK, + &qcm2290_lm_sblk, PINGPONG_0, 0, DSPP_0), +}; + +/************************************************************* + * DSPP sub blocks config + *************************************************************/ +static const struct dpu_dspp_sub_blks msm8998_dspp_sblk = { + .pcc = {.id = DPU_DSPP_PCC, .base = 0x1700, + .len = 0x90, .version = 0x10007}, + .gc = { .id = DPU_DSPP_GC, .base = 0x17c0, + .len = 0x90, .version = 0x10007}, +}; + +static const struct dpu_dspp_sub_blks sc7180_dspp_sblk = { + .pcc = {.id = DPU_DSPP_PCC, .base = 0x1700, + .len = 0x90, .version = 0x10000}, +}; + +static const struct dpu_dspp_sub_blks sm8150_dspp_sblk = { + .pcc = {.id = DPU_DSPP_PCC, .base = 0x1700, + .len = 0x90, .version = 0x40000}, +}; + +#define DSPP_BLK(_name, _id, _base, _mask, _sblk) \ + {\ + .name = _name, .id = _id, \ + .base = _base, .len = 0x1800, \ + .features = _mask, \ + .sblk = _sblk \ + } + +static const struct dpu_dspp_cfg msm8998_dspp[] = { + DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_MSM8998_MASK, + &msm8998_dspp_sblk), + DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_MSM8998_MASK, + &msm8998_dspp_sblk), +}; + +static const struct dpu_dspp_cfg sc7180_dspp[] = 
{ + DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK, + &sc7180_dspp_sblk), +}; + +static const struct dpu_dspp_cfg sm8150_dspp[] = { + DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK, + &sm8150_dspp_sblk), + DSPP_BLK("dspp_1", DSPP_1, 0x56000, DSPP_SC7180_MASK, + &sm8150_dspp_sblk), + DSPP_BLK("dspp_2", DSPP_2, 0x58000, DSPP_SC7180_MASK, + &sm8150_dspp_sblk), + DSPP_BLK("dspp_3", DSPP_3, 0x5a000, DSPP_SC7180_MASK, + &sm8150_dspp_sblk), +}; + +static const struct dpu_dspp_cfg qcm2290_dspp[] = { + DSPP_BLK("dspp_0", DSPP_0, 0x54000, DSPP_SC7180_MASK, + &sm8150_dspp_sblk), +}; + +/************************************************************* + * PINGPONG sub blocks config + *************************************************************/ +static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = { + .te2 = {.id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0, + .version = 0x1}, + .dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0, + .len = 0x20, .version = 0x10000}, +}; + +static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = { + .dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0, + .len = 0x20, .version = 0x10000}, +}; + +static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = { + .dither = {.id = DPU_PINGPONG_DITHER, .base = 0xe0, + .len = 0x20, .version = 0x20000}, +}; + +#define PP_BLK_TE(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \ + {\ + .name = _name, .id = _id, \ + .base = _base, .len = 0xd4, \ + .features = PINGPONG_SDM845_SPLIT_MASK, \ + .merge_3d = _merge_3d, \ + .sblk = &_sblk, \ + .intr_done = _done, \ + .intr_rdptr = _rdptr, \ + } +#define PP_BLK(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \ + {\ + .name = _name, .id = _id, \ + .base = _base, .len = 0xd4, \ + .features = PINGPONG_SDM845_MASK, \ + .merge_3d = _merge_3d, \ + .sblk = &_sblk, \ + .intr_done = _done, \ + .intr_rdptr = _rdptr, \ + } + +static const struct dpu_pingpong_cfg sdm845_pp[] = { + PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk_te, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)), + PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk_te, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)), + PP_BLK("pingpong_2", PINGPONG_2, 0x71000, 0, sdm845_pp_sblk, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)), + PP_BLK("pingpong_3", PINGPONG_3, 0x71800, 0, sdm845_pp_sblk, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)), +}; + +static struct dpu_pingpong_cfg sc7180_pp[] = { + PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk_te, -1, -1), + PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk_te, -1, -1), +}; + +static const struct dpu_pingpong_cfg sm8150_pp[] = { + PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)), + PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk_te, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)), + PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)), + PP_BLK("pingpong_3", PINGPONG_3, 0x71800, MERGE_3D_1, sdm845_pp_sblk, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)), + PP_BLK("pingpong_4", PINGPONG_4, 0x72000, MERGE_3D_2, sdm845_pp_sblk, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), + -1), + PP_BLK("pingpong_5", PINGPONG_5, 
0x72800, MERGE_3D_2, sdm845_pp_sblk, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), + -1), +}; + +static const struct dpu_pingpong_cfg sc7280_pp[] = { + PP_BLK("pingpong_0", PINGPONG_0, 0x69000, 0, sc7280_pp_sblk, -1, -1), + PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1), + PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1), + PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1), +}; + +static struct dpu_pingpong_cfg qcm2290_pp[] = { + PP_BLK("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk, + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)), +}; + +/************************************************************* + * MERGE_3D sub blocks config + *************************************************************/ +#define MERGE_3D_BLK(_name, _id, _base) \ + {\ + .name = _name, .id = _id, \ + .base = _base, .len = 0x8, \ + .features = MERGE_3D_SM8150_MASK, \ + .sblk = NULL \ + } + +static const struct dpu_merge_3d_cfg sm8150_merge_3d[] = { + MERGE_3D_BLK("merge_3d_0", MERGE_3D_0, 0x83000), + MERGE_3D_BLK("merge_3d_1", MERGE_3D_1, 0x83100), + MERGE_3D_BLK("merge_3d_2", MERGE_3D_2, 0x83200), +}; + +/************************************************************* + * DSC sub blocks config + *************************************************************/ +#define DSC_BLK(_name, _id, _base) \ + {\ + .name = _name, .id = _id, \ + .base = _base, .len = 0x140, \ + .features = 0, \ + } + +static struct dpu_dsc_cfg sdm845_dsc[] = { + DSC_BLK("dsc_0", DSC_0, 0x80000), + DSC_BLK("dsc_1", DSC_1, 0x80400), + DSC_BLK("dsc_2", DSC_2, 0x80800), + DSC_BLK("dsc_3", DSC_3, 0x80c00), +}; + +/************************************************************* + * INTF sub blocks config + *************************************************************/ +#define INTF_BLK(_name, _id, _base, _type, _ctrl_id, _progfetch, _features, _reg, _underrun_bit, _vsync_bit) \ + {\ + .name = _name, .id = _id, \ + .base = _base, .len = 0x280, \ + .features = _features, \ + .type = _type, \ + .controller_id = _ctrl_id, \ + .prog_fetch_lines_worst_case = _progfetch, \ + .intr_underrun = DPU_IRQ_IDX(_reg, _underrun_bit), \ + .intr_vsync = DPU_IRQ_IDX(_reg, _vsync_bit), \ + } + +static const struct dpu_intf_cfg msm8998_intf[] = { + INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 24, 25), + INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 26, 27), + INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 28, 29), + INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_HDMI, 0, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 30, 31), +}; + +static const struct dpu_intf_cfg sdm845_intf[] = { + INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 24, 25), + INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 26, 27), + INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 28, 29), + INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 30, 31), +}; + +static const struct dpu_intf_cfg sc7180_intf[] = { + INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25), + INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27), +}; + +static const struct dpu_intf_cfg sm8150_intf[] = { + INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, 
INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25), + INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27), + INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 28, 29), + INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 30, 31), +}; + +static const struct dpu_intf_cfg sc7280_intf[] = { + INTF_BLK("intf_0", INTF_0, 0x34000, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 24, 25), + INTF_BLK("intf_1", INTF_1, 0x35000, INTF_DSI, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 26, 27), + INTF_BLK("intf_5", INTF_5, 0x39000, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 22, 23), +}; + +static const struct dpu_intf_cfg sc8180x_intf[] = { + INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25), + INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27), + INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 28, 29), + /* INTF_3 is for MST, wired to INTF_DP 0 and 1, use dummy index until this is supported */ + INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 999, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 30, 31), + INTF_BLK("intf_4", INTF_4, 0x6C000, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 20, 21), + INTF_BLK("intf_5", INTF_5, 0x6C800, INTF_DP, MSM_DP_CONTROLLER_2, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 22, 23), +}; + +static const struct dpu_intf_cfg qcm2290_intf[] = { + INTF_BLK("intf_0", INTF_0, 0x00000, INTF_NONE, 0, 0, 0, 0, 0, 0), + INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27), +}; + +/************************************************************* + * Writeback blocks config + *************************************************************/ +#define WB_BLK(_name, _id, _base, _features, _clk_ctrl, \ + __xin_id, vbif_id, _reg, _max_linewidth, _wb_done_bit) \ + { \ + .name = _name, .id = _id, \ + .base = _base, .len = 0x2c8, \ + .features = _features, \ + .format_list = wb2_formats, \ + .num_formats = ARRAY_SIZE(wb2_formats), \ + .clk_ctrl = _clk_ctrl, \ + .xin_id = __xin_id, \ + .vbif_idx = vbif_id, \ + .maxlinewidth = _max_linewidth, \ + .intr_wb_done = DPU_IRQ_IDX(_reg, _wb_done_bit) \ + } + +static const struct dpu_wb_cfg sm8250_wb[] = { + WB_BLK("wb_2", WB_2, 0x65000, WB_SM8250_MASK, DPU_CLK_CTRL_WB2, 6, + VBIF_RT, MDP_SSPP_TOP0_INTR, 4096, 4), +}; + +/************************************************************* + * VBIF sub blocks config + *************************************************************/ +/* VBIF QOS remap */ +static const u32 msm8998_rt_pri_lvl[] = {1, 2, 2, 2}; +static const u32 msm8998_nrt_pri_lvl[] = {1, 1, 1, 1}; +static const u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6}; +static const u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3}; + +static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = { + { + .pps = 1088 * 1920 * 30, + .ot_limit = 2, + }, + { + .pps = 1088 * 1920 * 60, + .ot_limit = 6, + }, + { + .pps = 3840 * 2160 * 30, + .ot_limit = 16, + }, +}; + +static const struct dpu_vbif_cfg msm8998_vbif[] = { + { + .name = "vbif_rt", .id = VBIF_RT, + .base = 0, .len = 0x1040, + .default_ot_rd_limit = 32, + .default_ot_wr_limit = 32, + .features = BIT(DPU_VBIF_QOS_REMAP) | BIT(DPU_VBIF_QOS_OTLIM), + .xin_halt_timeout = 0x4000, + .qos_rp_remap_size = 0x20, + 
.dynamic_ot_rd_tbl = { + .count = ARRAY_SIZE(msm8998_ot_rdwr_cfg), + .cfg = msm8998_ot_rdwr_cfg, + }, + .dynamic_ot_wr_tbl = { + .count = ARRAY_SIZE(msm8998_ot_rdwr_cfg), + .cfg = msm8998_ot_rdwr_cfg, + }, + .qos_rt_tbl = { + .npriority_lvl = ARRAY_SIZE(msm8998_rt_pri_lvl), + .priority_lvl = msm8998_rt_pri_lvl, + }, + .qos_nrt_tbl = { + .npriority_lvl = ARRAY_SIZE(msm8998_nrt_pri_lvl), + .priority_lvl = msm8998_nrt_pri_lvl, + }, + .memtype_count = 14, + .memtype = {2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}, + }, +}; + +static const struct dpu_vbif_cfg sdm845_vbif[] = { + { + .name = "vbif_rt", .id = VBIF_RT, + .base = 0, .len = 0x1040, + .features = BIT(DPU_VBIF_QOS_REMAP), + .xin_halt_timeout = 0x4000, + .qos_rp_remap_size = 0x40, + .qos_rt_tbl = { + .npriority_lvl = ARRAY_SIZE(sdm845_rt_pri_lvl), + .priority_lvl = sdm845_rt_pri_lvl, + }, + .qos_nrt_tbl = { + .npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl), + .priority_lvl = sdm845_nrt_pri_lvl, + }, + .memtype_count = 14, + .memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}, + }, +}; + +static const struct dpu_reg_dma_cfg sdm845_regdma = { + .base = 0x0, .version = 0x1, .trigger_sel_off = 0x119c +}; + +static const struct dpu_reg_dma_cfg sm8150_regdma = { + .base = 0x0, .version = 0x00010001, .trigger_sel_off = 0x119c +}; + +static const struct dpu_reg_dma_cfg sm8250_regdma = { + .base = 0x0, + .version = 0x00010002, + .trigger_sel_off = 0x119c, + .xin_id = 7, + .clk_ctrl = DPU_CLK_CTRL_REG_DMA, +}; + +/************************************************************* + * PERF data config + *************************************************************/ + +/* SSPP QOS LUTs */ +static const struct dpu_qos_lut_entry msm8998_qos_linear[] = { + {.fl = 4, .lut = 0x1b}, + {.fl = 5, .lut = 0x5b}, + {.fl = 6, .lut = 0x15b}, + {.fl = 7, .lut = 0x55b}, + {.fl = 8, .lut = 0x155b}, + {.fl = 9, .lut = 0x555b}, + {.fl = 10, .lut = 0x1555b}, + {.fl = 11, .lut = 0x5555b}, + {.fl = 12, .lut = 0x15555b}, + {.fl = 13, .lut = 0x55555b}, + {.fl = 14, .lut = 0}, + {.fl = 1, .lut = 0x1b}, + {.fl = 0, .lut = 0} +}; + +static const struct dpu_qos_lut_entry sdm845_qos_linear[] = { + {.fl = 4, .lut = 0x357}, + {.fl = 5, .lut = 0x3357}, + {.fl = 6, .lut = 0x23357}, + {.fl = 7, .lut = 0x223357}, + {.fl = 8, .lut = 0x2223357}, + {.fl = 9, .lut = 0x22223357}, + {.fl = 10, .lut = 0x222223357}, + {.fl = 11, .lut = 0x2222223357}, + {.fl = 12, .lut = 0x22222223357}, + {.fl = 13, .lut = 0x222222223357}, + {.fl = 14, .lut = 0x1222222223357}, + {.fl = 0, .lut = 0x11222222223357} +}; + +static const struct dpu_qos_lut_entry msm8998_qos_macrotile[] = { + {.fl = 10, .lut = 0x1aaff}, + {.fl = 11, .lut = 0x5aaff}, + {.fl = 12, .lut = 0x15aaff}, + {.fl = 13, .lut = 0x55aaff}, + {.fl = 1, .lut = 0x1aaff}, + {.fl = 0, .lut = 0}, +}; + +static const struct dpu_qos_lut_entry sc7180_qos_linear[] = { + {.fl = 0, .lut = 0x0011222222335777}, +}; + +static const struct dpu_qos_lut_entry sm8150_qos_linear[] = { + {.fl = 0, .lut = 0x0011222222223357 }, +}; + +static const struct dpu_qos_lut_entry sc8180x_qos_linear[] = { + {.fl = 4, .lut = 0x0000000000000357 }, +}; + +static const struct dpu_qos_lut_entry qcm2290_qos_linear[] = { + {.fl = 0, .lut = 0x0011222222335777}, +}; + +static const struct dpu_qos_lut_entry sdm845_qos_macrotile[] = { + {.fl = 10, .lut = 0x344556677}, + {.fl = 11, .lut = 0x3344556677}, + {.fl = 12, .lut = 0x23344556677}, + {.fl = 13, .lut = 0x223344556677}, + {.fl = 14, .lut = 0x1223344556677}, + {.fl = 0, .lut = 0x112233344556677}, +}; + +static const struct 
dpu_qos_lut_entry sc7180_qos_macrotile[] = { + {.fl = 0, .lut = 0x0011223344556677}, +}; + +static const struct dpu_qos_lut_entry sc8180x_qos_macrotile[] = { + {.fl = 10, .lut = 0x0000000344556677}, +}; + +static const struct dpu_qos_lut_entry msm8998_qos_nrt[] = { + {.fl = 0, .lut = 0x0}, +}; + +static const struct dpu_qos_lut_entry sdm845_qos_nrt[] = { + {.fl = 0, .lut = 0x0}, +}; + +static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = { + {.fl = 0, .lut = 0x0}, +}; + +static const struct dpu_perf_cfg msm8998_perf_data = { + .max_bw_low = 6700000, + .max_bw_high = 6700000, + .min_core_ib = 2400000, + .min_llcc_ib = 800000, + .min_dram_ib = 800000, + .undersized_prefill_lines = 2, + .xtra_prefill_lines = 2, + .dest_scale_prefill_lines = 3, + .macrotile_prefill_lines = 4, + .yuv_nv12_prefill_lines = 8, + .linear_prefill_lines = 1, + .downscaling_prefill_lines = 1, + .amortizable_threshold = 25, + .min_prefill_lines = 25, + .danger_lut_tbl = {0xf, 0xffff, 0x0}, + .safe_lut_tbl = {0xfffc, 0xff00, 0xffff}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(msm8998_qos_linear), + .entries = msm8998_qos_linear + }, + {.nentry = ARRAY_SIZE(msm8998_qos_macrotile), + .entries = msm8998_qos_macrotile + }, + {.nentry = ARRAY_SIZE(msm8998_qos_nrt), + .entries = msm8998_qos_nrt + }, + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 200, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_perf_cfg sdm845_perf_data = { + .max_bw_low = 6800000, + .max_bw_high = 6800000, + .min_core_ib = 2400000, + .min_llcc_ib = 800000, + .min_dram_ib = 800000, + .undersized_prefill_lines = 2, + .xtra_prefill_lines = 2, + .dest_scale_prefill_lines = 3, + .macrotile_prefill_lines = 4, + .yuv_nv12_prefill_lines = 8, + .linear_prefill_lines = 1, + .downscaling_prefill_lines = 1, + .amortizable_threshold = 25, + .min_prefill_lines = 24, + .danger_lut_tbl = {0xf, 0xffff, 0x0}, + .safe_lut_tbl = {0xfff0, 0xf000, 0xffff}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(sdm845_qos_linear), + .entries = sdm845_qos_linear + }, + {.nentry = ARRAY_SIZE(sdm845_qos_macrotile), + .entries = sdm845_qos_macrotile + }, + {.nentry = ARRAY_SIZE(sdm845_qos_nrt), + .entries = sdm845_qos_nrt + }, + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_perf_cfg sc7180_perf_data = { + .max_bw_low = 6800000, + .max_bw_high = 6800000, + .min_core_ib = 2400000, + .min_llcc_ib = 800000, + .min_dram_ib = 1600000, + .min_prefill_lines = 24, + .danger_lut_tbl = {0xff, 0xffff, 0x0}, + .safe_lut_tbl = {0xfff0, 0xff00, 0xffff}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(sc7180_qos_linear), + .entries = sc7180_qos_linear + }, + {.nentry = ARRAY_SIZE(sc7180_qos_macrotile), + .entries = sc7180_qos_macrotile + }, + {.nentry = ARRAY_SIZE(sc7180_qos_nrt), + .entries = sc7180_qos_nrt + }, + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_perf_cfg sm8150_perf_data = { + .max_bw_low = 12800000, + .max_bw_high = 12800000, + .min_core_ib = 2400000, + .min_llcc_ib = 800000, + .min_dram_ib = 800000, + .min_prefill_lines = 24, + .danger_lut_tbl = {0xf, 0xffff, 0x0}, + .safe_lut_tbl = {0xfff8, 0xf000, 0xffff}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(sm8150_qos_linear), + .entries = sm8150_qos_linear + }, + 
{.nentry = ARRAY_SIZE(sc7180_qos_macrotile), + .entries = sc7180_qos_macrotile + }, + {.nentry = ARRAY_SIZE(sc7180_qos_nrt), + .entries = sc7180_qos_nrt + }, + /* TODO: macrotile-qseed is different from macrotile */ + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_perf_cfg sc8180x_perf_data = { + .max_bw_low = 9600000, + .max_bw_high = 9600000, + .min_core_ib = 2400000, + .min_llcc_ib = 800000, + .min_dram_ib = 800000, + .danger_lut_tbl = {0xf, 0xffff, 0x0}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(sc8180x_qos_linear), + .entries = sc8180x_qos_linear + }, + {.nentry = ARRAY_SIZE(sc8180x_qos_macrotile), + .entries = sc8180x_qos_macrotile + }, + {.nentry = ARRAY_SIZE(sc7180_qos_nrt), + .entries = sc7180_qos_nrt + }, + /* TODO: macrotile-qseed is different from macrotile */ + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_perf_cfg sm8250_perf_data = { + .max_bw_low = 13700000, + .max_bw_high = 16600000, + .min_core_ib = 4800000, + .min_llcc_ib = 0, + .min_dram_ib = 800000, + .min_prefill_lines = 35, + .danger_lut_tbl = {0xf, 0xffff, 0x0}, + .safe_lut_tbl = {0xfff0, 0xff00, 0xffff}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(sc7180_qos_linear), + .entries = sc7180_qos_linear + }, + {.nentry = ARRAY_SIZE(sc7180_qos_macrotile), + .entries = sc7180_qos_macrotile + }, + {.nentry = ARRAY_SIZE(sc7180_qos_nrt), + .entries = sc7180_qos_nrt + }, + /* TODO: macrotile-qseed is different from macrotile */ + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_perf_cfg sc7280_perf_data = { + .max_bw_low = 4700000, + .max_bw_high = 8800000, + .min_core_ib = 2500000, + .min_llcc_ib = 0, + .min_dram_ib = 1600000, + .min_prefill_lines = 24, + .danger_lut_tbl = {0xffff, 0xffff, 0x0}, + .safe_lut_tbl = {0xff00, 0xff00, 0xffff}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(sc7180_qos_macrotile), + .entries = sc7180_qos_macrotile + }, + {.nentry = ARRAY_SIZE(sc7180_qos_macrotile), + .entries = sc7180_qos_macrotile + }, + {.nentry = ARRAY_SIZE(sc7180_qos_nrt), + .entries = sc7180_qos_nrt + }, + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; + +static const struct dpu_perf_cfg qcm2290_perf_data = { + .max_bw_low = 2700000, + .max_bw_high = 2700000, + .min_core_ib = 1300000, + .min_llcc_ib = 0, + .min_dram_ib = 1600000, + .min_prefill_lines = 24, + .danger_lut_tbl = {0xff, 0x0, 0x0}, + .safe_lut_tbl = {0xfff0, 0x0, 0x0}, + .qos_lut_tbl = { + {.nentry = ARRAY_SIZE(qcm2290_qos_linear), + .entries = qcm2290_qos_linear + }, + }, + .cdp_cfg = { + {.rd_enable = 1, .wr_enable = 1}, + {.rd_enable = 1, .wr_enable = 0} + }, + .clk_inefficiency_factor = 105, + .bw_inefficiency_factor = 120, +}; +/************************************************************* + * Hardware catalog + *************************************************************/ + +static const struct dpu_mdss_cfg msm8998_dpu_cfg = { + .caps = &msm8998_dpu_caps, + .mdp_count = ARRAY_SIZE(msm8998_mdp), + .mdp = msm8998_mdp, + .ctl_count = ARRAY_SIZE(msm8998_ctl), + .ctl = msm8998_ctl, + .sspp_count = ARRAY_SIZE(msm8998_sspp), + 
.sspp = msm8998_sspp, + .mixer_count = ARRAY_SIZE(msm8998_lm), + .mixer = msm8998_lm, + .dspp_count = ARRAY_SIZE(msm8998_dspp), + .dspp = msm8998_dspp, + .pingpong_count = ARRAY_SIZE(sdm845_pp), + .pingpong = sdm845_pp, + .intf_count = ARRAY_SIZE(msm8998_intf), + .intf = msm8998_intf, + .vbif_count = ARRAY_SIZE(msm8998_vbif), + .vbif = msm8998_vbif, + .reg_dma_count = 0, + .perf = &msm8998_perf_data, + .mdss_irqs = IRQ_SM8250_MASK, +}; + +static const struct dpu_mdss_cfg sdm845_dpu_cfg = { + .caps = &sdm845_dpu_caps, + .mdp_count = ARRAY_SIZE(sdm845_mdp), + .mdp = sdm845_mdp, + .ctl_count = ARRAY_SIZE(sdm845_ctl), + .ctl = sdm845_ctl, + .sspp_count = ARRAY_SIZE(sdm845_sspp), + .sspp = sdm845_sspp, + .mixer_count = ARRAY_SIZE(sdm845_lm), + .mixer = sdm845_lm, + .pingpong_count = ARRAY_SIZE(sdm845_pp), + .pingpong = sdm845_pp, + .dsc_count = ARRAY_SIZE(sdm845_dsc), + .dsc = sdm845_dsc, + .intf_count = ARRAY_SIZE(sdm845_intf), + .intf = sdm845_intf, + .vbif_count = ARRAY_SIZE(sdm845_vbif), + .vbif = sdm845_vbif, + .reg_dma_count = 1, + .dma_cfg = &sdm845_regdma, + .perf = &sdm845_perf_data, + .mdss_irqs = IRQ_SDM845_MASK, +}; + +static const struct dpu_mdss_cfg sc7180_dpu_cfg = { + .caps = &sc7180_dpu_caps, + .mdp_count = ARRAY_SIZE(sc7180_mdp), + .mdp = sc7180_mdp, + .ctl_count = ARRAY_SIZE(sc7180_ctl), + .ctl = sc7180_ctl, + .sspp_count = ARRAY_SIZE(sc7180_sspp), + .sspp = sc7180_sspp, + .mixer_count = ARRAY_SIZE(sc7180_lm), + .mixer = sc7180_lm, + .dspp_count = ARRAY_SIZE(sc7180_dspp), + .dspp = sc7180_dspp, + .pingpong_count = ARRAY_SIZE(sc7180_pp), + .pingpong = sc7180_pp, + .intf_count = ARRAY_SIZE(sc7180_intf), + .intf = sc7180_intf, + .wb_count = ARRAY_SIZE(sm8250_wb), + .wb = sm8250_wb, + .vbif_count = ARRAY_SIZE(sdm845_vbif), + .vbif = sdm845_vbif, + .reg_dma_count = 1, + .dma_cfg = &sdm845_regdma, + .perf = &sc7180_perf_data, + .mdss_irqs = IRQ_SC7180_MASK, +}; + +static const struct dpu_mdss_cfg sm8150_dpu_cfg = { + .caps = &sm8150_dpu_caps, + .mdp_count = ARRAY_SIZE(sdm845_mdp), + .mdp = sdm845_mdp, + .ctl_count = ARRAY_SIZE(sm8150_ctl), + .ctl = sm8150_ctl, + .sspp_count = ARRAY_SIZE(sdm845_sspp), + .sspp = sdm845_sspp, + .mixer_count = ARRAY_SIZE(sm8150_lm), + .mixer = sm8150_lm, + .dspp_count = ARRAY_SIZE(sm8150_dspp), + .dspp = sm8150_dspp, + .pingpong_count = ARRAY_SIZE(sm8150_pp), + .pingpong = sm8150_pp, + .merge_3d_count = ARRAY_SIZE(sm8150_merge_3d), + .merge_3d = sm8150_merge_3d, + .intf_count = ARRAY_SIZE(sm8150_intf), + .intf = sm8150_intf, + .vbif_count = ARRAY_SIZE(sdm845_vbif), + .vbif = sdm845_vbif, + .reg_dma_count = 1, + .dma_cfg = &sm8150_regdma, + .perf = &sm8150_perf_data, + .mdss_irqs = IRQ_SDM845_MASK, +}; + +static const struct dpu_mdss_cfg sc8180x_dpu_cfg = { + .caps = &sc8180x_dpu_caps, + .mdp_count = ARRAY_SIZE(sc8180x_mdp), + .mdp = sc8180x_mdp, + .ctl_count = ARRAY_SIZE(sm8150_ctl), + .ctl = sm8150_ctl, + .sspp_count = ARRAY_SIZE(sdm845_sspp), + .sspp = sdm845_sspp, + .mixer_count = ARRAY_SIZE(sm8150_lm), + .mixer = sm8150_lm, + .pingpong_count = ARRAY_SIZE(sm8150_pp), + .pingpong = sm8150_pp, + .merge_3d_count = ARRAY_SIZE(sm8150_merge_3d), + .merge_3d = sm8150_merge_3d, + .intf_count = ARRAY_SIZE(sc8180x_intf), + .intf = sc8180x_intf, + .vbif_count = ARRAY_SIZE(sdm845_vbif), + .vbif = sdm845_vbif, + .reg_dma_count = 1, + .dma_cfg = &sm8150_regdma, + .perf = &sc8180x_perf_data, + .mdss_irqs = IRQ_SC8180X_MASK, +}; + +static const struct dpu_mdss_cfg sm8250_dpu_cfg = { + .caps = &sm8250_dpu_caps, + .mdp_count = ARRAY_SIZE(sm8250_mdp), + .mdp = 
sm8250_mdp,
+	.ctl_count = ARRAY_SIZE(sm8150_ctl),
+	.ctl = sm8150_ctl,
+	.sspp_count = ARRAY_SIZE(sm8250_sspp),
+	.sspp = sm8250_sspp,
+	.mixer_count = ARRAY_SIZE(sm8150_lm),
+	.mixer = sm8150_lm,
+	.dspp_count = ARRAY_SIZE(sm8150_dspp),
+	.dspp = sm8150_dspp,
+	.pingpong_count = ARRAY_SIZE(sm8150_pp),
+	.pingpong = sm8150_pp,
+	.merge_3d_count = ARRAY_SIZE(sm8150_merge_3d),
+	.merge_3d = sm8150_merge_3d,
+	.intf_count = ARRAY_SIZE(sm8150_intf),
+	.intf = sm8150_intf,
+	.vbif_count = ARRAY_SIZE(sdm845_vbif),
+	.vbif = sdm845_vbif,
+	.wb_count = ARRAY_SIZE(sm8250_wb),
+	.wb = sm8250_wb,
+	.reg_dma_count = 1,
+	.dma_cfg = &sm8250_regdma,
+	.perf = &sm8250_perf_data,
+	.mdss_irqs = IRQ_SM8250_MASK,
+};
+
+static const struct dpu_mdss_cfg sc7280_dpu_cfg = {
+	.caps = &sc7280_dpu_caps,
+	.mdp_count = ARRAY_SIZE(sc7280_mdp),
+	.mdp = sc7280_mdp,
+	.ctl_count = ARRAY_SIZE(sc7280_ctl),
+	.ctl = sc7280_ctl,
+	.sspp_count = ARRAY_SIZE(sc7280_sspp),
+	.sspp = sc7280_sspp,
+	.dspp_count = ARRAY_SIZE(sc7180_dspp),
+	.dspp = sc7180_dspp,
+	.mixer_count = ARRAY_SIZE(sc7280_lm),
+	.mixer = sc7280_lm,
+	.pingpong_count = ARRAY_SIZE(sc7280_pp),
+	.pingpong = sc7280_pp,
+	.intf_count = ARRAY_SIZE(sc7280_intf),
+	.intf = sc7280_intf,
+	.vbif_count = ARRAY_SIZE(sdm845_vbif),
+	.vbif = sdm845_vbif,
+	.perf = &sc7280_perf_data,
+	.mdss_irqs = IRQ_SC7280_MASK,
+};
+
+static const struct dpu_mdss_cfg qcm2290_dpu_cfg = {
+	.caps = &qcm2290_dpu_caps,
+	.mdp_count = ARRAY_SIZE(qcm2290_mdp),
+	.mdp = qcm2290_mdp,
+	.ctl_count = ARRAY_SIZE(qcm2290_ctl),
+	.ctl = qcm2290_ctl,
+	.sspp_count = ARRAY_SIZE(qcm2290_sspp),
+	.sspp = qcm2290_sspp,
+	.mixer_count = ARRAY_SIZE(qcm2290_lm),
+	.mixer = qcm2290_lm,
+	.dspp_count = ARRAY_SIZE(qcm2290_dspp),
+	.dspp = qcm2290_dspp,
+	.pingpong_count = ARRAY_SIZE(qcm2290_pp),
+	.pingpong = qcm2290_pp,
+	.intf_count = ARRAY_SIZE(qcm2290_intf),
+	.intf = qcm2290_intf,
+	.vbif_count = ARRAY_SIZE(sdm845_vbif),
+	.vbif = sdm845_vbif,
+	.perf = &qcm2290_perf_data,
+	.mdss_irqs = IRQ_SC7180_MASK,
+};
+
+static const struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
+	{ .hw_rev = DPU_HW_VER_300, .dpu_cfg = &msm8998_dpu_cfg},
+	{ .hw_rev = DPU_HW_VER_301, .dpu_cfg = &msm8998_dpu_cfg},
+	{ .hw_rev = DPU_HW_VER_400, .dpu_cfg = &sdm845_dpu_cfg},
+	{ .hw_rev = DPU_HW_VER_401, .dpu_cfg = &sdm845_dpu_cfg},
+	{ .hw_rev = DPU_HW_VER_500, .dpu_cfg = &sm8150_dpu_cfg},
+	{ .hw_rev = DPU_HW_VER_501, .dpu_cfg = &sm8150_dpu_cfg},
+	{ .hw_rev = DPU_HW_VER_510, .dpu_cfg = &sc8180x_dpu_cfg},
+	{ .hw_rev = DPU_HW_VER_600, .dpu_cfg = &sm8250_dpu_cfg},
+	{ .hw_rev = DPU_HW_VER_620, .dpu_cfg = &sc7180_dpu_cfg},
+	{ .hw_rev = DPU_HW_VER_650, .dpu_cfg = &qcm2290_dpu_cfg},
+	{ .hw_rev = DPU_HW_VER_720, .dpu_cfg = &sc7280_dpu_cfg},
+};
+
+const struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
+		if (cfg_handler[i].hw_rev == hw_rev)
+			return cfg_handler[i].dpu_cfg;
+	}
+
+	DPU_ERROR("unsupported chipset id:%X\n", hw_rev);
+
+	return ERR_PTR(-ENODEV);
+}
+
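A minimal usage sketch for the lookup above (editor's addition; `core_rev` stands in for the revision value a caller would read from the hardware or its match data):

	const struct dpu_mdss_cfg *cfg = dpu_hw_catalog_init(core_rev);

	if (IS_ERR(cfg)) {
		/* unknown revision: the function returned ERR_PTR(-ENODEV) */
		return PTR_ERR(cfg);
	}

	/* cfg->caps, cfg->sspp, cfg->intf, ... now describe this SoC's DPU. */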
+ */
+
+#ifndef _DPU_HW_CATALOG_H
+#define _DPU_HW_CATALOG_H
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/bitmap.h>
+#include <linux/err.h>
+
+/**
+ * Max hardware block count: for example, max 12 SSPP pipes or
+ * 5 ctl paths. In all cases it can have max 12 hardware blocks
+ * based on the current design
+ */
+#define MAX_BLOCKS 12
+
+#define DPU_HW_VER(MAJOR, MINOR, STEP) (((MAJOR & 0xF) << 28) |\
+ ((MINOR & 0xFFF) << 16) |\
+ (STEP & 0xFFFF))
+
+#define DPU_HW_MAJOR(rev) ((rev) >> 28)
+#define DPU_HW_MINOR(rev) (((rev) >> 16) & 0xFFF)
+#define DPU_HW_STEP(rev) ((rev) & 0xFFFF)
+#define DPU_HW_MAJOR_MINOR(rev) ((rev) >> 16)
+
+#define IS_DPU_MAJOR_MINOR_SAME(rev1, rev2) \
+ (DPU_HW_MAJOR_MINOR((rev1)) == DPU_HW_MAJOR_MINOR((rev2)))
+
+#define DPU_HW_VER_170 DPU_HW_VER(1, 7, 0) /* 8996 v1.0 */
+#define DPU_HW_VER_171 DPU_HW_VER(1, 7, 1) /* 8996 v2.0 */
+#define DPU_HW_VER_172 DPU_HW_VER(1, 7, 2) /* 8996 v3.0 */
+#define DPU_HW_VER_300 DPU_HW_VER(3, 0, 0) /* 8998 v1.0 */
+#define DPU_HW_VER_301 DPU_HW_VER(3, 0, 1) /* 8998 v1.1 */
+#define DPU_HW_VER_400 DPU_HW_VER(4, 0, 0) /* sdm845 v1.0 */
+#define DPU_HW_VER_401 DPU_HW_VER(4, 0, 1) /* sdm845 v2.0 */
+#define DPU_HW_VER_410 DPU_HW_VER(4, 1, 0) /* sdm670 v1.0 */
+#define DPU_HW_VER_500 DPU_HW_VER(5, 0, 0) /* sm8150 v1.0 */
+#define DPU_HW_VER_501 DPU_HW_VER(5, 0, 1) /* sm8150 v2.0 */
+#define DPU_HW_VER_510 DPU_HW_VER(5, 1, 1) /* sc8180 */
+#define DPU_HW_VER_600 DPU_HW_VER(6, 0, 0) /* sm8250 */
+#define DPU_HW_VER_620 DPU_HW_VER(6, 2, 0) /* sc7180 v1.0 */
+#define DPU_HW_VER_650 DPU_HW_VER(6, 5, 0) /* qcm2290|sm4125 */
+#define DPU_HW_VER_720 DPU_HW_VER(7, 2, 0) /* sc7280 */
+
+#define IS_MSM8996_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_170)
+#define IS_MSM8998_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_300)
+#define IS_SDM845_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_400)
+#define IS_SDM670_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_410)
+#define IS_SDM855_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_500)
+#define IS_SC7180_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_620)
+#define IS_SC7280_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_720)
+
+#define DPU_HW_BLK_NAME_LEN 16
+
+#define MAX_IMG_WIDTH 0x3fff
+#define MAX_IMG_HEIGHT 0x3fff
+
+#define CRTC_DUAL_MIXERS 2
+
+#define MAX_XIN_COUNT 16
+
+/**
+ * Supported UBWC feature versions
+ */
+enum {
+ DPU_HW_UBWC_VER_10 = 0x100,
+ DPU_HW_UBWC_VER_20 = 0x200,
+ DPU_HW_UBWC_VER_30 = 0x300,
+ DPU_HW_UBWC_VER_40 = 0x400,
+};
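
A quick editorial aside on the DPU_HW_VER packing above: major lands in bits [31:28], minor in [27:16], step in [15:0], which is why the IS_*_TARGET macros compare only the major/minor halves. The sketch below is illustrative only and not part of the patch; the helper name is invented.

#include <assert.h>
#include <stdint.h>

/* Host-side sketch mirroring the DPU_HW_VER bit packing. */
static uint32_t example_dpu_hw_ver(uint32_t major, uint32_t minor, uint32_t step)
{
	return ((major & 0xF) << 28) | ((minor & 0xFFF) << 16) | (step & 0xFFFF);
}

int main(void)
{
	uint32_t rev = example_dpu_hw_ver(6, 2, 0); /* DPU_HW_VER_620, sc7180 v1.0 */

	assert(rev == 0x60020000);
	assert((rev >> 28) == 6);           /* DPU_HW_MAJOR */
	assert(((rev >> 16) & 0xFFF) == 2); /* DPU_HW_MINOR */
	/* IS_SC7180_TARGET ignores the step, so v1.0 and v1.1 both match */
	assert((rev >> 16) == (example_dpu_hw_ver(6, 2, 1) >> 16));
	return 0;
}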
+
+/**
+ * MDP TOP BLOCK features
+ * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
+ * @DPU_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
+ * @DPU_MDP_BWC, MDSS HW supports Bandwidth compression.
+ * @DPU_MDP_UBWC_1_0, This chipset supports Universal Bandwidth
+ * compression initial revision
+ * @DPU_MDP_UBWC_1_5, Universal Bandwidth compression version 1.5
+ * @DPU_MDP_MAX Maximum value
+ */
+enum {
+ DPU_MDP_PANIC_PER_PIPE = 0x1,
+ DPU_MDP_10BIT_SUPPORT,
+ DPU_MDP_BWC,
+ DPU_MDP_UBWC_1_0,
+ DPU_MDP_UBWC_1_5,
+ DPU_MDP_AUDIO_SELECT,
+ DPU_MDP_MAX
+};
+
+/**
+ * SSPP sub-blocks/features
+ * @DPU_SSPP_SRC Src and fetch part of the pipes,
+ * @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
+ * @DPU_SSPP_SCALER_QSEED3, QSEED3 algorithm support
+ * @DPU_SSPP_SCALER_QSEED3LITE, QSEED3 Lite algorithm support
+ * @DPU_SSPP_SCALER_QSEED4, QSEED4 algorithm support
+ * @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes
+ * @DPU_SSPP_CSC, Support of Color space conversion
+ * @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion
+ * @DPU_SSPP_CURSOR, SSPP can be used as a cursor layer
+ * @DPU_SSPP_QOS, SSPP support QoS control, danger/safe/creq
+ * @DPU_SSPP_QOS_8LVL, SSPP support 8-level QoS control
+ * @DPU_SSPP_EXCL_RECT, SSPP supports exclusion rect
+ * @DPU_SSPP_SMART_DMA_V1, SmartDMA 1.0 support
+ * @DPU_SSPP_SMART_DMA_V2, SmartDMA 2.0 support
+ * @DPU_SSPP_TS_PREFILL Supports prefill with traffic shaper
+ * @DPU_SSPP_TS_PREFILL_REC1 Supports prefill with traffic shaper multirec
+ * @DPU_SSPP_CDP Supports client driven prefetch
+ * @DPU_SSPP_INLINE_ROTATION Support inline rotation
+ * @DPU_SSPP_MAX maximum value
+ */
+enum {
+ DPU_SSPP_SRC = 0x1,
+ DPU_SSPP_SCALER_QSEED2,
+ DPU_SSPP_SCALER_QSEED3,
+ DPU_SSPP_SCALER_QSEED3LITE,
+ DPU_SSPP_SCALER_QSEED4,
+ DPU_SSPP_SCALER_RGB,
+ DPU_SSPP_CSC,
+ DPU_SSPP_CSC_10BIT,
+ DPU_SSPP_CURSOR,
+ DPU_SSPP_QOS,
+ DPU_SSPP_QOS_8LVL,
+ DPU_SSPP_EXCL_RECT,
+ DPU_SSPP_SMART_DMA_V1,
+ DPU_SSPP_SMART_DMA_V2,
+ DPU_SSPP_TS_PREFILL,
+ DPU_SSPP_TS_PREFILL_REC1,
+ DPU_SSPP_CDP,
+ DPU_SSPP_INLINE_ROTATION,
+ DPU_SSPP_MAX
+};
+
+/*
+ * MIXER sub-blocks/features
+ * @DPU_MIXER_LAYER Layer mixer layer blend configuration,
+ * @DPU_MIXER_SOURCESPLIT Layer mixer supports source-split configuration
+ * @DPU_MIXER_GC Gamma correction block
+ * @DPU_DIM_LAYER Layer mixer supports dim layer
+ * @DPU_MIXER_COMBINED_ALPHA Layer mixer has combined alpha register
+ * @DPU_MIXER_MAX maximum value
+ */
+enum {
+ DPU_MIXER_LAYER = 0x1,
+ DPU_MIXER_SOURCESPLIT,
+ DPU_MIXER_GC,
+ DPU_DIM_LAYER,
+ DPU_MIXER_COMBINED_ALPHA,
+ DPU_MIXER_MAX
+};
+
+/**
+ * DSPP sub-blocks
+ * @DPU_DSPP_PCC Panel color correction block
+ * @DPU_DSPP_GC Gamma correction block
+ */
+enum {
+ DPU_DSPP_PCC = 0x1,
+ DPU_DSPP_GC,
+ DPU_DSPP_MAX
+};
+
+/**
+ * PINGPONG sub-blocks
+ * @DPU_PINGPONG_TE Tear check block
+ * @DPU_PINGPONG_TE2 Additional tear check block for split pipes
+ * @DPU_PINGPONG_SPLIT PP block supports split fifo
+ * @DPU_PINGPONG_SLAVE PP block is a suitable slave for split fifo
+ * @DPU_PINGPONG_DITHER, Dither blocks
+ * @DPU_PINGPONG_MAX
+ */
+enum {
+ DPU_PINGPONG_TE = 0x1,
+ DPU_PINGPONG_TE2,
+ DPU_PINGPONG_SPLIT,
+ DPU_PINGPONG_SLAVE,
+ DPU_PINGPONG_DITHER,
+ DPU_PINGPONG_MAX
+};
+
+/**
+ * CTL sub-blocks
+ * @DPU_CTL_SPLIT_DISPLAY: CTL supports video mode split display
+ * @DPU_CTL_ACTIVE_CFG: CTL supports the active-CTL (v1) flush configuration
+ * @DPU_CTL_FETCH_ACTIVE: Active CTL for fetch HW (SSPPs)
+ * @DPU_CTL_VM_CFG: CTL config to support multiple VMs
+ * @DPU_CTL_MAX
+ */
+enum {
+ DPU_CTL_SPLIT_DISPLAY = 0x1,
+ DPU_CTL_ACTIVE_CFG,
+ DPU_CTL_FETCH_ACTIVE,
+ DPU_CTL_VM_CFG,
+ DPU_CTL_MAX
+};
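
These enum values are bit positions within each block's features mask rather than standalone flags; later code in this patch gates optional paths with BIT()/test_bit() on them (see _setup_ctl_ops() in dpu_hw_ctl.c below). A minimal sketch of that idiom follows; the helper names are invented:

/* Sketch: catalog feature bits are tested with test_bit()/BIT(). */
static bool example_sspp_has_qseed4(unsigned long features)
{
	return test_bit(DPU_SSPP_SCALER_QSEED4, &features);
}

static bool example_ctl_is_active_ctl(unsigned long features)
{
	return (features & BIT(DPU_CTL_ACTIVE_CFG)) != 0;
}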
+
+/**
+ * INTF sub-blocks
+ * @DPU_INTF_INPUT_CTRL Supports the setting of pp block from which
+ * pixel data arrives to this INTF
+ * @DPU_INTF_TE INTF block has TE configuration support
+ * @DPU_DATA_HCTL_EN Allows data to be transferred at different rate
+ * than video timing
+ * @DPU_INTF_STATUS_SUPPORTED INTF block has INTF_STATUS register
+ * @DPU_INTF_MAX
+ */
+enum {
+ DPU_INTF_INPUT_CTRL = 0x1,
+ DPU_INTF_TE,
+ DPU_DATA_HCTL_EN,
+ DPU_INTF_STATUS_SUPPORTED,
+ DPU_INTF_MAX
+};
+
+/**
+ * WB sub-blocks and features
+ * @DPU_WB_LINE_MODE Writeback module supports line/linear mode
+ * @DPU_WB_BLOCK_MODE Writeback module supports block mode read
+ * @DPU_WB_CHROMA_DOWN, Writeback chroma down block,
+ * @DPU_WB_DOWNSCALE, Writeback integer downscaler,
+ * @DPU_WB_DITHER, Dither block
+ * @DPU_WB_TRAFFIC_SHAPER, Writeback traffic shaper block
+ * @DPU_WB_UBWC, Writeback Universal bandwidth compression
+ * @DPU_WB_YUV_CONFIG Writeback supports output of YUV colorspace
+ * @DPU_WB_PIPE_ALPHA Writeback supports pipe alpha
+ * @DPU_WB_XY_ROI_OFFSET Writeback supports x/y-offset of out ROI in
+ * the destination image
+ * @DPU_WB_QOS, Writeback supports QoS control, danger/safe/creq
+ * @DPU_WB_QOS_8LVL, Writeback supports 8-level QoS control
+ * @DPU_WB_CDP Writeback supports client driven prefetch
+ * @DPU_WB_INPUT_CTRL Writeback supports selecting the pp block from which
+ * input pixel data arrives
+ * @DPU_WB_CROP CWB supports cropping
+ * @DPU_WB_MAX maximum value
+ */
+enum {
+ DPU_WB_LINE_MODE = 0x1,
+ DPU_WB_BLOCK_MODE,
+ DPU_WB_UBWC,
+ DPU_WB_YUV_CONFIG,
+ DPU_WB_PIPE_ALPHA,
+ DPU_WB_XY_ROI_OFFSET,
+ DPU_WB_QOS,
+ DPU_WB_QOS_8LVL,
+ DPU_WB_CDP,
+ DPU_WB_INPUT_CTRL,
+ DPU_WB_CROP,
+ DPU_WB_MAX
+};
+
+/**
+ * VBIF sub-blocks and features
+ * @DPU_VBIF_QOS_OTLIM VBIF supports OT Limit
+ * @DPU_VBIF_QOS_REMAP VBIF supports QoS priority remap
+ * @DPU_VBIF_MAX maximum value
+ */
+enum {
+ DPU_VBIF_QOS_OTLIM = 0x1,
+ DPU_VBIF_QOS_REMAP,
+ DPU_VBIF_MAX
+};
+
+/**
+ * MACRO DPU_HW_BLK_INFO - information of HW blocks inside DPU
+ * @name: string name for debug purposes
+ * @id: enum identifying this block
+ * @base: register base offset to mdss
+ * @len: length of hardware block
+ * @features bit mask identifying sub-blocks/features
+ */
+#define DPU_HW_BLK_INFO \
+ char name[DPU_HW_BLK_NAME_LEN]; \
+ u32 id; \
+ u32 base; \
+ u32 len; \
+ unsigned long features
+
+/**
+ * MACRO DPU_HW_SUBBLK_INFO - information of HW sub-block inside DPU
+ * @name: string name for debug purposes
+ * @id: enum identifying this sub-block
+ * @base: offset of this sub-block relative to the block
+ * offset
+ * @len register block length of this sub-block
+ */
+#define DPU_HW_SUBBLK_INFO \
+ char name[DPU_HW_BLK_NAME_LEN]; \
+ u32 id; \
+ u32 base; \
+ u32 len
+
+/**
+ * struct dpu_src_blk: SSPP part of the source pipes
+ * @info: HW register and features supported by this sub-blk
+ */
+struct dpu_src_blk {
+ DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_scaler_blk: Scaler information
+ * @info: HW register and features supported by this sub-blk
+ * @version: qseed block revision
+ */
+struct dpu_scaler_blk {
+ DPU_HW_SUBBLK_INFO;
+ u32 version;
+};
+
+struct dpu_csc_blk {
+ DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_pp_blk : Pixel processing sub-blk information
+ * @info: HW register and features supported by this sub-blk
+ * @version: HW Algorithm version
+ */
+struct dpu_pp_blk {
+ DPU_HW_SUBBLK_INFO;
+ u32 version;
+};
+
+/**
+ * enum dpu_qos_lut_usage - define QoS LUT use cases
+ */
+enum dpu_qos_lut_usage {
+ DPU_QOS_LUT_USAGE_LINEAR,
+ DPU_QOS_LUT_USAGE_MACROTILE,
+ DPU_QOS_LUT_USAGE_NRT,
+ DPU_QOS_LUT_USAGE_MAX,
+};
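
For readers unfamiliar with the DPU_HW_BLK_INFO idiom used throughout this header: the macro pastes the common header fields into the top of each *_cfg struct. A hypothetical expansion for illustration only; this expanded struct does not exist anywhere in the patch:

/* What "struct dpu_ctl_cfg { DPU_HW_BLK_INFO; s32 intr_start; };"
 * (defined further below) becomes after preprocessing:
 */
struct example_ctl_cfg_expanded {
	char name[DPU_HW_BLK_NAME_LEN]; /* string name for debug purposes */
	u32 id;                         /* enum identifying this block */
	u32 base;                       /* register base offset to mdss */
	u32 len;                        /* length of hardware block */
	unsigned long features;         /* sub-block/feature bit mask */
	s32 intr_start;                 /* block-specific members follow */
};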
+
+/**
+ * struct dpu_qos_lut_entry - define QoS LUT table entry
+ * @fl: fill level, or zero on last entry to indicate default lut
+ * @lut: lut to use if equal to or less than fill level
+ */
+struct dpu_qos_lut_entry {
+ u32 fl;
+ u64 lut;
+};
+
+/**
+ * struct dpu_qos_lut_tbl - define QoS LUT table
+ * @nentry: number of entry in this table
+ * @entries: Pointer to table entries
+ */
+struct dpu_qos_lut_tbl {
+ u32 nentry;
+ const struct dpu_qos_lut_entry *entries;
+};
+
+/**
+ * struct dpu_rotation_cfg - define inline rotation config
+ * @rot_maxheight: max pre-rotated height allowed for rotation
+ * @rot_num_formats: number of elements in @rot_format_list
+ * @rot_format_list: list of supported rotator formats
+ */
+struct dpu_rotation_cfg {
+ u32 rot_maxheight;
+ size_t rot_num_formats;
+ const u32 *rot_format_list;
+};
+
+/**
+ * struct dpu_caps - define DPU capabilities
+ * @max_mixer_width max layer mixer line width support.
+ * @max_mixer_blendstages max layer mixer blend stages or
+ * supported z order
+ * @qseed_type qseed2 or qseed3 support.
+ * @smart_dma_rev Supported version of SmartDMA feature.
+ * @ubwc_version UBWC feature version (0x0 for not supported)
+ * @has_src_split source split feature status
+ * @has_dim_layer dim layer feature status
+ * @has_idle_pc indicate if idle power collapse feature is supported
+ * @has_3d_merge indicate if 3D merge is supported
+ * @max_linewidth max linewidth for sspp
+ * @pixel_ram_size size of latency hiding and de-tiling buffer in bytes
+ * @max_hdeci_exp max horizontal decimation supported (max is 2^value)
+ * @max_vdeci_exp max vertical decimation supported (max is 2^value)
+ */
+struct dpu_caps {
+ u32 max_mixer_width;
+ u32 max_mixer_blendstages;
+ u32 qseed_type;
+ u32 smart_dma_rev;
+ u32 ubwc_version;
+ bool has_src_split;
+ bool has_dim_layer;
+ bool has_idle_pc;
+ bool has_3d_merge;
+ /* SSPP limits */
+ u32 max_linewidth;
+ u32 pixel_ram_size;
+ u32 max_hdeci_exp;
+ u32 max_vdeci_exp;
+};
+
+/**
+ * struct dpu_sspp_sub_blks : SSPP sub-blocks
+ * @creq_vblank: creq priority during vertical blanking
+ * @danger_vblank: danger priority during vertical blanking
+ * @maxdwnscale: max downscale ratio supported (without DECIMATION)
+ * @maxupscale: max upscale ratio supported
+ * @smart_dma_priority: hw priority of rect1 of multirect pipe
+ * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
+ * @qseed_ver: qseed version
+ * @src_blk:
+ * @scaler_blk:
+ * @csc_blk:
+ * @hsic_blk:
+ * @memcolor_blk:
+ * @pcc_blk:
+ * @igc_blk:
+ * @format_list: Pointer to list of supported formats
+ * @num_formats: Number of supported formats
+ * @virt_format_list: Pointer to list of supported formats for virtual planes
+ * @virt_num_formats: Number of supported formats for virtual planes
+ * @rotation_cfg: inline rotation configuration
+ */
+struct dpu_sspp_sub_blks {
+ u32 creq_vblank;
+ u32 danger_vblank;
+ u32 maxdwnscale;
+ u32 maxupscale;
+ u32 smart_dma_priority;
+ u32 max_per_pipe_bw;
+ u32 qseed_ver;
+ struct dpu_src_blk src_blk;
+ struct dpu_scaler_blk scaler_blk;
+ struct dpu_pp_blk csc_blk;
+ struct dpu_pp_blk hsic_blk;
+ struct dpu_pp_blk memcolor_blk;
+ struct dpu_pp_blk pcc_blk;
+ struct dpu_pp_blk igc_blk;
+
+ const u32 *format_list;
+ u32 num_formats;
+ const u32 *virt_format_list;
+ u32 virt_num_formats;
+ const struct dpu_rotation_cfg *rotation_cfg;
+};
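
Tying the two QoS LUT structs together: per the field comments, entries are ordered by fill level and an @fl of zero on the last entry marks the default LUT. A sketch of the lookup this implies; the semantics are inferred from the comments above, the helper name is invented, and the real lookup lives elsewhere in the driver:

/* Sketch: return the LUT for a given fill level, falling back to the
 * fl == 0 default entry that terminates the table.
 */
static u64 example_pick_qos_lut(const struct dpu_qos_lut_tbl *tbl, u32 fl)
{
	u32 i;

	for (i = 0; i < tbl->nentry; i++) {
		if (tbl->entries[i].fl == 0 || fl <= tbl->entries[i].fl)
			return tbl->entries[i].lut;
	}

	return 0; /* empty table: nothing to program */
}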
+
+/**
+ * struct dpu_lm_sub_blks: information of mixer block
+ * @maxwidth: Max pixel width supported by this mixer
+ * @maxblendstages: Max number of blend-stages supported
+ * @blendstage_base: Blend-stage register base offset
+ * @gc: gamma correction block
+ */
+struct dpu_lm_sub_blks {
+ u32 maxwidth;
+ u32 maxblendstages;
+ u32 blendstage_base[MAX_BLOCKS];
+ struct dpu_pp_blk gc;
+};
+
+/**
+ * struct dpu_dspp_sub_blks: Information of DSPP block
+ * @gc : gamma correction block
+ * @pcc: pixel color correction block
+ */
+struct dpu_dspp_sub_blks {
+ struct dpu_pp_blk gc;
+ struct dpu_pp_blk pcc;
+};
+
+struct dpu_pingpong_sub_blks {
+ struct dpu_pp_blk te;
+ struct dpu_pp_blk te2;
+ struct dpu_pp_blk dither;
+};
+
+/**
+ * dpu_clk_ctrl_type - Defines top level clock control signals
+ */
+enum dpu_clk_ctrl_type {
+ DPU_CLK_CTRL_NONE,
+ DPU_CLK_CTRL_VIG0,
+ DPU_CLK_CTRL_VIG1,
+ DPU_CLK_CTRL_VIG2,
+ DPU_CLK_CTRL_VIG3,
+ DPU_CLK_CTRL_VIG4,
+ DPU_CLK_CTRL_RGB0,
+ DPU_CLK_CTRL_RGB1,
+ DPU_CLK_CTRL_RGB2,
+ DPU_CLK_CTRL_RGB3,
+ DPU_CLK_CTRL_DMA0,
+ DPU_CLK_CTRL_DMA1,
+ DPU_CLK_CTRL_DMA2,
+ DPU_CLK_CTRL_DMA3,
+ DPU_CLK_CTRL_CURSOR0,
+ DPU_CLK_CTRL_CURSOR1,
+ DPU_CLK_CTRL_INLINE_ROT0_SSPP,
+ DPU_CLK_CTRL_REG_DMA,
+ DPU_CLK_CTRL_WB2,
+ DPU_CLK_CTRL_MAX,
+};
+
+/* struct dpu_clk_ctrl_reg : Clock control register
+ * @reg_off: register offset
+ * @bit_off: bit offset
+ */
+struct dpu_clk_ctrl_reg {
+ u32 reg_off;
+ u32 bit_off;
+};
+
+/* struct dpu_mdp_cfg : MDP TOP-BLK instance info
+ * @id: index identifying this block
+ * @base: register base offset to mdss
+ * @features bit mask identifying sub-blocks/features
+ * @highest_bank_bit: UBWC parameter
+ * @ubwc_swizzle: ubwc default swizzle setting
+ * @clk_ctrls clock control register definition
+ */
+struct dpu_mdp_cfg {
+ DPU_HW_BLK_INFO;
+ u32 highest_bank_bit;
+ u32 ubwc_swizzle;
+ struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX];
+};
+
+/* struct dpu_ctl_cfg : MDP CTL instance info
+ * @id: index identifying this block
+ * @base: register base offset to mdss
+ * @features bit mask identifying sub-blocks/features
+ * @intr_start: interrupt index for CTL_START
+ */
+struct dpu_ctl_cfg {
+ DPU_HW_BLK_INFO;
+ s32 intr_start;
+};
+
+/**
+ * struct dpu_sspp_cfg - information of source pipes
+ * @id: index identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @sblk: SSPP sub-blocks information
+ * @xin_id: bus client identifier
+ * @clk_ctrl clock control identifier
+ * @type sspp type identifier
+ */
+struct dpu_sspp_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_sspp_sub_blks *sblk;
+ u32 xin_id;
+ enum dpu_clk_ctrl_type clk_ctrl;
+ u32 type;
+};
+
+/**
+ * struct dpu_lm_cfg - information of layer mixer blocks
+ * @id: index identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @sblk: LM Sub-blocks information
+ * @pingpong: ID of connected PingPong, PINGPONG_MAX if unsupported
+ * @dspp: ID of connected DSPP if any
+ * @lm_pair_mask: Bitmask of LMs that can be controlled by same CTL
+ */
+struct dpu_lm_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_lm_sub_blks *sblk;
+ u32 pingpong;
+ u32 dspp;
+ unsigned long lm_pair_mask;
+};
+
+/**
+ * struct dpu_dspp_cfg - information of DSPP blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * supported by this block
+ * @sblk sub-blocks information
+ */
+struct dpu_dspp_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_dspp_sub_blks *sblk;
+};
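
All of the *_cfg arrays declared with these structs are searched by block id at init time; the lookups later in this patch (_ctl_offset(), _dsc_offset(), _dspp_offset()) all follow the same shape, sketched here with an invented helper name (struct dpu_mdss_cfg itself is defined at the end of this header):

/* Sketch of the id-based catalog lookup used by the dpu_hw_*_init() helpers. */
static const struct dpu_dspp_cfg *example_find_dspp(const struct dpu_mdss_cfg *m,
						    u32 id)
{
	int i;

	for (i = 0; i < m->dspp_count; i++) {
		if (m->dspp[i].id == id)
			return &m->dspp[i];
	}

	return NULL; /* block not present in this SoC's catalog */
}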
+
+/**
+ * struct dpu_pingpong_cfg - information of PING-PONG blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @merge_3d: ID of connected merge 3d block
+ * @intr_done: index for PINGPONG done interrupt
+ * @intr_rdptr: index for PINGPONG readpointer done interrupt
+ * @sblk sub-blocks information
+ */
+struct dpu_pingpong_cfg {
+ DPU_HW_BLK_INFO;
+ u32 merge_3d;
+ s32 intr_done;
+ s32 intr_rdptr;
+ const struct dpu_pingpong_sub_blks *sblk;
+};
+
+/**
+ * struct dpu_merge_3d_cfg - information of MERGE_3D blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * supported by this block
+ * @sblk sub-blocks information
+ */
+struct dpu_merge_3d_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_merge_3d_sub_blks *sblk;
+};
+
+/**
+ * struct dpu_dsc_cfg - information of DSC blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ */
+struct dpu_dsc_cfg {
+ DPU_HW_BLK_INFO;
+};
+
+/**
+ * struct dpu_intf_cfg - information of timing engine blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @type: Interface type (DSI, DP, HDMI)
+ * @controller_id: Controller Instance ID in case of multiple of intf type
+ * @prog_fetch_lines_worst_case Worst case latency num lines needed to prefetch
+ * @intr_underrun: index for INTF underrun interrupt
+ * @intr_vsync: index for INTF VSYNC interrupt
+ */
+struct dpu_intf_cfg {
+ DPU_HW_BLK_INFO;
+ u32 type; /* interface type */
+ u32 controller_id;
+ u32 prog_fetch_lines_worst_case;
+ s32 intr_underrun;
+ s32 intr_vsync;
+};
+
+/**
+ * struct dpu_wb_cfg - information of writeback blocks
+ * @DPU_HW_BLK_INFO: refer to the description above for DPU_HW_BLK_INFO
+ * @vbif_idx: vbif client index
+ * @maxlinewidth: max line width supported by writeback block
+ * @xin_id: bus client identifier
+ * @intr_wb_done: interrupt index for WB_DONE
+ * @format_list: list of formats supported by this writeback block
+ * @num_formats: number of formats supported by this writeback block
+ * @clk_ctrl: clock control identifier
+ */
+struct dpu_wb_cfg {
+ DPU_HW_BLK_INFO;
+ u8 vbif_idx;
+ u32 maxlinewidth;
+ u32 xin_id;
+ s32 intr_wb_done;
+ const u32 *format_list;
+ u32 num_formats;
+ enum dpu_clk_ctrl_type clk_ctrl;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_cfg - dynamic OT setting
+ * @pps pixels per second
+ * @ot_limit OT limit to use up to the specified pixels per second
+ */
+struct dpu_vbif_dynamic_ot_cfg {
+ u64 pps;
+ u32 ot_limit;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_tbl - dynamic OT setting table
+ * @count length of cfg
+ * @cfg pointer to array of configuration settings with
+ * ascending requirements
+ */
+struct dpu_vbif_dynamic_ot_tbl {
+ u32 count;
+ const struct dpu_vbif_dynamic_ot_cfg *cfg;
+};
+
+/**
+ * struct dpu_vbif_qos_tbl - QoS priority table
+ * @npriority_lvl num of priority level
+ * @priority_lvl pointer to array of priority level in ascending order
+ */
+struct dpu_vbif_qos_tbl {
+ u32 npriority_lvl;
+ const u32 *priority_lvl;
+};
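
Since @cfg entries in dpu_vbif_dynamic_ot_tbl are sorted by ascending @pps, the natural selection picks the first threshold that covers the requested pixel rate and otherwise falls back to the static default. A sketch under that assumption; the helper name is invented:

/* Sketch: choose an OT limit for a given pixels-per-second load. */
static u32 example_dynamic_ot_limit(const struct dpu_vbif_dynamic_ot_tbl *tbl,
				    u64 pps)
{
	u32 i;

	for (i = 0; i < tbl->count; i++) {
		if (pps <= tbl->cfg[i].pps)
			return tbl->cfg[i].ot_limit;
	}

	return 0; /* caller falls back to the default OT rd/wr limit */
}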
+
+/**
+ * struct dpu_vbif_cfg - information of VBIF blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @default_ot_rd_limit default OT read limit
+ * @default_ot_wr_limit default OT write limit
+ * @xin_halt_timeout maximum time (in usec) for xin to halt
+ * @qos_rp_remap_size size of VBIF_XINL_QOS_RP_REMAP register space
+ * @dynamic_ot_rd_tbl dynamic OT read configuration table
+ * @dynamic_ot_wr_tbl dynamic OT write configuration table
+ * @qos_rt_tbl real-time QoS priority table
+ * @qos_nrt_tbl non-real-time QoS priority table
+ * @memtype_count number of defined memtypes
+ * @memtype array of xin memtype definitions
+ */
+struct dpu_vbif_cfg {
+ DPU_HW_BLK_INFO;
+ u32 default_ot_rd_limit;
+ u32 default_ot_wr_limit;
+ u32 xin_halt_timeout;
+ u32 qos_rp_remap_size;
+ struct dpu_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
+ struct dpu_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
+ struct dpu_vbif_qos_tbl qos_rt_tbl;
+ struct dpu_vbif_qos_tbl qos_nrt_tbl;
+ u32 memtype_count;
+ u32 memtype[MAX_XIN_COUNT];
+};
+/**
+ * struct dpu_reg_dma_cfg - information of lut dma blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @version version of lutdma hw block
+ * @trigger_sel_off offset to trigger select registers of lutdma
+ * @xin_id bus client identifier
+ * @clk_ctrl clock control identifier
+ */
+struct dpu_reg_dma_cfg {
+ DPU_HW_BLK_INFO;
+ u32 version;
+ u32 trigger_sel_off;
+ u32 xin_id;
+ enum dpu_clk_ctrl_type clk_ctrl;
+};
+
+/**
+ * Define CDP use cases
+ * @DPU_PERF_CDP_USAGE_RT: real-time use cases
+ * @DPU_PERF_CDP_USAGE_NRT: non real-time use cases such as WFD
+ */
+enum {
+ DPU_PERF_CDP_USAGE_RT,
+ DPU_PERF_CDP_USAGE_NRT,
+ DPU_PERF_CDP_USAGE_MAX
+};
+
+/**
+ * struct dpu_perf_cdp_cfg - define CDP use case configuration
+ * @rd_enable: true if read pipe CDP is enabled
+ * @wr_enable: true if write pipe CDP is enabled
+ */
+struct dpu_perf_cdp_cfg {
+ bool rd_enable;
+ bool wr_enable;
+};
+
+/**
+ * struct dpu_perf_cfg - performance control settings
+ * @max_bw_low low threshold of maximum bandwidth (kbps)
+ * @max_bw_high high threshold of maximum bandwidth (kbps)
+ * @min_core_ib minimum mnoc ib vote in kbps
+ * @min_llcc_ib minimum llcc ib vote in kbps
+ * @min_dram_ib minimum dram ib vote in kbps
+ * @undersized_prefill_lines undersized prefill in lines
+ * @xtra_prefill_lines extra prefill latency in lines
+ * @dest_scale_prefill_lines destination scaler latency in lines
+ * @macrotile_prefill_lines macrotile latency in lines
+ * @yuv_nv12_prefill_lines yuv_nv12 latency in lines
+ * @linear_prefill_lines linear latency in lines
+ * @downscaling_prefill_lines downscaling latency in lines
+ * @amortizable_threshold minimum y position for traffic shaping prefill
+ * @min_prefill_lines minimum pipeline latency in lines
+ * @clk_inefficiency_factor DPU src clock inefficiency factor
+ * @bw_inefficiency_factor DPU axi bus bw inefficiency factor
+ * @safe_lut_tbl: LUT tables for safe signals
+ * @danger_lut_tbl: LUT tables for danger signals
+ * @qos_lut_tbl: LUT tables for QoS signals
+ * @cdp_cfg cdp use case configurations
+ */
+struct dpu_perf_cfg {
+ u32 max_bw_low;
+ u32 max_bw_high;
+ u32 min_core_ib;
+ u32 min_llcc_ib;
+ u32 min_dram_ib;
+ u32 undersized_prefill_lines;
+ u32 xtra_prefill_lines;
+ u32 dest_scale_prefill_lines;
+ u32 macrotile_prefill_lines;
+ u32 yuv_nv12_prefill_lines;
+ u32 linear_prefill_lines;
+ u32 downscaling_prefill_lines;
+ u32 amortizable_threshold;
+ u32 min_prefill_lines;
+ u32 clk_inefficiency_factor;
+ u32 bw_inefficiency_factor;
+ u32 safe_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ u32 danger_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ struct dpu_qos_lut_tbl qos_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ struct dpu_perf_cdp_cfg cdp_cfg[DPU_PERF_CDP_USAGE_MAX];
+};
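
The safe/danger/qos LUT arrays in dpu_perf_cfg are indexed by enum dpu_qos_lut_usage from earlier in this header. A sketch of how a plane would map onto a bucket; the mapping is an assumption that simply follows the LINEAR/MACROTILE/NRT naming, and the helper name is invented:

/* Sketch: map plane properties onto a QoS LUT usage bucket. */
static enum dpu_qos_lut_usage example_lut_usage(bool is_realtime, bool is_tiled)
{
	if (!is_realtime)
		return DPU_QOS_LUT_USAGE_NRT; /* e.g. writeback */

	return is_tiled ? DPU_QOS_LUT_USAGE_MACROTILE :
			  DPU_QOS_LUT_USAGE_LINEAR;
}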
+
+/**
+ * struct dpu_mdss_cfg - information of MDSS HW
+ * This is the main catalog data structure representing
+ * this HW version. Contains number of instances,
+ * register offsets, capabilities of all the MDSS HW sub-blocks.
+ *
+ * @dma_formats Supported formats for dma pipe
+ * @cursor_formats Supported formats for cursor pipe
+ * @vig_formats Supported formats for vig pipe
+ * @mdss_irqs: Bitmap with the irqs supported by the target
+ */
+struct dpu_mdss_cfg {
+ const struct dpu_caps *caps;
+
+ u32 mdp_count;
+ const struct dpu_mdp_cfg *mdp;
+
+ u32 ctl_count;
+ const struct dpu_ctl_cfg *ctl;
+
+ u32 sspp_count;
+ const struct dpu_sspp_cfg *sspp;
+
+ u32 mixer_count;
+ const struct dpu_lm_cfg *mixer;
+
+ u32 pingpong_count;
+ const struct dpu_pingpong_cfg *pingpong;
+
+ u32 merge_3d_count;
+ const struct dpu_merge_3d_cfg *merge_3d;
+
+ u32 dsc_count;
+ struct dpu_dsc_cfg *dsc;
+
+ u32 intf_count;
+ const struct dpu_intf_cfg *intf;
+
+ u32 vbif_count;
+ const struct dpu_vbif_cfg *vbif;
+
+ u32 wb_count;
+ const struct dpu_wb_cfg *wb;
+
+ u32 reg_dma_count;
+ const struct dpu_reg_dma_cfg *dma_cfg;
+
+ u32 ad_count;
+
+ u32 dspp_count;
+ const struct dpu_dspp_cfg *dspp;
+
+ /* Add additional block data structures here */
+
+ const struct dpu_perf_cfg *perf;
+ const struct dpu_format_extended *dma_formats;
+ const struct dpu_format_extended *cursor_formats;
+ const struct dpu_format_extended *vig_formats;
+
+ unsigned long mdss_irqs;
+};
+
+struct dpu_mdss_hw_cfg_handler {
+ u32 hw_rev;
+ const struct dpu_mdss_cfg *dpu_cfg;
+};
+
+/**
+ * dpu_hw_catalog_init - dpu hardware catalog init API retrieves
+ * hardcoded target specific catalog information in config structure
+ * @hw_rev: caller needs to provide the hardware revision.
+ *
+ * Return: dpu config structure
+ */
+const struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev);
+
+#endif /* _DPU_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
new file mode 100644
index 000000000..696c32d30
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -0,0 +1,713 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include "dpu_hwio.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+
+#define CTL_LAYER(lm) \
+ (((lm) == LM_5) ?
(0x024) : (((lm) - LM_0) * 0x004)) +#define CTL_LAYER_EXT(lm) \ + (0x40 + (((lm) - LM_0) * 0x004)) +#define CTL_LAYER_EXT2(lm) \ + (0x70 + (((lm) - LM_0) * 0x004)) +#define CTL_LAYER_EXT3(lm) \ + (0xA0 + (((lm) - LM_0) * 0x004)) +#define CTL_TOP 0x014 +#define CTL_FLUSH 0x018 +#define CTL_START 0x01C +#define CTL_PREPARE 0x0d0 +#define CTL_SW_RESET 0x030 +#define CTL_LAYER_EXTN_OFFSET 0x40 +#define CTL_MERGE_3D_ACTIVE 0x0E4 +#define CTL_WB_ACTIVE 0x0EC +#define CTL_INTF_ACTIVE 0x0F4 +#define CTL_MERGE_3D_FLUSH 0x100 +#define CTL_DSC_ACTIVE 0x0E8 +#define CTL_DSC_FLUSH 0x104 +#define CTL_WB_FLUSH 0x108 +#define CTL_INTF_FLUSH 0x110 +#define CTL_INTF_MASTER 0x134 +#define CTL_FETCH_PIPE_ACTIVE 0x0FC + +#define CTL_MIXER_BORDER_OUT BIT(24) +#define CTL_FLUSH_MASK_CTL BIT(17) + +#define DPU_REG_RESET_TIMEOUT_US 2000 +#define MERGE_3D_IDX 23 +#define DSC_IDX 22 +#define INTF_IDX 31 +#define WB_IDX 16 +#define CTL_INVALID_BIT 0xffff +#define CTL_DEFAULT_GROUP_ID 0xf + +static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19, + CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0, + 1, 2, 3, CTL_INVALID_BIT, CTL_INVALID_BIT}; + +static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl, + const struct dpu_mdss_cfg *m, + void __iomem *addr, + struct dpu_hw_blk_reg_map *b) +{ + int i; + + for (i = 0; i < m->ctl_count; i++) { + if (ctl == m->ctl[i].id) { + b->blk_addr = addr + m->ctl[i].base; + b->log_mask = DPU_DBG_MASK_CTL; + return &m->ctl[i]; + } + } + return ERR_PTR(-ENOMEM); +} + +static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count, + enum dpu_lm lm) +{ + int i; + int stages = -EINVAL; + + for (i = 0; i < count; i++) { + if (lm == mixer[i].id) { + stages = mixer[i].sblk->maxblendstages; + break; + } + } + + return stages; +} + +static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx) +{ + struct dpu_hw_blk_reg_map *c = &ctx->hw; + + return DPU_REG_READ(c, CTL_FLUSH); +} + +static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx) +{ + trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask, + dpu_hw_ctl_get_flush_register(ctx)); + DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1); +} + +static inline bool dpu_hw_ctl_is_started(struct dpu_hw_ctl *ctx) +{ + return !!(DPU_REG_READ(&ctx->hw, CTL_START) & BIT(0)); +} + +static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx) +{ + trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask, + dpu_hw_ctl_get_flush_register(ctx)); + DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1); +} + +static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx) +{ + trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask, + dpu_hw_ctl_get_flush_register(ctx)); + ctx->pending_flush_mask = 0x0; +} + +static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx, + u32 flushbits) +{ + trace_dpu_hw_ctl_update_pending_flush(flushbits, + ctx->pending_flush_mask); + ctx->pending_flush_mask |= flushbits; +} + +static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx) +{ + return ctx->pending_flush_mask; +} + +static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx) +{ + if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX)) + DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH, + ctx->pending_merge_3d_flush_mask); + if (ctx->pending_flush_mask & BIT(INTF_IDX)) + DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH, + ctx->pending_intf_flush_mask); + if (ctx->pending_flush_mask & BIT(WB_IDX)) + DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH, + ctx->pending_wb_flush_mask); + + 
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
+
+static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
+{
+ trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
+ DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
+
+static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
+ enum dpu_sspp sspp)
+{
+ switch (sspp) {
+ case SSPP_VIG0:
+ ctx->pending_flush_mask |= BIT(0);
+ break;
+ case SSPP_VIG1:
+ ctx->pending_flush_mask |= BIT(1);
+ break;
+ case SSPP_VIG2:
+ ctx->pending_flush_mask |= BIT(2);
+ break;
+ case SSPP_VIG3:
+ ctx->pending_flush_mask |= BIT(18);
+ break;
+ case SSPP_RGB0:
+ ctx->pending_flush_mask |= BIT(3);
+ break;
+ case SSPP_RGB1:
+ ctx->pending_flush_mask |= BIT(4);
+ break;
+ case SSPP_RGB2:
+ ctx->pending_flush_mask |= BIT(5);
+ break;
+ case SSPP_RGB3:
+ ctx->pending_flush_mask |= BIT(19);
+ break;
+ case SSPP_DMA0:
+ ctx->pending_flush_mask |= BIT(11);
+ break;
+ case SSPP_DMA1:
+ ctx->pending_flush_mask |= BIT(12);
+ break;
+ case SSPP_DMA2:
+ ctx->pending_flush_mask |= BIT(24);
+ break;
+ case SSPP_DMA3:
+ ctx->pending_flush_mask |= BIT(25);
+ break;
+ case SSPP_CURSOR0:
+ ctx->pending_flush_mask |= BIT(22);
+ break;
+ case SSPP_CURSOR1:
+ ctx->pending_flush_mask |= BIT(23);
+ break;
+ default:
+ break;
+ }
+}
+
+static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm)
+{
+ switch (lm) {
+ case LM_0:
+ ctx->pending_flush_mask |= BIT(6);
+ break;
+ case LM_1:
+ ctx->pending_flush_mask |= BIT(7);
+ break;
+ case LM_2:
+ ctx->pending_flush_mask |= BIT(8);
+ break;
+ case LM_3:
+ ctx->pending_flush_mask |= BIT(9);
+ break;
+ case LM_4:
+ ctx->pending_flush_mask |= BIT(10);
+ break;
+ case LM_5:
+ ctx->pending_flush_mask |= BIT(20);
+ break;
+ default:
+ break;
+ }
+
+ ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL;
+}
+
+static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
+ enum dpu_intf intf)
+{
+ switch (intf) {
+ case INTF_0:
+ ctx->pending_flush_mask |= BIT(31);
+ break;
+ case INTF_1:
+ ctx->pending_flush_mask |= BIT(30);
+ break;
+ case INTF_2:
+ ctx->pending_flush_mask |= BIT(29);
+ break;
+ case INTF_3:
+ ctx->pending_flush_mask |= BIT(28);
+ break;
+ default:
+ break;
+ }
+}
+
+static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
+ enum dpu_wb wb)
+{
+ switch (wb) {
+ case WB_0:
+ case WB_1:
+ case WB_2:
+ ctx->pending_flush_mask |= BIT(WB_IDX);
+ break;
+ default:
+ break;
+ }
+}
+
+static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
+ enum dpu_wb wb)
+{
+ ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
+ ctx->pending_flush_mask |= BIT(WB_IDX);
+}
+
+static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
+ enum dpu_intf intf)
+{
+ ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
+ ctx->pending_flush_mask |= BIT(INTF_IDX);
+}
+
+static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
+ enum dpu_merge_3d merge_3d)
+{
+ ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
+ ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
+}
+
+static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
+ enum dpu_dspp dspp)
+{
+ switch (dspp) {
+ case DSPP_0:
+ ctx->pending_flush_mask |= BIT(13);
+ break;
+ case DSPP_1:
+ ctx->pending_flush_mask |= BIT(14);
+ break;
+ case DSPP_2:
+ ctx->pending_flush_mask |= BIT(15);
+ break;
+ case DSPP_3:
+ ctx->pending_flush_mask |= BIT(21);
+ break;
+ default:
+ break;
+ }
+}
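Worth noting at this point in the file: the update_pending_flush_* helpers above only accumulate bits in ctx->pending_flush_mask in software; nothing reaches the hardware until trigger_flush writes CTL_FLUSH. The real sequencing lives in dpu_crtc/dpu_encoder, outside this excerpt; the sketch below, with an invented function name, shows the intended order of operations through the ops table.

/* Sketch: software accumulates flush bits, then commits them at once. */
static void example_ctl_commit(struct dpu_hw_ctl *ctl)
{
	ctl->ops.update_pending_flush_sspp(ctl, SSPP_VIG0);
	ctl->ops.update_pending_flush_mixer(ctl, LM_0);
	ctl->ops.update_pending_flush_intf(ctl, INTF_1);

	ctl->ops.trigger_flush(ctl); /* single write of CTL_FLUSH */
	ctl->ops.trigger_start(ctl); /* kick SW-controlled interfaces */
}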
+
+static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ ktime_t timeout;
+ u32 status;
+
+ timeout = ktime_add_us(ktime_get(), timeout_us);
+
+ /*
+ * it takes around 30us for the mdp to finish resetting its ctl path;
+ * poll every 50us so that the reset should be complete at the 1st poll
+ */
+ do {
+ status = DPU_REG_READ(c, CTL_SW_RESET);
+ status &= 0x1;
+ if (status)
+ usleep_range(20, 50);
+ } while (status && ktime_compare_safe(ktime_get(), timeout) < 0);
+
+ return status;
+}
+
+static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
+ DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
+ if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 status;
+
+ status = DPU_REG_READ(c, CTL_SW_RESET);
+ status &= 0x01;
+ if (!status)
+ return 0;
+
+ pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
+ if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
+ pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int i;
+
+ for (i = 0; i < ctx->mixer_count; i++) {
+ enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;
+
+ DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
+ }
+
+ DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
+}
+
+static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
+ u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
+ int i, j;
+ int stages;
+ int pipes_per_stage;
+
+ stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
+ if (stages < 0)
+ return;
+
+ if (test_bit(DPU_MIXER_SOURCESPLIT,
+ &ctx->mixer_hw_caps->features))
+ pipes_per_stage = PIPES_PER_STAGE;
+ else
+ pipes_per_stage = 1;
+
+ mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */
+
+ if (!stage_cfg)
+ goto exit;
+
+ for (i = 0; i <= stages; i++) {
+ /* overflow to ext register if 'i + 1 > 7' */
+ mix = (i + 1) & 0x7;
+ ext = i >= 7;
+
+ for (j = 0 ; j < pipes_per_stage; j++) {
+ enum dpu_sspp_multirect_index rect_index =
+ stage_cfg->multirect_index[i][j];
+
+ switch (stage_cfg->stage[i][j]) {
+ case SSPP_VIG0:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
+ } else {
+ mixercfg |= mix << 0;
+ mixercfg_ext |= ext << 0;
+ }
+ break;
+ case SSPP_VIG1:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
+ } else {
+ mixercfg |= mix << 3;
+ mixercfg_ext |= ext << 2;
+ }
+ break;
+ case SSPP_VIG2:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
+ } else {
+ mixercfg |= mix << 6;
+ mixercfg_ext |= ext << 4;
+ }
+ break;
+ case SSPP_VIG3:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
+ } else {
+ mixercfg |= mix << 26;
+ mixercfg_ext |= ext << 6;
+ }
+ break;
+ case SSPP_RGB0:
+ mixercfg |= mix << 9;
+ mixercfg_ext |= ext << 8;
+ break;
+ case SSPP_RGB1:
+ mixercfg |= mix << 12;
+ mixercfg_ext
|= ext << 10; + break; + case SSPP_RGB2: + mixercfg |= mix << 15; + mixercfg_ext |= ext << 12; + break; + case SSPP_RGB3: + mixercfg |= mix << 29; + mixercfg_ext |= ext << 14; + break; + case SSPP_DMA0: + if (rect_index == DPU_SSPP_RECT_1) { + mixercfg_ext2 |= ((i + 1) & 0xF) << 8; + } else { + mixercfg |= mix << 18; + mixercfg_ext |= ext << 16; + } + break; + case SSPP_DMA1: + if (rect_index == DPU_SSPP_RECT_1) { + mixercfg_ext2 |= ((i + 1) & 0xF) << 12; + } else { + mixercfg |= mix << 21; + mixercfg_ext |= ext << 18; + } + break; + case SSPP_DMA2: + if (rect_index == DPU_SSPP_RECT_1) { + mixercfg_ext2 |= ((i + 1) & 0xF) << 16; + } else { + mix |= (i + 1) & 0xF; + mixercfg_ext2 |= mix << 0; + } + break; + case SSPP_DMA3: + if (rect_index == DPU_SSPP_RECT_1) { + mixercfg_ext2 |= ((i + 1) & 0xF) << 20; + } else { + mix |= (i + 1) & 0xF; + mixercfg_ext2 |= mix << 4; + } + break; + case SSPP_CURSOR0: + mixercfg_ext |= ((i + 1) & 0xF) << 20; + break; + case SSPP_CURSOR1: + mixercfg_ext |= ((i + 1) & 0xF) << 26; + break; + default: + break; + } + } + } + +exit: + DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg); + DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext); + DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2); + DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3); +} + + +static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx, + struct dpu_hw_intf_cfg *cfg) +{ + struct dpu_hw_blk_reg_map *c = &ctx->hw; + u32 intf_active = 0; + u32 wb_active = 0; + u32 mode_sel = 0; + + /* CTL_TOP[31:28] carries group_id to collate CTL paths + * per VM. Explicitly disable it until VM support is + * added in SW. Power on reset value is not disable. + */ + if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features))) + mode_sel = CTL_DEFAULT_GROUP_ID << 28; + + if (cfg->dsc) + DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH, cfg->dsc); + + if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD) + mode_sel |= BIT(17); + + intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE); + wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE); + + if (cfg->intf) + intf_active |= BIT(cfg->intf - INTF_0); + + if (cfg->wb) + wb_active |= BIT(cfg->wb - WB_0); + + DPU_REG_WRITE(c, CTL_TOP, mode_sel); + DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active); + DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active); + + if (cfg->merge_3d) + DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, + BIT(cfg->merge_3d - MERGE_3D_0)); + if (cfg->dsc) { + DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, BIT(DSC_IDX)); + DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc); + } +} + +static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx, + struct dpu_hw_intf_cfg *cfg) +{ + struct dpu_hw_blk_reg_map *c = &ctx->hw; + u32 intf_cfg = 0; + + intf_cfg |= (cfg->intf & 0xF) << 4; + + if (cfg->mode_3d) { + intf_cfg |= BIT(19); + intf_cfg |= (cfg->mode_3d - 0x1) << 20; + } + + if (cfg->wb) + intf_cfg |= (cfg->wb & 0x3) + 2; + + switch (cfg->intf_mode_sel) { + case DPU_CTL_MODE_SEL_VID: + intf_cfg &= ~BIT(17); + intf_cfg &= ~(0x3 << 15); + break; + case DPU_CTL_MODE_SEL_CMD: + intf_cfg |= BIT(17); + intf_cfg |= ((cfg->stream_sel & 0x3) << 15); + break; + default: + pr_err("unknown interface type %d\n", cfg->intf_mode_sel); + return; + } + + DPU_REG_WRITE(c, CTL_TOP, intf_cfg); +} + +static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx, + struct dpu_hw_intf_cfg *cfg) +{ + struct dpu_hw_blk_reg_map *c = &ctx->hw; + u32 intf_active = 0; + u32 wb_active = 0; + u32 merge3d_active = 0; + + /* + * This API resets each portion of the CTL path namely, + * clearing the sspps staged on the lm, merge_3d block, + * interfaces , writeback etc to ensure 
clean teardown of the pipeline. + * This will be used for writeback to begin with to have a + * proper teardown of the writeback session but upon further + * validation, this can be extended to all interfaces. + */ + if (cfg->merge_3d) { + merge3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE); + merge3d_active &= ~BIT(cfg->merge_3d - MERGE_3D_0); + DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, + merge3d_active); + } + + dpu_hw_ctl_clear_all_blendstages(ctx); + + if (cfg->intf) { + intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE); + intf_active &= ~BIT(cfg->intf - INTF_0); + DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active); + } + + if (cfg->wb) { + wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE); + wb_active &= ~BIT(cfg->wb - WB_0); + DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active); + } +} + +static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx, + unsigned long *fetch_active) +{ + int i; + u32 val = 0; + + if (fetch_active) { + for (i = 0; i < SSPP_MAX; i++) { + if (test_bit(i, fetch_active) && + fetch_tbl[i] != CTL_INVALID_BIT) + val |= BIT(fetch_tbl[i]); + } + } + + DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val); +} + +static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops, + unsigned long cap) +{ + if (cap & BIT(DPU_CTL_ACTIVE_CFG)) { + ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1; + ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1; + ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1; + ops->update_pending_flush_intf = + dpu_hw_ctl_update_pending_flush_intf_v1; + ops->update_pending_flush_merge_3d = + dpu_hw_ctl_update_pending_flush_merge_3d_v1; + ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1; + } else { + ops->trigger_flush = dpu_hw_ctl_trigger_flush; + ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg; + ops->update_pending_flush_intf = + dpu_hw_ctl_update_pending_flush_intf; + ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb; + } + ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush; + ops->update_pending_flush = dpu_hw_ctl_update_pending_flush; + ops->get_pending_flush = dpu_hw_ctl_get_pending_flush; + ops->get_flush_register = dpu_hw_ctl_get_flush_register; + ops->trigger_start = dpu_hw_ctl_trigger_start; + ops->is_started = dpu_hw_ctl_is_started; + ops->trigger_pending = dpu_hw_ctl_trigger_pending; + ops->reset = dpu_hw_ctl_reset_control; + ops->wait_reset_status = dpu_hw_ctl_wait_reset_status; + ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages; + ops->setup_blendstage = dpu_hw_ctl_setup_blendstage; + ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp; + ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer; + ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp; + if (cap & BIT(DPU_CTL_FETCH_ACTIVE)) + ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active; +}; + +struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx, + void __iomem *addr, + const struct dpu_mdss_cfg *m) +{ + struct dpu_hw_ctl *c; + const struct dpu_ctl_cfg *cfg; + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return ERR_PTR(-ENOMEM); + + cfg = _ctl_offset(idx, m, addr, &c->hw); + if (IS_ERR_OR_NULL(cfg)) { + kfree(c); + pr_err("failed to create dpu_hw_ctl %d\n", idx); + return ERR_PTR(-EINVAL); + } + + c->caps = cfg; + _setup_ctl_ops(&c->ops, c->caps->features); + c->idx = idx; + c->mixer_count = m->mixer_count; + c->mixer_hw_caps = m->mixer; + + return c; +} + +void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx) +{ + kfree(ctx); +} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h 
b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
new file mode 100644
index 000000000..96c012ec8
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DPU_HW_CTL_H
+#define _DPU_HW_CTL_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_sspp.h"
+
+/**
+ * dpu_ctl_mode_sel: Interface mode selection
+ * DPU_CTL_MODE_SEL_VID: Video mode interface
+ * DPU_CTL_MODE_SEL_CMD: Command mode interface
+ */
+enum dpu_ctl_mode_sel {
+ DPU_CTL_MODE_SEL_VID = 0,
+ DPU_CTL_MODE_SEL_CMD
+};
+
+struct dpu_hw_ctl;
+/**
+ * struct dpu_hw_stage_cfg - blending stage cfg
+ * @stage : SSPP_ID at each stage
+ * @multirect_index: index of the rectangle of SSPP.
+ */
+struct dpu_hw_stage_cfg {
+ enum dpu_sspp stage[DPU_STAGE_MAX][PIPES_PER_STAGE];
+ enum dpu_sspp_multirect_index multirect_index
+ [DPU_STAGE_MAX][PIPES_PER_STAGE];
+};
+
+/**
+ * struct dpu_hw_intf_cfg : Describes how the DPU writes data to output interface
+ * @intf : Interface id
+ * @wb : Writeback id
+ * @mode_3d: 3d mux configuration
+ * @merge_3d: 3d merge block used
+ * @intf_mode_sel: Interface mode, cmd / vid
+ * @stream_sel: Stream selection for multi-stream interfaces
+ * @dsc: DSC BIT masks used
+ */
+struct dpu_hw_intf_cfg {
+ enum dpu_intf intf;
+ enum dpu_wb wb;
+ enum dpu_3d_blend_mode mode_3d;
+ enum dpu_merge_3d merge_3d;
+ enum dpu_ctl_mode_sel intf_mode_sel;
+ int stream_sel;
+ unsigned int dsc;
+};
+
+/**
+ * struct dpu_hw_ctl_ops - Interface to the ctl hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_ctl_ops {
+ /**
+ * kickoff hw operation for Sw controlled interfaces
+ * DSI cmd mode and WB interface are SW controlled
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_start)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * check if the ctl is started
+ * @ctx : ctl path ctx pointer
+ * @Return: true if started, false if stopped
+ */
+ bool (*is_started)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * kickoff prepare is in progress hw operation for sw
+ * controlled interfaces: DSI cmd mode and WB interface
+ * are SW controlled
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_pending)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Clear the value of the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
+ void (*clear_pending_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Query the value of the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
+ u32 (*get_pending_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @flushbits : module flushmask
+ */
+ void (*update_pending_flush)(struct dpu_hw_ctl *ctx,
+ u32 flushbits);
+
+ /**
+ * OR in the given flushbits to the cached pending_(wb_)flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : writeback block index
+ */
+ void (*update_pending_flush_wb)(struct dpu_hw_ctl *ctx,
+ enum dpu_wb blk);
+
+ /**
+ * OR in the given flushbits to the cached pending_(intf_)flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : interface block index
+ */
+ void (*update_pending_flush_intf)(struct dpu_hw_ctl *ctx,
+ enum dpu_intf blk);
+
+ /**
+ * OR in the given
flushbits to the cached pending_(merge_3d_)flush_mask + * No effect on hardware + * @ctx : ctl path ctx pointer + * @blk : interface block index + */ + void (*update_pending_flush_merge_3d)(struct dpu_hw_ctl *ctx, + enum dpu_merge_3d blk); + + /** + * OR in the given flushbits to the cached pending_flush_mask + * No effect on hardware + * @ctx : ctl path ctx pointer + * @blk : SSPP block index + */ + void (*update_pending_flush_sspp)(struct dpu_hw_ctl *ctx, + enum dpu_sspp blk); + + /** + * OR in the given flushbits to the cached pending_flush_mask + * No effect on hardware + * @ctx : ctl path ctx pointer + * @blk : LM block index + */ + void (*update_pending_flush_mixer)(struct dpu_hw_ctl *ctx, + enum dpu_lm blk); + + /** + * OR in the given flushbits to the cached pending_flush_mask + * No effect on hardware + * @ctx : ctl path ctx pointer + * @blk : DSPP block index + */ + void (*update_pending_flush_dspp)(struct dpu_hw_ctl *ctx, + enum dpu_dspp blk); + /** + * Write the value of the pending_flush_mask to hardware + * @ctx : ctl path ctx pointer + */ + void (*trigger_flush)(struct dpu_hw_ctl *ctx); + + /** + * Read the value of the flush register + * @ctx : ctl path ctx pointer + * @Return: value of the ctl flush register. + */ + u32 (*get_flush_register)(struct dpu_hw_ctl *ctx); + + /** + * Setup ctl_path interface config + * @ctx + * @cfg : interface config structure pointer + */ + void (*setup_intf_cfg)(struct dpu_hw_ctl *ctx, + struct dpu_hw_intf_cfg *cfg); + + /** + * reset ctl_path interface config + * @ctx : ctl path ctx pointer + * @cfg : interface config structure pointer + */ + void (*reset_intf_cfg)(struct dpu_hw_ctl *ctx, + struct dpu_hw_intf_cfg *cfg); + + int (*reset)(struct dpu_hw_ctl *c); + + /* + * wait_reset_status - checks ctl reset status + * @ctx : ctl path ctx pointer + * + * This function checks the ctl reset status bit. + * If the reset bit is set, it keeps polling the status till the hw + * reset is complete. 
+ * Returns: 0 on success or -error if reset incomplete within interval + */ + int (*wait_reset_status)(struct dpu_hw_ctl *ctx); + + /** + * Set all blend stages to disabled + * @ctx : ctl path ctx pointer + */ + void (*clear_all_blendstages)(struct dpu_hw_ctl *ctx); + + /** + * Configure layer mixer to pipe configuration + * @ctx : ctl path ctx pointer + * @lm : layer mixer enumeration + * @cfg : blend stage configuration + */ + void (*setup_blendstage)(struct dpu_hw_ctl *ctx, + enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg); + + void (*set_active_pipes)(struct dpu_hw_ctl *ctx, + unsigned long *fetch_active); +}; + +/** + * struct dpu_hw_ctl : CTL PATH driver object + * @base: hardware block base structure + * @hw: block register map object + * @idx: control path index + * @caps: control path capabilities + * @mixer_count: number of mixers + * @mixer_hw_caps: mixer hardware capabilities + * @pending_flush_mask: storage for pending ctl_flush managed via ops + * @pending_intf_flush_mask: pending INTF flush + * @pending_wb_flush_mask: pending WB flush + * @ops: operation list + */ +struct dpu_hw_ctl { + struct dpu_hw_blk base; + struct dpu_hw_blk_reg_map hw; + + /* ctl path */ + int idx; + const struct dpu_ctl_cfg *caps; + int mixer_count; + const struct dpu_lm_cfg *mixer_hw_caps; + u32 pending_flush_mask; + u32 pending_intf_flush_mask; + u32 pending_wb_flush_mask; + u32 pending_merge_3d_flush_mask; + + /* ops */ + struct dpu_hw_ctl_ops ops; +}; + +/** + * dpu_hw_ctl - convert base object dpu_hw_base to container + * @hw: Pointer to base hardware block + * return: Pointer to hardware block container + */ +static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw) +{ + return container_of(hw, struct dpu_hw_ctl, base); +} + +/** + * dpu_hw_ctl_init(): Initializes the ctl_path hw driver object. + * should be called before accessing every ctl path registers. 
+ * @idx: ctl_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_ctl_destroy(): Destroys ctl driver context
+ * should be called to free the context
+ */
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx);
+
+#endif /*_DPU_HW_CTL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
new file mode 100644
index 000000000..c8f145558
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2022, Linaro Limited
+ */
+
+#include "dpu_kms.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_dsc.h"
+
+#define DSC_COMMON_MODE 0x000
+#define DSC_ENC 0x004
+#define DSC_PICTURE 0x008
+#define DSC_SLICE 0x00C
+#define DSC_CHUNK_SIZE 0x010
+#define DSC_DELAY 0x014
+#define DSC_SCALE_INITIAL 0x018
+#define DSC_SCALE_DEC_INTERVAL 0x01C
+#define DSC_SCALE_INC_INTERVAL 0x020
+#define DSC_FIRST_LINE_BPG_OFFSET 0x024
+#define DSC_BPG_OFFSET 0x028
+#define DSC_DSC_OFFSET 0x02C
+#define DSC_FLATNESS 0x030
+#define DSC_RC_MODEL_SIZE 0x034
+#define DSC_RC 0x038
+#define DSC_RC_BUF_THRESH 0x03C
+#define DSC_RANGE_MIN_QP 0x074
+#define DSC_RANGE_MAX_QP 0x0B0
+#define DSC_RANGE_BPG_OFFSET 0x0EC
+
+static void dpu_hw_dsc_disable(struct dpu_hw_dsc *dsc)
+{
+ struct dpu_hw_blk_reg_map *c = &dsc->hw;
+
+ DPU_REG_WRITE(c, DSC_COMMON_MODE, 0);
+}
+
+static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
+ struct drm_dsc_config *dsc,
+ u32 mode,
+ u32 initial_lines)
+{
+ struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
+ u32 data;
+ u32 slice_last_group_size;
+ u32 det_thresh_flatness;
+ bool is_cmd_mode = !(mode & DSC_MODE_VIDEO);
+
+ DPU_REG_WRITE(c, DSC_COMMON_MODE, mode);
+
+ if (is_cmd_mode)
+ initial_lines += 1;
+
+ slice_last_group_size = (dsc->slice_width + 2) % 3;
+
+ data = (initial_lines << 20);
+ data |= (slice_last_group_size << 18);
+ /* bpp is 6.4 format, the 4 LSB bits are for the fractional part */
+ data |= (dsc->bits_per_pixel << 8);
+ data |= (dsc->block_pred_enable << 7);
+ data |= (dsc->line_buf_depth << 3);
+ data |= (dsc->simple_422 << 2);
+ data |= (dsc->convert_rgb << 1);
+ data |= dsc->bits_per_component;
+
+ DPU_REG_WRITE(c, DSC_ENC, data);
+
+ data = dsc->pic_width << 16;
+ data |= dsc->pic_height;
+ DPU_REG_WRITE(c, DSC_PICTURE, data);
+
+ data = dsc->slice_width << 16;
+ data |= dsc->slice_height;
+ DPU_REG_WRITE(c, DSC_SLICE, data);
+
+ data = dsc->slice_chunk_size << 16;
+ DPU_REG_WRITE(c, DSC_CHUNK_SIZE, data);
+
+ data = dsc->initial_dec_delay << 16;
+ data |= dsc->initial_xmit_delay;
+ DPU_REG_WRITE(c, DSC_DELAY, data);
+
+ data = dsc->initial_scale_value;
+ DPU_REG_WRITE(c, DSC_SCALE_INITIAL, data);
+
+ data = dsc->scale_decrement_interval;
+ DPU_REG_WRITE(c, DSC_SCALE_DEC_INTERVAL, data);
+
+ data = dsc->scale_increment_interval;
+ DPU_REG_WRITE(c, DSC_SCALE_INC_INTERVAL, data);
+
+ data = dsc->first_line_bpg_offset;
+ DPU_REG_WRITE(c, DSC_FIRST_LINE_BPG_OFFSET, data);
+
+ data = dsc->nfl_bpg_offset << 16;
+ data |= dsc->slice_bpg_offset;
+ DPU_REG_WRITE(c, DSC_BPG_OFFSET, data);
+
+ data = dsc->initial_offset << 16;
+ data |= dsc->final_offset;
+ DPU_REG_WRITE(c, DSC_DSC_OFFSET, data);
+
+ det_thresh_flatness = 7 + 2 * (dsc->bits_per_component - 8);
+ data = det_thresh_flatness << 10;
+ data |= dsc->flatness_max_qp << 5;
+ data |= dsc->flatness_min_qp;
+ DPU_REG_WRITE(c, DSC_FLATNESS, data);
+
+ data = dsc->rc_model_size;
+ DPU_REG_WRITE(c, DSC_RC_MODEL_SIZE, data);
+
+ data = dsc->rc_tgt_offset_low << 18;
+ data |= dsc->rc_tgt_offset_high << 14;
+ data |= dsc->rc_quant_incr_limit1 << 9;
+ data |= dsc->rc_quant_incr_limit0 << 4;
+ data |= dsc->rc_edge_factor;
+ DPU_REG_WRITE(c, DSC_RC, data);
+}
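
One detail of dpu_hw_dsc_config() above deserves a callout: bits_per_pixel arrives from drm_dsc_config already in 6.4 fixed point, so the register field carries sixteenths of a bit. A tiny illustration with an invented helper, not part of the patch:

/* Sketch: 6.4 fixed-point bpp, e.g. 8.5 bpp -> (8 << 4) | 8 == 0x88. */
static u32 example_bpp_to_6_4(u32 integer_bits, u32 sixteenths)
{
	return (integer_bits << 4) | (sixteenths & 0xF);
}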
+
+static void dpu_hw_dsc_config_thresh(struct dpu_hw_dsc *hw_dsc,
+ struct drm_dsc_config *dsc)
+{
+ struct drm_dsc_rc_range_parameters *rc = dsc->rc_range_params;
+ struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
+ u32 off;
+ int i;
+
+ off = DSC_RC_BUF_THRESH;
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
+ DPU_REG_WRITE(c, off, dsc->rc_buf_thresh[i]);
+ off += 4;
+ }
+
+ off = DSC_RANGE_MIN_QP;
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ DPU_REG_WRITE(c, off, rc[i].range_min_qp);
+ off += 4;
+ }
+
+ off = DSC_RANGE_MAX_QP;
+ for (i = 0; i < 15; i++) {
+ DPU_REG_WRITE(c, off, rc[i].range_max_qp);
+ off += 4;
+ }
+
+ off = DSC_RANGE_BPG_OFFSET;
+ for (i = 0; i < 15; i++) {
+ DPU_REG_WRITE(c, off, rc[i].range_bpg_offset);
+ off += 4;
+ }
+}
+
+static struct dpu_dsc_cfg *_dsc_offset(enum dpu_dsc dsc,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->dsc_count; i++) {
+ if (dsc == m->dsc[i].id) {
+ b->blk_addr = addr + m->dsc[i].base;
+ b->log_mask = DPU_DBG_MASK_DSC;
+ return &m->dsc[i];
+ }
+ }
+
+ return NULL;
+}
+
+static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops,
+ unsigned long cap)
+{
+ ops->dsc_disable = dpu_hw_dsc_disable;
+ ops->dsc_config = dpu_hw_dsc_config;
+ ops->dsc_config_thresh = dpu_hw_dsc_config_thresh;
+};
+
+struct dpu_hw_dsc *dpu_hw_dsc_init(enum dpu_dsc idx, void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_dsc *c;
+ struct dpu_dsc_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _dsc_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->idx = idx;
+ c->caps = cfg;
+ _setup_dsc_ops(&c->ops, c->caps->features);
+
+ return c;
+}
+
+void dpu_hw_dsc_destroy(struct dpu_hw_dsc *dsc)
+{
+ kfree(dsc);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
new file mode 100644
index 000000000..c0b77fe1a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2020-2022, Linaro Limited */
+
+#ifndef _DPU_HW_DSC_H
+#define _DPU_HW_DSC_H
+
+#include <drm/display/drm_dsc.h>
+
+#define DSC_MODE_SPLIT_PANEL BIT(0)
+#define DSC_MODE_MULTIPLEX BIT(1)
+#define DSC_MODE_VIDEO BIT(2)
+
+struct dpu_hw_dsc;
+
+/**
+ * struct dpu_hw_dsc_ops - interface to the dsc hardware driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_dsc_ops {
+ /**
+ * dsc_disable - disable dsc
+ * @hw_dsc: Pointer to dsc context
+ */
+ void (*dsc_disable)(struct dpu_hw_dsc *hw_dsc);
+
+ /**
+ * dsc_config - configures dsc encoder
+ * @hw_dsc: Pointer to dsc context
+ * @dsc: panel dsc parameters
+ * @mode: dsc topology mode to be set
+ * @initial_lines: amount of initial lines to be used
+ */
+ void (*dsc_config)(struct dpu_hw_dsc *hw_dsc,
+ struct drm_dsc_config *dsc,
+ u32 mode,
+ u32 initial_lines);
+
+ /**
+ * dsc_config_thresh - programs panel thresholds
+ * @hw_dsc: Pointer to dsc context
+ * @dsc: panel dsc parameters
+ */
+ void
(*dsc_config_thresh)(struct dpu_hw_dsc *hw_dsc, + struct drm_dsc_config *dsc); +}; + +struct dpu_hw_dsc { + struct dpu_hw_blk base; + struct dpu_hw_blk_reg_map hw; + + /* dsc */ + enum dpu_dsc idx; + const struct dpu_dsc_cfg *caps; + + /* ops */ + struct dpu_hw_dsc_ops ops; +}; + +/** + * dpu_hw_dsc_init - initializes the dsc block for the passed dsc idx. + * @idx: DSC index for which driver object is required + * @addr: Mapped register io address of MDP + * @m: Pointer to mdss catalog data + * Returns: Error code or allocated dpu_hw_dsc context + */ +struct dpu_hw_dsc *dpu_hw_dsc_init(enum dpu_dsc idx, void __iomem *addr, + const struct dpu_mdss_cfg *m); + +/** + * dpu_hw_dsc_destroy - destroys dsc driver context + * @dsc: Pointer to dsc driver context returned by dpu_hw_dsc_init + */ +void dpu_hw_dsc_destroy(struct dpu_hw_dsc *dsc); + +static inline struct dpu_hw_dsc *to_dpu_hw_dsc(struct dpu_hw_blk *hw) +{ + return container_of(hw, struct dpu_hw_dsc, base); +} + +#endif /* _DPU_HW_DSC_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c new file mode 100644 index 000000000..8ab5ace34 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + */ + +#include "dpu_hwio.h" +#include "dpu_hw_catalog.h" +#include "dpu_hw_lm.h" +#include "dpu_hw_dspp.h" +#include "dpu_kms.h" + + +/* DSPP_PCC */ +#define PCC_EN BIT(0) +#define PCC_DIS 0 +#define PCC_RED_R_OFF 0x10 +#define PCC_RED_G_OFF 0x1C +#define PCC_RED_B_OFF 0x28 +#define PCC_GREEN_R_OFF 0x14 +#define PCC_GREEN_G_OFF 0x20 +#define PCC_GREEN_B_OFF 0x2C +#define PCC_BLUE_R_OFF 0x18 +#define PCC_BLUE_G_OFF 0x24 +#define PCC_BLUE_B_OFF 0x30 + +static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx, + struct dpu_hw_pcc_cfg *cfg) +{ + + u32 base; + + if (!ctx) { + DRM_ERROR("invalid ctx %pK\n", ctx); + return; + } + + base = ctx->cap->sblk->pcc.base; + + if (!base) { + DRM_ERROR("invalid ctx %pK pcc base 0x%x\n", ctx, base); + return; + } + + if (!cfg) { + DRM_DEBUG_DRIVER("disable pcc feature\n"); + DPU_REG_WRITE(&ctx->hw, base, PCC_DIS); + return; + } + + DPU_REG_WRITE(&ctx->hw, base + PCC_RED_R_OFF, cfg->r.r); + DPU_REG_WRITE(&ctx->hw, base + PCC_RED_G_OFF, cfg->r.g); + DPU_REG_WRITE(&ctx->hw, base + PCC_RED_B_OFF, cfg->r.b); + + DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_R_OFF, cfg->g.r); + DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_G_OFF, cfg->g.g); + DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_B_OFF, cfg->g.b); + + DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_R_OFF, cfg->b.r); + DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_G_OFF, cfg->b.g); + DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_B_OFF, cfg->b.b); + + DPU_REG_WRITE(&ctx->hw, base, PCC_EN); +} + +static void _setup_dspp_ops(struct dpu_hw_dspp *c, + unsigned long features) +{ + if (test_bit(DPU_DSPP_PCC, &features)) + c->ops.setup_pcc = dpu_setup_dspp_pcc; +} + +static const struct dpu_dspp_cfg *_dspp_offset(enum dpu_dspp dspp, + const struct dpu_mdss_cfg *m, + void __iomem *addr, + struct dpu_hw_blk_reg_map *b) +{ + int i; + + if (!m || !addr || !b) + return ERR_PTR(-EINVAL); + + for (i = 0; i < m->dspp_count; i++) { + if (dspp == m->dspp[i].id) { + b->blk_addr = addr + m->dspp[i].base; + b->log_mask = DPU_DBG_MASK_DSPP; + return &m->dspp[i]; + } + } + + return ERR_PTR(-EINVAL); +} + +struct dpu_hw_dspp *dpu_hw_dspp_init(enum dpu_dspp idx, + void __iomem *addr, + const struct dpu_mdss_cfg *m) +{ + struct 
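+ /*
+ * Usage sketch for the PCC op (hypothetical values: COEFF_UNITY stands
+ * in for whatever encoding the hardware uses for unity gain, which this
+ * patch does not define):
+ *
+ *   struct dpu_hw_pcc_cfg pcc = {
+ *           .r = { .r = COEFF_UNITY },
+ *           .g = { .g = COEFF_UNITY },
+ *           .b = { .b = COEFF_UNITY },
+ *   };
+ *
+ *   if (dspp->ops.setup_pcc)
+ *           dspp->ops.setup_pcc(dspp, &pcc);   // program and enable
+ *   dspp->ops.setup_pcc(dspp, NULL);           // NULL disables PCC
+ */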
dpu_hw_dspp *c; + const struct dpu_dspp_cfg *cfg; + + if (!addr || !m) + return ERR_PTR(-EINVAL); + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return ERR_PTR(-ENOMEM); + + cfg = _dspp_offset(idx, m, addr, &c->hw); + if (IS_ERR_OR_NULL(cfg)) { + kfree(c); + return ERR_PTR(-EINVAL); + } + + /* Assign ops */ + c->idx = idx; + c->cap = cfg; + _setup_dspp_ops(c, c->cap->features); + + return c; +} + +void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp) +{ + kfree(dspp); +} + + diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h new file mode 100644 index 000000000..05ecfdfac --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _DPU_HW_DSPP_H +#define _DPU_HW_DSPP_H + +struct dpu_hw_dspp; + +/** + * struct dpu_hw_pcc_coeff - PCC coefficient structure for each color + * component. + * @r: red coefficient. + * @g: green coefficient. + * @b: blue coefficient. + */ + +struct dpu_hw_pcc_coeff { + __u32 r; + __u32 g; + __u32 b; +}; + +/** + * struct dpu_hw_pcc - pcc feature structure + * @r: red coefficients. + * @g: green coefficients. + * @b: blue coefficients. + */ +struct dpu_hw_pcc_cfg { + struct dpu_hw_pcc_coeff r; + struct dpu_hw_pcc_coeff g; + struct dpu_hw_pcc_coeff b; +}; + +/** + * struct dpu_hw_dspp_ops - interface to the dspp hardware driver functions + * Caller must call the init function to get the dspp context for each dspp + * Assumption is these functions will be called after clocks are enabled + */ +struct dpu_hw_dspp_ops { + /** + * setup_pcc - setup dspp pcc + * @ctx: Pointer to dspp context + * @cfg: Pointer to configuration + */ + void (*setup_pcc)(struct dpu_hw_dspp *ctx, struct dpu_hw_pcc_cfg *cfg); + +}; + +/** + * struct dpu_hw_dspp - dspp description + * @base: Hardware block base structure + * @hw: Block hardware details + * @idx: DSPP index + * @cap: Pointer to layer_cfg + * @ops: Pointer to operations possible for this DSPP + */ +struct dpu_hw_dspp { + struct dpu_hw_blk base; + struct dpu_hw_blk_reg_map hw; + + /* dspp */ + int idx; + const struct dpu_dspp_cfg *cap; + + /* Ops */ + struct dpu_hw_dspp_ops ops; +}; + +/** + * dpu_hw_dspp - convert base object dpu_hw_base to container + * @hw: Pointer to base hardware block + * return: Pointer to hardware block container + */ +static inline struct dpu_hw_dspp *to_dpu_hw_dspp(struct dpu_hw_blk *hw) +{ + return container_of(hw, struct dpu_hw_dspp, base); +} + +/** + * dpu_hw_dspp_init - initializes the dspp hw driver object. + * should be called once before accessing every dspp. + * @idx: DSPP index for which driver object is required + * @addr: Mapped register io address of MDP + * @Return: pointer to structure or ERR_PTR + */ +struct dpu_hw_dspp *dpu_hw_dspp_init(enum dpu_dspp idx, + void __iomem *addr, const struct dpu_mdss_cfg *m); + +/** + * dpu_hw_dspp_destroy(): Destroys DSPP driver context + * @dspp: Pointer to DSPP driver context + */ +void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp); + +#endif /*_DPU_HW_DSPP_H */ + diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c new file mode 100644 index 000000000..75e1b89c9 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c @@ -0,0 +1,574 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
+ */
+
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+
+#include "dpu_core_irq.h"
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_trace.h"
+
+/*
+ * Register offsets in MDSS register file for the interrupt registers
+ * w.r.t. the MDP base
+ */
+#define MDP_SSPP_TOP0_OFF 0x0
+#define MDP_INTF_0_OFF 0x6A000
+#define MDP_INTF_1_OFF 0x6A800
+#define MDP_INTF_2_OFF 0x6B000
+#define MDP_INTF_3_OFF 0x6B800
+#define MDP_INTF_4_OFF 0x6C000
+#define MDP_INTF_5_OFF 0x6C800
+#define INTF_INTR_EN 0x1c0
+#define INTF_INTR_STATUS 0x1c4
+#define INTF_INTR_CLEAR 0x1c8
+#define MDP_AD4_0_OFF 0x7C000
+#define MDP_AD4_1_OFF 0x7D000
+#define MDP_AD4_INTR_EN_OFF 0x41c
+#define MDP_AD4_INTR_CLEAR_OFF 0x424
+#define MDP_AD4_INTR_STATUS_OFF 0x420
+#define MDP_INTF_0_OFF_REV_7xxx 0x34000
+#define MDP_INTF_1_OFF_REV_7xxx 0x35000
+#define MDP_INTF_2_OFF_REV_7xxx 0x36000
+#define MDP_INTF_3_OFF_REV_7xxx 0x37000
+#define MDP_INTF_4_OFF_REV_7xxx 0x38000
+#define MDP_INTF_5_OFF_REV_7xxx 0x39000
+
+/**
+ * struct dpu_intr_reg - array of DPU register sets
+ * @clr_off: offset to CLEAR reg
+ * @en_off: offset to ENABLE reg
+ * @status_off: offset to STATUS reg
+ */
+struct dpu_intr_reg {
+ u32 clr_off;
+ u32 en_off;
+ u32 status_off;
+};
+
+/*
+ * dpu_intr_set - List of DPU interrupt registers
+ *
+ * When making changes be sure to sync with dpu_hw_intr_reg
+ */
+static const struct dpu_intr_reg dpu_intr_set[] = {
+ [MDP_SSPP_TOP0_INTR] = {
+ MDP_SSPP_TOP0_OFF+INTR_CLEAR,
+ MDP_SSPP_TOP0_OFF+INTR_EN,
+ MDP_SSPP_TOP0_OFF+INTR_STATUS
+ },
+ [MDP_SSPP_TOP0_INTR2] = {
+ MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
+ MDP_SSPP_TOP0_OFF+INTR2_EN,
+ MDP_SSPP_TOP0_OFF+INTR2_STATUS
+ },
+ [MDP_SSPP_TOP0_HIST_INTR] = {
+ MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
+ MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
+ MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
+ },
+ [MDP_INTF0_INTR] = {
+ MDP_INTF_0_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_0_OFF+INTF_INTR_EN,
+ MDP_INTF_0_OFF+INTF_INTR_STATUS
+ },
+ [MDP_INTF1_INTR] = {
+ MDP_INTF_1_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_1_OFF+INTF_INTR_EN,
+ MDP_INTF_1_OFF+INTF_INTR_STATUS
+ },
+ [MDP_INTF2_INTR] = {
+ MDP_INTF_2_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_2_OFF+INTF_INTR_EN,
+ MDP_INTF_2_OFF+INTF_INTR_STATUS
+ },
+ [MDP_INTF3_INTR] = {
+ MDP_INTF_3_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_3_OFF+INTF_INTR_EN,
+ MDP_INTF_3_OFF+INTF_INTR_STATUS
+ },
+ [MDP_INTF4_INTR] = {
+ MDP_INTF_4_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_4_OFF+INTF_INTR_EN,
+ MDP_INTF_4_OFF+INTF_INTR_STATUS
+ },
+ [MDP_INTF5_INTR] = {
+ MDP_INTF_5_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_5_OFF+INTF_INTR_EN,
+ MDP_INTF_5_OFF+INTF_INTR_STATUS
+ },
+ [MDP_AD4_0_INTR] = {
+ MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
+ MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
+ MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
+ },
+ [MDP_AD4_1_INTR] = {
+ MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
+ MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
+ MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
+ },
+ [MDP_INTF0_7xxx_INTR] = {
+ MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_CLEAR,
+ MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_EN,
+ MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_STATUS
+ },
+ [MDP_INTF1_7xxx_INTR] = {
+ MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_CLEAR,
+ MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_EN,
+ MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS
+ },
+ [MDP_INTF2_7xxx_INTR] = {
+ MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_CLEAR,
+ MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_EN,
+ MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_STATUS
+ },
+ [MDP_INTF3_7xxx_INTR] = {
+ MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_CLEAR,
+ MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_EN,
+
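+ /*
+ * Each register set in this table contributes a bank of 32 interrupt
+ * indices: DPU_IRQ_IDX(reg_idx, offset) = reg_idx * 32 + offset (see
+ * dpu_hw_interrupts.h), which DPU_IRQ_REG()/DPU_IRQ_MASK() below
+ * invert. For example:
+ *
+ *   irq_idx = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9);   // 1 * 32 + 9 = 41
+ *   DPU_IRQ_REG(41)  == 1                            // 41 / 32
+ *   DPU_IRQ_MASK(41) == BIT(9)                       // BIT(41 % 32)
+ */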
MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_STATUS + }, + [MDP_INTF4_7xxx_INTR] = { + MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_CLEAR, + MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_EN, + MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_STATUS + }, + [MDP_INTF5_7xxx_INTR] = { + MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR, + MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN, + MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS + }, +}; + +#define DPU_IRQ_REG(irq_idx) (irq_idx / 32) +#define DPU_IRQ_MASK(irq_idx) (BIT(irq_idx % 32)) + +/** + * dpu_core_irq_callback_handler - dispatch core interrupts + * @dpu_kms: Pointer to DPU's KMS structure + * @irq_idx: interrupt index + */ +static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx) +{ + VERB("irq_idx=%d\n", irq_idx); + + if (!dpu_kms->hw_intr->irq_tbl[irq_idx].cb) + DRM_ERROR("no registered cb, idx:%d\n", irq_idx); + + atomic_inc(&dpu_kms->hw_intr->irq_tbl[irq_idx].count); + + /* + * Perform registered function callback + */ + dpu_kms->hw_intr->irq_tbl[irq_idx].cb(dpu_kms->hw_intr->irq_tbl[irq_idx].arg, irq_idx); +} + +irqreturn_t dpu_core_irq(struct msm_kms *kms) +{ + struct dpu_kms *dpu_kms = to_dpu_kms(kms); + struct dpu_hw_intr *intr = dpu_kms->hw_intr; + int reg_idx; + int irq_idx; + u32 irq_status; + u32 enable_mask; + int bit; + unsigned long irq_flags; + + if (!intr) + return IRQ_NONE; + + spin_lock_irqsave(&intr->irq_lock, irq_flags); + for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) { + if (!test_bit(reg_idx, &intr->irq_mask)) + continue; + + /* Read interrupt status */ + irq_status = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].status_off); + + /* Read enable mask */ + enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].en_off); + + /* and clear the interrupt */ + if (irq_status) + DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off, + irq_status); + + /* Finally update IRQ status based on enable mask */ + irq_status &= enable_mask; + + if (!irq_status) + continue; + + /* + * Search through matching intr status. + */ + while ((bit = ffs(irq_status)) != 0) { + irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1); + + dpu_core_irq_callback_handler(dpu_kms, irq_idx); + + /* + * When callback finish, clear the irq_status + * with the matching mask. Once irq_status + * is all cleared, the search can be stopped. + */ + irq_status &= ~BIT(bit - 1); + } + } + + /* ensure register writes go through */ + wmb(); + + spin_unlock_irqrestore(&intr->irq_lock, irq_flags); + + return IRQ_HANDLED; +} + +static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx) +{ + int reg_idx; + const struct dpu_intr_reg *reg; + const char *dbgstr = NULL; + uint32_t cache_irq_mask; + + if (!intr) + return -EINVAL; + + if (irq_idx < 0 || irq_idx >= intr->total_irqs) { + pr_err("invalid IRQ index: [%d]\n", irq_idx); + return -EINVAL; + } + + /* + * The cache_irq_mask and hardware RMW operations needs to be done + * under irq_lock and it's the caller's responsibility to ensure that's + * held. 
+ */ + assert_spin_locked(&intr->irq_lock); + + reg_idx = DPU_IRQ_REG(irq_idx); + reg = &dpu_intr_set[reg_idx]; + + cache_irq_mask = intr->cache_irq_mask[reg_idx]; + if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) { + dbgstr = "DPU IRQ already set:"; + } else { + dbgstr = "DPU IRQ enabled:"; + + cache_irq_mask |= DPU_IRQ_MASK(irq_idx); + /* Cleaning any pending interrupt */ + DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx)); + /* Enabling interrupts with the new mask */ + DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask); + + /* ensure register write goes through */ + wmb(); + + intr->cache_irq_mask[reg_idx] = cache_irq_mask; + } + + pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr, + DPU_IRQ_MASK(irq_idx), cache_irq_mask); + + return 0; +} + +static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx) +{ + int reg_idx; + const struct dpu_intr_reg *reg; + const char *dbgstr = NULL; + uint32_t cache_irq_mask; + + if (!intr) + return -EINVAL; + + if (irq_idx < 0 || irq_idx >= intr->total_irqs) { + pr_err("invalid IRQ index: [%d]\n", irq_idx); + return -EINVAL; + } + + /* + * The cache_irq_mask and hardware RMW operations needs to be done + * under irq_lock and it's the caller's responsibility to ensure that's + * held. + */ + assert_spin_locked(&intr->irq_lock); + + reg_idx = DPU_IRQ_REG(irq_idx); + reg = &dpu_intr_set[reg_idx]; + + cache_irq_mask = intr->cache_irq_mask[reg_idx]; + if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) { + dbgstr = "DPU IRQ is already cleared:"; + } else { + dbgstr = "DPU IRQ mask disable:"; + + cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx); + /* Disable interrupts based on the new mask */ + DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask); + /* Cleaning any pending interrupt */ + DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx)); + + /* ensure register write goes through */ + wmb(); + + intr->cache_irq_mask[reg_idx] = cache_irq_mask; + } + + pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr, + DPU_IRQ_MASK(irq_idx), cache_irq_mask); + + return 0; +} + +static void dpu_clear_irqs(struct dpu_kms *dpu_kms) +{ + struct dpu_hw_intr *intr = dpu_kms->hw_intr; + int i; + + if (!intr) + return; + + for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) { + if (test_bit(i, &intr->irq_mask)) + DPU_REG_WRITE(&intr->hw, + dpu_intr_set[i].clr_off, 0xffffffff); + } + + /* ensure register writes go through */ + wmb(); +} + +static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms) +{ + struct dpu_hw_intr *intr = dpu_kms->hw_intr; + int i; + + if (!intr) + return; + + for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) { + if (test_bit(i, &intr->irq_mask)) + DPU_REG_WRITE(&intr->hw, + dpu_intr_set[i].en_off, 0x00000000); + } + + /* ensure register writes go through */ + wmb(); +} + +u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx) +{ + struct dpu_hw_intr *intr = dpu_kms->hw_intr; + int reg_idx; + unsigned long irq_flags; + u32 intr_status; + + if (!intr) + return 0; + + if (irq_idx < 0) { + DPU_ERROR("[%pS] invalid irq_idx=%d\n", + __builtin_return_address(0), irq_idx); + return 0; + } + + if (irq_idx < 0 || irq_idx >= intr->total_irqs) { + pr_err("invalid IRQ index: [%d]\n", irq_idx); + return 0; + } + + spin_lock_irqsave(&intr->irq_lock, irq_flags); + + reg_idx = DPU_IRQ_REG(irq_idx); + intr_status = DPU_REG_READ(&intr->hw, + dpu_intr_set[reg_idx].status_off) & + DPU_IRQ_MASK(irq_idx); + if (intr_status) + DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off, + intr_status); + + /* ensure register writes go through */ 
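+ /*
+ * Note dpu_core_irq_read() is a destructive read: a status bit found
+ * set is acked by the CLEAR write above. A hypothetical caller polling
+ * for a one-shot interrupt:
+ *
+ *   if (dpu_core_irq_read(dpu_kms, irq_idx))
+ *           ;   // the interrupt had fired and has now been cleared
+ */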
+ wmb(); + + spin_unlock_irqrestore(&intr->irq_lock, irq_flags); + + return intr_status; +} + +static void __intr_offset(const struct dpu_mdss_cfg *m, + void __iomem *addr, struct dpu_hw_blk_reg_map *hw) +{ + hw->blk_addr = addr + m->mdp[0].base; +} + +struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr, + const struct dpu_mdss_cfg *m) +{ + struct dpu_hw_intr *intr; + int nirq = MDP_INTR_MAX * 32; + + if (!addr || !m) + return ERR_PTR(-EINVAL); + + intr = kzalloc(struct_size(intr, irq_tbl, nirq), GFP_KERNEL); + if (!intr) + return ERR_PTR(-ENOMEM); + + __intr_offset(m, addr, &intr->hw); + + intr->total_irqs = nirq; + + intr->irq_mask = m->mdss_irqs; + + spin_lock_init(&intr->irq_lock); + + return intr; +} + +void dpu_hw_intr_destroy(struct dpu_hw_intr *intr) +{ + kfree(intr); +} + +int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx, + void (*irq_cb)(void *arg, int irq_idx), + void *irq_arg) +{ + unsigned long irq_flags; + int ret; + + if (!irq_cb) { + DPU_ERROR("invalid ird_idx:%d irq_cb:%ps\n", irq_idx, irq_cb); + return -EINVAL; + } + + if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) { + DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx); + return -EINVAL; + } + + VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx); + + spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags); + + if (unlikely(WARN_ON(dpu_kms->hw_intr->irq_tbl[irq_idx].cb))) { + spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags); + + return -EBUSY; + } + + trace_dpu_core_irq_register_callback(irq_idx, irq_cb); + dpu_kms->hw_intr->irq_tbl[irq_idx].arg = irq_arg; + dpu_kms->hw_intr->irq_tbl[irq_idx].cb = irq_cb; + + ret = dpu_hw_intr_enable_irq_locked( + dpu_kms->hw_intr, + irq_idx); + if (ret) + DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n", + irq_idx); + spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags); + + trace_dpu_irq_register_success(irq_idx); + + return 0; +} + +int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx) +{ + unsigned long irq_flags; + int ret; + + if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) { + DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx); + return -EINVAL; + } + + VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx); + + spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags); + trace_dpu_core_irq_unregister_callback(irq_idx); + + ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx); + if (ret) + DPU_ERROR("Fail to disable IRQ for irq_idx:%d: %d\n", + irq_idx, ret); + + dpu_kms->hw_intr->irq_tbl[irq_idx].cb = NULL; + dpu_kms->hw_intr->irq_tbl[irq_idx].arg = NULL; + + spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags); + + trace_dpu_irq_unregister_success(irq_idx); + + return 0; +} + +#ifdef CONFIG_DEBUG_FS +static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v) +{ + struct dpu_kms *dpu_kms = s->private; + unsigned long irq_flags; + int i, irq_count; + void *cb; + + for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) { + spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags); + irq_count = atomic_read(&dpu_kms->hw_intr->irq_tbl[i].count); + cb = dpu_kms->hw_intr->irq_tbl[i].cb; + spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags); + + if (irq_count || cb) + seq_printf(s, "idx:%d irq:%d cb:%ps\n", i, irq_count, cb); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq); + +void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms, + struct dentry *parent) +{ + debugfs_create_file("core_irq", 0600, parent, dpu_kms, + 
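+ /*
+ * Typical registration flow for this callback API (hypothetical
+ * consumer; "my_ctx" and "vblank_cb" are illustrative names only):
+ *
+ *   static void vblank_cb(void *arg, int irq_idx)
+ *   {
+ *           struct my_ctx *ctx = arg;
+ *           ...
+ *   }
+ *
+ *   ret = dpu_core_irq_register_callback(dpu_kms, irq_idx, vblank_cb, ctx);
+ *   ...
+ *   dpu_core_irq_unregister_callback(dpu_kms, irq_idx);
+ */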
&dpu_debugfs_core_irq_fops);
+}
+#endif
+
+void dpu_core_irq_preinstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ int i;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ dpu_clear_irqs(dpu_kms);
+ dpu_disable_all_irqs(dpu_kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
+ atomic_set(&dpu_kms->hw_intr->irq_tbl[i].count, 0);
+}
+
+void dpu_core_irq_uninstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ int i;
+
+ if (!dpu_kms->hw_intr)
+ return;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
+ if (dpu_kms->hw_intr->irq_tbl[i].cb)
+ DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
+
+ dpu_clear_irqs(dpu_kms);
+ dpu_disable_all_irqs(dpu_kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
new file mode 100644
index 000000000..464439554
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_INTERRUPTS_H
+#define _DPU_HW_INTERRUPTS_H
+
+#include <linux/types.h>
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+
+/* When making changes be sure to sync with dpu_intr_set */
+enum dpu_hw_intr_reg {
+ MDP_SSPP_TOP0_INTR,
+ MDP_SSPP_TOP0_INTR2,
+ MDP_SSPP_TOP0_HIST_INTR,
+ MDP_INTF0_INTR,
+ MDP_INTF1_INTR,
+ MDP_INTF2_INTR,
+ MDP_INTF3_INTR,
+ MDP_INTF4_INTR,
+ MDP_INTF5_INTR,
+ MDP_AD4_0_INTR,
+ MDP_AD4_1_INTR,
+ MDP_INTF0_7xxx_INTR,
+ MDP_INTF1_7xxx_INTR,
+ MDP_INTF2_7xxx_INTR,
+ MDP_INTF3_7xxx_INTR,
+ MDP_INTF4_7xxx_INTR,
+ MDP_INTF5_7xxx_INTR,
+ MDP_INTR_MAX,
+};
+
+#define DPU_IRQ_IDX(reg_idx, offset) (reg_idx * 32 + offset)
+
+/**
+ * struct dpu_hw_intr: hw interrupts handling data structure
+ * @hw: virtual address mapping
+ * @cache_irq_mask: array of IRQ enable masks reg storage created during init
+ * @save_irq_status: array of IRQ status reg storage created during init
+ * @total_irqs: total number of irq_idx mapped in the hw_interrupts
+ * @irq_lock: spinlock for accessing IRQ resources
+ * @irq_mask: mask of the interrupt register sets in use
+ * @irq_tbl: array of IRQ callbacks
+ */
+struct dpu_hw_intr {
+ struct dpu_hw_blk_reg_map hw;
+ u32 cache_irq_mask[MDP_INTR_MAX];
+ u32 *save_irq_status;
+ u32 total_irqs;
+ spinlock_t irq_lock;
+ unsigned long irq_mask;
+
+ struct {
+ void (*cb)(void *arg, int irq_idx);
+ void *arg;
+ atomic_t count;
+ } irq_tbl[];
+};
+
+/**
+ * dpu_hw_intr_init(): Initializes the interrupts hw object
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_intr_destroy(): Cleanup interrupts hw object
+ * @intr: pointer to interrupts hw object
+ */
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr);
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
new file mode 100644
index 000000000..384558d2f
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */ + +#include "dpu_hwio.h" +#include "dpu_hw_catalog.h" +#include "dpu_hw_intf.h" +#include "dpu_kms.h" + +#define INTF_TIMING_ENGINE_EN 0x000 +#define INTF_CONFIG 0x004 +#define INTF_HSYNC_CTL 0x008 +#define INTF_VSYNC_PERIOD_F0 0x00C +#define INTF_VSYNC_PERIOD_F1 0x010 +#define INTF_VSYNC_PULSE_WIDTH_F0 0x014 +#define INTF_VSYNC_PULSE_WIDTH_F1 0x018 +#define INTF_DISPLAY_V_START_F0 0x01C +#define INTF_DISPLAY_V_START_F1 0x020 +#define INTF_DISPLAY_V_END_F0 0x024 +#define INTF_DISPLAY_V_END_F1 0x028 +#define INTF_ACTIVE_V_START_F0 0x02C +#define INTF_ACTIVE_V_START_F1 0x030 +#define INTF_ACTIVE_V_END_F0 0x034 +#define INTF_ACTIVE_V_END_F1 0x038 +#define INTF_DISPLAY_HCTL 0x03C +#define INTF_ACTIVE_HCTL 0x040 +#define INTF_BORDER_COLOR 0x044 +#define INTF_UNDERFLOW_COLOR 0x048 +#define INTF_HSYNC_SKEW 0x04C +#define INTF_POLARITY_CTL 0x050 +#define INTF_TEST_CTL 0x054 +#define INTF_TP_COLOR0 0x058 +#define INTF_TP_COLOR1 0x05C +#define INTF_CONFIG2 0x060 +#define INTF_DISPLAY_DATA_HCTL 0x064 +#define INTF_ACTIVE_DATA_HCTL 0x068 +#define INTF_FRAME_LINE_COUNT_EN 0x0A8 +#define INTF_FRAME_COUNT 0x0AC +#define INTF_LINE_COUNT 0x0B0 + +#define INTF_DEFLICKER_CONFIG 0x0F0 +#define INTF_DEFLICKER_STRNG_COEFF 0x0F4 +#define INTF_DEFLICKER_WEAK_COEFF 0x0F8 + +#define INTF_DSI_CMD_MODE_TRIGGER_EN 0x084 +#define INTF_PANEL_FORMAT 0x090 +#define INTF_TPG_ENABLE 0x100 +#define INTF_TPG_MAIN_CONTROL 0x104 +#define INTF_TPG_VIDEO_CONFIG 0x108 +#define INTF_TPG_COMPONENT_LIMITS 0x10C +#define INTF_TPG_RECTANGLE 0x110 +#define INTF_TPG_INITIAL_VALUE 0x114 +#define INTF_TPG_BLK_WHITE_PATTERN_FRAMES 0x118 +#define INTF_TPG_RGB_MAPPING 0x11C +#define INTF_PROG_FETCH_START 0x170 +#define INTF_PROG_ROT_START 0x174 +#define INTF_MUX 0x25C +#define INTF_STATUS 0x26C + +#define INTF_CFG_ACTIVE_H_EN BIT(29) +#define INTF_CFG_ACTIVE_V_EN BIT(30) + +#define INTF_CFG2_DATABUS_WIDEN BIT(0) +#define INTF_CFG2_DATA_HCTL_EN BIT(4) + +#define INTF_MISR_CTRL 0x180 +#define INTF_MISR_SIGNATURE 0x184 + +static const struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf, + const struct dpu_mdss_cfg *m, + void __iomem *addr, + struct dpu_hw_blk_reg_map *b) +{ + int i; + + for (i = 0; i < m->intf_count; i++) { + if ((intf == m->intf[i].id) && + (m->intf[i].type != INTF_NONE)) { + b->blk_addr = addr + m->intf[i].base; + b->log_mask = DPU_DBG_MASK_INTF; + return &m->intf[i]; + } + } + + return ERR_PTR(-EINVAL); +} + +static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx, + const struct intf_timing_params *p, + const struct dpu_format *fmt) +{ + struct dpu_hw_blk_reg_map *c = &ctx->hw; + u32 hsync_period, vsync_period; + u32 display_v_start, display_v_end; + u32 hsync_start_x, hsync_end_x; + u32 hsync_data_start_x, hsync_data_end_x; + u32 active_h_start, active_h_end; + u32 active_v_start, active_v_end; + u32 active_hctl, display_hctl, hsync_ctl; + u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity; + u32 panel_format; + u32 intf_cfg, intf_cfg2 = 0; + u32 display_data_hctl = 0, active_data_hctl = 0; + u32 data_width; + bool dp_intf = false; + + /* read interface_cfg */ + intf_cfg = DPU_REG_READ(c, INTF_CONFIG); + + if (ctx->cap->type == INTF_DP) + dp_intf = true; + + hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width + + p->h_front_porch; + vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height + + p->v_front_porch; + + display_v_start = ((p->vsync_pulse_width + p->v_back_porch) * + hsync_period) + p->hsync_skew; + display_v_end = ((vsync_period - p->v_front_porch) * 
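+ /*
+ * Worked example of this arithmetic for a hypothetical 1080p60-like
+ * mode: hsync_pulse_width = 44, h_back_porch = 148, width = 1920,
+ * h_front_porch = 88 gives hsync_period = 2200 pixels; vsync_pulse_width
+ * = 5, v_back_porch = 36, height = 1080, v_front_porch = 4 gives
+ * vsync_period = 1125 lines. Vertical positions are then expressed in
+ * pixel-clock units, e.g. display_v_start = (5 + 36) * 2200 + hsync_skew.
+ */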
hsync_period) + + p->hsync_skew - 1; + + hsync_start_x = p->h_back_porch + p->hsync_pulse_width; + hsync_end_x = hsync_period - p->h_front_porch - 1; + + if (p->width != p->xres) { /* border fill added */ + active_h_start = hsync_start_x; + active_h_end = active_h_start + p->xres - 1; + } else { + active_h_start = 0; + active_h_end = 0; + } + + if (p->height != p->yres) { /* border fill added */ + active_v_start = display_v_start; + active_v_end = active_v_start + (p->yres * hsync_period) - 1; + } else { + active_v_start = 0; + active_v_end = 0; + } + + if (active_h_end) { + active_hctl = (active_h_end << 16) | active_h_start; + intf_cfg |= INTF_CFG_ACTIVE_H_EN; + } else { + active_hctl = 0; + } + + if (active_v_end) + intf_cfg |= INTF_CFG_ACTIVE_V_EN; + + hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width; + display_hctl = (hsync_end_x << 16) | hsync_start_x; + + /* + * DATA_HCTL_EN controls data timing which can be different from + * video timing. It is recommended to enable it for all cases, except + * if compression is enabled in 1 pixel per clock mode + */ + if (p->wide_bus_en) + intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN | INTF_CFG2_DATA_HCTL_EN; + + data_width = p->width; + + hsync_data_start_x = hsync_start_x; + hsync_data_end_x = hsync_start_x + data_width - 1; + + display_data_hctl = (hsync_data_end_x << 16) | hsync_data_start_x; + + if (dp_intf) { + /* DP timing adjustment */ + display_v_start += p->hsync_pulse_width + p->h_back_porch; + display_v_end -= p->h_front_porch; + + active_h_start = hsync_start_x; + active_h_end = active_h_start + p->xres - 1; + active_v_start = display_v_start; + active_v_end = active_v_start + (p->yres * hsync_period) - 1; + + active_hctl = (active_h_end << 16) | active_h_start; + display_hctl = active_hctl; + + intf_cfg |= INTF_CFG_ACTIVE_H_EN | INTF_CFG_ACTIVE_V_EN; + } + + den_polarity = 0; + if (ctx->cap->type == INTF_HDMI) { + hsync_polarity = p->yres >= 720 ? 0 : 1; + vsync_polarity = p->yres >= 720 ? 
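+ /*
+ * Example of the resulting polarity word: for an HDMI mode below 720
+ * active lines both ternaries evaluate to 1 (active-low syncs), so
+ * polarity_ctl below becomes (0 << 2) | (1 << 1) | (1 << 0) = 0x3.
+ */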
0 : 1; + } else if (ctx->cap->type == INTF_DP) { + hsync_polarity = p->hsync_polarity; + vsync_polarity = p->vsync_polarity; + } else { + hsync_polarity = 0; + vsync_polarity = 0; + } + polarity_ctl = (den_polarity << 2) | /* DEN Polarity */ + (vsync_polarity << 1) | /* VSYNC Polarity */ + (hsync_polarity << 0); /* HSYNC Polarity */ + + if (!DPU_FORMAT_IS_YUV(fmt)) + panel_format = (fmt->bits[C0_G_Y] | + (fmt->bits[C1_B_Cb] << 2) | + (fmt->bits[C2_R_Cr] << 4) | + (0x21 << 8)); + else + /* Interface treats all the pixel data in RGB888 format */ + panel_format = (COLOR_8BIT | + (COLOR_8BIT << 2) | + (COLOR_8BIT << 4) | + (0x21 << 8)); + + DPU_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl); + DPU_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period); + DPU_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0, + p->vsync_pulse_width * hsync_period); + DPU_REG_WRITE(c, INTF_DISPLAY_HCTL, display_hctl); + DPU_REG_WRITE(c, INTF_DISPLAY_V_START_F0, display_v_start); + DPU_REG_WRITE(c, INTF_DISPLAY_V_END_F0, display_v_end); + DPU_REG_WRITE(c, INTF_ACTIVE_HCTL, active_hctl); + DPU_REG_WRITE(c, INTF_ACTIVE_V_START_F0, active_v_start); + DPU_REG_WRITE(c, INTF_ACTIVE_V_END_F0, active_v_end); + DPU_REG_WRITE(c, INTF_BORDER_COLOR, p->border_clr); + DPU_REG_WRITE(c, INTF_UNDERFLOW_COLOR, p->underflow_clr); + DPU_REG_WRITE(c, INTF_HSYNC_SKEW, p->hsync_skew); + DPU_REG_WRITE(c, INTF_POLARITY_CTL, polarity_ctl); + DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3); + DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg); + DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format); + if (ctx->cap->features & BIT(DPU_DATA_HCTL_EN)) { + DPU_REG_WRITE(c, INTF_CONFIG2, intf_cfg2); + DPU_REG_WRITE(c, INTF_DISPLAY_DATA_HCTL, display_data_hctl); + DPU_REG_WRITE(c, INTF_ACTIVE_DATA_HCTL, active_data_hctl); + } +} + +static void dpu_hw_intf_enable_timing_engine( + struct dpu_hw_intf *intf, + u8 enable) +{ + struct dpu_hw_blk_reg_map *c = &intf->hw; + /* Note: Display interface select is handled in top block hw layer */ + DPU_REG_WRITE(c, INTF_TIMING_ENGINE_EN, enable != 0); +} + +static void dpu_hw_intf_setup_prg_fetch( + struct dpu_hw_intf *intf, + const struct intf_prog_fetch *fetch) +{ + struct dpu_hw_blk_reg_map *c = &intf->hw; + int fetch_enable; + + /* + * Fetch should always be outside the active lines. If the fetching + * is programmed within active region, hardware behavior is unknown. 
+ */ + + fetch_enable = DPU_REG_READ(c, INTF_CONFIG); + if (fetch->enable) { + fetch_enable |= BIT(31); + DPU_REG_WRITE(c, INTF_PROG_FETCH_START, + fetch->fetch_start); + } else { + fetch_enable &= ~BIT(31); + } + + DPU_REG_WRITE(c, INTF_CONFIG, fetch_enable); +} + +static void dpu_hw_intf_bind_pingpong_blk( + struct dpu_hw_intf *intf, + bool enable, + const enum dpu_pingpong pp) +{ + struct dpu_hw_blk_reg_map *c = &intf->hw; + u32 mux_cfg; + + mux_cfg = DPU_REG_READ(c, INTF_MUX); + mux_cfg &= ~0xf; + + if (enable) + mux_cfg |= (pp - PINGPONG_0) & 0x7; + else + mux_cfg |= 0xf; + + DPU_REG_WRITE(c, INTF_MUX, mux_cfg); +} + +static void dpu_hw_intf_get_status( + struct dpu_hw_intf *intf, + struct intf_status *s) +{ + struct dpu_hw_blk_reg_map *c = &intf->hw; + unsigned long cap = intf->cap->features; + + if (cap & BIT(DPU_INTF_STATUS_SUPPORTED)) + s->is_en = DPU_REG_READ(c, INTF_STATUS) & BIT(0); + else + s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN); + + s->is_prog_fetch_en = !!(DPU_REG_READ(c, INTF_CONFIG) & BIT(31)); + if (s->is_en) { + s->frame_count = DPU_REG_READ(c, INTF_FRAME_COUNT); + s->line_count = DPU_REG_READ(c, INTF_LINE_COUNT); + } else { + s->line_count = 0; + s->frame_count = 0; + } +} + +static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf) +{ + struct dpu_hw_blk_reg_map *c; + + if (!intf) + return 0; + + c = &intf->hw; + + return DPU_REG_READ(c, INTF_LINE_COUNT); +} + +static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf) +{ + dpu_hw_setup_misr(&intf->hw, INTF_MISR_CTRL, 0x1); +} + +static int dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf, u32 *misr_value) +{ + return dpu_hw_collect_misr(&intf->hw, INTF_MISR_CTRL, INTF_MISR_SIGNATURE, misr_value); +} + +static void _setup_intf_ops(struct dpu_hw_intf_ops *ops, + unsigned long cap) +{ + ops->setup_timing_gen = dpu_hw_intf_setup_timing_engine; + ops->setup_prg_fetch = dpu_hw_intf_setup_prg_fetch; + ops->get_status = dpu_hw_intf_get_status; + ops->enable_timing = dpu_hw_intf_enable_timing_engine; + ops->get_line_count = dpu_hw_intf_get_line_count; + if (cap & BIT(DPU_INTF_INPUT_CTRL)) + ops->bind_pingpong_blk = dpu_hw_intf_bind_pingpong_blk; + ops->setup_misr = dpu_hw_intf_setup_misr; + ops->collect_misr = dpu_hw_intf_collect_misr; +} + +struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx, + void __iomem *addr, + const struct dpu_mdss_cfg *m) +{ + struct dpu_hw_intf *c; + const struct dpu_intf_cfg *cfg; + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return ERR_PTR(-ENOMEM); + + cfg = _intf_offset(idx, m, addr, &c->hw); + if (IS_ERR_OR_NULL(cfg)) { + kfree(c); + pr_err("failed to create dpu_hw_intf %d\n", idx); + return ERR_PTR(-EINVAL); + } + + /* + * Assign ops + */ + c->idx = idx; + c->cap = cfg; + c->mdss = m; + _setup_intf_ops(&c->ops, c->cap->features); + + return c; +} + +void dpu_hw_intf_destroy(struct dpu_hw_intf *intf) +{ + kfree(intf); +} + diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h new file mode 100644 index 000000000..e75339b96 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DPU_HW_INTF_H +#define _DPU_HW_INTF_H + +#include "dpu_hw_catalog.h" +#include "dpu_hw_mdss.h" +#include "dpu_hw_util.h" + +struct dpu_hw_intf; + +/* intf timing settings */ +struct intf_timing_params { + u32 width; /* active width */ + u32 height; /* active height */ + u32 xres; /* Display panel width */ + u32 yres; /* Display panel height */ + + u32 h_back_porch; + u32 h_front_porch; + u32 v_back_porch; + u32 v_front_porch; + u32 hsync_pulse_width; + u32 vsync_pulse_width; + u32 hsync_polarity; + u32 vsync_polarity; + u32 border_clr; + u32 underflow_clr; + u32 hsync_skew; + + bool wide_bus_en; +}; + +struct intf_prog_fetch { + u8 enable; + /* vsync counter for the front porch pixel line */ + u32 fetch_start; +}; + +struct intf_status { + u8 is_en; /* interface timing engine is enabled or not */ + u8 is_prog_fetch_en; /* interface prog fetch counter is enabled or not */ + u32 frame_count; /* frame count since timing engine enabled */ + u32 line_count; /* current line count including blanking */ +}; + +/** + * struct dpu_hw_intf_ops : Interface to the interface Hw driver functions + * Assumption is these functions will be called after clocks are enabled + * @ setup_timing_gen : programs the timing engine + * @ setup_prog_fetch : enables/disables the programmable fetch logic + * @ enable_timing: enable/disable timing engine + * @ get_status: returns if timing engine is enabled or not + * @ get_line_count: reads current vertical line counter + * @bind_pingpong_blk: enable/disable the connection with pingpong which will + * feed pixels to this interface + * @setup_misr: enable/disable MISR + * @collect_misr: read MISR signature + */ +struct dpu_hw_intf_ops { + void (*setup_timing_gen)(struct dpu_hw_intf *intf, + const struct intf_timing_params *p, + const struct dpu_format *fmt); + + void (*setup_prg_fetch)(struct dpu_hw_intf *intf, + const struct intf_prog_fetch *fetch); + + void (*enable_timing)(struct dpu_hw_intf *intf, + u8 enable); + + void (*get_status)(struct dpu_hw_intf *intf, + struct intf_status *status); + + u32 (*get_line_count)(struct dpu_hw_intf *intf); + + void (*bind_pingpong_blk)(struct dpu_hw_intf *intf, + bool enable, + const enum dpu_pingpong pp); + void (*setup_misr)(struct dpu_hw_intf *intf); + int (*collect_misr)(struct dpu_hw_intf *intf, u32 *misr_value); +}; + +struct dpu_hw_intf { + struct dpu_hw_blk_reg_map hw; + + /* intf */ + enum dpu_intf idx; + const struct dpu_intf_cfg *cap; + const struct dpu_mdss_cfg *mdss; + + /* ops */ + struct dpu_hw_intf_ops ops; +}; + +/** + * dpu_hw_intf_init(): Initializes the intf driver for the passed + * interface idx. + * @idx: interface index for which driver object is required + * @addr: mapped register io address of MDP + * @m : pointer to mdss catalog data + */ +struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx, + void __iomem *addr, + const struct dpu_mdss_cfg *m); + +/** + * dpu_hw_intf_destroy(): Destroys INTF driver context + * @intf: Pointer to INTF driver context + */ +void dpu_hw_intf_destroy(struct dpu_hw_intf *intf); + +#endif /*_DPU_HW_INTF_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c new file mode 100644 index 000000000..cc04fb979 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. 
+ */ + +#include "dpu_kms.h" +#include "dpu_hw_catalog.h" +#include "dpu_hwio.h" +#include "dpu_hw_lm.h" +#include "dpu_hw_mdss.h" + +#define LM_OP_MODE 0x00 +#define LM_OUT_SIZE 0x04 +#define LM_BORDER_COLOR_0 0x08 +#define LM_BORDER_COLOR_1 0x010 + +/* These register are offset to mixer base + stage base */ +#define LM_BLEND0_OP 0x00 +#define LM_BLEND0_CONST_ALPHA 0x04 +#define LM_FG_COLOR_FILL_COLOR_0 0x08 +#define LM_FG_COLOR_FILL_COLOR_1 0x0C +#define LM_FG_COLOR_FILL_SIZE 0x10 +#define LM_FG_COLOR_FILL_XY 0x14 + +#define LM_BLEND0_FG_ALPHA 0x04 +#define LM_BLEND0_BG_ALPHA 0x08 + +#define LM_MISR_CTRL 0x310 +#define LM_MISR_SIGNATURE 0x314 + + +static const struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer, + const struct dpu_mdss_cfg *m, + void __iomem *addr, + struct dpu_hw_blk_reg_map *b) +{ + int i; + + for (i = 0; i < m->mixer_count; i++) { + if (mixer == m->mixer[i].id) { + b->blk_addr = addr + m->mixer[i].base; + b->log_mask = DPU_DBG_MASK_LM; + return &m->mixer[i]; + } + } + + return ERR_PTR(-ENOMEM); +} + +/** + * _stage_offset(): returns the relative offset of the blend registers + * for the stage to be setup + * @ctx: mixer ctx contains the mixer to be programmed + * @stage: stage index to setup + */ +static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage) +{ + const struct dpu_lm_sub_blks *sblk = ctx->cap->sblk; + if (stage != DPU_STAGE_BASE && stage <= sblk->maxblendstages) + return sblk->blendstage_base[stage - DPU_STAGE_0]; + + return -EINVAL; +} + +static void dpu_hw_lm_setup_out(struct dpu_hw_mixer *ctx, + struct dpu_hw_mixer_cfg *mixer) +{ + struct dpu_hw_blk_reg_map *c = &ctx->hw; + u32 outsize; + u32 op_mode; + + op_mode = DPU_REG_READ(c, LM_OP_MODE); + + outsize = mixer->out_height << 16 | mixer->out_width; + DPU_REG_WRITE(c, LM_OUT_SIZE, outsize); + + /* SPLIT_LEFT_RIGHT */ + if (mixer->right_mixer) + op_mode |= BIT(31); + else + op_mode &= ~BIT(31); + DPU_REG_WRITE(c, LM_OP_MODE, op_mode); +} + +static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx, + struct dpu_mdss_color *color, + u8 border_en) +{ + struct dpu_hw_blk_reg_map *c = &ctx->hw; + + if (border_en) { + DPU_REG_WRITE(c, LM_BORDER_COLOR_0, + (color->color_0 & 0xFFF) | + ((color->color_1 & 0xFFF) << 0x10)); + DPU_REG_WRITE(c, LM_BORDER_COLOR_1, + (color->color_2 & 0xFFF) | + ((color->color_3 & 0xFFF) << 0x10)); + } +} + +static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx) +{ + dpu_hw_setup_misr(&ctx->hw, LM_MISR_CTRL, 0x0); +} + +static int dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx, u32 *misr_value) +{ + return dpu_hw_collect_misr(&ctx->hw, LM_MISR_CTRL, LM_MISR_SIGNATURE, misr_value); +} + +static void dpu_hw_lm_setup_blend_config_combined_alpha(struct dpu_hw_mixer *ctx, + u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op) +{ + struct dpu_hw_blk_reg_map *c = &ctx->hw; + int stage_off; + u32 const_alpha; + + if (stage == DPU_STAGE_BASE) + return; + + stage_off = _stage_offset(ctx, stage); + if (WARN_ON(stage_off < 0)) + return; + + const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16); + DPU_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha); + DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op); +} + +static void dpu_hw_lm_setup_blend_config(struct dpu_hw_mixer *ctx, + u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op) +{ + struct dpu_hw_blk_reg_map *c = &ctx->hw; + int stage_off; + + if (stage == DPU_STAGE_BASE) + return; + + stage_off = _stage_offset(ctx, stage); + if (WARN_ON(stage_off < 0)) + return; + + DPU_REG_WRITE(c, 
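+ /*
+ * blend_op is an OR of the DPU_BLEND_* flags from dpu_hw_mdss.h; a
+ * sketch of a conventional "coverage" alpha blend for one stage, as a
+ * hypothetical caller of this op would set it up:
+ *
+ *   u32 blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
+ *                  DPU_BLEND_BG_ALPHA_FG_PIXEL |
+ *                  DPU_BLEND_BG_INV_ALPHA;
+ *
+ *   lm->ops.setup_blend_config(lm, stage, 0xff, 0xff, blend_op);
+ */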
LM_BLEND0_FG_ALPHA + stage_off, fg_alpha); + DPU_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off, bg_alpha); + DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op); +} + +static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx, + uint32_t mixer_op_mode) +{ + struct dpu_hw_blk_reg_map *c = &ctx->hw; + int op_mode; + + /* read the existing op_mode configuration */ + op_mode = DPU_REG_READ(c, LM_OP_MODE); + + op_mode = (op_mode & (BIT(31) | BIT(30))) | mixer_op_mode; + + DPU_REG_WRITE(c, LM_OP_MODE, op_mode); +} + +static void _setup_mixer_ops(const struct dpu_mdss_cfg *m, + struct dpu_hw_lm_ops *ops, + unsigned long features) +{ + ops->setup_mixer_out = dpu_hw_lm_setup_out; + if (test_bit(DPU_MIXER_COMBINED_ALPHA, &features)) + ops->setup_blend_config = dpu_hw_lm_setup_blend_config_combined_alpha; + else + ops->setup_blend_config = dpu_hw_lm_setup_blend_config; + ops->setup_alpha_out = dpu_hw_lm_setup_color3; + ops->setup_border_color = dpu_hw_lm_setup_border_color; + ops->setup_misr = dpu_hw_lm_setup_misr; + ops->collect_misr = dpu_hw_lm_collect_misr; +} + +struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx, + void __iomem *addr, + const struct dpu_mdss_cfg *m) +{ + struct dpu_hw_mixer *c; + const struct dpu_lm_cfg *cfg; + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return ERR_PTR(-ENOMEM); + + cfg = _lm_offset(idx, m, addr, &c->hw); + if (IS_ERR_OR_NULL(cfg)) { + kfree(c); + return ERR_PTR(-EINVAL); + } + + /* Assign ops */ + c->idx = idx; + c->cap = cfg; + _setup_mixer_ops(m, &c->ops, c->cap->features); + + return c; +} + +void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm) +{ + kfree(lm); +} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h new file mode 100644 index 000000000..0a050eb24 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DPU_HW_LM_H +#define _DPU_HW_LM_H + +#include "dpu_hw_mdss.h" +#include "dpu_hw_util.h" + +struct dpu_hw_mixer; + +struct dpu_hw_mixer_cfg { + u32 out_width; + u32 out_height; + bool right_mixer; + int flags; +}; + +struct dpu_hw_color3_cfg { + u8 keep_fg[DPU_STAGE_MAX]; +}; + +/** + * + * struct dpu_hw_lm_ops : Interface to the mixer Hw driver functions + * Assumption is these functions will be called after clocks are enabled + */ +struct dpu_hw_lm_ops { + /* + * Sets up mixer output width and height + * and border color if enabled + */ + void (*setup_mixer_out)(struct dpu_hw_mixer *ctx, + struct dpu_hw_mixer_cfg *cfg); + + /* + * Alpha blending configuration + * for the specified stage + */ + void (*setup_blend_config)(struct dpu_hw_mixer *ctx, uint32_t stage, + uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op); + + /* + * Alpha color component selection from either fg or bg + */ + void (*setup_alpha_out)(struct dpu_hw_mixer *ctx, uint32_t mixer_op); + + /** + * setup_border_color : enable/disable border color + */ + void (*setup_border_color)(struct dpu_hw_mixer *ctx, + struct dpu_mdss_color *color, + u8 border_en); + + /** + * setup_misr: Enable/disable MISR + */ + void (*setup_misr)(struct dpu_hw_mixer *ctx); + + /** + * collect_misr: Read MISR signature + */ + int (*collect_misr)(struct dpu_hw_mixer *ctx, u32 *misr_value); +}; + +struct dpu_hw_mixer { + struct dpu_hw_blk base; + struct dpu_hw_blk_reg_map hw; + + /* lm */ + enum dpu_lm idx; + const struct dpu_lm_cfg *cap; + const struct dpu_mdp_cfg *mdp; + const struct dpu_ctl_cfg *ctl; + + /* ops */ + struct dpu_hw_lm_ops ops; + + /* store mixer info specific to display */ + struct dpu_hw_mixer_cfg cfg; +}; + +/** + * to_dpu_hw_mixer - convert base object dpu_hw_base to container + * @hw: Pointer to base hardware block + * return: Pointer to hardware block container + */ +static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw) +{ + return container_of(hw, struct dpu_hw_mixer, base); +} + +/** + * dpu_hw_lm_init(): Initializes the mixer hw driver object. + * should be called once before accessing every mixer. + * @idx: mixer index for which driver object is required + * @addr: mapped register io address of MDP + * @m : pointer to mdss catalog data + */ +struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx, + void __iomem *addr, + const struct dpu_mdss_cfg *m); + +/** + * dpu_hw_lm_destroy(): Destroys layer mixer driver context + * @lm: Pointer to LM driver context + */ +void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm); + +#endif /*_DPU_HW_LM_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h new file mode 100644 index 000000000..d3b0ed0a9 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h @@ -0,0 +1,459 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
+ */
+
+#ifndef _DPU_HW_MDSS_H
+#define _DPU_HW_MDSS_H
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "msm_drv.h"
+
+#define DPU_DBG_NAME "dpu"
+
+#define DPU_NONE 0
+
+#ifndef DPU_CSC_MATRIX_COEFF_SIZE
+#define DPU_CSC_MATRIX_COEFF_SIZE 9
+#endif
+
+#ifndef DPU_CSC_CLAMP_SIZE
+#define DPU_CSC_CLAMP_SIZE 6
+#endif
+
+#ifndef DPU_CSC_BIAS_SIZE
+#define DPU_CSC_BIAS_SIZE 3
+#endif
+
+#ifndef DPU_MAX_PLANES
+#define DPU_MAX_PLANES 4
+#endif
+
+#define PIPES_PER_STAGE 2
+#ifndef DPU_MAX_DE_CURVES
+#define DPU_MAX_DE_CURVES 3
+#endif
+
+enum dpu_format_flags {
+ DPU_FORMAT_FLAG_YUV_BIT,
+ DPU_FORMAT_FLAG_DX_BIT,
+ DPU_FORMAT_FLAG_COMPRESSED_BIT,
+ DPU_FORMAT_FLAG_BIT_MAX,
+};
+
+#define DPU_FORMAT_FLAG_YUV BIT(DPU_FORMAT_FLAG_YUV_BIT)
+#define DPU_FORMAT_FLAG_DX BIT(DPU_FORMAT_FLAG_DX_BIT)
+#define DPU_FORMAT_FLAG_COMPRESSED BIT(DPU_FORMAT_FLAG_COMPRESSED_BIT)
+#define DPU_FORMAT_IS_YUV(X) \
+ (test_bit(DPU_FORMAT_FLAG_YUV_BIT, (X)->flag))
+#define DPU_FORMAT_IS_DX(X) \
+ (test_bit(DPU_FORMAT_FLAG_DX_BIT, (X)->flag))
+#define DPU_FORMAT_IS_LINEAR(X) ((X)->fetch_mode == DPU_FETCH_LINEAR)
+#define DPU_FORMAT_IS_TILE(X) \
+ (((X)->fetch_mode == DPU_FETCH_UBWC) && \
+ !test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+#define DPU_FORMAT_IS_UBWC(X) \
+ (((X)->fetch_mode == DPU_FETCH_UBWC) && \
+ test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+
+#define DPU_BLEND_FG_ALPHA_FG_CONST (0 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_CONST (1 << 0)
+#define DPU_BLEND_FG_ALPHA_FG_PIXEL (2 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_PIXEL (3 << 0)
+#define DPU_BLEND_FG_INV_ALPHA (1 << 2)
+#define DPU_BLEND_FG_MOD_ALPHA (1 << 3)
+#define DPU_BLEND_FG_INV_MOD_ALPHA (1 << 4)
+#define DPU_BLEND_FG_TRANSP_EN (1 << 5)
+#define DPU_BLEND_BG_ALPHA_FG_CONST (0 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_CONST (1 << 8)
+#define DPU_BLEND_BG_ALPHA_FG_PIXEL (2 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_PIXEL (3 << 8)
+#define DPU_BLEND_BG_INV_ALPHA (1 << 10)
+#define DPU_BLEND_BG_MOD_ALPHA (1 << 11)
+#define DPU_BLEND_BG_INV_MOD_ALPHA (1 << 12)
+#define DPU_BLEND_BG_TRANSP_EN (1 << 13)
+
+#define DPU_VSYNC0_SOURCE_GPIO 0
+#define DPU_VSYNC1_SOURCE_GPIO 1
+#define DPU_VSYNC2_SOURCE_GPIO 2
+#define DPU_VSYNC_SOURCE_INTF_0 3
+#define DPU_VSYNC_SOURCE_INTF_1 4
+#define DPU_VSYNC_SOURCE_INTF_2 5
+#define DPU_VSYNC_SOURCE_INTF_3 6
+#define DPU_VSYNC_SOURCE_WD_TIMER_4 11
+#define DPU_VSYNC_SOURCE_WD_TIMER_3 12
+#define DPU_VSYNC_SOURCE_WD_TIMER_2 13
+#define DPU_VSYNC_SOURCE_WD_TIMER_1 14
+#define DPU_VSYNC_SOURCE_WD_TIMER_0 15
+
+enum dpu_hw_blk_type {
+ DPU_HW_BLK_TOP = 0,
+ DPU_HW_BLK_SSPP,
+ DPU_HW_BLK_LM,
+ DPU_HW_BLK_CTL,
+ DPU_HW_BLK_PINGPONG,
+ DPU_HW_BLK_INTF,
+ DPU_HW_BLK_WB,
+ DPU_HW_BLK_DSPP,
+ DPU_HW_BLK_MERGE_3D,
+ DPU_HW_BLK_DSC,
+ DPU_HW_BLK_MAX,
+};
+
+enum dpu_mdp {
+ MDP_TOP = 0x1,
+ MDP_MAX,
+};
+
+enum dpu_sspp {
+ SSPP_NONE,
+ SSPP_VIG0,
+ SSPP_VIG1,
+ SSPP_VIG2,
+ SSPP_VIG3,
+ SSPP_RGB0,
+ SSPP_RGB1,
+ SSPP_RGB2,
+ SSPP_RGB3,
+ SSPP_DMA0,
+ SSPP_DMA1,
+ SSPP_DMA2,
+ SSPP_DMA3,
+ SSPP_CURSOR0,
+ SSPP_CURSOR1,
+ SSPP_MAX
+};
+
+enum dpu_sspp_type {
+ SSPP_TYPE_VIG,
+ SSPP_TYPE_RGB,
+ SSPP_TYPE_DMA,
+ SSPP_TYPE_CURSOR,
+ SSPP_TYPE_MAX
+};
+
+enum dpu_lm {
+ LM_0 = 1,
+ LM_1,
+ LM_2,
+ LM_3,
+ LM_4,
+ LM_5,
+ LM_6,
+ LM_MAX
+};
+
+enum dpu_stage {
+ DPU_STAGE_BASE = 0,
+ DPU_STAGE_0,
+ DPU_STAGE_1,
+ DPU_STAGE_2,
+ DPU_STAGE_3,
+ DPU_STAGE_4,
+ DPU_STAGE_5,
+ DPU_STAGE_6,
+ DPU_STAGE_7,
+ DPU_STAGE_8,
+ DPU_STAGE_9,
+ DPU_STAGE_10,
+ DPU_STAGE_MAX
+};
+enum dpu_dspp {
+ DSPP_0 = 1,
+ DSPP_1,
+ DSPP_2,
+ DSPP_3,
+ DSPP_MAX
+};
+
+enum
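+ /*
+ * Note the id convention shared by these block enums: valid instances
+ * start at 1 (MDP_TOP = 0x1, LM_0 = 1, DSPP_0 = 1, ...), leaving 0 to
+ * mean "none", as SSPP_NONE and DSC_NONE spell out. A lookup can
+ * therefore treat a zero id as "block not present", e.g.:
+ *
+ *   if (!id)
+ *           return NULL;   // hypothetical guard, id is an enum value
+ */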
dpu_ctl { + CTL_0 = 1, + CTL_1, + CTL_2, + CTL_3, + CTL_4, + CTL_5, + CTL_MAX +}; + +enum dpu_dsc { + DSC_NONE = 0, + DSC_0, + DSC_1, + DSC_2, + DSC_3, + DSC_4, + DSC_5, + DSC_MAX +}; + +enum dpu_pingpong { + PINGPONG_0 = 1, + PINGPONG_1, + PINGPONG_2, + PINGPONG_3, + PINGPONG_4, + PINGPONG_5, + PINGPONG_S0, + PINGPONG_MAX +}; + +enum dpu_merge_3d { + MERGE_3D_0 = 1, + MERGE_3D_1, + MERGE_3D_2, + MERGE_3D_MAX +}; + +enum dpu_intf { + INTF_0 = 1, + INTF_1, + INTF_2, + INTF_3, + INTF_4, + INTF_5, + INTF_6, + INTF_MAX +}; + +/* + * Historically these values correspond to the values written to the + * DISP_INTF_SEL register, which had to programmed manually. On newer MDP + * generations this register is NOP, but we keep the values for historical + * reasons. + */ +enum dpu_intf_type { + INTF_NONE = 0x0, + INTF_DSI = 0x1, + INTF_HDMI = 0x3, + INTF_LCDC = 0x5, + /* old eDP found on 8x74 and 8x84 */ + INTF_EDP = 0x9, + /* both DP and eDP, handled by the new DP driver */ + INTF_DP = 0xa, + + /* virtual interfaces */ + INTF_WB = 0x100, +}; + +enum dpu_intf_mode { + INTF_MODE_NONE = 0, + INTF_MODE_CMD, + INTF_MODE_VIDEO, + INTF_MODE_WB_BLOCK, + INTF_MODE_WB_LINE, + INTF_MODE_MAX +}; + +enum dpu_wb { + WB_0 = 1, + WB_1, + WB_2, + WB_3, + WB_MAX +}; + +enum dpu_cwb { + CWB_0 = 0x1, + CWB_1, + CWB_2, + CWB_3, + CWB_MAX +}; + +enum dpu_wd_timer { + WD_TIMER_0 = 0x1, + WD_TIMER_1, + WD_TIMER_2, + WD_TIMER_3, + WD_TIMER_4, + WD_TIMER_5, + WD_TIMER_MAX +}; + +enum dpu_vbif { + VBIF_RT, + VBIF_NRT, + VBIF_MAX, +}; + +/** + * DPU HW,Component order color map + */ +enum { + C0_G_Y = 0, + C1_B_Cb = 1, + C2_R_Cr = 2, + C3_ALPHA = 3 +}; + +/** + * enum dpu_plane_type - defines how the color component pixel packing + * @DPU_PLANE_INTERLEAVED : Color components in single plane + * @DPU_PLANE_PLANAR : Color component in separate planes + * @DPU_PLANE_PSEUDO_PLANAR : Chroma components interleaved in separate plane + */ +enum dpu_plane_type { + DPU_PLANE_INTERLEAVED, + DPU_PLANE_PLANAR, + DPU_PLANE_PSEUDO_PLANAR, +}; + +/** + * enum dpu_chroma_samp_type - chroma sub-samplng type + * @DPU_CHROMA_RGB : No chroma subsampling + * @DPU_CHROMA_H2V1 : Chroma pixels are horizontally subsampled + * @DPU_CHROMA_H1V2 : Chroma pixels are vertically subsampled + * @DPU_CHROMA_420 : 420 subsampling + */ +enum dpu_chroma_samp_type { + DPU_CHROMA_RGB, + DPU_CHROMA_H2V1, + DPU_CHROMA_H1V2, + DPU_CHROMA_420 +}; + +/** + * dpu_fetch_type - Defines How DPU HW fetches data + * @DPU_FETCH_LINEAR : fetch is line by line + * @DPU_FETCH_TILE : fetches data in Z order from a tile + * @DPU_FETCH_UBWC : fetch and decompress data + */ +enum dpu_fetch_type { + DPU_FETCH_LINEAR, + DPU_FETCH_TILE, + DPU_FETCH_UBWC +}; + +/** + * Value of enum chosen to fit the number of bits + * expected by the HW programming. 
+ */ +enum { + COLOR_ALPHA_1BIT = 0, + COLOR_ALPHA_4BIT = 1, + COLOR_4BIT = 0, + COLOR_5BIT = 1, /* No 5-bit Alpha */ + COLOR_6BIT = 2, /* 6-Bit Alpha also = 2 */ + COLOR_8BIT = 3, /* 8-Bit Alpha also = 3 */ +}; + +/** + * enum dpu_3d_blend_mode + * Desribes how the 3d data is blended + * @BLEND_3D_NONE : 3d blending not enabled + * @BLEND_3D_FRAME_INT : Frame interleaving + * @BLEND_3D_H_ROW_INT : Horizontal row interleaving + * @BLEND_3D_V_ROW_INT : vertical row interleaving + * @BLEND_3D_COL_INT : column interleaving + * @BLEND_3D_MAX : + */ +enum dpu_3d_blend_mode { + BLEND_3D_NONE = 0, + BLEND_3D_FRAME_INT, + BLEND_3D_H_ROW_INT, + BLEND_3D_V_ROW_INT, + BLEND_3D_COL_INT, + BLEND_3D_MAX +}; + +/** struct dpu_format - defines the format configuration which + * allows DPU HW to correctly fetch and decode the format + * @base: base msm_format structure containing fourcc code + * @fetch_planes: how the color components are packed in pixel format + * @element: element color ordering + * @bits: element bit widths + * @chroma_sample: chroma sub-samplng type + * @unpack_align_msb: unpack aligned, 0 to LSB, 1 to MSB + * @unpack_tight: 0 for loose, 1 for tight + * @unpack_count: 0 = 1 component, 1 = 2 component + * @bpp: bytes per pixel + * @alpha_enable: whether the format has an alpha channel + * @num_planes: number of planes (including meta data planes) + * @fetch_mode: linear, tiled, or ubwc hw fetch behavior + * @flag: usage bit flags + * @tile_width: format tile width + * @tile_height: format tile height + */ +struct dpu_format { + struct msm_format base; + enum dpu_plane_type fetch_planes; + u8 element[DPU_MAX_PLANES]; + u8 bits[DPU_MAX_PLANES]; + enum dpu_chroma_samp_type chroma_sample; + u8 unpack_align_msb; + u8 unpack_tight; + u8 unpack_count; + u8 bpp; + u8 alpha_enable; + u8 num_planes; + enum dpu_fetch_type fetch_mode; + DECLARE_BITMAP(flag, DPU_FORMAT_FLAG_BIT_MAX); + u16 tile_width; + u16 tile_height; +}; +#define to_dpu_format(x) container_of(x, struct dpu_format, base) + +/** + * struct dpu_hw_fmt_layout - format information of the source pixel data + * @format: pixel format parameters + * @num_planes: number of planes (including meta data planes) + * @width: image width + * @height: image height + * @total_size: total size in bytes + * @plane_addr: address of each plane + * @plane_size: length of each plane + * @plane_pitch: pitch of each plane + */ +struct dpu_hw_fmt_layout { + const struct dpu_format *format; + uint32_t num_planes; + uint32_t width; + uint32_t height; + uint32_t total_size; + uint32_t plane_addr[DPU_MAX_PLANES]; + uint32_t plane_size[DPU_MAX_PLANES]; + uint32_t plane_pitch[DPU_MAX_PLANES]; +}; + +struct dpu_csc_cfg { + /* matrix coefficients in S15.16 format */ + uint32_t csc_mv[DPU_CSC_MATRIX_COEFF_SIZE]; + uint32_t csc_pre_bv[DPU_CSC_BIAS_SIZE]; + uint32_t csc_post_bv[DPU_CSC_BIAS_SIZE]; + uint32_t csc_pre_lv[DPU_CSC_CLAMP_SIZE]; + uint32_t csc_post_lv[DPU_CSC_CLAMP_SIZE]; +}; + +/** + * struct dpu_mdss_color - mdss color description + * color 0 : green + * color 1 : blue + * color 2 : red + * color 3 : alpha + */ +struct dpu_mdss_color { + u32 color_0; + u32 color_1; + u32 color_2; + u32 color_3; +}; + +/* + * Define bit masks for h/w logging. 
+ */ +#define DPU_DBG_MASK_NONE (1 << 0) +#define DPU_DBG_MASK_INTF (1 << 1) +#define DPU_DBG_MASK_LM (1 << 2) +#define DPU_DBG_MASK_CTL (1 << 3) +#define DPU_DBG_MASK_PINGPONG (1 << 4) +#define DPU_DBG_MASK_SSPP (1 << 5) +#define DPU_DBG_MASK_WB (1 << 6) +#define DPU_DBG_MASK_TOP (1 << 7) +#define DPU_DBG_MASK_VBIF (1 << 8) +#define DPU_DBG_MASK_ROT (1 << 9) +#define DPU_DBG_MASK_DSPP (1 << 10) +#define DPU_DBG_MASK_DSC (1 << 11) + +#endif /* _DPU_HW_MDSS_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c new file mode 100644 index 000000000..def0a87fd --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + */ + +#include <linux/iopoll.h> + +#include "dpu_hw_mdss.h" +#include "dpu_hwio.h" +#include "dpu_hw_catalog.h" +#include "dpu_hw_merge3d.h" +#include "dpu_kms.h" +#include "dpu_trace.h" + +#define MERGE_3D_MUX 0x000 +#define MERGE_3D_MODE 0x004 + +static const struct dpu_merge_3d_cfg *_merge_3d_offset(enum dpu_merge_3d idx, + const struct dpu_mdss_cfg *m, + void __iomem *addr, + struct dpu_hw_blk_reg_map *b) +{ + int i; + + for (i = 0; i < m->merge_3d_count; i++) { + if (idx == m->merge_3d[i].id) { + b->blk_addr = addr + m->merge_3d[i].base; + b->log_mask = DPU_DBG_MASK_PINGPONG; + return &m->merge_3d[i]; + } + } + + return ERR_PTR(-EINVAL); +} + +static void dpu_hw_merge_3d_setup_3d_mode(struct dpu_hw_merge_3d *merge_3d, + enum dpu_3d_blend_mode mode_3d) +{ + struct dpu_hw_blk_reg_map *c; + u32 data; + + + c = &merge_3d->hw; + if (mode_3d == BLEND_3D_NONE) { + DPU_REG_WRITE(c, MERGE_3D_MODE, 0); + DPU_REG_WRITE(c, MERGE_3D_MUX, 0); + } else { + data = BIT(0) | ((mode_3d - 1) << 1); + DPU_REG_WRITE(c, MERGE_3D_MODE, data); + } +} + +static void _setup_merge_3d_ops(struct dpu_hw_merge_3d *c, + unsigned long features) +{ + c->ops.setup_3d_mode = dpu_hw_merge_3d_setup_3d_mode; +}; + +struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(enum dpu_merge_3d idx, + void __iomem *addr, + const struct dpu_mdss_cfg *m) +{ + struct dpu_hw_merge_3d *c; + const struct dpu_merge_3d_cfg *cfg; + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return ERR_PTR(-ENOMEM); + + cfg = _merge_3d_offset(idx, m, addr, &c->hw); + if (IS_ERR_OR_NULL(cfg)) { + kfree(c); + return ERR_PTR(-EINVAL); + } + + c->idx = idx; + c->caps = cfg; + _setup_merge_3d_ops(c, c->caps->features); + + return c; +} + +void dpu_hw_merge_3d_destroy(struct dpu_hw_merge_3d *hw) +{ + kfree(hw); +} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h new file mode 100644 index 000000000..81fd1d5f7 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */ + +#ifndef _DPU_HW_MERGE3D_H +#define _DPU_HW_MERGE3D_H + +#include "dpu_hw_catalog.h" +#include "dpu_hw_mdss.h" +#include "dpu_hw_util.h" + +struct dpu_hw_merge_3d; + +/** + * + * struct dpu_hw_merge_3d_ops : Interface to the merge_3d Hw driver functions + * Assumption is these functions will be called after clocks are enabled + * @setup_3d_mode : enable 3D merge + */ +struct dpu_hw_merge_3d_ops { + void (*setup_3d_mode)(struct dpu_hw_merge_3d *merge_3d, + enum dpu_3d_blend_mode mode_3d); + +}; + +struct dpu_hw_merge_3d { + struct dpu_hw_blk base; + struct dpu_hw_blk_reg_map hw; + + /* merge_3d */ + enum dpu_merge_3d idx; + const struct dpu_merge_3d_cfg *caps; + + /* ops */ + struct dpu_hw_merge_3d_ops ops; +}; + +/** + * to_dpu_hw_merge_3d - convert base object dpu_hw_base to container + * @hw: Pointer to base hardware block + * return: Pointer to hardware block container + */ +static inline struct dpu_hw_merge_3d *to_dpu_hw_merge_3d(struct dpu_hw_blk *hw) +{ + return container_of(hw, struct dpu_hw_merge_3d, base); +} + +/** + * dpu_hw_merge_3d_init - initializes the merge_3d driver for the passed + * merge_3d idx. + * @idx: merge_3d index for which driver object is required + * @addr: Mapped register io address of MDP + * @m: Pointer to mdss catalog data + * Returns: Error code or allocated dpu_hw_merge_3d context + */ +struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(enum dpu_merge_3d idx, + void __iomem *addr, + const struct dpu_mdss_cfg *m); + +/** + * dpu_hw_merge_3d_destroy - destroys merge_3d driver context + * should be called to free the context + * @pp: Pointer to merge_3d driver context returned by dpu_hw_merge_3d_init + */ +void dpu_hw_merge_3d_destroy(struct dpu_hw_merge_3d *pp); + +#endif /*_DPU_HW_MERGE3D_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c new file mode 100644 index 000000000..0fcad9760 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c @@ -0,0 +1,320 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */ + +#include <linux/iopoll.h> + +#include "dpu_hw_mdss.h" +#include "dpu_hwio.h" +#include "dpu_hw_catalog.h" +#include "dpu_hw_pingpong.h" +#include "dpu_kms.h" +#include "dpu_trace.h" + +#define PP_TEAR_CHECK_EN 0x000 +#define PP_SYNC_CONFIG_VSYNC 0x004 +#define PP_SYNC_CONFIG_HEIGHT 0x008 +#define PP_SYNC_WRCOUNT 0x00C +#define PP_VSYNC_INIT_VAL 0x010 +#define PP_INT_COUNT_VAL 0x014 +#define PP_SYNC_THRESH 0x018 +#define PP_START_POS 0x01C +#define PP_RD_PTR_IRQ 0x020 +#define PP_WR_PTR_IRQ 0x024 +#define PP_OUT_LINE_COUNT 0x028 +#define PP_LINE_COUNT 0x02C +#define PP_AUTOREFRESH_CONFIG 0x030 + +#define PP_FBC_MODE 0x034 +#define PP_FBC_BUDGET_CTL 0x038 +#define PP_FBC_LOSSY_MODE 0x03C +#define PP_DSC_MODE 0x0a0 +#define PP_DCE_DATA_IN_SWAP 0x0ac +#define PP_DCE_DATA_OUT_SWAP 0x0c8 + +#define PP_DITHER_EN 0x000 +#define PP_DITHER_BITDEPTH 0x004 +#define PP_DITHER_MATRIX 0x008 + +#define DITHER_DEPTH_MAP_INDEX 9 + +static u32 dither_depth_map[DITHER_DEPTH_MAP_INDEX] = { + 0, 0, 0, 0, 0, 0, 0, 1, 2 +}; + +static const struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp, + const struct dpu_mdss_cfg *m, + void __iomem *addr, + struct dpu_hw_blk_reg_map *b) +{ + int i; + + for (i = 0; i < m->pingpong_count; i++) { + if (pp == m->pingpong[i].id) { + b->blk_addr = addr + m->pingpong[i].base; + b->log_mask = DPU_DBG_MASK_PINGPONG; + return &m->pingpong[i]; + } + } + + return ERR_PTR(-EINVAL); +} + +static void dpu_hw_pp_setup_dither(struct dpu_hw_pingpong *pp, + struct dpu_hw_dither_cfg *cfg) +{ + struct dpu_hw_blk_reg_map *c; + u32 i, base, data = 0; + + c = &pp->hw; + base = pp->caps->sblk->dither.base; + if (!cfg) { + DPU_REG_WRITE(c, base + PP_DITHER_EN, 0); + return; + } + + data = dither_depth_map[cfg->c0_bitdepth] & REG_MASK(2); + data |= (dither_depth_map[cfg->c1_bitdepth] & REG_MASK(2)) << 2; + data |= (dither_depth_map[cfg->c2_bitdepth] & REG_MASK(2)) << 4; + data |= (dither_depth_map[cfg->c3_bitdepth] & REG_MASK(2)) << 6; + data |= (cfg->temporal_en) ? (1 << 8) : 0; + + DPU_REG_WRITE(c, base + PP_DITHER_BITDEPTH, data); + + for (i = 0; i < DITHER_MATRIX_SZ - 3; i += 4) { + data = (cfg->matrix[i] & REG_MASK(4)) | + ((cfg->matrix[i + 1] & REG_MASK(4)) << 4) | + ((cfg->matrix[i + 2] & REG_MASK(4)) << 8) | + ((cfg->matrix[i + 3] & REG_MASK(4)) << 12); + DPU_REG_WRITE(c, base + PP_DITHER_MATRIX + i, data); + } + DPU_REG_WRITE(c, base + PP_DITHER_EN, 1); +} + +static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp, + struct dpu_hw_tear_check *te) +{ + struct dpu_hw_blk_reg_map *c; + int cfg; + + if (!pp || !te) + return -EINVAL; + c = &pp->hw; + + cfg = BIT(19); /* VSYNC_COUNTER_EN */ + if (te->hw_vsync_mode) + cfg |= BIT(20); + + cfg |= te->vsync_count; + + DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg); + DPU_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height); + DPU_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val); + DPU_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq); + DPU_REG_WRITE(c, PP_START_POS, te->start_pos); + DPU_REG_WRITE(c, PP_SYNC_THRESH, + ((te->sync_threshold_continue << 16) | + te->sync_threshold_start)); + DPU_REG_WRITE(c, PP_SYNC_WRCOUNT, + (te->start_pos + te->sync_threshold_start + 1)); + + return 0; +} + +static void dpu_hw_pp_setup_autorefresh_config(struct dpu_hw_pingpong *pp, + u32 frame_count, bool enable) +{ + DPU_REG_WRITE(&pp->hw, PP_AUTOREFRESH_CONFIG, + enable ?
(BIT(31) | frame_count) : 0); +} + +/* + * dpu_hw_pp_get_autorefresh_config - Get autorefresh config from HW + * @pp: DPU pingpong structure + * @frame_count: Used to return the current frame count from hw + * + * Returns: True if autorefresh enabled, false if disabled. + */ +static bool dpu_hw_pp_get_autorefresh_config(struct dpu_hw_pingpong *pp, + u32 *frame_count) +{ + u32 val = DPU_REG_READ(&pp->hw, PP_AUTOREFRESH_CONFIG); + if (frame_count != NULL) + *frame_count = val & 0xffff; + return !!((val & BIT(31)) >> 31); +} + +static int dpu_hw_pp_poll_timeout_wr_ptr(struct dpu_hw_pingpong *pp, + u32 timeout_us) +{ + struct dpu_hw_blk_reg_map *c; + u32 val; + int rc; + + if (!pp) + return -EINVAL; + + c = &pp->hw; + rc = readl_poll_timeout(c->blk_addr + PP_LINE_COUNT, + val, (val & 0xffff) >= 1, 10, timeout_us); + + return rc; +} + +static int dpu_hw_pp_enable_te(struct dpu_hw_pingpong *pp, bool enable) +{ + struct dpu_hw_blk_reg_map *c; + + if (!pp) + return -EINVAL; + c = &pp->hw; + + DPU_REG_WRITE(c, PP_TEAR_CHECK_EN, enable); + return 0; +} + +static int dpu_hw_pp_connect_external_te(struct dpu_hw_pingpong *pp, + bool enable_external_te) +{ + struct dpu_hw_blk_reg_map *c = &pp->hw; + u32 cfg; + int orig; + + if (!pp) + return -EINVAL; + + c = &pp->hw; + cfg = DPU_REG_READ(c, PP_SYNC_CONFIG_VSYNC); + orig = (bool)(cfg & BIT(20)); + if (enable_external_te) + cfg |= BIT(20); + else + cfg &= ~BIT(20); + DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg); + trace_dpu_pp_connect_ext_te(pp->idx - PINGPONG_0, cfg); + + return orig; +} + +static int dpu_hw_pp_get_vsync_info(struct dpu_hw_pingpong *pp, + struct dpu_hw_pp_vsync_info *info) +{ + struct dpu_hw_blk_reg_map *c; + u32 val; + + if (!pp || !info) + return -EINVAL; + c = &pp->hw; + + val = DPU_REG_READ(c, PP_VSYNC_INIT_VAL); + info->rd_ptr_init_val = val & 0xffff; + + val = DPU_REG_READ(c, PP_INT_COUNT_VAL); + info->rd_ptr_frame_count = (val & 0xffff0000) >> 16; + info->rd_ptr_line_count = val & 0xffff; + + val = DPU_REG_READ(c, PP_LINE_COUNT); + info->wr_ptr_line_count = val & 0xffff; + + return 0; +} + +static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp) +{ + struct dpu_hw_blk_reg_map *c = &pp->hw; + u32 height, init; + u32 line = 0xFFFF; + + if (!pp) + return 0; + c = &pp->hw; + + init = DPU_REG_READ(c, PP_VSYNC_INIT_VAL) & 0xFFFF; + height = DPU_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF; + + if (height < init) + return line; + + line = DPU_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF; + + if (line < init) + line += (0xFFFF - init); + else + line -= init; + + return line; +} + +static int dpu_hw_pp_dsc_enable(struct dpu_hw_pingpong *pp) +{ + struct dpu_hw_blk_reg_map *c = &pp->hw; + + DPU_REG_WRITE(c, PP_DSC_MODE, 1); + return 0; +} + +static void dpu_hw_pp_dsc_disable(struct dpu_hw_pingpong *pp) +{ + struct dpu_hw_blk_reg_map *c = &pp->hw; + + DPU_REG_WRITE(c, PP_DSC_MODE, 0); +} + +static int dpu_hw_pp_setup_dsc(struct dpu_hw_pingpong *pp) +{ + struct dpu_hw_blk_reg_map *pp_c = &pp->hw; + int data; + + data = DPU_REG_READ(pp_c, PP_DCE_DATA_OUT_SWAP); + data |= BIT(18); /* endian flip */ + DPU_REG_WRITE(pp_c, PP_DCE_DATA_OUT_SWAP, data); + return 0; +} + +static void _setup_pingpong_ops(struct dpu_hw_pingpong *c, + unsigned long features) +{ + c->ops.setup_tearcheck = dpu_hw_pp_setup_te_config; + c->ops.enable_tearcheck = dpu_hw_pp_enable_te; + c->ops.connect_external_te = dpu_hw_pp_connect_external_te; + c->ops.get_vsync_info = dpu_hw_pp_get_vsync_info; + c->ops.setup_autorefresh = dpu_hw_pp_setup_autorefresh_config; + 
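/* editor's note: get_autorefresh below reads back the enable bit and frame count that setup_autorefresh programmed into PP_AUTOREFRESH_CONFIG */ +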
c->ops.get_autorefresh = dpu_hw_pp_get_autorefresh_config; + c->ops.poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr; + c->ops.get_line_count = dpu_hw_pp_get_line_count; + c->ops.setup_dsc = dpu_hw_pp_setup_dsc; + c->ops.enable_dsc = dpu_hw_pp_dsc_enable; + c->ops.disable_dsc = dpu_hw_pp_dsc_disable; + + if (test_bit(DPU_PINGPONG_DITHER, &features)) + c->ops.setup_dither = dpu_hw_pp_setup_dither; +}; + +struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx, + void __iomem *addr, + const struct dpu_mdss_cfg *m) +{ + struct dpu_hw_pingpong *c; + const struct dpu_pingpong_cfg *cfg; + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return ERR_PTR(-ENOMEM); + + cfg = _pingpong_offset(idx, m, addr, &c->hw); + if (IS_ERR_OR_NULL(cfg)) { + kfree(c); + return ERR_PTR(-EINVAL); + } + + c->idx = idx; + c->caps = cfg; + _setup_pingpong_ops(c, c->caps->features); + + return c; +} + +void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp) +{ + kfree(pp); +} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h new file mode 100644 index 000000000..c00223441 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _DPU_HW_PINGPONG_H +#define _DPU_HW_PINGPONG_H + +#include "dpu_hw_catalog.h" +#include "dpu_hw_mdss.h" +#include "dpu_hw_util.h" + +#define DITHER_MATRIX_SZ 16 + +struct dpu_hw_pingpong; + +struct dpu_hw_tear_check { + /* + * This is ratio of MDP VSYNC clk freq(Hz) to + * refresh rate divided by no of lines + */ + u32 vsync_count; + u32 sync_cfg_height; + u32 vsync_init_val; + u32 sync_threshold_start; + u32 sync_threshold_continue; + u32 start_pos; + u32 rd_ptr_irq; + u8 hw_vsync_mode; +}; + +struct dpu_hw_pp_vsync_info { + u32 rd_ptr_init_val; /* value of rd pointer at vsync edge */ + u32 rd_ptr_frame_count; /* num frames sent since enabling interface */ + u32 rd_ptr_line_count; /* current line on panel (rd ptr) */ + u32 wr_ptr_line_count; /* current line within pp fifo (wr ptr) */ +}; + +/** + * struct dpu_hw_dither_cfg - dither feature structure + * @flags: for customizing operations + * @temporal_en: temporal dither enable + * @c0_bitdepth: c0 component bit depth + * @c1_bitdepth: c1 component bit depth + * @c2_bitdepth: c2 component bit depth + * @c3_bitdepth: c3 component bit depth + * @matrix: dither strength matrix + */ +struct dpu_hw_dither_cfg { + u64 flags; + u32 temporal_en; + u32 c0_bitdepth; + u32 c1_bitdepth; + u32 c2_bitdepth; + u32 c3_bitdepth; + u32 matrix[DITHER_MATRIX_SZ]; +}; + +/** + * + * struct dpu_hw_pingpong_ops : Interface to the pingpong Hw driver functions + * Assumption is these functions will be called after clocks are enabled + * @setup_tearcheck : program tear check values + * @enable_tearcheck : enables tear check + * @get_vsync_info : retrieves timing info of the panel + * @setup_autorefresh : configure and enable the autorefresh config + * @get_autorefresh : retrieve autorefresh config from hardware + * @setup_dither : function to program the dither hw block + * @get_line_count: obtain current vertical line counter + */ +struct dpu_hw_pingpong_ops { + /** + * enables vsync generation and sets up init value of + * read pointer and programs the tear check configuration + */ + int (*setup_tearcheck)(struct dpu_hw_pingpong *pp, + struct dpu_hw_tear_check *cfg); + + /** + * enables tear check block + */ + int
(*enable_tearcheck)(struct dpu_hw_pingpong *pp, + bool enable); + + /** + * read, modify, write to either set or clear listening to external TE + * @Return: 1 if TE was originally connected, 0 if not, or -ERROR + */ + int (*connect_external_te)(struct dpu_hw_pingpong *pp, + bool enable_external_te); + + /** + * provides the programmed and current + * line_count + */ + int (*get_vsync_info)(struct dpu_hw_pingpong *pp, + struct dpu_hw_pp_vsync_info *info); + + /** + * configure and enable the autorefresh config + */ + void (*setup_autorefresh)(struct dpu_hw_pingpong *pp, + u32 frame_count, bool enable); + + /** + * retrieve autorefresh config from hardware + */ + bool (*get_autorefresh)(struct dpu_hw_pingpong *pp, + u32 *frame_count); + + /** + * poll until write pointer transmission starts + * @Return: 0 on success, -ETIMEDOUT on timeout + */ + int (*poll_timeout_wr_ptr)(struct dpu_hw_pingpong *pp, u32 timeout_us); + + /** + * Obtain current vertical line counter + */ + u32 (*get_line_count)(struct dpu_hw_pingpong *pp); + + /** + * Setup dither matrix for pingpong block + */ + void (*setup_dither)(struct dpu_hw_pingpong *pp, + struct dpu_hw_dither_cfg *cfg); + /** + * Enable DSC + */ + int (*enable_dsc)(struct dpu_hw_pingpong *pp); + + /** + * Disable DSC + */ + void (*disable_dsc)(struct dpu_hw_pingpong *pp); + + /** + * Setup DSC + */ + int (*setup_dsc)(struct dpu_hw_pingpong *pp); +}; + +struct dpu_hw_merge_3d; + +struct dpu_hw_pingpong { + struct dpu_hw_blk base; + struct dpu_hw_blk_reg_map hw; + + /* pingpong */ + enum dpu_pingpong idx; + const struct dpu_pingpong_cfg *caps; + struct dpu_hw_merge_3d *merge_3d; + + /* ops */ + struct dpu_hw_pingpong_ops ops; +}; + +/** + * to_dpu_hw_pingpong - convert base object dpu_hw_base to container + * @hw: Pointer to base hardware block + * return: Pointer to hardware block container + */ +static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw) +{ + return container_of(hw, struct dpu_hw_pingpong, base); +} + +/** + * dpu_hw_pingpong_init - initializes the pingpong driver for the passed + * pingpong idx. + * @idx: Pingpong index for which driver object is required + * @addr: Mapped register io address of MDP + * @m: Pointer to mdss catalog data + * Returns: Error code or allocated dpu_hw_pingpong context + */ +struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx, + void __iomem *addr, + const struct dpu_mdss_cfg *m); + +/** + * dpu_hw_pingpong_destroy - destroys pingpong driver context + * should be called to free the context + * @pp: Pointer to PP driver context returned by dpu_hw_pingpong_init + */ +void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp); + +#endif /*_DPU_HW_PINGPONG_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c new file mode 100644 index 000000000..691c471b0 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c @@ -0,0 +1,815 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */ + +#include "dpu_hwio.h" +#include "dpu_hw_catalog.h" +#include "dpu_hw_lm.h" +#include "dpu_hw_sspp.h" +#include "dpu_kms.h" + +#include + +#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087 + +/* DPU_SSPP_SRC */ +#define SSPP_SRC_SIZE 0x00 +#define SSPP_SRC_XY 0x08 +#define SSPP_OUT_SIZE 0x0c +#define SSPP_OUT_XY 0x10 +#define SSPP_SRC0_ADDR 0x14 +#define SSPP_SRC1_ADDR 0x18 +#define SSPP_SRC2_ADDR 0x1C +#define SSPP_SRC3_ADDR 0x20 +#define SSPP_SRC_YSTRIDE0 0x24 +#define SSPP_SRC_YSTRIDE1 0x28 +#define SSPP_SRC_FORMAT 0x30 +#define SSPP_SRC_UNPACK_PATTERN 0x34 +#define SSPP_SRC_OP_MODE 0x38 + +/* SSPP_MULTIRECT*/ +#define SSPP_SRC_SIZE_REC1 0x16C +#define SSPP_SRC_XY_REC1 0x168 +#define SSPP_OUT_SIZE_REC1 0x160 +#define SSPP_OUT_XY_REC1 0x164 +#define SSPP_SRC_FORMAT_REC1 0x174 +#define SSPP_SRC_UNPACK_PATTERN_REC1 0x178 +#define SSPP_SRC_OP_MODE_REC1 0x17C +#define SSPP_MULTIRECT_OPMODE 0x170 +#define SSPP_SRC_CONSTANT_COLOR_REC1 0x180 +#define SSPP_EXCL_REC_SIZE_REC1 0x184 +#define SSPP_EXCL_REC_XY_REC1 0x188 + +#define MDSS_MDP_OP_DEINTERLACE BIT(22) +#define MDSS_MDP_OP_DEINTERLACE_ODD BIT(23) +#define MDSS_MDP_OP_IGC_ROM_1 BIT(18) +#define MDSS_MDP_OP_IGC_ROM_0 BIT(17) +#define MDSS_MDP_OP_IGC_EN BIT(16) +#define MDSS_MDP_OP_FLIP_UD BIT(14) +#define MDSS_MDP_OP_FLIP_LR BIT(13) +#define MDSS_MDP_OP_BWC_EN BIT(0) +#define MDSS_MDP_OP_PE_OVERRIDE BIT(31) +#define MDSS_MDP_OP_BWC_LOSSLESS (0 << 1) +#define MDSS_MDP_OP_BWC_Q_HIGH (1 << 1) +#define MDSS_MDP_OP_BWC_Q_MED (2 << 1) + +#define SSPP_SRC_CONSTANT_COLOR 0x3c +#define SSPP_EXCL_REC_CTL 0x40 +#define SSPP_UBWC_STATIC_CTRL 0x44 +#define SSPP_FETCH_CONFIG 0x048 +#define SSPP_DANGER_LUT 0x60 +#define SSPP_SAFE_LUT 0x64 +#define SSPP_CREQ_LUT 0x68 +#define SSPP_QOS_CTRL 0x6C +#define SSPP_DECIMATION_CONFIG 0xB4 +#define SSPP_SRC_ADDR_SW_STATUS 0x70 +#define SSPP_CREQ_LUT_0 0x74 +#define SSPP_CREQ_LUT_1 0x78 +#define SSPP_SW_PIX_EXT_C0_LR 0x100 +#define SSPP_SW_PIX_EXT_C0_TB 0x104 +#define SSPP_SW_PIX_EXT_C0_REQ_PIXELS 0x108 +#define SSPP_SW_PIX_EXT_C1C2_LR 0x110 +#define SSPP_SW_PIX_EXT_C1C2_TB 0x114 +#define SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS 0x118 +#define SSPP_SW_PIX_EXT_C3_LR 0x120 +#define SSPP_SW_PIX_EXT_C3_TB 0x124 +#define SSPP_SW_PIX_EXT_C3_REQ_PIXELS 0x128 +#define SSPP_TRAFFIC_SHAPER 0x130 +#define SSPP_CDP_CNTL 0x134 +#define SSPP_UBWC_ERROR_STATUS 0x138 +#define SSPP_CDP_CNTL_REC1 0x13c +#define SSPP_TRAFFIC_SHAPER_PREFILL 0x150 +#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL 0x154 +#define SSPP_TRAFFIC_SHAPER_REC1 0x158 +#define SSPP_EXCL_REC_SIZE 0x1B4 +#define SSPP_EXCL_REC_XY 0x1B8 +#define SSPP_VIG_OP_MODE 0x0 +#define SSPP_VIG_CSC_10_OP_MODE 0x0 +#define SSPP_TRAFFIC_SHAPER_BPC_MAX 0xFF + +/* SSPP_QOS_CTRL */ +#define SSPP_QOS_CTRL_VBLANK_EN BIT(16) +#define SSPP_QOS_CTRL_DANGER_SAFE_EN BIT(0) +#define SSPP_QOS_CTRL_DANGER_VBLANK_MASK 0x3 +#define SSPP_QOS_CTRL_DANGER_VBLANK_OFF 4 +#define SSPP_QOS_CTRL_CREQ_VBLANK_MASK 0x3 +#define SSPP_QOS_CTRL_CREQ_VBLANK_OFF 20 + +/* DPU_SSPP_SCALER_QSEED2 */ +#define SCALE_CONFIG 0x04 +#define COMP0_3_PHASE_STEP_X 0x10 +#define COMP0_3_PHASE_STEP_Y 0x14 +#define COMP1_2_PHASE_STEP_X 0x18 +#define COMP1_2_PHASE_STEP_Y 0x1c +#define COMP0_3_INIT_PHASE_X 0x20 +#define COMP0_3_INIT_PHASE_Y 0x24 +#define COMP1_2_INIT_PHASE_X 0x28 +#define COMP1_2_INIT_PHASE_Y 0x2C +#define VIG_0_QSEED2_SHARP 0x30 + +/* + * Definitions for ViG op modes + */ +#define VIG_OP_CSC_DST_DATAFMT BIT(19) +#define VIG_OP_CSC_SRC_DATAFMT BIT(18) +#define VIG_OP_CSC_EN BIT(17) +#define VIG_OP_MEM_PROT_CONT BIT(15) 
+#define VIG_OP_MEM_PROT_VAL BIT(14) +#define VIG_OP_MEM_PROT_SAT BIT(13) +#define VIG_OP_MEM_PROT_HUE BIT(12) +#define VIG_OP_HIST BIT(8) +#define VIG_OP_SKY_COL BIT(7) +#define VIG_OP_FOIL BIT(6) +#define VIG_OP_SKIN_COL BIT(5) +#define VIG_OP_PA_EN BIT(4) +#define VIG_OP_PA_SAT_ZERO_EXP BIT(2) +#define VIG_OP_MEM_PROT_BLEND BIT(1) + +/* + * Definitions for CSC 10 op modes + */ +#define VIG_CSC_10_SRC_DATAFMT BIT(1) +#define VIG_CSC_10_EN BIT(0) +#define CSC_10BIT_OFFSET 4 + +/* traffic shaper clock in Hz */ +#define TS_CLK 19200000 + + +static int _sspp_subblk_offset(struct dpu_hw_pipe *ctx, + int s_id, + u32 *idx) +{ + int rc = 0; + const struct dpu_sspp_sub_blks *sblk; + + if (!ctx || !ctx->cap || !ctx->cap->sblk) + return -EINVAL; + + sblk = ctx->cap->sblk; + + switch (s_id) { + case DPU_SSPP_SRC: + *idx = sblk->src_blk.base; + break; + case DPU_SSPP_SCALER_QSEED2: + case DPU_SSPP_SCALER_QSEED3: + case DPU_SSPP_SCALER_RGB: + *idx = sblk->scaler_blk.base; + break; + case DPU_SSPP_CSC: + case DPU_SSPP_CSC_10BIT: + *idx = sblk->csc_blk.base; + break; + default: + rc = -EINVAL; + } + + return rc; +} + +static void dpu_hw_sspp_setup_multirect(struct dpu_hw_pipe *ctx, + enum dpu_sspp_multirect_index index, + enum dpu_sspp_multirect_mode mode) +{ + u32 mode_mask; + u32 idx; + + if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx)) + return; + + if (index == DPU_SSPP_RECT_SOLO) { + /** + * if rect index is RECT_SOLO, we cannot expect a + * virtual plane sharing the same SSPP id. So we go + * and disable multirect + */ + mode_mask = 0; + } else { + mode_mask = DPU_REG_READ(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx); + mode_mask |= index; + if (mode == DPU_SSPP_MULTIRECT_TIME_MX) + mode_mask |= BIT(2); + else + mode_mask &= ~BIT(2); + } + + DPU_REG_WRITE(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx, mode_mask); +} + +static void _sspp_setup_opmode(struct dpu_hw_pipe *ctx, + u32 mask, u8 en) +{ + u32 idx; + u32 opmode; + + if (!test_bit(DPU_SSPP_SCALER_QSEED2, &ctx->cap->features) || + _sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED2, &idx) || + !test_bit(DPU_SSPP_CSC, &ctx->cap->features)) + return; + + opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_OP_MODE + idx); + + if (en) + opmode |= mask; + else + opmode &= ~mask; + + DPU_REG_WRITE(&ctx->hw, SSPP_VIG_OP_MODE + idx, opmode); +} + +static void _sspp_setup_csc10_opmode(struct dpu_hw_pipe *ctx, + u32 mask, u8 en) +{ + u32 idx; + u32 opmode; + + if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC_10BIT, &idx)) + return; + + opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx); + if (en) + opmode |= mask; + else + opmode &= ~mask; + + DPU_REG_WRITE(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx, opmode); +} + +/* + * Setup source pixel format, flip, + */ +static void dpu_hw_sspp_setup_format(struct dpu_hw_pipe *ctx, + const struct dpu_format *fmt, u32 flags, + enum dpu_sspp_multirect_index rect_mode) +{ + struct dpu_hw_blk_reg_map *c; + u32 chroma_samp, unpack, src_format; + u32 opmode = 0; + u32 fast_clear = 0; + u32 op_mode_off, unpack_pat_off, format_off; + u32 idx; + + if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !fmt) + return; + + if (rect_mode == DPU_SSPP_RECT_SOLO || rect_mode == DPU_SSPP_RECT_0) { + op_mode_off = SSPP_SRC_OP_MODE; + unpack_pat_off = SSPP_SRC_UNPACK_PATTERN; + format_off = SSPP_SRC_FORMAT; + } else { + op_mode_off = SSPP_SRC_OP_MODE_REC1; + unpack_pat_off = SSPP_SRC_UNPACK_PATTERN_REC1; + format_off = SSPP_SRC_FORMAT_REC1; + } + + c = &ctx->hw; + opmode = DPU_REG_READ(c, op_mode_off + idx); + opmode &= ~(MDSS_MDP_OP_FLIP_LR | 
MDSS_MDP_OP_FLIP_UD | + MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE); + + if (flags & DPU_SSPP_FLIP_LR) + opmode |= MDSS_MDP_OP_FLIP_LR; + if (flags & DPU_SSPP_FLIP_UD) + opmode |= MDSS_MDP_OP_FLIP_UD; + + chroma_samp = fmt->chroma_sample; + if (flags & DPU_SSPP_SOURCE_ROTATED_90) { + if (chroma_samp == DPU_CHROMA_H2V1) + chroma_samp = DPU_CHROMA_H1V2; + else if (chroma_samp == DPU_CHROMA_H1V2) + chroma_samp = DPU_CHROMA_H2V1; + } + + src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) | + (fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) | + (fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0); + + if (flags & DPU_SSPP_ROT_90) + src_format |= BIT(11); /* ROT90 */ + + if (fmt->alpha_enable && fmt->fetch_planes == DPU_PLANE_INTERLEAVED) + src_format |= BIT(8); /* SRCC3_EN */ + + if (flags & DPU_SSPP_SOLID_FILL) + src_format |= BIT(22); + + unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) | + (fmt->element[1] << 8) | (fmt->element[0] << 0); + src_format |= ((fmt->unpack_count - 1) << 12) | + (fmt->unpack_tight << 17) | + (fmt->unpack_align_msb << 18) | + ((fmt->bpp - 1) << 9); + + if (fmt->fetch_mode != DPU_FETCH_LINEAR) { + if (DPU_FORMAT_IS_UBWC(fmt)) + opmode |= MDSS_MDP_OP_BWC_EN; + src_format |= (fmt->fetch_mode & 3) << 30; /*FRAME_FORMAT */ + DPU_REG_WRITE(c, SSPP_FETCH_CONFIG, + DPU_FETCH_CONFIG_RESET_VALUE | + ctx->mdp->highest_bank_bit << 18); + switch (ctx->catalog->caps->ubwc_version) { + case DPU_HW_UBWC_VER_10: + /* TODO: UBWC v1 case */ + break; + case DPU_HW_UBWC_VER_20: + fast_clear = fmt->alpha_enable ? BIT(31) : 0; + DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL, + fast_clear | (ctx->mdp->ubwc_swizzle) | + (ctx->mdp->highest_bank_bit << 4)); + break; + case DPU_HW_UBWC_VER_30: + DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL, + BIT(30) | (ctx->mdp->ubwc_swizzle) | + (ctx->mdp->highest_bank_bit << 4)); + break; + case DPU_HW_UBWC_VER_40: + DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL, + DPU_FORMAT_IS_YUV(fmt) ? 
0 : BIT(30)); + break; + } + } + + opmode |= MDSS_MDP_OP_PE_OVERRIDE; + + /* if this is YUV pixel format, enable CSC */ + if (DPU_FORMAT_IS_YUV(fmt)) + src_format |= BIT(15); + + if (DPU_FORMAT_IS_DX(fmt)) + src_format |= BIT(14); + + /* update scaler opmode, if appropriate */ + if (test_bit(DPU_SSPP_CSC, &ctx->cap->features)) + _sspp_setup_opmode(ctx, VIG_OP_CSC_EN | VIG_OP_CSC_SRC_DATAFMT, + DPU_FORMAT_IS_YUV(fmt)); + else if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features)) + _sspp_setup_csc10_opmode(ctx, + VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT, + DPU_FORMAT_IS_YUV(fmt)); + + DPU_REG_WRITE(c, format_off + idx, src_format); + DPU_REG_WRITE(c, unpack_pat_off + idx, unpack); + DPU_REG_WRITE(c, op_mode_off + idx, opmode); + + /* clear previous UBWC error */ + DPU_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS + idx, BIT(31)); +} + +static void dpu_hw_sspp_setup_pe_config(struct dpu_hw_pipe *ctx, + struct dpu_hw_pixel_ext *pe_ext) +{ + struct dpu_hw_blk_reg_map *c; + u8 color; + u32 lr_pe[4], tb_pe[4], tot_req_pixels[4]; + const u32 bytemask = 0xff; + const u32 shortmask = 0xffff; + u32 idx; + + if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !pe_ext) + return; + + c = &ctx->hw; + + /* program SW pixel extension override for all pipes*/ + for (color = 0; color < DPU_MAX_PLANES; color++) { + /* color 2 has the same set of registers as color 1 */ + if (color == 2) + continue; + + lr_pe[color] = ((pe_ext->right_ftch[color] & bytemask) << 24)| + ((pe_ext->right_rpt[color] & bytemask) << 16)| + ((pe_ext->left_ftch[color] & bytemask) << 8)| + (pe_ext->left_rpt[color] & bytemask); + + tb_pe[color] = ((pe_ext->btm_ftch[color] & bytemask) << 24)| + ((pe_ext->btm_rpt[color] & bytemask) << 16)| + ((pe_ext->top_ftch[color] & bytemask) << 8)| + (pe_ext->top_rpt[color] & bytemask); + + tot_req_pixels[color] = (((pe_ext->roi_h[color] + + pe_ext->num_ext_pxls_top[color] + + pe_ext->num_ext_pxls_btm[color]) & shortmask) << 16) | + ((pe_ext->roi_w[color] + + pe_ext->num_ext_pxls_left[color] + + pe_ext->num_ext_pxls_right[color]) & shortmask); + } + + /* color 0 */ + DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR + idx, lr_pe[0]); + DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB + idx, tb_pe[0]); + DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS + idx, + tot_req_pixels[0]); + + /* color 1 and color 2 */ + DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR + idx, lr_pe[1]); + DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB + idx, tb_pe[1]); + DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS + idx, + tot_req_pixels[1]); + + /* color 3 */ + DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR + idx, lr_pe[3]); + DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB + idx, tb_pe[3]); + DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS + idx, + tot_req_pixels[3]); +} + +static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_pipe *ctx, + struct dpu_hw_pipe_cfg *sspp, + void *scaler_cfg) +{ + u32 idx; + struct dpu_hw_scaler3_cfg *scaler3_cfg = scaler_cfg; + + if (_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx) || !sspp + || !scaler3_cfg) + return; + + dpu_hw_setup_scaler3(&ctx->hw, scaler3_cfg, idx, + ctx->cap->sblk->scaler_blk.version, + sspp->layout.format); +} + +static u32 _dpu_hw_sspp_get_scaler3_ver(struct dpu_hw_pipe *ctx) +{ + u32 idx; + + if (!ctx || _sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx)) + return 0; + + return dpu_hw_get_scaler3_ver(&ctx->hw, idx); +} + +/* + * dpu_hw_sspp_setup_rects() + */ +static void dpu_hw_sspp_setup_rects(struct dpu_hw_pipe *ctx, + struct dpu_hw_pipe_cfg *cfg, + enum dpu_sspp_multirect_index rect_index) +{ + struct
dpu_hw_blk_reg_map *c; + u32 src_size, src_xy, dst_size, dst_xy, ystride0, ystride1; + u32 src_size_off, src_xy_off, out_size_off, out_xy_off; + u32 idx; + + if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !cfg) + return; + + c = &ctx->hw; + + if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0) { + src_size_off = SSPP_SRC_SIZE; + src_xy_off = SSPP_SRC_XY; + out_size_off = SSPP_OUT_SIZE; + out_xy_off = SSPP_OUT_XY; + } else { + src_size_off = SSPP_SRC_SIZE_REC1; + src_xy_off = SSPP_SRC_XY_REC1; + out_size_off = SSPP_OUT_SIZE_REC1; + out_xy_off = SSPP_OUT_XY_REC1; + } + + + /* src and dest rect programming */ + src_xy = (cfg->src_rect.y1 << 16) | cfg->src_rect.x1; + src_size = (drm_rect_height(&cfg->src_rect) << 16) | + drm_rect_width(&cfg->src_rect); + dst_xy = (cfg->dst_rect.y1 << 16) | cfg->dst_rect.x1; + dst_size = (drm_rect_height(&cfg->dst_rect) << 16) | + drm_rect_width(&cfg->dst_rect); + + if (rect_index == DPU_SSPP_RECT_SOLO) { + ystride0 = (cfg->layout.plane_pitch[0]) | + (cfg->layout.plane_pitch[1] << 16); + ystride1 = (cfg->layout.plane_pitch[2]) | + (cfg->layout.plane_pitch[3] << 16); + } else { + ystride0 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE0 + idx); + ystride1 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE1 + idx); + + if (rect_index == DPU_SSPP_RECT_0) { + ystride0 = (ystride0 & 0xFFFF0000) | + (cfg->layout.plane_pitch[0] & 0x0000FFFF); + ystride1 = (ystride1 & 0xFFFF0000)| + (cfg->layout.plane_pitch[2] & 0x0000FFFF); + } else { + ystride0 = (ystride0 & 0x0000FFFF) | + ((cfg->layout.plane_pitch[0] << 16) & + 0xFFFF0000); + ystride1 = (ystride1 & 0x0000FFFF) | + ((cfg->layout.plane_pitch[2] << 16) & + 0xFFFF0000); + } + } + + /* rectangle register programming */ + DPU_REG_WRITE(c, src_size_off + idx, src_size); + DPU_REG_WRITE(c, src_xy_off + idx, src_xy); + DPU_REG_WRITE(c, out_size_off + idx, dst_size); + DPU_REG_WRITE(c, out_xy_off + idx, dst_xy); + + DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE0 + idx, ystride0); + DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE1 + idx, ystride1); +} + +static void dpu_hw_sspp_setup_sourceaddress(struct dpu_hw_pipe *ctx, + struct dpu_hw_pipe_cfg *cfg, + enum dpu_sspp_multirect_index rect_mode) +{ + int i; + u32 idx; + + if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx)) + return; + + if (rect_mode == DPU_SSPP_RECT_SOLO) { + for (i = 0; i < ARRAY_SIZE(cfg->layout.plane_addr); i++) + DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx + i * 0x4, + cfg->layout.plane_addr[i]); + } else if (rect_mode == DPU_SSPP_RECT_0) { + DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx, + cfg->layout.plane_addr[0]); + DPU_REG_WRITE(&ctx->hw, SSPP_SRC2_ADDR + idx, + cfg->layout.plane_addr[2]); + } else { + DPU_REG_WRITE(&ctx->hw, SSPP_SRC1_ADDR + idx, + cfg->layout.plane_addr[0]); + DPU_REG_WRITE(&ctx->hw, SSPP_SRC3_ADDR + idx, + cfg->layout.plane_addr[2]); + } +} + +static void dpu_hw_sspp_setup_csc(struct dpu_hw_pipe *ctx, + const struct dpu_csc_cfg *data) +{ + u32 idx; + bool csc10 = false; + + if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC, &idx) || !data) + return; + + if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features)) { + idx += CSC_10BIT_OFFSET; + csc10 = true; + } + + dpu_hw_csc_setup(&ctx->hw, idx, data, csc10); +} + +static void dpu_hw_sspp_setup_solidfill(struct dpu_hw_pipe *ctx, u32 color, enum + dpu_sspp_multirect_index rect_index) +{ + u32 idx; + + if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx)) + return; + + if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0) + DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR + idx, color); + else + 
DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR_REC1 + idx, + color); +} + +static void dpu_hw_sspp_setup_danger_safe_lut(struct dpu_hw_pipe *ctx, + u32 danger_lut, + u32 safe_lut) +{ + u32 idx; + + if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx)) + return; + + DPU_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, danger_lut); + DPU_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, safe_lut); +} + +static void dpu_hw_sspp_setup_creq_lut(struct dpu_hw_pipe *ctx, + u64 creq_lut) +{ + u32 idx; + + if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx)) + return; + + if (ctx->cap && test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features)) { + DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, creq_lut); + DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_1 + idx, + creq_lut >> 32); + } else { + DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, creq_lut); + } +} + +static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_pipe *ctx, + struct dpu_hw_pipe_qos_cfg *cfg) +{ + u32 idx; + u32 qos_ctrl = 0; + + if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx)) + return; + + if (cfg->vblank_en) { + qos_ctrl |= ((cfg->creq_vblank & + SSPP_QOS_CTRL_CREQ_VBLANK_MASK) << + SSPP_QOS_CTRL_CREQ_VBLANK_OFF); + qos_ctrl |= ((cfg->danger_vblank & + SSPP_QOS_CTRL_DANGER_VBLANK_MASK) << + SSPP_QOS_CTRL_DANGER_VBLANK_OFF); + qos_ctrl |= SSPP_QOS_CTRL_VBLANK_EN; + } + + if (cfg->danger_safe_en) + qos_ctrl |= SSPP_QOS_CTRL_DANGER_SAFE_EN; + + DPU_REG_WRITE(&ctx->hw, SSPP_QOS_CTRL + idx, qos_ctrl); +} + +static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx, + struct dpu_hw_cdp_cfg *cfg, + enum dpu_sspp_multirect_index index) +{ + u32 idx; + u32 cdp_cntl = 0; + u32 cdp_cntl_offset = 0; + + if (!ctx || !cfg) + return; + + if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx)) + return; + + if (index == DPU_SSPP_RECT_SOLO || index == DPU_SSPP_RECT_0) + cdp_cntl_offset = SSPP_CDP_CNTL; + else + cdp_cntl_offset = SSPP_CDP_CNTL_REC1; + + if (cfg->enable) + cdp_cntl |= BIT(0); + if (cfg->ubwc_meta_enable) + cdp_cntl |= BIT(1); + if (cfg->tile_amortize_enable) + cdp_cntl |= BIT(2); + if (cfg->preload_ahead == DPU_SSPP_CDP_PRELOAD_AHEAD_64) + cdp_cntl |= BIT(3); + + DPU_REG_WRITE(&ctx->hw, cdp_cntl_offset, cdp_cntl); +} + +static void _setup_layer_ops(struct dpu_hw_pipe *c, + unsigned long features) +{ + if (test_bit(DPU_SSPP_SRC, &features)) { + c->ops.setup_format = dpu_hw_sspp_setup_format; + c->ops.setup_rects = dpu_hw_sspp_setup_rects; + c->ops.setup_sourceaddress = dpu_hw_sspp_setup_sourceaddress; + c->ops.setup_solidfill = dpu_hw_sspp_setup_solidfill; + c->ops.setup_pe = dpu_hw_sspp_setup_pe_config; + } + + if (test_bit(DPU_SSPP_QOS, &features)) { + c->ops.setup_danger_safe_lut = + dpu_hw_sspp_setup_danger_safe_lut; + c->ops.setup_creq_lut = dpu_hw_sspp_setup_creq_lut; + c->ops.setup_qos_ctrl = dpu_hw_sspp_setup_qos_ctrl; + } + + if (test_bit(DPU_SSPP_CSC, &features) || + test_bit(DPU_SSPP_CSC_10BIT, &features)) + c->ops.setup_csc = dpu_hw_sspp_setup_csc; + + if (test_bit(DPU_SSPP_SMART_DMA_V1, &c->cap->features) || + test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features)) + c->ops.setup_multirect = dpu_hw_sspp_setup_multirect; + + if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) || + test_bit(DPU_SSPP_SCALER_QSEED3LITE, &features) || + test_bit(DPU_SSPP_SCALER_QSEED4, &features)) { + c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3; + c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver; + } + + if (test_bit(DPU_SSPP_CDP, &features)) + c->ops.setup_cdp = dpu_hw_sspp_setup_cdp; +} + +#ifdef CONFIG_DEBUG_FS +int _dpu_hw_sspp_init_debugfs(struct dpu_hw_pipe *hw_pipe, struct 
dpu_kms *kms, struct dentry *entry) +{ + const struct dpu_sspp_cfg *cfg = hw_pipe->cap; + const struct dpu_sspp_sub_blks *sblk = cfg->sblk; + struct dentry *debugfs_root; + char sspp_name[32]; + + snprintf(sspp_name, sizeof(sspp_name), "%d", hw_pipe->idx); + + /* create overall sub-directory for the pipe */ + debugfs_root = + debugfs_create_dir(sspp_name, entry); + + /* don't error check these */ + debugfs_create_xul("features", 0600, + debugfs_root, (unsigned long *)&hw_pipe->cap->features); + + /* add register dump support */ + dpu_debugfs_create_regset32("src_blk", 0400, + debugfs_root, + sblk->src_blk.base + cfg->base, + sblk->src_blk.len, + kms); + + if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) || + cfg->features & BIT(DPU_SSPP_SCALER_QSEED3LITE) || + cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) || + cfg->features & BIT(DPU_SSPP_SCALER_QSEED4)) + dpu_debugfs_create_regset32("scaler_blk", 0400, + debugfs_root, + sblk->scaler_blk.base + cfg->base, + sblk->scaler_blk.len, + kms); + + if (cfg->features & BIT(DPU_SSPP_CSC) || + cfg->features & BIT(DPU_SSPP_CSC_10BIT)) + dpu_debugfs_create_regset32("csc_blk", 0400, + debugfs_root, + sblk->csc_blk.base + cfg->base, + sblk->csc_blk.len, + kms); + + debugfs_create_u32("xin_id", + 0400, + debugfs_root, + (u32 *) &cfg->xin_id); + debugfs_create_u32("clk_ctrl", + 0400, + debugfs_root, + (u32 *) &cfg->clk_ctrl); + debugfs_create_x32("creq_vblank", + 0600, + debugfs_root, + (u32 *) &sblk->creq_vblank); + debugfs_create_x32("danger_vblank", + 0600, + debugfs_root, + (u32 *) &sblk->danger_vblank); + + return 0; +} +#endif + + +static const struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp, + void __iomem *addr, + const struct dpu_mdss_cfg *catalog, + struct dpu_hw_blk_reg_map *b) +{ + int i; + + if ((sspp < SSPP_MAX) && catalog && addr && b) { + for (i = 0; i < catalog->sspp_count; i++) { + if (sspp == catalog->sspp[i].id) { + b->blk_addr = addr + catalog->sspp[i].base; + b->log_mask = DPU_DBG_MASK_SSPP; + return &catalog->sspp[i]; + } + } + } + + return ERR_PTR(-ENOMEM); +} + +struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx, + void __iomem *addr, const struct dpu_mdss_cfg *catalog) +{ + struct dpu_hw_pipe *hw_pipe; + const struct dpu_sspp_cfg *cfg; + + if (!addr || !catalog) + return ERR_PTR(-EINVAL); + + hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL); + if (!hw_pipe) + return ERR_PTR(-ENOMEM); + + cfg = _sspp_offset(idx, addr, catalog, &hw_pipe->hw); + if (IS_ERR_OR_NULL(cfg)) { + kfree(hw_pipe); + return ERR_PTR(-EINVAL); + } + + /* Assign ops */ + hw_pipe->catalog = catalog; + hw_pipe->mdp = &catalog->mdp[0]; + hw_pipe->idx = idx; + hw_pipe->cap = cfg; + _setup_layer_ops(hw_pipe, hw_pipe->cap->features); + + return hw_pipe; +} + +void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx) +{ + kfree(ctx); +} + diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h new file mode 100644 index 000000000..0c95b7e64 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h @@ -0,0 +1,395 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DPU_HW_SSPP_H +#define _DPU_HW_SSPP_H + +#include "dpu_hw_catalog.h" +#include "dpu_hw_mdss.h" +#include "dpu_hw_util.h" +#include "dpu_formats.h" + +struct dpu_hw_pipe; + +/** + * Flags + */ +#define DPU_SSPP_FLIP_LR BIT(0) +#define DPU_SSPP_FLIP_UD BIT(1) +#define DPU_SSPP_SOURCE_ROTATED_90 BIT(2) +#define DPU_SSPP_ROT_90 BIT(3) +#define DPU_SSPP_SOLID_FILL BIT(4) + +/** + * Define all scaler feature bits in catalog + */ +#define DPU_SSPP_SCALER (BIT(DPU_SSPP_SCALER_RGB) | \ + BIT(DPU_SSPP_SCALER_QSEED2) | \ + BIT(DPU_SSPP_SCALER_QSEED3) | \ + BIT(DPU_SSPP_SCALER_QSEED3LITE) | \ + BIT(DPU_SSPP_SCALER_QSEED4)) + +/* + * Define all CSC feature bits in catalog + */ +#define DPU_SSPP_CSC_ANY (BIT(DPU_SSPP_CSC) | \ + BIT(DPU_SSPP_CSC_10BIT)) + +/** + * Component indices + */ +enum { + DPU_SSPP_COMP_0, + DPU_SSPP_COMP_1_2, + DPU_SSPP_COMP_2, + DPU_SSPP_COMP_3, + + DPU_SSPP_COMP_MAX +}; + +/** + * DPU_SSPP_RECT_SOLO - multirect disabled + * DPU_SSPP_RECT_0 - rect0 of a multirect pipe + * DPU_SSPP_RECT_1 - rect1 of a multirect pipe + * + * Note: HW supports multirect with either RECT0 or + * RECT1. Considering no benefit of such configs over + * SOLO mode and to keep the plane management simple, + * we don't support single rect multirect configs. + */ +enum dpu_sspp_multirect_index { + DPU_SSPP_RECT_SOLO = 0, + DPU_SSPP_RECT_0, + DPU_SSPP_RECT_1, +}; + +enum dpu_sspp_multirect_mode { + DPU_SSPP_MULTIRECT_NONE = 0, + DPU_SSPP_MULTIRECT_PARALLEL, + DPU_SSPP_MULTIRECT_TIME_MX, +}; + +enum { + DPU_FRAME_LINEAR, + DPU_FRAME_TILE_A4X, + DPU_FRAME_TILE_A5X, +}; + +enum dpu_hw_filter { + DPU_SCALE_FILTER_NEAREST = 0, + DPU_SCALE_FILTER_BIL, + DPU_SCALE_FILTER_PCMN, + DPU_SCALE_FILTER_CA, + DPU_SCALE_FILTER_MAX +}; + +enum dpu_hw_filter_alpa { + DPU_SCALE_ALPHA_PIXEL_REP, + DPU_SCALE_ALPHA_BIL +}; + +enum dpu_hw_filter_yuv { + DPU_SCALE_2D_4X4, + DPU_SCALE_2D_CIR, + DPU_SCALE_1D_SEP, + DPU_SCALE_BIL +}; + +struct dpu_hw_sharp_cfg { + u32 strength; + u32 edge_thr; + u32 smooth_thr; + u32 noise_thr; +}; + +struct dpu_hw_pixel_ext { + /* scaling factors are enabled for this input layer */ + uint8_t enable_pxl_ext; + + int init_phase_x[DPU_MAX_PLANES]; + int phase_step_x[DPU_MAX_PLANES]; + int init_phase_y[DPU_MAX_PLANES]; + int phase_step_y[DPU_MAX_PLANES]; + + /* + * Number of pixels extension in left, right, top and bottom direction + * for all color components. This pixel value for each color component + * should be sum of fetch + repeat pixels. + */ + int num_ext_pxls_left[DPU_MAX_PLANES]; + int num_ext_pxls_right[DPU_MAX_PLANES]; + int num_ext_pxls_top[DPU_MAX_PLANES]; + int num_ext_pxls_btm[DPU_MAX_PLANES]; + + /* + * Number of pixels that need to be overfetched in left, right, top + * and bottom directions from source image for scaling. + */ + int left_ftch[DPU_MAX_PLANES]; + int right_ftch[DPU_MAX_PLANES]; + int top_ftch[DPU_MAX_PLANES]; + int btm_ftch[DPU_MAX_PLANES]; + + /* + * Number of pixels that need to be repeated in left, right, top and + * bottom directions for scaling.
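+ * (Editor's note: as stated for num_ext_pxls_* above, the per-side totals + * programmed into the LR/TB registers are the sum of these repeat counts + * and the overfetch (ftch) counts.)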
+ */ + int left_rpt[DPU_MAX_PLANES]; + int right_rpt[DPU_MAX_PLANES]; + int top_rpt[DPU_MAX_PLANES]; + int btm_rpt[DPU_MAX_PLANES]; + + uint32_t roi_w[DPU_MAX_PLANES]; + uint32_t roi_h[DPU_MAX_PLANES]; + + /* + * Filter type to be used for scaling in horizontal and vertical + * directions + */ + enum dpu_hw_filter horz_filter[DPU_MAX_PLANES]; + enum dpu_hw_filter vert_filter[DPU_MAX_PLANES]; + +}; + +/** + * struct dpu_hw_pipe_cfg : Pipe description + * @layout: format layout information for programming buffer to hardware + * @src_rect: src ROI, caller takes into account the different operations + * such as decimation, flip etc to program this field + * @dst_rect: destination ROI. + * @index: index of the rectangle of SSPP + * @mode: parallel or time multiplex multirect mode + */ +struct dpu_hw_pipe_cfg { + struct dpu_hw_fmt_layout layout; + struct drm_rect src_rect; + struct drm_rect dst_rect; + enum dpu_sspp_multirect_index index; + enum dpu_sspp_multirect_mode mode; +}; + +/** + * struct dpu_hw_pipe_qos_cfg : Source pipe QoS configuration + * @creq_vblank: creq value generated to vbif during vertical blanking + * @danger_vblank: danger value generated during vertical blanking + * @vblank_en: enable creq_vblank and danger_vblank during vblank + * @danger_safe_en: enable danger safe generation + */ +struct dpu_hw_pipe_qos_cfg { + u32 creq_vblank; + u32 danger_vblank; + bool vblank_en; + bool danger_safe_en; +}; + +/** + * enum CDP preload ahead address size + */ +enum { + DPU_SSPP_CDP_PRELOAD_AHEAD_32, + DPU_SSPP_CDP_PRELOAD_AHEAD_64 +}; + +/** + * struct dpu_hw_pipe_ts_cfg - traffic shaper configuration + * @size: size to prefill in bytes, or zero to disable + * @time: time to prefill in usec, or zero to disable + */ +struct dpu_hw_pipe_ts_cfg { + u64 size; + u64 time; +}; + +/** + * struct dpu_hw_sspp_ops - interface to the SSPP Hw driver functions + * Caller must call the init function to get the pipe context for each pipe + * Assumption is these functions will be called after clocks are enabled + */ +struct dpu_hw_sspp_ops { + /** + * setup_format - setup pixel format, cropping rectangle, flip + * @ctx: Pointer to pipe context + * @fmt: Pointer to pixel format description + * @flags: Extra flags for format config + * @index: rectangle index in multirect + */ + void (*setup_format)(struct dpu_hw_pipe *ctx, + const struct dpu_format *fmt, u32 flags, + enum dpu_sspp_multirect_index index); + + /** + * setup_rects - setup pipe ROI rectangles + * @ctx: Pointer to pipe context + * @cfg: Pointer to pipe config structure + * @index: rectangle index in multirect + */ + void (*setup_rects)(struct dpu_hw_pipe *ctx, + struct dpu_hw_pipe_cfg *cfg, + enum dpu_sspp_multirect_index index); + + /** + * setup_pe - setup pipe pixel extension + * @ctx: Pointer to pipe context + * @pe_ext: Pointer to pixel ext settings + */ + void (*setup_pe)(struct dpu_hw_pipe *ctx, + struct dpu_hw_pixel_ext *pe_ext); + + /** + * setup_sourceaddress - setup pipe source addresses + * @ctx: Pointer to pipe context + * @cfg: Pointer to pipe config structure + * @index: rectangle index in multirect + */ + void (*setup_sourceaddress)(struct dpu_hw_pipe *ctx, + struct dpu_hw_pipe_cfg *cfg, + enum dpu_sspp_multirect_index index); + + /** + * setup_csc - setup color space conversion + * @ctx: Pointer to pipe context + * @data: Pointer to config structure + */ + void (*setup_csc)(struct dpu_hw_pipe *ctx, const struct dpu_csc_cfg *data); + + /** + * setup_solidfill - enable/disable colorfill + * @ctx: Pointer to pipe context + * 
@color: Fill color value + * @index: rectangle index in multirect + */ + void (*setup_solidfill)(struct dpu_hw_pipe *ctx, u32 color, + enum dpu_sspp_multirect_index index); + + /** + * setup_multirect - setup multirect configuration + * @ctx: Pointer to pipe context + * @index: rectangle index in multirect + * @mode: parallel fetch / time multiplex multirect mode + */ + + void (*setup_multirect)(struct dpu_hw_pipe *ctx, + enum dpu_sspp_multirect_index index, + enum dpu_sspp_multirect_mode mode); + + /** + * setup_sharpening - setup sharpening + * @ctx: Pointer to pipe context + * @cfg: Pointer to config structure + */ + void (*setup_sharpening)(struct dpu_hw_pipe *ctx, + struct dpu_hw_sharp_cfg *cfg); + + /** + * setup_danger_safe_lut - setup danger safe LUTs + * @ctx: Pointer to pipe context + * @danger_lut: LUT for generating danger level based on fill level + * @safe_lut: LUT for generating safe level based on fill level + * + */ + void (*setup_danger_safe_lut)(struct dpu_hw_pipe *ctx, + u32 danger_lut, + u32 safe_lut); + + /** + * setup_creq_lut - setup CREQ LUT + * @ctx: Pointer to pipe context + * @creq_lut: LUT for generating creq level based on fill level + * + */ + void (*setup_creq_lut)(struct dpu_hw_pipe *ctx, + u64 creq_lut); + + /** + * setup_qos_ctrl - setup QoS control + * @ctx: Pointer to pipe context + * @cfg: Pointer to pipe QoS configuration + * + */ + void (*setup_qos_ctrl)(struct dpu_hw_pipe *ctx, + struct dpu_hw_pipe_qos_cfg *cfg); + + /** + * setup_histogram - setup histograms + * @ctx: Pointer to pipe context + * @cfg: Pointer to histogram configuration + */ + void (*setup_histogram)(struct dpu_hw_pipe *ctx, + void *cfg); + + /** + * setup_scaler - setup scaler + * @ctx: Pointer to pipe context + * @pipe_cfg: Pointer to pipe configuration + * @scaler_cfg: Pointer to scaler configuration + */ + void (*setup_scaler)(struct dpu_hw_pipe *ctx, + struct dpu_hw_pipe_cfg *pipe_cfg, + void *scaler_cfg); + + /** + * get_scaler_ver - get scaler h/w version + * @ctx: Pointer to pipe context + */ + u32 (*get_scaler_ver)(struct dpu_hw_pipe *ctx); + + /** + * setup_cdp - setup client driven prefetch + * @ctx: Pointer to pipe context + * @cfg: Pointer to cdp configuration + * @index: rectangle index in multirect + */ + void (*setup_cdp)(struct dpu_hw_pipe *ctx, + struct dpu_hw_cdp_cfg *cfg, + enum dpu_sspp_multirect_index index); +}; + +/** + * struct dpu_hw_pipe - pipe description + * @base: hardware block base structure + * @hw: block hardware details + * @catalog: back pointer to catalog + * @mdp: pointer to associated mdp portion of the catalog + * @idx: pipe index + * @cap: pointer to layer_cfg + * @ops: pointer to operations possible for this pipe + */ +struct dpu_hw_pipe { + struct dpu_hw_blk base; + struct dpu_hw_blk_reg_map hw; + const struct dpu_mdss_cfg *catalog; + const struct dpu_mdp_cfg *mdp; + + /* Pipe */ + enum dpu_sspp idx; + const struct dpu_sspp_cfg *cap; + + /* Ops */ + struct dpu_hw_sspp_ops ops; +}; + +struct dpu_kms; +/** + * dpu_hw_sspp_init - initializes the sspp hw driver object. + * Should be called once before accessing every pipe. + * @idx: Pipe index for which driver object is required + * @addr: Mapped register io address of MDP + * @catalog : Pointer to mdss catalog data + */ +struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx, + void __iomem *addr, const struct dpu_mdss_cfg *catalog); + +/** + * dpu_hw_sspp_destroy(): Destroys SSPP driver context + * should be called during Hw pipe cleanup.
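+ * (Editor's note: pairs with dpu_hw_sspp_init() above, which allocated + * the context.)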
+ * @ctx: Pointer to SSPP driver context returned by dpu_hw_sspp_init + */ +void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx); + +void dpu_debugfs_sspp_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root); +int _dpu_hw_sspp_init_debugfs(struct dpu_hw_pipe *hw_pipe, struct dpu_kms *kms, struct dentry *entry); + +#endif /*_DPU_HW_SSPP_H */ + diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c new file mode 100644 index 000000000..c3110a25a --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c @@ -0,0 +1,331 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + */ + +#include "dpu_hwio.h" +#include "dpu_hw_catalog.h" +#include "dpu_hw_top.h" +#include "dpu_kms.h" + +#define SSPP_SPARE 0x28 + +#define FLD_SPLIT_DISPLAY_CMD BIT(1) +#define FLD_SMART_PANEL_FREE_RUN BIT(2) +#define FLD_INTF_1_SW_TRG_MUX BIT(4) +#define FLD_INTF_2_SW_TRG_MUX BIT(8) +#define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF + +#define DANGER_STATUS 0x360 +#define SAFE_STATUS 0x364 + +#define TE_LINE_INTERVAL 0x3F4 + +#define TRAFFIC_SHAPER_EN BIT(31) +#define TRAFFIC_SHAPER_RD_CLIENT(num) (0x030 + (num * 4)) +#define TRAFFIC_SHAPER_WR_CLIENT(num) (0x060 + (num * 4)) +#define TRAFFIC_SHAPER_FIXPOINT_FACTOR 4 + +#define MDP_WD_TIMER_0_CTL 0x380 +#define MDP_WD_TIMER_0_CTL2 0x384 +#define MDP_WD_TIMER_0_LOAD_VALUE 0x388 +#define MDP_WD_TIMER_1_CTL 0x390 +#define MDP_WD_TIMER_1_CTL2 0x394 +#define MDP_WD_TIMER_1_LOAD_VALUE 0x398 +#define MDP_WD_TIMER_2_CTL 0x420 +#define MDP_WD_TIMER_2_CTL2 0x424 +#define MDP_WD_TIMER_2_LOAD_VALUE 0x428 +#define MDP_WD_TIMER_3_CTL 0x430 +#define MDP_WD_TIMER_3_CTL2 0x434 +#define MDP_WD_TIMER_3_LOAD_VALUE 0x438 +#define MDP_WD_TIMER_4_CTL 0x440 +#define MDP_WD_TIMER_4_CTL2 0x444 +#define MDP_WD_TIMER_4_LOAD_VALUE 0x448 + +#define MDP_TICK_COUNT 16 +#define XO_CLK_RATE 19200 +#define MS_TICKS_IN_SEC 1000 + +#define CALCULATE_WD_LOAD_VALUE(fps) \ + ((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE)/(MDP_TICK_COUNT * fps))) + +#define DCE_SEL 0x450 + +static void dpu_hw_setup_split_pipe(struct dpu_hw_mdp *mdp, + struct split_pipe_cfg *cfg) +{ + struct dpu_hw_blk_reg_map *c; + u32 upper_pipe = 0; + u32 lower_pipe = 0; + + if (!mdp || !cfg) + return; + + c = &mdp->hw; + + if (cfg->en) { + if (cfg->mode == INTF_MODE_CMD) { + lower_pipe = FLD_SPLIT_DISPLAY_CMD; + /* interface controlling sw trigger */ + if (cfg->intf == INTF_2) + lower_pipe |= FLD_INTF_1_SW_TRG_MUX; + else + lower_pipe |= FLD_INTF_2_SW_TRG_MUX; + upper_pipe = lower_pipe; + } else { + if (cfg->intf == INTF_2) { + lower_pipe = FLD_INTF_1_SW_TRG_MUX; + upper_pipe = FLD_INTF_2_SW_TRG_MUX; + } else { + lower_pipe = FLD_INTF_2_SW_TRG_MUX; + upper_pipe = FLD_INTF_1_SW_TRG_MUX; + } + } + } + + DPU_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 
0x1 : 0x0); + DPU_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe); + DPU_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe); + DPU_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1); +} + +static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp, + enum dpu_clk_ctrl_type clk_ctrl, bool enable) +{ + struct dpu_hw_blk_reg_map *c; + u32 reg_off, bit_off; + u32 reg_val, new_val; + bool clk_forced_on; + + if (!mdp) + return false; + + c = &mdp->hw; + + if (clk_ctrl <= DPU_CLK_CTRL_NONE || clk_ctrl >= DPU_CLK_CTRL_MAX) + return false; + + reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off; + bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off; + + reg_val = DPU_REG_READ(c, reg_off); + + if (enable) + new_val = reg_val | BIT(bit_off); + else + new_val = reg_val & ~BIT(bit_off); + + DPU_REG_WRITE(c, reg_off, new_val); + + clk_forced_on = !(reg_val & BIT(bit_off)); + + return clk_forced_on; +} + + +static void dpu_hw_get_danger_status(struct dpu_hw_mdp *mdp, + struct dpu_danger_safe_status *status) +{ + struct dpu_hw_blk_reg_map *c; + u32 value; + + if (!mdp || !status) + return; + + c = &mdp->hw; + + value = DPU_REG_READ(c, DANGER_STATUS); + status->mdp = (value >> 0) & 0x3; + status->sspp[SSPP_VIG0] = (value >> 4) & 0x3; + status->sspp[SSPP_VIG1] = (value >> 6) & 0x3; + status->sspp[SSPP_VIG2] = (value >> 8) & 0x3; + status->sspp[SSPP_VIG3] = (value >> 10) & 0x3; + status->sspp[SSPP_RGB0] = (value >> 12) & 0x3; + status->sspp[SSPP_RGB1] = (value >> 14) & 0x3; + status->sspp[SSPP_RGB2] = (value >> 16) & 0x3; + status->sspp[SSPP_RGB3] = (value >> 18) & 0x3; + status->sspp[SSPP_DMA0] = (value >> 20) & 0x3; + status->sspp[SSPP_DMA1] = (value >> 22) & 0x3; + status->sspp[SSPP_DMA2] = (value >> 28) & 0x3; + status->sspp[SSPP_DMA3] = (value >> 30) & 0x3; + status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x3; + status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3; +} + +static void dpu_hw_setup_vsync_source(struct dpu_hw_mdp *mdp, + struct dpu_vsync_source_cfg *cfg) +{ + struct dpu_hw_blk_reg_map *c; + u32 reg, wd_load_value, wd_ctl, wd_ctl2, i; + static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18}; + + if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber))) + return; + + c = &mdp->hw; + reg = DPU_REG_READ(c, MDP_VSYNC_SEL); + for (i = 0; i < cfg->pp_count; i++) { + int pp_idx = cfg->ppnumber[i] - PINGPONG_0; + + if (pp_idx >= ARRAY_SIZE(pp_offset)) + continue; + + reg &= ~(0xf << pp_offset[pp_idx]); + reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx]; + } + DPU_REG_WRITE(c, MDP_VSYNC_SEL, reg); + + if (cfg->vsync_source >= DPU_VSYNC_SOURCE_WD_TIMER_4 && + cfg->vsync_source <= DPU_VSYNC_SOURCE_WD_TIMER_0) { + switch (cfg->vsync_source) { + case DPU_VSYNC_SOURCE_WD_TIMER_4: + wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE; + wd_ctl = MDP_WD_TIMER_4_CTL; + wd_ctl2 = MDP_WD_TIMER_4_CTL2; + break; + case DPU_VSYNC_SOURCE_WD_TIMER_3: + wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE; + wd_ctl = MDP_WD_TIMER_3_CTL; + wd_ctl2 = MDP_WD_TIMER_3_CTL2; + break; + case DPU_VSYNC_SOURCE_WD_TIMER_2: + wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE; + wd_ctl = MDP_WD_TIMER_2_CTL; + wd_ctl2 = MDP_WD_TIMER_2_CTL2; + break; + case DPU_VSYNC_SOURCE_WD_TIMER_1: + wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE; + wd_ctl = MDP_WD_TIMER_1_CTL; + wd_ctl2 = MDP_WD_TIMER_1_CTL2; + break; + case DPU_VSYNC_SOURCE_WD_TIMER_0: + default: + wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE; + wd_ctl = MDP_WD_TIMER_0_CTL; + wd_ctl2 = MDP_WD_TIMER_0_CTL2; + break; + } + + DPU_REG_WRITE(c, wd_load_value, + 
CALCULATE_WD_LOAD_VALUE(cfg->frame_rate)); + + DPU_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */ + reg = DPU_REG_READ(c, wd_ctl2); + reg |= BIT(8); /* enable heartbeat timer */ + reg |= BIT(0); /* enable WD timer */ + DPU_REG_WRITE(c, wd_ctl2, reg); + + /* make sure that timers are enabled/disabled for vsync state */ + wmb(); + } +} + +static void dpu_hw_get_safe_status(struct dpu_hw_mdp *mdp, + struct dpu_danger_safe_status *status) +{ + struct dpu_hw_blk_reg_map *c; + u32 value; + + if (!mdp || !status) + return; + + c = &mdp->hw; + + value = DPU_REG_READ(c, SAFE_STATUS); + status->mdp = (value >> 0) & 0x1; + status->sspp[SSPP_VIG0] = (value >> 4) & 0x1; + status->sspp[SSPP_VIG1] = (value >> 6) & 0x1; + status->sspp[SSPP_VIG2] = (value >> 8) & 0x1; + status->sspp[SSPP_VIG3] = (value >> 10) & 0x1; + status->sspp[SSPP_RGB0] = (value >> 12) & 0x1; + status->sspp[SSPP_RGB1] = (value >> 14) & 0x1; + status->sspp[SSPP_RGB2] = (value >> 16) & 0x1; + status->sspp[SSPP_RGB3] = (value >> 18) & 0x1; + status->sspp[SSPP_DMA0] = (value >> 20) & 0x1; + status->sspp[SSPP_DMA1] = (value >> 22) & 0x1; + status->sspp[SSPP_DMA2] = (value >> 28) & 0x1; + status->sspp[SSPP_DMA3] = (value >> 30) & 0x1; + status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x1; + status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1; +} + +static void dpu_hw_intf_audio_select(struct dpu_hw_mdp *mdp) +{ + struct dpu_hw_blk_reg_map *c; + + if (!mdp) + return; + + c = &mdp->hw; + + DPU_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1); +} + +static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops, + unsigned long cap) +{ + ops->setup_split_pipe = dpu_hw_setup_split_pipe; + ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl; + ops->get_danger_status = dpu_hw_get_danger_status; + ops->setup_vsync_source = dpu_hw_setup_vsync_source; + ops->get_safe_status = dpu_hw_get_safe_status; + + if (cap & BIT(DPU_MDP_AUDIO_SELECT)) + ops->intf_audio_select = dpu_hw_intf_audio_select; +} + +static const struct dpu_mdp_cfg *_top_offset(enum dpu_mdp mdp, + const struct dpu_mdss_cfg *m, + void __iomem *addr, + struct dpu_hw_blk_reg_map *b) +{ + int i; + + if (!m || !addr || !b) + return ERR_PTR(-EINVAL); + + for (i = 0; i < m->mdp_count; i++) { + if (mdp == m->mdp[i].id) { + b->blk_addr = addr + m->mdp[i].base; + b->log_mask = DPU_DBG_MASK_TOP; + return &m->mdp[i]; + } + } + + return ERR_PTR(-EINVAL); +} + +struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx, + void __iomem *addr, + const struct dpu_mdss_cfg *m) +{ + struct dpu_hw_mdp *mdp; + const struct dpu_mdp_cfg *cfg; + + if (!addr || !m) + return ERR_PTR(-EINVAL); + + mdp = kzalloc(sizeof(*mdp), GFP_KERNEL); + if (!mdp) + return ERR_PTR(-ENOMEM); + + cfg = _top_offset(idx, m, addr, &mdp->hw); + if (IS_ERR_OR_NULL(cfg)) { + kfree(mdp); + return ERR_PTR(-EINVAL); + } + + /* + * Assign ops + */ + mdp->idx = idx; + mdp->caps = cfg; + _setup_mdp_ops(&mdp->ops, mdp->caps->features); + + return mdp; +} + +void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp) +{ + kfree(mdp); +} + diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h new file mode 100644 index 000000000..a1a9e44be --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
 + */
+
+#ifndef _DPU_HW_TOP_H
+#define _DPU_HW_TOP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+struct dpu_hw_mdp;
+
+/**
+ * struct traffic_shaper_cfg: traffic shaper configuration
+ * @en : enable/disable traffic shaper
+ * @rd_client : true if read client; false if write client
+ * @client_id : client identifier
+ * @bpc_denom : denominator of bytes per clock
+ * @bpc_numer : numerator of bytes per clock
+ */
+struct traffic_shaper_cfg {
+	bool en;
+	bool rd_client;
+	u32 client_id;
+	u32 bpc_denom;
+	u64 bpc_numer;
+};
+
+/**
+ * struct split_pipe_cfg - pipe configuration for dual display panels
+ * @en : Enable/disable dual pipe configuration
+ * @mode : Panel interface mode
+ * @intf : Interface id for main control path
+ * @split_flush_en: Allows both the paths to be flushed when master path is
+ *		    flushed
+ */
+struct split_pipe_cfg {
+	bool en;
+	enum dpu_intf_mode mode;
+	enum dpu_intf intf;
+	bool split_flush_en;
+};
+
+/**
+ * struct dpu_danger_safe_status: danger and safe status signals
+ * @mdp: top level status
+ * @sspp: source pipe status
+ */
+struct dpu_danger_safe_status {
+	u8 mdp;
+	u8 sspp[SSPP_MAX];
+};
+
+/**
+ * struct dpu_vsync_source_cfg - configure vsync source and configure the
+ * watchdog timers if required.
+ * @pp_count: number of ping pongs active
+ * @frame_rate: Display frame rate
+ * @ppnumber: ping pong index array
+ * @vsync_source: vsync source selection
+ */
+struct dpu_vsync_source_cfg {
+	u32 pp_count;
+	u32 frame_rate;
+	u32 ppnumber[PINGPONG_MAX];
+	u32 vsync_source;
+};
+
+/**
+ * struct dpu_hw_mdp_ops - interface to the MDP TOP Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled.
+ * @setup_split_pipe : Programs the pipe control registers
+ * @setup_traffic_shaper : Programs traffic shaper control
+ */
+struct dpu_hw_mdp_ops {
+	/**
+	 * setup_split_pipe() : Registers are not double buffered, this
+	 * function should be called before timing control enable
+	 * @mdp : mdp top context driver
+	 * @cfg : upper and lower part of pipe configuration
+	 */
+	void (*setup_split_pipe)(struct dpu_hw_mdp *mdp,
+			struct split_pipe_cfg *cfg);
+
+	/**
+	 * setup_traffic_shaper() : Setup traffic shaper control
+	 * @mdp : mdp top context driver
+	 * @cfg : traffic shaper configuration
+	 */
+	void (*setup_traffic_shaper)(struct dpu_hw_mdp *mdp,
+			struct traffic_shaper_cfg *cfg);
+
+	/**
+	 * setup_clk_force_ctrl - set clock force control
+	 * @mdp: mdp top context driver
+	 * @clk_ctrl: clock to be controlled
+	 * @enable: force on enable
+	 * @return: true if the clock is forced-on by this call
+	 */
+	bool (*setup_clk_force_ctrl)(struct dpu_hw_mdp *mdp,
+			enum dpu_clk_ctrl_type clk_ctrl, bool enable);
+
+	/**
+	 * get_danger_status - get danger status
+	 * @mdp: mdp top context driver
+	 * @status: Pointer to danger safe status
+	 */
+	void (*get_danger_status)(struct dpu_hw_mdp *mdp,
+			struct dpu_danger_safe_status *status);
+
+	/**
+	 * setup_vsync_source - setup vsync source configuration details
+	 * @mdp: mdp top context driver
+	 * @cfg: vsync source selection configuration
+	 */
+	void (*setup_vsync_source)(struct dpu_hw_mdp *mdp,
+				struct dpu_vsync_source_cfg *cfg);
+
+	/**
+	 * get_safe_status - get safe status
+	 * @mdp: mdp top context driver
+	 * @status: Pointer to danger safe status
+	 */
+	void (*get_safe_status)(struct dpu_hw_mdp *mdp,
+			struct dpu_danger_safe_status *status);
+
+	/**
+	 * intf_audio_select - select the external
interface for audio + * @mdp: mdp top context driver + */ + void (*intf_audio_select)(struct dpu_hw_mdp *mdp); +}; + +struct dpu_hw_mdp { + struct dpu_hw_blk base; + struct dpu_hw_blk_reg_map hw; + + /* top */ + enum dpu_mdp idx; + const struct dpu_mdp_cfg *caps; + + /* ops */ + struct dpu_hw_mdp_ops ops; +}; + +/** + * dpu_hw_mdptop_init - initializes the top driver for the passed idx + * @idx: Interface index for which driver object is required + * @addr: Mapped register io address of MDP + * @m: Pointer to mdss catalog data + */ +struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx, + void __iomem *addr, + const struct dpu_mdss_cfg *m); + +void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp); + +#endif /*_DPU_HW_TOP_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c new file mode 100644 index 000000000..1b7439ae6 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c @@ -0,0 +1,492 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + */ +#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ + +#include "msm_drv.h" +#include "dpu_kms.h" +#include "dpu_hw_mdss.h" +#include "dpu_hw_util.h" + +/* using a file static variables for debugfs access */ +static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE; + +/* DPU_SCALER_QSEED3 */ +#define QSEED3_HW_VERSION 0x00 +#define QSEED3_OP_MODE 0x04 +#define QSEED3_RGB2Y_COEFF 0x08 +#define QSEED3_PHASE_INIT 0x0C +#define QSEED3_PHASE_STEP_Y_H 0x10 +#define QSEED3_PHASE_STEP_Y_V 0x14 +#define QSEED3_PHASE_STEP_UV_H 0x18 +#define QSEED3_PHASE_STEP_UV_V 0x1C +#define QSEED3_PRELOAD 0x20 +#define QSEED3_DE_SHARPEN 0x24 +#define QSEED3_DE_SHARPEN_CTL 0x28 +#define QSEED3_DE_SHAPE_CTL 0x2C +#define QSEED3_DE_THRESHOLD 0x30 +#define QSEED3_DE_ADJUST_DATA_0 0x34 +#define QSEED3_DE_ADJUST_DATA_1 0x38 +#define QSEED3_DE_ADJUST_DATA_2 0x3C +#define QSEED3_SRC_SIZE_Y_RGB_A 0x40 +#define QSEED3_SRC_SIZE_UV 0x44 +#define QSEED3_DST_SIZE 0x48 +#define QSEED3_COEF_LUT_CTRL 0x4C +#define QSEED3_COEF_LUT_SWAP_BIT 0 +#define QSEED3_COEF_LUT_DIR_BIT 1 +#define QSEED3_COEF_LUT_Y_CIR_BIT 2 +#define QSEED3_COEF_LUT_UV_CIR_BIT 3 +#define QSEED3_COEF_LUT_Y_SEP_BIT 4 +#define QSEED3_COEF_LUT_UV_SEP_BIT 5 +#define QSEED3_BUFFER_CTRL 0x50 +#define QSEED3_CLK_CTRL0 0x54 +#define QSEED3_CLK_CTRL1 0x58 +#define QSEED3_CLK_STATUS 0x5C +#define QSEED3_PHASE_INIT_Y_H 0x90 +#define QSEED3_PHASE_INIT_Y_V 0x94 +#define QSEED3_PHASE_INIT_UV_H 0x98 +#define QSEED3_PHASE_INIT_UV_V 0x9C +#define QSEED3_COEF_LUT 0x100 +#define QSEED3_FILTERS 5 +#define QSEED3_LUT_REGIONS 4 +#define QSEED3_CIRCULAR_LUTS 9 +#define QSEED3_SEPARABLE_LUTS 10 +#define QSEED3_LUT_SIZE 60 +#define QSEED3_ENABLE 2 +#define QSEED3_DIR_LUT_SIZE (200 * sizeof(u32)) +#define QSEED3_CIR_LUT_SIZE \ + (QSEED3_LUT_SIZE * QSEED3_CIRCULAR_LUTS * sizeof(u32)) +#define QSEED3_SEP_LUT_SIZE \ + (QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32)) + +/* DPU_SCALER_QSEED3LITE */ +#define QSEED3LITE_COEF_LUT_Y_SEP_BIT 4 +#define QSEED3LITE_COEF_LUT_UV_SEP_BIT 5 +#define QSEED3LITE_COEF_LUT_CTRL 0x4C +#define QSEED3LITE_COEF_LUT_SWAP_BIT 0 +#define QSEED3LITE_DIR_FILTER_WEIGHT 0x60 +#define QSEED3LITE_FILTERS 2 +#define QSEED3LITE_SEPARABLE_LUTS 10 +#define QSEED3LITE_LUT_SIZE 33 +#define QSEED3LITE_SEP_LUT_SIZE \ + (QSEED3LITE_LUT_SIZE * QSEED3LITE_SEPARABLE_LUTS * sizeof(u32)) + + +void dpu_reg_write(struct 
dpu_hw_blk_reg_map *c, + u32 reg_off, + u32 val, + const char *name) +{ + /* don't need to mutex protect this */ + if (c->log_mask & dpu_hw_util_log_mask) + DPU_DEBUG_DRIVER("[%s:0x%X] <= 0x%X\n", + name, reg_off, val); + writel_relaxed(val, c->blk_addr + reg_off); +} + +int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off) +{ + return readl_relaxed(c->blk_addr + reg_off); +} + +u32 *dpu_hw_util_get_log_mask_ptr(void) +{ + return &dpu_hw_util_log_mask; +} + +static void _dpu_hw_setup_scaler3_lut(struct dpu_hw_blk_reg_map *c, + struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset) +{ + int i, j, filter; + int config_lut = 0x0; + unsigned long lut_flags; + u32 lut_addr, lut_offset, lut_len; + u32 *lut[QSEED3_FILTERS] = {NULL, NULL, NULL, NULL, NULL}; + static const uint32_t off_tbl[QSEED3_FILTERS][QSEED3_LUT_REGIONS][2] = { + {{18, 0x000}, {12, 0x120}, {12, 0x1E0}, {8, 0x2A0} }, + {{6, 0x320}, {3, 0x3E0}, {3, 0x440}, {3, 0x4A0} }, + {{6, 0x500}, {3, 0x5c0}, {3, 0x620}, {3, 0x680} }, + {{6, 0x380}, {3, 0x410}, {3, 0x470}, {3, 0x4d0} }, + {{6, 0x560}, {3, 0x5f0}, {3, 0x650}, {3, 0x6b0} }, + }; + + lut_flags = (unsigned long) scaler3_cfg->lut_flag; + if (test_bit(QSEED3_COEF_LUT_DIR_BIT, &lut_flags) && + (scaler3_cfg->dir_len == QSEED3_DIR_LUT_SIZE)) { + lut[0] = scaler3_cfg->dir_lut; + config_lut = 1; + } + if (test_bit(QSEED3_COEF_LUT_Y_CIR_BIT, &lut_flags) && + (scaler3_cfg->y_rgb_cir_lut_idx < QSEED3_CIRCULAR_LUTS) && + (scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) { + lut[1] = scaler3_cfg->cir_lut + + scaler3_cfg->y_rgb_cir_lut_idx * QSEED3_LUT_SIZE; + config_lut = 1; + } + if (test_bit(QSEED3_COEF_LUT_UV_CIR_BIT, &lut_flags) && + (scaler3_cfg->uv_cir_lut_idx < QSEED3_CIRCULAR_LUTS) && + (scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) { + lut[2] = scaler3_cfg->cir_lut + + scaler3_cfg->uv_cir_lut_idx * QSEED3_LUT_SIZE; + config_lut = 1; + } + if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) && + (scaler3_cfg->y_rgb_sep_lut_idx < QSEED3_SEPARABLE_LUTS) && + (scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) { + lut[3] = scaler3_cfg->sep_lut + + scaler3_cfg->y_rgb_sep_lut_idx * QSEED3_LUT_SIZE; + config_lut = 1; + } + if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) && + (scaler3_cfg->uv_sep_lut_idx < QSEED3_SEPARABLE_LUTS) && + (scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) { + lut[4] = scaler3_cfg->sep_lut + + scaler3_cfg->uv_sep_lut_idx * QSEED3_LUT_SIZE; + config_lut = 1; + } + + if (config_lut) { + for (filter = 0; filter < QSEED3_FILTERS; filter++) { + if (!lut[filter]) + continue; + lut_offset = 0; + for (i = 0; i < QSEED3_LUT_REGIONS; i++) { + lut_addr = QSEED3_COEF_LUT + offset + + off_tbl[filter][i][1]; + lut_len = off_tbl[filter][i][0] << 2; + for (j = 0; j < lut_len; j++) { + DPU_REG_WRITE(c, + lut_addr, + (lut[filter])[lut_offset++]); + lut_addr += 4; + } + } + } + } + + if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags)) + DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0)); + +} + +static void _dpu_hw_setup_scaler3lite_lut(struct dpu_hw_blk_reg_map *c, + struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset) +{ + int j, filter; + int config_lut = 0x0; + unsigned long lut_flags; + u32 lut_addr, lut_offset; + u32 *lut[QSEED3LITE_FILTERS] = {NULL, NULL}; + static const uint32_t off_tbl[QSEED3_FILTERS] = { 0x000, 0x200 }; + + DPU_REG_WRITE(c, QSEED3LITE_DIR_FILTER_WEIGHT + offset, scaler3_cfg->dir_weight); + + if (!scaler3_cfg->sep_lut) + return; + + lut_flags = (unsigned long) scaler3_cfg->lut_flag; + if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) && + 
(scaler3_cfg->y_rgb_sep_lut_idx < QSEED3LITE_SEPARABLE_LUTS) && + (scaler3_cfg->sep_len == QSEED3LITE_SEP_LUT_SIZE)) { + lut[0] = scaler3_cfg->sep_lut + + scaler3_cfg->y_rgb_sep_lut_idx * QSEED3LITE_LUT_SIZE; + config_lut = 1; + } + if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) && + (scaler3_cfg->uv_sep_lut_idx < QSEED3LITE_SEPARABLE_LUTS) && + (scaler3_cfg->sep_len == QSEED3LITE_SEP_LUT_SIZE)) { + lut[1] = scaler3_cfg->sep_lut + + scaler3_cfg->uv_sep_lut_idx * QSEED3LITE_LUT_SIZE; + config_lut = 1; + } + + if (config_lut) { + for (filter = 0; filter < QSEED3LITE_FILTERS; filter++) { + if (!lut[filter]) + continue; + lut_offset = 0; + lut_addr = QSEED3_COEF_LUT + offset + off_tbl[filter]; + for (j = 0; j < QSEED3LITE_LUT_SIZE; j++) { + DPU_REG_WRITE(c, + lut_addr, + (lut[filter])[lut_offset++]); + lut_addr += 4; + } + } + } + + if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags)) + DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0)); + +} + +static void _dpu_hw_setup_scaler3_de(struct dpu_hw_blk_reg_map *c, + struct dpu_hw_scaler3_de_cfg *de_cfg, u32 offset) +{ + u32 sharp_lvl, sharp_ctl, shape_ctl, de_thr; + u32 adjust_a, adjust_b, adjust_c; + + if (!de_cfg->enable) + return; + + sharp_lvl = (de_cfg->sharpen_level1 & 0x1FF) | + ((de_cfg->sharpen_level2 & 0x1FF) << 16); + + sharp_ctl = ((de_cfg->limit & 0xF) << 9) | + ((de_cfg->prec_shift & 0x7) << 13) | + ((de_cfg->clip & 0x7) << 16); + + shape_ctl = (de_cfg->thr_quiet & 0xFF) | + ((de_cfg->thr_dieout & 0x3FF) << 16); + + de_thr = (de_cfg->thr_low & 0x3FF) | + ((de_cfg->thr_high & 0x3FF) << 16); + + adjust_a = (de_cfg->adjust_a[0] & 0x3FF) | + ((de_cfg->adjust_a[1] & 0x3FF) << 10) | + ((de_cfg->adjust_a[2] & 0x3FF) << 20); + + adjust_b = (de_cfg->adjust_b[0] & 0x3FF) | + ((de_cfg->adjust_b[1] & 0x3FF) << 10) | + ((de_cfg->adjust_b[2] & 0x3FF) << 20); + + adjust_c = (de_cfg->adjust_c[0] & 0x3FF) | + ((de_cfg->adjust_c[1] & 0x3FF) << 10) | + ((de_cfg->adjust_c[2] & 0x3FF) << 20); + + DPU_REG_WRITE(c, QSEED3_DE_SHARPEN + offset, sharp_lvl); + DPU_REG_WRITE(c, QSEED3_DE_SHARPEN_CTL + offset, sharp_ctl); + DPU_REG_WRITE(c, QSEED3_DE_SHAPE_CTL + offset, shape_ctl); + DPU_REG_WRITE(c, QSEED3_DE_THRESHOLD + offset, de_thr); + DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_0 + offset, adjust_a); + DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_1 + offset, adjust_b); + DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_2 + offset, adjust_c); + +} + +void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c, + struct dpu_hw_scaler3_cfg *scaler3_cfg, + u32 scaler_offset, u32 scaler_version, + const struct dpu_format *format) +{ + u32 op_mode = 0; + u32 phase_init, preload, src_y_rgb, src_uv, dst; + + if (!scaler3_cfg->enable) + goto end; + + op_mode |= BIT(0); + op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16; + + if (format && DPU_FORMAT_IS_YUV(format)) { + op_mode |= BIT(12); + op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24; + } + + op_mode |= (scaler3_cfg->blend_cfg & 1) << 31; + op_mode |= (scaler3_cfg->dir_en) ? 
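+			  /*
+			   * Editorial note: BIT(4) in OP_MODE turns on the
+			   * QSEED3 direction-detection stage used for
+			   * edge-directed upscaling.
+			   */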
BIT(4) : 0; + + preload = + ((scaler3_cfg->preload_x[0] & 0x7F) << 0) | + ((scaler3_cfg->preload_y[0] & 0x7F) << 8) | + ((scaler3_cfg->preload_x[1] & 0x7F) << 16) | + ((scaler3_cfg->preload_y[1] & 0x7F) << 24); + + src_y_rgb = (scaler3_cfg->src_width[0] & 0x1FFFF) | + ((scaler3_cfg->src_height[0] & 0x1FFFF) << 16); + + src_uv = (scaler3_cfg->src_width[1] & 0x1FFFF) | + ((scaler3_cfg->src_height[1] & 0x1FFFF) << 16); + + dst = (scaler3_cfg->dst_width & 0x1FFFF) | + ((scaler3_cfg->dst_height & 0x1FFFF) << 16); + + if (scaler3_cfg->de.enable) { + _dpu_hw_setup_scaler3_de(c, &scaler3_cfg->de, scaler_offset); + op_mode |= BIT(8); + } + + if (scaler3_cfg->lut_flag) { + if (scaler_version < 0x2004) + _dpu_hw_setup_scaler3_lut(c, scaler3_cfg, scaler_offset); + else + _dpu_hw_setup_scaler3lite_lut(c, scaler3_cfg, scaler_offset); + } + + if (scaler_version == 0x1002) { + phase_init = + ((scaler3_cfg->init_phase_x[0] & 0x3F) << 0) | + ((scaler3_cfg->init_phase_y[0] & 0x3F) << 8) | + ((scaler3_cfg->init_phase_x[1] & 0x3F) << 16) | + ((scaler3_cfg->init_phase_y[1] & 0x3F) << 24); + DPU_REG_WRITE(c, QSEED3_PHASE_INIT + scaler_offset, phase_init); + } else { + DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_H + scaler_offset, + scaler3_cfg->init_phase_x[0] & 0x1FFFFF); + DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_V + scaler_offset, + scaler3_cfg->init_phase_y[0] & 0x1FFFFF); + DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_H + scaler_offset, + scaler3_cfg->init_phase_x[1] & 0x1FFFFF); + DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_V + scaler_offset, + scaler3_cfg->init_phase_y[1] & 0x1FFFFF); + } + + DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_H + scaler_offset, + scaler3_cfg->phase_step_x[0] & 0xFFFFFF); + + DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_V + scaler_offset, + scaler3_cfg->phase_step_y[0] & 0xFFFFFF); + + DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_H + scaler_offset, + scaler3_cfg->phase_step_x[1] & 0xFFFFFF); + + DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_V + scaler_offset, + scaler3_cfg->phase_step_y[1] & 0xFFFFFF); + + DPU_REG_WRITE(c, QSEED3_PRELOAD + scaler_offset, preload); + + DPU_REG_WRITE(c, QSEED3_SRC_SIZE_Y_RGB_A + scaler_offset, src_y_rgb); + + DPU_REG_WRITE(c, QSEED3_SRC_SIZE_UV + scaler_offset, src_uv); + + DPU_REG_WRITE(c, QSEED3_DST_SIZE + scaler_offset, dst); + +end: + if (format && !DPU_FORMAT_IS_DX(format)) + op_mode |= BIT(14); + + if (format && format->alpha_enable) { + op_mode |= BIT(10); + if (scaler_version == 0x1002) + op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x1) << 30; + else + op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x3) << 29; + } + + DPU_REG_WRITE(c, QSEED3_OP_MODE + scaler_offset, op_mode); +} + +u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c, + u32 scaler_offset) +{ + return DPU_REG_READ(c, QSEED3_HW_VERSION + scaler_offset); +} + +void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c, + u32 csc_reg_off, + const struct dpu_csc_cfg *data, bool csc10) +{ + static const u32 matrix_shift = 7; + u32 clamp_shift = csc10 ? 
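+			  /*
+			   * Editorial note: each clamp register packs a
+			   * high/low limit pair; 10-bit CSC limits need more
+			   * than 8 bits, so they use 16-bit fields, while
+			   * 8-bit CSC packs the pair into 8-bit fields.
+			   */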
16 : 8; + u32 val; + + /* matrix coeff - convert S15.16 to S4.9 */ + val = ((data->csc_mv[0] >> matrix_shift) & 0x1FFF) | + (((data->csc_mv[1] >> matrix_shift) & 0x1FFF) << 16); + DPU_REG_WRITE(c, csc_reg_off, val); + val = ((data->csc_mv[2] >> matrix_shift) & 0x1FFF) | + (((data->csc_mv[3] >> matrix_shift) & 0x1FFF) << 16); + DPU_REG_WRITE(c, csc_reg_off + 0x4, val); + val = ((data->csc_mv[4] >> matrix_shift) & 0x1FFF) | + (((data->csc_mv[5] >> matrix_shift) & 0x1FFF) << 16); + DPU_REG_WRITE(c, csc_reg_off + 0x8, val); + val = ((data->csc_mv[6] >> matrix_shift) & 0x1FFF) | + (((data->csc_mv[7] >> matrix_shift) & 0x1FFF) << 16); + DPU_REG_WRITE(c, csc_reg_off + 0xc, val); + val = (data->csc_mv[8] >> matrix_shift) & 0x1FFF; + DPU_REG_WRITE(c, csc_reg_off + 0x10, val); + + /* Pre clamp */ + val = (data->csc_pre_lv[0] << clamp_shift) | data->csc_pre_lv[1]; + DPU_REG_WRITE(c, csc_reg_off + 0x14, val); + val = (data->csc_pre_lv[2] << clamp_shift) | data->csc_pre_lv[3]; + DPU_REG_WRITE(c, csc_reg_off + 0x18, val); + val = (data->csc_pre_lv[4] << clamp_shift) | data->csc_pre_lv[5]; + DPU_REG_WRITE(c, csc_reg_off + 0x1c, val); + + /* Post clamp */ + val = (data->csc_post_lv[0] << clamp_shift) | data->csc_post_lv[1]; + DPU_REG_WRITE(c, csc_reg_off + 0x20, val); + val = (data->csc_post_lv[2] << clamp_shift) | data->csc_post_lv[3]; + DPU_REG_WRITE(c, csc_reg_off + 0x24, val); + val = (data->csc_post_lv[4] << clamp_shift) | data->csc_post_lv[5]; + DPU_REG_WRITE(c, csc_reg_off + 0x28, val); + + /* Pre-Bias */ + DPU_REG_WRITE(c, csc_reg_off + 0x2c, data->csc_pre_bv[0]); + DPU_REG_WRITE(c, csc_reg_off + 0x30, data->csc_pre_bv[1]); + DPU_REG_WRITE(c, csc_reg_off + 0x34, data->csc_pre_bv[2]); + + /* Post-Bias */ + DPU_REG_WRITE(c, csc_reg_off + 0x38, data->csc_post_bv[0]); + DPU_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]); + DPU_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]); +} + +/** + * _dpu_hw_get_qos_lut - get LUT mapping based on fill level + * @tbl: Pointer to LUT table + * @total_fl: fill level + * Return: LUT setting corresponding to the fill level + */ +u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl, + u32 total_fl) +{ + int i; + + if (!tbl || !tbl->nentry || !tbl->entries) + return 0; + + for (i = 0; i < tbl->nentry; i++) + if (total_fl <= tbl->entries[i].fl) + return tbl->entries[i].lut; + + /* if last fl is zero, use as default */ + if (!tbl->entries[i-1].fl) + return tbl->entries[i-1].lut; + + return 0; +} + +/* + * note: Aside from encoders, input_sel should be set to 0x0 by default + */ +void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c, + u32 misr_ctrl_offset, u8 input_sel) +{ + u32 config = 0; + + DPU_REG_WRITE(c, misr_ctrl_offset, MISR_CTRL_STATUS_CLEAR); + + /* Clear old MISR value (in case it's read before a new value is calculated)*/ + wmb(); + + config = MISR_FRAME_COUNT | MISR_CTRL_ENABLE | MISR_CTRL_FREE_RUN_MASK | + ((input_sel & 0xF) << 24); + DPU_REG_WRITE(c, misr_ctrl_offset, config); +} + +int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c, + u32 misr_ctrl_offset, + u32 misr_signature_offset, + u32 *misr_value) +{ + u32 ctrl = 0; + + if (!misr_value) + return -EINVAL; + + ctrl = DPU_REG_READ(c, misr_ctrl_offset); + + if (!(ctrl & MISR_CTRL_ENABLE)) + return -ENODATA; + + if (!(ctrl & MISR_CTRL_STATUS)) + return -EINVAL; + + *misr_value = DPU_REG_READ(c, misr_signature_offset); + + return 0; +} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h new file mode 100644 index 000000000..4ae2a4343 
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -0,0 +1,360 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_UTIL_H
+#define _DPU_HW_UTIL_H
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_catalog.h"
+
+#define REG_MASK(n)			((BIT(n)) - 1)
+#define MISR_FRAME_COUNT		0x1
+#define MISR_CTRL_ENABLE		BIT(8)
+#define MISR_CTRL_STATUS		BIT(9)
+#define MISR_CTRL_STATUS_CLEAR		BIT(10)
+#define MISR_CTRL_FREE_RUN_MASK		BIT(31)
+
+/*
+ * This is the common struct maintained by each sub block
+ * for mapping the register offsets in this block to the
+ * absolute IO address
+ * @blk_addr: hw block register mapped address
+ * @log_mask: log mask for this block
+ */
+struct dpu_hw_blk_reg_map {
+	void __iomem *blk_addr;
+	u32 log_mask;
+};
+
+/**
+ * struct dpu_hw_blk - opaque hardware block object
+ */
+struct dpu_hw_blk {
+	/* opaque */
+};
+
+/**
+ * struct dpu_hw_scaler3_de_cfg : QSEEDv3 detail enhancer configuration
+ * @enable: detail enhancer enable/disable
+ * @sharpen_level1: sharpening strength for noise
+ * @sharpen_level2: sharpening strength for signal
+ * @ clip: clip shift
+ * @ limit: limit value
+ * @ thr_quiet: quiet threshold
+ * @ thr_dieout: dieout threshold
+ * @ thr_low: low threshold
+ * @ thr_high: high threshold
+ * @ prec_shift: precision shift
+ * @ adjust_a: A-coefficients for mapping curve
+ * @ adjust_b: B-coefficients for mapping curve
+ * @ adjust_c: C-coefficients for mapping curve
+ */
+struct dpu_hw_scaler3_de_cfg {
+	u32 enable;
+	int16_t sharpen_level1;
+	int16_t sharpen_level2;
+	uint16_t clip;
+	uint16_t limit;
+	uint16_t thr_quiet;
+	uint16_t thr_dieout;
+	uint16_t thr_low;
+	uint16_t thr_high;
+	uint16_t prec_shift;
+	int16_t adjust_a[DPU_MAX_DE_CURVES];
+	int16_t adjust_b[DPU_MAX_DE_CURVES];
+	int16_t adjust_c[DPU_MAX_DE_CURVES];
+};
+
+
+/**
+ * struct dpu_hw_scaler3_cfg : QSEEDv3 configuration
+ * @enable: scaler enable
+ * @dir_en: direction detection block enable
+ * @ init_phase_x: horizontal initial phase
+ * @ phase_step_x: horizontal phase step
+ * @ init_phase_y: vertical initial phase
+ * @ phase_step_y: vertical phase step
+ * @ preload_x: horizontal preload value
+ * @ preload_y: vertical preload value
+ * @ src_width: source width
+ * @ src_height: source height
+ * @ dst_width: destination width
+ * @ dst_height: destination height
+ * @ y_rgb_filter_cfg: y/rgb plane filter configuration
+ * @ uv_filter_cfg: uv plane filter configuration
+ * @ alpha_filter_cfg: alpha filter configuration
+ * @ blend_cfg: blend coefficients configuration
+ * @ lut_flag: scaler LUT update flags
+ *		0x1 swap LUT bank
+ *		0x2 update 2D filter LUT
+ *		0x4 update y circular filter LUT
+ *		0x8 update uv circular filter LUT
+ *		0x10 update y separable filter LUT
+ *		0x20 update uv separable filter LUT
+ * @ dir_lut_idx: 2D filter LUT index
+ * @ y_rgb_cir_lut_idx: y circular filter LUT index
+ * @ uv_cir_lut_idx: uv circular filter LUT index
+ * @ y_rgb_sep_lut_idx: y separable filter LUT index
+ * @ uv_sep_lut_idx: uv separable filter LUT index
+ * @ dir_lut: pointer to 2D LUT
+ * @ cir_lut: pointer to circular filter LUT
+ * @ sep_lut: pointer to separable filter LUT
+ * @ de: detail enhancer configuration
+ * @ dir_weight: Directional weight
+ */
+struct dpu_hw_scaler3_cfg {
+	u32 enable;
+	u32 dir_en;
+	int32_t init_phase_x[DPU_MAX_PLANES];
+	int32_t
phase_step_x[DPU_MAX_PLANES]; + int32_t init_phase_y[DPU_MAX_PLANES]; + int32_t phase_step_y[DPU_MAX_PLANES]; + + u32 preload_x[DPU_MAX_PLANES]; + u32 preload_y[DPU_MAX_PLANES]; + u32 src_width[DPU_MAX_PLANES]; + u32 src_height[DPU_MAX_PLANES]; + + u32 dst_width; + u32 dst_height; + + u32 y_rgb_filter_cfg; + u32 uv_filter_cfg; + u32 alpha_filter_cfg; + u32 blend_cfg; + + u32 lut_flag; + u32 dir_lut_idx; + + u32 y_rgb_cir_lut_idx; + u32 uv_cir_lut_idx; + u32 y_rgb_sep_lut_idx; + u32 uv_sep_lut_idx; + u32 *dir_lut; + size_t dir_len; + u32 *cir_lut; + size_t cir_len; + u32 *sep_lut; + size_t sep_len; + + /* + * Detail enhancer settings + */ + struct dpu_hw_scaler3_de_cfg de; + + u32 dir_weight; +}; + +/** + * struct dpu_drm_pix_ext_v1 - version 1 of pixel ext structure + * @num_ext_pxls_lr: Number of total horizontal pixels + * @num_ext_pxls_tb: Number of total vertical lines + * @left_ftch: Number of extra pixels to overfetch from left + * @right_ftch: Number of extra pixels to overfetch from right + * @top_ftch: Number of extra lines to overfetch from top + * @btm_ftch: Number of extra lines to overfetch from bottom + * @left_rpt: Number of extra pixels to repeat from left + * @right_rpt: Number of extra pixels to repeat from right + * @top_rpt: Number of extra lines to repeat from top + * @btm_rpt: Number of extra lines to repeat from bottom + */ +struct dpu_drm_pix_ext_v1 { + /* + * Number of pixels ext in left, right, top and bottom direction + * for all color components. + */ + int32_t num_ext_pxls_lr[DPU_MAX_PLANES]; + int32_t num_ext_pxls_tb[DPU_MAX_PLANES]; + + /* + * Number of pixels needs to be overfetched in left, right, top + * and bottom directions from source image for scaling. + */ + int32_t left_ftch[DPU_MAX_PLANES]; + int32_t right_ftch[DPU_MAX_PLANES]; + int32_t top_ftch[DPU_MAX_PLANES]; + int32_t btm_ftch[DPU_MAX_PLANES]; + /* + * Number of pixels needs to be repeated in left, right, top and + * bottom directions for scaling. 
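+	 * (Illustrative example, added editorially: a plane using a 3-tap
+	 * filter may set left_rpt = right_rpt = 1 so the border pixel is
+	 * replicated once on each side, giving the filter valid taps at
+	 * the image edges.)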
 + */
+	int32_t left_rpt[DPU_MAX_PLANES];
+	int32_t right_rpt[DPU_MAX_PLANES];
+	int32_t top_rpt[DPU_MAX_PLANES];
+	int32_t btm_rpt[DPU_MAX_PLANES];
+
+};
+
+/**
+ * struct dpu_drm_de_v1 - version 1 of detail enhancer structure
+ * @enable: Enables/disables detail enhancer
+ * @sharpen_level1: Sharpening strength for noise
+ * @sharpen_level2: Sharpening strength for context
+ * @clip: Clip coefficient
+ * @limit: Detail enhancer limit factor
+ * @thr_quiet: Quiet zone threshold
+ * @thr_dieout: Die-out zone threshold
+ * @thr_low: Linear zone left threshold
+ * @thr_high: Linear zone right threshold
+ * @prec_shift: Detail enhancer precision
+ * @adjust_a: Mapping curves A coefficients
+ * @adjust_b: Mapping curves B coefficients
+ * @adjust_c: Mapping curves C coefficients
+ */
+struct dpu_drm_de_v1 {
+	uint32_t enable;
+	int16_t sharpen_level1;
+	int16_t sharpen_level2;
+	uint16_t clip;
+	uint16_t limit;
+	uint16_t thr_quiet;
+	uint16_t thr_dieout;
+	uint16_t thr_low;
+	uint16_t thr_high;
+	uint16_t prec_shift;
+	int16_t adjust_a[DPU_MAX_DE_CURVES];
+	int16_t adjust_b[DPU_MAX_DE_CURVES];
+	int16_t adjust_c[DPU_MAX_DE_CURVES];
+};
+
+/**
+ * struct dpu_drm_scaler_v2 - version 2 of struct dpu_drm_scaler
+ * @enable: Scaler enable
+ * @dir_en: Direction detection enable
+ * @pe: Pixel extension settings
+ * @horz_decimate: Horizontal decimation factor
+ * @vert_decimate: Vertical decimation factor
+ * @init_phase_x: Initial scaler phase values for x
+ * @phase_step_x: Phase step values for x
+ * @init_phase_y: Initial scaler phase values for y
+ * @phase_step_y: Phase step values for y
+ * @preload_x: Horizontal preload value
+ * @preload_y: Vertical preload value
+ * @src_width: Source width
+ * @src_height: Source height
+ * @dst_width: Destination width
+ * @dst_height: Destination height
+ * @y_rgb_filter_cfg: Y/RGB plane filter configuration
+ * @uv_filter_cfg: UV plane filter configuration
+ * @alpha_filter_cfg: Alpha filter configuration
+ * @blend_cfg: Selection of blend coefficients
+ * @lut_flag: LUT configuration flags
+ * @dir_lut_idx: 2d 4x4 LUT index
+ * @y_rgb_cir_lut_idx: Y/RGB circular LUT index
+ * @uv_cir_lut_idx: UV circular LUT index
+ * @y_rgb_sep_lut_idx: Y/RGB separable LUT index
+ * @uv_sep_lut_idx: UV separable LUT index
+ * @de: Detail enhancer settings
+ */
+struct dpu_drm_scaler_v2 {
+	/*
+	 * General definitions
+	 */
+	uint32_t enable;
+	uint32_t dir_en;
+
+	/*
+	 * Pix ext settings
+	 */
+	struct dpu_drm_pix_ext_v1 pe;
+
+	/*
+	 * Decimation settings
+	 */
+	uint32_t horz_decimate;
+	uint32_t vert_decimate;
+
+	/*
+	 * Phase settings
+	 */
+	int32_t init_phase_x[DPU_MAX_PLANES];
+	int32_t phase_step_x[DPU_MAX_PLANES];
+	int32_t init_phase_y[DPU_MAX_PLANES];
+	int32_t phase_step_y[DPU_MAX_PLANES];
+
+	uint32_t preload_x[DPU_MAX_PLANES];
+	uint32_t preload_y[DPU_MAX_PLANES];
+	uint32_t src_width[DPU_MAX_PLANES];
+	uint32_t src_height[DPU_MAX_PLANES];
+
+	uint32_t dst_width;
+	uint32_t dst_height;
+
+	uint32_t y_rgb_filter_cfg;
+	uint32_t uv_filter_cfg;
+	uint32_t alpha_filter_cfg;
+	uint32_t blend_cfg;
+
+	uint32_t lut_flag;
+	uint32_t dir_lut_idx;
+
+	/* for Y(RGB) and UV planes*/
+	uint32_t y_rgb_cir_lut_idx;
+	uint32_t uv_cir_lut_idx;
+	uint32_t y_rgb_sep_lut_idx;
+	uint32_t uv_sep_lut_idx;
+
+	/*
+	 * Detail enhancer settings
+	 */
+	struct dpu_drm_de_v1 de;
+};
+
+/**
+ * struct dpu_hw_cdp_cfg : CDP configuration
+ * @enable: true to enable CDP
+ * @ubwc_meta_enable: true to enable ubwc metadata preload
+ * @tile_amortize_enable: true to enable amortization control for
tile format + * @preload_ahead: number of request to preload ahead + * DPU_*_CDP_PRELOAD_AHEAD_32, + * DPU_*_CDP_PRELOAD_AHEAD_64 + */ +struct dpu_hw_cdp_cfg { + bool enable; + bool ubwc_meta_enable; + bool tile_amortize_enable; + u32 preload_ahead; +}; + +u32 *dpu_hw_util_get_log_mask_ptr(void); + +void dpu_reg_write(struct dpu_hw_blk_reg_map *c, + u32 reg_off, + u32 val, + const char *name); +int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off); + +#define DPU_REG_WRITE(c, off, val) dpu_reg_write(c, off, val, #off) +#define DPU_REG_READ(c, off) dpu_reg_read(c, off) + +void *dpu_hw_util_get_dir(void); + +void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c, + struct dpu_hw_scaler3_cfg *scaler3_cfg, + u32 scaler_offset, u32 scaler_version, + const struct dpu_format *format); + +u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c, + u32 scaler_offset); + +void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c, + u32 csc_reg_off, + const struct dpu_csc_cfg *data, bool csc10); + +u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl, + u32 total_fl); + +void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c, + u32 misr_ctrl_offset, u8 input_sel); + +int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c, + u32 misr_ctrl_offset, + u32 misr_signature_offset, + u32 *misr_value); + +#endif /* _DPU_HW_UTIL_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c new file mode 100644 index 000000000..16c56e240 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + */ + +#include "dpu_hwio.h" +#include "dpu_hw_catalog.h" +#include "dpu_hw_vbif.h" + +#define VBIF_VERSION 0x0000 +#define VBIF_CLK_FORCE_CTRL0 0x0008 +#define VBIF_CLK_FORCE_CTRL1 0x000C +#define VBIF_QOS_REMAP_00 0x0020 +#define VBIF_QOS_REMAP_01 0x0024 +#define VBIF_QOS_REMAP_10 0x0028 +#define VBIF_QOS_REMAP_11 0x002C +#define VBIF_WRITE_GATHER_EN 0x00AC +#define VBIF_IN_RD_LIM_CONF0 0x00B0 +#define VBIF_IN_RD_LIM_CONF1 0x00B4 +#define VBIF_IN_RD_LIM_CONF2 0x00B8 +#define VBIF_IN_WR_LIM_CONF0 0x00C0 +#define VBIF_IN_WR_LIM_CONF1 0x00C4 +#define VBIF_IN_WR_LIM_CONF2 0x00C8 +#define VBIF_OUT_RD_LIM_CONF0 0x00D0 +#define VBIF_OUT_WR_LIM_CONF0 0x00D4 +#define VBIF_OUT_AXI_AMEMTYPE_CONF0 0x0160 +#define VBIF_OUT_AXI_AMEMTYPE_CONF1 0x0164 +#define VBIF_XIN_PND_ERR 0x0190 +#define VBIF_XIN_SRC_ERR 0x0194 +#define VBIF_XIN_CLR_ERR 0x019C +#define VBIF_XIN_HALT_CTRL0 0x0200 +#define VBIF_XIN_HALT_CTRL1 0x0204 +#define VBIF_XINL_QOS_RP_REMAP_000 0x0550 +#define VBIF_XINL_QOS_LVL_REMAP_000(vbif) (VBIF_XINL_QOS_RP_REMAP_000 + (vbif)->cap->qos_rp_remap_size) + +static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif, + u32 *pnd_errors, u32 *src_errors) +{ + struct dpu_hw_blk_reg_map *c; + u32 pnd, src; + + if (!vbif) + return; + c = &vbif->hw; + pnd = DPU_REG_READ(c, VBIF_XIN_PND_ERR); + src = DPU_REG_READ(c, VBIF_XIN_SRC_ERR); + + if (pnd_errors) + *pnd_errors = pnd; + if (src_errors) + *src_errors = src; + + DPU_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src); +} + +static void dpu_hw_set_mem_type(struct dpu_hw_vbif *vbif, + u32 xin_id, u32 value) +{ + struct dpu_hw_blk_reg_map *c; + u32 reg_off; + u32 bit_off; + u32 reg_val; + + /* + * Assume 4 bits per bit field, 8 fields per 32-bit register so + * 16 bit fields maximum across two registers + */ + if (!vbif || xin_id >= MAX_XIN_COUNT || xin_id >= 16) + return; + + c = &vbif->hw; + + if (xin_id >= 8) 
{ + xin_id -= 8; + reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1; + } else { + reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0; + } + bit_off = (xin_id & 0x7) * 4; + reg_val = DPU_REG_READ(c, reg_off); + reg_val &= ~(0x7 << bit_off); + reg_val |= (value & 0x7) << bit_off; + DPU_REG_WRITE(c, reg_off, reg_val); +} + +static void dpu_hw_set_limit_conf(struct dpu_hw_vbif *vbif, + u32 xin_id, bool rd, u32 limit) +{ + struct dpu_hw_blk_reg_map *c = &vbif->hw; + u32 reg_val; + u32 reg_off; + u32 bit_off; + + if (rd) + reg_off = VBIF_IN_RD_LIM_CONF0; + else + reg_off = VBIF_IN_WR_LIM_CONF0; + + reg_off += (xin_id / 4) * 4; + bit_off = (xin_id % 4) * 8; + reg_val = DPU_REG_READ(c, reg_off); + reg_val &= ~(0xFF << bit_off); + reg_val |= (limit) << bit_off; + DPU_REG_WRITE(c, reg_off, reg_val); +} + +static u32 dpu_hw_get_limit_conf(struct dpu_hw_vbif *vbif, + u32 xin_id, bool rd) +{ + struct dpu_hw_blk_reg_map *c = &vbif->hw; + u32 reg_val; + u32 reg_off; + u32 bit_off; + u32 limit; + + if (rd) + reg_off = VBIF_IN_RD_LIM_CONF0; + else + reg_off = VBIF_IN_WR_LIM_CONF0; + + reg_off += (xin_id / 4) * 4; + bit_off = (xin_id % 4) * 8; + reg_val = DPU_REG_READ(c, reg_off); + limit = (reg_val >> bit_off) & 0xFF; + + return limit; +} + +static void dpu_hw_set_halt_ctrl(struct dpu_hw_vbif *vbif, + u32 xin_id, bool enable) +{ + struct dpu_hw_blk_reg_map *c = &vbif->hw; + u32 reg_val; + + reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL0); + + if (enable) + reg_val |= BIT(xin_id); + else + reg_val &= ~BIT(xin_id); + + DPU_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val); +} + +static bool dpu_hw_get_halt_ctrl(struct dpu_hw_vbif *vbif, + u32 xin_id) +{ + struct dpu_hw_blk_reg_map *c = &vbif->hw; + u32 reg_val; + + reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL1); + + return (reg_val & BIT(xin_id)) ? 
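+			/*
+			 * Editorial note: halt is requested per client via
+			 * VBIF_XIN_HALT_CTRL0 in dpu_hw_set_halt_ctrl() above;
+			 * the acknowledge/status bit read here lives in
+			 * VBIF_XIN_HALT_CTRL1.
+			 */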
true : false; +} + +static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif, + u32 xin_id, u32 level, u32 remap_level) +{ + struct dpu_hw_blk_reg_map *c; + u32 reg_lvl, reg_val, reg_val_lvl, mask, reg_high, reg_shift; + + if (!vbif) + return; + + c = &vbif->hw; + + reg_lvl = VBIF_XINL_QOS_LVL_REMAP_000(vbif); + reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8); + reg_shift = (xin_id & 0x7) * 4; + + reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high); + reg_val_lvl = DPU_REG_READ(c, reg_lvl + reg_high); + + mask = 0x7 << reg_shift; + + reg_val &= ~mask; + reg_val |= (remap_level << reg_shift) & mask; + + reg_val_lvl &= ~mask; + reg_val_lvl |= (remap_level << reg_shift) & mask; + + DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val); + DPU_REG_WRITE(c, reg_lvl + reg_high, reg_val_lvl); +} + +static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id) +{ + struct dpu_hw_blk_reg_map *c; + u32 reg_val; + + if (!vbif || xin_id >= MAX_XIN_COUNT) + return; + + c = &vbif->hw; + + reg_val = DPU_REG_READ(c, VBIF_WRITE_GATHER_EN); + reg_val |= BIT(xin_id); + DPU_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val); +} + +static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops, + unsigned long cap) +{ + ops->set_limit_conf = dpu_hw_set_limit_conf; + ops->get_limit_conf = dpu_hw_get_limit_conf; + ops->set_halt_ctrl = dpu_hw_set_halt_ctrl; + ops->get_halt_ctrl = dpu_hw_get_halt_ctrl; + if (test_bit(DPU_VBIF_QOS_REMAP, &cap)) + ops->set_qos_remap = dpu_hw_set_qos_remap; + ops->set_mem_type = dpu_hw_set_mem_type; + ops->clear_errors = dpu_hw_clear_errors; + ops->set_write_gather_en = dpu_hw_set_write_gather_en; +} + +static const struct dpu_vbif_cfg *_top_offset(enum dpu_vbif vbif, + const struct dpu_mdss_cfg *m, + void __iomem *addr, + struct dpu_hw_blk_reg_map *b) +{ + int i; + + for (i = 0; i < m->vbif_count; i++) { + if (vbif == m->vbif[i].id) { + b->blk_addr = addr + m->vbif[i].base; + b->log_mask = DPU_DBG_MASK_VBIF; + return &m->vbif[i]; + } + } + + return ERR_PTR(-EINVAL); +} + +struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx, + void __iomem *addr, + const struct dpu_mdss_cfg *m) +{ + struct dpu_hw_vbif *c; + const struct dpu_vbif_cfg *cfg; + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return ERR_PTR(-ENOMEM); + + cfg = _top_offset(idx, m, addr, &c->hw); + if (IS_ERR_OR_NULL(cfg)) { + kfree(c); + return ERR_PTR(-EINVAL); + } + + /* + * Assign ops + */ + c->idx = idx; + c->cap = cfg; + _setup_vbif_ops(&c->ops, c->cap->features); + + /* no need to register sub-range in dpu dbg, dump entire vbif io base */ + + return c; +} + +void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif) +{ + kfree(vbif); +} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h new file mode 100644 index 000000000..6417aa28d --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DPU_HW_VBIF_H +#define _DPU_HW_VBIF_H + +#include "dpu_hw_catalog.h" +#include "dpu_hw_mdss.h" +#include "dpu_hw_util.h" + +struct dpu_hw_vbif; + +/** + * struct dpu_hw_vbif_ops : Interface to the VBIF hardware driver functions + * Assumption is these functions will be called after clocks are enabled + */ +struct dpu_hw_vbif_ops { + /** + * set_limit_conf - set transaction limit config + * @vbif: vbif context driver + * @xin_id: client interface identifier + * @rd: true for read limit; false for write limit + * @limit: outstanding transaction limit + */ + void (*set_limit_conf)(struct dpu_hw_vbif *vbif, + u32 xin_id, bool rd, u32 limit); + + /** + * get_limit_conf - get transaction limit config + * @vbif: vbif context driver + * @xin_id: client interface identifier + * @rd: true for read limit; false for write limit + * @return: outstanding transaction limit + */ + u32 (*get_limit_conf)(struct dpu_hw_vbif *vbif, + u32 xin_id, bool rd); + + /** + * set_halt_ctrl - set halt control + * @vbif: vbif context driver + * @xin_id: client interface identifier + * @enable: halt control enable + */ + void (*set_halt_ctrl)(struct dpu_hw_vbif *vbif, + u32 xin_id, bool enable); + + /** + * get_halt_ctrl - get halt control + * @vbif: vbif context driver + * @xin_id: client interface identifier + * @return: halt control enable + */ + bool (*get_halt_ctrl)(struct dpu_hw_vbif *vbif, + u32 xin_id); + + /** + * set_qos_remap - set QoS priority remap + * @vbif: vbif context driver + * @xin_id: client interface identifier + * @level: priority level + * @remap_level: remapped level + */ + void (*set_qos_remap)(struct dpu_hw_vbif *vbif, + u32 xin_id, u32 level, u32 remap_level); + + /** + * set_mem_type - set memory type + * @vbif: vbif context driver + * @xin_id: client interface identifier + * @value: memory type value + */ + void (*set_mem_type)(struct dpu_hw_vbif *vbif, + u32 xin_id, u32 value); + + /** + * clear_errors - clear any vbif errors + * This function clears any detected pending/source errors + * on the VBIF interface, and optionally returns the detected + * error mask(s). + * @vbif: vbif context driver + * @pnd_errors: pointer to pending error reporting variable + * @src_errors: pointer to source error reporting variable + */ + void (*clear_errors)(struct dpu_hw_vbif *vbif, + u32 *pnd_errors, u32 *src_errors); + + /** + * set_write_gather_en - set write_gather enable + * @vbif: vbif context driver + * @xin_id: client interface identifier + */ + void (*set_write_gather_en)(struct dpu_hw_vbif *vbif, u32 xin_id); +}; + +struct dpu_hw_vbif { + /* base */ + struct dpu_hw_blk_reg_map hw; + + /* vbif */ + enum dpu_vbif idx; + const struct dpu_vbif_cfg *cap; + + /* ops */ + struct dpu_hw_vbif_ops ops; +}; + +/** + * dpu_hw_vbif_init - initializes the vbif driver for the passed interface idx + * @idx: Interface index for which driver object is required + * @addr: Mapped register io address of MDSS + * @m: Pointer to mdss catalog data + */ +struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx, + void __iomem *addr, + const struct dpu_mdss_cfg *m); + +void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif); + +#endif /*_DPU_HW_VBIF_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c new file mode 100644 index 000000000..a3e413d27 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c @@ -0,0 +1,277 @@ +// SPDX-License-Identifier: GPL-2.0-only + /* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. 
All rights reserved
+ */
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_wb.h"
+#include "dpu_formats.h"
+#include "dpu_kms.h"
+
+#define WB_DST_FORMAT			0x000
+#define WB_DST_OP_MODE			0x004
+#define WB_DST_PACK_PATTERN		0x008
+#define WB_DST0_ADDR			0x00C
+#define WB_DST1_ADDR			0x010
+#define WB_DST2_ADDR			0x014
+#define WB_DST3_ADDR			0x018
+#define WB_DST_YSTRIDE0			0x01C
+#define WB_DST_YSTRIDE1			0x020
+#define WB_DST_DITHER_BITDEPTH		0x024
+#define WB_DST_MATRIX_ROW0		0x030
+#define WB_DST_MATRIX_ROW1		0x034
+#define WB_DST_MATRIX_ROW2		0x038
+#define WB_DST_MATRIX_ROW3		0x03C
+#define WB_DST_WRITE_CONFIG		0x048
+#define WB_ROTATION_DNSCALER		0x050
+#define WB_ROTATOR_PIPE_DOWNSCALER	0x054
+#define WB_N16_INIT_PHASE_X_C03		0x060
+#define WB_N16_INIT_PHASE_X_C12		0x064
+#define WB_N16_INIT_PHASE_Y_C03		0x068
+#define WB_N16_INIT_PHASE_Y_C12		0x06C
+#define WB_OUT_SIZE			0x074
+#define WB_ALPHA_X_VALUE		0x078
+#define WB_DANGER_LUT			0x084
+#define WB_SAFE_LUT			0x088
+#define WB_QOS_CTRL			0x090
+#define WB_CREQ_LUT_0			0x098
+#define WB_CREQ_LUT_1			0x09C
+#define WB_UBWC_STATIC_CTRL		0x144
+#define WB_MUX				0x150
+#define WB_CROP_CTRL			0x154
+#define WB_CROP_OFFSET			0x158
+#define WB_CSC_BASE			0x260
+#define WB_DST_ADDR_SW_STATUS		0x2B0
+#define WB_CDP_CNTL			0x2B4
+#define WB_OUT_IMAGE_SIZE		0x2C0
+#define WB_OUT_XY			0x2C4
+
+/* WB_QOS_CTRL */
+#define WB_QOS_CTRL_DANGER_SAFE_EN	BIT(0)
+
+static const struct dpu_wb_cfg *_wb_offset(enum dpu_wb wb,
+		const struct dpu_mdss_cfg *m, void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->wb_count; i++) {
+		if (wb == m->wb[i].id) {
+			b->blk_addr = addr + m->wb[i].base;
+			b->log_mask = DPU_DBG_MASK_WB;
+			return &m->wb[i];
+		}
+	}
+	return ERR_PTR(-EINVAL);
+}
+
+static void dpu_hw_wb_setup_outaddress(struct dpu_hw_wb *ctx,
+		struct dpu_hw_wb_cfg *data)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+	DPU_REG_WRITE(c, WB_DST0_ADDR, data->dest.plane_addr[0]);
+	DPU_REG_WRITE(c, WB_DST1_ADDR, data->dest.plane_addr[1]);
+	DPU_REG_WRITE(c, WB_DST2_ADDR, data->dest.plane_addr[2]);
+	DPU_REG_WRITE(c, WB_DST3_ADDR, data->dest.plane_addr[3]);
+}
+
+static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx,
+		struct dpu_hw_wb_cfg *data)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	const struct dpu_format *fmt = data->dest.format;
+	u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
+	u32 write_config = 0;
+	u32 opmode = 0;
+	u32 dst_addr_sw = 0;
+
+	chroma_samp = fmt->chroma_sample;
+
+	dst_format = (chroma_samp << 23) |
+		(fmt->fetch_planes << 19) |
+		(fmt->bits[C3_ALPHA] << 6) |
+		(fmt->bits[C2_R_Cr] << 4) |
+		(fmt->bits[C1_B_Cb] << 2) |
+		(fmt->bits[C0_G_Y] << 0);
+
+	if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
+		dst_format |= BIT(8); /* DSTC3_EN */
+		if (!fmt->alpha_enable ||
+				!(ctx->caps->features & BIT(DPU_WB_PIPE_ALPHA)))
+			dst_format |= BIT(14); /* DST_ALPHA_X */
+	}
+
+	pattern = (fmt->element[3] << 24) |
+		(fmt->element[2] << 16) |
+		(fmt->element[1] << 8) |
+		(fmt->element[0] << 0);
+
+	dst_format |= (fmt->unpack_align_msb << 18) |
+		(fmt->unpack_tight << 17) |
+		((fmt->unpack_count - 1) << 12) |
+		((fmt->bpp - 1) << 9);
+
+	ystride0 = data->dest.plane_pitch[0] |
+		(data->dest.plane_pitch[1] << 16);
+	ystride1 = data->dest.plane_pitch[2] |
+		(data->dest.plane_pitch[3] << 16);
+
+	if (drm_rect_height(&data->roi) && drm_rect_width(&data->roi))
+		outsize = (drm_rect_height(&data->roi) << 16) | drm_rect_width(&data->roi);
+	else
+		outsize =
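+			  /*
+			   * Editorial note: DPU geometry registers pack height
+			   * in the upper and width in the lower 16 bits, e.g. a
+			   * 1920x1080 destination encodes as
+			   * (1080 << 16) | 1920 = 0x04380780.
+			   */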
(data->dest.height << 16) | data->dest.width; + + DPU_REG_WRITE(c, WB_ALPHA_X_VALUE, 0xFF); + DPU_REG_WRITE(c, WB_DST_FORMAT, dst_format); + DPU_REG_WRITE(c, WB_DST_OP_MODE, opmode); + DPU_REG_WRITE(c, WB_DST_PACK_PATTERN, pattern); + DPU_REG_WRITE(c, WB_DST_YSTRIDE0, ystride0); + DPU_REG_WRITE(c, WB_DST_YSTRIDE1, ystride1); + DPU_REG_WRITE(c, WB_OUT_SIZE, outsize); + DPU_REG_WRITE(c, WB_DST_WRITE_CONFIG, write_config); + DPU_REG_WRITE(c, WB_DST_ADDR_SW_STATUS, dst_addr_sw); +} + +static void dpu_hw_wb_roi(struct dpu_hw_wb *ctx, struct dpu_hw_wb_cfg *wb) +{ + struct dpu_hw_blk_reg_map *c = &ctx->hw; + u32 image_size, out_size, out_xy; + + image_size = (wb->dest.height << 16) | wb->dest.width; + out_xy = 0; + out_size = (drm_rect_height(&wb->roi) << 16) | drm_rect_width(&wb->roi); + + DPU_REG_WRITE(c, WB_OUT_IMAGE_SIZE, image_size); + DPU_REG_WRITE(c, WB_OUT_XY, out_xy); + DPU_REG_WRITE(c, WB_OUT_SIZE, out_size); +} + +static void dpu_hw_wb_setup_qos_lut(struct dpu_hw_wb *ctx, + struct dpu_hw_wb_qos_cfg *cfg) +{ + struct dpu_hw_blk_reg_map *c = &ctx->hw; + u32 qos_ctrl = 0; + + if (!ctx || !cfg) + return; + + DPU_REG_WRITE(c, WB_DANGER_LUT, cfg->danger_lut); + DPU_REG_WRITE(c, WB_SAFE_LUT, cfg->safe_lut); + + /* + * for chipsets not using DPU_WB_QOS_8LVL but still using DPU + * driver such as msm8998, the reset value of WB_CREQ_LUT is + * sufficient for writeback to work. SW doesn't need to explicitly + * program a value. + */ + if (ctx->caps && test_bit(DPU_WB_QOS_8LVL, &ctx->caps->features)) { + DPU_REG_WRITE(c, WB_CREQ_LUT_0, cfg->creq_lut); + DPU_REG_WRITE(c, WB_CREQ_LUT_1, cfg->creq_lut >> 32); + } + + if (cfg->danger_safe_en) + qos_ctrl |= WB_QOS_CTRL_DANGER_SAFE_EN; + + DPU_REG_WRITE(c, WB_QOS_CTRL, qos_ctrl); +} + +static void dpu_hw_wb_setup_cdp(struct dpu_hw_wb *ctx, + struct dpu_hw_cdp_cfg *cfg) +{ + struct dpu_hw_blk_reg_map *c; + u32 cdp_cntl = 0; + + if (!ctx || !cfg) + return; + + c = &ctx->hw; + + if (cfg->enable) + cdp_cntl |= BIT(0); + if (cfg->ubwc_meta_enable) + cdp_cntl |= BIT(1); + if (cfg->preload_ahead == DPU_WB_CDP_PRELOAD_AHEAD_64) + cdp_cntl |= BIT(3); + + DPU_REG_WRITE(c, WB_CDP_CNTL, cdp_cntl); +} + +static void dpu_hw_wb_bind_pingpong_blk( + struct dpu_hw_wb *ctx, + bool enable, const enum dpu_pingpong pp) +{ + struct dpu_hw_blk_reg_map *c; + int mux_cfg; + + if (!ctx) + return; + + c = &ctx->hw; + + mux_cfg = DPU_REG_READ(c, WB_MUX); + mux_cfg &= ~0xf; + + if (enable) + mux_cfg |= (pp - PINGPONG_0) & 0x7; + else + mux_cfg |= 0xf; + + DPU_REG_WRITE(c, WB_MUX, mux_cfg); +} + +static void _setup_wb_ops(struct dpu_hw_wb_ops *ops, + unsigned long features) +{ + ops->setup_outaddress = dpu_hw_wb_setup_outaddress; + ops->setup_outformat = dpu_hw_wb_setup_format; + + if (test_bit(DPU_WB_XY_ROI_OFFSET, &features)) + ops->setup_roi = dpu_hw_wb_roi; + + if (test_bit(DPU_WB_QOS, &features)) + ops->setup_qos_lut = dpu_hw_wb_setup_qos_lut; + + if (test_bit(DPU_WB_CDP, &features)) + ops->setup_cdp = dpu_hw_wb_setup_cdp; + + if (test_bit(DPU_WB_INPUT_CTRL, &features)) + ops->bind_pingpong_blk = dpu_hw_wb_bind_pingpong_blk; +} + +struct dpu_hw_wb *dpu_hw_wb_init(enum dpu_wb idx, + void __iomem *addr, const struct dpu_mdss_cfg *m) +{ + struct dpu_hw_wb *c; + const struct dpu_wb_cfg *cfg; + + if (!addr || !m) + return ERR_PTR(-EINVAL); + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return ERR_PTR(-ENOMEM); + + cfg = _wb_offset(idx, m, addr, &c->hw); + if (IS_ERR(cfg)) { + WARN(1, "Unable to find wb idx=%d\n", idx); + kfree(c); + return ERR_PTR(-EINVAL); + } + + /* Assign 
ops */ + c->mdp = &m->mdp[0]; + c->idx = idx; + c->caps = cfg; + _setup_wb_ops(&c->ops, c->caps->features); + + return c; +} + +void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb) +{ + kfree(hw_wb); +} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h new file mode 100644 index 000000000..3ff5a4854 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved + */ + +#ifndef _DPU_HW_WB_H +#define _DPU_HW_WB_H + +#include "dpu_hw_catalog.h" +#include "dpu_hw_mdss.h" +#include "dpu_hw_top.h" +#include "dpu_hw_util.h" +#include "dpu_hw_pingpong.h" + +struct dpu_hw_wb; + +struct dpu_hw_wb_cfg { + struct dpu_hw_fmt_layout dest; + enum dpu_intf_mode intf_mode; + struct drm_rect roi; + struct drm_rect crop; +}; + +/** + * enum CDP preload ahead address size + */ +enum { + DPU_WB_CDP_PRELOAD_AHEAD_32, + DPU_WB_CDP_PRELOAD_AHEAD_64 +}; + +/** + * struct dpu_hw_wb_qos_cfg : Writeback pipe QoS configuration + * @danger_lut: LUT for generate danger level based on fill level + * @safe_lut: LUT for generate safe level based on fill level + * @creq_lut: LUT for generate creq level based on fill level + * @danger_safe_en: enable danger safe generation + */ +struct dpu_hw_wb_qos_cfg { + u32 danger_lut; + u32 safe_lut; + u64 creq_lut; + bool danger_safe_en; +}; + +/** + * + * struct dpu_hw_wb_ops : Interface to the wb hw driver functions + * Assumption is these functions will be called after clocks are enabled + * @setup_outaddress: setup output address from the writeback job + * @setup_outformat: setup output format of writeback block from writeback job + * @setup_qos_lut: setup qos LUT for writeback block based on input + * @setup_cdp: setup chroma down prefetch block for writeback block + * @bind_pingpong_blk: enable/disable the connection with ping-pong block + */ +struct dpu_hw_wb_ops { + void (*setup_outaddress)(struct dpu_hw_wb *ctx, + struct dpu_hw_wb_cfg *wb); + + void (*setup_outformat)(struct dpu_hw_wb *ctx, + struct dpu_hw_wb_cfg *wb); + + void (*setup_roi)(struct dpu_hw_wb *ctx, + struct dpu_hw_wb_cfg *wb); + + void (*setup_qos_lut)(struct dpu_hw_wb *ctx, + struct dpu_hw_wb_qos_cfg *cfg); + + void (*setup_cdp)(struct dpu_hw_wb *ctx, + struct dpu_hw_cdp_cfg *cfg); + + void (*bind_pingpong_blk)(struct dpu_hw_wb *ctx, + bool enable, const enum dpu_pingpong pp); +}; + +/** + * struct dpu_hw_wb : WB driver object + * @hw: block hardware details + * @mdp: pointer to associated mdp portion of the catalog + * @idx: hardware index number within type + * @wb_hw_caps: hardware capabilities + * @ops: function pointers + * @hw_mdp: MDP top level hardware block + */ +struct dpu_hw_wb { + struct dpu_hw_blk_reg_map hw; + const struct dpu_mdp_cfg *mdp; + + /* wb path */ + int idx; + const struct dpu_wb_cfg *caps; + + /* ops */ + struct dpu_hw_wb_ops ops; + + struct dpu_hw_mdp *hw_mdp; +}; + +/** + * dpu_hw_wb_init(): Initializes and return writeback hw driver object. + * @idx: wb_path index for which driver object is required + * @addr: mapped register io address of MDP + * @m : pointer to mdss catalog data + */ +struct dpu_hw_wb *dpu_hw_wb_init(enum dpu_wb idx, + void __iomem *addr, + const struct dpu_mdss_cfg *m); + +/** + * dpu_hw_wb_destroy(): Destroy writeback hw driver object. 
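+ *
+ * Illustrative pairing with dpu_hw_wb_init() (editorial sketch; "mmio" and
+ * "catalog" are placeholder names):
+ *
+ *	hw_wb = dpu_hw_wb_init(WB_2, mmio, catalog);
+ *	...
+ *	dpu_hw_wb_destroy(hw_wb);
+ *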
+ * @hw_wb: Pointer to writeback hw driver object + */ +void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb); + +#endif /*_DPU_HW_WB_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h new file mode 100644 index 000000000..93081e82c --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _DPU_HWIO_H +#define _DPU_HWIO_H + +#include "dpu_hw_util.h" + +/** + * MDP TOP block Register and bit fields and defines + */ +#define DISP_INTF_SEL 0x004 +#define INTR_EN 0x010 +#define INTR_STATUS 0x014 +#define INTR_CLEAR 0x018 +#define INTR2_EN 0x008 +#define INTR2_STATUS 0x00c +#define INTR2_CLEAR 0x02c +#define HIST_INTR_EN 0x01c +#define HIST_INTR_STATUS 0x020 +#define HIST_INTR_CLEAR 0x024 +#define SPLIT_DISPLAY_EN 0x2F4 +#define SPLIT_DISPLAY_UPPER_PIPE_CTRL 0x2F8 +#define DSPP_IGC_COLOR0_RAM_LUTN 0x300 +#define DSPP_IGC_COLOR1_RAM_LUTN 0x304 +#define DSPP_IGC_COLOR2_RAM_LUTN 0x308 +#define HW_EVENTS_CTL 0x37C +#define CLK_CTRL3 0x3A8 +#define CLK_STATUS3 0x3AC +#define CLK_CTRL4 0x3B0 +#define CLK_STATUS4 0x3B4 +#define CLK_CTRL5 0x3B8 +#define CLK_STATUS5 0x3BC +#define CLK_CTRL7 0x3D0 +#define CLK_STATUS7 0x3D4 +#define SPLIT_DISPLAY_LOWER_PIPE_CTRL 0x3F0 +#define SPLIT_DISPLAY_TE_LINE_INTERVAL 0x3F4 +#define INTF_SW_RESET_MASK 0x3FC +#define HDMI_DP_CORE_SELECT 0x408 +#define MDP_OUT_CTL_0 0x410 +#define MDP_VSYNC_SEL 0x414 +#define DCE_SEL 0x450 + +#endif /*_DPU_HWIO_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c new file mode 100644 index 000000000..b7901b666 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -0,0 +1,1337 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013 Red Hat + * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ *
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+#include <linux/of_irq.h>
+#include <linux/pm_opp.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_file.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_writeback.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+#include "disp/msm_disp_snapshot.h"
+
+#include "dpu_core_irq.h"
+#include "dpu_crtc.h"
+#include "dpu_encoder.h"
+#include "dpu_formats.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_kms.h"
+#include "dpu_plane.h"
+#include "dpu_vbif.h"
+#include "dpu_writeback.h"
+
+#define CREATE_TRACE_POINTS
+#include "dpu_trace.h"
+
+/*
+ * To enable overall DRM driver logging
+ * # echo 0x2 > /sys/module/drm/parameters/debug
+ *
+ * To enable DRM driver h/w logging
+ * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
+ *
+ * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
+ */
+#define DPU_DEBUGFS_DIR "msm_dpu"
+#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
+
+static int dpu_kms_hw_init(struct msm_kms *kms);
+static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_danger_signal_status(struct seq_file *s,
+		bool danger_status)
+{
+	struct dpu_kms *kms = (struct dpu_kms *)s->private;
+	struct dpu_danger_safe_status status;
+	int i;
+
+	if (!kms->hw_mdp) {
+		DPU_ERROR("invalid arg(s)\n");
+		return 0;
+	}
+
+	memset(&status, 0, sizeof(struct dpu_danger_safe_status));
+
+	pm_runtime_get_sync(&kms->pdev->dev);
+	if (danger_status) {
+		seq_puts(s, "\nDanger signal status:\n");
+		if (kms->hw_mdp->ops.get_danger_status)
+			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
+					&status);
+	} else {
+		seq_puts(s, "\nSafe signal status:\n");
+		if (kms->hw_mdp->ops.get_safe_status)
+			kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
+					&status);
+	}
+	pm_runtime_put_sync(&kms->pdev->dev);
+
+	seq_printf(s, "MDP : 0x%x\n", status.mdp);
+
+	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
+		seq_printf(s, "SSPP%d : 0x%x \n", i - SSPP_VIG0,
+				status.sspp[i]);
+	seq_puts(s, "\n");
+
+	return 0;
+}
+
+static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
+{
+	return _dpu_danger_signal_status(s, true);
+}
+DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_danger_stats);
+
+static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
+{
+	return _dpu_danger_signal_status(s, false);
+}
+DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats);
+
+static ssize_t _dpu_plane_danger_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	struct dpu_kms *kms = file->private_data;
+	int len;
+	char buf[40];
+
+	len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
+
+	return simple_read_from_buffer(buff, count, ppos, buf, len);
+}
+
+static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
+{
+	struct drm_plane *plane;
+
+	drm_for_each_plane(plane, kms->dev) {
+		if (plane->fb && plane->state) {
+			dpu_plane_danger_signal_ctrl(plane, enable);
+			DPU_DEBUG("plane:%d img:%dx%d ",
+				plane->base.id, plane->fb->width,
+				plane->fb->height);
+			DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
+				plane->state->src_x >> 16,
+				plane->state->src_y >> 16,
+				plane->state->src_w >> 16,
+				plane->state->src_h >> 16,
+				plane->state->crtc_x, plane->state->crtc_y,
+				plane->state->crtc_w, plane->state->crtc_h);
+		} else {
+			DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
+		}
+	}
+}
+
+static ssize_t _dpu_plane_danger_write(struct file *file,
+		    const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct dpu_kms *kms = file->private_data;
+	int disable_panic;
+	int ret;
+
+	ret =
kstrtouint_from_user(user_buf, count, 0, &disable_panic); + if (ret) + return ret; + + if (disable_panic) { + /* Disable panic signal for all active pipes */ + DPU_DEBUG("Disabling danger:\n"); + _dpu_plane_set_danger_state(kms, false); + kms->has_danger_ctrl = false; + } else { + /* Enable panic signal for all active pipes */ + DPU_DEBUG("Enabling danger:\n"); + kms->has_danger_ctrl = true; + _dpu_plane_set_danger_state(kms, true); + } + + return count; +} + +static const struct file_operations dpu_plane_danger_enable = { + .open = simple_open, + .read = _dpu_plane_danger_read, + .write = _dpu_plane_danger_write, +}; + +static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms, + struct dentry *parent) +{ + struct dentry *entry = debugfs_create_dir("danger", parent); + + debugfs_create_file("danger_status", 0600, entry, + dpu_kms, &dpu_debugfs_danger_stats_fops); + debugfs_create_file("safe_status", 0600, entry, + dpu_kms, &dpu_debugfs_safe_stats_fops); + debugfs_create_file("disable_danger", 0600, entry, + dpu_kms, &dpu_plane_danger_enable); + +} + +/* + * Companion structure for dpu_debugfs_create_regset32. + */ +struct dpu_debugfs_regset32 { + uint32_t offset; + uint32_t blk_len; + struct dpu_kms *dpu_kms; +}; + +static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data) +{ + struct dpu_debugfs_regset32 *regset = s->private; + struct dpu_kms *dpu_kms = regset->dpu_kms; + void __iomem *base; + uint32_t i, addr; + + if (!dpu_kms->mmio) + return 0; + + base = dpu_kms->mmio + regset->offset; + + /* insert padding spaces, if needed */ + if (regset->offset & 0xF) { + seq_printf(s, "[%x]", regset->offset & ~0xF); + for (i = 0; i < (regset->offset & 0xF); i += 4) + seq_puts(s, " "); + } + + pm_runtime_get_sync(&dpu_kms->pdev->dev); + + /* main register output */ + for (i = 0; i < regset->blk_len; i += 4) { + addr = regset->offset + i; + if ((addr & 0xF) == 0x0) + seq_printf(s, i ? 
"\n[%x]" : "[%x]", addr); + seq_printf(s, " %08x", readl_relaxed(base + i)); + } + seq_puts(s, "\n"); + pm_runtime_put_sync(&dpu_kms->pdev->dev); + + return 0; +} + +static int dpu_debugfs_open_regset32(struct inode *inode, + struct file *file) +{ + return single_open(file, _dpu_debugfs_show_regset32, inode->i_private); +} + +static const struct file_operations dpu_fops_regset32 = { + .open = dpu_debugfs_open_regset32, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +void dpu_debugfs_create_regset32(const char *name, umode_t mode, + void *parent, + uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms) +{ + struct dpu_debugfs_regset32 *regset; + + if (WARN_ON(!name || !dpu_kms || !length)) + return; + + regset = devm_kzalloc(&dpu_kms->pdev->dev, sizeof(*regset), GFP_KERNEL); + if (!regset) + return; + + /* make sure offset is a multiple of 4 */ + regset->offset = round_down(offset, 4); + regset->blk_len = length; + regset->dpu_kms = dpu_kms; + + debugfs_create_file(name, mode, parent, regset, &dpu_fops_regset32); +} + +static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) +{ + struct dpu_kms *dpu_kms = to_dpu_kms(kms); + void *p = dpu_hw_util_get_log_mask_ptr(); + struct dentry *entry; + struct drm_device *dev; + struct msm_drm_private *priv; + int i; + + if (!p) + return -EINVAL; + + /* Only create a set of debugfs for the primary node, ignore render nodes */ + if (minor->type != DRM_MINOR_PRIMARY) + return 0; + + dev = dpu_kms->dev; + priv = dev->dev_private; + + entry = debugfs_create_dir("debug", minor->debugfs_root); + + debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p); + + dpu_debugfs_danger_init(dpu_kms, entry); + dpu_debugfs_vbif_init(dpu_kms, entry); + dpu_debugfs_core_irq_init(dpu_kms, entry); + dpu_debugfs_sspp_init(dpu_kms, entry); + + for (i = 0; i < ARRAY_SIZE(priv->dp); i++) { + if (priv->dp[i]) + msm_dp_debugfs_init(priv->dp[i], minor); + } + + return dpu_core_perf_debugfs_init(dpu_kms, entry); +} +#endif + +/* Global/shared object state funcs */ + +/* + * This is a helper that returns the private state currently in operation. + * Note that this would return the "old_state" if called in the atomic check + * path, and the "new_state" after the atomic swap has been done. + */ +struct dpu_global_state * +dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms) +{ + return to_dpu_global_state(dpu_kms->global_state.state); +} + +/* + * This acquires the modeset lock set aside for global state, creates + * a new duplicated private object state. 
+ */ +struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s) +{ + struct msm_drm_private *priv = s->dev->dev_private; + struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); + struct drm_private_state *priv_state; + int ret; + + ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx); + if (ret) + return ERR_PTR(ret); + + priv_state = drm_atomic_get_private_obj_state(s, + &dpu_kms->global_state); + if (IS_ERR(priv_state)) + return ERR_CAST(priv_state); + + return to_dpu_global_state(priv_state); +} + +static struct drm_private_state * +dpu_kms_global_duplicate_state(struct drm_private_obj *obj) +{ + struct dpu_global_state *state; + + state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); + + return &state->base; +} + +static void dpu_kms_global_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + struct dpu_global_state *dpu_state = to_dpu_global_state(state); + + kfree(dpu_state); +} + +static const struct drm_private_state_funcs dpu_kms_global_state_funcs = { + .atomic_duplicate_state = dpu_kms_global_duplicate_state, + .atomic_destroy_state = dpu_kms_global_destroy_state, +}; + +static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms) +{ + struct dpu_global_state *state; + + drm_modeset_lock_init(&dpu_kms->global_state_lock); + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state, + &state->base, + &dpu_kms_global_state_funcs); + return 0; +} + +static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms) +{ + struct icc_path *path0; + struct icc_path *path1; + struct drm_device *dev = dpu_kms->dev; + struct device *dpu_dev = dev->dev; + + path0 = msm_icc_get(dpu_dev, "mdp0-mem"); + path1 = msm_icc_get(dpu_dev, "mdp1-mem"); + + if (IS_ERR_OR_NULL(path0)) + return PTR_ERR_OR_ZERO(path0); + + dpu_kms->path[0] = path0; + dpu_kms->num_paths = 1; + + if (!IS_ERR_OR_NULL(path1)) { + dpu_kms->path[1] = path1; + dpu_kms->num_paths++; + } + return 0; +} + +static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) +{ + return dpu_crtc_vblank(crtc, true); +} + +static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) +{ + dpu_crtc_vblank(crtc, false); +} + +static void dpu_kms_enable_commit(struct msm_kms *kms) +{ + struct dpu_kms *dpu_kms = to_dpu_kms(kms); + pm_runtime_get_sync(&dpu_kms->pdev->dev); +} + +static void dpu_kms_disable_commit(struct msm_kms *kms) +{ + struct dpu_kms *dpu_kms = to_dpu_kms(kms); + pm_runtime_put_sync(&dpu_kms->pdev->dev); +} + +static ktime_t dpu_kms_vsync_time(struct msm_kms *kms, struct drm_crtc *crtc) +{ + struct drm_encoder *encoder; + + drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) { + ktime_t vsync_time; + + if (dpu_encoder_vsync_time(encoder, &vsync_time) == 0) + return vsync_time; + } + + return ktime_get(); +} + +static void dpu_kms_prepare_commit(struct msm_kms *kms, + struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_encoder *encoder; + int i; + + if (!kms) + return; + + /* Call prepare_commit for all affected encoders */ + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { + drm_for_each_encoder_mask(encoder, crtc->dev, + crtc_state->encoder_mask) { + dpu_encoder_prepare_commit(encoder); + } + } +} + +static void dpu_kms_flush_commit(struct msm_kms *kms, 
unsigned crtc_mask)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+	struct drm_crtc *crtc;
+
+	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) {
+		if (!crtc->state->active)
+			continue;
+
+		trace_dpu_kms_commit(DRMID(crtc));
+		dpu_crtc_commit_kickoff(crtc);
+	}
+}
+
+static void dpu_kms_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+	struct drm_crtc *crtc;
+
+	DPU_ATRACE_BEGIN("kms_complete_commit");
+
+	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
+		dpu_crtc_complete_commit(crtc);
+
+	DPU_ATRACE_END("kms_complete_commit");
+}
+
+static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
+		struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+	struct drm_device *dev;
+	int ret;
+
+	if (!kms || !crtc || !crtc->state) {
+		DPU_ERROR("invalid params\n");
+		return;
+	}
+
+	dev = crtc->dev;
+
+	if (!crtc->state->enable) {
+		DPU_DEBUG("[crtc:%d] not enabled\n", crtc->base.id);
+		return;
+	}
+
+	if (!crtc->state->active) {
+		DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
+		return;
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+		/*
+		 * Wait for post-flush if necessary to delay before
+		 * plane_cleanup. For example, wait for vsync in case of video
+		 * mode panels. This may be a no-op for command mode panels.
+		 */
+		trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
+		ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
+		if (ret && ret != -EWOULDBLOCK) {
+			DPU_ERROR("wait for commit done returned %d\n", ret);
+			break;
+		}
+	}
+}
+
+static void dpu_kms_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+	struct drm_crtc *crtc;
+
+	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
+		dpu_kms_wait_for_commit_done(kms, crtc);
+}
+
+static int _dpu_kms_initialize_dsi(struct drm_device *dev,
+				    struct msm_drm_private *priv,
+				    struct dpu_kms *dpu_kms)
+{
+	struct drm_encoder *encoder = NULL;
+	struct msm_display_info info;
+	int i, rc = 0;
+
+	if (!(priv->dsi[0] || priv->dsi[1]))
+		return rc;
+
+	/*
+	 * We support the following configurations:
+	 * - Single DSI host (dsi0 or dsi1)
+	 * - Two independent DSI hosts
+	 * - Bonded DSI0 and DSI1 hosts
+	 *
+	 * TODO: Support swapping DSI0 and DSI1 in the bonded setup.
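+	 *
+	 * In the bonded setup only the master DSI host allocates the encoder;
+	 * the slave host is registered against that same encoder below.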
+	 */
+	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+		int other = (i + 1) % 2;
+
+		if (!priv->dsi[i])
+			continue;
+
+		if (msm_dsi_is_bonded_dsi(priv->dsi[i]) &&
+		    !msm_dsi_is_master_dsi(priv->dsi[i]))
+			continue;
+
+		encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
+		if (IS_ERR(encoder)) {
+			DPU_ERROR("encoder init failed for dsi display\n");
+			return PTR_ERR(encoder);
+		}
+
+		memset(&info, 0, sizeof(info));
+		info.intf_type = encoder->encoder_type;
+
+		rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
+		if (rc) {
+			DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
+				i, rc);
+			break;
+		}
+
+		info.h_tile_instance[info.num_of_h_tiles++] = i;
+		info.is_cmd_mode = msm_dsi_is_cmd_mode(priv->dsi[i]);
+
+		info.dsc = msm_dsi_get_dsc_config(priv->dsi[i]);
+
+		if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && priv->dsi[other]) {
+			rc = msm_dsi_modeset_init(priv->dsi[other], dev, encoder);
+			if (rc) {
+				DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
+					other, rc);
+				break;
+			}
+
+			info.h_tile_instance[info.num_of_h_tiles++] = other;
+		}
+
+		rc = dpu_encoder_setup(dev, encoder, &info);
+		if (rc)
+			DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+				encoder->base.id, rc);
+	}
+
+	return rc;
+}
+
+static int _dpu_kms_initialize_displayport(struct drm_device *dev,
+					    struct msm_drm_private *priv,
+					    struct dpu_kms *dpu_kms)
+{
+	struct drm_encoder *encoder = NULL;
+	struct msm_display_info info;
+	int rc;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
+		if (!priv->dp[i])
+			continue;
+
+		encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS);
+		if (IS_ERR(encoder)) {
+			DPU_ERROR("encoder init failed for dp display\n");
+			return PTR_ERR(encoder);
+		}
+
+		memset(&info, 0, sizeof(info));
+		rc = msm_dp_modeset_init(priv->dp[i], dev, encoder);
+		if (rc) {
+			DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
+			drm_encoder_cleanup(encoder);
+			return rc;
+		}
+
+		info.num_of_h_tiles = 1;
+		info.h_tile_instance[0] = i;
+		info.intf_type = encoder->encoder_type;
+		rc = dpu_encoder_setup(dev, encoder, &info);
+		if (rc) {
+			DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+				  encoder->base.id, rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int _dpu_kms_initialize_writeback(struct drm_device *dev,
+		struct msm_drm_private *priv, struct dpu_kms *dpu_kms,
+		const u32 *wb_formats, int n_formats)
+{
+	struct drm_encoder *encoder = NULL;
+	struct msm_display_info info;
+	int rc;
+
+	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_VIRTUAL);
+	if (IS_ERR(encoder)) {
+		DPU_ERROR("encoder init failed for writeback display\n");
+		return PTR_ERR(encoder);
+	}
+
+	memset(&info, 0, sizeof(info));
+
+	rc = dpu_writeback_init(dev, encoder, wb_formats,
+			n_formats);
+	if (rc) {
+		DPU_ERROR("dpu_writeback_init failed, rc = %d\n", rc);
+		drm_encoder_cleanup(encoder);
+		return rc;
+	}
+
+	info.num_of_h_tiles = 1;
+	/* use only WB idx 2 instance for DPU */
+	info.h_tile_instance[0] = WB_2;
+	info.intf_type = encoder->encoder_type;
+
+	rc = dpu_encoder_setup(dev, encoder, &info);
+	if (rc) {
+		DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+			  encoder->base.id, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * _dpu_kms_setup_displays - create encoders, bridges and connectors
+ *                           for underlying displays
+ * @dev: Pointer to drm device structure
+ * @priv: Pointer to private drm device data
+ * @dpu_kms: Pointer to dpu kms structure
+ * Returns: Zero on success
+ */
+static int _dpu_kms_setup_displays(struct drm_device *dev,
+				    struct msm_drm_private *priv,
+				    struct dpu_kms *dpu_kms)
+{
+	int rc = 0;
+	int
i; + + rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms); + if (rc) { + DPU_ERROR("initialize_dsi failed, rc = %d\n", rc); + return rc; + } + + rc = _dpu_kms_initialize_displayport(dev, priv, dpu_kms); + if (rc) { + DPU_ERROR("initialize_DP failed, rc = %d\n", rc); + return rc; + } + + /* Since WB isn't a driver check the catalog before initializing */ + if (dpu_kms->catalog->wb_count) { + for (i = 0; i < dpu_kms->catalog->wb_count; i++) { + if (dpu_kms->catalog->wb[i].id == WB_2) { + rc = _dpu_kms_initialize_writeback(dev, priv, dpu_kms, + dpu_kms->catalog->wb[i].format_list, + dpu_kms->catalog->wb[i].num_formats); + if (rc) { + DPU_ERROR("initialize_WB failed, rc = %d\n", rc); + return rc; + } + } + } + } + + return rc; +} + +#define MAX_PLANES 20 +static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms) +{ + struct drm_device *dev; + struct drm_plane *primary_planes[MAX_PLANES], *plane; + struct drm_plane *cursor_planes[MAX_PLANES] = { NULL }; + struct drm_crtc *crtc; + struct drm_encoder *encoder; + unsigned int num_encoders; + + struct msm_drm_private *priv; + const struct dpu_mdss_cfg *catalog; + + int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret; + int max_crtc_count; + dev = dpu_kms->dev; + priv = dev->dev_private; + catalog = dpu_kms->catalog; + + /* + * Create encoder and query display drivers to create + * bridges and connectors + */ + ret = _dpu_kms_setup_displays(dev, priv, dpu_kms); + if (ret) + return ret; + + num_encoders = 0; + drm_for_each_encoder(encoder, dev) + num_encoders++; + + max_crtc_count = min(catalog->mixer_count, num_encoders); + + /* Create the planes, keeping track of one primary/cursor per crtc */ + for (i = 0; i < catalog->sspp_count; i++) { + enum drm_plane_type type; + + if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)) + && cursor_planes_idx < max_crtc_count) + type = DRM_PLANE_TYPE_CURSOR; + else if (primary_planes_idx < max_crtc_count) + type = DRM_PLANE_TYPE_PRIMARY; + else + type = DRM_PLANE_TYPE_OVERLAY; + + DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n", + type, catalog->sspp[i].features, + catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)); + + plane = dpu_plane_init(dev, catalog->sspp[i].id, type, + (1UL << max_crtc_count) - 1); + if (IS_ERR(plane)) { + DPU_ERROR("dpu_plane_init failed\n"); + ret = PTR_ERR(plane); + return ret; + } + + if (type == DRM_PLANE_TYPE_CURSOR) + cursor_planes[cursor_planes_idx++] = plane; + else if (type == DRM_PLANE_TYPE_PRIMARY) + primary_planes[primary_planes_idx++] = plane; + } + + max_crtc_count = min(max_crtc_count, primary_planes_idx); + + /* Create one CRTC per encoder */ + for (i = 0; i < max_crtc_count; i++) { + crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]); + if (IS_ERR(crtc)) { + ret = PTR_ERR(crtc); + return ret; + } + priv->crtcs[priv->num_crtcs++] = crtc; + } + + /* All CRTCs are compatible with all encoders */ + drm_for_each_encoder(encoder, dev) + encoder->possible_crtcs = (1 << priv->num_crtcs) - 1; + + return 0; +} + +static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms) +{ + int i; + + if (dpu_kms->hw_intr) + dpu_hw_intr_destroy(dpu_kms->hw_intr); + dpu_kms->hw_intr = NULL; + + /* safe to call these more than once during shutdown */ + _dpu_kms_mmu_destroy(dpu_kms); + + if (dpu_kms->catalog) { + for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { + if (dpu_kms->hw_vbif[i]) { + dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]); + dpu_kms->hw_vbif[i] = NULL; + } + } + } + + if (dpu_kms->rm_init) + dpu_rm_destroy(&dpu_kms->rm); + dpu_kms->rm_init = false; + + 
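	/* the catalog is static const data; there is nothing to free here */
+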
dpu_kms->catalog = NULL; + + if (dpu_kms->vbif[VBIF_NRT]) + devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]); + dpu_kms->vbif[VBIF_NRT] = NULL; + + if (dpu_kms->vbif[VBIF_RT]) + devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]); + dpu_kms->vbif[VBIF_RT] = NULL; + + if (dpu_kms->hw_mdp) + dpu_hw_mdp_destroy(dpu_kms->hw_mdp); + dpu_kms->hw_mdp = NULL; + + if (dpu_kms->mmio) + devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio); + dpu_kms->mmio = NULL; +} + +static void dpu_kms_destroy(struct msm_kms *kms) +{ + struct dpu_kms *dpu_kms; + + if (!kms) { + DPU_ERROR("invalid kms\n"); + return; + } + + dpu_kms = to_dpu_kms(kms); + + _dpu_kms_hw_destroy(dpu_kms); + + msm_kms_destroy(&dpu_kms->base); + + if (dpu_kms->rpm_enabled) + pm_runtime_disable(&dpu_kms->pdev->dev); +} + +static int dpu_irq_postinstall(struct msm_kms *kms) +{ + struct msm_drm_private *priv; + struct dpu_kms *dpu_kms = to_dpu_kms(kms); + int i; + + if (!dpu_kms || !dpu_kms->dev) + return -EINVAL; + + priv = dpu_kms->dev->dev_private; + if (!priv) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(priv->dp); i++) + msm_dp_irq_postinstall(priv->dp[i]); + + return 0; +} + +static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_kms *kms) +{ + int i; + struct dpu_kms *dpu_kms; + const struct dpu_mdss_cfg *cat; + + dpu_kms = to_dpu_kms(kms); + + cat = dpu_kms->catalog; + + pm_runtime_get_sync(&dpu_kms->pdev->dev); + + /* dump CTL sub-blocks HW regs info */ + for (i = 0; i < cat->ctl_count; i++) + msm_disp_snapshot_add_block(disp_state, cat->ctl[i].len, + dpu_kms->mmio + cat->ctl[i].base, "ctl_%d", i); + + /* dump DSPP sub-blocks HW regs info */ + for (i = 0; i < cat->dspp_count; i++) + msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len, + dpu_kms->mmio + cat->dspp[i].base, "dspp_%d", i); + + /* dump INTF sub-blocks HW regs info */ + for (i = 0; i < cat->intf_count; i++) + msm_disp_snapshot_add_block(disp_state, cat->intf[i].len, + dpu_kms->mmio + cat->intf[i].base, "intf_%d", i); + + /* dump PP sub-blocks HW regs info */ + for (i = 0; i < cat->pingpong_count; i++) + msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].len, + dpu_kms->mmio + cat->pingpong[i].base, "pingpong_%d", i); + + /* dump SSPP sub-blocks HW regs info */ + for (i = 0; i < cat->sspp_count; i++) + msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len, + dpu_kms->mmio + cat->sspp[i].base, "sspp_%d", i); + + /* dump LM sub-blocks HW regs info */ + for (i = 0; i < cat->mixer_count; i++) + msm_disp_snapshot_add_block(disp_state, cat->mixer[i].len, + dpu_kms->mmio + cat->mixer[i].base, "lm_%d", i); + + /* dump WB sub-blocks HW regs info */ + for (i = 0; i < cat->wb_count; i++) + msm_disp_snapshot_add_block(disp_state, cat->wb[i].len, + dpu_kms->mmio + cat->wb[i].base, "wb_%d", i); + + msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len, + dpu_kms->mmio + cat->mdp[0].base, "top"); + + /* dump DSC sub-blocks HW regs info */ + for (i = 0; i < cat->dsc_count; i++) + msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len, + dpu_kms->mmio + cat->dsc[i].base, "dsc_%d", i); + + pm_runtime_put_sync(&dpu_kms->pdev->dev); +} + +static const struct msm_kms_funcs kms_funcs = { + .hw_init = dpu_kms_hw_init, + .irq_preinstall = dpu_core_irq_preinstall, + .irq_postinstall = dpu_irq_postinstall, + .irq_uninstall = dpu_core_irq_uninstall, + .irq = dpu_core_irq, + .enable_commit = dpu_kms_enable_commit, + .disable_commit = dpu_kms_disable_commit, + .vsync_time = dpu_kms_vsync_time, + .prepare_commit = dpu_kms_prepare_commit, + 
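	/* flush/wait/complete below are invoked per CRTC mask by the msm atomic helper */
+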
.flush_commit = dpu_kms_flush_commit, + .wait_flush = dpu_kms_wait_flush, + .complete_commit = dpu_kms_complete_commit, + .enable_vblank = dpu_kms_enable_vblank, + .disable_vblank = dpu_kms_disable_vblank, + .check_modified_format = dpu_format_check_modified_format, + .get_format = dpu_get_msm_format, + .destroy = dpu_kms_destroy, + .snapshot = dpu_kms_mdp_snapshot, +#ifdef CONFIG_DEBUG_FS + .debugfs_init = dpu_kms_debugfs_init, +#endif +}; + +static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms) +{ + struct msm_mmu *mmu; + + if (!dpu_kms->base.aspace) + return; + + mmu = dpu_kms->base.aspace->mmu; + + mmu->funcs->detach(mmu); + msm_gem_address_space_put(dpu_kms->base.aspace); + + dpu_kms->base.aspace = NULL; +} + +static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms) +{ + struct msm_gem_address_space *aspace; + + aspace = msm_kms_init_aspace(dpu_kms->dev); + if (IS_ERR(aspace)) + return PTR_ERR(aspace); + + dpu_kms->base.aspace = aspace; + + return 0; +} + +u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name) +{ + struct clk *clk; + + clk = msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, clock_name); + if (!clk) + return -EINVAL; + + return clk_get_rate(clk); +} + +static int dpu_kms_hw_init(struct msm_kms *kms) +{ + struct dpu_kms *dpu_kms; + struct drm_device *dev; + int i, rc = -EINVAL; + + if (!kms) { + DPU_ERROR("invalid kms\n"); + return rc; + } + + dpu_kms = to_dpu_kms(kms); + dev = dpu_kms->dev; + + rc = dpu_kms_global_obj_init(dpu_kms); + if (rc) + return rc; + + atomic_set(&dpu_kms->bandwidth_ref, 0); + + dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp"); + if (IS_ERR(dpu_kms->mmio)) { + rc = PTR_ERR(dpu_kms->mmio); + DPU_ERROR("mdp register memory map failed: %d\n", rc); + dpu_kms->mmio = NULL; + goto error; + } + DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio); + + dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif"); + if (IS_ERR(dpu_kms->vbif[VBIF_RT])) { + rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]); + DPU_ERROR("vbif register memory map failed: %d\n", rc); + dpu_kms->vbif[VBIF_RT] = NULL; + goto error; + } + dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(dpu_kms->pdev, "vbif_nrt"); + if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) { + dpu_kms->vbif[VBIF_NRT] = NULL; + DPU_DEBUG("VBIF NRT is not defined"); + } + + dpu_kms->reg_dma = msm_ioremap_quiet(dpu_kms->pdev, "regdma"); + if (IS_ERR(dpu_kms->reg_dma)) { + dpu_kms->reg_dma = NULL; + DPU_DEBUG("REG_DMA is not defined"); + } + + dpu_kms_parse_data_bus_icc_path(dpu_kms); + + rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev); + if (rc < 0) + goto error; + + dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0); + + pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev); + + dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev); + if (IS_ERR_OR_NULL(dpu_kms->catalog)) { + rc = PTR_ERR(dpu_kms->catalog); + if (!dpu_kms->catalog) + rc = -EINVAL; + DPU_ERROR("catalog init failed: %d\n", rc); + dpu_kms->catalog = NULL; + goto power_error; + } + + /* + * Now we need to read the HW catalog and initialize resources such as + * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc + */ + rc = _dpu_kms_mmu_init(dpu_kms); + if (rc) { + DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc); + goto power_error; + } + + rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio); + if (rc) { + DPU_ERROR("rm init failed: %d\n", rc); + goto power_error; + } + + dpu_kms->rm_init = true; + + dpu_kms->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, dpu_kms->mmio, + dpu_kms->catalog); + if 
(IS_ERR(dpu_kms->hw_mdp)) {
+		rc = PTR_ERR(dpu_kms->hw_mdp);
+		DPU_ERROR("failed to get hw_mdp: %d\n", rc);
+		dpu_kms->hw_mdp = NULL;
+		goto power_error;
+	}
+
+	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+		u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
+
+		dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx,
+				dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
+		if (IS_ERR(dpu_kms->hw_vbif[vbif_idx])) {
+			rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
+			DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
+			dpu_kms->hw_vbif[vbif_idx] = NULL;
+			goto power_error;
+		}
+	}
+
+	rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
+			msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, "core"));
+	if (rc) {
+		DPU_ERROR("failed to init perf %d\n", rc);
+		goto perf_err;
+	}
+
+	dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
+	if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
+		rc = PTR_ERR(dpu_kms->hw_intr);
+		DPU_ERROR("hw_intr init failed: %d\n", rc);
+		dpu_kms->hw_intr = NULL;
+		goto hw_intr_init_err;
+	}
+
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+
+	/*
+	 * max crtc width is equal to the max mixer width * 2 and max height
+	 * is 4K
+	 */
+	dev->mode_config.max_width =
+			dpu_kms->catalog->caps->max_mixer_width * 2;
+	dev->mode_config.max_height = 4096;
+
+	dev->max_vblank_count = 0xffffffff;
+	/* Disable vblank irqs aggressively for power-saving */
+	dev->vblank_disable_immediate = true;
+
+	/*
+	 * _dpu_kms_drm_obj_init should create the DRM related objects
+	 * i.e. CRTCs, planes, encoders, connectors and so forth
+	 */
+	rc = _dpu_kms_drm_obj_init(dpu_kms);
+	if (rc) {
+		DPU_ERROR("modeset init failed: %d\n", rc);
+		goto drm_obj_init_err;
+	}
+
+	dpu_vbif_init_memtypes(dpu_kms);
+
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+	return 0;
+
+drm_obj_init_err:
+	dpu_core_perf_destroy(&dpu_kms->perf);
+hw_intr_init_err:
+perf_err:
+power_error:
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+error:
+	_dpu_kms_hw_destroy(dpu_kms);
+
+	return rc;
+}
+
+static int dpu_kms_init(struct drm_device *ddev)
+{
+	struct msm_drm_private *priv = ddev->dev_private;
+	struct device *dev = ddev->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct dpu_kms *dpu_kms;
+	int irq;
+	struct dev_pm_opp *opp;
+	int ret = 0;
+	unsigned long max_freq = ULONG_MAX;
+
+	dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
+	if (!dpu_kms)
+		return -ENOMEM;
+
+	ret = devm_pm_opp_set_clkname(dev, "core");
+	if (ret)
+		return ret;
+	/* OPP table is optional */
+	ret = devm_pm_opp_of_add_table(dev);
+	if (ret && ret != -ENODEV) {
+		dev_err(dev, "invalid OPP table in device tree\n");
+		return ret;
+	}
+
+	ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_kms->clocks);
+	if (ret < 0) {
+		DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
+		return ret;
+	}
+	dpu_kms->num_clocks = ret;
+
+	opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
+	if (!IS_ERR(opp))
+		dev_pm_opp_put(opp);
+
+	dev_pm_opp_set_rate(dev, max_freq);
+
+	ret = msm_kms_init(&dpu_kms->base, &kms_funcs);
+	if (ret) {
+		DPU_ERROR("failed to init kms, ret=%d\n", ret);
+		return ret;
+	}
+	dpu_kms->dev = ddev;
+	dpu_kms->pdev = pdev;
+
+	pm_runtime_enable(&pdev->dev);
+	dpu_kms->rpm_enabled = true;
+
+	priv->kms = &dpu_kms->base;
+
+	irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
+	if (!irq) {
+		DPU_ERROR("failed to get irq\n");
+		return -EINVAL;
+	}
+	dpu_kms->base.irq = irq;
+
+	return 0;
+}
+
+static int dpu_dev_probe(struct platform_device *pdev)
+{
+	return
msm_drv_probe(&pdev->dev, dpu_kms_init);
+}
+
+static int dpu_dev_remove(struct platform_device *pdev)
+{
+	component_master_del(&pdev->dev, &msm_drm_ops);
+
+	return 0;
+}
+
+static int __maybe_unused dpu_runtime_suspend(struct device *dev)
+{
+	int i;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_drm_private *priv = platform_get_drvdata(pdev);
+	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+
+	/* Drop the performance state vote */
+	dev_pm_opp_set_rate(dev, 0);
+	clk_bulk_disable_unprepare(dpu_kms->num_clocks, dpu_kms->clocks);
+
+	for (i = 0; i < dpu_kms->num_paths; i++)
+		icc_set_bw(dpu_kms->path[i], 0, 0);
+
+	return 0;
+}
+
+static int __maybe_unused dpu_runtime_resume(struct device *dev)
+{
+	int rc = -1;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_drm_private *priv = platform_get_drvdata(pdev);
+	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+	struct drm_encoder *encoder;
+	struct drm_device *ddev;
+
+	ddev = dpu_kms->dev;
+
+	rc = clk_bulk_prepare_enable(dpu_kms->num_clocks, dpu_kms->clocks);
+	if (rc) {
+		DPU_ERROR("clock enable failed rc:%d\n", rc);
+		return rc;
+	}
+
+	dpu_vbif_init_memtypes(dpu_kms);
+
+	drm_for_each_encoder(encoder, ddev)
+		dpu_encoder_virt_runtime_resume(encoder);
+
+	return rc;
+}
+
+static const struct dev_pm_ops dpu_pm_ops = {
+	SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+	.prepare = msm_pm_prepare,
+	.complete = msm_pm_complete,
+};
+
+static const struct of_device_id dpu_dt_match[] = {
+	{ .compatible = "qcom,msm8998-dpu", },
+	{ .compatible = "qcom,qcm2290-dpu", },
+	{ .compatible = "qcom,sdm845-dpu", },
+	{ .compatible = "qcom,sc7180-dpu", },
+	{ .compatible = "qcom,sc7280-dpu", },
+	{ .compatible = "qcom,sc8180x-dpu", },
+	{ .compatible = "qcom,sm8150-dpu", },
+	{ .compatible = "qcom,sm8250-dpu", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, dpu_dt_match);
+
+static struct platform_driver dpu_driver = {
+	.probe = dpu_dev_probe,
+	.remove = dpu_dev_remove,
+	.shutdown = msm_drv_shutdown,
+	.driver = {
+		.name = "msm_dpu",
+		.of_match_table = dpu_dt_match,
+		.pm = &dpu_pm_ops,
+	},
+};
+
+void __init msm_dpu_register(void)
+{
+	platform_driver_register(&dpu_driver);
+}
+
+void __exit msm_dpu_unregister(void)
+{
+	platform_driver_unregister(&dpu_driver);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
new file mode 100644
index 000000000..ed80ed678
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __DPU_KMS_H__
+#define __DPU_KMS_H__
+
+#include <linux/interconnect.h>
+
+#include <drm/drm_drv.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_top.h"
+#include "dpu_rm.h"
+#include "dpu_core_perf.h"
+
+#define DRMID(x) ((x) ? (x)->base.id : -1)
+
+/**
+ * DPU_DEBUG - macro for kms/plane/crtc/encoder/connector logs
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG(fmt, ...)
\
+	do { \
+		if (drm_debug_enabled(DRM_UT_KMS)) \
+			DRM_DEBUG(fmt, ##__VA_ARGS__); \
+		else \
+			pr_debug(fmt, ##__VA_ARGS__); \
+	} while (0)
+
+/**
+ * DPU_DEBUG_DRIVER - macro for hardware driver logging
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG_DRIVER(fmt, ...) \
+	do { \
+		if (drm_debug_enabled(DRM_UT_DRIVER)) \
+			DRM_ERROR(fmt, ##__VA_ARGS__); \
+		else \
+			pr_debug(fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__)
+
+/**
+ * ktime_compare_safe - compare two ktime structures
+ * This macro is similar to the standard ktime_compare() function, but
+ * attempts to also handle ktime overflows.
+ * @A: First ktime value
+ * @B: Second ktime value
+ * Returns: -1 if A < B, 0 if A == B, 1 if A > B
+ */
+#define ktime_compare_safe(A, B) \
+	ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
+
+#define DPU_NAME_SIZE  12
+
+struct dpu_kms {
+	struct msm_kms base;
+	struct drm_device *dev;
+	int core_rev;
+	const struct dpu_mdss_cfg *catalog;
+
+	/* io/register spaces: */
+	void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
+
+	struct regulator *vdd;
+	struct regulator *mmagic;
+	struct regulator *venus;
+
+	struct dpu_hw_intr *hw_intr;
+
+	struct dpu_core_perf perf;
+
+	/*
+	 * Global private object state. Do not access directly; use
+	 * dpu_kms_get_global_state()
+	 */
+	struct drm_modeset_lock global_state_lock;
+	struct drm_private_obj global_state;
+
+	struct dpu_rm rm;
+	bool rm_init;
+
+	struct dpu_hw_vbif *hw_vbif[VBIF_MAX];
+	struct dpu_hw_mdp *hw_mdp;
+
+	bool has_danger_ctrl;
+
+	struct platform_device *pdev;
+	bool rpm_enabled;
+
+	struct clk_bulk_data *clocks;
+	size_t num_clocks;
+
+	/* reference count bandwidth requests, so we know when we can
+	 * release bandwidth. Each atomic update increments, and frame-
+	 * done event decrements. Additionally, for video mode, the
+	 * reference is incremented when crtc is enabled, and decremented
+	 * when disabled.
+	 */
+	atomic_t bandwidth_ref;
+	struct icc_path *path[2];
+	u32 num_paths;
+};
+
+struct vsync_info {
+	u32 frame_count;
+	u32 line_count;
+};
+
+#define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
+
+#define to_dpu_global_state(x) container_of(x, struct dpu_global_state, base)
+
+/* Global private object state for tracking resources that are shared across
+ * multiple kms objects (planes/crtcs/etc).
+ */
+struct dpu_global_state {
+	struct drm_private_state base;
+
+	uint32_t pingpong_to_enc_id[PINGPONG_MAX - PINGPONG_0];
+	uint32_t mixer_to_enc_id[LM_MAX - LM_0];
+	uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
+	uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0];
+	uint32_t dsc_to_enc_id[DSC_MAX - DSC_0];
+};
+
+struct dpu_global_state
+	*dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms);
+struct dpu_global_state
+	*__must_check dpu_kms_get_global_state(struct drm_atomic_state *s);
+
+/**
+ * Debugfs functions - extra helper functions for debugfs support
+ *
+ * Main debugfs documentation is located at:
+ *
+ * Documentation/filesystems/debugfs.rst
+ *
+ * @dpu_debugfs_create_regset32: Create 32-bit register dump file
+ */
+
+/**
+ * dpu_debugfs_create_regset32 - Create register read back file for debugfs
+ *
+ * This function is almost identical to the standard debugfs_create_regset32()
+ * function, with the main difference being that a list of register
+ * names/offsets does not need to be provided. The 'read' function simply
+ * outputs sequential register values over a specified range.
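+ *
+ * An illustrative call, reusing catalog names from dpu_kms.c (sketch only):
+ *
+ *	dpu_debugfs_create_regset32("top", 0400, parent,
+ *			cat->mdp[0].base, cat->mdp[0].len, dpu_kms);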
+ *
+ * @name: File name within debugfs
+ * @mode: File mode within debugfs
+ * @parent: Parent directory entry within debugfs, can be NULL
+ * @offset: sub-block offset
+ * @length: sub-block length, in bytes
+ * @dpu_kms: pointer to dpu kms structure
+ */
+void dpu_debugfs_create_regset32(const char *name, umode_t mode,
+		void *parent,
+		uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_debugfs_get_root - Return root directory entry for KMS's debugfs
+ *
+ * The return value should be passed as the 'parent' argument to subsequent
+ * debugfs create calls.
+ *
+ * @dpu_kms: Pointer to DPU's KMS structure
+ *
+ * Return: dentry pointer for DPU's debugfs location
+ */
+void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms);
+
+/**
+ * DPU info management functions
+ * These functions/definitions allow for building up a 'dpu_info' structure
+ * containing one or more "key=value\n" entries.
+ */
+#define DPU_KMS_INFO_MAX_SIZE	4096
+
+/**
+ * Vblank enable/disable functions
+ */
+int dpu_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void dpu_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+/**
+ * dpu_kms_get_clk_rate() - get the clock rate
+ * @dpu_kms: pointer to dpu_kms structure
+ * @clock_name: clock name to get the rate
+ *
+ * Return: current clock rate
+ */
+u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name);
+
+#endif /* __DPU_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
new file mode 100644
index 000000000..62d48c0f9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -0,0 +1,1539 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_formats.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_vbif.h"
+#include "dpu_plane.h"
+
+#define DPU_DEBUG_PLANE(pl, fmt, ...) DRM_DEBUG_ATOMIC("plane%d " fmt,\
+		(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_PLANE(pl, fmt, ...) DPU_ERROR("plane%d " fmt,\
+		(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
+#define PHASE_STEP_SHIFT	21
+#define PHASE_STEP_UNIT_SCALE   ((int) (1 << PHASE_STEP_SHIFT))
+#define PHASE_RESIDUAL		15
+
+#define SHARP_STRENGTH_DEFAULT	32
+#define SHARP_EDGE_THR_DEFAULT	112
+#define SHARP_SMOOTH_THR_DEFAULT	8
+#define SHARP_NOISE_THR_DEFAULT	2
+
+#define DPU_NAME_SIZE  12
+
+#define DPU_PLANE_COLOR_FILL_FLAG	BIT(31)
+#define DPU_ZPOS_MAX 255
+
+/* multirect rect index */
+enum {
+	R0,
+	R1,
+	R_MAX
+};
+
+/*
+ * Default Preload Values
+ */
+#define DPU_QSEED3_DEFAULT_PRELOAD_H 0x4
+#define DPU_QSEED3_DEFAULT_PRELOAD_V 0x3
+#define DPU_QSEED4_DEFAULT_PRELOAD_V 0x2
+#define DPU_QSEED4_DEFAULT_PRELOAD_H 0x4
+
+#define DEFAULT_REFRESH_RATE	60
+
+static const uint32_t qcom_compressed_supported_formats[] = {
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_BGR565,
+
+	DRM_FORMAT_NV12,
+};
+
+/**
+ * enum dpu_plane_qos - Different qos configurations for each pipe
+ *
+ * @DPU_PLANE_QOS_VBLANK_CTRL: Setup VBLANK qos for the pipe.
+ * @DPU_PLANE_QOS_VBLANK_AMORTIZE: Enables amortization within pipe.
+ *	this configuration is mutually exclusive with VBLANK_CTRL.
+ * @DPU_PLANE_QOS_PANIC_CTRL: Setup panic for the pipe.
+ */
+enum dpu_plane_qos {
+	DPU_PLANE_QOS_VBLANK_CTRL = BIT(0),
+	DPU_PLANE_QOS_VBLANK_AMORTIZE = BIT(1),
+	DPU_PLANE_QOS_PANIC_CTRL = BIT(2),
+};
+
+/*
+ * struct dpu_plane - local dpu plane structure
+ * @pipe: SSPP id of the hardware pipe backing this plane
+ * @pipe_hw: Points to the SSPP hardware driver object
+ * @catalog: Points to dpu catalog structure
+ * @is_rt_pipe: true when the pipe drives a real-time (non-writeback) output
+ */
+struct dpu_plane {
+	struct drm_plane base;
+
+	struct mutex lock;
+
+	enum dpu_sspp pipe;
+
+	struct dpu_hw_pipe *pipe_hw;
+	uint32_t color_fill;
+	bool is_error;
+	bool is_rt_pipe;
+	const struct dpu_mdss_cfg *catalog;
+};
+
+static const uint64_t supported_format_modifiers[] = {
+	DRM_FORMAT_MOD_QCOM_COMPRESSED,
+	DRM_FORMAT_MOD_LINEAR,
+	DRM_FORMAT_MOD_INVALID
+};
+
+#define to_dpu_plane(x) container_of(x, struct dpu_plane, base)
+
+static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
+{
+	struct msm_drm_private *priv = plane->dev->dev_private;
+
+	return to_dpu_kms(priv->kms);
+}
+
+/**
+ * _dpu_plane_calc_bw - calculate bandwidth required for a plane
+ * @plane: Pointer to drm plane.
+ * @fb: Pointer to framebuffer associated with the given plane
+ * @pipe_cfg: Pointer to pipe configuration
+ * Result: Updates calculated bandwidth in the plane state.
+ * BW Equation: src_w * src_h * bpp * fps * (v_total / v_dest)
+ * Prefill BW Equation: line src bytes * line_time
+ */
+static void _dpu_plane_calc_bw(struct drm_plane *plane,
+	struct drm_framebuffer *fb,
+	struct dpu_hw_pipe_cfg *pipe_cfg)
+{
+	struct dpu_plane_state *pstate;
+	struct drm_display_mode *mode;
+	const struct dpu_format *fmt = NULL;
+	struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+	int src_width, src_height, dst_height, fps;
+	u64 plane_pixel_rate, plane_bit_rate;
+	u64 plane_prefill_bw;
+	u64 plane_bw;
+	u32 hw_latency_lines;
+	u64 scale_factor;
+	int vbp, vpw, vfp;
+
+	pstate = to_dpu_plane_state(plane->state);
+	mode = &plane->state->crtc->mode;
+
+	fmt = dpu_get_dpu_format_ext(fb->format->format, fb->modifier);
+
+	src_width = drm_rect_width(&pipe_cfg->src_rect);
+	src_height = drm_rect_height(&pipe_cfg->src_rect);
+	dst_height = drm_rect_height(&pipe_cfg->dst_rect);
+	fps = drm_mode_vrefresh(mode);
+	vbp = mode->vtotal - mode->vsync_end;
+	vpw = mode->vsync_end - mode->vsync_start;
+	vfp = mode->vsync_start - mode->vdisplay;
+	hw_latency_lines = dpu_kms->catalog->perf->min_prefill_lines;
+	scale_factor = src_height > dst_height ?
+		mult_frac(src_height, 1, dst_height) : 1;
+
+	plane_pixel_rate = src_width * mode->vtotal * fps;
+	plane_bit_rate = plane_pixel_rate * fmt->bpp;
+
+	plane_bw = plane_bit_rate * scale_factor;
+
+	plane_prefill_bw = plane_bw * hw_latency_lines;
+
+	if ((vbp+vpw) > hw_latency_lines)
+		do_div(plane_prefill_bw, (vbp+vpw));
+	else if ((vbp+vpw+vfp) < hw_latency_lines)
+		do_div(plane_prefill_bw, (vbp+vpw+vfp));
+	else
+		do_div(plane_prefill_bw, hw_latency_lines);
+
+
+	pstate->plane_fetch_bw = max(plane_bw, plane_prefill_bw);
+}
+
+/**
+ * _dpu_plane_calc_clk - calculate clock required for a plane
+ * @plane: Pointer to drm plane.
+ * @pipe_cfg: Pointer to pipe configuration
+ * Result: Updates calculated clock in the plane state.
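+ * With illustrative numbers, a 1920-wide destination on a 2250-line vtotal
+ * at 60 fps needs 1920 * 2250 * 60 = 259.2 MHz before the src_h/dst_h
+ * correction of the equation below.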
+ * Clock equation: dst_w * v_total * fps * (src_h / dst_h) + */ +static void _dpu_plane_calc_clk(struct drm_plane *plane, struct dpu_hw_pipe_cfg *pipe_cfg) +{ + struct dpu_plane_state *pstate; + struct drm_display_mode *mode; + int dst_width, src_height, dst_height, fps; + + pstate = to_dpu_plane_state(plane->state); + mode = &plane->state->crtc->mode; + + src_height = drm_rect_height(&pipe_cfg->src_rect); + dst_width = drm_rect_width(&pipe_cfg->dst_rect); + dst_height = drm_rect_height(&pipe_cfg->dst_rect); + fps = drm_mode_vrefresh(mode); + + pstate->plane_clk = + dst_width * mode->vtotal * fps; + + if (src_height > dst_height) { + pstate->plane_clk *= src_height; + do_div(pstate->plane_clk, dst_height); + } +} + +/** + * _dpu_plane_calc_fill_level - calculate fill level of the given source format + * @plane: Pointer to drm plane + * @fmt: Pointer to source buffer format + * @src_width: width of source buffer + * Return: fill level corresponding to the source buffer/format or 0 if error + */ +static int _dpu_plane_calc_fill_level(struct drm_plane *plane, + const struct dpu_format *fmt, u32 src_width) +{ + struct dpu_plane *pdpu; + struct dpu_plane_state *pstate; + u32 fixed_buff_size; + u32 total_fl; + + if (!fmt || !plane->state || !src_width || !fmt->bpp) { + DPU_ERROR("invalid arguments\n"); + return 0; + } + + pdpu = to_dpu_plane(plane); + pstate = to_dpu_plane_state(plane->state); + fixed_buff_size = pdpu->catalog->caps->pixel_ram_size; + + /* FIXME: in multirect case account for the src_width of all the planes */ + + if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) { + if (fmt->chroma_sample == DPU_CHROMA_420) { + /* NV12 */ + total_fl = (fixed_buff_size / 2) / + ((src_width + 32) * fmt->bpp); + } else { + /* non NV12 */ + total_fl = (fixed_buff_size / 2) * 2 / + ((src_width + 32) * fmt->bpp); + } + } else { + if (pstate->multirect_mode == DPU_SSPP_MULTIRECT_PARALLEL) { + total_fl = (fixed_buff_size / 2) * 2 / + ((src_width + 32) * fmt->bpp); + } else { + total_fl = (fixed_buff_size) * 2 / + ((src_width + 32) * fmt->bpp); + } + } + + DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s w:%u fl:%u\n", + pdpu->pipe - SSPP_VIG0, + (char *)&fmt->base.pixel_format, + src_width, total_fl); + + return total_fl; +} + +/** + * _dpu_plane_set_qos_lut - set QoS LUT of the given plane + * @plane: Pointer to drm plane + * @fb: Pointer to framebuffer associated with the given plane + * @pipe_cfg: Pointer to pipe configuration + */ +static void _dpu_plane_set_qos_lut(struct drm_plane *plane, + struct drm_framebuffer *fb, struct dpu_hw_pipe_cfg *pipe_cfg) +{ + struct dpu_plane *pdpu = to_dpu_plane(plane); + const struct dpu_format *fmt = NULL; + u64 qos_lut; + u32 total_fl = 0, lut_usage; + + if (!pdpu->is_rt_pipe) { + lut_usage = DPU_QOS_LUT_USAGE_NRT; + } else { + fmt = dpu_get_dpu_format_ext( + fb->format->format, + fb->modifier); + total_fl = _dpu_plane_calc_fill_level(plane, fmt, + drm_rect_width(&pipe_cfg->src_rect)); + + if (fmt && DPU_FORMAT_IS_LINEAR(fmt)) + lut_usage = DPU_QOS_LUT_USAGE_LINEAR; + else + lut_usage = DPU_QOS_LUT_USAGE_MACROTILE; + } + + qos_lut = _dpu_hw_get_qos_lut( + &pdpu->catalog->perf->qos_lut_tbl[lut_usage], total_fl); + + trace_dpu_perf_set_qos_luts(pdpu->pipe - SSPP_VIG0, + (fmt) ? fmt->base.pixel_format : 0, + pdpu->is_rt_pipe, total_fl, qos_lut, lut_usage); + + DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%llx\n", + pdpu->pipe - SSPP_VIG0, + fmt ? 
(char *)&fmt->base.pixel_format : NULL, + pdpu->is_rt_pipe, total_fl, qos_lut); + + pdpu->pipe_hw->ops.setup_creq_lut(pdpu->pipe_hw, qos_lut); +} + +/** + * _dpu_plane_set_danger_lut - set danger/safe LUT of the given plane + * @plane: Pointer to drm plane + * @fb: Pointer to framebuffer associated with the given plane + */ +static void _dpu_plane_set_danger_lut(struct drm_plane *plane, + struct drm_framebuffer *fb) +{ + struct dpu_plane *pdpu = to_dpu_plane(plane); + const struct dpu_format *fmt = NULL; + u32 danger_lut, safe_lut; + + if (!pdpu->is_rt_pipe) { + danger_lut = pdpu->catalog->perf->danger_lut_tbl + [DPU_QOS_LUT_USAGE_NRT]; + safe_lut = pdpu->catalog->perf->safe_lut_tbl + [DPU_QOS_LUT_USAGE_NRT]; + } else { + fmt = dpu_get_dpu_format_ext( + fb->format->format, + fb->modifier); + + if (fmt && DPU_FORMAT_IS_LINEAR(fmt)) { + danger_lut = pdpu->catalog->perf->danger_lut_tbl + [DPU_QOS_LUT_USAGE_LINEAR]; + safe_lut = pdpu->catalog->perf->safe_lut_tbl + [DPU_QOS_LUT_USAGE_LINEAR]; + } else { + danger_lut = pdpu->catalog->perf->danger_lut_tbl + [DPU_QOS_LUT_USAGE_MACROTILE]; + safe_lut = pdpu->catalog->perf->safe_lut_tbl + [DPU_QOS_LUT_USAGE_MACROTILE]; + } + } + + trace_dpu_perf_set_danger_luts(pdpu->pipe - SSPP_VIG0, + (fmt) ? fmt->base.pixel_format : 0, + (fmt) ? fmt->fetch_mode : 0, + danger_lut, + safe_lut); + + DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n", + pdpu->pipe - SSPP_VIG0, + fmt ? (char *)&fmt->base.pixel_format : NULL, + fmt ? fmt->fetch_mode : -1, + danger_lut, + safe_lut); + + pdpu->pipe_hw->ops.setup_danger_safe_lut(pdpu->pipe_hw, + danger_lut, safe_lut); +} + +/** + * _dpu_plane_set_qos_ctrl - set QoS control of the given plane + * @plane: Pointer to drm plane + * @enable: true to enable QoS control + * @flags: QoS control mode (enum dpu_plane_qos) + */ +static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane, + bool enable, u32 flags) +{ + struct dpu_plane *pdpu = to_dpu_plane(plane); + struct dpu_hw_pipe_qos_cfg pipe_qos_cfg; + + memset(&pipe_qos_cfg, 0, sizeof(pipe_qos_cfg)); + + if (flags & DPU_PLANE_QOS_VBLANK_CTRL) { + pipe_qos_cfg.creq_vblank = pdpu->pipe_hw->cap->sblk->creq_vblank; + pipe_qos_cfg.danger_vblank = + pdpu->pipe_hw->cap->sblk->danger_vblank; + pipe_qos_cfg.vblank_en = enable; + } + + if (flags & DPU_PLANE_QOS_VBLANK_AMORTIZE) { + /* this feature overrules previous VBLANK_CTRL */ + pipe_qos_cfg.vblank_en = false; + pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */ + } + + if (flags & DPU_PLANE_QOS_PANIC_CTRL) + pipe_qos_cfg.danger_safe_en = enable; + + if (!pdpu->is_rt_pipe) { + pipe_qos_cfg.vblank_en = false; + pipe_qos_cfg.danger_safe_en = false; + } + + DPU_DEBUG_PLANE(pdpu, "pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n", + pdpu->pipe - SSPP_VIG0, + pipe_qos_cfg.danger_safe_en, + pipe_qos_cfg.vblank_en, + pipe_qos_cfg.creq_vblank, + pipe_qos_cfg.danger_vblank, + pdpu->is_rt_pipe); + + pdpu->pipe_hw->ops.setup_qos_ctrl(pdpu->pipe_hw, + &pipe_qos_cfg); +} + +/** + * _dpu_plane_set_ot_limit - set OT limit for the given plane + * @plane: Pointer to drm plane + * @crtc: Pointer to drm crtc + * @pipe_cfg: Pointer to pipe configuration + */ +static void _dpu_plane_set_ot_limit(struct drm_plane *plane, + struct drm_crtc *crtc, struct dpu_hw_pipe_cfg *pipe_cfg) +{ + struct dpu_plane *pdpu = to_dpu_plane(plane); + struct dpu_vbif_set_ot_params ot_params; + struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); + + memset(&ot_params, 0, sizeof(ot_params)); + ot_params.xin_id = pdpu->pipe_hw->cap->xin_id; + ot_params.num 
= pdpu->pipe_hw->idx - SSPP_NONE; + ot_params.width = drm_rect_width(&pipe_cfg->src_rect); + ot_params.height = drm_rect_height(&pipe_cfg->src_rect); + ot_params.is_wfd = !pdpu->is_rt_pipe; + ot_params.frame_rate = drm_mode_vrefresh(&crtc->mode); + ot_params.vbif_idx = VBIF_RT; + ot_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl; + ot_params.rd = true; + + dpu_vbif_set_ot_limit(dpu_kms, &ot_params); +} + +/** + * _dpu_plane_set_qos_remap - set vbif QoS for the given plane + * @plane: Pointer to drm plane + */ +static void _dpu_plane_set_qos_remap(struct drm_plane *plane) +{ + struct dpu_plane *pdpu = to_dpu_plane(plane); + struct dpu_vbif_set_qos_params qos_params; + struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); + + memset(&qos_params, 0, sizeof(qos_params)); + qos_params.vbif_idx = VBIF_RT; + qos_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl; + qos_params.xin_id = pdpu->pipe_hw->cap->xin_id; + qos_params.num = pdpu->pipe_hw->idx - SSPP_VIG0; + qos_params.is_rt = pdpu->is_rt_pipe; + + DPU_DEBUG_PLANE(pdpu, "pipe:%d vbif:%d xin:%d rt:%d, clk_ctrl:%d\n", + qos_params.num, + qos_params.vbif_idx, + qos_params.xin_id, qos_params.is_rt, + qos_params.clk_ctrl); + + dpu_vbif_set_qos_remap(dpu_kms, &qos_params); +} + +static void _dpu_plane_set_scanout(struct drm_plane *plane, + struct dpu_plane_state *pstate, + struct dpu_hw_pipe_cfg *pipe_cfg, + struct drm_framebuffer *fb) +{ + struct dpu_plane *pdpu = to_dpu_plane(plane); + struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base); + struct msm_gem_address_space *aspace = kms->base.aspace; + int ret; + + ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout); + if (ret == -EAGAIN) + DPU_DEBUG_PLANE(pdpu, "not updating same src addrs\n"); + else if (ret) + DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret); + else if (pdpu->pipe_hw->ops.setup_sourceaddress) { + trace_dpu_plane_set_scanout(pdpu->pipe_hw->idx, + &pipe_cfg->layout, + pstate->multirect_index); + pdpu->pipe_hw->ops.setup_sourceaddress(pdpu->pipe_hw, pipe_cfg, + pstate->multirect_index); + } +} + +static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu, + struct dpu_plane_state *pstate, + uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h, + struct dpu_hw_scaler3_cfg *scale_cfg, + const struct dpu_format *fmt, + uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v) +{ + uint32_t i; + bool inline_rotation = pstate->rotation & DRM_MODE_ROTATE_90; + + /* + * For inline rotation cases, scaler config is post-rotation, + * so swap the dimensions here. However, pixel extension will + * need pre-rotation settings. 
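+	 * E.g. a 1080x1920 source with DRM_MODE_ROTATE_90 is scaled as
+	 * 1920x1080, while pixel extension still sees the original 1080x1920.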
+ */ + if (inline_rotation) + swap(src_w, src_h); + + scale_cfg->phase_step_x[DPU_SSPP_COMP_0] = + mult_frac((1 << PHASE_STEP_SHIFT), src_w, dst_w); + scale_cfg->phase_step_y[DPU_SSPP_COMP_0] = + mult_frac((1 << PHASE_STEP_SHIFT), src_h, dst_h); + + + scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2] = + scale_cfg->phase_step_y[DPU_SSPP_COMP_0] / chroma_subsmpl_v; + scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2] = + scale_cfg->phase_step_x[DPU_SSPP_COMP_0] / chroma_subsmpl_h; + + scale_cfg->phase_step_x[DPU_SSPP_COMP_2] = + scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2]; + scale_cfg->phase_step_y[DPU_SSPP_COMP_2] = + scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2]; + + scale_cfg->phase_step_x[DPU_SSPP_COMP_3] = + scale_cfg->phase_step_x[DPU_SSPP_COMP_0]; + scale_cfg->phase_step_y[DPU_SSPP_COMP_3] = + scale_cfg->phase_step_y[DPU_SSPP_COMP_0]; + + for (i = 0; i < DPU_MAX_PLANES; i++) { + scale_cfg->src_width[i] = src_w; + scale_cfg->src_height[i] = src_h; + if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) { + scale_cfg->src_width[i] /= chroma_subsmpl_h; + scale_cfg->src_height[i] /= chroma_subsmpl_v; + } + + if (pdpu->pipe_hw->cap->features & + BIT(DPU_SSPP_SCALER_QSEED4)) { + scale_cfg->preload_x[i] = DPU_QSEED4_DEFAULT_PRELOAD_H; + scale_cfg->preload_y[i] = DPU_QSEED4_DEFAULT_PRELOAD_V; + } else { + scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H; + scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V; + } + } + if (!(DPU_FORMAT_IS_YUV(fmt)) && (src_h == dst_h) + && (src_w == dst_w)) + return; + + scale_cfg->dst_width = dst_w; + scale_cfg->dst_height = dst_h; + scale_cfg->y_rgb_filter_cfg = DPU_SCALE_BIL; + scale_cfg->uv_filter_cfg = DPU_SCALE_BIL; + scale_cfg->alpha_filter_cfg = DPU_SCALE_ALPHA_BIL; + scale_cfg->lut_flag = 0; + scale_cfg->blend_cfg = 1; + scale_cfg->enable = 1; +} + +static void _dpu_plane_setup_pixel_ext(struct dpu_hw_scaler3_cfg *scale_cfg, + struct dpu_hw_pixel_ext *pixel_ext, + uint32_t src_w, uint32_t src_h, + uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v) +{ + int i; + + for (i = 0; i < DPU_MAX_PLANES; i++) { + if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) { + src_w /= chroma_subsmpl_h; + src_h /= chroma_subsmpl_v; + } + + pixel_ext->num_ext_pxls_top[i] = src_h; + pixel_ext->num_ext_pxls_left[i] = src_w; + } +} + +static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = { + { + /* S15.16 format */ + 0x00012A00, 0x00000000, 0x00019880, + 0x00012A00, 0xFFFF9B80, 0xFFFF3000, + 0x00012A00, 0x00020480, 0x00000000, + }, + /* signed bias */ + { 0xfff0, 0xff80, 0xff80,}, + { 0x0, 0x0, 0x0,}, + /* unsigned clamp */ + { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,}, + { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,}, +}; + +static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = { + { + /* S15.16 format */ + 0x00012A00, 0x00000000, 0x00019880, + 0x00012A00, 0xFFFF9B80, 0xFFFF3000, + 0x00012A00, 0x00020480, 0x00000000, + }, + /* signed bias */ + { 0xffc0, 0xfe00, 0xfe00,}, + { 0x0, 0x0, 0x0,}, + /* unsigned clamp */ + { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,}, + { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,}, +}; + +static const struct dpu_csc_cfg *_dpu_plane_get_csc(struct dpu_plane *pdpu, const struct dpu_format *fmt) +{ + const struct dpu_csc_cfg *csc_ptr; + + if (!pdpu) { + DPU_ERROR("invalid plane\n"); + return NULL; + } + + if (!DPU_FORMAT_IS_YUV(fmt)) + return NULL; + + if (BIT(DPU_SSPP_CSC_10BIT) & pdpu->pipe_hw->cap->features) + csc_ptr = &dpu_csc10_YUV2RGB_601L; + else + csc_ptr = &dpu_csc_YUV2RGB_601L; + + DPU_DEBUG_PLANE(pdpu, "using 0x%X 0x%X 0x%X...\n", + csc_ptr->csc_mv[0], + 
csc_ptr->csc_mv[1],
+		csc_ptr->csc_mv[2]);
+
+	return csc_ptr;
+}
+
+static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
+		struct dpu_plane_state *pstate,
+		const struct dpu_format *fmt, bool color_fill,
+		struct dpu_hw_pipe_cfg *pipe_cfg)
+{
+	const struct drm_format_info *info = drm_format_info(fmt->base.pixel_format);
+	struct dpu_hw_scaler3_cfg scaler3_cfg;
+	struct dpu_hw_pixel_ext pixel_ext;
+	u32 src_width = drm_rect_width(&pipe_cfg->src_rect);
+	u32 src_height = drm_rect_height(&pipe_cfg->src_rect);
+	u32 dst_width = drm_rect_width(&pipe_cfg->dst_rect);
+	u32 dst_height = drm_rect_height(&pipe_cfg->dst_rect);
+
+	memset(&scaler3_cfg, 0, sizeof(scaler3_cfg));
+	memset(&pixel_ext, 0, sizeof(pixel_ext));
+
+	/* update scaler - calculate the default config for QSEED3 */
+	_dpu_plane_setup_scaler3(pdpu, pstate,
+			src_width,
+			src_height,
+			dst_width,
+			dst_height,
+			&scaler3_cfg, fmt,
+			info->hsub, info->vsub);
+
+	/* configure pixel extension based on scaler config */
+	_dpu_plane_setup_pixel_ext(&scaler3_cfg, &pixel_ext,
+			src_width, src_height, info->hsub, info->vsub);
+
+	if (pdpu->pipe_hw->ops.setup_pe)
+		pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
+				&pixel_ext);
+
+	/*
+	 * When programmed in multirect mode, the scaler block is bypassed.
+	 * We still need to update alpha and bitwidth, but ONLY for RECT0.
+	 */
+	if (pdpu->pipe_hw->ops.setup_scaler &&
+			pstate->multirect_index != DPU_SSPP_RECT_1)
+		pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
+				pipe_cfg,
+				&scaler3_cfg);
+}
+
+/**
+ * _dpu_plane_color_fill - enables color fill on plane
+ * @pdpu:   Pointer to DPU plane object
+ * @color:  RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha:  8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
+		uint32_t color, uint32_t alpha)
+{
+	const struct dpu_format *fmt;
+	const struct drm_plane *plane = &pdpu->base;
+	struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state);
+	struct dpu_hw_pipe_cfg pipe_cfg;
+
+	DPU_DEBUG_PLANE(pdpu, "\n");
+
+	/*
+	 * select fill format to match user property expectation,
+	 * h/w only supports RGB variants
+	 */
+	fmt = dpu_get_dpu_format(DRM_FORMAT_ABGR8888);
+
+	/* update sspp */
+	if (fmt && pdpu->pipe_hw->ops.setup_solidfill) {
+		pdpu->pipe_hw->ops.setup_solidfill(pdpu->pipe_hw,
+				(color & 0xFFFFFF) | ((alpha & 0xFF) << 24),
+				pstate->multirect_index);
+
+		/* override scaler config for solid fill */
+		pipe_cfg.dst_rect = pstate->base.dst;
+
+		pipe_cfg.src_rect.x1 = 0;
+		pipe_cfg.src_rect.y1 = 0;
+		pipe_cfg.src_rect.x2 =
+			drm_rect_width(&pipe_cfg.dst_rect);
+		pipe_cfg.src_rect.y2 =
+			drm_rect_height(&pipe_cfg.dst_rect);
+
+		if (pdpu->pipe_hw->ops.setup_format)
+			pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw,
+					fmt, DPU_SSPP_SOLID_FILL,
+					pstate->multirect_index);
+
+		if (pdpu->pipe_hw->ops.setup_rects)
+			pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
+					&pipe_cfg,
+					pstate->multirect_index);
+
+		_dpu_plane_setup_scaler(pdpu, pstate, fmt, true, &pipe_cfg);
+	}
+
+	return 0;
+}
+
+void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state)
+{
+	struct dpu_plane_state *pstate = to_dpu_plane_state(drm_state);
+
+	pstate->multirect_index = DPU_SSPP_RECT_SOLO;
+	pstate->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+}
+
+int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane)
+{
+	struct dpu_plane_state *pstate[R_MAX];
+	const struct drm_plane_state *drm_state[R_MAX];
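+	/*
+	 * Overview of the checks below: parallel fetch is preferred and
+	 * requires each rect to fit within the SSPP line width (halved
+	 * when either rect uses a tiled/UBWC format); failing that, time
+	 * multiplex is used, which requires the two destination rects to
+	 * be vertically separated by at least 2 * max_tile_height lines
+	 * so the single fetch pipe can serve them sequentially.
+	 */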
+	struct drm_rect src[R_MAX], dst[R_MAX];
+	struct dpu_plane *dpu_plane[R_MAX];
+	const struct dpu_format *fmt[R_MAX];
+	int i, buffer_lines;
+	unsigned int max_tile_height = 1;
+	bool parallel_fetch_qualified = true;
+	bool has_tiled_rect = false;
+
+	for (i = 0; i < R_MAX; i++) {
+		const struct msm_format *msm_fmt;
+
+		drm_state[i] = i ? plane->r1 : plane->r0;
+		msm_fmt = msm_framebuffer_format(drm_state[i]->fb);
+		fmt[i] = to_dpu_format(msm_fmt);
+
+		if (DPU_FORMAT_IS_UBWC(fmt[i])) {
+			has_tiled_rect = true;
+			if (fmt[i]->tile_height > max_tile_height)
+				max_tile_height = fmt[i]->tile_height;
+		}
+	}
+
+	for (i = 0; i < R_MAX; i++) {
+		int width_threshold;
+
+		pstate[i] = to_dpu_plane_state(drm_state[i]);
+		dpu_plane[i] = to_dpu_plane(drm_state[i]->plane);
+
+		if (pstate[i] == NULL) {
+			DPU_ERROR("DPU plane state of plane id %d is NULL\n",
+				  drm_state[i]->plane->base.id);
+			return -EINVAL;
+		}
+
+		src[i].x1 = drm_state[i]->src_x >> 16;
+		src[i].y1 = drm_state[i]->src_y >> 16;
+		src[i].x2 = src[i].x1 + (drm_state[i]->src_w >> 16);
+		src[i].y2 = src[i].y1 + (drm_state[i]->src_h >> 16);
+
+		dst[i] = drm_plane_state_dest(drm_state[i]);
+
+		if (drm_rect_calc_hscale(&src[i], &dst[i], 1, 1) != 1 ||
+		    drm_rect_calc_vscale(&src[i], &dst[i], 1, 1) != 1) {
+			DPU_ERROR_PLANE(dpu_plane[i],
+				"scaling is not supported in multirect mode\n");
+			return -EINVAL;
+		}
+
+		if (DPU_FORMAT_IS_YUV(fmt[i])) {
+			DPU_ERROR_PLANE(dpu_plane[i],
+				"Unsupported format for multirect mode\n");
+			return -EINVAL;
+		}
+
+		/*
+		 * SSPP PD_MEM is split in half - one for each RECT.
+		 * Tiled formats need 5 lines of buffering while fetching
+		 * whereas linear formats need only 2 lines.
+		 * So we cannot support more than half of the supported SSPP
+		 * width for tiled formats.
+		 */
+		width_threshold = dpu_plane[i]->catalog->caps->max_linewidth;
+		if (has_tiled_rect)
+			width_threshold /= 2;
+
+		if (parallel_fetch_qualified &&
+		    drm_rect_width(&src[i]) > width_threshold)
+			parallel_fetch_qualified = false;
+
+	}
+
+	/* Validate RECTs and set the mode */
+
+	/* Prefer PARALLEL FETCH Mode over TIME_MX Mode */
+	if (parallel_fetch_qualified) {
+		pstate[R0]->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+		pstate[R1]->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+
+		goto done;
+	}
+
+	/* TIME_MX Mode */
+	buffer_lines = 2 * max_tile_height;
+
+	if (dst[R1].y1 >= dst[R0].y2 + buffer_lines ||
+	    dst[R0].y1 >= dst[R1].y2 + buffer_lines) {
+		pstate[R0]->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+		pstate[R1]->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+	} else {
+		DPU_ERROR(
+			"No multirect mode possible for the planes (%d - %d)\n",
+			drm_state[R0]->plane->base.id,
+			drm_state[R1]->plane->base.id);
+		return -EINVAL;
+	}
+
+done:
+	pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
+	pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
+
+	DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n",
+		pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
+	DPU_DEBUG_PLANE(dpu_plane[R1], "R1: %d - %d\n",
+		pstate[R1]->multirect_mode, pstate[R1]->multirect_index);
+	return 0;
+}
+
+static int dpu_plane_prepare_fb(struct drm_plane *plane,
+		struct drm_plane_state *new_state)
+{
+	struct drm_framebuffer *fb = new_state->fb;
+	struct dpu_plane *pdpu = to_dpu_plane(plane);
+	struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
+	struct dpu_hw_fmt_layout layout;
+	struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
+	int ret;
+
+	if (!new_state->fb)
+		return 0;
+
+	DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
+
+	/* cache aspace */
+	pstate->aspace
= kms->base.aspace; + + /* + * TODO: Need to sort out the msm_framebuffer_prepare() call below so + * we can use msm_atomic_prepare_fb() instead of doing the + * implicit fence and fb prepare by hand here. + */ + drm_gem_plane_helper_prepare_fb(plane, new_state); + + if (pstate->aspace) { + ret = msm_framebuffer_prepare(new_state->fb, + pstate->aspace, pstate->needs_dirtyfb); + if (ret) { + DPU_ERROR("failed to prepare framebuffer\n"); + return ret; + } + } + + /* validate framebuffer layout before commit */ + ret = dpu_format_populate_layout(pstate->aspace, + new_state->fb, &layout); + if (ret) { + DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret); + return ret; + } + + return 0; +} + +static void dpu_plane_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct dpu_plane *pdpu = to_dpu_plane(plane); + struct dpu_plane_state *old_pstate; + + if (!old_state || !old_state->fb) + return; + + old_pstate = to_dpu_plane_state(old_state); + + DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", old_state->fb->base.id); + + msm_framebuffer_cleanup(old_state->fb, old_pstate->aspace, + old_pstate->needs_dirtyfb); +} + +static bool dpu_plane_validate_src(struct drm_rect *src, + struct drm_rect *fb_rect, + uint32_t min_src_size) +{ + /* Ensure fb size is supported */ + if (drm_rect_width(fb_rect) > MAX_IMG_WIDTH || + drm_rect_height(fb_rect) > MAX_IMG_HEIGHT) + return false; + + /* Ensure src rect is above the minimum size */ + if (drm_rect_width(src) < min_src_size || + drm_rect_height(src) < min_src_size) + return false; + + /* Ensure src is fully encapsulated in fb */ + return drm_rect_intersect(fb_rect, src) && + drm_rect_equals(fb_rect, src); +} + +static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu, + const struct dpu_sspp_sub_blks *sblk, + struct drm_rect src, const struct dpu_format *fmt) +{ + size_t num_formats; + const u32 *supported_formats; + + if (!sblk->rotation_cfg) { + DPU_ERROR("invalid rotation cfg\n"); + return -EINVAL; + } + + if (drm_rect_width(&src) > sblk->rotation_cfg->rot_maxheight) { + DPU_DEBUG_PLANE(pdpu, "invalid height for inline rot:%d max:%d\n", + src.y2, sblk->rotation_cfg->rot_maxheight); + return -EINVAL; + } + + supported_formats = sblk->rotation_cfg->rot_format_list; + num_formats = sblk->rotation_cfg->rot_num_formats; + + if (!DPU_FORMAT_IS_UBWC(fmt) || + !dpu_find_format(fmt->base.pixel_format, supported_formats, num_formats)) + return -EINVAL; + + return 0; +} + +static int dpu_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + int ret = 0, min_scale; + struct dpu_plane *pdpu = to_dpu_plane(plane); + struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state); + const struct drm_crtc_state *crtc_state = NULL; + const struct dpu_format *fmt; + struct drm_rect src, dst, fb_rect = { 0 }; + uint32_t min_src_size, max_linewidth; + unsigned int rotation; + uint32_t supported_rotations; + const struct dpu_sspp_cfg *pipe_hw_caps = pdpu->pipe_hw->cap; + const struct dpu_sspp_sub_blks *sblk = pdpu->pipe_hw->cap->sblk; + + if (new_plane_state->crtc) + crtc_state = drm_atomic_get_new_crtc_state(state, + new_plane_state->crtc); + + min_scale = FRAC_16_16(1, sblk->maxupscale); + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, + min_scale, + sblk->maxdwnscale << 16, + true, true); + if (ret) { + DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret); + return ret; + } + if 
(!new_plane_state->visible) + return 0; + + src.x1 = new_plane_state->src_x >> 16; + src.y1 = new_plane_state->src_y >> 16; + src.x2 = src.x1 + (new_plane_state->src_w >> 16); + src.y2 = src.y1 + (new_plane_state->src_h >> 16); + + dst = drm_plane_state_dest(new_plane_state); + + fb_rect.x2 = new_plane_state->fb->width; + fb_rect.y2 = new_plane_state->fb->height; + + max_linewidth = pdpu->catalog->caps->max_linewidth; + + fmt = to_dpu_format(msm_framebuffer_format(new_plane_state->fb)); + + min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1; + + if (DPU_FORMAT_IS_YUV(fmt) && + (!(pipe_hw_caps->features & DPU_SSPP_SCALER) || + !(pipe_hw_caps->features & DPU_SSPP_CSC_ANY))) { + DPU_DEBUG_PLANE(pdpu, + "plane doesn't have scaler/csc for yuv\n"); + return -EINVAL; + + /* check src bounds */ + } else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) { + DPU_DEBUG_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n", + DRM_RECT_ARG(&src)); + return -E2BIG; + + /* valid yuv image */ + } else if (DPU_FORMAT_IS_YUV(fmt) && + (src.x1 & 0x1 || src.y1 & 0x1 || + drm_rect_width(&src) & 0x1 || + drm_rect_height(&src) & 0x1)) { + DPU_DEBUG_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n", + DRM_RECT_ARG(&src)); + return -EINVAL; + + /* min dst support */ + } else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) { + DPU_DEBUG_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n", + DRM_RECT_ARG(&dst)); + return -EINVAL; + + /* check decimated source width */ + } else if (drm_rect_width(&src) > max_linewidth) { + DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n", + DRM_RECT_ARG(&src), max_linewidth); + return -E2BIG; + } + + supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0; + + if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION)) + supported_rotations |= DRM_MODE_ROTATE_90; + + rotation = drm_rotation_simplify(new_plane_state->rotation, + supported_rotations); + + if ((pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION)) && + (rotation & DRM_MODE_ROTATE_90)) { + ret = dpu_plane_check_inline_rotation(pdpu, sblk, src, fmt); + if (ret) + return ret; + } + + pstate->rotation = rotation; + pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state); + + return 0; +} + +void dpu_plane_flush(struct drm_plane *plane) +{ + struct dpu_plane *pdpu; + struct dpu_plane_state *pstate; + + if (!plane || !plane->state) { + DPU_ERROR("invalid plane\n"); + return; + } + + pdpu = to_dpu_plane(plane); + pstate = to_dpu_plane_state(plane->state); + + /* + * These updates have to be done immediately before the plane flush + * timing, and may not be moved to the atomic_update/mode_set functions. 
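+	 * In order of precedence below: an error condition forces a
+	 * full-alpha white fill, an explicit color-fill plane is
+	 * programmed at full alpha, and otherwise a YUV framebuffer has
+	 * its CSC block (re)programmed for the current format.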
+ */ + if (pdpu->is_error) + /* force white frame with 100% alpha pipe output on error */ + _dpu_plane_color_fill(pdpu, 0xFFFFFF, 0xFF); + else if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) + /* force 100% alpha */ + _dpu_plane_color_fill(pdpu, pdpu->color_fill, 0xFF); + else if (pdpu->pipe_hw && pdpu->pipe_hw->ops.setup_csc) { + const struct dpu_format *fmt = to_dpu_format(msm_framebuffer_format(plane->state->fb)); + const struct dpu_csc_cfg *csc_ptr = _dpu_plane_get_csc(pdpu, fmt); + + if (csc_ptr) + pdpu->pipe_hw->ops.setup_csc(pdpu->pipe_hw, csc_ptr); + } + + /* flag h/w flush complete */ + if (plane->state) + pstate->pending = false; +} + +/** + * dpu_plane_set_error: enable/disable error condition + * @plane: pointer to drm_plane structure + * @error: error value to set + */ +void dpu_plane_set_error(struct drm_plane *plane, bool error) +{ + struct dpu_plane *pdpu; + + if (!plane) + return; + + pdpu = to_dpu_plane(plane); + pdpu->is_error = error; +} + +static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) +{ + uint32_t src_flags; + struct dpu_plane *pdpu = to_dpu_plane(plane); + struct drm_plane_state *state = plane->state; + struct dpu_plane_state *pstate = to_dpu_plane_state(state); + struct drm_crtc *crtc = state->crtc; + struct drm_framebuffer *fb = state->fb; + bool is_rt_pipe; + const struct dpu_format *fmt = + to_dpu_format(msm_framebuffer_format(fb)); + struct dpu_hw_pipe_cfg pipe_cfg; + + memset(&pipe_cfg, 0, sizeof(struct dpu_hw_pipe_cfg)); + + _dpu_plane_set_scanout(plane, pstate, &pipe_cfg, fb); + + pstate->pending = true; + + is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT); + pstate->needs_qos_remap |= (is_rt_pipe != pdpu->is_rt_pipe); + pdpu->is_rt_pipe = is_rt_pipe; + + _dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL); + + DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT + ", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src), + crtc->base.id, DRM_RECT_ARG(&state->dst), + (char *)&fmt->base.pixel_format, DPU_FORMAT_IS_UBWC(fmt)); + + pipe_cfg.src_rect = state->src; + + /* state->src is 16.16, src_rect is not */ + pipe_cfg.src_rect.x1 >>= 16; + pipe_cfg.src_rect.x2 >>= 16; + pipe_cfg.src_rect.y1 >>= 16; + pipe_cfg.src_rect.y2 >>= 16; + + pipe_cfg.dst_rect = state->dst; + + /* override for color fill */ + if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) { + /* skip remaining processing on color fill */ + return; + } + + if (pdpu->pipe_hw->ops.setup_rects) { + pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw, + &pipe_cfg, + pstate->multirect_index); + } + + _dpu_plane_setup_scaler(pdpu, pstate, fmt, false, &pipe_cfg); + + if (pdpu->pipe_hw->ops.setup_multirect) + pdpu->pipe_hw->ops.setup_multirect( + pdpu->pipe_hw, + pstate->multirect_index, + pstate->multirect_mode); + + if (pdpu->pipe_hw->ops.setup_format) { + unsigned int rotation = pstate->rotation; + + src_flags = 0x0; + + if (rotation & DRM_MODE_REFLECT_X) + src_flags |= DPU_SSPP_FLIP_LR; + + if (rotation & DRM_MODE_REFLECT_Y) + src_flags |= DPU_SSPP_FLIP_UD; + + if (rotation & DRM_MODE_ROTATE_90) + src_flags |= DPU_SSPP_ROT_90; + + /* update format */ + pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw, fmt, src_flags, + pstate->multirect_index); + + if (pdpu->pipe_hw->ops.setup_cdp) { + struct dpu_hw_cdp_cfg cdp_cfg; + + memset(&cdp_cfg, 0, sizeof(struct dpu_hw_cdp_cfg)); + + cdp_cfg.enable = pdpu->catalog->perf->cdp_cfg + [DPU_PERF_CDP_USAGE_RT].rd_enable; + cdp_cfg.ubwc_meta_enable = + DPU_FORMAT_IS_UBWC(fmt); + cdp_cfg.tile_amortize_enable = 
+ DPU_FORMAT_IS_UBWC(fmt) || + DPU_FORMAT_IS_TILE(fmt); + cdp_cfg.preload_ahead = DPU_SSPP_CDP_PRELOAD_AHEAD_64; + + pdpu->pipe_hw->ops.setup_cdp(pdpu->pipe_hw, &cdp_cfg, pstate->multirect_index); + } + } + + _dpu_plane_set_qos_lut(plane, fb, &pipe_cfg); + _dpu_plane_set_danger_lut(plane, fb); + + if (plane->type != DRM_PLANE_TYPE_CURSOR) { + _dpu_plane_set_qos_ctrl(plane, true, DPU_PLANE_QOS_PANIC_CTRL); + _dpu_plane_set_ot_limit(plane, crtc, &pipe_cfg); + } + + if (pstate->needs_qos_remap) { + pstate->needs_qos_remap = false; + _dpu_plane_set_qos_remap(plane); + } + + _dpu_plane_calc_bw(plane, fb, &pipe_cfg); + + _dpu_plane_calc_clk(plane, &pipe_cfg); +} + +static void _dpu_plane_atomic_disable(struct drm_plane *plane) +{ + struct drm_plane_state *state = plane->state; + struct dpu_plane_state *pstate = to_dpu_plane_state(state); + + trace_dpu_plane_disable(DRMID(plane), false, + pstate->multirect_mode); + + pstate->pending = true; +} + +static void dpu_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct dpu_plane *pdpu = to_dpu_plane(plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + + pdpu->is_error = false; + + DPU_DEBUG_PLANE(pdpu, "\n"); + + if (!new_state->visible) { + _dpu_plane_atomic_disable(plane); + } else { + dpu_plane_sspp_atomic_update(plane); + } +} + +static void dpu_plane_destroy(struct drm_plane *plane) +{ + struct dpu_plane *pdpu = plane ? to_dpu_plane(plane) : NULL; + + DPU_DEBUG_PLANE(pdpu, "\n"); + + if (pdpu) { + _dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL); + + mutex_destroy(&pdpu->lock); + + /* this will destroy the states as well */ + drm_plane_cleanup(plane); + + dpu_hw_sspp_destroy(pdpu->pipe_hw); + + kfree(pdpu); + } +} + +static void dpu_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + __drm_atomic_helper_plane_destroy_state(state); + kfree(to_dpu_plane_state(state)); +} + +static struct drm_plane_state * +dpu_plane_duplicate_state(struct drm_plane *plane) +{ + struct dpu_plane *pdpu; + struct dpu_plane_state *pstate; + struct dpu_plane_state *old_state; + + if (!plane) { + DPU_ERROR("invalid plane\n"); + return NULL; + } else if (!plane->state) { + DPU_ERROR("invalid plane state\n"); + return NULL; + } + + old_state = to_dpu_plane_state(plane->state); + pdpu = to_dpu_plane(plane); + pstate = kmemdup(old_state, sizeof(*old_state), GFP_KERNEL); + if (!pstate) { + DPU_ERROR_PLANE(pdpu, "failed to allocate state\n"); + return NULL; + } + + DPU_DEBUG_PLANE(pdpu, "\n"); + + pstate->pending = false; + + __drm_atomic_helper_plane_duplicate_state(plane, &pstate->base); + + return &pstate->base; +} + +static const char * const multirect_mode_name[] = { + [DPU_SSPP_MULTIRECT_NONE] = "none", + [DPU_SSPP_MULTIRECT_PARALLEL] = "parallel", + [DPU_SSPP_MULTIRECT_TIME_MX] = "time_mx", +}; + +static const char * const multirect_index_name[] = { + [DPU_SSPP_RECT_SOLO] = "solo", + [DPU_SSPP_RECT_0] = "rect_0", + [DPU_SSPP_RECT_1] = "rect_1", +}; + +static const char *dpu_get_multirect_mode(enum dpu_sspp_multirect_mode mode) +{ + if (WARN_ON(mode >= ARRAY_SIZE(multirect_mode_name))) + return "unknown"; + + return multirect_mode_name[mode]; +} + +static const char *dpu_get_multirect_index(enum dpu_sspp_multirect_index index) +{ + if (WARN_ON(index >= ARRAY_SIZE(multirect_index_name))) + return "unknown"; + + return multirect_index_name[index]; +} + +static void dpu_plane_atomic_print_state(struct drm_printer *p, + const struct drm_plane_state 
*state) +{ + const struct dpu_plane_state *pstate = to_dpu_plane_state(state); + const struct dpu_plane *pdpu = to_dpu_plane(state->plane); + + drm_printf(p, "\tstage=%d\n", pstate->stage); + drm_printf(p, "\tsspp=%s\n", pdpu->pipe_hw->cap->name); + drm_printf(p, "\tmultirect_mode=%s\n", dpu_get_multirect_mode(pstate->multirect_mode)); + drm_printf(p, "\tmultirect_index=%s\n", dpu_get_multirect_index(pstate->multirect_index)); +} + +static void dpu_plane_reset(struct drm_plane *plane) +{ + struct dpu_plane *pdpu; + struct dpu_plane_state *pstate; + + if (!plane) { + DPU_ERROR("invalid plane\n"); + return; + } + + pdpu = to_dpu_plane(plane); + DPU_DEBUG_PLANE(pdpu, "\n"); + + /* remove previous state, if present */ + if (plane->state) { + dpu_plane_destroy_state(plane, plane->state); + plane->state = NULL; + } + + pstate = kzalloc(sizeof(*pstate), GFP_KERNEL); + if (!pstate) { + DPU_ERROR_PLANE(pdpu, "failed to allocate state\n"); + return; + } + + __drm_atomic_helper_plane_reset(plane, &pstate->base); +} + +#ifdef CONFIG_DEBUG_FS +void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) +{ + struct dpu_plane *pdpu = to_dpu_plane(plane); + struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane); + + if (!pdpu->is_rt_pipe) + return; + + pm_runtime_get_sync(&dpu_kms->pdev->dev); + _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL); + pm_runtime_put_sync(&dpu_kms->pdev->dev); +} + +/* SSPP live inside dpu_plane private data only. Enumerate them here. */ +void dpu_debugfs_sspp_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root) +{ + struct drm_plane *plane; + struct dentry *entry = debugfs_create_dir("sspp", debugfs_root); + + if (IS_ERR(entry)) + return; + + drm_for_each_plane(plane, dpu_kms->dev) { + struct dpu_plane *pdpu = to_dpu_plane(plane); + + _dpu_hw_sspp_init_debugfs(pdpu->pipe_hw, dpu_kms, entry); + } +} +#endif + +static bool dpu_plane_format_mod_supported(struct drm_plane *plane, + uint32_t format, uint64_t modifier) +{ + if (modifier == DRM_FORMAT_MOD_LINEAR) + return true; + + if (modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED) + return dpu_find_format(format, qcom_compressed_supported_formats, + ARRAY_SIZE(qcom_compressed_supported_formats)); + + return false; +} + +static const struct drm_plane_funcs dpu_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = dpu_plane_destroy, + .reset = dpu_plane_reset, + .atomic_duplicate_state = dpu_plane_duplicate_state, + .atomic_destroy_state = dpu_plane_destroy_state, + .atomic_print_state = dpu_plane_atomic_print_state, + .format_mod_supported = dpu_plane_format_mod_supported, +}; + +static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = { + .prepare_fb = dpu_plane_prepare_fb, + .cleanup_fb = dpu_plane_cleanup_fb, + .atomic_check = dpu_plane_atomic_check, + .atomic_update = dpu_plane_atomic_update, +}; + +enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane) +{ + return plane ? 
to_dpu_plane(plane)->pipe : SSPP_NONE; +} + +/* initialize plane */ +struct drm_plane *dpu_plane_init(struct drm_device *dev, + uint32_t pipe, enum drm_plane_type type, + unsigned long possible_crtcs) +{ + struct drm_plane *plane = NULL; + const uint32_t *format_list; + struct dpu_plane *pdpu; + struct msm_drm_private *priv = dev->dev_private; + struct dpu_kms *kms = to_dpu_kms(priv->kms); + uint32_t num_formats; + uint32_t supported_rotations; + int ret = -EINVAL; + + /* create and zero local structure */ + pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL); + if (!pdpu) { + DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe); + ret = -ENOMEM; + return ERR_PTR(ret); + } + + /* cache local stuff for later */ + plane = &pdpu->base; + pdpu->pipe = pipe; + + /* initialize underlying h/w driver */ + pdpu->pipe_hw = dpu_hw_sspp_init(pipe, kms->mmio, kms->catalog); + if (IS_ERR(pdpu->pipe_hw)) { + DPU_ERROR("[%u]SSPP init failed\n", pipe); + ret = PTR_ERR(pdpu->pipe_hw); + goto clean_plane; + } else if (!pdpu->pipe_hw->cap || !pdpu->pipe_hw->cap->sblk) { + DPU_ERROR("[%u]SSPP init returned invalid cfg\n", pipe); + goto clean_sspp; + } + + format_list = pdpu->pipe_hw->cap->sblk->format_list; + num_formats = pdpu->pipe_hw->cap->sblk->num_formats; + + ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs, + format_list, num_formats, + supported_format_modifiers, type, NULL); + if (ret) + goto clean_sspp; + + pdpu->catalog = kms->catalog; + + ret = drm_plane_create_zpos_property(plane, 0, 0, DPU_ZPOS_MAX); + if (ret) + DPU_ERROR("failed to install zpos property, rc = %d\n", ret); + + drm_plane_create_alpha_property(plane); + drm_plane_create_blend_mode_property(plane, + BIT(DRM_MODE_BLEND_PIXEL_NONE) | + BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE)); + + supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180; + + if (pdpu->pipe_hw->cap->features & BIT(DPU_SSPP_INLINE_ROTATION)) + supported_rotations |= DRM_MODE_ROTATE_MASK; + + drm_plane_create_rotation_property(plane, + DRM_MODE_ROTATE_0, supported_rotations); + + drm_plane_enable_fb_damage_clips(plane); + + /* success! finalize initialization */ + drm_plane_helper_add(plane, &dpu_plane_helper_funcs); + + mutex_init(&pdpu->lock); + + DPU_DEBUG("%s created for pipe:%u id:%u\n", plane->name, + pipe, plane->base.id); + return plane; + +clean_sspp: + if (pdpu && pdpu->pipe_hw) + dpu_hw_sspp_destroy(pdpu->pipe_hw); +clean_plane: + kfree(pdpu); + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h new file mode 100644 index 000000000..b7b1b0519 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef _DPU_PLANE_H_
+#define _DPU_PLANE_H_
+
+#include <drm/drm_plane.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_sspp.h"
+
+/**
+ * struct dpu_plane_state: Define dpu extension of drm plane state object
+ * @base:	base drm plane state object
+ * @aspace:	pointer to address space for input/output buffers
+ * @stage:	assigned by crtc blender
+ * @needs_qos_remap: qos remap settings need to be updated
+ * @multirect_index: index of the rectangle of SSPP
+ * @multirect_mode: parallel or time multiplex multirect mode
+ * @pending:	whether the current update is still pending
+ * @plane_fetch_bw: calculated BW per plane
+ * @plane_clk: calculated clk per plane
+ * @needs_dirtyfb: whether attached CRTC needs pixel data explicitly flushed
+ * @rotation: simplified drm rotation hint
+ */
+struct dpu_plane_state {
+	struct drm_plane_state base;
+	struct msm_gem_address_space *aspace;
+	enum dpu_stage stage;
+	bool needs_qos_remap;
+	uint32_t multirect_index;
+	uint32_t multirect_mode;
+	bool pending;
+
+	u64 plane_fetch_bw;
+	u64 plane_clk;
+
+	bool needs_dirtyfb;
+	unsigned int rotation;
+};
+
+/**
+ * struct dpu_multirect_plane_states: Defines multirect pair of drm plane states
+ * @r0: drm plane configured on rect 0
+ * @r1: drm plane configured on rect 1
+ */
+struct dpu_multirect_plane_states {
+	const struct drm_plane_state *r0;
+	const struct drm_plane_state *r1;
+};
+
+#define to_dpu_plane_state(x) \
+	container_of(x, struct dpu_plane_state, base)
+
+/**
+ * dpu_plane_pipe - return sspp identifier for the given plane
+ * @plane:   Pointer to DRM plane object
+ * Returns: sspp identifier of the given plane
+ */
+enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane);
+
+/**
+ * dpu_plane_flush - final plane operations before commit flush
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_flush(struct drm_plane *plane);
+
+/**
+ * dpu_plane_set_error: enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error);
+
+/**
+ * dpu_plane_init - create new dpu plane for the given pipe
+ * @dev:   Pointer to DRM device
+ * @pipe:  dpu hardware pipe identifier
+ * @type:  Plane type - PRIMARY/OVERLAY/CURSOR
+ * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
+ *
+ */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+		uint32_t pipe, enum drm_plane_type type,
+		unsigned long possible_crtcs);
+
+/**
+ * dpu_plane_validate_multirect_v2 - validate the multirect planes
+ *	against hw limitations
+ * @plane: drm plane states of the multirect pair
+ */
+int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane);
+
+/**
+ * dpu_plane_clear_multirect - clear multirect bits for the given pipe
+ * @drm_state: Pointer to DRM plane state
+ */
+void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state);
+
+/**
+ * dpu_plane_color_fill - enables color fill on plane
+ * @plane:  Pointer to DRM plane object
+ * @color:  RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha:  8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+int dpu_plane_color_fill(struct drm_plane *plane,
+		uint32_t color, uint32_t alpha);
+
+#ifdef CONFIG_DEBUG_FS
+void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable);
+#else
+static inline void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) {}
+#endif
+
+#endif /* _DPU_PLANE_H_ */
diff --git
a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c new file mode 100644 index 000000000..58abf5fe9 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -0,0 +1,674 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt) "[drm:%s] " fmt, __func__ +#include "dpu_kms.h" +#include "dpu_hw_lm.h" +#include "dpu_hw_ctl.h" +#include "dpu_hw_pingpong.h" +#include "dpu_hw_intf.h" +#include "dpu_hw_wb.h" +#include "dpu_hw_dspp.h" +#include "dpu_hw_merge3d.h" +#include "dpu_hw_dsc.h" +#include "dpu_encoder.h" +#include "dpu_trace.h" + + +static inline bool reserved_by_other(uint32_t *res_map, int idx, + uint32_t enc_id) +{ + return res_map[idx] && res_map[idx] != enc_id; +} + +/** + * struct dpu_rm_requirements - Reservation requirements parameter bundle + * @topology: selected topology for the display + * @hw_res: Hardware resources required as reported by the encoders + */ +struct dpu_rm_requirements { + struct msm_display_topology topology; +}; + +int dpu_rm_destroy(struct dpu_rm *rm) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) { + struct dpu_hw_dspp *hw; + + if (rm->dspp_blks[i]) { + hw = to_dpu_hw_dspp(rm->dspp_blks[i]); + dpu_hw_dspp_destroy(hw); + } + } + for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) { + struct dpu_hw_pingpong *hw; + + if (rm->pingpong_blks[i]) { + hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]); + dpu_hw_pingpong_destroy(hw); + } + } + for (i = 0; i < ARRAY_SIZE(rm->merge_3d_blks); i++) { + struct dpu_hw_merge_3d *hw; + + if (rm->merge_3d_blks[i]) { + hw = to_dpu_hw_merge_3d(rm->merge_3d_blks[i]); + dpu_hw_merge_3d_destroy(hw); + } + } + for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) { + struct dpu_hw_mixer *hw; + + if (rm->mixer_blks[i]) { + hw = to_dpu_hw_mixer(rm->mixer_blks[i]); + dpu_hw_lm_destroy(hw); + } + } + for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) { + struct dpu_hw_ctl *hw; + + if (rm->ctl_blks[i]) { + hw = to_dpu_hw_ctl(rm->ctl_blks[i]); + dpu_hw_ctl_destroy(hw); + } + } + for (i = 0; i < ARRAY_SIZE(rm->hw_intf); i++) + dpu_hw_intf_destroy(rm->hw_intf[i]); + + for (i = 0; i < ARRAY_SIZE(rm->dsc_blks); i++) { + struct dpu_hw_dsc *hw; + + if (rm->dsc_blks[i]) { + hw = to_dpu_hw_dsc(rm->dsc_blks[i]); + dpu_hw_dsc_destroy(hw); + } + } + + for (i = 0; i < ARRAY_SIZE(rm->hw_wb); i++) + dpu_hw_wb_destroy(rm->hw_wb[i]); + + return 0; +} + +int dpu_rm_init(struct dpu_rm *rm, + const struct dpu_mdss_cfg *cat, + void __iomem *mmio) +{ + int rc, i; + + if (!rm || !cat || !mmio) { + DPU_ERROR("invalid kms\n"); + return -EINVAL; + } + + /* Clear, setup lists */ + memset(rm, 0, sizeof(*rm)); + + /* Interrogate HW catalog and create tracking items for hw blocks */ + for (i = 0; i < cat->mixer_count; i++) { + struct dpu_hw_mixer *hw; + const struct dpu_lm_cfg *lm = &cat->mixer[i]; + + if (lm->pingpong == PINGPONG_MAX) { + DPU_DEBUG("skip mixer %d without pingpong\n", lm->id); + continue; + } + + if (lm->id < LM_0 || lm->id >= LM_MAX) { + DPU_ERROR("skip mixer %d with invalid id\n", lm->id); + continue; + } + hw = dpu_hw_lm_init(lm->id, mmio, cat); + if (IS_ERR(hw)) { + rc = PTR_ERR(hw); + DPU_ERROR("failed lm object creation: err %d\n", rc); + goto fail; + } + rm->mixer_blks[lm->id - LM_0] = &hw->base; + } + + for (i = 0; i < cat->merge_3d_count; i++) { + struct dpu_hw_merge_3d *hw; + const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i]; + + if (merge_3d->id < MERGE_3D_0 || merge_3d->id >= MERGE_3D_MAX) { + 
DPU_ERROR("skip merge_3d %d with invalid id\n", merge_3d->id); + continue; + } + hw = dpu_hw_merge_3d_init(merge_3d->id, mmio, cat); + if (IS_ERR(hw)) { + rc = PTR_ERR(hw); + DPU_ERROR("failed merge_3d object creation: err %d\n", + rc); + goto fail; + } + rm->merge_3d_blks[merge_3d->id - MERGE_3D_0] = &hw->base; + } + + for (i = 0; i < cat->pingpong_count; i++) { + struct dpu_hw_pingpong *hw; + const struct dpu_pingpong_cfg *pp = &cat->pingpong[i]; + + if (pp->id < PINGPONG_0 || pp->id >= PINGPONG_MAX) { + DPU_ERROR("skip pingpong %d with invalid id\n", pp->id); + continue; + } + hw = dpu_hw_pingpong_init(pp->id, mmio, cat); + if (IS_ERR(hw)) { + rc = PTR_ERR(hw); + DPU_ERROR("failed pingpong object creation: err %d\n", + rc); + goto fail; + } + if (pp->merge_3d && pp->merge_3d < MERGE_3D_MAX) + hw->merge_3d = to_dpu_hw_merge_3d(rm->merge_3d_blks[pp->merge_3d - MERGE_3D_0]); + rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base; + } + + for (i = 0; i < cat->intf_count; i++) { + struct dpu_hw_intf *hw; + const struct dpu_intf_cfg *intf = &cat->intf[i]; + + if (intf->type == INTF_NONE) { + DPU_DEBUG("skip intf %d with type none\n", i); + continue; + } + if (intf->id < INTF_0 || intf->id >= INTF_MAX) { + DPU_ERROR("skip intf %d with invalid id\n", intf->id); + continue; + } + hw = dpu_hw_intf_init(intf->id, mmio, cat); + if (IS_ERR(hw)) { + rc = PTR_ERR(hw); + DPU_ERROR("failed intf object creation: err %d\n", rc); + goto fail; + } + rm->hw_intf[intf->id - INTF_0] = hw; + } + + for (i = 0; i < cat->wb_count; i++) { + struct dpu_hw_wb *hw; + const struct dpu_wb_cfg *wb = &cat->wb[i]; + + if (wb->id < WB_0 || wb->id >= WB_MAX) { + DPU_ERROR("skip intf %d with invalid id\n", wb->id); + continue; + } + + hw = dpu_hw_wb_init(wb->id, mmio, cat); + if (IS_ERR(hw)) { + rc = PTR_ERR(hw); + DPU_ERROR("failed wb object creation: err %d\n", rc); + goto fail; + } + rm->hw_wb[wb->id - WB_0] = hw; + } + + for (i = 0; i < cat->ctl_count; i++) { + struct dpu_hw_ctl *hw; + const struct dpu_ctl_cfg *ctl = &cat->ctl[i]; + + if (ctl->id < CTL_0 || ctl->id >= CTL_MAX) { + DPU_ERROR("skip ctl %d with invalid id\n", ctl->id); + continue; + } + hw = dpu_hw_ctl_init(ctl->id, mmio, cat); + if (IS_ERR(hw)) { + rc = PTR_ERR(hw); + DPU_ERROR("failed ctl object creation: err %d\n", rc); + goto fail; + } + rm->ctl_blks[ctl->id - CTL_0] = &hw->base; + } + + for (i = 0; i < cat->dspp_count; i++) { + struct dpu_hw_dspp *hw; + const struct dpu_dspp_cfg *dspp = &cat->dspp[i]; + + if (dspp->id < DSPP_0 || dspp->id >= DSPP_MAX) { + DPU_ERROR("skip dspp %d with invalid id\n", dspp->id); + continue; + } + hw = dpu_hw_dspp_init(dspp->id, mmio, cat); + if (IS_ERR(hw)) { + rc = PTR_ERR(hw); + DPU_ERROR("failed dspp object creation: err %d\n", rc); + goto fail; + } + rm->dspp_blks[dspp->id - DSPP_0] = &hw->base; + } + + for (i = 0; i < cat->dsc_count; i++) { + struct dpu_hw_dsc *hw; + const struct dpu_dsc_cfg *dsc = &cat->dsc[i]; + + hw = dpu_hw_dsc_init(dsc->id, mmio, cat); + if (IS_ERR_OR_NULL(hw)) { + rc = PTR_ERR(hw); + DPU_ERROR("failed dsc object creation: err %d\n", rc); + goto fail; + } + rm->dsc_blks[dsc->id - DSC_0] = &hw->base; + } + + return 0; + +fail: + dpu_rm_destroy(rm); + + return rc ? 
rc : -EFAULT;
+}
+
+static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
+{
+	return top->num_intf > 1;
+}
+
+/**
+ * _dpu_rm_check_lm_peer - check if a mixer is a peer of the primary
+ * @rm: dpu resource manager handle
+ * @primary_idx: index of primary mixer in rm->mixer_blks[]
+ * @peer_idx: index of other mixer in rm->mixer_blks[]
+ * Return: true if rm->mixer_blks[peer_idx] is a peer of
+ *          rm->mixer_blks[primary_idx]
+ */
+static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
+		int peer_idx)
+{
+	const struct dpu_lm_cfg *prim_lm_cfg;
+	const struct dpu_lm_cfg *peer_cfg;
+
+	prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;
+	peer_cfg = to_dpu_hw_mixer(rm->mixer_blks[peer_idx])->cap;
+
+	if (!test_bit(peer_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
+		DPU_DEBUG("lm %d not peer of lm %d\n", peer_cfg->id,
+				prim_lm_cfg->id);
+		return false;
+	}
+	return true;
+}
+
+/**
+ * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
+ *	proposed use case requirements, incl. hardwired dependent blocks like
+ *	pingpong
+ * @rm: dpu resource manager handle
+ * @global_state: resources shared across multiple kms objects
+ * @enc_id: encoder id requesting for allocation
+ * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
+ *	if lm, and all other hardwired blocks connected to the lm (pp) are
+ *	available and appropriate
+ * @pp_idx: output parameter, index of pingpong block attached to the layer
+ *	mixer in rm->pingpong_blks[].
+ * @dspp_idx: output parameter, index of dspp block attached to the layer
+ *	mixer in rm->dspp_blks[].
+ * @reqs: input parameter, rm requirements for HW blocks needed in the
+ *	datapath.
+ * Return: true if lm matches all requirements, false otherwise
+ */
+static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
+		struct dpu_global_state *global_state,
+		uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
+		struct dpu_rm_requirements *reqs)
+{
+	const struct dpu_lm_cfg *lm_cfg;
+	int idx;
+
+	/* Already reserved?
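+	 * (reserved_by_other() treats a zero entry as free and a matching
+	 * enc_id as this encoder's own reservation, so only a reservation
+	 * held by a different encoder fails the check.)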
*/ + if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) { + DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0); + return false; + } + + lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap; + idx = lm_cfg->pingpong - PINGPONG_0; + if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) { + DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong); + return false; + } + + if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) { + DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id, + lm_cfg->pingpong); + return false; + } + *pp_idx = idx; + + if (!reqs->topology.num_dspp) + return true; + + idx = lm_cfg->dspp - DSPP_0; + if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) { + DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->dspp); + return false; + } + + if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) { + DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id, + lm_cfg->dspp); + return false; + } + *dspp_idx = idx; + + return true; +} + +static int _dpu_rm_reserve_lms(struct dpu_rm *rm, + struct dpu_global_state *global_state, + uint32_t enc_id, + struct dpu_rm_requirements *reqs) + +{ + int lm_idx[MAX_BLOCKS]; + int pp_idx[MAX_BLOCKS]; + int dspp_idx[MAX_BLOCKS] = {0}; + int i, j, lm_count = 0; + + if (!reqs->topology.num_lm) { + DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm); + return -EINVAL; + } + + /* Find a primary mixer */ + for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) && + lm_count < reqs->topology.num_lm; i++) { + if (!rm->mixer_blks[i]) + continue; + + lm_count = 0; + lm_idx[lm_count] = i; + + if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state, + enc_id, i, &pp_idx[lm_count], + &dspp_idx[lm_count], reqs)) { + continue; + } + + ++lm_count; + + /* Valid primary mixer found, find matching peers */ + for (j = i + 1; j < ARRAY_SIZE(rm->mixer_blks) && + lm_count < reqs->topology.num_lm; j++) { + if (!rm->mixer_blks[j]) + continue; + + if (!_dpu_rm_check_lm_peer(rm, i, j)) { + DPU_DEBUG("lm %d not peer of lm %d\n", LM_0 + j, + LM_0 + i); + continue; + } + + if (!_dpu_rm_check_lm_and_get_connected_blks(rm, + global_state, enc_id, j, + &pp_idx[lm_count], &dspp_idx[lm_count], + reqs)) { + continue; + } + + lm_idx[lm_count] = j; + ++lm_count; + } + } + + if (lm_count != reqs->topology.num_lm) { + DPU_DEBUG("unable to find appropriate mixers\n"); + return -ENAVAIL; + } + + for (i = 0; i < lm_count; i++) { + global_state->mixer_to_enc_id[lm_idx[i]] = enc_id; + global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id; + global_state->dspp_to_enc_id[dspp_idx[i]] = + reqs->topology.num_dspp ? 
enc_id : 0; + + trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id, + pp_idx[i] + PINGPONG_0); + } + + return 0; +} + +static int _dpu_rm_reserve_ctls( + struct dpu_rm *rm, + struct dpu_global_state *global_state, + uint32_t enc_id, + const struct msm_display_topology *top) +{ + int ctl_idx[MAX_BLOCKS]; + int i = 0, j, num_ctls; + bool needs_split_display; + + /* each hw_intf needs its own hw_ctrl to program its control path */ + num_ctls = top->num_intf; + + needs_split_display = _dpu_rm_needs_split_display(top); + + for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) { + const struct dpu_hw_ctl *ctl; + unsigned long features; + bool has_split_display; + + if (!rm->ctl_blks[j]) + continue; + if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id)) + continue; + + ctl = to_dpu_hw_ctl(rm->ctl_blks[j]); + features = ctl->caps->features; + has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features; + + DPU_DEBUG("ctl %d caps 0x%lX\n", j + CTL_0, features); + + if (needs_split_display != has_split_display) + continue; + + ctl_idx[i] = j; + DPU_DEBUG("ctl %d match\n", j + CTL_0); + + if (++i == num_ctls) + break; + + } + + if (i != num_ctls) + return -ENAVAIL; + + for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) { + global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id; + trace_dpu_rm_reserve_ctls(i + CTL_0, enc_id); + } + + return 0; +} + +static int _dpu_rm_reserve_dsc(struct dpu_rm *rm, + struct dpu_global_state *global_state, + struct drm_encoder *enc, + const struct msm_display_topology *top) +{ + int num_dsc = top->num_dsc; + int i; + + /* check if DSC required are allocated or not */ + for (i = 0; i < num_dsc; i++) { + if (global_state->dsc_to_enc_id[i]) { + DPU_ERROR("DSC %d is already allocated\n", i); + return -EIO; + } + } + + for (i = 0; i < num_dsc; i++) + global_state->dsc_to_enc_id[i] = enc->base.id; + + return 0; +} + +static int _dpu_rm_make_reservation( + struct dpu_rm *rm, + struct dpu_global_state *global_state, + struct drm_encoder *enc, + struct dpu_rm_requirements *reqs) +{ + int ret; + + ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs); + if (ret) { + DPU_ERROR("unable to find appropriate mixers\n"); + return ret; + } + + ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id, + &reqs->topology); + if (ret) { + DPU_ERROR("unable to find appropriate CTL\n"); + return ret; + } + + ret = _dpu_rm_reserve_dsc(rm, global_state, enc, &reqs->topology); + if (ret) + return ret; + + return ret; +} + +static int _dpu_rm_populate_requirements( + struct drm_encoder *enc, + struct dpu_rm_requirements *reqs, + struct msm_display_topology req_topology) +{ + reqs->topology = req_topology; + + DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n", + reqs->topology.num_lm, reqs->topology.num_enc, + reqs->topology.num_intf); + + return 0; +} + +static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt, + uint32_t enc_id) +{ + int i; + + for (i = 0; i < cnt; i++) { + if (res_mapping[i] == enc_id) + res_mapping[i] = 0; + } +} + +void dpu_rm_release(struct dpu_global_state *global_state, + struct drm_encoder *enc) +{ + _dpu_rm_clear_mapping(global_state->pingpong_to_enc_id, + ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id); + _dpu_rm_clear_mapping(global_state->mixer_to_enc_id, + ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id); + _dpu_rm_clear_mapping(global_state->ctl_to_enc_id, + ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id); + _dpu_rm_clear_mapping(global_state->dsc_to_enc_id, + ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id); 
+	_dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
+		ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
+}
+
+int dpu_rm_reserve(
+		struct dpu_rm *rm,
+		struct dpu_global_state *global_state,
+		struct drm_encoder *enc,
+		struct drm_crtc_state *crtc_state,
+		struct msm_display_topology topology)
+{
+	struct dpu_rm_requirements reqs;
+	int ret;
+
+	/* Check if this is just a page-flip */
+	if (!drm_atomic_crtc_needs_modeset(crtc_state))
+		return 0;
+
+	if (IS_ERR(global_state)) {
+		DPU_ERROR("failed to get global state\n");
+		return PTR_ERR(global_state);
+	}
+
+	DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
+		      enc->base.id, crtc_state->crtc->base.id);
+
+	ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
+	if (ret) {
+		DPU_ERROR("failed to populate hw requirements\n");
+		return ret;
+	}
+
+	ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
+	if (ret)
+		DPU_ERROR("failed to reserve hw resources: %d\n", ret);
+
+	return ret;
+}
+
+int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
+	struct dpu_global_state *global_state, uint32_t enc_id,
+	enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
+{
+	struct dpu_hw_blk **hw_blks;
+	uint32_t *hw_to_enc_id;
+	int i, num_blks, max_blks;
+
+	switch (type) {
+	case DPU_HW_BLK_PINGPONG:
+		hw_blks = rm->pingpong_blks;
+		hw_to_enc_id = global_state->pingpong_to_enc_id;
+		max_blks = ARRAY_SIZE(rm->pingpong_blks);
+		break;
+	case DPU_HW_BLK_LM:
+		hw_blks = rm->mixer_blks;
+		hw_to_enc_id = global_state->mixer_to_enc_id;
+		max_blks = ARRAY_SIZE(rm->mixer_blks);
+		break;
+	case DPU_HW_BLK_CTL:
+		hw_blks = rm->ctl_blks;
+		hw_to_enc_id = global_state->ctl_to_enc_id;
+		max_blks = ARRAY_SIZE(rm->ctl_blks);
+		break;
+	case DPU_HW_BLK_DSPP:
+		hw_blks = rm->dspp_blks;
+		hw_to_enc_id = global_state->dspp_to_enc_id;
+		max_blks = ARRAY_SIZE(rm->dspp_blks);
+		break;
+	case DPU_HW_BLK_DSC:
+		hw_blks = rm->dsc_blks;
+		hw_to_enc_id = global_state->dsc_to_enc_id;
+		max_blks = ARRAY_SIZE(rm->dsc_blks);
+		break;
+	default:
+		DPU_ERROR("blk type %d not managed by rm\n", type);
+		return 0;
+	}
+
+	num_blks = 0;
+	for (i = 0; i < max_blks; i++) {
+		if (hw_to_enc_id[i] != enc_id)
+			continue;
+
+		if (num_blks == blks_size) {
+			DPU_ERROR("More than %d resources assigned to enc %d\n",
+				  blks_size, enc_id);
+			break;
+		}
+		if (!hw_blks[i]) {
+			DPU_ERROR("Allocated resource %d unavailable to assign to enc %d\n",
+				  type, enc_id);
+			break;
+		}
+		blks[num_blks++] = hw_blks[i];
+	}
+
+	return num_blks;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
new file mode 100644
index 000000000..59de72b38
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DPU_RM_H__
+#define __DPU_RM_H__
+
+#include <linux/list.h>
+
+#include "msm_kms.h"
+#include "dpu_hw_top.h"
+
+struct dpu_global_state;
+
+/**
+ * struct dpu_rm - DPU dynamic hardware resource manager
+ * @pingpong_blks: array of pingpong hardware resources
+ * @mixer_blks: array of layer mixer hardware resources
+ * @ctl_blks: array of ctl hardware resources
+ * @hw_intf: array of intf hardware resources
+ * @hw_wb: array of wb hardware resources
+ * @dspp_blks: array of dspp hardware resources
+ * @merge_3d_blks: array of merge_3d hardware resources
+ * @dsc_blks: array of dsc hardware resources
+ */
+struct dpu_rm {
+	struct dpu_hw_blk *pingpong_blks[PINGPONG_MAX - PINGPONG_0];
+	struct dpu_hw_blk *mixer_blks[LM_MAX - LM_0];
+	struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0];
+	struct dpu_hw_intf *hw_intf[INTF_MAX - INTF_0];
+	struct dpu_hw_wb *hw_wb[WB_MAX - WB_0];
+	struct dpu_hw_blk *dspp_blks[DSPP_MAX - DSPP_0];
+	struct dpu_hw_blk *merge_3d_blks[MERGE_3D_MAX - MERGE_3D_0];
+	struct dpu_hw_blk *dsc_blks[DSC_MAX - DSC_0];
+};
+
+/**
+ * dpu_rm_init - Read hardware catalog and create reservation tracking objects
+ *	for all HW blocks.
+ * @rm: DPU Resource Manager handle
+ * @cat: Pointer to hardware catalog
+ * @mmio: mapped register io address of MDP
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_init(struct dpu_rm *rm,
+		const struct dpu_mdss_cfg *cat,
+		void __iomem *mmio);
+
+/**
+ * dpu_rm_destroy - Free all memory allocated by dpu_rm_init
+ * @rm: DPU Resource Manager handle
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_destroy(struct dpu_rm *rm);
+
+/**
+ * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
+ *	the connections in use and the user requirements, specified through
+ *	related topology control properties, and reserve hardware blocks for
+ *	that display chain.
+ *	HW blocks can then be accessed through dpu_rm_get_* functions.
+ *	HW reservations should be released via dpu_rm_release().
+ * @rm: DPU Resource Manager handle
+ * @global_state: resources shared across multiple kms objects
+ * @drm_enc: DRM Encoder handle
+ * @crtc_state: Proposed Atomic DRM CRTC State handle
+ * @topology: Pointer to topology info for the display
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_reserve(struct dpu_rm *rm,
+		struct dpu_global_state *global_state,
+		struct drm_encoder *drm_enc,
+		struct drm_crtc_state *crtc_state,
+		struct msm_display_topology topology);
+
+/**
+ * dpu_rm_release - Given the encoder for the display chain, release any
+ *	HW blocks previously reserved for that use case.
+ * @global_state: resources shared across multiple kms objects
+ * @enc: DRM Encoder handle
+ */
+void dpu_rm_release(struct dpu_global_state *global_state,
+		struct drm_encoder *enc);
+
+/**
+ * Get hw resources of the given type that are assigned to this encoder.
+ */
+int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
+	struct dpu_global_state *global_state, uint32_t enc_id,
+	enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size);
+
+/**
+ * dpu_rm_get_intf - Return a struct dpu_hw_intf instance given its index.
+ * @rm: DPU Resource Manager handle
+ * @intf_idx: INTF's index
+ */
+static inline struct dpu_hw_intf *dpu_rm_get_intf(struct dpu_rm *rm, enum dpu_intf intf_idx)
+{
+	return rm->hw_intf[intf_idx - INTF_0];
+}
+
+/**
+ * dpu_rm_get_wb - Return a struct dpu_hw_wb instance given its index.
+ * @rm: DPU Resource Manager handle
+ * @wb_idx: WB index
+ */
+static inline struct dpu_hw_wb *dpu_rm_get_wb(struct dpu_rm *rm, enum dpu_wb wb_idx)
+{
+	return rm->hw_wb[wb_idx - WB_0];
+}
+
+#endif /* __DPU_RM_H__ */
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
new file mode 100644
index 000000000..76169f406
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -0,0 +1,978 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#if !defined(_DPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _DPU_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drm_rect.h>
+#include "dpu_crtc.h"
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_plane.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dpu
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE dpu_trace
+
+TRACE_EVENT(dpu_perf_set_qos_luts,
+	TP_PROTO(u32 pnum, u32 fmt, bool rt, u32 fl,
+		u32 lut, u32 lut_usage),
+	TP_ARGS(pnum, fmt, rt, fl, lut, lut_usage),
+	TP_STRUCT__entry(
+			__field(u32, pnum)
+			__field(u32, fmt)
+			__field(bool, rt)
+			__field(u32, fl)
+			__field(u64, lut)
+			__field(u32, lut_usage)
+	),
+	TP_fast_assign(
+			__entry->pnum = pnum;
+			__entry->fmt = fmt;
+			__entry->rt = rt;
+			__entry->fl = fl;
+			__entry->lut = lut;
+			__entry->lut_usage = lut_usage;
+	),
+	TP_printk("pnum=%d fmt=%x rt=%d fl=%d lut=0x%llx lut_usage=%d",
+			__entry->pnum, __entry->fmt,
+			__entry->rt, __entry->fl,
+			__entry->lut, __entry->lut_usage)
+);
+
+TRACE_EVENT(dpu_perf_set_danger_luts,
+	TP_PROTO(u32 pnum, u32 fmt, u32 mode, u32 danger_lut,
+		u32 safe_lut),
+	TP_ARGS(pnum, fmt, mode, danger_lut, safe_lut),
+	TP_STRUCT__entry(
+			__field(u32, pnum)
+			__field(u32, fmt)
+			__field(u32, mode)
+			__field(u32, danger_lut)
+			__field(u32, safe_lut)
+	),
+	TP_fast_assign(
+			__entry->pnum = pnum;
+			__entry->fmt = fmt;
+			__entry->mode = mode;
+			__entry->danger_lut = danger_lut;
+			__entry->safe_lut = safe_lut;
+	),
+	TP_printk("pnum=%d fmt=%x mode=%d luts[0x%x, 0x%x]",
+			__entry->pnum, __entry->fmt,
+			__entry->mode, __entry->danger_lut,
+			__entry->safe_lut)
+);
+
+TRACE_EVENT(dpu_perf_set_ot,
+	TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim, u32 vbif_idx),
+	TP_ARGS(pnum, xin_id, rd_lim, vbif_idx),
+	TP_STRUCT__entry(
+			__field(u32, pnum)
+			__field(u32, xin_id)
+			__field(u32, rd_lim)
+			__field(u32, vbif_idx)
+	),
+	TP_fast_assign(
+			__entry->pnum = pnum;
+			__entry->xin_id = xin_id;
+			__entry->rd_lim = rd_lim;
+			__entry->vbif_idx = vbif_idx;
+	),
+	TP_printk("pnum:%d xin_id:%d ot:%d vbif:%d",
+			__entry->pnum, __entry->xin_id, __entry->rd_lim,
+			__entry->vbif_idx)
+)
+
+TRACE_EVENT(dpu_cmd_release_bw,
+	TP_PROTO(u32 crtc_id),
+	TP_ARGS(crtc_id),
+	TP_STRUCT__entry(
+			__field(u32, crtc_id)
+	),
+	TP_fast_assign(
+			__entry->crtc_id = crtc_id;
+	),
+	TP_printk("crtc:%d", __entry->crtc_id)
+);
+
+TRACE_EVENT(tracing_mark_write,
+	TP_PROTO(int pid, const char *name, bool trace_begin),
+	TP_ARGS(pid, name, trace_begin),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(trace_name, name)
+			__field(bool, trace_begin)
+	),
+	TP_fast_assign(
+			__entry->pid = pid;
+			__assign_str(trace_name, name);
+			__entry->trace_begin = trace_begin;
+	),
+	TP_printk("%s|%d|%s", __entry->trace_begin ?
"B" : "E", + __entry->pid, __get_str(trace_name)) +) + +TRACE_EVENT(dpu_trace_counter, + TP_PROTO(int pid, char *name, int value), + TP_ARGS(pid, name, value), + TP_STRUCT__entry( + __field(int, pid) + __string(counter_name, name) + __field(int, value) + ), + TP_fast_assign( + __entry->pid = current->tgid; + __assign_str(counter_name, name); + __entry->value = value; + ), + TP_printk("%d|%s|%d", __entry->pid, + __get_str(counter_name), __entry->value) +) + +TRACE_EVENT(dpu_perf_crtc_update, + TP_PROTO(u32 crtc, u64 bw_ctl, u32 core_clk_rate, + bool stop_req, bool update_bus, bool update_clk), + TP_ARGS(crtc, bw_ctl, core_clk_rate, stop_req, update_bus, update_clk), + TP_STRUCT__entry( + __field(u32, crtc) + __field(u64, bw_ctl) + __field(u32, core_clk_rate) + __field(bool, stop_req) + __field(u32, update_bus) + __field(u32, update_clk) + ), + TP_fast_assign( + __entry->crtc = crtc; + __entry->bw_ctl = bw_ctl; + __entry->core_clk_rate = core_clk_rate; + __entry->stop_req = stop_req; + __entry->update_bus = update_bus; + __entry->update_clk = update_clk; + ), + TP_printk( + "crtc=%d bw_ctl=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d", + __entry->crtc, + __entry->bw_ctl, + __entry->core_clk_rate, + __entry->stop_req, + __entry->update_bus, + __entry->update_clk) +); + +DECLARE_EVENT_CLASS(dpu_irq_template, + TP_PROTO(int irq_idx), + TP_ARGS(irq_idx), + TP_STRUCT__entry( + __field( int, irq_idx ) + ), + TP_fast_assign( + __entry->irq_idx = irq_idx; + ), + TP_printk("irq=%d", __entry->irq_idx) +); +DEFINE_EVENT(dpu_irq_template, dpu_irq_register_success, + TP_PROTO(int irq_idx), + TP_ARGS(irq_idx) +); +DEFINE_EVENT(dpu_irq_template, dpu_irq_unregister_success, + TP_PROTO(int irq_idx), + TP_ARGS(irq_idx) +); + +TRACE_EVENT(dpu_enc_irq_wait_success, + TP_PROTO(uint32_t drm_id, void *func, + int irq_idx, enum dpu_pingpong pp_idx, int atomic_cnt), + TP_ARGS(drm_id, func, irq_idx, pp_idx, atomic_cnt), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + __field( void *, func ) + __field( int, irq_idx ) + __field( enum dpu_pingpong, pp_idx ) + __field( int, atomic_cnt ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + __entry->func = func; + __entry->irq_idx = irq_idx; + __entry->pp_idx = pp_idx; + __entry->atomic_cnt = atomic_cnt; + ), + TP_printk("id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d", + __entry->drm_id, __entry->func, + __entry->irq_idx, __entry->pp_idx, __entry->atomic_cnt) +); + +DECLARE_EVENT_CLASS(dpu_drm_obj_template, + TP_PROTO(uint32_t drm_id), + TP_ARGS(drm_id), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + ), + TP_printk("id=%u", __entry->drm_id) +); +DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_atomic_check, + TP_PROTO(uint32_t drm_id), + TP_ARGS(drm_id) +); +DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_mode_set, + TP_PROTO(uint32_t drm_id), + TP_ARGS(drm_id) +); +DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_disable, + TP_PROTO(uint32_t drm_id), + TP_ARGS(drm_id) +); +DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_kickoff, + TP_PROTO(uint32_t drm_id), + TP_ARGS(drm_id) +); +DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff, + TP_PROTO(uint32_t drm_id), + TP_ARGS(drm_id) +); +DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff_reset, + TP_PROTO(uint32_t drm_id), + TP_ARGS(drm_id) +); +DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_flip, + TP_PROTO(uint32_t drm_id), + TP_ARGS(drm_id) +); +DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_vblank_cb, + TP_PROTO(uint32_t drm_id), + TP_ARGS(drm_id) +); 
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_commit, + TP_PROTO(uint32_t drm_id), + TP_ARGS(drm_id) +); +DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_commit, + TP_PROTO(uint32_t drm_id), + TP_ARGS(drm_id) +); +DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_wait_for_commit_done, + TP_PROTO(uint32_t drm_id), + TP_ARGS(drm_id) +); +DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_runtime_resume, + TP_PROTO(uint32_t drm_id), + TP_ARGS(drm_id) +); + +TRACE_EVENT(dpu_enc_enable, + TP_PROTO(uint32_t drm_id, int hdisplay, int vdisplay), + TP_ARGS(drm_id, hdisplay, vdisplay), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + __field( int, hdisplay ) + __field( int, vdisplay ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + __entry->hdisplay = hdisplay; + __entry->vdisplay = vdisplay; + ), + TP_printk("id=%u, mode=%dx%d", + __entry->drm_id, __entry->hdisplay, __entry->vdisplay) +); + +DECLARE_EVENT_CLASS(dpu_enc_keyval_template, + TP_PROTO(uint32_t drm_id, int val), + TP_ARGS(drm_id, val), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + __field( int, val ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + __entry->val = val; + ), + TP_printk("id=%u, val=%d", __entry->drm_id, __entry->val) +); +DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_underrun_cb, + TP_PROTO(uint32_t drm_id, int count), + TP_ARGS(drm_id, count) +); +DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_trigger_start, + TP_PROTO(uint32_t drm_id, int ctl_idx), + TP_ARGS(drm_id, ctl_idx) +); + +TRACE_EVENT(dpu_enc_atomic_check_flags, + TP_PROTO(uint32_t drm_id, unsigned int flags), + TP_ARGS(drm_id, flags), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + __field( unsigned int, flags ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + __entry->flags = flags; + ), + TP_printk("id=%u, flags=%u", + __entry->drm_id, __entry->flags) +); + +DECLARE_EVENT_CLASS(dpu_enc_id_enable_template, + TP_PROTO(uint32_t drm_id, bool enable), + TP_ARGS(drm_id, enable), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + __field( bool, enable ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + __entry->enable = enable; + ), + TP_printk("id=%u, enable=%s", + __entry->drm_id, __entry->enable ? "true" : "false") +); +DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_rc_helper, + TP_PROTO(uint32_t drm_id, bool enable), + TP_ARGS(drm_id, enable) +); +DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_vblank_cb, + TP_PROTO(uint32_t drm_id, bool enable), + TP_ARGS(drm_id, enable) +); +DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_frame_event_cb, + TP_PROTO(uint32_t drm_id, bool enable), + TP_ARGS(drm_id, enable) +); +DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_phys_cmd_connect_te, + TP_PROTO(uint32_t drm_id, bool enable), + TP_ARGS(drm_id, enable) +); + +TRACE_EVENT(dpu_enc_rc, + TP_PROTO(uint32_t drm_id, u32 sw_event, bool idle_pc_supported, + int rc_state, const char *stage), + TP_ARGS(drm_id, sw_event, idle_pc_supported, rc_state, stage), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + __field( u32, sw_event ) + __field( bool, idle_pc_supported ) + __field( int, rc_state ) + __string( stage_str, stage ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + __entry->sw_event = sw_event; + __entry->idle_pc_supported = idle_pc_supported; + __entry->rc_state = rc_state; + __assign_str(stage_str, stage); + ), + TP_printk("%s: id:%u, sw_event:%d, idle_pc_supported:%s, rc_state:%d", + __get_str(stage_str), __entry->drm_id, __entry->sw_event, + __entry->idle_pc_supported ? 
"true" : "false", + __entry->rc_state) +); + +TRACE_EVENT(dpu_enc_frame_done_cb_not_busy, + TP_PROTO(uint32_t drm_id, u32 event, char *intf_mode, enum dpu_intf intf_idx, + enum dpu_wb wb_idx), + TP_ARGS(drm_id, event, intf_mode, intf_idx, wb_idx), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + __field( u32, event ) + __string( intf_mode_str, intf_mode ) + __field( enum dpu_intf, intf_idx ) + __field( enum dpu_wb, wb_idx ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + __entry->event = event; + __assign_str(intf_mode_str, intf_mode); + __entry->intf_idx = intf_idx; + __entry->wb_idx = wb_idx; + ), + TP_printk("id=%u, event=%u, intf_mode=%s intf=%d wb=%d", __entry->drm_id, + __entry->event, __get_str(intf_mode_str), + __entry->intf_idx, __entry->wb_idx) +); + +TRACE_EVENT(dpu_enc_frame_done_cb, + TP_PROTO(uint32_t drm_id, unsigned int idx, + unsigned long frame_busy_mask), + TP_ARGS(drm_id, idx, frame_busy_mask), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + __field( unsigned int, idx ) + __field( unsigned long, frame_busy_mask ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + __entry->idx = idx; + __entry->frame_busy_mask = frame_busy_mask; + ), + TP_printk("id=%u, idx=%u, frame_busy_mask=%lx", __entry->drm_id, + __entry->idx, __entry->frame_busy_mask) +); + +TRACE_EVENT(dpu_enc_trigger_flush, + TP_PROTO(uint32_t drm_id, char *intf_mode, enum dpu_intf intf_idx, enum dpu_wb wb_idx, + int pending_kickoff_cnt, int ctl_idx, u32 extra_flush_bits, + u32 pending_flush_ret), + TP_ARGS(drm_id, intf_mode, intf_idx, wb_idx, pending_kickoff_cnt, ctl_idx, + extra_flush_bits, pending_flush_ret), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + __string( intf_mode_str, intf_mode ) + __field( enum dpu_intf, intf_idx ) + __field( enum dpu_wb, wb_idx ) + __field( int, pending_kickoff_cnt ) + __field( int, ctl_idx ) + __field( u32, extra_flush_bits ) + __field( u32, pending_flush_ret ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + __assign_str(intf_mode_str, intf_mode); + __entry->intf_idx = intf_idx; + __entry->wb_idx = wb_idx; + __entry->pending_kickoff_cnt = pending_kickoff_cnt; + __entry->ctl_idx = ctl_idx; + __entry->extra_flush_bits = extra_flush_bits; + __entry->pending_flush_ret = pending_flush_ret; + ), + TP_printk("id=%u, intf_mode=%s, intf_idx=%d, wb_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d " + "extra_flush_bits=0x%x pending_flush_ret=0x%x", + __entry->drm_id, __get_str(intf_mode_str), __entry->intf_idx, __entry->wb_idx, + __entry->pending_kickoff_cnt, __entry->ctl_idx, + __entry->extra_flush_bits, __entry->pending_flush_ret) +); + +DECLARE_EVENT_CLASS(dpu_enc_ktime_template, + TP_PROTO(uint32_t drm_id, ktime_t time), + TP_ARGS(drm_id, time), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + __field( ktime_t, time ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + __entry->time = time; + ), + TP_printk("id=%u, time=%lld", __entry->drm_id, + ktime_to_ms(__entry->time)) +); +DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_vsync_event_work, + TP_PROTO(uint32_t drm_id, ktime_t time), + TP_ARGS(drm_id, time) +); +DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_early_kickoff, + TP_PROTO(uint32_t drm_id, ktime_t time), + TP_ARGS(drm_id, time) +); + +DECLARE_EVENT_CLASS(dpu_id_event_template, + TP_PROTO(uint32_t drm_id, u32 event), + TP_ARGS(drm_id, event), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + __field( u32, event ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + __entry->event = event; + ), + TP_printk("id=%u, event=%u", __entry->drm_id, __entry->event) 
+); +DEFINE_EVENT(dpu_id_event_template, dpu_enc_frame_done_timeout, + TP_PROTO(uint32_t drm_id, u32 event), + TP_ARGS(drm_id, event) +); +DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_cb, + TP_PROTO(uint32_t drm_id, u32 event), + TP_ARGS(drm_id, event) +); +DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_done, + TP_PROTO(uint32_t drm_id, u32 event), + TP_ARGS(drm_id, event) +); +DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_more_pending, + TP_PROTO(uint32_t drm_id, u32 event), + TP_ARGS(drm_id, event) +); + +TRACE_EVENT(dpu_enc_wait_event_timeout, + TP_PROTO(uint32_t drm_id, int irq_idx, int rc, s64 time, + s64 expected_time, int atomic_cnt), + TP_ARGS(drm_id, irq_idx, rc, time, expected_time, atomic_cnt), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + __field( int, irq_idx ) + __field( int, rc ) + __field( s64, time ) + __field( s64, expected_time ) + __field( int, atomic_cnt ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + __entry->irq_idx = irq_idx; + __entry->rc = rc; + __entry->time = time; + __entry->expected_time = expected_time; + __entry->atomic_cnt = atomic_cnt; + ), + TP_printk("id=%u, irq_idx=%d, rc=%d, time=%lld, expected=%lld cnt=%d", + __entry->drm_id, __entry->irq_idx, __entry->rc, __entry->time, + __entry->expected_time, __entry->atomic_cnt) +); + +TRACE_EVENT(dpu_enc_phys_cmd_irq_ctrl, + TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, bool enable, + int refcnt), + TP_ARGS(drm_id, pp, enable, refcnt), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + __field( enum dpu_pingpong, pp ) + __field( bool, enable ) + __field( int, refcnt ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + __entry->pp = pp; + __entry->enable = enable; + __entry->refcnt = refcnt; + ), + TP_printk("id=%u, pp=%d, enable=%s, refcnt=%d", __entry->drm_id, + __entry->pp, __entry->enable ? 
"true" : "false", + __entry->refcnt) +); + +TRACE_EVENT(dpu_enc_phys_cmd_pp_tx_done, + TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int new_count, + u32 event), + TP_ARGS(drm_id, pp, new_count, event), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + __field( enum dpu_pingpong, pp ) + __field( int, new_count ) + __field( u32, event ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + __entry->pp = pp; + __entry->new_count = new_count; + __entry->event = event; + ), + TP_printk("id=%u, pp=%d, new_count=%d, event=%u", __entry->drm_id, + __entry->pp, __entry->new_count, __entry->event) +); + +TRACE_EVENT(dpu_enc_phys_cmd_pdone_timeout, + TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int timeout_count, + int kickoff_count, u32 event), + TP_ARGS(drm_id, pp, timeout_count, kickoff_count, event), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + __field( enum dpu_pingpong, pp ) + __field( int, timeout_count ) + __field( int, kickoff_count ) + __field( u32, event ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + __entry->pp = pp; + __entry->timeout_count = timeout_count; + __entry->kickoff_count = kickoff_count; + __entry->event = event; + ), + TP_printk("id=%u, pp=%d, timeout_count=%d, kickoff_count=%d, event=%u", + __entry->drm_id, __entry->pp, __entry->timeout_count, + __entry->kickoff_count, __entry->event) +); + +TRACE_EVENT(dpu_enc_phys_vid_post_kickoff, + TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx), + TP_ARGS(drm_id, intf_idx), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + __field( enum dpu_intf, intf_idx ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + __entry->intf_idx = intf_idx; + ), + TP_printk("id=%u, intf_idx=%d", __entry->drm_id, __entry->intf_idx) +); + +TRACE_EVENT(dpu_enc_phys_vid_irq_ctrl, + TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx, bool enable, + int refcnt), + TP_ARGS(drm_id, intf_idx, enable, refcnt), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + __field( enum dpu_intf, intf_idx ) + __field( bool, enable ) + __field( int, refcnt ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + __entry->intf_idx = intf_idx; + __entry->enable = enable; + __entry->refcnt = refcnt; + ), + TP_printk("id=%u, intf_idx=%d enable=%s refcnt=%d", __entry->drm_id, + __entry->intf_idx, __entry->enable ? "true" : "false", + __entry->drm_id) +); + +TRACE_EVENT(dpu_crtc_setup_mixer, + TP_PROTO(uint32_t crtc_id, uint32_t plane_id, + struct drm_plane_state *state, struct dpu_plane_state *pstate, + uint32_t stage_idx, enum dpu_sspp sspp, uint32_t pixel_format, + uint64_t modifier), + TP_ARGS(crtc_id, plane_id, state, pstate, stage_idx, sspp, + pixel_format, modifier), + TP_STRUCT__entry( + __field( uint32_t, crtc_id ) + __field( uint32_t, plane_id ) + __field( uint32_t, fb_id ) + __field_struct( struct drm_rect, src_rect ) + __field_struct( struct drm_rect, dst_rect ) + __field( uint32_t, stage_idx ) + __field( enum dpu_stage, stage ) + __field( enum dpu_sspp, sspp ) + __field( uint32_t, multirect_idx ) + __field( uint32_t, multirect_mode ) + __field( uint32_t, pixel_format ) + __field( uint64_t, modifier ) + ), + TP_fast_assign( + __entry->crtc_id = crtc_id; + __entry->plane_id = plane_id; + __entry->fb_id = state ? 
state->fb->base.id : 0; + __entry->src_rect = drm_plane_state_src(state); + __entry->dst_rect = drm_plane_state_dest(state); + __entry->stage_idx = stage_idx; + __entry->stage = pstate->stage; + __entry->sspp = sspp; + __entry->multirect_idx = pstate->multirect_index; + __entry->multirect_mode = pstate->multirect_mode; + __entry->pixel_format = pixel_format; + __entry->modifier = modifier; + ), + TP_printk("crtc_id:%u plane_id:%u fb_id:%u src:" DRM_RECT_FP_FMT + " dst:" DRM_RECT_FMT " stage_idx:%u stage:%d, sspp:%d " + "multirect_index:%d multirect_mode:%u pix_format:%u " + "modifier:%llu", + __entry->crtc_id, __entry->plane_id, __entry->fb_id, + DRM_RECT_FP_ARG(&__entry->src_rect), + DRM_RECT_ARG(&__entry->dst_rect), + __entry->stage_idx, __entry->stage, __entry->sspp, + __entry->multirect_idx, __entry->multirect_mode, + __entry->pixel_format, __entry->modifier) +); + +TRACE_EVENT(dpu_crtc_setup_lm_bounds, + TP_PROTO(uint32_t drm_id, int mixer, struct drm_rect *bounds), + TP_ARGS(drm_id, mixer, bounds), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + __field( int, mixer ) + __field_struct( struct drm_rect, bounds ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + __entry->mixer = mixer; + __entry->bounds = *bounds; + ), + TP_printk("id:%u mixer:%d bounds:" DRM_RECT_FMT, __entry->drm_id, + __entry->mixer, DRM_RECT_ARG(&__entry->bounds)) +); + +TRACE_EVENT(dpu_crtc_vblank_enable, + TP_PROTO(uint32_t drm_id, uint32_t enc_id, bool enable, + struct dpu_crtc *crtc), + TP_ARGS(drm_id, enc_id, enable, crtc), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + __field( uint32_t, enc_id ) + __field( bool, enable ) + __field( bool, enabled ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + __entry->enc_id = enc_id; + __entry->enable = enable; + __entry->enabled = crtc->enabled; + ), + TP_printk("id:%u encoder:%u enable:%s state{enabled:%s}", + __entry->drm_id, __entry->enc_id, + __entry->enable ? "true" : "false", + __entry->enabled ? "true" : "false") +); + +DECLARE_EVENT_CLASS(dpu_crtc_enable_template, + TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc), + TP_ARGS(drm_id, enable, crtc), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + __field( bool, enable ) + __field( bool, enabled ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + __entry->enable = enable; + __entry->enabled = crtc->enabled; + ), + TP_printk("id:%u enable:%s state{enabled:%s}", + __entry->drm_id, __entry->enable ? "true" : "false", + __entry->enabled ? 
"true" : "false") +); +DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_enable, + TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc), + TP_ARGS(drm_id, enable, crtc) +); +DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_disable, + TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc), + TP_ARGS(drm_id, enable, crtc) +); +DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_vblank, + TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc), + TP_ARGS(drm_id, enable, crtc) +); + +TRACE_EVENT(dpu_crtc_disable_frame_pending, + TP_PROTO(uint32_t drm_id, int frame_pending), + TP_ARGS(drm_id, frame_pending), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + __field( int, frame_pending ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + __entry->frame_pending = frame_pending; + ), + TP_printk("id:%u frame_pending:%d", __entry->drm_id, + __entry->frame_pending) +); + +TRACE_EVENT(dpu_plane_set_scanout, + TP_PROTO(enum dpu_sspp index, struct dpu_hw_fmt_layout *layout, + enum dpu_sspp_multirect_index multirect_index), + TP_ARGS(index, layout, multirect_index), + TP_STRUCT__entry( + __field( enum dpu_sspp, index ) + __field_struct( struct dpu_hw_fmt_layout, layout ) + __field( enum dpu_sspp_multirect_index, multirect_index) + ), + TP_fast_assign( + __entry->index = index; + __entry->layout = *layout; + __entry->multirect_index = multirect_index; + ), + TP_printk("index:%d layout:{%ux%u @ [%u/%u, %u/%u, %u/%u, %u/%u]} " + "multirect_index:%d", __entry->index, __entry->layout.width, + __entry->layout.height, __entry->layout.plane_addr[0], + __entry->layout.plane_size[0], + __entry->layout.plane_addr[1], + __entry->layout.plane_size[1], + __entry->layout.plane_addr[2], + __entry->layout.plane_size[2], + __entry->layout.plane_addr[3], + __entry->layout.plane_size[3], __entry->multirect_index) +); + +TRACE_EVENT(dpu_plane_disable, + TP_PROTO(uint32_t drm_id, bool is_virtual, uint32_t multirect_mode), + TP_ARGS(drm_id, is_virtual, multirect_mode), + TP_STRUCT__entry( + __field( uint32_t, drm_id ) + __field( bool, is_virtual ) + __field( uint32_t, multirect_mode ) + ), + TP_fast_assign( + __entry->drm_id = drm_id; + __entry->is_virtual = is_virtual; + __entry->multirect_mode = multirect_mode; + ), + TP_printk("id:%u is_virtual:%s multirect_mode:%u", __entry->drm_id, + __entry->is_virtual ? 
"true" : "false", + __entry->multirect_mode) +); + +DECLARE_EVENT_CLASS(dpu_rm_iter_template, + TP_PROTO(uint32_t id, uint32_t enc_id), + TP_ARGS(id, enc_id), + TP_STRUCT__entry( + __field( uint32_t, id ) + __field( uint32_t, enc_id ) + ), + TP_fast_assign( + __entry->id = id; + __entry->enc_id = enc_id; + ), + TP_printk("id:%d enc_id:%u", __entry->id, __entry->enc_id) +); +DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf, + TP_PROTO(uint32_t id, uint32_t enc_id), + TP_ARGS(id, enc_id) +); +DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_ctls, + TP_PROTO(uint32_t id, uint32_t enc_id), + TP_ARGS(id, enc_id) +); + +TRACE_EVENT(dpu_rm_reserve_lms, + TP_PROTO(uint32_t id, uint32_t enc_id, uint32_t pp_id), + TP_ARGS(id, enc_id, pp_id), + TP_STRUCT__entry( + __field( uint32_t, id ) + __field( uint32_t, enc_id ) + __field( uint32_t, pp_id ) + ), + TP_fast_assign( + __entry->id = id; + __entry->enc_id = enc_id; + __entry->pp_id = pp_id; + ), + TP_printk("id:%d enc_id:%u pp_id:%u", __entry->id, + __entry->enc_id, __entry->pp_id) +); + +TRACE_EVENT(dpu_vbif_wait_xin_halt_fail, + TP_PROTO(enum dpu_vbif index, u32 xin_id), + TP_ARGS(index, xin_id), + TP_STRUCT__entry( + __field( enum dpu_vbif, index ) + __field( u32, xin_id ) + ), + TP_fast_assign( + __entry->index = index; + __entry->xin_id = xin_id; + ), + TP_printk("index:%d xin_id:%u", __entry->index, __entry->xin_id) +); + +TRACE_EVENT(dpu_pp_connect_ext_te, + TP_PROTO(enum dpu_pingpong pp, u32 cfg), + TP_ARGS(pp, cfg), + TP_STRUCT__entry( + __field( enum dpu_pingpong, pp ) + __field( u32, cfg ) + ), + TP_fast_assign( + __entry->pp = pp; + __entry->cfg = cfg; + ), + TP_printk("pp:%d cfg:%u", __entry->pp, __entry->cfg) +); + +TRACE_EVENT(dpu_core_irq_register_callback, + TP_PROTO(int irq_idx, void *callback), + TP_ARGS(irq_idx, callback), + TP_STRUCT__entry( + __field( int, irq_idx ) + __field( void *, callback) + ), + TP_fast_assign( + __entry->irq_idx = irq_idx; + __entry->callback = callback; + ), + TP_printk("irq_idx:%d callback:%ps", __entry->irq_idx, + __entry->callback) +); + +TRACE_EVENT(dpu_core_irq_unregister_callback, + TP_PROTO(int irq_idx), + TP_ARGS(irq_idx), + TP_STRUCT__entry( + __field( int, irq_idx ) + ), + TP_fast_assign( + __entry->irq_idx = irq_idx; + ), + TP_printk("irq_idx:%d", __entry->irq_idx) +); + +TRACE_EVENT(dpu_core_perf_update_clk, + TP_PROTO(struct drm_device *dev, bool stop_req, u64 clk_rate), + TP_ARGS(dev, stop_req, clk_rate), + TP_STRUCT__entry( + __string( dev_name, dev->unique ) + __field( bool, stop_req ) + __field( u64, clk_rate ) + ), + TP_fast_assign( + __assign_str(dev_name, dev->unique); + __entry->stop_req = stop_req; + __entry->clk_rate = clk_rate; + ), + TP_printk("dev:%s stop_req:%s clk_rate:%llu", __get_str(dev_name), + __entry->stop_req ? 
"true" : "false", __entry->clk_rate) +); + +TRACE_EVENT(dpu_hw_ctl_update_pending_flush, + TP_PROTO(u32 new_bits, u32 pending_mask), + TP_ARGS(new_bits, pending_mask), + TP_STRUCT__entry( + __field( u32, new_bits ) + __field( u32, pending_mask ) + ), + TP_fast_assign( + __entry->new_bits = new_bits; + __entry->pending_mask = pending_mask; + ), + TP_printk("new=%x existing=%x", __entry->new_bits, + __entry->pending_mask) +); + +DECLARE_EVENT_CLASS(dpu_hw_ctl_pending_flush_template, + TP_PROTO(u32 pending_mask, u32 ctl_flush), + TP_ARGS(pending_mask, ctl_flush), + TP_STRUCT__entry( + __field( u32, pending_mask ) + __field( u32, ctl_flush ) + ), + TP_fast_assign( + __entry->pending_mask = pending_mask; + __entry->ctl_flush = ctl_flush; + ), + TP_printk("pending_mask=%x CTL_FLUSH=%x", __entry->pending_mask, + __entry->ctl_flush) +); +DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_clear_pending_flush, + TP_PROTO(u32 pending_mask, u32 ctl_flush), + TP_ARGS(pending_mask, ctl_flush) +); +DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, + dpu_hw_ctl_trigger_pending_flush, + TP_PROTO(u32 pending_mask, u32 ctl_flush), + TP_ARGS(pending_mask, ctl_flush) +); +DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_trigger_prepare, + TP_PROTO(u32 pending_mask, u32 ctl_flush), + TP_ARGS(pending_mask, ctl_flush) +); +DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_trigger_start, + TP_PROTO(u32 pending_mask, u32 ctl_flush), + TP_ARGS(pending_mask, ctl_flush) +); + +#define DPU_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0) +#define DPU_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1) +#define DPU_ATRACE_FUNC() DPU_ATRACE_BEGIN(__func__) + +#define DPU_ATRACE_INT(name, value) \ + trace_dpu_trace_counter(current->tgid, name, value) + +#endif /* _DPU_TRACE_H_ */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#include diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c new file mode 100644 index 000000000..1305e250b --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c @@ -0,0 +1,358 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
+ */ + +#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ + +#include +#include + +#include "dpu_vbif.h" +#include "dpu_hw_vbif.h" +#include "dpu_trace.h" + +static struct dpu_hw_vbif *dpu_get_vbif(struct dpu_kms *dpu_kms, enum dpu_vbif vbif_idx) +{ + if (vbif_idx < ARRAY_SIZE(dpu_kms->hw_vbif)) + return dpu_kms->hw_vbif[vbif_idx]; + + return NULL; +} + +static const char *dpu_vbif_name(enum dpu_vbif idx) +{ + switch (idx) { + case VBIF_RT: + return "VBIF_RT"; + case VBIF_NRT: + return "VBIF_NRT"; + default: + return "??"; + } +} + +/** + * _dpu_vbif_wait_for_xin_halt - wait for the xin to halt + * @vbif: Pointer to hardware vbif driver + * @xin_id: Client interface identifier + * @return: 0 if success; error code otherwise + */ +static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id) +{ + ktime_t timeout; + bool status; + int rc; + + if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) { + DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL); + return -EINVAL; + } + + timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout); + for (;;) { + status = vbif->ops.get_halt_ctrl(vbif, xin_id); + if (status) + break; + if (ktime_compare_safe(ktime_get(), timeout) > 0) { + status = vbif->ops.get_halt_ctrl(vbif, xin_id); + break; + } + usleep_range(501, 1000); + } + + if (!status) { + rc = -ETIMEDOUT; + DPU_ERROR("%s client %d not halting. TIMEDOUT.\n", + dpu_vbif_name(vbif->idx), xin_id); + } else { + rc = 0; + DRM_DEBUG_ATOMIC("%s client %d is halted\n", + dpu_vbif_name(vbif->idx), xin_id); + } + + return rc; +} + +/** + * _dpu_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters + * @vbif: Pointer to hardware vbif driver + * @ot_lim: Pointer to OT limit to be modified + * @params: Pointer to usecase parameters + */ +static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif, + u32 *ot_lim, struct dpu_vbif_set_ot_params *params) +{ + u64 pps; + const struct dpu_vbif_dynamic_ot_tbl *tbl; + u32 i; + + if (!vbif || !(vbif->cap->features & BIT(DPU_VBIF_QOS_OTLIM))) + return; + + /* Dynamic OT setting done only for WFD */ + if (!params->is_wfd) + return; + + pps = params->frame_rate; + pps *= params->width; + pps *= params->height; + + tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl : + &vbif->cap->dynamic_ot_wr_tbl; + + for (i = 0; i < tbl->count; i++) { + if (pps <= tbl->cfg[i].pps) { + *ot_lim = tbl->cfg[i].ot_limit; + break; + } + } + + DRM_DEBUG_ATOMIC("%s xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n", + dpu_vbif_name(vbif->idx), params->xin_id, + params->width, params->height, params->frame_rate, + pps, *ot_lim); +} + +/** + * _dpu_vbif_get_ot_limit - get OT based on usecase & configuration parameters + * @vbif: Pointer to hardware vbif driver + * @params: Pointer to usecase parameters + * @return: OT limit + */ +static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif, + struct dpu_vbif_set_ot_params *params) +{ + u32 ot_lim = 0; + u32 val; + + if (!vbif || !vbif->cap) { + DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL); + return -EINVAL; + } + + if (vbif->cap->default_ot_wr_limit && !params->rd) + ot_lim = vbif->cap->default_ot_wr_limit; + else if (vbif->cap->default_ot_rd_limit && params->rd) + ot_lim = vbif->cap->default_ot_rd_limit; + + /* + * If default ot is not set from dt/catalog, + * then do not configure it. 
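+	 *
+	 * Worked example (default_ot_rd_limit = 32 is an assumed value,
+	 * for illustration only): a read client starts with ot_lim = 32,
+	 * the dynamic WFD table below may then override it, and if
+	 * get_limit_conf() reports that the very same value is already
+	 * programmed, ot_lim is reset to 0 so the caller skips the
+	 * register update.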
+ */ + if (ot_lim == 0) + goto exit; + + /* Modify the limits if the target and the use case requires it */ + _dpu_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params); + + if (vbif && vbif->ops.get_limit_conf) { + val = vbif->ops.get_limit_conf(vbif, + params->xin_id, params->rd); + if (val == ot_lim) + ot_lim = 0; + } + +exit: + DRM_DEBUG_ATOMIC("%s xin:%d ot_lim:%d\n", + dpu_vbif_name(vbif->idx), params->xin_id, ot_lim); + return ot_lim; +} + +/** + * dpu_vbif_set_ot_limit - set OT based on usecase & configuration parameters + * @dpu_kms: DPU handler + * @params: Pointer to usecase parameters + * + * Note this function would block waiting for bus halt. + */ +void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms, + struct dpu_vbif_set_ot_params *params) +{ + struct dpu_hw_vbif *vbif; + struct dpu_hw_mdp *mdp; + bool forced_on = false; + u32 ot_lim; + int ret; + + mdp = dpu_kms->hw_mdp; + + vbif = dpu_get_vbif(dpu_kms, params->vbif_idx); + if (!vbif || !mdp) { + DRM_DEBUG_ATOMIC("invalid arguments vbif %d mdp %d\n", + vbif != NULL, mdp != NULL); + return; + } + + if (!mdp->ops.setup_clk_force_ctrl || + !vbif->ops.set_limit_conf || + !vbif->ops.set_halt_ctrl) + return; + + /* set write_gather_en for all write clients */ + if (vbif->ops.set_write_gather_en && !params->rd) + vbif->ops.set_write_gather_en(vbif, params->xin_id); + + ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF; + + if (ot_lim == 0) + return; + + trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim, + params->vbif_idx); + + forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true); + + vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim); + + vbif->ops.set_halt_ctrl(vbif, params->xin_id, true); + + ret = _dpu_vbif_wait_for_xin_halt(vbif, params->xin_id); + if (ret) + trace_dpu_vbif_wait_xin_halt_fail(vbif->idx, params->xin_id); + + vbif->ops.set_halt_ctrl(vbif, params->xin_id, false); + + if (forced_on) + mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false); +} + +void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, + struct dpu_vbif_set_qos_params *params) +{ + struct dpu_hw_vbif *vbif; + struct dpu_hw_mdp *mdp; + bool forced_on = false; + const struct dpu_vbif_qos_tbl *qos_tbl; + int i; + + if (!params || !dpu_kms->hw_mdp) { + DPU_ERROR("invalid arguments\n"); + return; + } + mdp = dpu_kms->hw_mdp; + + vbif = dpu_get_vbif(dpu_kms, params->vbif_idx); + + if (!vbif || !vbif->cap) { + DPU_ERROR("invalid vbif %d\n", params->vbif_idx); + return; + } + + if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) { + DRM_DEBUG_ATOMIC("qos remap not supported\n"); + return; + } + + qos_tbl = params->is_rt ? 
&vbif->cap->qos_rt_tbl : + &vbif->cap->qos_nrt_tbl; + + if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) { + DRM_DEBUG_ATOMIC("qos tbl not defined\n"); + return; + } + + forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true); + + for (i = 0; i < qos_tbl->npriority_lvl; i++) { + DRM_DEBUG_ATOMIC("%s xin:%d lvl:%d/%d\n", + dpu_vbif_name(params->vbif_idx), params->xin_id, i, + qos_tbl->priority_lvl[i]); + vbif->ops.set_qos_remap(vbif, params->xin_id, i, + qos_tbl->priority_lvl[i]); + } + + if (forced_on) + mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false); +} + +void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms) +{ + struct dpu_hw_vbif *vbif; + u32 i, pnd, src; + + for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { + vbif = dpu_kms->hw_vbif[i]; + if (vbif && vbif->ops.clear_errors) { + vbif->ops.clear_errors(vbif, &pnd, &src); + if (pnd || src) { + DRM_DEBUG_KMS("%s: pnd 0x%X, src 0x%X\n", + dpu_vbif_name(vbif->idx), pnd, src); + } + } + } +} + +void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms) +{ + struct dpu_hw_vbif *vbif; + int i, j; + + for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { + vbif = dpu_kms->hw_vbif[i]; + if (vbif && vbif->cap && vbif->ops.set_mem_type) { + for (j = 0; j < vbif->cap->memtype_count; j++) + vbif->ops.set_mem_type( + vbif, j, vbif->cap->memtype[j]); + } + } +} + +#ifdef CONFIG_DEBUG_FS + +void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root) +{ + char vbif_name[32]; + struct dentry *entry, *debugfs_vbif; + int i, j; + + entry = debugfs_create_dir("vbif", debugfs_root); + + for (i = 0; i < dpu_kms->catalog->vbif_count; i++) { + const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i]; + + snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id); + + debugfs_vbif = debugfs_create_dir(vbif_name, entry); + + debugfs_create_u32("features", 0600, debugfs_vbif, + (u32 *)&vbif->features); + + debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif, + (u32 *)&vbif->xin_halt_timeout); + + debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif, + (u32 *)&vbif->default_ot_rd_limit); + + debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif, + (u32 *)&vbif->default_ot_wr_limit); + + for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) { + const struct dpu_vbif_dynamic_ot_cfg *cfg = + &vbif->dynamic_ot_rd_tbl.cfg[j]; + + snprintf(vbif_name, sizeof(vbif_name), + "dynamic_ot_rd_%d_pps", j); + debugfs_create_u64(vbif_name, 0400, debugfs_vbif, + (u64 *)&cfg->pps); + snprintf(vbif_name, sizeof(vbif_name), + "dynamic_ot_rd_%d_ot_limit", j); + debugfs_create_u32(vbif_name, 0400, debugfs_vbif, + (u32 *)&cfg->ot_limit); + } + + for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) { + const struct dpu_vbif_dynamic_ot_cfg *cfg = + &vbif->dynamic_ot_wr_tbl.cfg[j]; + + snprintf(vbif_name, sizeof(vbif_name), + "dynamic_ot_wr_%d_pps", j); + debugfs_create_u64(vbif_name, 0400, debugfs_vbif, + (u64 *)&cfg->pps); + snprintf(vbif_name, sizeof(vbif_name), + "dynamic_ot_wr_%d_ot_limit", j); + debugfs_create_u32(vbif_name, 0400, debugfs_vbif, + (u32 *)&cfg->ot_limit); + } + } +} +#endif diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h new file mode 100644 index 000000000..ab490177d --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef __DPU_VBIF_H__ +#define __DPU_VBIF_H__ + +#include "dpu_kms.h" + +struct dpu_vbif_set_ot_params { + u32 xin_id; + u32 num; + u32 width; + u32 height; + u32 frame_rate; + bool rd; + bool is_wfd; + u32 vbif_idx; + u32 clk_ctrl; +}; + +struct dpu_vbif_set_memtype_params { + u32 xin_id; + u32 vbif_idx; + u32 clk_ctrl; + bool is_cacheable; +}; + +/** + * struct dpu_vbif_set_qos_params - QoS remapper parameter + * @vbif_idx: vbif identifier + * @xin_id: client interface identifier + * @clk_ctrl: clock control identifier of the xin + * @num: pipe identifier (debug only) + * @is_rt: true if pipe is used in real-time use case + */ +struct dpu_vbif_set_qos_params { + u32 vbif_idx; + u32 xin_id; + u32 clk_ctrl; + u32 num; + bool is_rt; +}; + +/** + * dpu_vbif_set_ot_limit - set OT limit for vbif client + * @dpu_kms: DPU handler + * @params: Pointer to OT configuration parameters + */ +void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms, + struct dpu_vbif_set_ot_params *params); + +/** + * dpu_vbif_set_qos_remap - set QoS priority level remap + * @dpu_kms: DPU handler + * @params: Pointer to QoS configuration parameters + */ +void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, + struct dpu_vbif_set_qos_params *params); + +/** + * dpu_vbif_clear_errors - clear any vbif errors + * @dpu_kms: DPU handler + */ +void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms); + +/** + * dpu_vbif_init_memtypes - initialize xin memory types for vbif + * @dpu_kms: DPU handler + */ +void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms); + +void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root); + +#endif /* __DPU_VBIF_H__ */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c new file mode 100644 index 000000000..2a5a68366 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include + +#include "dpu_writeback.h" + +static int dpu_wb_conn_get_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct msm_drm_private *priv = dev->dev_private; + struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); + + /* + * We should ideally be limiting the modes only to the maxlinewidth but + * on some chipsets this will allow even 4k modes to be added which will + * fail the per SSPP bandwidth checks. So, till we have dual-SSPP support + * and source split support added lets limit the modes based on max_mixer_width + * as 4K modes can then be supported. 
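+	 * For illustration, assuming a catalog max_mixer_width of 2560:
+	 * drm_add_modes_noedid() below would then register only modes up
+	 * to 2560 pixels wide, while the height cap still comes from
+	 * dev->mode_config.max_height.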
+ */ + return drm_add_modes_noedid(connector, dpu_kms->catalog->caps->max_mixer_width, + dev->mode_config.max_height); +} + +static const struct drm_connector_funcs dpu_wb_conn_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int dpu_wb_conn_prepare_job(struct drm_writeback_connector *connector, + struct drm_writeback_job *job) +{ + + struct dpu_wb_connector *dpu_wb_conn = to_dpu_wb_conn(connector); + + if (!job->fb) + return 0; + + dpu_encoder_prepare_wb_job(dpu_wb_conn->wb_enc, job); + + return 0; +} + +static void dpu_wb_conn_cleanup_job(struct drm_writeback_connector *connector, + struct drm_writeback_job *job) +{ + struct dpu_wb_connector *dpu_wb_conn = to_dpu_wb_conn(connector); + + if (!job->fb) + return; + + dpu_encoder_cleanup_wb_job(dpu_wb_conn->wb_enc, job); +} + +static const struct drm_connector_helper_funcs dpu_wb_conn_helper_funcs = { + .get_modes = dpu_wb_conn_get_modes, + .prepare_writeback_job = dpu_wb_conn_prepare_job, + .cleanup_writeback_job = dpu_wb_conn_cleanup_job, +}; + +int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc, + const u32 *format_list, u32 num_formats) +{ + struct dpu_wb_connector *dpu_wb_conn; + int rc = 0; + + dpu_wb_conn = devm_kzalloc(dev->dev, sizeof(*dpu_wb_conn), GFP_KERNEL); + if (!dpu_wb_conn) + return -ENOMEM; + + drm_connector_helper_add(&dpu_wb_conn->base.base, &dpu_wb_conn_helper_funcs); + + /* DPU initializes the encoder and sets it up completely for writeback + * cases and hence should use the new API drm_writeback_connector_init_with_encoder + * to initialize the writeback connector + */ + rc = drm_writeback_connector_init_with_encoder(dev, &dpu_wb_conn->base, enc, + &dpu_wb_conn_funcs, format_list, num_formats); + + if (!rc) + dpu_wb_conn->wb_enc = enc; + + return rc; +} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h new file mode 100644 index 000000000..5a75ea916 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _DPU_WRITEBACK_H +#define _DPU_WRITEBACK_H + +#include +#include +#include +#include + +#include "msm_drv.h" +#include "dpu_kms.h" +#include "dpu_encoder_phys.h" + +struct dpu_wb_connector { + struct drm_writeback_connector base; + struct drm_encoder *wb_enc; +}; + +static inline struct dpu_wb_connector *to_dpu_wb_conn(struct drm_writeback_connector *conn) +{ + return container_of(conn, struct dpu_wb_connector, base); +} + +int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc, + const u32 *format_list, u32 num_formats); + +#endif /*_DPU_WRITEBACK_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h new file mode 100644 index 000000000..9fc9dbde8 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h @@ -0,0 +1,1155 @@ +#ifndef __MEDIA_INFO_H__ +#define __MEDIA_INFO_H__ + +#ifndef MSM_MEDIA_ALIGN +#define MSM_MEDIA_ALIGN(__sz, __align) (((__align) & ((__align) - 1)) ?\ + ((((__sz) + (__align) - 1) / (__align)) * (__align)) :\ + (((__sz) + (__align) - 1) & (~((__align) - 1)))) +#endif + +#ifndef MSM_MEDIA_ROUNDUP +#define MSM_MEDIA_ROUNDUP(__sz, __r) (((__sz) + ((__r) - 1)) / (__r)) +#endif + +#ifndef MSM_MEDIA_MAX +#define MSM_MEDIA_MAX(__a, __b) ((__a) > (__b)?(__a):(__b)) +#endif + +enum color_fmts { + /* Venus NV12: + * YUV 4:2:0 image with a plane of 8 bit Y samples followed + * by an interleaved U/V plane containing 8 bit 2x2 subsampled + * colour difference samples. + * + * <-------- Y/UV_Stride --------> + * <------- Width -------> + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * U V U V U V U V U V U V . . . . ^ + * U V U V U V U V U V U V . . . . | + * U V U V U V U V U V U V . . . . | + * U V U V U V U V U V U V . . . . UV_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . --> Buffer size alignment + * + * Y_Stride : Width aligned to 128 + * UV_Stride : Width aligned to 128 + * Y_Scanlines: Height aligned to 32 + * UV_Scanlines: Height/2 aligned to 16 + * Extradata: Arbitrary (software-imposed) padding + * Total size = align((Y_Stride * Y_Scanlines + * + UV_Stride * UV_Scanlines + * + max(Extradata, Y_Stride * 8), 4096) + */ + COLOR_FMT_NV12, + + /* Venus NV21: + * YUV 4:2:0 image with a plane of 8 bit Y samples followed + * by an interleaved V/U plane containing 8 bit 2x2 subsampled + * colour difference samples. + * + * <-------- Y/UV_Stride --------> + * <------- Width -------> + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * V U V U V U V U V U V U . . . . ^ + * V U V U V U V U V U V U . . . . | + * V U V U V U V U V U V U . . . . 
| + * V U V U V U V U V U V U . . . . UV_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . --> Padding & Buffer size alignment + * + * Y_Stride : Width aligned to 128 + * UV_Stride : Width aligned to 128 + * Y_Scanlines: Height aligned to 32 + * UV_Scanlines: Height/2 aligned to 16 + * Extradata: Arbitrary (software-imposed) padding + * Total size = align((Y_Stride * Y_Scanlines + * + UV_Stride * UV_Scanlines + * + max(Extradata, Y_Stride * 8), 4096) + */ + COLOR_FMT_NV21, + /* Venus NV12_MVTB: + * Two YUV 4:2:0 images/views one after the other + * in a top-bottom layout, same as NV12 + * with a plane of 8 bit Y samples followed + * by an interleaved U/V plane containing 8 bit 2x2 subsampled + * colour difference samples. + * + * + * <-------- Y/UV_Stride --------> + * <------- Width -------> + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ ^ + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | | + * . . . . . . . . . . . . . . . . | View_1 + * . . . . . . . . . . . . . . . . | | + * . . . . . . . . . . . . . . . . | | + * . . . . . . . . . . . . . . . . V | + * U V U V U V U V U V U V . . . . ^ | + * U V U V U V U V U V U V . . . . | | + * U V U V U V U V U V U V . . . . | | + * U V U V U V U V U V U V . . . . UV_Scanlines | + * . . . . . . . . . . . . . . . . | | + * . . . . . . . . . . . . . . . . V V + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ ^ + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | | + * . . . . . . . . . . . . . . . . | View_2 + * . . . . . . . . . . . . . . . . | | + * . . . . . . . . . . . . . . . . | | + * . . . . . . . . . . . . . . . . V | + * U V U V U V U V U V U V . . . . ^ | + * U V U V U V U V U V U V . . . . | | + * U V U V U V U V U V U V . . . . | | + * U V U V U V U V U V U V . . . . UV_Scanlines | + * . . . . . . . . . . . . . . . . | | + * . . . . . . . . . . . . . . . . V V + * . . . . . . . . . . . . . . . . --> Buffer size alignment + * + * Y_Stride : Width aligned to 128 + * UV_Stride : Width aligned to 128 + * Y_Scanlines: Height aligned to 32 + * UV_Scanlines: Height/2 aligned to 16 + * View_1 begin at: 0 (zero) + * View_2 begin at: Y_Stride * Y_Scanlines + UV_Stride * UV_Scanlines + * Extradata: Arbitrary (software-imposed) padding + * Total size = align((2*(Y_Stride * Y_Scanlines) + * + 2*(UV_Stride * UV_Scanlines) + Extradata), 4096) + */ + COLOR_FMT_NV12_MVTB, + /* + * The buffer can be of 2 types: + * (1) Venus NV12 UBWC Progressive + * (2) Venus NV12 UBWC Interlaced + * + * (1) Venus NV12 UBWC Progressive Buffer Format: + * Compressed Macro-tile format for NV12. + * Contains 4 planes in the following order - + * (A) Y_Meta_Plane + * (B) Y_UBWC_Plane + * (C) UV_Meta_Plane + * (D) UV_UBWC_Plane + * + * Y_Meta_Plane consists of meta information to decode compressed + * tile data in Y_UBWC_Plane. + * Y_UBWC_Plane consists of Y data in compressed macro-tile format. 
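+ * (The tile width/height terms used in the size formulas further
+ * below, such as Y_TileWidth and Y_TileHeight, are fixed properties
+ * of the UBWC hardware generation, not of the individual frame being
+ * encoded.)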
+ * UBWC decoder block will use the Y_Meta_Plane data together with + * Y_UBWC_Plane data to produce loss-less uncompressed 8 bit Y samples. + * + * UV_Meta_Plane consists of meta information to decode compressed + * tile data in UV_UBWC_Plane. + * UV_UBWC_Plane consists of UV data in compressed macro-tile format. + * UBWC decoder block will use UV_Meta_Plane data together with + * UV_UBWC_Plane data to produce loss-less uncompressed 8 bit 2x2 + * subsampled color difference samples. + * + * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable + * and randomly accessible. There is no dependency between tiles. + * + * <----- Y_Meta_Stride ----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Height | + * M M M M M M M M M M M M . . | Meta_Y_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . V + * <--Compressed tile Y Stride---> + * <------- Width -------> + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^ + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . V + * <----- UV_Meta_Stride ----> + * M M M M M M M M M M M M . . ^ + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . M_UV_Scanlines + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * <--Compressed tile UV Stride---> + * U* V* U* V* U* V* U* V* . . . . ^ + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . UV_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * + * Y_Stride = align(Width, 128) + * UV_Stride = align(Width, 128) + * Y_Scanlines = align(Height, 32) + * UV_Scanlines = align(Height/2, 16) + * Y_UBWC_Plane_size = align(Y_Stride * Y_Scanlines, 4096) + * UV_UBWC_Plane_size = align(UV_Stride * UV_Scanlines, 4096) + * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64) + * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16) + * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096) + * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64) + * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16) + * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096) + * Extradata = 8k + * + * Total size = align( Y_UBWC_Plane_size + UV_UBWC_Plane_size + + * Y_Meta_Plane_size + UV_Meta_Plane_size + * + max(Extradata, Y_Stride * 48), 4096) + * + * + * (2) Venus NV12 UBWC Interlaced Buffer Format: + * Compressed Macro-tile format for NV12 interlaced. 
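+ * Each field (top and bottom) is laid out exactly like the
+ * progressive format above, i.e. a meta plane followed by a UBWC
+ * data plane, first for Y and then for UV, which is why this
+ * variant carries twice the plane count.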
+ * Contains 8 planes in the following order - + * (A) Y_Meta_Top_Field_Plane + * (B) Y_UBWC_Top_Field_Plane + * (C) UV_Meta_Top_Field_Plane + * (D) UV_UBWC_Top_Field_Plane + * (E) Y_Meta_Bottom_Field_Plane + * (F) Y_UBWC_Bottom_Field_Plane + * (G) UV_Meta_Bottom_Field_Plane + * (H) UV_UBWC_Bottom_Field_Plane + * Y_Meta_Top_Field_Plane consists of meta information to decode + * compressed tile data for Y_UBWC_Top_Field_Plane. + * Y_UBWC_Top_Field_Plane consists of Y data in compressed macro-tile + * format for top field of an interlaced frame. + * UBWC decoder block will use the Y_Meta_Top_Field_Plane data together + * with Y_UBWC_Top_Field_Plane data to produce loss-less uncompressed + * 8 bit Y samples for top field of an interlaced frame. + * + * UV_Meta_Top_Field_Plane consists of meta information to decode + * compressed tile data in UV_UBWC_Top_Field_Plane. + * UV_UBWC_Top_Field_Plane consists of UV data in compressed macro-tile + * format for top field of an interlaced frame. + * UBWC decoder block will use UV_Meta_Top_Field_Plane data together + * with UV_UBWC_Top_Field_Plane data to produce loss-less uncompressed + * 8 bit subsampled color difference samples for top field of an + * interlaced frame. + * + * Each tile in Y_UBWC_Top_Field_Plane/UV_UBWC_Top_Field_Plane is + * independently decodable and randomly accessible. There is no + * dependency between tiles. + * + * Y_Meta_Bottom_Field_Plane consists of meta information to decode + * compressed tile data for Y_UBWC_Bottom_Field_Plane. + * Y_UBWC_Bottom_Field_Plane consists of Y data in compressed macro-tile + * format for bottom field of an interlaced frame. + * UBWC decoder block will use the Y_Meta_Bottom_Field_Plane data + * together with Y_UBWC_Bottom_Field_Plane data to produce loss-less + * uncompressed 8 bit Y samples for bottom field of an interlaced frame. + * + * UV_Meta_Bottom_Field_Plane consists of meta information to decode + * compressed tile data in UV_UBWC_Bottom_Field_Plane. + * UV_UBWC_Bottom_Field_Plane consists of UV data in compressed + * macro-tile format for bottom field of an interlaced frame. + * UBWC decoder block will use UV_Meta_Bottom_Field_Plane data together + * with UV_UBWC_Bottom_Field_Plane data to produce loss-less + * uncompressed 8 bit subsampled color difference samples for bottom + * field of an interlaced frame. + * + * Each tile in Y_UBWC_Bottom_Field_Plane/UV_UBWC_Bottom_Field_Plane is + * independently decodable and randomly accessible. There is no + * dependency between tiles. + * + * <-----Y_TF_Meta_Stride----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Half_height | + * M M M M M M M M M M M M . . | Meta_Y_TF_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . V + * <-Compressed tile Y_TF Stride-> + * <------- Width -------> + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^ + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_TF_Scanlines + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . 
. . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . V + * <----UV_TF_Meta_Stride----> + * M M M M M M M M M M M M . . ^ + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . M_UV_TF_Scanlines + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * <-Compressed tile UV_TF Stride-> + * U* V* U* V* U* V* U* V* . . . . ^ + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . UV_TF_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * <-----Y_BF_Meta_Stride----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Half_height | + * M M M M M M M M M M M M . . | Meta_Y_BF_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . V + * <-Compressed tile Y_BF Stride-> + * <------- Width -------> + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^ + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_BF_Scanlines + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . V + * <----UV_BF_Meta_Stride----> + * M M M M M M M M M M M M . . ^ + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . M_UV_BF_Scanlines + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * <-Compressed tile UV_BF Stride-> + * U* V* U* V* U* V* U* V* . . . . ^ + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . UV_BF_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . 
-------> Buffer size aligned to 4k + * + * Half_height = (Height+1)>>1 + * Y_TF_Stride = align(Width, 128) + * UV_TF_Stride = align(Width, 128) + * Y_TF_Scanlines = align(Half_height, 32) + * UV_TF_Scanlines = align((Half_height+1)/2, 32) + * Y_UBWC_TF_Plane_size = align(Y_TF_Stride * Y_TF_Scanlines, 4096) + * UV_UBWC_TF_Plane_size = align(UV_TF_Stride * UV_TF_Scanlines, 4096) + * Y_TF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64) + * Y_TF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16) + * Y_TF_Meta_Plane_size = + * align(Y_TF_Meta_Stride * Y_TF_Meta_Scanlines, 4096) + * UV_TF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64) + * UV_TF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16) + * UV_TF_Meta_Plane_size = + * align(UV_TF_Meta_Stride * UV_TF_Meta_Scanlines, 4096) + * Y_BF_Stride = align(Width, 128) + * UV_BF_Stride = align(Width, 128) + * Y_BF_Scanlines = align(Half_height, 32) + * UV_BF_Scanlines = align((Half_height+1)/2, 32) + * Y_UBWC_BF_Plane_size = align(Y_BF_Stride * Y_BF_Scanlines, 4096) + * UV_UBWC_BF_Plane_size = align(UV_BF_Stride * UV_BF_Scanlines, 4096) + * Y_BF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64) + * Y_BF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16) + * Y_BF_Meta_Plane_size = + * align(Y_BF_Meta_Stride * Y_BF_Meta_Scanlines, 4096) + * UV_BF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64) + * UV_BF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16) + * UV_BF_Meta_Plane_size = + * align(UV_BF_Meta_Stride * UV_BF_Meta_Scanlines, 4096) + * Extradata = 8k + * + * Total size = align( Y_UBWC_TF_Plane_size + UV_UBWC_TF_Plane_size + + * Y_TF_Meta_Plane_size + UV_TF_Meta_Plane_size + + * Y_UBWC_BF_Plane_size + UV_UBWC_BF_Plane_size + + * Y_BF_Meta_Plane_size + UV_BF_Meta_Plane_size + + * + max(Extradata, Y_TF_Stride * 48), 4096) + */ + COLOR_FMT_NV12_UBWC, + /* Venus NV12 10-bit UBWC: + * Compressed Macro-tile format for NV12. + * Contains 4 planes in the following order - + * (A) Y_Meta_Plane + * (B) Y_UBWC_Plane + * (C) UV_Meta_Plane + * (D) UV_UBWC_Plane + * + * Y_Meta_Plane consists of meta information to decode compressed + * tile data in Y_UBWC_Plane. + * Y_UBWC_Plane consists of Y data in compressed macro-tile format. + * UBWC decoder block will use the Y_Meta_Plane data together with + * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples. + * + * UV_Meta_Plane consists of meta information to decode compressed + * tile data in UV_UBWC_Plane. + * UV_UBWC_Plane consists of UV data in compressed macro-tile format. + * UBWC decoder block will use UV_Meta_Plane data together with + * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2 + * subsampled color difference samples. + * + * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable + * and randomly accessible. There is no dependency between tiles. + * + * <----- Y_Meta_Stride -----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Height | + * M M M M M M M M M M M M . . | Meta_Y_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . V + * <--Compressed tile Y Stride---> + * <------- Width -------> + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . 
^ ^ + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . V + * <----- UV_Meta_Stride ----> + * M M M M M M M M M M M M . . ^ + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . M_UV_Scanlines + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * <--Compressed tile UV Stride---> + * U* V* U* V* U* V* U* V* . . . . ^ + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . UV_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * + * + * Y_Stride = align(Width * 4/3, 128) + * UV_Stride = align(Width * 4/3, 128) + * Y_Scanlines = align(Height, 32) + * UV_Scanlines = align(Height/2, 16) + * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096) + * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096) + * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64) + * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16) + * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096) + * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64) + * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16) + * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096) + * Extradata = 8k + * + * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size + + * Y_Meta_Plane_size + UV_Meta_Plane_size + * + max(Extradata, Y_Stride * 48), 4096) + */ + COLOR_FMT_NV12_BPP10_UBWC, + /* Venus RGBA8888 format: + * Contains 1 plane in the following order - + * (A) RGBA plane + * + * <-------- RGB_Stride --------> + * <------- Width -------> + * R R R R R R R R R R R R . . . . ^ ^ + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . Height | + * R R R R R R R R R R R R . . . . | RGB_Scanlines + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * + * RGB_Stride = align(Width * 4, 128) + * RGB_Scanlines = align(Height, 32) + * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096) + * Extradata = 8k + * + * Total size = align(RGB_Plane_size + Extradata, 4096) + */ + COLOR_FMT_RGBA8888, + /* Venus RGBA8888 UBWC format: + * Contains 2 planes in the following order - + * (A) Meta plane + * (B) RGBA plane + * + * <--- RGB_Meta_Stride ----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Height | + * M M M M M M M M M M M M . . | Meta_RGB_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . 
-------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . V + * <-------- RGB_Stride --------> + * <------- Width -------> + * R R R R R R R R R R R R . . . . ^ ^ + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . Height | + * R R R R R R R R R R R R . . . . | RGB_Scanlines + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . V + * + * RGB_Stride = align(Width * 4, 128) + * RGB_Scanlines = align(Height, 32) + * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096) + * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64) + * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16) + * RGB_Meta_Plane_size = align(RGB_Meta_Stride * + * RGB_Meta_Scanlines, 4096) + * Extradata = 8k + * + * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size + + * Extradata, 4096) + */ + COLOR_FMT_RGBA8888_UBWC, + /* Venus RGBA1010102 UBWC format: + * Contains 2 planes in the following order - + * (A) Meta plane + * (B) RGBA plane + * + * <--- RGB_Meta_Stride ----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Height | + * M M M M M M M M M M M M . . | Meta_RGB_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . V + * <-------- RGB_Stride --------> + * <------- Width -------> + * R R R R R R R R R R R R . . . . ^ ^ + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . Height | + * R R R R R R R R R R R R . . . . | RGB_Scanlines + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . V + * + * RGB_Stride = align(Width * 4, 256) + * RGB_Scanlines = align(Height, 16) + * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096) + * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64) + * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16) + * RGB_Meta_Plane_size = align(RGB_Meta_Stride * + * RGB_Meta_Scanlines, 4096) + * Extradata = 8k + * + * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size + + * Extradata, 4096) + */ + COLOR_FMT_RGBA1010102_UBWC, + /* Venus RGB565 UBWC format: + * Contains 2 planes in the following order - + * (A) Meta plane + * (B) RGB plane + * + * <--- RGB_Meta_Stride ----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Height | + * M M M M M M M M M M M M . . | Meta_RGB_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . 
V + * <-------- RGB_Stride --------> + * <------- Width -------> + * R R R R R R R R R R R R . . . . ^ ^ + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . Height | + * R R R R R R R R R R R R . . . . | RGB_Scanlines + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . | | + * R R R R R R R R R R R R . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . V + * + * RGB_Stride = align(Width * 2, 128) + * RGB_Scanlines = align(Height, 16) + * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096) + * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64) + * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16) + * RGB_Meta_Plane_size = align(RGB_Meta_Stride * + * RGB_Meta_Scanlines, 4096) + * Extradata = 8k + * + * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size + + * Extradata, 4096) + */ + COLOR_FMT_RGB565_UBWC, + /* P010 UBWC: + * Compressed Macro-tile format for NV12. + * Contains 4 planes in the following order - + * (A) Y_Meta_Plane + * (B) Y_UBWC_Plane + * (C) UV_Meta_Plane + * (D) UV_UBWC_Plane + * + * Y_Meta_Plane consists of meta information to decode compressed + * tile data in Y_UBWC_Plane. + * Y_UBWC_Plane consists of Y data in compressed macro-tile format. + * UBWC decoder block will use the Y_Meta_Plane data together with + * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples. + * + * UV_Meta_Plane consists of meta information to decode compressed + * tile data in UV_UBWC_Plane. + * UV_UBWC_Plane consists of UV data in compressed macro-tile format. + * UBWC decoder block will use UV_Meta_Plane data together with + * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2 + * subsampled color difference samples. + * + * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable + * and randomly accessible. There is no dependency between tiles. + * + * <----- Y_Meta_Stride -----> + * <-------- Width ------> + * M M M M M M M M M M M M . . ^ ^ + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . Height | + * M M M M M M M M M M M M . . | Meta_Y_Scanlines + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . | | + * M M M M M M M M M M M M . . V | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . V + * <--Compressed tile Y Stride---> + * <------- Width -------> + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^ + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | | + * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * . . . . . . . . . . . . . . . . V + * <----- UV_Meta_Stride ----> + * M M M M M M M M M M M M . . ^ + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . | + * M M M M M M M M M M M M . . M_UV_Scanlines + * . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . 
-------> Buffer size aligned to 4k + * <--Compressed tile UV Stride---> + * U* V* U* V* U* V* U* V* . . . . ^ + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . | + * U* V* U* V* U* V* U* V* . . . . UV_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k + * + * + * Y_Stride = align(Width * 2, 256) + * UV_Stride = align(Width * 2, 256) + * Y_Scanlines = align(Height, 16) + * UV_Scanlines = align(Height/2, 16) + * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096) + * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096) + * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64) + * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16) + * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096) + * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64) + * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16) + * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096) + * Extradata = 8k + * + * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size + + * Y_Meta_Plane_size + UV_Meta_Plane_size + * + max(Extradata, Y_Stride * 48), 4096) + */ + COLOR_FMT_P010_UBWC, + /* Venus P010: + * YUV 4:2:0 image with a plane of 10 bit Y samples followed + * by an interleaved U/V plane containing 10 bit 2x2 subsampled + * colour difference samples. + * + * <-------- Y/UV_Stride --------> + * <------- Width -------> + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | + * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * U V U V U V U V U V U V . . . . ^ + * U V U V U V U V U V U V . . . . | + * U V U V U V U V U V U V . . . . | + * U V U V U V U V U V U V . . . . UV_Scanlines + * . . . . . . . . . . . . . . . . | + * . . . . . . . . . . . . . . . . V + * . . . . . . . . . . . . . . . . 
--> Buffer size alignment + * + * Y_Stride : Width * 2 aligned to 128 + * UV_Stride : Width * 2 aligned to 128 + * Y_Scanlines: Height aligned to 32 + * UV_Scanlines: Height/2 aligned to 16 + * Extradata: Arbitrary (software-imposed) padding + * Total size = align((Y_Stride * Y_Scanlines + * + UV_Stride * UV_Scanlines + * + max(Extradata, Y_Stride * 8), 4096) + */ + COLOR_FMT_P010, +}; + +#define COLOR_FMT_RGBA1010102_UBWC COLOR_FMT_RGBA1010102_UBWC +#define COLOR_FMT_RGB565_UBWC COLOR_FMT_RGB565_UBWC +#define COLOR_FMT_P010_UBWC COLOR_FMT_P010_UBWC +#define COLOR_FMT_P010 COLOR_FMT_P010 + +/* + * Function arguments: + * @color_fmt + * @width + * Progressive: width + * Interlaced: width + */ +static unsigned int VENUS_Y_STRIDE(int color_fmt, int width) +{ + unsigned int stride = 0; + + if (!width) + return 0; + + switch (color_fmt) { + case COLOR_FMT_NV21: + case COLOR_FMT_NV12: + case COLOR_FMT_NV12_MVTB: + case COLOR_FMT_NV12_UBWC: + stride = MSM_MEDIA_ALIGN(width, 128); + break; + case COLOR_FMT_NV12_BPP10_UBWC: + stride = MSM_MEDIA_ALIGN(width, 192); + stride = MSM_MEDIA_ALIGN(stride * 4 / 3, 256); + break; + case COLOR_FMT_P010_UBWC: + stride = MSM_MEDIA_ALIGN(width * 2, 256); + break; + case COLOR_FMT_P010: + stride = MSM_MEDIA_ALIGN(width * 2, 128); + break; + } + + return stride; +} + +/* + * Function arguments: + * @color_fmt + * @width + * Progressive: width + * Interlaced: width + */ +static unsigned int VENUS_UV_STRIDE(int color_fmt, int width) +{ + unsigned int stride = 0; + + if (!width) + return 0; + + switch (color_fmt) { + case COLOR_FMT_NV21: + case COLOR_FMT_NV12: + case COLOR_FMT_NV12_MVTB: + case COLOR_FMT_NV12_UBWC: + stride = MSM_MEDIA_ALIGN(width, 128); + break; + case COLOR_FMT_NV12_BPP10_UBWC: + stride = MSM_MEDIA_ALIGN(width, 192); + stride = MSM_MEDIA_ALIGN(stride * 4 / 3, 256); + break; + case COLOR_FMT_P010_UBWC: + stride = MSM_MEDIA_ALIGN(width * 2, 256); + break; + case COLOR_FMT_P010: + stride = MSM_MEDIA_ALIGN(width * 2, 128); + break; + } + + return stride; +} + +/* + * Function arguments: + * @color_fmt + * @height + * Progressive: height + * Interlaced: (height+1)>>1 + */ +static unsigned int VENUS_Y_SCANLINES(int color_fmt, int height) +{ + unsigned int sclines = 0; + + if (!height) + return 0; + + switch (color_fmt) { + case COLOR_FMT_NV21: + case COLOR_FMT_NV12: + case COLOR_FMT_NV12_MVTB: + case COLOR_FMT_NV12_UBWC: + case COLOR_FMT_P010: + sclines = MSM_MEDIA_ALIGN(height, 32); + break; + case COLOR_FMT_NV12_BPP10_UBWC: + case COLOR_FMT_P010_UBWC: + sclines = MSM_MEDIA_ALIGN(height, 16); + break; + } + + return sclines; +} + +/* + * Function arguments: + * @color_fmt + * @height + * Progressive: height + * Interlaced: (height+1)>>1 + */ +static unsigned int VENUS_UV_SCANLINES(int color_fmt, int height) +{ + unsigned int sclines = 0; + + if (!height) + return 0; + + switch (color_fmt) { + case COLOR_FMT_NV21: + case COLOR_FMT_NV12: + case COLOR_FMT_NV12_MVTB: + case COLOR_FMT_NV12_BPP10_UBWC: + case COLOR_FMT_P010_UBWC: + case COLOR_FMT_P010: + sclines = MSM_MEDIA_ALIGN((height + 1) >> 1, 16); + break; + case COLOR_FMT_NV12_UBWC: + sclines = MSM_MEDIA_ALIGN((height + 1) >> 1, 32); + break; + } + + return sclines; +} + +/* + * Function arguments: + * @color_fmt + * @width + * Progressive: width + * Interlaced: width + */ +static unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width) +{ + int y_tile_width = 0, y_meta_stride; + + if (!width) + return 0; + + switch (color_fmt) { + case COLOR_FMT_NV12_UBWC: + case COLOR_FMT_P010_UBWC: + y_tile_width 
= 32; + break; + case COLOR_FMT_NV12_BPP10_UBWC: + y_tile_width = 48; + break; + default: + return 0; + } + + y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width); + return MSM_MEDIA_ALIGN(y_meta_stride, 64); +} + +/* + * Function arguments: + * @color_fmt + * @height + * Progressive: height + * Interlaced: (height+1)>>1 + */ +static unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height) +{ + int y_tile_height = 0, y_meta_scanlines; + + if (!height) + return 0; + + switch (color_fmt) { + case COLOR_FMT_NV12_UBWC: + y_tile_height = 8; + break; + case COLOR_FMT_NV12_BPP10_UBWC: + case COLOR_FMT_P010_UBWC: + y_tile_height = 4; + break; + default: + return 0; + } + + y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height); + return MSM_MEDIA_ALIGN(y_meta_scanlines, 16); +} + +/* + * Function arguments: + * @color_fmt + * @width + * Progressive: width + * Interlaced: width + */ +static unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width) +{ + int uv_tile_width = 0, uv_meta_stride; + + if (!width) + return 0; + + switch (color_fmt) { + case COLOR_FMT_NV12_UBWC: + case COLOR_FMT_P010_UBWC: + uv_tile_width = 16; + break; + case COLOR_FMT_NV12_BPP10_UBWC: + uv_tile_width = 24; + break; + default: + return 0; + } + + uv_meta_stride = MSM_MEDIA_ROUNDUP((width+1)>>1, uv_tile_width); + return MSM_MEDIA_ALIGN(uv_meta_stride, 64); +} + +/* + * Function arguments: + * @color_fmt + * @height + * Progressive: height + * Interlaced: (height+1)>>1 + */ +static unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height) +{ + int uv_tile_height = 0, uv_meta_scanlines; + + if (!height) + return 0; + + switch (color_fmt) { + case COLOR_FMT_NV12_UBWC: + uv_tile_height = 8; + break; + case COLOR_FMT_NV12_BPP10_UBWC: + case COLOR_FMT_P010_UBWC: + uv_tile_height = 4; + break; + default: + return 0; + } + + uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height+1)>>1, uv_tile_height); + return MSM_MEDIA_ALIGN(uv_meta_scanlines, 16); +} + +static unsigned int VENUS_RGB_STRIDE(int color_fmt, int width) +{ + unsigned int alignment = 0, bpp = 4; + + if (!width) + return 0; + + switch (color_fmt) { + case COLOR_FMT_RGBA8888: + alignment = 128; + break; + case COLOR_FMT_RGB565_UBWC: + alignment = 256; + bpp = 2; + break; + case COLOR_FMT_RGBA8888_UBWC: + case COLOR_FMT_RGBA1010102_UBWC: + alignment = 256; + break; + default: + return 0; + } + + return MSM_MEDIA_ALIGN(width * bpp, alignment); +} + +static unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height) +{ + unsigned int alignment = 0; + + if (!height) + return 0; + + switch (color_fmt) { + case COLOR_FMT_RGBA8888: + alignment = 32; + break; + case COLOR_FMT_RGBA8888_UBWC: + case COLOR_FMT_RGBA1010102_UBWC: + case COLOR_FMT_RGB565_UBWC: + alignment = 16; + break; + default: + return 0; + } + + return MSM_MEDIA_ALIGN(height, alignment); +} + +static unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width) +{ + int rgb_meta_stride; + + if (!width) + return 0; + + switch (color_fmt) { + case COLOR_FMT_RGBA8888_UBWC: + case COLOR_FMT_RGBA1010102_UBWC: + case COLOR_FMT_RGB565_UBWC: + rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, 16); + return MSM_MEDIA_ALIGN(rgb_meta_stride, 64); + } + + return 0; +} + +static unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height) +{ + int rgb_meta_scanlines; + + if (!height) + return 0; + + switch (color_fmt) { + case COLOR_FMT_RGBA8888_UBWC: + case COLOR_FMT_RGBA1010102_UBWC: + case COLOR_FMT_RGB565_UBWC: + rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, 4); + return MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16); 
+ } + + return 0; +} + +#endif diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h new file mode 100644 index 000000000..a2b642294 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h @@ -0,0 +1,1181 @@ +#ifndef MDP4_XML +#define MDP4_XML + +/* Autogenerated file, DO NOT EDIT manually! + +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://github.com/freedreno/envytools/ +git clone https://github.com/freedreno/envytools.git + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13) +- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) +- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22) + +Copyright (C) 2013-2021 by the following authors: +- Rob Clark (robclark) +- Ilia Mirkin (imirkin) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ + + +enum mdp4_pipe { + VG1 = 0, + VG2 = 1, + RGB1 = 2, + RGB2 = 3, + RGB3 = 4, + VG3 = 5, + VG4 = 6, +}; + +enum mdp4_mixer { + MIXER0 = 0, + MIXER1 = 1, + MIXER2 = 2, +}; + +enum mdp4_intf { + INTF_LCDC_DTV = 0, + INTF_DSI_VIDEO = 1, + INTF_DSI_CMD = 2, + INTF_EBI2_TV = 3, +}; + +enum mdp4_cursor_format { + CURSOR_ARGB = 1, + CURSOR_XRGB = 2, +}; + +enum mdp4_frame_format { + FRAME_LINEAR = 0, + FRAME_TILE_ARGB_4X4 = 1, + FRAME_TILE_YCBCR_420 = 2, +}; + +enum mdp4_scale_unit { + SCALE_FIR = 0, + SCALE_MN_PHASE = 1, + SCALE_PIXEL_RPT = 2, +}; + +enum mdp4_dma { + DMA_P = 0, + DMA_S = 1, + DMA_E = 2, +}; + +#define MDP4_IRQ_OVERLAY0_DONE 0x00000001 +#define MDP4_IRQ_OVERLAY1_DONE 0x00000002 +#define MDP4_IRQ_DMA_S_DONE 0x00000004 +#define MDP4_IRQ_DMA_E_DONE 0x00000008 +#define MDP4_IRQ_DMA_P_DONE 0x00000010 +#define MDP4_IRQ_VG1_HISTOGRAM 0x00000020 +#define MDP4_IRQ_VG2_HISTOGRAM 0x00000040 +#define MDP4_IRQ_PRIMARY_VSYNC 0x00000080 +#define MDP4_IRQ_PRIMARY_INTF_UDERRUN 0x00000100 +#define MDP4_IRQ_EXTERNAL_VSYNC 0x00000200 +#define MDP4_IRQ_EXTERNAL_INTF_UDERRUN 0x00000400 +#define MDP4_IRQ_PRIMARY_RDPTR 0x00000800 +#define MDP4_IRQ_DMA_P_HISTOGRAM 0x00020000 +#define MDP4_IRQ_DMA_S_HISTOGRAM 0x04000000 +#define MDP4_IRQ_OVERLAY2_DONE 0x40000000 +#define REG_MDP4_VERSION 0x00000000 +#define MDP4_VERSION_MINOR__MASK 0x00ff0000 +#define MDP4_VERSION_MINOR__SHIFT 16 +static inline uint32_t MDP4_VERSION_MINOR(uint32_t val) +{ + return ((val) << MDP4_VERSION_MINOR__SHIFT) & MDP4_VERSION_MINOR__MASK; +} +#define MDP4_VERSION_MAJOR__MASK 0xff000000 +#define MDP4_VERSION_MAJOR__SHIFT 24 +static inline uint32_t MDP4_VERSION_MAJOR(uint32_t val) +{ + return ((val) << MDP4_VERSION_MAJOR__SHIFT) & MDP4_VERSION_MAJOR__MASK; +} + +#define REG_MDP4_OVLP0_KICK 0x00000004 + +#define REG_MDP4_OVLP1_KICK 0x00000008 + +#define REG_MDP4_OVLP2_KICK 0x000000d0 + +#define REG_MDP4_DMA_P_KICK 0x0000000c + +#define REG_MDP4_DMA_S_KICK 0x00000010 + +#define REG_MDP4_DMA_E_KICK 0x00000014 + +#define REG_MDP4_DISP_STATUS 0x00000018 + +#define REG_MDP4_DISP_INTF_SEL 0x00000038 +#define MDP4_DISP_INTF_SEL_PRIM__MASK 0x00000003 +#define MDP4_DISP_INTF_SEL_PRIM__SHIFT 0 +static inline uint32_t MDP4_DISP_INTF_SEL_PRIM(enum mdp4_intf val) +{ + return ((val) << MDP4_DISP_INTF_SEL_PRIM__SHIFT) & MDP4_DISP_INTF_SEL_PRIM__MASK; +} +#define MDP4_DISP_INTF_SEL_SEC__MASK 0x0000000c +#define MDP4_DISP_INTF_SEL_SEC__SHIFT 2 +static inline uint32_t MDP4_DISP_INTF_SEL_SEC(enum mdp4_intf val) +{ + return ((val) << MDP4_DISP_INTF_SEL_SEC__SHIFT) & MDP4_DISP_INTF_SEL_SEC__MASK; +} +#define MDP4_DISP_INTF_SEL_EXT__MASK 0x00000030 +#define MDP4_DISP_INTF_SEL_EXT__SHIFT 4 +static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val) +{ + return ((val) << MDP4_DISP_INTF_SEL_EXT__SHIFT) & MDP4_DISP_INTF_SEL_EXT__MASK; +} +#define MDP4_DISP_INTF_SEL_DSI_VIDEO 0x00000040 +#define MDP4_DISP_INTF_SEL_DSI_CMD 0x00000080 + +#define REG_MDP4_RESET_STATUS 0x0000003c + +#define REG_MDP4_READ_CNFG 0x0000004c + +#define REG_MDP4_INTR_ENABLE 0x00000050 + +#define REG_MDP4_INTR_STATUS 0x00000054 + +#define REG_MDP4_INTR_CLEAR 0x00000058 + +#define REG_MDP4_EBI2_LCD0 0x00000060 + +#define REG_MDP4_EBI2_LCD1 0x00000064 + +#define REG_MDP4_PORTMAP_MODE 0x00000070 + +#define 
REG_MDP4_CS_CONTROLLER0 0x000000c0 + +#define REG_MDP4_CS_CONTROLLER1 0x000000c4 + +#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000 +#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28 +static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK; +} +#define MDP4_LAYERMIXER2_IN_CFG_PIPE7_MIXER1 0x80000000 + +#define REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD 0x000100fc + +#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100 +#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007 +#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008 +#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070 +#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mdp_mixer_stage_id val) +{ + return 
((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080 +#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700 +#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800 +#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000 +#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28 +static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mdp_mixer_stage_id val) +{ + return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK; +} +#define MDP4_LAYERMIXER_IN_CFG_PIPE7_MIXER1 0x80000000 + +#define REG_MDP4_VG2_SRC_FORMAT 0x00030050 + +#define REG_MDP4_VG2_CONST_COLOR 0x00031008 + +#define REG_MDP4_OVERLAY_FLUSH 0x00018000 +#define MDP4_OVERLAY_FLUSH_OVLP0 0x00000001 +#define MDP4_OVERLAY_FLUSH_OVLP1 0x00000002 +#define MDP4_OVERLAY_FLUSH_VG1 0x00000004 +#define MDP4_OVERLAY_FLUSH_VG2 0x00000008 +#define MDP4_OVERLAY_FLUSH_RGB1 0x00000010 +#define MDP4_OVERLAY_FLUSH_RGB2 0x00000020 + +static inline uint32_t __offset_OVLP(uint32_t idx) +{ + switch (idx) { + case 0: return 0x00010000; + case 1: return 0x00018000; + case 2: return 0x00088000; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP4_OVLP(uint32_t i0) { return 0x00000000 + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_CFG(uint32_t i0) { return 0x00000004 + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_SIZE(uint32_t i0) { return 0x00000008 + __offset_OVLP(i0); } +#define MDP4_OVLP_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP4_OVLP_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP4_OVLP_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP4_OVLP_SIZE_HEIGHT__SHIFT) & MDP4_OVLP_SIZE_HEIGHT__MASK; +} +#define MDP4_OVLP_SIZE_WIDTH__MASK 0x0000ffff +#define MDP4_OVLP_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP4_OVLP_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP4_OVLP_SIZE_WIDTH__SHIFT) & MDP4_OVLP_SIZE_WIDTH__MASK; +} 
+ +static inline uint32_t REG_MDP4_OVLP_BASE(uint32_t i0) { return 0x0000000c + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_STRIDE(uint32_t i0) { return 0x00000010 + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_OPMODE(uint32_t i0) { return 0x00000014 + __offset_OVLP(i0); } + +static inline uint32_t __offset_STAGE(uint32_t idx) +{ + switch (idx) { + case 0: return 0x00000104; + case 1: return 0x00000124; + case 2: return 0x00000144; + case 3: return 0x00000160; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); } + +static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); } +#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003 +#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0 +static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mdp_alpha_type val) +{ + return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK; +} +#define MDP4_OVLP_STAGE_OP_FG_INV_ALPHA 0x00000004 +#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008 +#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030 +#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4 +static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mdp_alpha_type val) +{ + return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK; +} +#define MDP4_OVLP_STAGE_OP_BG_INV_ALPHA 0x00000040 +#define MDP4_OVLP_STAGE_OP_BG_MOD_ALPHA 0x00000080 +#define MDP4_OVLP_STAGE_OP_FG_TRANSP 0x00000100 +#define MDP4_OVLP_STAGE_OP_BG_TRANSP 0x00000200 + +static inline uint32_t REG_MDP4_OVLP_STAGE_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_OVLP(i0) + __offset_STAGE(i1); } + +static inline uint32_t REG_MDP4_OVLP_STAGE_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_OVLP(i0) + __offset_STAGE(i1); } + +static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_OVLP(i0) + __offset_STAGE(i1); } + +static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_OVLP(i0) + __offset_STAGE(i1); } + +static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_OVLP(i0) + __offset_STAGE(i1); } + +static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_OVLP(i0) + __offset_STAGE(i1); } + +static inline uint32_t __offset_STAGE_CO3(uint32_t idx) +{ + switch (idx) { + case 0: return 0x00001004; + case 1: return 0x00001404; + case 2: return 0x00001804; + case 3: return 0x00001b84; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP4_OVLP_STAGE_CO3(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); } + +static inline uint32_t REG_MDP4_OVLP_STAGE_CO3_SEL(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); } +#define MDP4_OVLP_STAGE_CO3_SEL_FG_ALPHA 0x00000001 + +static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW0(uint32_t i0) { return 0x00000180 + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW1(uint32_t i0) { return 0x00000184 + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH0(uint32_t i0) { return 0x00000188 + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH1(uint32_t i0) { return 0x0000018c + 
__offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_CSC_CONFIG(uint32_t i0) { return 0x00000200 + __offset_OVLP(i0); } + +static inline uint32_t REG_MDP4_OVLP_CSC(uint32_t i0) { return 0x00002000 + __offset_OVLP(i0); } + + +static inline uint32_t REG_MDP4_OVLP_CSC_MV(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_MV_VAL(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; } + +#define REG_MDP4_DMA_P_OP_MODE 0x00090070 + +static inline uint32_t REG_MDP4_LUTN(uint32_t i0) { return 0x00094800 + 0x400*i0; } + +static inline uint32_t REG_MDP4_LUTN_LUT(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_LUTN_LUT_VAL(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; } + +#define REG_MDP4_DMA_S_OP_MODE 0x000a0028 + +static inline uint32_t REG_MDP4_DMA_E_QUANT(uint32_t i0) { return 0x000b0070 + 0x4*i0; } + +static inline uint32_t __offset_DMA(enum mdp4_dma idx) +{ + switch (idx) { + case DMA_P: return 0x00090000; + case DMA_S: return 0x000a0000; + case DMA_E: return 0x000b0000; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); } +#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003 +#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0 +static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mdp_bpc val) +{ + return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK; +} +#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c +#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2 +static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mdp_bpc val) +{ + return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK; +} +#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030 +#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4 +static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mdp_bpc val) +{ + return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK; +} +#define MDP4_DMA_CONFIG_PACK_ALIGN_MSB 0x00000080 +#define MDP4_DMA_CONFIG_PACK__MASK 0x0000ff00 +#define MDP4_DMA_CONFIG_PACK__SHIFT 8 +static inline uint32_t MDP4_DMA_CONFIG_PACK(uint32_t val) +{ + return ((val) << MDP4_DMA_CONFIG_PACK__SHIFT) & MDP4_DMA_CONFIG_PACK__MASK; +} +#define MDP4_DMA_CONFIG_DEFLKR_EN 0x01000000 +#define MDP4_DMA_CONFIG_DITHER_EN 0x01000000 + +static 
inline uint32_t REG_MDP4_DMA_SRC_SIZE(enum mdp4_dma i0) { return 0x00000004 + __offset_DMA(i0); } +#define MDP4_DMA_SRC_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP4_DMA_SRC_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT) & MDP4_DMA_SRC_SIZE_HEIGHT__MASK; +} +#define MDP4_DMA_SRC_SIZE_WIDTH__MASK 0x0000ffff +#define MDP4_DMA_SRC_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP4_DMA_SRC_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP4_DMA_SRC_SIZE_WIDTH__SHIFT) & MDP4_DMA_SRC_SIZE_WIDTH__MASK; +} + +static inline uint32_t REG_MDP4_DMA_SRC_BASE(enum mdp4_dma i0) { return 0x00000008 + __offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_SRC_STRIDE(enum mdp4_dma i0) { return 0x0000000c + __offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_DST_SIZE(enum mdp4_dma i0) { return 0x00000010 + __offset_DMA(i0); } +#define MDP4_DMA_DST_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP4_DMA_DST_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP4_DMA_DST_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP4_DMA_DST_SIZE_HEIGHT__SHIFT) & MDP4_DMA_DST_SIZE_HEIGHT__MASK; +} +#define MDP4_DMA_DST_SIZE_WIDTH__MASK 0x0000ffff +#define MDP4_DMA_DST_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP4_DMA_DST_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP4_DMA_DST_SIZE_WIDTH__SHIFT) & MDP4_DMA_DST_SIZE_WIDTH__MASK; +} + +static inline uint32_t REG_MDP4_DMA_CURSOR_SIZE(enum mdp4_dma i0) { return 0x00000044 + __offset_DMA(i0); } +#define MDP4_DMA_CURSOR_SIZE_WIDTH__MASK 0x0000007f +#define MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP4_DMA_CURSOR_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT) & MDP4_DMA_CURSOR_SIZE_WIDTH__MASK; +} +#define MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK 0x007f0000 +#define MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP4_DMA_CURSOR_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT) & MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK; +} + +static inline uint32_t REG_MDP4_DMA_CURSOR_BASE(enum mdp4_dma i0) { return 0x00000048 + __offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_CURSOR_POS(enum mdp4_dma i0) { return 0x0000004c + __offset_DMA(i0); } +#define MDP4_DMA_CURSOR_POS_X__MASK 0x0000ffff +#define MDP4_DMA_CURSOR_POS_X__SHIFT 0 +static inline uint32_t MDP4_DMA_CURSOR_POS_X(uint32_t val) +{ + return ((val) << MDP4_DMA_CURSOR_POS_X__SHIFT) & MDP4_DMA_CURSOR_POS_X__MASK; +} +#define MDP4_DMA_CURSOR_POS_Y__MASK 0xffff0000 +#define MDP4_DMA_CURSOR_POS_Y__SHIFT 16 +static inline uint32_t MDP4_DMA_CURSOR_POS_Y(uint32_t val) +{ + return ((val) << MDP4_DMA_CURSOR_POS_Y__SHIFT) & MDP4_DMA_CURSOR_POS_Y__MASK; +} + +static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_CONFIG(enum mdp4_dma i0) { return 0x00000060 + __offset_DMA(i0); } +#define MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN 0x00000001 +#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK 0x00000006 +#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT 1 +static inline uint32_t MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(enum mdp4_cursor_format val) +{ + return ((val) << MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT) & MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK; +} +#define MDP4_DMA_CURSOR_BLEND_CONFIG_TRANSP_EN 0x00000008 + +static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_PARAM(enum mdp4_dma i0) { return 0x00000064 + __offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_LOW(enum mdp4_dma i0) { return 0x00000068 + __offset_DMA(i0); } + +static inline uint32_t 
REG_MDP4_DMA_BLEND_TRANS_HIGH(enum mdp4_dma i0) { return 0x0000006c + __offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_FETCH_CONFIG(enum mdp4_dma i0) { return 0x00001004 + __offset_DMA(i0); } + +static inline uint32_t REG_MDP4_DMA_CSC(enum mdp4_dma i0) { return 0x00003000 + __offset_DMA(i0); } + + +static inline uint32_t REG_MDP4_DMA_CSC_MV(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_MV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_POST_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_POST_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_POST_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mdp4_pipe i0) { return 0x00020000 + 0x10000*i0; } +#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SRC_SIZE_HEIGHT__MASK; +} +#define MDP4_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff +#define MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP4_PIPE_SRC_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mdp4_pipe i0) { return 0x00020004 + 0x10000*i0; } +#define MDP4_PIPE_SRC_XY_Y__MASK 0xffff0000 +#define MDP4_PIPE_SRC_XY_Y__SHIFT 16 +static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_XY_Y__SHIFT) & MDP4_PIPE_SRC_XY_Y__MASK; +} +#define MDP4_PIPE_SRC_XY_X__MASK 0x0000ffff +#define MDP4_PIPE_SRC_XY_X__SHIFT 0 +static inline uint32_t MDP4_PIPE_SRC_XY_X(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mdp4_pipe i0) { return 0x00020008 + 0x10000*i0; } +#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_DST_SIZE_HEIGHT__MASK; +} +#define MDP4_PIPE_DST_SIZE_WIDTH__MASK 0x0000ffff +#define MDP4_PIPE_DST_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP4_PIPE_DST_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_DST_XY(enum 
mdp4_pipe i0) { return 0x0002000c + 0x10000*i0; } +#define MDP4_PIPE_DST_XY_Y__MASK 0xffff0000 +#define MDP4_PIPE_DST_XY_Y__SHIFT 16 +static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val) +{ + return ((val) << MDP4_PIPE_DST_XY_Y__SHIFT) & MDP4_PIPE_DST_XY_Y__MASK; +} +#define MDP4_PIPE_DST_XY_X__MASK 0x0000ffff +#define MDP4_PIPE_DST_XY_X__SHIFT 0 +static inline uint32_t MDP4_PIPE_DST_XY_X(uint32_t val) +{ + return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mdp4_pipe i0) { return 0x00020010 + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mdp4_pipe i0) { return 0x00020014 + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mdp4_pipe i0) { return 0x00020018 + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_SRCP3_BASE(enum mdp4_pipe i0) { return 0x0002001c + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mdp4_pipe i0) { return 0x00020040 + 0x10000*i0; } +#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff +#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0 +static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P0__MASK; +} +#define MDP4_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000 +#define MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT 16 +static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P1(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mdp4_pipe i0) { return 0x00020044 + 0x10000*i0; } +#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff +#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT 0 +static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P2__MASK; +} +#define MDP4_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000 +#define MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT 16 +static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_SSTILE_FRAME_SIZE(enum mdp4_pipe i0) { return 0x00020048 + 0x10000*i0; } +#define MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__MASK 0xffff0000 +#define MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__SHIFT 16 +static inline uint32_t MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT(uint32_t val) +{ + return ((val) << MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__MASK; +} +#define MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__MASK 0x0000ffff +#define MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__SHIFT 0 +static inline uint32_t MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH(uint32_t val) +{ + return ((val) << MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; } +#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003 +#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c +#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030 +#define 
MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0 +#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100 +#define MDP4_PIPE_SRC_FORMAT_CPP__MASK 0x00000600 +#define MDP4_PIPE_SRC_FORMAT_CPP__SHIFT 9 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_CPP(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CPP__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_ROTATED_90 0x00001000 +#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00006000 +#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 13 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000 +#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000 +#define MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__MASK 0x00180000 +#define MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__SHIFT 19 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__SHIFT) & MDP4_PIPE_SRC_FORMAT_FETCH_PLANES__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000 +#define MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x0c000000 +#define MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 26 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp_chroma_samp_type val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK; +} +#define MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__MASK 0x60000000 +#define MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__SHIFT 29 +static inline uint32_t MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT(enum mdp4_frame_format val) +{ + return ((val) << MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__SHIFT) & MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mdp4_pipe i0) { return 0x00020054 + 0x10000*i0; } +#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff +#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT 0 +static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM0__MASK; +} +#define MDP4_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00 +#define MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT 8 +static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM1(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM1__MASK; +} +#define MDP4_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000 +#define MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT 16 +static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM2(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM2__MASK; +} +#define MDP4_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000 +#define MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT 24 +static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val) +{ + return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK; +} + +static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mdp4_pipe i0) { return 0x00020058 + 0x10000*i0; } +#define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001 +#define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002 +#define 
MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__MASK 0x0000000c +#define MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__SHIFT 2 +static inline uint32_t MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL(enum mdp4_scale_unit val) +{ + return ((val) << MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__SHIFT) & MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL__MASK; +} +#define MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__MASK 0x00000030 +#define MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__SHIFT 4 +static inline uint32_t MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL(enum mdp4_scale_unit val) +{ + return ((val) << MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__SHIFT) & MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL__MASK; +} +#define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200 +#define MDP4_PIPE_OP_MODE_DST_YCBCR 0x00000400 +#define MDP4_PIPE_OP_MODE_CSC_EN 0x00000800 +#define MDP4_PIPE_OP_MODE_FLIP_LR 0x00002000 +#define MDP4_PIPE_OP_MODE_FLIP_UD 0x00004000 +#define MDP4_PIPE_OP_MODE_DITHER_EN 0x00008000 +#define MDP4_PIPE_OP_MODE_IGC_LUT_EN 0x00010000 +#define MDP4_PIPE_OP_MODE_DEINT_EN 0x00040000 +#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF 0x00080000 + +static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mdp4_pipe i0) { return 0x0002005c + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mdp4_pipe i0) { return 0x00020060 + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mdp4_pipe i0) { return 0x00021004 + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mdp4_pipe i0) { return 0x00021008 + 0x10000*i0; } + +static inline uint32_t REG_MDP4_PIPE_CSC(enum mdp4_pipe i0) { return 0x00024000 + 0x10000*i0; } + + +static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; } + +static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mdp4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; } + +#define REG_MDP4_LCDC 0x000c0000 + +#define REG_MDP4_LCDC_ENABLE 0x000c0000 + +#define REG_MDP4_LCDC_HSYNC_CTRL 0x000c0004 +#define MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK 0x0000ffff +#define MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT 0 +static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PULSEW(uint32_t val) +{ + return ((val) << MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK; +} +#define MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK 0xffff0000 +#define MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT 16 +static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PERIOD(uint32_t val) +{ + return ((val) << MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK; +} + +#define REG_MDP4_LCDC_VSYNC_PERIOD 
0x000c0008 + +#define REG_MDP4_LCDC_VSYNC_LEN 0x000c000c + +#define REG_MDP4_LCDC_DISPLAY_HCTRL 0x000c0010 +#define MDP4_LCDC_DISPLAY_HCTRL_START__MASK 0x0000ffff +#define MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT 0 +static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_START(uint32_t val) +{ + return ((val) << MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_START__MASK; +} +#define MDP4_LCDC_DISPLAY_HCTRL_END__MASK 0xffff0000 +#define MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT 16 +static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_END(uint32_t val) +{ + return ((val) << MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_END__MASK; +} + +#define REG_MDP4_LCDC_DISPLAY_VSTART 0x000c0014 + +#define REG_MDP4_LCDC_DISPLAY_VEND 0x000c0018 + +#define REG_MDP4_LCDC_ACTIVE_HCTL 0x000c001c +#define MDP4_LCDC_ACTIVE_HCTL_START__MASK 0x00007fff +#define MDP4_LCDC_ACTIVE_HCTL_START__SHIFT 0 +static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_START(uint32_t val) +{ + return ((val) << MDP4_LCDC_ACTIVE_HCTL_START__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_START__MASK; +} +#define MDP4_LCDC_ACTIVE_HCTL_END__MASK 0x7fff0000 +#define MDP4_LCDC_ACTIVE_HCTL_END__SHIFT 16 +static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_END(uint32_t val) +{ + return ((val) << MDP4_LCDC_ACTIVE_HCTL_END__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_END__MASK; +} +#define MDP4_LCDC_ACTIVE_HCTL_ACTIVE_START_X 0x80000000 + +#define REG_MDP4_LCDC_ACTIVE_VSTART 0x000c0020 + +#define REG_MDP4_LCDC_ACTIVE_VEND 0x000c0024 + +#define REG_MDP4_LCDC_BORDER_CLR 0x000c0028 + +#define REG_MDP4_LCDC_UNDERFLOW_CLR 0x000c002c +#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff +#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT 0 +static inline uint32_t MDP4_LCDC_UNDERFLOW_CLR_COLOR(uint32_t val) +{ + return ((val) << MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK; +} +#define MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000 + +#define REG_MDP4_LCDC_HSYNC_SKEW 0x000c0030 + +#define REG_MDP4_LCDC_TEST_CNTL 0x000c0034 + +#define REG_MDP4_LCDC_CTRL_POLARITY 0x000c0038 +#define MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW 0x00000001 +#define MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW 0x00000002 +#define MDP4_LCDC_CTRL_POLARITY_DATA_EN_LOW 0x00000004 + +#define REG_MDP4_LCDC_LVDS_INTF_CTL 0x000c2000 +#define MDP4_LCDC_LVDS_INTF_CTL_MODE_SEL 0x00000004 +#define MDP4_LCDC_LVDS_INTF_CTL_RGB_OUT 0x00000008 +#define MDP4_LCDC_LVDS_INTF_CTL_CH_SWAP 0x00000010 +#define MDP4_LCDC_LVDS_INTF_CTL_CH1_RES_BIT 0x00000020 +#define MDP4_LCDC_LVDS_INTF_CTL_CH2_RES_BIT 0x00000040 +#define MDP4_LCDC_LVDS_INTF_CTL_ENABLE 0x00000080 +#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN 0x00000100 +#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN 0x00000200 +#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN 0x00000400 +#define MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE3_EN 0x00000800 +#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE0_EN 0x00001000 +#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE1_EN 0x00002000 +#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE2_EN 0x00004000 +#define MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE3_EN 0x00008000 +#define MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN 0x00010000 +#define MDP4_LCDC_LVDS_INTF_CTL_CH2_CLK_LANE_EN 0x00020000 + +static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL(uint32_t i0) { return 0x000c2014 + 0x8*i0; } + +static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(uint32_t i0) { return 0x000c2014 + 0x8*i0; } +#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__MASK 0x000000ff +#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__SHIFT 0 +static inline uint32_t 
MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(uint32_t val) +{ + return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0__MASK; +} +#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__MASK 0x0000ff00 +#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__SHIFT 8 +static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(uint32_t val) +{ + return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1__MASK; +} +#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__MASK 0x00ff0000 +#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__SHIFT 16 +static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(uint32_t val) +{ + return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2__MASK; +} +#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__MASK 0xff000000 +#define MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__SHIFT 24 +static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(uint32_t val) +{ + return ((val) << MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3__MASK; +} + +static inline uint32_t REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(uint32_t i0) { return 0x000c2018 + 0x8*i0; } +#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__MASK 0x000000ff +#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__SHIFT 0 +static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(uint32_t val) +{ + return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4__MASK; +} +#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__MASK 0x0000ff00 +#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__SHIFT 8 +static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(uint32_t val) +{ + return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5__MASK; +} +#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__MASK 0x00ff0000 +#define MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__SHIFT 16 +static inline uint32_t MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(uint32_t val) +{ + return ((val) << MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__SHIFT) & MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6__MASK; +} + +#define REG_MDP4_LCDC_LVDS_PHY_RESET 0x000c2034 + +#define REG_MDP4_LVDS_PHY_PLL_CTRL_0 0x000c3000 + +#define REG_MDP4_LVDS_PHY_PLL_CTRL_1 0x000c3004 + +#define REG_MDP4_LVDS_PHY_PLL_CTRL_2 0x000c3008 + +#define REG_MDP4_LVDS_PHY_PLL_CTRL_3 0x000c300c + +#define REG_MDP4_LVDS_PHY_PLL_CTRL_5 0x000c3014 + +#define REG_MDP4_LVDS_PHY_PLL_CTRL_6 0x000c3018 + +#define REG_MDP4_LVDS_PHY_PLL_CTRL_7 0x000c301c + +#define REG_MDP4_LVDS_PHY_PLL_CTRL_8 0x000c3020 + +#define REG_MDP4_LVDS_PHY_PLL_CTRL_9 0x000c3024 + +#define REG_MDP4_LVDS_PHY_PLL_LOCKED 0x000c3080 + +#define REG_MDP4_LVDS_PHY_CFG2 0x000c3108 + +#define REG_MDP4_LVDS_PHY_CFG0 0x000c3100 +#define MDP4_LVDS_PHY_CFG0_SERIALIZATION_ENBLE 0x00000010 +#define MDP4_LVDS_PHY_CFG0_CHANNEL0 0x00000040 +#define MDP4_LVDS_PHY_CFG0_CHANNEL1 0x00000080 + +#define REG_MDP4_DTV 0x000d0000 + +#define REG_MDP4_DTV_ENABLE 0x000d0000 + +#define REG_MDP4_DTV_HSYNC_CTRL 0x000d0004 +#define MDP4_DTV_HSYNC_CTRL_PULSEW__MASK 0x0000ffff +#define MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT 0 +static inline uint32_t MDP4_DTV_HSYNC_CTRL_PULSEW(uint32_t val) +{ + return ((val) << MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DTV_HSYNC_CTRL_PULSEW__MASK; +} +#define MDP4_DTV_HSYNC_CTRL_PERIOD__MASK 0xffff0000 +#define MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT 16 +static inline uint32_t MDP4_DTV_HSYNC_CTRL_PERIOD(uint32_t val) +{ + return ((val) << MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DTV_HSYNC_CTRL_PERIOD__MASK; +} + +#define REG_MDP4_DTV_VSYNC_PERIOD 
0x000d0008 + +#define REG_MDP4_DTV_VSYNC_LEN 0x000d000c + +#define REG_MDP4_DTV_DISPLAY_HCTRL 0x000d0018 +#define MDP4_DTV_DISPLAY_HCTRL_START__MASK 0x0000ffff +#define MDP4_DTV_DISPLAY_HCTRL_START__SHIFT 0 +static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_START(uint32_t val) +{ + return ((val) << MDP4_DTV_DISPLAY_HCTRL_START__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_START__MASK; +} +#define MDP4_DTV_DISPLAY_HCTRL_END__MASK 0xffff0000 +#define MDP4_DTV_DISPLAY_HCTRL_END__SHIFT 16 +static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_END(uint32_t val) +{ + return ((val) << MDP4_DTV_DISPLAY_HCTRL_END__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_END__MASK; +} + +#define REG_MDP4_DTV_DISPLAY_VSTART 0x000d001c + +#define REG_MDP4_DTV_DISPLAY_VEND 0x000d0020 + +#define REG_MDP4_DTV_ACTIVE_HCTL 0x000d002c +#define MDP4_DTV_ACTIVE_HCTL_START__MASK 0x00007fff +#define MDP4_DTV_ACTIVE_HCTL_START__SHIFT 0 +static inline uint32_t MDP4_DTV_ACTIVE_HCTL_START(uint32_t val) +{ + return ((val) << MDP4_DTV_ACTIVE_HCTL_START__SHIFT) & MDP4_DTV_ACTIVE_HCTL_START__MASK; +} +#define MDP4_DTV_ACTIVE_HCTL_END__MASK 0x7fff0000 +#define MDP4_DTV_ACTIVE_HCTL_END__SHIFT 16 +static inline uint32_t MDP4_DTV_ACTIVE_HCTL_END(uint32_t val) +{ + return ((val) << MDP4_DTV_ACTIVE_HCTL_END__SHIFT) & MDP4_DTV_ACTIVE_HCTL_END__MASK; +} +#define MDP4_DTV_ACTIVE_HCTL_ACTIVE_START_X 0x80000000 + +#define REG_MDP4_DTV_ACTIVE_VSTART 0x000d0030 + +#define REG_MDP4_DTV_ACTIVE_VEND 0x000d0038 + +#define REG_MDP4_DTV_BORDER_CLR 0x000d0040 + +#define REG_MDP4_DTV_UNDERFLOW_CLR 0x000d0044 +#define MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff +#define MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT 0 +static inline uint32_t MDP4_DTV_UNDERFLOW_CLR_COLOR(uint32_t val) +{ + return ((val) << MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK; +} +#define MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000 + +#define REG_MDP4_DTV_HSYNC_SKEW 0x000d0048 + +#define REG_MDP4_DTV_TEST_CNTL 0x000d004c + +#define REG_MDP4_DTV_CTRL_POLARITY 0x000d0050 +#define MDP4_DTV_CTRL_POLARITY_HSYNC_LOW 0x00000001 +#define MDP4_DTV_CTRL_POLARITY_VSYNC_LOW 0x00000002 +#define MDP4_DTV_CTRL_POLARITY_DATA_EN_LOW 0x00000004 + +#define REG_MDP4_DSI 0x000e0000 + +#define REG_MDP4_DSI_ENABLE 0x000e0000 + +#define REG_MDP4_DSI_HSYNC_CTRL 0x000e0004 +#define MDP4_DSI_HSYNC_CTRL_PULSEW__MASK 0x0000ffff +#define MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT 0 +static inline uint32_t MDP4_DSI_HSYNC_CTRL_PULSEW(uint32_t val) +{ + return ((val) << MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DSI_HSYNC_CTRL_PULSEW__MASK; +} +#define MDP4_DSI_HSYNC_CTRL_PERIOD__MASK 0xffff0000 +#define MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT 16 +static inline uint32_t MDP4_DSI_HSYNC_CTRL_PERIOD(uint32_t val) +{ + return ((val) << MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DSI_HSYNC_CTRL_PERIOD__MASK; +} + +#define REG_MDP4_DSI_VSYNC_PERIOD 0x000e0008 + +#define REG_MDP4_DSI_VSYNC_LEN 0x000e000c + +#define REG_MDP4_DSI_DISPLAY_HCTRL 0x000e0010 +#define MDP4_DSI_DISPLAY_HCTRL_START__MASK 0x0000ffff +#define MDP4_DSI_DISPLAY_HCTRL_START__SHIFT 0 +static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_START(uint32_t val) +{ + return ((val) << MDP4_DSI_DISPLAY_HCTRL_START__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_START__MASK; +} +#define MDP4_DSI_DISPLAY_HCTRL_END__MASK 0xffff0000 +#define MDP4_DSI_DISPLAY_HCTRL_END__SHIFT 16 +static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_END(uint32_t val) +{ + return ((val) << MDP4_DSI_DISPLAY_HCTRL_END__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_END__MASK; +} + +#define REG_MDP4_DSI_DISPLAY_VSTART 0x000e0014 + +#define 
REG_MDP4_DSI_DISPLAY_VEND 0x000e0018 + +#define REG_MDP4_DSI_ACTIVE_HCTL 0x000e001c +#define MDP4_DSI_ACTIVE_HCTL_START__MASK 0x00007fff +#define MDP4_DSI_ACTIVE_HCTL_START__SHIFT 0 +static inline uint32_t MDP4_DSI_ACTIVE_HCTL_START(uint32_t val) +{ + return ((val) << MDP4_DSI_ACTIVE_HCTL_START__SHIFT) & MDP4_DSI_ACTIVE_HCTL_START__MASK; +} +#define MDP4_DSI_ACTIVE_HCTL_END__MASK 0x7fff0000 +#define MDP4_DSI_ACTIVE_HCTL_END__SHIFT 16 +static inline uint32_t MDP4_DSI_ACTIVE_HCTL_END(uint32_t val) +{ + return ((val) << MDP4_DSI_ACTIVE_HCTL_END__SHIFT) & MDP4_DSI_ACTIVE_HCTL_END__MASK; +} +#define MDP4_DSI_ACTIVE_HCTL_ACTIVE_START_X 0x80000000 + +#define REG_MDP4_DSI_ACTIVE_VSTART 0x000e0020 + +#define REG_MDP4_DSI_ACTIVE_VEND 0x000e0024 + +#define REG_MDP4_DSI_BORDER_CLR 0x000e0028 + +#define REG_MDP4_DSI_UNDERFLOW_CLR 0x000e002c +#define MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff +#define MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT 0 +static inline uint32_t MDP4_DSI_UNDERFLOW_CLR_COLOR(uint32_t val) +{ + return ((val) << MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK; +} +#define MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000 + +#define REG_MDP4_DSI_HSYNC_SKEW 0x000e0030 + +#define REG_MDP4_DSI_TEST_CNTL 0x000e0034 + +#define REG_MDP4_DSI_CTRL_POLARITY 0x000e0038 +#define MDP4_DSI_CTRL_POLARITY_HSYNC_LOW 0x00000001 +#define MDP4_DSI_CTRL_POLARITY_VSYNC_LOW 0x00000002 +#define MDP4_DSI_CTRL_POLARITY_DATA_EN_LOW 0x00000004 + + +#endif /* MDP4_XML */ diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c new file mode 100644 index 000000000..310095722 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c @@ -0,0 +1,666 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + */ + +#include +#include +#include +#include +#include + +#include "mdp4_kms.h" +#include "msm_gem.h" + +struct mdp4_crtc { + struct drm_crtc base; + char name[8]; + int id; + int ovlp; + enum mdp4_dma dma; + bool enabled; + + /* which mixer/encoder we route output to: */ + int mixer; + + struct { + spinlock_t lock; + bool stale; + uint32_t width, height; + uint32_t x, y; + + /* next cursor to scan-out: */ + uint32_t next_iova; + struct drm_gem_object *next_bo; + + /* current cursor being scanned out: */ + struct drm_gem_object *scanout_bo; + } cursor; + + + /* if there is a pending flip, these will be non-null: */ + struct drm_pending_vblank_event *event; + + /* Bits have been flushed at the last commit, + * used to decide if a vsync has happened since last commit. 
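+ * A non-zero flushed_mask means a flush was kicked but may not have + * latched yet; mdp4_crtc_wait_for_flush_done() polls + * REG_MDP4_OVERLAY_FLUSH until these bits read back as zero.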
+ */ + u32 flushed_mask; + +#define PENDING_CURSOR 0x1 +#define PENDING_FLIP 0x2 + atomic_t pending; + + /* for unref'ing cursor bo's after scanout completes: */ + struct drm_flip_work unref_cursor_work; + + struct mdp_irq vblank; + struct mdp_irq err; +}; +#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base) + +static struct mdp4_kms *get_kms(struct drm_crtc *crtc) +{ + struct msm_drm_private *priv = crtc->dev->dev_private; + return to_mdp4_kms(to_mdp_kms(priv->kms)); +} + +static void request_pending(struct drm_crtc *crtc, uint32_t pending) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + + atomic_or(pending, &mdp4_crtc->pending); + mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); +} + +static void crtc_flush(struct drm_crtc *crtc) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + struct drm_plane *plane; + uint32_t flush = 0; + + drm_atomic_crtc_for_each_plane(plane, crtc) { + enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); + flush |= pipe2flush(pipe_id); + } + + flush |= ovlp2flush(mdp4_crtc->ovlp); + + DBG("%s: flush=%08x", mdp4_crtc->name, flush); + + mdp4_crtc->flushed_mask = flush; + + mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush); +} + +/* if file!=NULL, this is preclose potential cancel-flip path */ +static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_pending_vblank_event *event; + unsigned long flags; + + spin_lock_irqsave(&dev->event_lock, flags); + event = mdp4_crtc->event; + if (event) { + mdp4_crtc->event = NULL; + DBG("%s: send event: %p", mdp4_crtc->name, event); + drm_crtc_send_vblank_event(crtc, event); + } + spin_unlock_irqrestore(&dev->event_lock, flags); +} + +static void unref_cursor_worker(struct drm_flip_work *work, void *val) +{ + struct mdp4_crtc *mdp4_crtc = + container_of(work, struct mdp4_crtc, unref_cursor_work); + struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base); + struct msm_kms *kms = &mdp4_kms->base.base; + + msm_gem_unpin_iova(val, kms->aspace); + drm_gem_object_put(val); +} + +static void mdp4_crtc_destroy(struct drm_crtc *crtc) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + + drm_crtc_cleanup(crtc); + drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work); + + kfree(mdp4_crtc); +} + +/* statically (for now) map planes to mixer stage (z-order): */ +static const int idxs[] = { + [VG1] = 1, + [VG2] = 2, + [RGB1] = 0, + [RGB2] = 0, + [RGB3] = 0, + [VG3] = 3, + [VG4] = 4, + +}; + +/* setup mixer config, for which we need to consider all crtc's and + * the planes attached to them + * + * TODO may possibly need some extra locking here + */ +static void setup_mixer(struct mdp4_kms *mdp4_kms) +{ + struct drm_mode_config *config = &mdp4_kms->dev->mode_config; + struct drm_crtc *crtc; + uint32_t mixer_cfg = 0; + static const enum mdp_mixer_stage_id stages[] = { + STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3, + }; + + list_for_each_entry(crtc, &config->crtc_list, head) { + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct drm_plane *plane; + + drm_atomic_crtc_for_each_plane(plane, crtc) { + enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); + int idx = idxs[pipe_id]; + mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer, + pipe_id, stages[idx]); + } + } + + mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg); +} + +static void blend_setup(struct drm_crtc *crtc) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct 
mdp4_kms *mdp4_kms = get_kms(crtc); + struct drm_plane *plane; + int i, ovlp = mdp4_crtc->ovlp; + bool alpha[4]= { false, false, false, false }; + + mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0); + + drm_atomic_crtc_for_each_plane(plane, crtc) { + enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); + int idx = idxs[pipe_id]; + if (idx > 0) { + const struct mdp_format *format = + to_mdp_format(msm_framebuffer_format(plane->state->fb)); + alpha[idx-1] = format->alpha_enable; + } + } + + for (i = 0; i < 4; i++) { + uint32_t op; + + if (alpha[i]) { + op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) | + MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) | + MDP4_OVLP_STAGE_OP_BG_INV_ALPHA; + } else { + op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) | + MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST); + } + + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0); + } + + setup_mixer(mdp4_kms); +} + +static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + enum mdp4_dma dma = mdp4_crtc->dma; + int ovlp = mdp4_crtc->ovlp; + struct drm_display_mode *mode; + + if (WARN_ON(!crtc->state)) + return; + + mode = &crtc->state->adjusted_mode; + + DBG("%s: set mode: " DRM_MODE_FMT, + mdp4_crtc->name, DRM_MODE_ARG(mode)); + + mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma), + MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) | + MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay)); + + /* take data from pipe: */ + mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0); + mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0); + mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma), + MDP4_DMA_DST_SIZE_WIDTH(0) | + MDP4_DMA_DST_SIZE_HEIGHT(0)); + + mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp), + MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) | + MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay)); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0); + + mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1); + + if (dma == DMA_E) { + mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000); + mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000); + mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000); + } +} + +static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + unsigned long flags; + + DBG("%s", mdp4_crtc->name); + + if (WARN_ON(!mdp4_crtc->enabled)) + return; + + /* Disable/save vblank irq handling before power is disabled */ + drm_crtc_vblank_off(crtc); + + mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err); + mdp4_disable(mdp4_kms); + + if (crtc->state->event && !crtc->state->active) { + WARN_ON(mdp4_crtc->event); + spin_lock_irqsave(&mdp4_kms->dev->event_lock, flags); + drm_crtc_send_vblank_event(crtc, crtc->state->event); + 
crtc->state->event = NULL; + spin_unlock_irqrestore(&mdp4_kms->dev->event_lock, flags); + } + + mdp4_crtc->enabled = false; +} + +static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + + DBG("%s", mdp4_crtc->name); + + if (WARN_ON(mdp4_crtc->enabled)) + return; + + mdp4_enable(mdp4_kms); + + /* Restore vblank irq handling after power is enabled */ + drm_crtc_vblank_on(crtc); + + mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err); + + crtc_flush(crtc); + + mdp4_crtc->enabled = true; +} + +static int mdp4_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + DBG("%s: check", mdp4_crtc->name); + // TODO anything else to check? + return 0; +} + +static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + DBG("%s: begin", mdp4_crtc->name); +} + +static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct drm_device *dev = crtc->dev; + unsigned long flags; + + DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event); + + WARN_ON(mdp4_crtc->event); + + spin_lock_irqsave(&dev->event_lock, flags); + mdp4_crtc->event = crtc->state->event; + crtc->state->event = NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); + + blend_setup(crtc); + crtc_flush(crtc); + request_pending(crtc, PENDING_FLIP); +} + +#define CURSOR_WIDTH 64 +#define CURSOR_HEIGHT 64 + +/* called from IRQ to update cursor related registers (if needed). The + * cursor registers, other than x/y position, appear not to be double + * buffered, and changing them other than from vblank seems to trigger + * underflow. 
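+ * update_cursor() therefore runs from the vblank irq handler (via + * PENDING_CURSOR) and only latches a new cursor bo when cursor.stale + * has been set under cursor.lock.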
+ */ +static void update_cursor(struct drm_crtc *crtc) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + struct msm_kms *kms = &mdp4_kms->base.base; + enum mdp4_dma dma = mdp4_crtc->dma; + unsigned long flags; + + spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); + if (mdp4_crtc->cursor.stale) { + struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo; + struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo; + uint64_t iova = mdp4_crtc->cursor.next_iova; + + if (next_bo) { + /* take an obj ref + iova ref when we start scanning out: */ + drm_gem_object_get(next_bo); + msm_gem_get_and_pin_iova(next_bo, kms->aspace, &iova); + + /* enable cursor: */ + mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma), + MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) | + MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height)); + mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova); + mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma), + MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) | + MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN); + } else { + /* disable cursor: */ + mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), + mdp4_kms->blank_cursor_iova); + } + + /* and drop the iova ref + obj ref when done scanning out: */ + if (prev_bo) + drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo); + + mdp4_crtc->cursor.scanout_bo = next_bo; + mdp4_crtc->cursor.stale = false; + } + + mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma), + MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) | + MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y)); + + spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); +} + +static int mdp4_crtc_cursor_set(struct drm_crtc *crtc, + struct drm_file *file_priv, uint32_t handle, + uint32_t width, uint32_t height) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + struct msm_kms *kms = &mdp4_kms->base.base; + struct drm_device *dev = crtc->dev; + struct drm_gem_object *cursor_bo, *old_bo; + unsigned long flags; + uint64_t iova; + int ret; + + if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { + DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height); + return -EINVAL; + } + + if (handle) { + cursor_bo = drm_gem_object_lookup(file_priv, handle); + if (!cursor_bo) + return -ENOENT; + } else { + cursor_bo = NULL; + } + + if (cursor_bo) { + ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &iova); + if (ret) + goto fail; + } else { + iova = 0; + } + + spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); + old_bo = mdp4_crtc->cursor.next_bo; + mdp4_crtc->cursor.next_bo = cursor_bo; + mdp4_crtc->cursor.next_iova = iova; + mdp4_crtc->cursor.width = width; + mdp4_crtc->cursor.height = height; + mdp4_crtc->cursor.stale = true; + spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); + + if (old_bo) { + /* drop our previous reference: */ + drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo); + } + + request_pending(crtc, PENDING_CURSOR); + + return 0; + +fail: + drm_gem_object_put(cursor_bo); + return ret; +} + +static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + unsigned long flags; + + spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); + mdp4_crtc->cursor.x = x; + mdp4_crtc->cursor.y = y; + spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); + + crtc_flush(crtc); + request_pending(crtc, PENDING_CURSOR); + + return 0; +} + +static const struct drm_crtc_funcs mdp4_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config, + .destroy = mdp4_crtc_destroy, + .page_flip = drm_atomic_helper_page_flip, + .cursor_set = mdp4_crtc_cursor_set, + .cursor_move = mdp4_crtc_cursor_move, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .enable_vblank = msm_crtc_enable_vblank, + .disable_vblank = msm_crtc_disable_vblank, +}; + +static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = { + .mode_set_nofb = mdp4_crtc_mode_set_nofb, + .atomic_check = mdp4_crtc_atomic_check, + .atomic_begin = mdp4_crtc_atomic_begin, + .atomic_flush = mdp4_crtc_atomic_flush, + .atomic_enable = mdp4_crtc_atomic_enable, + .atomic_disable = mdp4_crtc_atomic_disable, +}; + +static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) +{ + struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank); + struct drm_crtc *crtc = &mdp4_crtc->base; + struct msm_drm_private *priv = crtc->dev->dev_private; + unsigned pending; + + mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank); + + pending = atomic_xchg(&mdp4_crtc->pending, 0); + + if (pending & PENDING_FLIP) { + complete_flip(crtc, NULL); + } + + if (pending & PENDING_CURSOR) { + update_cursor(crtc); + drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq); + } +} + +static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus) +{ + struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err); + struct drm_crtc *crtc = &mdp4_crtc->base; + DBG("%s: error: %08x", mdp4_crtc->name, irqstatus); + crtc_flush(crtc); +} + +static void mdp4_crtc_wait_for_flush_done(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + int ret; + + ret = drm_crtc_vblank_get(crtc); + if (ret) + return; + + ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue, + !(mdp4_read(mdp4_kms, REG_MDP4_OVERLAY_FLUSH) & + mdp4_crtc->flushed_mask), + msecs_to_jiffies(50)); + if (ret <= 0) + dev_warn(dev->dev, "vblank timeout, crtc=%d\n", mdp4_crtc->id); + + mdp4_crtc->flushed_mask = 0; + + drm_crtc_vblank_put(crtc); +} + +uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + return mdp4_crtc->vblank.irqmask; +} + +/* set dma config, i.e. the format the encoder wants.
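+ * This is the pack/bpc value written to REG_MDP4_DMA_CONFIG below; + * the encoder's enable path programs it before enabling the interface.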
*/ +void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + + mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config); +} + +/* set interface for routing crtc->encoder: */ +void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + uint32_t intf_sel; + + intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL); + + switch (mdp4_crtc->dma) { + case DMA_P: + intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK; + intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf); + break; + case DMA_S: + intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK; + intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf); + break; + case DMA_E: + intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK; + intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf); + break; + } + + if (intf == INTF_DSI_VIDEO) { + intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD; + intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO; + } else if (intf == INTF_DSI_CMD) { + intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO; + intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD; + } + + mdp4_crtc->mixer = mixer; + + blend_setup(crtc); + + DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel); + + mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel); +} + +void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc) +{ + /* wait_for_flush_done is the only case for now. + * Later we will have command mode CRTC to wait for + * other event. + */ + mdp4_crtc_wait_for_flush_done(crtc); +} + +static const char *dma_names[] = { + "DMA_P", "DMA_S", "DMA_E", +}; + +/* initialize crtc */ +struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, + struct drm_plane *plane, int id, int ovlp_id, + enum mdp4_dma dma_id) +{ + struct drm_crtc *crtc = NULL; + struct mdp4_crtc *mdp4_crtc; + + mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL); + if (!mdp4_crtc) + return ERR_PTR(-ENOMEM); + + crtc = &mdp4_crtc->base; + + mdp4_crtc->id = id; + + mdp4_crtc->ovlp = ovlp_id; + mdp4_crtc->dma = dma_id; + + mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma); + mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq; + + mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma); + mdp4_crtc->err.irq = mdp4_crtc_err_irq; + + snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d", + dma_names[dma_id], ovlp_id); + + spin_lock_init(&mdp4_crtc->cursor.lock); + + drm_flip_work_init(&mdp4_crtc->unref_cursor_work, + "unref cursor", unref_cursor_worker); + + drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs, + NULL); + drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs); + + return crtc; +} diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c new file mode 100644 index 000000000..39b8fe53c --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2014, Inforce Computing. All rights reserved. 
+ * + * Author: Vinay Simha + */ + +#include +#include + +#include "mdp4_kms.h" + +#ifdef CONFIG_DRM_MSM_DSI + +struct mdp4_dsi_encoder { + struct drm_encoder base; + struct drm_panel *panel; + bool enabled; +}; +#define to_mdp4_dsi_encoder(x) container_of(x, struct mdp4_dsi_encoder, base) + +static struct mdp4_kms *get_kms(struct drm_encoder *encoder) +{ + struct msm_drm_private *priv = encoder->dev->dev_private; + return to_mdp4_kms(to_mdp_kms(priv->kms)); +} + +static void mdp4_dsi_encoder_destroy(struct drm_encoder *encoder) +{ + struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder); + + drm_encoder_cleanup(encoder); + kfree(mdp4_dsi_encoder); +} + +static const struct drm_encoder_funcs mdp4_dsi_encoder_funcs = { + .destroy = mdp4_dsi_encoder_destroy, +}; + +static void mdp4_dsi_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct mdp4_kms *mdp4_kms = get_kms(encoder); + uint32_t dsi_hsync_skew, vsync_period, vsync_len, ctrl_pol; + uint32_t display_v_start, display_v_end; + uint32_t hsync_start_x, hsync_end_x; + + mode = adjusted_mode; + + DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode)); + + ctrl_pol = 0; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + ctrl_pol |= MDP4_DSI_CTRL_POLARITY_HSYNC_LOW; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + ctrl_pol |= MDP4_DSI_CTRL_POLARITY_VSYNC_LOW; + /* probably need to get DATA_EN polarity from panel.. */ + + dsi_hsync_skew = 0; /* get this from panel? */ + + hsync_start_x = (mode->htotal - mode->hsync_start); + hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1; + + vsync_period = mode->vtotal * mode->htotal; + vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal; + display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dsi_hsync_skew; + display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dsi_hsync_skew - 1; + + mdp4_write(mdp4_kms, REG_MDP4_DSI_HSYNC_CTRL, + MDP4_DSI_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) | + MDP4_DSI_HSYNC_CTRL_PERIOD(mode->htotal)); + mdp4_write(mdp4_kms, REG_MDP4_DSI_VSYNC_PERIOD, vsync_period); + mdp4_write(mdp4_kms, REG_MDP4_DSI_VSYNC_LEN, vsync_len); + mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_HCTRL, + MDP4_DSI_DISPLAY_HCTRL_START(hsync_start_x) | + MDP4_DSI_DISPLAY_HCTRL_END(hsync_end_x)); + mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_VSTART, display_v_start); + mdp4_write(mdp4_kms, REG_MDP4_DSI_DISPLAY_VEND, display_v_end); + + mdp4_write(mdp4_kms, REG_MDP4_DSI_CTRL_POLARITY, ctrl_pol); + mdp4_write(mdp4_kms, REG_MDP4_DSI_UNDERFLOW_CLR, + MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY | + MDP4_DSI_UNDERFLOW_CLR_COLOR(0xff)); + mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_HCTL, + MDP4_DSI_ACTIVE_HCTL_START(0) | + MDP4_DSI_ACTIVE_HCTL_END(0)); + mdp4_write(mdp4_kms, REG_MDP4_DSI_HSYNC_SKEW, dsi_hsync_skew); + mdp4_write(mdp4_kms, REG_MDP4_DSI_BORDER_CLR, 0); + mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_VSTART, 0); + mdp4_write(mdp4_kms, REG_MDP4_DSI_ACTIVE_VEND, 0); +} + +static void mdp4_dsi_encoder_disable(struct drm_encoder *encoder) +{ + struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + + if (!mdp4_dsi_encoder->enabled) + return; + + mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0); + + /* + * Wait for a vsync so we know the ENABLE=0 latched before + * the (connector) source of the vsync's gets disabled, + * otherwise we end up in a funny state if we re-enable + * before the 
disable latches, which results that some of + * the settings changes for the new modeset (like new + * scanout buffer) don't latch properly.. + */ + mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC); + + mdp4_dsi_encoder->enabled = false; +} + +static void mdp4_dsi_encoder_enable(struct drm_encoder *encoder) +{ + struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + + if (mdp4_dsi_encoder->enabled) + return; + + mdp4_crtc_set_config(encoder->crtc, + MDP4_DMA_CONFIG_PACK_ALIGN_MSB | + MDP4_DMA_CONFIG_DEFLKR_EN | + MDP4_DMA_CONFIG_DITHER_EN | + MDP4_DMA_CONFIG_R_BPC(BPC8) | + MDP4_DMA_CONFIG_G_BPC(BPC8) | + MDP4_DMA_CONFIG_B_BPC(BPC8) | + MDP4_DMA_CONFIG_PACK(0x21)); + + mdp4_crtc_set_intf(encoder->crtc, INTF_DSI_VIDEO, 0); + + mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 1); + + mdp4_dsi_encoder->enabled = true; +} + +static const struct drm_encoder_helper_funcs mdp4_dsi_encoder_helper_funcs = { + .mode_set = mdp4_dsi_encoder_mode_set, + .disable = mdp4_dsi_encoder_disable, + .enable = mdp4_dsi_encoder_enable, +}; + +/* initialize encoder */ +struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev) +{ + struct drm_encoder *encoder = NULL; + struct mdp4_dsi_encoder *mdp4_dsi_encoder; + int ret; + + mdp4_dsi_encoder = kzalloc(sizeof(*mdp4_dsi_encoder), GFP_KERNEL); + if (!mdp4_dsi_encoder) { + ret = -ENOMEM; + goto fail; + } + + encoder = &mdp4_dsi_encoder->base; + + drm_encoder_init(dev, encoder, &mdp4_dsi_encoder_funcs, + DRM_MODE_ENCODER_DSI, NULL); + drm_encoder_helper_add(encoder, &mdp4_dsi_encoder_helper_funcs); + + return encoder; + +fail: + if (encoder) + mdp4_dsi_encoder_destroy(encoder); + + return ERR_PTR(ret); +} +#endif /* CONFIG_DRM_MSM_DSI */ diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c new file mode 100644 index 000000000..88645dbc3 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + */ + +#include +#include + +#include "mdp4_kms.h" + +struct mdp4_dtv_encoder { + struct drm_encoder base; + struct clk *hdmi_clk; + struct clk *mdp_clk; + unsigned long int pixclock; + bool enabled; + uint32_t bsc; +}; +#define to_mdp4_dtv_encoder(x) container_of(x, struct mdp4_dtv_encoder, base) + +static struct mdp4_kms *get_kms(struct drm_encoder *encoder) +{ + struct msm_drm_private *priv = encoder->dev->dev_private; + return to_mdp4_kms(to_mdp_kms(priv->kms)); +} + +static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder) +{ + struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); + drm_encoder_cleanup(encoder); + kfree(mdp4_dtv_encoder); +} + +static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = { + .destroy = mdp4_dtv_encoder_destroy, +}; + +static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol; + uint32_t display_v_start, display_v_end; + uint32_t hsync_start_x, hsync_end_x; + + mode = adjusted_mode; + + DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode)); + + mdp4_dtv_encoder->pixclock = mode->clock * 1000; + + DBG("pixclock=%lu", mdp4_dtv_encoder->pixclock); + + ctrl_pol = 0; + if (mode->flags & 
DRM_MODE_FLAG_NHSYNC) + ctrl_pol |= MDP4_DTV_CTRL_POLARITY_HSYNC_LOW; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + ctrl_pol |= MDP4_DTV_CTRL_POLARITY_VSYNC_LOW; + /* probably need to get DATA_EN polarity from panel.. */ + + dtv_hsync_skew = 0; /* get this from panel? */ + + hsync_start_x = (mode->htotal - mode->hsync_start); + hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1; + + vsync_period = mode->vtotal * mode->htotal; + vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal; + display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew; + display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1; + + mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_CTRL, + MDP4_DTV_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) | + MDP4_DTV_HSYNC_CTRL_PERIOD(mode->htotal)); + mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_PERIOD, vsync_period); + mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_LEN, vsync_len); + mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_HCTRL, + MDP4_DTV_DISPLAY_HCTRL_START(hsync_start_x) | + MDP4_DTV_DISPLAY_HCTRL_END(hsync_end_x)); + mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VSTART, display_v_start); + mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VEND, display_v_end); + mdp4_write(mdp4_kms, REG_MDP4_DTV_BORDER_CLR, 0); + mdp4_write(mdp4_kms, REG_MDP4_DTV_UNDERFLOW_CLR, + MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY | + MDP4_DTV_UNDERFLOW_CLR_COLOR(0xff)); + mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_SKEW, dtv_hsync_skew); + mdp4_write(mdp4_kms, REG_MDP4_DTV_CTRL_POLARITY, ctrl_pol); + mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_HCTL, + MDP4_DTV_ACTIVE_HCTL_START(0) | + MDP4_DTV_ACTIVE_HCTL_END(0)); + mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VSTART, 0); + mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VEND, 0); +} + +static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder) +{ + struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + + if (WARN_ON(!mdp4_dtv_encoder->enabled)) + return; + + mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0); + + /* + * Wait for a vsync so we know the ENABLE=0 latched before + * the (connector) source of the vsync's gets disabled, + * otherwise we end up in a funny state if we re-enable + * before the disable latches, which results that some of + * the settings changes for the new modeset (like new + * scanout buffer) don't latch properly.. 
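+ * (DTV scans out via DMA_E, hence the wait on the EXTERNAL vsync irq + * below; compare the DSI encoder, which waits on PRIMARY vsync.)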
+ */ + mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC); + + clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk); + clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk); + + mdp4_dtv_encoder->enabled = false; +} + +static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + unsigned long pc = mdp4_dtv_encoder->pixclock; + int ret; + + if (WARN_ON(mdp4_dtv_encoder->enabled)) + return; + + mdp4_crtc_set_config(encoder->crtc, + MDP4_DMA_CONFIG_R_BPC(BPC8) | + MDP4_DMA_CONFIG_G_BPC(BPC8) | + MDP4_DMA_CONFIG_B_BPC(BPC8) | + MDP4_DMA_CONFIG_PACK(0x21)); + mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 1); + + DBG("setting mdp_clk=%lu", pc); + + ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc); + if (ret) + DRM_DEV_ERROR(dev->dev, "failed to set mdp_clk to %lu: %d\n", + pc, ret); + + ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk); + if (ret) + DRM_DEV_ERROR(dev->dev, "failed to enable mdp_clk: %d\n", ret); + + ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk); + if (ret) + DRM_DEV_ERROR(dev->dev, "failed to enable hdmi_clk: %d\n", ret); + + mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1); + + mdp4_dtv_encoder->enabled = true; +} + +static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = { + .mode_set = mdp4_dtv_encoder_mode_set, + .enable = mdp4_dtv_encoder_enable, + .disable = mdp4_dtv_encoder_disable, +}; + +long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate) +{ + struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); + return clk_round_rate(mdp4_dtv_encoder->mdp_clk, rate); +} + +/* initialize encoder */ +struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev) +{ + struct drm_encoder *encoder = NULL; + struct mdp4_dtv_encoder *mdp4_dtv_encoder; + int ret; + + mdp4_dtv_encoder = kzalloc(sizeof(*mdp4_dtv_encoder), GFP_KERNEL); + if (!mdp4_dtv_encoder) { + ret = -ENOMEM; + goto fail; + } + + encoder = &mdp4_dtv_encoder->base; + + drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs, + DRM_MODE_ENCODER_TMDS, NULL); + drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs); + + mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk"); + if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) { + DRM_DEV_ERROR(dev->dev, "failed to get hdmi_clk\n"); + ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk); + goto fail; + } + + mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk"); + if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) { + DRM_DEV_ERROR(dev->dev, "failed to get tv_clk\n"); + ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk); + goto fail; + } + + return encoder; + +fail: + if (encoder) + mdp4_dtv_encoder_destroy(encoder); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c new file mode 100644 index 000000000..4d49f3ba6 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + */ + +#include +#include + +#include "msm_drv.h" +#include "mdp4_kms.h" + +void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, + uint32_t old_irqmask) +{ + mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_CLEAR, + irqmask ^ (irqmask & old_irqmask)); + mdp4_write(to_mdp4_kms(mdp_kms), REG_MDP4_INTR_ENABLE, irqmask); +} + +static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t 
irqstatus) +{ + struct mdp4_kms *mdp4_kms = container_of(irq, struct mdp4_kms, error_handler); + static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1); + extern bool dumpstate; + + DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus); + + if (dumpstate && __ratelimit(&rs)) { + struct drm_printer p = drm_info_printer(mdp4_kms->dev->dev); + drm_state_dump(mdp4_kms->dev, &p); + } +} + +void mdp4_irq_preinstall(struct msm_kms *kms) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + mdp4_enable(mdp4_kms); + mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff); + mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); + mdp4_disable(mdp4_kms); +} + +int mdp4_irq_postinstall(struct msm_kms *kms) +{ + struct mdp_kms *mdp_kms = to_mdp_kms(kms); + struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms); + struct mdp_irq *error_handler = &mdp4_kms->error_handler; + + error_handler->irq = mdp4_irq_error_handler; + error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN | + MDP4_IRQ_EXTERNAL_INTF_UDERRUN; + + mdp_irq_register(mdp_kms, error_handler); + + return 0; +} + +void mdp4_irq_uninstall(struct msm_kms *kms) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + mdp4_enable(mdp4_kms); + mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000); + mdp4_disable(mdp4_kms); +} + +irqreturn_t mdp4_irq(struct msm_kms *kms) +{ + struct mdp_kms *mdp_kms = to_mdp_kms(kms); + struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms); + struct drm_device *dev = mdp4_kms->dev; + struct msm_drm_private *priv = dev->dev_private; + unsigned int id; + uint32_t status, enable; + + enable = mdp4_read(mdp4_kms, REG_MDP4_INTR_ENABLE); + status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS) & enable; + mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status); + + VERB("status=%08x", status); + + mdp_dispatch_irqs(mdp_kms, status); + + for (id = 0; id < priv->num_crtcs; id++) + if (status & mdp4_crtc_vblank(priv->crtcs[id])) + drm_handle_vblank(dev, id); + + return IRQ_HANDLED; +} + +int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + + mdp4_enable(mdp4_kms); + mdp_update_vblank_mask(to_mdp_kms(kms), + mdp4_crtc_vblank(crtc), true); + mdp4_disable(mdp4_kms); + + return 0; +} + +void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + + mdp4_enable(mdp4_kms); + mdp_update_vblank_mask(to_mdp_kms(kms), + mdp4_crtc_vblank(crtc), false); + mdp4_disable(mdp4_kms); +} diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c new file mode 100644 index 000000000..964573d26 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c @@ -0,0 +1,600 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + */ + +#include + +#include + +#include "msm_drv.h" +#include "msm_gem.h" +#include "msm_mmu.h" +#include "mdp4_kms.h" + +static int mdp4_hw_init(struct msm_kms *kms) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + struct drm_device *dev = mdp4_kms->dev; + u32 dmap_cfg, vg_cfg; + unsigned long clk; + + pm_runtime_get_sync(dev->dev); + + if (mdp4_kms->rev > 1) { + mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff); + mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f); + } + + mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3); + + /* max read pending cmd config, 3 pending requests: */ + mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222); + + clk = clk_get_rate(mdp4_kms->clk); + + 
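/* newer core revisions and faster core clocks can sustain larger fetch bursts: */ +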
if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) { + dmap_cfg = 0x47; /* 16 bytes-burst x 8 req */ + vg_cfg = 0x47; /* 16 bytes-burst x 8 req */ + } else { + dmap_cfg = 0x27; /* 8 bytes-burst x 8 req */ + vg_cfg = 0x43; /* 16 bytes-burst x 4 req */ + } + + DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg); + + mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg); + mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg); + + if (mdp4_kms->rev >= 2) + mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1); + mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, 0); + + /* disable CSC matrix / YUV by default: */ + mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0); + mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0); + mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0); + mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0); + + if (mdp4_kms->rev > 1) + mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1); + + pm_runtime_put_sync(dev->dev); + + return 0; +} + +static void mdp4_enable_commit(struct msm_kms *kms) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + mdp4_enable(mdp4_kms); +} + +static void mdp4_disable_commit(struct msm_kms *kms) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + mdp4_disable(mdp4_kms); +} + +static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) +{ +} + +static void mdp4_flush_commit(struct msm_kms *kms, unsigned crtc_mask) +{ + /* TODO */ +} + +static void mdp4_wait_flush(struct msm_kms *kms, unsigned crtc_mask) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + struct drm_crtc *crtc; + + for_each_crtc_mask(mdp4_kms->dev, crtc, crtc_mask) + mdp4_crtc_wait_for_commit_done(crtc); +} + +static void mdp4_complete_commit(struct msm_kms *kms, unsigned crtc_mask) +{ +} + +static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate, + struct drm_encoder *encoder) +{ + /* if we had >1 encoder, we'd need something more clever: */ + switch (encoder->encoder_type) { + case DRM_MODE_ENCODER_TMDS: + return mdp4_dtv_round_pixclk(encoder, rate); + case DRM_MODE_ENCODER_LVDS: + case DRM_MODE_ENCODER_DSI: + default: + return rate; + } +} + +static void mdp4_destroy(struct msm_kms *kms) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + struct device *dev = mdp4_kms->dev->dev; + struct msm_gem_address_space *aspace = kms->aspace; + + if (mdp4_kms->blank_cursor_iova) + msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace); + drm_gem_object_put(mdp4_kms->blank_cursor_bo); + + if (aspace) { + aspace->mmu->funcs->detach(aspace->mmu); + msm_gem_address_space_put(aspace); + } + + if (mdp4_kms->rpm_enabled) + pm_runtime_disable(dev); + + mdp_kms_destroy(&mdp4_kms->base); + + kfree(mdp4_kms); +} + +static const struct mdp_kms_funcs kms_funcs = { + .base = { + .hw_init = mdp4_hw_init, + .irq_preinstall = mdp4_irq_preinstall, + .irq_postinstall = mdp4_irq_postinstall, + .irq_uninstall = mdp4_irq_uninstall, + .irq = mdp4_irq, + .enable_vblank = mdp4_enable_vblank, + .disable_vblank = mdp4_disable_vblank, + .enable_commit = mdp4_enable_commit, + .disable_commit = mdp4_disable_commit, + .prepare_commit = 
mdp4_prepare_commit, + .flush_commit = mdp4_flush_commit, + .wait_flush = mdp4_wait_flush, + .complete_commit = mdp4_complete_commit, + .get_format = mdp_get_format, + .round_pixclk = mdp4_round_pixclk, + .destroy = mdp4_destroy, + }, + .set_irqmask = mdp4_set_irqmask, +}; + +int mdp4_disable(struct mdp4_kms *mdp4_kms) +{ + DBG(""); + + clk_disable_unprepare(mdp4_kms->clk); + clk_disable_unprepare(mdp4_kms->pclk); + clk_disable_unprepare(mdp4_kms->lut_clk); + clk_disable_unprepare(mdp4_kms->axi_clk); + + return 0; +} + +int mdp4_enable(struct mdp4_kms *mdp4_kms) +{ + DBG(""); + + clk_prepare_enable(mdp4_kms->clk); + clk_prepare_enable(mdp4_kms->pclk); + clk_prepare_enable(mdp4_kms->lut_clk); + clk_prepare_enable(mdp4_kms->axi_clk); + + return 0; +} + + +static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms, + int intf_type) +{ + struct drm_device *dev = mdp4_kms->dev; + struct msm_drm_private *priv = dev->dev_private; + struct drm_encoder *encoder; + struct drm_connector *connector; + struct device_node *panel_node; + int dsi_id; + int ret; + + switch (intf_type) { + case DRM_MODE_ENCODER_LVDS: + /* + * bail out early if there is no panel node (no need to + * initialize LCDC encoder and LVDS connector) + */ + panel_node = of_graph_get_remote_node(dev->dev->of_node, 0, 0); + if (!panel_node) + return 0; + + encoder = mdp4_lcdc_encoder_init(dev, panel_node); + if (IS_ERR(encoder)) { + DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n"); + of_node_put(panel_node); + return PTR_ERR(encoder); + } + + /* LCDC can be hooked to DMA_P (TODO: Add DMA_S later?) */ + encoder->possible_crtcs = 1 << DMA_P; + + connector = mdp4_lvds_connector_init(dev, panel_node, encoder); + if (IS_ERR(connector)) { + DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n"); + of_node_put(panel_node); + return PTR_ERR(connector); + } + + break; + case DRM_MODE_ENCODER_TMDS: + encoder = mdp4_dtv_encoder_init(dev); + if (IS_ERR(encoder)) { + DRM_DEV_ERROR(dev->dev, "failed to construct DTV encoder\n"); + return PTR_ERR(encoder); + } + + /* DTV can be hooked to DMA_E: */ + encoder->possible_crtcs = 1 << 1; + + if (priv->hdmi) { + /* Construct bridge/connector for HDMI: */ + ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder); + if (ret) { + DRM_DEV_ERROR(dev->dev, "failed to initialize HDMI: %d\n", ret); + return ret; + } + } + + break; + case DRM_MODE_ENCODER_DSI: + /* only DSI1 supported for now */ + dsi_id = 0; + + if (!priv->dsi[dsi_id]) + break; + + encoder = mdp4_dsi_encoder_init(dev); + if (IS_ERR(encoder)) { + ret = PTR_ERR(encoder); + DRM_DEV_ERROR(dev->dev, + "failed to construct DSI encoder: %d\n", ret); + return ret; + } + + /* TODO: Add DMA_S later? 
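+ * (for now the DSI encoder scans out via DMA_P only, hence the + * single possible_crtcs bit below)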
*/ + encoder->possible_crtcs = 1 << DMA_P; + + ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder); + if (ret) { + DRM_DEV_ERROR(dev->dev, "failed to initialize DSI: %d\n", + ret); + return ret; + } + + break; + default: + DRM_DEV_ERROR(dev->dev, "Invalid or unsupported interface\n"); + return -EINVAL; + } + + return 0; +} + +static int modeset_init(struct mdp4_kms *mdp4_kms) +{ + struct drm_device *dev = mdp4_kms->dev; + struct msm_drm_private *priv = dev->dev_private; + struct drm_plane *plane; + struct drm_crtc *crtc; + int i, ret; + static const enum mdp4_pipe rgb_planes[] = { + RGB1, RGB2, + }; + static const enum mdp4_pipe vg_planes[] = { + VG1, VG2, + }; + static const enum mdp4_dma mdp4_crtcs[] = { + DMA_P, DMA_E, + }; + static const char * const mdp4_crtc_names[] = { + "DMA_P", "DMA_E", + }; + static const int mdp4_intfs[] = { + DRM_MODE_ENCODER_LVDS, + DRM_MODE_ENCODER_DSI, + DRM_MODE_ENCODER_TMDS, + }; + + /* construct non-private planes: */ + for (i = 0; i < ARRAY_SIZE(vg_planes); i++) { + plane = mdp4_plane_init(dev, vg_planes[i], false); + if (IS_ERR(plane)) { + DRM_DEV_ERROR(dev->dev, + "failed to construct plane for VG%d\n", i + 1); + ret = PTR_ERR(plane); + goto fail; + } + } + + for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) { + plane = mdp4_plane_init(dev, rgb_planes[i], true); + if (IS_ERR(plane)) { + DRM_DEV_ERROR(dev->dev, + "failed to construct plane for RGB%d\n", i + 1); + ret = PTR_ERR(plane); + goto fail; + } + + crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i, + mdp4_crtcs[i]); + if (IS_ERR(crtc)) { + DRM_DEV_ERROR(dev->dev, "failed to construct crtc for %s\n", + mdp4_crtc_names[i]); + ret = PTR_ERR(crtc); + goto fail; + } + + priv->crtcs[priv->num_crtcs++] = crtc; + } + + /* + * we currently set up two relatively fixed paths: + * + * LCDC/LVDS path: RGB1 -> DMA_P -> LCDC -> LVDS + * or + * DSI path: RGB1 -> DMA_P -> DSI1 -> DSI Panel + * + * DTV/HDMI path: RGB2 -> DMA_E -> DTV -> HDMI + */ + + for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) { + ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]); + if (ret) { + DRM_DEV_ERROR(dev->dev, "failed to initialize intf: %d, %d\n", + i, ret); + goto fail; + } + } + + return 0; + +fail: + return ret; +} + +static void read_mdp_hw_revision(struct mdp4_kms *mdp4_kms, + u32 *major, u32 *minor) +{ + struct drm_device *dev = mdp4_kms->dev; + u32 version; + + mdp4_enable(mdp4_kms); + version = mdp4_read(mdp4_kms, REG_MDP4_VERSION); + mdp4_disable(mdp4_kms); + + *major = FIELD(version, MDP4_VERSION_MAJOR); + *minor = FIELD(version, MDP4_VERSION_MINOR); + + DRM_DEV_INFO(dev->dev, "MDP4 version v%d.%d", *major, *minor); +} + +static int mdp4_kms_init(struct drm_device *dev) +{ + struct platform_device *pdev = to_platform_device(dev->dev); + struct msm_drm_private *priv = dev->dev_private; + struct mdp4_kms *mdp4_kms; + struct msm_kms *kms = NULL; + struct iommu_domain *iommu; + struct msm_gem_address_space *aspace; + int irq, ret; + u32 major, minor; + unsigned long max_clk; + + /* TODO: Chips that aren't apq8064 have a 200 Mhz max_clk */ + max_clk = 266667000; + + mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL); + if (!mdp4_kms) { + DRM_DEV_ERROR(dev->dev, "failed to allocate kms\n"); + return -ENOMEM; + } + + ret = mdp_kms_init(&mdp4_kms->base, &kms_funcs); + if (ret) { + DRM_DEV_ERROR(dev->dev, "failed to init kms\n"); + goto fail; + } + + priv->kms = &mdp4_kms->base.base; + kms = priv->kms; + + mdp4_kms->dev = dev; + + mdp4_kms->mmio = msm_ioremap(pdev, NULL); + if (IS_ERR(mdp4_kms->mmio)) { + ret = 
PTR_ERR(mdp4_kms->mmio); + goto fail; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret); + goto fail; + } + + kms->irq = irq; + + /* NOTE: driver for this regulator still missing upstream.. use + * _get_exclusive() and ignore the error if it does not exist + * (and hope that the bootloader left it on for us) + */ + mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd"); + if (IS_ERR(mdp4_kms->vdd)) + mdp4_kms->vdd = NULL; + + if (mdp4_kms->vdd) { + ret = regulator_enable(mdp4_kms->vdd); + if (ret) { + DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n", ret); + goto fail; + } + } + + mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk"); + if (IS_ERR(mdp4_kms->clk)) { + DRM_DEV_ERROR(dev->dev, "failed to get core_clk\n"); + ret = PTR_ERR(mdp4_kms->clk); + goto fail; + } + + mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk"); + if (IS_ERR(mdp4_kms->pclk)) + mdp4_kms->pclk = NULL; + + mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk"); + if (IS_ERR(mdp4_kms->axi_clk)) { + DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n"); + ret = PTR_ERR(mdp4_kms->axi_clk); + goto fail; + } + + clk_set_rate(mdp4_kms->clk, max_clk); + + read_mdp_hw_revision(mdp4_kms, &major, &minor); + + if (major != 4) { + DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n", + major, minor); + ret = -ENXIO; + goto fail; + } + + mdp4_kms->rev = minor; + + if (mdp4_kms->rev >= 2) { + mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk"); + if (IS_ERR(mdp4_kms->lut_clk)) { + DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n"); + ret = PTR_ERR(mdp4_kms->lut_clk); + goto fail; + } + clk_set_rate(mdp4_kms->lut_clk, max_clk); + } + + pm_runtime_enable(dev->dev); + mdp4_kms->rpm_enabled = true; + + /* make sure things are off before attaching iommu (bootloader could + * have left things on, in which case we'll start getting faults if + * we don't disable): + */ + mdp4_enable(mdp4_kms); + mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0); + mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0); + mdp4_disable(mdp4_kms); + mdelay(16); + + iommu = iommu_domain_alloc(pdev->dev.bus); + if (iommu) { + struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu); + + aspace = msm_gem_address_space_create(mmu, + "mdp4", 0x1000, 0x100000000 - 0x1000); + + if (IS_ERR(aspace)) { + if (!IS_ERR(mmu)) + mmu->funcs->destroy(mmu); + ret = PTR_ERR(aspace); + goto fail; + } + + kms->aspace = aspace; + } else { + DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys " + "contig buffers for scanout\n"); + aspace = NULL; + } + + ret = modeset_init(mdp4_kms); + if (ret) { + DRM_DEV_ERROR(dev->dev, "modeset_init failed: %d\n", ret); + goto fail; + } + + mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC | MSM_BO_SCANOUT); + if (IS_ERR(mdp4_kms->blank_cursor_bo)) { + ret = PTR_ERR(mdp4_kms->blank_cursor_bo); + DRM_DEV_ERROR(dev->dev, "could not allocate blank-cursor bo: %d\n", ret); + mdp4_kms->blank_cursor_bo = NULL; + goto fail; + } + + ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->aspace, + &mdp4_kms->blank_cursor_iova); + if (ret) { + DRM_DEV_ERROR(dev->dev, "could not pin blank-cursor bo: %d\n", ret); + goto fail; + } + + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.max_width = 2048; + dev->mode_config.max_height = 2048; + + return 0; + +fail: + if (kms) + mdp4_destroy(kms); + + return ret; +} + +static const struct dev_pm_ops mdp4_pm_ops = { 
+ .prepare = msm_pm_prepare, + .complete = msm_pm_complete, +}; + +static int mdp4_probe(struct platform_device *pdev) +{ + return msm_drv_probe(&pdev->dev, mdp4_kms_init); +} + +static int mdp4_remove(struct platform_device *pdev) +{ + component_master_del(&pdev->dev, &msm_drm_ops); + + return 0; +} + +static const struct of_device_id mdp4_dt_match[] = { + { .compatible = "qcom,mdp4" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mdp4_dt_match); + +static struct platform_driver mdp4_platform_driver = { + .probe = mdp4_probe, + .remove = mdp4_remove, + .shutdown = msm_drv_shutdown, + .driver = { + .name = "mdp4", + .of_match_table = mdp4_dt_match, + .pm = &mdp4_pm_ops, + }, +}; + +void __init msm_mdp4_register(void) +{ + platform_driver_register(&mdp4_platform_driver); +} + +void __exit msm_mdp4_unregister(void) +{ + platform_driver_unregister(&mdp4_platform_driver); +} diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h new file mode 100644 index 000000000..01179e764 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h @@ -0,0 +1,219 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + */ + +#ifndef __MDP4_KMS_H__ +#define __MDP4_KMS_H__ + +#include <drm/drm_panel.h> + +#include "msm_drv.h" +#include "msm_kms.h" +#include "disp/mdp_kms.h" +#include "mdp4.xml.h" + +struct device_node; + +struct mdp4_kms { + struct mdp_kms base; + + struct drm_device *dev; + + int rev; + + void __iomem *mmio; + + struct regulator *vdd; + + struct clk *clk; + struct clk *pclk; + struct clk *lut_clk; + struct clk *axi_clk; + + struct mdp_irq error_handler; + + bool rpm_enabled; + + /* empty/blank cursor bo to use when cursor is "disabled" */ + struct drm_gem_object *blank_cursor_bo; + uint64_t blank_cursor_iova; +}; +#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base) + +static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data) +{ + msm_writel(data, mdp4_kms->mmio + reg); +} + +static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg) +{ + return msm_readl(mdp4_kms->mmio + reg); +} + +static inline uint32_t pipe2flush(enum mdp4_pipe pipe) +{ + switch (pipe) { + case VG1: return MDP4_OVERLAY_FLUSH_VG1; + case VG2: return MDP4_OVERLAY_FLUSH_VG2; + case RGB1: return MDP4_OVERLAY_FLUSH_RGB1; + case RGB2: return MDP4_OVERLAY_FLUSH_RGB2; + default: return 0; + } +} + +static inline uint32_t ovlp2flush(int ovlp) +{ + switch (ovlp) { + case 0: return MDP4_OVERLAY_FLUSH_OVLP0; + case 1: return MDP4_OVERLAY_FLUSH_OVLP1; + default: return 0; + } +} + +static inline uint32_t dma2irq(enum mdp4_dma dma) +{ + switch (dma) { + case DMA_P: return MDP4_IRQ_DMA_P_DONE; + case DMA_S: return MDP4_IRQ_DMA_S_DONE; + case DMA_E: return MDP4_IRQ_DMA_E_DONE; + default: return 0; + } +} + +static inline uint32_t dma2err(enum mdp4_dma dma) +{ + switch (dma) { + case DMA_P: return MDP4_IRQ_PRIMARY_INTF_UDERRUN; + case DMA_S: return 0; // ???
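+	/* DMA_S seemingly has no dedicated underrun IRQ bit in the MDP4 register map, hence 0 */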
+ case DMA_E: return MDP4_IRQ_EXTERNAL_INTF_UDERRUN; + default: return 0; + } +} + +static inline uint32_t mixercfg(uint32_t mixer_cfg, int mixer, + enum mdp4_pipe pipe, enum mdp_mixer_stage_id stage) +{ + switch (pipe) { + case VG1: + mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK | + MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1); + mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE0(stage) | + COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1); + break; + case VG2: + mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK | + MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1); + mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE1(stage) | + COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1); + break; + case RGB1: + mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK | + MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1); + mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE2(stage) | + COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1); + break; + case RGB2: + mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK | + MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1); + mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE3(stage) | + COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1); + break; + case RGB3: + mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK | + MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1); + mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE4(stage) | + COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1); + break; + case VG3: + mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK | + MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1); + mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE5(stage) | + COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1); + break; + case VG4: + mixer_cfg &= ~(MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK | + MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1); + mixer_cfg |= MDP4_LAYERMIXER_IN_CFG_PIPE6(stage) | + COND(mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1); + break; + default: + WARN(1, "invalid pipe"); + break; + } + + return mixer_cfg; +} + +int mdp4_disable(struct mdp4_kms *mdp4_kms); +int mdp4_enable(struct mdp4_kms *mdp4_kms); + +void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, + uint32_t old_irqmask); +void mdp4_irq_preinstall(struct msm_kms *kms); +int mdp4_irq_postinstall(struct msm_kms *kms); +void mdp4_irq_uninstall(struct msm_kms *kms); +irqreturn_t mdp4_irq(struct msm_kms *kms); +int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); +void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); + +static inline uint32_t mdp4_pipe_caps(enum mdp4_pipe pipe) +{ + switch (pipe) { + case VG1: + case VG2: + case VG3: + case VG4: + return MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC; + case RGB1: + case RGB2: + case RGB3: + return MDP_PIPE_CAP_SCALE; + default: + return 0; + } +} + +enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane); +struct drm_plane *mdp4_plane_init(struct drm_device *dev, + enum mdp4_pipe pipe_id, bool private_plane); + +uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc); +void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config); +void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer); +void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc); +struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, + struct drm_plane *plane, int id, int ovlp_id, + enum mdp4_dma dma_id); + +long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate); +struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev); + +long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate); +struct drm_encoder *mdp4_lcdc_encoder_init(struct 
drm_device *dev, + struct device_node *panel_node); + +struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev, + struct device_node *panel_node, struct drm_encoder *encoder); + +#ifdef CONFIG_DRM_MSM_DSI +struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev); +#else +static inline struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev) +{ + return ERR_PTR(-ENODEV); +} +#endif + +#ifdef CONFIG_COMMON_CLK +struct clk *mpd4_lvds_pll_init(struct drm_device *dev); +#else +static inline struct clk *mpd4_lvds_pll_init(struct drm_device *dev) +{ + return ERR_PTR(-ENODEV); +} +#endif + +#endif /* __MDP4_KMS_H__ */ diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c new file mode 100644 index 000000000..10eb3e5b2 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c @@ -0,0 +1,445 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2014 Red Hat + * Author: Rob Clark + * Author: Vinay Simha + */ + +#include <linux/delay.h> + +#include <drm/drm_crtc.h> +#include <drm/drm_probe_helper.h> + +#include "mdp4_kms.h" + +struct mdp4_lcdc_encoder { + struct drm_encoder base; + struct device_node *panel_node; + struct drm_panel *panel; + struct clk *lcdc_clk; + unsigned long int pixclock; + struct regulator *regs[3]; + bool enabled; + uint32_t bsc; +}; +#define to_mdp4_lcdc_encoder(x) container_of(x, struct mdp4_lcdc_encoder, base) + +static struct mdp4_kms *get_kms(struct drm_encoder *encoder) +{ + struct msm_drm_private *priv = encoder->dev->dev_private; + return to_mdp4_kms(to_mdp_kms(priv->kms)); +} + +static void mdp4_lcdc_encoder_destroy(struct drm_encoder *encoder) +{ + struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = + to_mdp4_lcdc_encoder(encoder); + drm_encoder_cleanup(encoder); + kfree(mdp4_lcdc_encoder); +} + +static const struct drm_encoder_funcs mdp4_lcdc_encoder_funcs = { + .destroy = mdp4_lcdc_encoder_destroy, +}; + +/* this should probably be a helper: */ +static struct drm_connector *get_connector(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct drm_connector *connector; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) + if (connector->encoder == encoder) + return connector; + + return NULL; +} + +static void setup_phy(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct drm_connector *connector = get_connector(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + uint32_t lvds_intf = 0, lvds_phy_cfg0 = 0; + int bpp, nchan, swap; + + if (!connector) + return; + + bpp = 3 * connector->display_info.bpc; + + if (!bpp) + bpp = 18; + + /* TODO, these should come from panel somehow: */ + nchan = 1; + swap = 0; + + switch (bpp) { + case 24: + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(0), + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x08) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x05) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x04) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x03)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(0), + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x02) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x01) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x00)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(1), + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x11) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x10) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x0d) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x0c)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(1), + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x0b) | + 
MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x0a) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x09)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(2), + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x1a) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x19) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x18) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x15)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(2), + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x14) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x13) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x12)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(3), + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x1b) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x17) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x16) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x0f)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(3), + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x0e) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x07) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x06)); + if (nchan == 2) { + lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE3_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE2_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE1_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE0_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE3_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN; + } else { + lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE3_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN; + } + break; + + case 18: + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(0), + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x0a) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x07) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x06) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x05)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(0), + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x04) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x03) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x02)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(1), + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x13) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x12) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x0f) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x0e)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(1), + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x0d) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x0c) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x0b)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_3_TO_0(2), + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT0(0x1a) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT1(0x19) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT2(0x18) | + MDP4_LCDC_LVDS_MUX_CTL_3_TO_0_BIT3(0x17)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_MUX_CTL_6_TO_4(2), + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT4(0x16) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT5(0x15) | + MDP4_LCDC_LVDS_MUX_CTL_6_TO_4_BIT6(0x14)); + if (nchan == 2) { + lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE2_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE1_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH2_DATA_LANE0_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN; + } else { + lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE2_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE1_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_DATA_LANE0_EN; + } + lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_RGB_OUT; + break; + + default: + DRM_DEV_ERROR(dev->dev, "unknown bpp: %d\n", bpp); + return; + } + + switch (nchan) { + case 1: + 
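	/* single LVDS channel: enable only CH1's clock lane (the data lanes were enabled above per bpp) */ +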
lvds_phy_cfg0 = MDP4_LVDS_PHY_CFG0_CHANNEL0; + lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN | + MDP4_LCDC_LVDS_INTF_CTL_MODE_SEL; + break; + case 2: + lvds_phy_cfg0 = MDP4_LVDS_PHY_CFG0_CHANNEL0 | + MDP4_LVDS_PHY_CFG0_CHANNEL1; + lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH2_CLK_LANE_EN | + MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN; + break; + default: + DRM_DEV_ERROR(dev->dev, "unknown # of channels: %d\n", nchan); + return; + } + + if (swap) + lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_CH_SWAP; + + lvds_intf |= MDP4_LCDC_LVDS_INTF_CTL_ENABLE; + + mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, lvds_phy_cfg0); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_INTF_CTL, lvds_intf); + mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG2, 0x30); + + mb(); + udelay(1); + lvds_phy_cfg0 |= MDP4_LVDS_PHY_CFG0_SERIALIZATION_ENBLE; + mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, lvds_phy_cfg0); +} + +static void mdp4_lcdc_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = + to_mdp4_lcdc_encoder(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + uint32_t lcdc_hsync_skew, vsync_period, vsync_len, ctrl_pol; + uint32_t display_v_start, display_v_end; + uint32_t hsync_start_x, hsync_end_x; + + mode = adjusted_mode; + + DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode)); + + mdp4_lcdc_encoder->pixclock = mode->clock * 1000; + + DBG("pixclock=%lu", mdp4_lcdc_encoder->pixclock); + + ctrl_pol = 0; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + ctrl_pol |= MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + ctrl_pol |= MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW; + /* probably need to get DATA_EN polarity from panel.. */ + + lcdc_hsync_skew = 0; /* get this from panel? 
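(presumably a per-panel tuning value; zero is used unconditionally for now)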
*/ + + hsync_start_x = (mode->htotal - mode->hsync_start); + hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1; + + vsync_period = mode->vtotal * mode->htotal; + vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal; + display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + lcdc_hsync_skew; + display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + lcdc_hsync_skew - 1; + + mdp4_write(mdp4_kms, REG_MDP4_LCDC_HSYNC_CTRL, + MDP4_LCDC_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) | + MDP4_LCDC_HSYNC_CTRL_PERIOD(mode->htotal)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_VSYNC_PERIOD, vsync_period); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_VSYNC_LEN, vsync_len); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_DISPLAY_HCTRL, + MDP4_LCDC_DISPLAY_HCTRL_START(hsync_start_x) | + MDP4_LCDC_DISPLAY_HCTRL_END(hsync_end_x)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_DISPLAY_VSTART, display_v_start); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_DISPLAY_VEND, display_v_end); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_BORDER_CLR, 0); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_UNDERFLOW_CLR, + MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY | + MDP4_LCDC_UNDERFLOW_CLR_COLOR(0xff)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_HSYNC_SKEW, lcdc_hsync_skew); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_CTRL_POLARITY, ctrl_pol); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_HCTL, + MDP4_LCDC_ACTIVE_HCTL_START(0) | + MDP4_LCDC_ACTIVE_HCTL_END(0)); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_VSTART, 0); + mdp4_write(mdp4_kms, REG_MDP4_LCDC_ACTIVE_VEND, 0); +} + +static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = + to_mdp4_lcdc_encoder(encoder); + struct mdp4_kms *mdp4_kms = get_kms(encoder); + struct drm_panel *panel; + int i, ret; + + if (WARN_ON(!mdp4_lcdc_encoder->enabled)) + return; + + mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0); + + panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node); + if (!IS_ERR(panel)) { + drm_panel_disable(panel); + drm_panel_unprepare(panel); + } + + /* + * Wait for a vsync so we know the ENABLE=0 write has latched before + * the (connector) source of the vsyncs gets disabled, + * otherwise we end up in a funny state if we re-enable + * before the disable latches, with the result that some of + * the settings changes for the new modeset (like the new + * scanout buffer) don't latch properly..
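+	 * (the hardware only latches these bits at a vsync boundary, hence the explicit wait below)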
+ */ + mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_PRIMARY_VSYNC); + + clk_disable_unprepare(mdp4_lcdc_encoder->lcdc_clk); + + for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) { + ret = regulator_disable(mdp4_lcdc_encoder->regs[i]); + if (ret) + DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret); + } + + mdp4_lcdc_encoder->enabled = false; +} + +static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = + to_mdp4_lcdc_encoder(encoder); + unsigned long pc = mdp4_lcdc_encoder->pixclock; + struct mdp4_kms *mdp4_kms = get_kms(encoder); + struct drm_panel *panel; + uint32_t config; + int i, ret; + + if (WARN_ON(mdp4_lcdc_encoder->enabled)) + return; + + /* TODO: hard-coded for 18bpp: */ + config = + MDP4_DMA_CONFIG_R_BPC(BPC6) | + MDP4_DMA_CONFIG_G_BPC(BPC6) | + MDP4_DMA_CONFIG_B_BPC(BPC6) | + MDP4_DMA_CONFIG_PACK(0x21) | + MDP4_DMA_CONFIG_DEFLKR_EN | + MDP4_DMA_CONFIG_DITHER_EN; + + if (!of_property_read_bool(dev->dev->of_node, "qcom,lcdc-align-lsb")) + config |= MDP4_DMA_CONFIG_PACK_ALIGN_MSB; + + mdp4_crtc_set_config(encoder->crtc, config); + mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0); + + for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) { + ret = regulator_enable(mdp4_lcdc_encoder->regs[i]); + if (ret) + DRM_DEV_ERROR(dev->dev, "failed to enable regulator: %d\n", ret); + } + + DBG("setting lcdc_clk=%lu", pc); + ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc); + if (ret) + DRM_DEV_ERROR(dev->dev, "failed to configure lcdc_clk: %d\n", ret); + ret = clk_prepare_enable(mdp4_lcdc_encoder->lcdc_clk); + if (ret) + DRM_DEV_ERROR(dev->dev, "failed to enable lcdc_clk: %d\n", ret); + + panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node); + if (!IS_ERR(panel)) { + drm_panel_prepare(panel); + drm_panel_enable(panel); + } + + setup_phy(encoder); + + mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 1); + + mdp4_lcdc_encoder->enabled = true; +} + +static const struct drm_encoder_helper_funcs mdp4_lcdc_encoder_helper_funcs = { + .mode_set = mdp4_lcdc_encoder_mode_set, + .disable = mdp4_lcdc_encoder_disable, + .enable = mdp4_lcdc_encoder_enable, +}; + +long mdp4_lcdc_round_pixclk(struct drm_encoder *encoder, unsigned long rate) +{ + struct mdp4_lcdc_encoder *mdp4_lcdc_encoder = + to_mdp4_lcdc_encoder(encoder); + return clk_round_rate(mdp4_lcdc_encoder->lcdc_clk, rate); +} + +/* initialize encoder */ +struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev, + struct device_node *panel_node) +{ + struct drm_encoder *encoder = NULL; + struct mdp4_lcdc_encoder *mdp4_lcdc_encoder; + struct regulator *reg; + int ret; + + mdp4_lcdc_encoder = kzalloc(sizeof(*mdp4_lcdc_encoder), GFP_KERNEL); + if (!mdp4_lcdc_encoder) { + ret = -ENOMEM; + goto fail; + } + + mdp4_lcdc_encoder->panel_node = panel_node; + + encoder = &mdp4_lcdc_encoder->base; + + drm_encoder_init(dev, encoder, &mdp4_lcdc_encoder_funcs, + DRM_MODE_ENCODER_LVDS, NULL); + drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs); + + /* TODO: do we need different pll in other cases? */ + mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev); + if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) { + DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n"); + ret = PTR_ERR(mdp4_lcdc_encoder->lcdc_clk); + goto fail; + } + + /* TODO: different regulators in other cases? 
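(the three supplies requested below are simply the set the first supported LVDS board needed)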
*/ + reg = devm_regulator_get(dev->dev, "lvds-vccs-3p3v"); + if (IS_ERR(reg)) { + ret = PTR_ERR(reg); + DRM_DEV_ERROR(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret); + goto fail; + } + mdp4_lcdc_encoder->regs[0] = reg; + + reg = devm_regulator_get(dev->dev, "lvds-pll-vdda"); + if (IS_ERR(reg)) { + ret = PTR_ERR(reg); + DRM_DEV_ERROR(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret); + goto fail; + } + mdp4_lcdc_encoder->regs[1] = reg; + + reg = devm_regulator_get(dev->dev, "lvds-vdda"); + if (IS_ERR(reg)) { + ret = PTR_ERR(reg); + DRM_DEV_ERROR(dev->dev, "failed to get lvds-vdda: %d\n", ret); + goto fail; + } + mdp4_lcdc_encoder->regs[2] = reg; + + return encoder; + +fail: + if (encoder) + mdp4_lcdc_encoder_destroy(encoder); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c new file mode 100644 index 000000000..7444b75c4 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2014 Red Hat + * Author: Rob Clark + * Author: Vinay Simha + */ + +#include "mdp4_kms.h" + +struct mdp4_lvds_connector { + struct drm_connector base; + struct drm_encoder *encoder; + struct device_node *panel_node; + struct drm_panel *panel; +}; +#define to_mdp4_lvds_connector(x) container_of(x, struct mdp4_lvds_connector, base) + +static enum drm_connector_status mdp4_lvds_connector_detect( + struct drm_connector *connector, bool force) +{ + struct mdp4_lvds_connector *mdp4_lvds_connector = + to_mdp4_lvds_connector(connector); + + if (!mdp4_lvds_connector->panel) { + mdp4_lvds_connector->panel = + of_drm_find_panel(mdp4_lvds_connector->panel_node); + if (IS_ERR(mdp4_lvds_connector->panel)) + mdp4_lvds_connector->panel = NULL; + } + + return mdp4_lvds_connector->panel ? 
+ connector_status_connected : + connector_status_disconnected; +} + +static void mdp4_lvds_connector_destroy(struct drm_connector *connector) +{ + struct mdp4_lvds_connector *mdp4_lvds_connector = + to_mdp4_lvds_connector(connector); + + drm_connector_cleanup(connector); + + kfree(mdp4_lvds_connector); +} + +static int mdp4_lvds_connector_get_modes(struct drm_connector *connector) +{ + struct mdp4_lvds_connector *mdp4_lvds_connector = + to_mdp4_lvds_connector(connector); + struct drm_panel *panel = mdp4_lvds_connector->panel; + int ret = 0; + + if (panel) + ret = drm_panel_get_modes(panel, connector); + + return ret; +} + +static enum drm_mode_status +mdp4_lvds_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct mdp4_lvds_connector *mdp4_lvds_connector = + to_mdp4_lvds_connector(connector); + struct drm_encoder *encoder = mdp4_lvds_connector->encoder; + long actual, requested; + + requested = 1000 * mode->clock; + actual = mdp4_lcdc_round_pixclk(encoder, requested); + + DBG("requested=%ld, actual=%ld", requested, actual); + + if (actual != requested) + return MODE_CLOCK_RANGE; + + return MODE_OK; +} + +static const struct drm_connector_funcs mdp4_lvds_connector_funcs = { + .detect = mdp4_lvds_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = mdp4_lvds_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = { + .get_modes = mdp4_lvds_connector_get_modes, + .mode_valid = mdp4_lvds_connector_mode_valid, +}; + +/* initialize connector */ +struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev, + struct device_node *panel_node, struct drm_encoder *encoder) +{ + struct drm_connector *connector = NULL; + struct mdp4_lvds_connector *mdp4_lvds_connector; + + mdp4_lvds_connector = kzalloc(sizeof(*mdp4_lvds_connector), GFP_KERNEL); + if (!mdp4_lvds_connector) + return ERR_PTR(-ENOMEM); + + mdp4_lvds_connector->encoder = encoder; + mdp4_lvds_connector->panel_node = panel_node; + + connector = &mdp4_lvds_connector->base; + + drm_connector_init(dev, connector, &mdp4_lvds_connector_funcs, + DRM_MODE_CONNECTOR_LVDS); + drm_connector_helper_add(connector, &mdp4_lvds_connector_helper_funcs); + + connector->polled = 0; + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + + drm_connector_attach_encoder(connector, encoder); + + return connector; +} diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c new file mode 100644 index 000000000..ab8c0c187 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2014 Red Hat + * Author: Rob Clark + */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> + +#include "mdp4_kms.h" + +struct mdp4_lvds_pll { + struct clk_hw pll_hw; + struct drm_device *dev; + unsigned long pixclk; +}; +#define to_mdp4_lvds_pll(x) container_of(x, struct mdp4_lvds_pll, pll_hw) + +static struct mdp4_kms *get_kms(struct mdp4_lvds_pll *lvds_pll) +{ + struct msm_drm_private *priv = lvds_pll->dev->dev_private; + return to_mdp4_kms(to_mdp_kms(priv->kms)); +} + +struct pll_rate { + unsigned long rate; + struct { + uint32_t val; + uint32_t reg; + } conf[32]; +}; + +/* NOTE: keep sorted highest freq to lowest: */
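+/* Each entry is a complete LVDS PHY PLL register recipe for one pixel clock, terminated by a { 0, 0 } pair; only the 72 MHz point has been characterized so far. */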
+static const struct pll_rate freqtbl[] = { + { 72000000, { + { 0x8f, REG_MDP4_LVDS_PHY_PLL_CTRL_1 }, + { 0x30, REG_MDP4_LVDS_PHY_PLL_CTRL_2 }, + { 0xc6, REG_MDP4_LVDS_PHY_PLL_CTRL_3 }, + { 0x10, REG_MDP4_LVDS_PHY_PLL_CTRL_5 }, + { 0x07, REG_MDP4_LVDS_PHY_PLL_CTRL_6 }, + { 0x62, REG_MDP4_LVDS_PHY_PLL_CTRL_7 }, + { 0x41, REG_MDP4_LVDS_PHY_PLL_CTRL_8 }, + { 0x0d, REG_MDP4_LVDS_PHY_PLL_CTRL_9 }, + { 0, 0 } } + }, +}; + +/* pick the nearest table entry at or above the requested rate (clamped to the table ends) */ +static const struct pll_rate *find_rate(unsigned long rate) +{ + int i; + for (i = 1; i < ARRAY_SIZE(freqtbl); i++) + if (rate > freqtbl[i].rate) + return &freqtbl[i-1]; + return &freqtbl[i-1]; +} + +static int mpd4_lvds_pll_enable(struct clk_hw *hw) +{ + struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw); + struct mdp4_kms *mdp4_kms = get_kms(lvds_pll); + const struct pll_rate *pll_rate = find_rate(lvds_pll->pixclk); + int i; + + if (WARN_ON(!pll_rate)) + return -EINVAL; + + DBG("pixclk=%lu (%lu)", lvds_pll->pixclk, pll_rate->rate); + + mdp4_write(mdp4_kms, REG_MDP4_LCDC_LVDS_PHY_RESET, 0x33); + + for (i = 0; pll_rate->conf[i].reg; i++) + mdp4_write(mdp4_kms, pll_rate->conf[i].reg, pll_rate->conf[i].val); + + mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_CTRL_0, 0x01); + + /* Wait until LVDS PLL is locked and ready */ + while (!mdp4_read(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_LOCKED)) + cpu_relax(); + + return 0; +} + +static void mpd4_lvds_pll_disable(struct clk_hw *hw) +{ + struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw); + struct mdp4_kms *mdp4_kms = get_kms(lvds_pll); + + DBG(""); + + mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_CFG0, 0x0); + mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_CTRL_0, 0x0); +} + +static unsigned long mpd4_lvds_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw); + return lvds_pll->pixclk; +} + +static long mpd4_lvds_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + const struct pll_rate *pll_rate = find_rate(rate); + return pll_rate->rate; +} + +static int mpd4_lvds_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw); + lvds_pll->pixclk = rate; + return 0; +} + + +static const struct clk_ops mpd4_lvds_pll_ops = { + .enable = mpd4_lvds_pll_enable, + .disable = mpd4_lvds_pll_disable, + .recalc_rate = mpd4_lvds_pll_recalc_rate, + .round_rate = mpd4_lvds_pll_round_rate, + .set_rate = mpd4_lvds_pll_set_rate, +}; + +static const char *mpd4_lvds_pll_parents[] = { + "pxo", +}; + +static struct clk_init_data pll_init = { + .name = "mpd4_lvds_pll", + .ops = &mpd4_lvds_pll_ops, + .parent_names = mpd4_lvds_pll_parents, + .num_parents = ARRAY_SIZE(mpd4_lvds_pll_parents), +}; + +struct clk *mpd4_lvds_pll_init(struct drm_device *dev) +{ + struct mdp4_lvds_pll *lvds_pll; + struct clk *clk; + int ret; + + lvds_pll = devm_kzalloc(dev->dev, sizeof(*lvds_pll), GFP_KERNEL); + if (!lvds_pll) { + ret = -ENOMEM; + goto fail; + } + + lvds_pll->dev = dev; + + lvds_pll->pll_hw.init = &pll_init; + clk = devm_clk_register(dev->dev, &lvds_pll->pll_hw); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + goto fail; + } + + return clk; + +fail: + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c new file mode 100644 index 000000000..b689b618d --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + */ +
+#include <drm/drm_atomic.h> +#include <drm/drm_damage_helper.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_gem_atomic_helper.h> + +#include "mdp4_kms.h" + +#define DOWN_SCALE_MAX 8 +#define UP_SCALE_MAX 8 + +struct mdp4_plane { + struct drm_plane base; + const char *name; + + enum mdp4_pipe pipe; + + uint32_t caps; + uint32_t nformats; + uint32_t formats[32]; + + bool enabled; +}; +#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base) + +/* MDP format helper functions */ +static inline +enum mdp4_frame_format mdp4_get_frame_format(struct drm_framebuffer *fb) +{ + bool is_tile = false; + + if (fb->modifier == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE) + is_tile = true; + + if (fb->format->format == DRM_FORMAT_NV12 && is_tile) + return FRAME_TILE_YCBCR_420; + + return FRAME_LINEAR; +} + +static void mdp4_plane_set_scanout(struct drm_plane *plane, + struct drm_framebuffer *fb); +static int mdp4_plane_mode_set(struct drm_plane *plane, + struct drm_crtc *crtc, struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h); + +static struct mdp4_kms *get_kms(struct drm_plane *plane) +{ + struct msm_drm_private *priv = plane->dev->dev_private; + return to_mdp4_kms(to_mdp_kms(priv->kms)); +} + +static void mdp4_plane_destroy(struct drm_plane *plane) +{ + struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); + + drm_plane_cleanup(plane); + + kfree(mdp4_plane); +} + +/* helper to install properties which are common to planes and crtcs */ +static void mdp4_plane_install_properties(struct drm_plane *plane, + struct drm_mode_object *obj) +{ + // XXX +} + +static int mdp4_plane_set_property(struct drm_plane *plane, + struct drm_property *property, uint64_t val) +{ + // XXX + return -EINVAL; +} + +static const struct drm_plane_funcs mdp4_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = mdp4_plane_destroy, + .set_property = mdp4_plane_set_property, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static int mdp4_plane_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct msm_drm_private *priv = plane->dev->dev_private; + struct msm_kms *kms = priv->kms; + + if (!new_state->fb) + return 0; + + drm_gem_plane_helper_prepare_fb(plane, new_state); + + return msm_framebuffer_prepare(new_state->fb, kms->aspace, false); +} + +static void mdp4_plane_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); + struct mdp4_kms *mdp4_kms = get_kms(plane); + struct msm_kms *kms = &mdp4_kms->base.base; + struct drm_framebuffer *fb = old_state->fb; + + if (!fb) + return; + + DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id); + msm_framebuffer_cleanup(fb, kms->aspace, false); +} + + +static int mdp4_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + return 0; +} + +static void mdp4_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + int ret; + + ret = mdp4_plane_mode_set(plane, + new_state->crtc, new_state->fb, + new_state->crtc_x, new_state->crtc_y, + new_state->crtc_w, new_state->crtc_h, + new_state->src_x, new_state->src_y, + new_state->src_w, new_state->src_h); + /* atomic_check should have ensured that this doesn't fail */
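	/* (note: mdp4_plane_atomic_check() above is currently a stub returning 0, so this WARN can still trip on out-of-range scale factors) */ +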
WARN_ON(ret < 0); +} + +static const struct drm_plane_helper_funcs mdp4_plane_helper_funcs = { + .prepare_fb = mdp4_plane_prepare_fb, + .cleanup_fb = mdp4_plane_cleanup_fb, + .atomic_check = mdp4_plane_atomic_check, + .atomic_update = mdp4_plane_atomic_update, +}; + +static void mdp4_plane_set_scanout(struct drm_plane *plane, + struct drm_framebuffer *fb) +{ + struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); + struct mdp4_kms *mdp4_kms = get_kms(plane); + struct msm_kms *kms = &mdp4_kms->base.base; + enum mdp4_pipe pipe = mdp4_plane->pipe; + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe), + MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) | + MDP4_PIPE_SRC_STRIDE_A_P1(fb->pitches[1])); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_B(pipe), + MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) | + MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3])); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), + msm_framebuffer_iova(fb, kms->aspace, 0)); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe), + msm_framebuffer_iova(fb, kms->aspace, 1)); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe), + msm_framebuffer_iova(fb, kms->aspace, 2)); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe), + msm_framebuffer_iova(fb, kms->aspace, 3)); +} + +static void mdp4_write_csc_config(struct mdp4_kms *mdp4_kms, + enum mdp4_pipe pipe, struct csc_cfg *csc) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(csc->matrix); i++) { + mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_MV(pipe, i), + csc->matrix[i]); + } + + for (i = 0; i < ARRAY_SIZE(csc->post_bias) ; i++) { + mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_PRE_BV(pipe, i), + csc->pre_bias[i]); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_POST_BV(pipe, i), + csc->post_bias[i]); + } + + for (i = 0; i < ARRAY_SIZE(csc->post_clamp) ; i++) { + mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_PRE_LV(pipe, i), + csc->pre_clamp[i]); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_CSC_POST_LV(pipe, i), + csc->post_clamp[i]); + } +} + +#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000 + +static int mdp4_plane_mode_set(struct drm_plane *plane, + struct drm_crtc *crtc, struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h) +{ + struct drm_device *dev = plane->dev; + struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); + struct mdp4_kms *mdp4_kms = get_kms(plane); + enum mdp4_pipe pipe = mdp4_plane->pipe; + const struct mdp_format *format; + uint32_t op_mode = 0; + uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT; + uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT; + enum mdp4_frame_format frame_type; + + if (!(crtc && fb)) { + DBG("%s: disabled!", mdp4_plane->name); + return 0; + } + + frame_type = mdp4_get_frame_format(fb); + + /* src values are in Q16 fixed point, convert to integer: */ + src_x = src_x >> 16; + src_y = src_y >> 16; + src_w = src_w >> 16; + src_h = src_h >> 16; + + DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp4_plane->name, + fb->base.id, src_x, src_y, src_w, src_h, + crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h); + + format = to_mdp_format(msm_framebuffer_format(fb)); + + if (src_w > (crtc_w * DOWN_SCALE_MAX)) { + DRM_DEV_ERROR(dev->dev, "Width down scaling exceeds limits!\n"); + return -ERANGE; + } + + if (src_h > (crtc_h * DOWN_SCALE_MAX)) { + DRM_DEV_ERROR(dev->dev, "Height down scaling exceeds limits!\n"); + return -ERANGE; + } + + if (crtc_w > (src_w * UP_SCALE_MAX)) { + DRM_DEV_ERROR(dev->dev, "Width up scaling exceeds limits!\n"); + return -ERANGE; + } + 
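+	/* the same 8x limit applies to vertical up-scaling: */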
+ if (crtc_h > (src_h * UP_SCALE_MAX)) { + DRM_DEV_ERROR(dev->dev, "Height up scaling exceeds limits!\n"); + return -ERANGE; + } + + if (src_w != crtc_w) { + uint32_t sel_unit = SCALE_FIR; + op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN; + + if (MDP_FORMAT_IS_YUV(format)) { + if (crtc_w > src_w) + sel_unit = SCALE_PIXEL_RPT; + else if (crtc_w <= (src_w / 4)) + sel_unit = SCALE_MN_PHASE; + + op_mode |= MDP4_PIPE_OP_MODE_SCALEX_UNIT_SEL(sel_unit); + phasex_step = mult_frac(MDP4_VG_PHASE_STEP_DEFAULT, + src_w, crtc_w); + } + } + + if (src_h != crtc_h) { + uint32_t sel_unit = SCALE_FIR; + op_mode |= MDP4_PIPE_OP_MODE_SCALEY_EN; + + if (MDP_FORMAT_IS_YUV(format)) { + + if (crtc_h > src_h) + sel_unit = SCALE_PIXEL_RPT; + else if (crtc_h <= (src_h / 4)) + sel_unit = SCALE_MN_PHASE; + + op_mode |= MDP4_PIPE_OP_MODE_SCALEY_UNIT_SEL(sel_unit); + phasey_step = mult_frac(MDP4_VG_PHASE_STEP_DEFAULT, + src_h, crtc_h); + } + } + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_SIZE(pipe), + MDP4_PIPE_SRC_SIZE_WIDTH(src_w) | + MDP4_PIPE_SRC_SIZE_HEIGHT(src_h)); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_XY(pipe), + MDP4_PIPE_SRC_XY_X(src_x) | + MDP4_PIPE_SRC_XY_Y(src_y)); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_SIZE(pipe), + MDP4_PIPE_DST_SIZE_WIDTH(crtc_w) | + MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h)); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe), + MDP4_PIPE_DST_XY_X(crtc_x) | + MDP4_PIPE_DST_XY_Y(crtc_y)); + + mdp4_plane_set_scanout(plane, fb); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe), + MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) | + MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) | + MDP4_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) | + MDP4_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) | + COND(format->alpha_enable, MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE) | + MDP4_PIPE_SRC_FORMAT_CPP(format->cpp - 1) | + MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) | + MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(format->fetch_type) | + MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample) | + MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT(frame_type) | + COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT)); + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe), + MDP4_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) | + MDP4_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) | + MDP4_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) | + MDP4_PIPE_SRC_UNPACK_ELEM3(format->unpack[3])); + + if (MDP_FORMAT_IS_YUV(format)) { + struct csc_cfg *csc = mdp_get_default_csc_cfg(CSC_YUV2RGB); + + op_mode |= MDP4_PIPE_OP_MODE_SRC_YCBCR; + op_mode |= MDP4_PIPE_OP_MODE_CSC_EN; + mdp4_write_csc_config(mdp4_kms, pipe, csc); + } + + mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(pipe), op_mode); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step); + mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step); + + if (frame_type != FRAME_LINEAR) + mdp4_write(mdp4_kms, REG_MDP4_PIPE_SSTILE_FRAME_SIZE(pipe), + MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH(src_w) | + MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT(src_h)); + + return 0; +} + +static const char *pipe_names[] = { + "VG1", "VG2", + "RGB1", "RGB2", "RGB3", + "VG3", "VG4", +}; + +enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane) +{ + struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); + return mdp4_plane->pipe; +} + +static const uint64_t supported_format_modifiers[] = { + DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +/* initialize plane */ +struct drm_plane *mdp4_plane_init(struct drm_device *dev, + enum mdp4_pipe pipe_id, bool private_plane) +{ + struct drm_plane *plane = 
NULL; + struct mdp4_plane *mdp4_plane; + int ret; + enum drm_plane_type type; + + mdp4_plane = kzalloc(sizeof(*mdp4_plane), GFP_KERNEL); + if (!mdp4_plane) { + ret = -ENOMEM; + goto fail; + } + + plane = &mdp4_plane->base; + + mdp4_plane->pipe = pipe_id; + mdp4_plane->name = pipe_names[pipe_id]; + mdp4_plane->caps = mdp4_pipe_caps(pipe_id); + + mdp4_plane->nformats = mdp_get_formats(mdp4_plane->formats, + ARRAY_SIZE(mdp4_plane->formats), + !pipe_supports_yuv(mdp4_plane->caps)); + + type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; + ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs, + mdp4_plane->formats, mdp4_plane->nformats, + supported_format_modifiers, type, NULL); + if (ret) + goto fail; + + drm_plane_helper_add(plane, &mdp4_plane_helper_funcs); + + mdp4_plane_install_properties(plane, &plane->base); + + drm_plane_enable_fb_damage_clips(plane); + + return plane; + +fail: + if (plane) + mdp4_plane_destroy(plane); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h new file mode 100644 index 000000000..86fc44b51 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h @@ -0,0 +1,1979 @@ +#ifndef MDP5_XML +#define MDP5_XML + +/* Autogenerated file, DO NOT EDIT manually! + +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://github.com/freedreno/envytools/ +git clone https://github.com/freedreno/envytools.git + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13) +- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) +- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22) + +Copyright (C) 2013-2021 by the following 
authors: +- Rob Clark (robclark) +- Ilia Mirkin (imirkin) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ + + +enum mdp5_intf_type { + INTF_DISABLED = 0, + INTF_DSI = 1, + INTF_HDMI = 3, + INTF_LCDC = 5, + INTF_eDP = 9, + INTF_VIRTUAL = 100, + INTF_WB = 101, +}; + +enum mdp5_intfnum { + NO_INTF = 0, + INTF0 = 1, + INTF1 = 2, + INTF2 = 3, + INTF3 = 4, +}; + +enum mdp5_pipe { + SSPP_NONE = 0, + SSPP_VIG0 = 1, + SSPP_VIG1 = 2, + SSPP_VIG2 = 3, + SSPP_RGB0 = 4, + SSPP_RGB1 = 5, + SSPP_RGB2 = 6, + SSPP_DMA0 = 7, + SSPP_DMA1 = 8, + SSPP_VIG3 = 9, + SSPP_RGB3 = 10, + SSPP_CURSOR0 = 11, + SSPP_CURSOR1 = 12, +}; + +enum mdp5_format { + DUMMY = 0, +}; + +enum mdp5_ctl_mode { + MODE_NONE = 0, + MODE_WB_0_BLOCK = 1, + MODE_WB_1_BLOCK = 2, + MODE_WB_0_LINE = 3, + MODE_WB_1_LINE = 4, + MODE_WB_2_LINE = 5, +}; + +enum mdp5_pack_3d { + PACK_3D_FRAME_INT = 0, + PACK_3D_H_ROW_INT = 1, + PACK_3D_V_ROW_INT = 2, + PACK_3D_COL_INT = 3, +}; + +enum mdp5_scale_filter { + SCALE_FILTER_NEAREST = 0, + SCALE_FILTER_BIL = 1, + SCALE_FILTER_PCMN = 2, + SCALE_FILTER_CA = 3, +}; + +enum mdp5_pipe_bwc { + BWC_LOSSLESS = 0, + BWC_Q_HIGH = 1, + BWC_Q_MED = 2, +}; + +enum mdp5_cursor_format { + CURSOR_FMT_ARGB8888 = 0, + CURSOR_FMT_ARGB1555 = 2, + CURSOR_FMT_ARGB4444 = 4, +}; + +enum mdp5_cursor_alpha { + CURSOR_ALPHA_CONST = 0, + CURSOR_ALPHA_PER_PIXEL = 2, +}; + +enum mdp5_igc_type { + IGC_VIG = 0, + IGC_RGB = 1, + IGC_DMA = 2, + IGC_DSPP = 3, +}; + +enum mdp5_data_format { + DATA_FORMAT_RGB = 0, + DATA_FORMAT_YUV = 1, +}; + +enum mdp5_block_size { + BLOCK_SIZE_64 = 0, + BLOCK_SIZE_128 = 1, +}; + +enum mdp5_rotate_mode { + ROTATE_0 = 0, + ROTATE_90 = 1, +}; + +enum mdp5_chroma_downsample_method { + DS_MTHD_NO_PIXEL_DROP = 0, + DS_MTHD_PIXEL_DROP = 1, +}; + +#define MDP5_IRQ_WB_0_DONE 0x00000001 +#define MDP5_IRQ_WB_1_DONE 0x00000002 +#define MDP5_IRQ_WB_2_DONE 0x00000010 +#define MDP5_IRQ_PING_PONG_0_DONE 0x00000100 +#define MDP5_IRQ_PING_PONG_1_DONE 0x00000200 +#define MDP5_IRQ_PING_PONG_2_DONE 0x00000400 +#define MDP5_IRQ_PING_PONG_3_DONE 0x00000800 +#define MDP5_IRQ_PING_PONG_0_RD_PTR 0x00001000 +#define MDP5_IRQ_PING_PONG_1_RD_PTR 0x00002000 +#define MDP5_IRQ_PING_PONG_2_RD_PTR 0x00004000 +#define MDP5_IRQ_PING_PONG_3_RD_PTR 0x00008000 +#define MDP5_IRQ_PING_PONG_0_WR_PTR 0x00010000 +#define MDP5_IRQ_PING_PONG_1_WR_PTR 0x00020000 +#define MDP5_IRQ_PING_PONG_2_WR_PTR 0x00040000 +#define MDP5_IRQ_PING_PONG_3_WR_PTR 0x00080000 +#define MDP5_IRQ_PING_PONG_0_AUTO_REF 0x00100000 +#define 
MDP5_IRQ_PING_PONG_1_AUTO_REF 0x00200000 +#define MDP5_IRQ_PING_PONG_2_AUTO_REF 0x00400000 +#define MDP5_IRQ_PING_PONG_3_AUTO_REF 0x00800000 +#define MDP5_IRQ_INTF0_UNDER_RUN 0x01000000 +#define MDP5_IRQ_INTF0_VSYNC 0x02000000 +#define MDP5_IRQ_INTF1_UNDER_RUN 0x04000000 +#define MDP5_IRQ_INTF1_VSYNC 0x08000000 +#define MDP5_IRQ_INTF2_UNDER_RUN 0x10000000 +#define MDP5_IRQ_INTF2_VSYNC 0x20000000 +#define MDP5_IRQ_INTF3_UNDER_RUN 0x40000000 +#define MDP5_IRQ_INTF3_VSYNC 0x80000000 +#define REG_MDSS_HW_VERSION 0x00000000 +#define MDSS_HW_VERSION_STEP__MASK 0x0000ffff +#define MDSS_HW_VERSION_STEP__SHIFT 0 +static inline uint32_t MDSS_HW_VERSION_STEP(uint32_t val) +{ + return ((val) << MDSS_HW_VERSION_STEP__SHIFT) & MDSS_HW_VERSION_STEP__MASK; +} +#define MDSS_HW_VERSION_MINOR__MASK 0x0fff0000 +#define MDSS_HW_VERSION_MINOR__SHIFT 16 +static inline uint32_t MDSS_HW_VERSION_MINOR(uint32_t val) +{ + return ((val) << MDSS_HW_VERSION_MINOR__SHIFT) & MDSS_HW_VERSION_MINOR__MASK; +} +#define MDSS_HW_VERSION_MAJOR__MASK 0xf0000000 +#define MDSS_HW_VERSION_MAJOR__SHIFT 28 +static inline uint32_t MDSS_HW_VERSION_MAJOR(uint32_t val) +{ + return ((val) << MDSS_HW_VERSION_MAJOR__SHIFT) & MDSS_HW_VERSION_MAJOR__MASK; +} + +#define REG_MDSS_HW_INTR_STATUS 0x00000010 +#define MDSS_HW_INTR_STATUS_INTR_MDP 0x00000001 +#define MDSS_HW_INTR_STATUS_INTR_DSI0 0x00000010 +#define MDSS_HW_INTR_STATUS_INTR_DSI1 0x00000020 +#define MDSS_HW_INTR_STATUS_INTR_HDMI 0x00000100 +#define MDSS_HW_INTR_STATUS_INTR_EDP 0x00001000 + +#define REG_MDP5_HW_VERSION 0x00000000 +#define MDP5_HW_VERSION_STEP__MASK 0x0000ffff +#define MDP5_HW_VERSION_STEP__SHIFT 0 +static inline uint32_t MDP5_HW_VERSION_STEP(uint32_t val) +{ + return ((val) << MDP5_HW_VERSION_STEP__SHIFT) & MDP5_HW_VERSION_STEP__MASK; +} +#define MDP5_HW_VERSION_MINOR__MASK 0x0fff0000 +#define MDP5_HW_VERSION_MINOR__SHIFT 16 +static inline uint32_t MDP5_HW_VERSION_MINOR(uint32_t val) +{ + return ((val) << MDP5_HW_VERSION_MINOR__SHIFT) & MDP5_HW_VERSION_MINOR__MASK; +} +#define MDP5_HW_VERSION_MAJOR__MASK 0xf0000000 +#define MDP5_HW_VERSION_MAJOR__SHIFT 28 +static inline uint32_t MDP5_HW_VERSION_MAJOR(uint32_t val) +{ + return ((val) << MDP5_HW_VERSION_MAJOR__SHIFT) & MDP5_HW_VERSION_MAJOR__MASK; +} + +#define REG_MDP5_DISP_INTF_SEL 0x00000004 +#define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff +#define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0 +static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val) +{ + return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK; +} +#define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00 +#define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8 +static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val) +{ + return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK; +} +#define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000 +#define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16 +static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val) +{ + return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK; +} +#define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000 +#define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24 +static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val) +{ + return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK; +} + +#define REG_MDP5_INTR_EN 0x00000010 + +#define REG_MDP5_INTR_STATUS 0x00000014 + +#define REG_MDP5_INTR_CLEAR 0x00000018 + +#define REG_MDP5_HIST_INTR_EN 0x0000001c + +#define REG_MDP5_HIST_INTR_STATUS 
0x00000020 + +#define REG_MDP5_HIST_INTR_CLEAR 0x00000024 + +#define REG_MDP5_SPARE_0 0x00000028 +#define MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001 + +static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000080 + 0x4*i0; } + +static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000080 + 0x4*i0; } +#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff +#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0 +static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(uint32_t val) +{ + return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK; +} +#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00 +#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8 +static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(uint32_t val) +{ + return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK; +} +#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000 +#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16 +static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(uint32_t val) +{ + return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK; +} + +static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000130 + 0x4*i0; } + +static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000130 + 0x4*i0; } +#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff +#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0 +static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(uint32_t val) +{ + return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK; +} +#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00 +#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8 +static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(uint32_t val) +{ + return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK; +} +#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000 +#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16 +static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(uint32_t val) +{ + return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK; +} + +static inline uint32_t __offset_IGC(enum mdp5_igc_type idx) +{ + switch (idx) { + case IGC_VIG: return 0x00000200; + case IGC_RGB: return 0x00000210; + case IGC_DMA: return 0x00000220; + case IGC_DSPP: return 0x00000300; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); } + +static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; } +#define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff +#define MDP5_IGC_LUT_REG_VAL__SHIFT 0 +static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val) +{ + return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK; +} +#define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000 +#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000 +#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000 +#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000 + +#define REG_MDP5_SPLIT_DPL_EN 0x000002f4 + +#define REG_MDP5_SPLIT_DPL_UPPER 0x000002f8 +#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002 +#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004 +#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010 +#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100 + +#define 
REG_MDP5_SPLIT_DPL_LOWER 0x000003f0 +#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002 +#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004 +#define MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010 +#define MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100 + +static inline uint32_t __offset_CTL(uint32_t idx) +{ + switch (idx) { + case 0: return (mdp5_cfg->ctl.base[0]); + case 1: return (mdp5_cfg->ctl.base[1]); + case 2: return (mdp5_cfg->ctl.base[2]); + case 3: return (mdp5_cfg->ctl.base[3]); + case 4: return (mdp5_cfg->ctl.base[4]); + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000000 + __offset_CTL(i0); } + +static inline uint32_t __offset_LAYER(uint32_t idx) +{ + switch (idx) { + case 0: return 0x00000000; + case 1: return 0x00000004; + case 2: return 0x00000008; + case 3: return 0x0000000c; + case 4: return 0x00000010; + case 5: return 0x00000024; + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); } + +static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); } +#define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007 +#define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0 +static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(uint32_t val) +{ + return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK; +} +#define MDP5_CTL_LAYER_REG_VIG1__MASK 0x00000038 +#define MDP5_CTL_LAYER_REG_VIG1__SHIFT 3 +static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(uint32_t val) +{ + return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK; +} +#define MDP5_CTL_LAYER_REG_VIG2__MASK 0x000001c0 +#define MDP5_CTL_LAYER_REG_VIG2__SHIFT 6 +static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(uint32_t val) +{ + return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK; +} +#define MDP5_CTL_LAYER_REG_RGB0__MASK 0x00000e00 +#define MDP5_CTL_LAYER_REG_RGB0__SHIFT 9 +static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(uint32_t val) +{ + return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK; +} +#define MDP5_CTL_LAYER_REG_RGB1__MASK 0x00007000 +#define MDP5_CTL_LAYER_REG_RGB1__SHIFT 12 +static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(uint32_t val) +{ + return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK; +} +#define MDP5_CTL_LAYER_REG_RGB2__MASK 0x00038000 +#define MDP5_CTL_LAYER_REG_RGB2__SHIFT 15 +static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(uint32_t val) +{ + return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK; +} +#define MDP5_CTL_LAYER_REG_DMA0__MASK 0x001c0000 +#define MDP5_CTL_LAYER_REG_DMA0__SHIFT 18 +static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(uint32_t val) +{ + return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK; +} +#define MDP5_CTL_LAYER_REG_DMA1__MASK 0x00e00000 +#define MDP5_CTL_LAYER_REG_DMA1__SHIFT 21 +static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(uint32_t val) +{ + return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK; +} +#define MDP5_CTL_LAYER_REG_BORDER_COLOR 0x01000000 +#define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000 +#define MDP5_CTL_LAYER_REG_VIG3__MASK 0x1c000000 +#define MDP5_CTL_LAYER_REG_VIG3__SHIFT 26 +static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(uint32_t val) +{ + return ((val) << MDP5_CTL_LAYER_REG_VIG3__SHIFT) & MDP5_CTL_LAYER_REG_VIG3__MASK; +} +#define 
+static inline uint32_t __offset_LAYER(uint32_t idx)
+{
+	switch (idx) {
+		case 0: return 0x00000000;
+		case 1: return 0x00000004;
+		case 2: return 0x00000008;
+		case 3: return 0x0000000c;
+		case 4: return 0x00000010;
+		case 5: return 0x00000024;
+		default: return INVALID_IDX(idx);
+	}
+}
+static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); }
+
+static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); }
+#define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007
+#define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(uint32_t val)
+{
+	return ((val) << MDP5_CTL_LAYER_REG_VIG0__SHIFT) & MDP5_CTL_LAYER_REG_VIG0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_VIG1__MASK 0x00000038
+#define MDP5_CTL_LAYER_REG_VIG1__SHIFT 3
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG1(uint32_t val)
+{
+	return ((val) << MDP5_CTL_LAYER_REG_VIG1__SHIFT) & MDP5_CTL_LAYER_REG_VIG1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_VIG2__MASK 0x000001c0
+#define MDP5_CTL_LAYER_REG_VIG2__SHIFT 6
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG2(uint32_t val)
+{
+	return ((val) << MDP5_CTL_LAYER_REG_VIG2__SHIFT) & MDP5_CTL_LAYER_REG_VIG2__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB0__MASK 0x00000e00
+#define MDP5_CTL_LAYER_REG_RGB0__SHIFT 9
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB0(uint32_t val)
+{
+	return ((val) << MDP5_CTL_LAYER_REG_RGB0__SHIFT) & MDP5_CTL_LAYER_REG_RGB0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB1__MASK 0x00007000
+#define MDP5_CTL_LAYER_REG_RGB1__SHIFT 12
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB1(uint32_t val)
+{
+	return ((val) << MDP5_CTL_LAYER_REG_RGB1__SHIFT) & MDP5_CTL_LAYER_REG_RGB1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB2__MASK 0x00038000
+#define MDP5_CTL_LAYER_REG_RGB2__SHIFT 15
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB2(uint32_t val)
+{
+	return ((val) << MDP5_CTL_LAYER_REG_RGB2__SHIFT) & MDP5_CTL_LAYER_REG_RGB2__MASK;
+}
+#define MDP5_CTL_LAYER_REG_DMA0__MASK 0x001c0000
+#define MDP5_CTL_LAYER_REG_DMA0__SHIFT 18
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA0(uint32_t val)
+{
+	return ((val) << MDP5_CTL_LAYER_REG_DMA0__SHIFT) & MDP5_CTL_LAYER_REG_DMA0__MASK;
+}
+#define MDP5_CTL_LAYER_REG_DMA1__MASK 0x00e00000
+#define MDP5_CTL_LAYER_REG_DMA1__SHIFT 21
+static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(uint32_t val)
+{
+	return ((val) << MDP5_CTL_LAYER_REG_DMA1__SHIFT) & MDP5_CTL_LAYER_REG_DMA1__MASK;
+}
+#define MDP5_CTL_LAYER_REG_BORDER_COLOR 0x01000000
+#define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000
+#define MDP5_CTL_LAYER_REG_VIG3__MASK 0x1c000000
+#define MDP5_CTL_LAYER_REG_VIG3__SHIFT 26
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(uint32_t val)
+{
+	return ((val) << MDP5_CTL_LAYER_REG_VIG3__SHIFT) & MDP5_CTL_LAYER_REG_VIG3__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB3__MASK 0xe0000000
+#define MDP5_CTL_LAYER_REG_RGB3__SHIFT 29
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(uint32_t val)
+{
+	return ((val) << MDP5_CTL_LAYER_REG_RGB3__SHIFT) & MDP5_CTL_LAYER_REG_RGB3__MASK;
+}
+
+static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000014 + __offset_CTL(i0); }
+#define MDP5_CTL_OP_MODE__MASK 0x0000000f
+#define MDP5_CTL_OP_MODE__SHIFT 0
+static inline uint32_t MDP5_CTL_OP_MODE(enum mdp5_ctl_mode val)
+{
+	return ((val) << MDP5_CTL_OP_MODE__SHIFT) & MDP5_CTL_OP_MODE__MASK;
+}
+#define MDP5_CTL_OP_INTF_NUM__MASK 0x00000070
+#define MDP5_CTL_OP_INTF_NUM__SHIFT 4
+static inline uint32_t MDP5_CTL_OP_INTF_NUM(enum mdp5_intfnum val)
+{
+	return ((val) << MDP5_CTL_OP_INTF_NUM__SHIFT) & MDP5_CTL_OP_INTF_NUM__MASK;
+}
+#define MDP5_CTL_OP_CMD_MODE 0x00020000
+#define MDP5_CTL_OP_PACK_3D_ENABLE 0x00080000
+#define MDP5_CTL_OP_PACK_3D__MASK 0x00300000
+#define MDP5_CTL_OP_PACK_3D__SHIFT 20
+static inline uint32_t MDP5_CTL_OP_PACK_3D(enum mdp5_pack_3d val)
+{
+	return ((val) << MDP5_CTL_OP_PACK_3D__SHIFT) & MDP5_CTL_OP_PACK_3D__MASK;
+}
+
+static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000018 + __offset_CTL(i0); }
+#define MDP5_CTL_FLUSH_VIG0 0x00000001
+#define MDP5_CTL_FLUSH_VIG1 0x00000002
+#define MDP5_CTL_FLUSH_VIG2 0x00000004
+#define MDP5_CTL_FLUSH_RGB0 0x00000008
+#define MDP5_CTL_FLUSH_RGB1 0x00000010
+#define MDP5_CTL_FLUSH_RGB2 0x00000020
+#define MDP5_CTL_FLUSH_LM0 0x00000040
+#define MDP5_CTL_FLUSH_LM1 0x00000080
+#define MDP5_CTL_FLUSH_LM2 0x00000100
+#define MDP5_CTL_FLUSH_LM3 0x00000200
+#define MDP5_CTL_FLUSH_LM4 0x00000400
+#define MDP5_CTL_FLUSH_DMA0 0x00000800
+#define MDP5_CTL_FLUSH_DMA1 0x00001000
+#define MDP5_CTL_FLUSH_DSPP0 0x00002000
+#define MDP5_CTL_FLUSH_DSPP1 0x00004000
+#define MDP5_CTL_FLUSH_DSPP2 0x00008000
+#define MDP5_CTL_FLUSH_WB 0x00010000
+#define MDP5_CTL_FLUSH_CTL 0x00020000
+#define MDP5_CTL_FLUSH_VIG3 0x00040000
+#define MDP5_CTL_FLUSH_RGB3 0x00080000
+#define MDP5_CTL_FLUSH_LM5 0x00100000
+#define MDP5_CTL_FLUSH_DSPP3 0x00200000
+#define MDP5_CTL_FLUSH_CURSOR_0 0x00400000
+#define MDP5_CTL_FLUSH_CURSOR_1 0x00800000
+#define MDP5_CTL_FLUSH_CHROMADOWN_0 0x04000000
+#define MDP5_CTL_FLUSH_TIMING_3 0x10000000
+#define MDP5_CTL_FLUSH_TIMING_2 0x20000000
+#define MDP5_CTL_FLUSH_TIMING_1 0x40000000
+#define MDP5_CTL_FLUSH_TIMING_0 0x80000000
+
+static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __offset_CTL(i0); }
+
+static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000020 + __offset_CTL(i0); }
+
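+/* Editor's sketch (illustrative only): CTL_FLUSH is a bitmask register, so
+ * several blocks can be committed with a single write, for example a source
+ * pipe, the mixer it feeds and the CTL path itself:
+ *
+ *	mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(ctl_id),
+ *		   MDP5_CTL_FLUSH_VIG0 |
+ *		   MDP5_CTL_FLUSH_LM0 |
+ *		   MDP5_CTL_FLUSH_CTL);
+ *
+ * mdp5_write()/mdp5_kms/ctl_id are assumed from the surrounding driver and
+ * are not defined in this header.
+ */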
+static inline uint32_t __offset_LAYER_EXT(uint32_t idx)
+{
+	switch (idx) {
+		case 0: return 0x00000040;
+		case 1: return 0x00000044;
+		case 2: return 0x00000048;
+		case 3: return 0x0000004c;
+		case 4: return 0x00000050;
+		case 5: return 0x00000054;
+		default: return INVALID_IDX(idx);
+	}
+}
+static inline uint32_t REG_MDP5_CTL_LAYER_EXT(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); }
+
+static inline uint32_t REG_MDP5_CTL_LAYER_EXT_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER_EXT(i1); }
+#define MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3 0x00000001
+#define MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3 0x00000004
+#define MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3 0x00000010
+#define MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3 0x00000040
+#define MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3 0x00000100
+#define MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3 0x00000400
+#define MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3 0x00001000
+#define MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3 0x00004000
+#define MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3 0x00010000
+#define MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3 0x00040000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK 0x00f00000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT 20
+static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR0(enum mdp_mixer_stage_id val)
+{
+	return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR0__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR0__MASK;
+}
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK 0x3c000000
+#define MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT 26
+static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR1(enum mdp_mixer_stage_id val)
+{
+	return ((val) << MDP5_CTL_LAYER_EXT_REG_CURSOR1__SHIFT) & MDP5_CTL_LAYER_EXT_REG_CURSOR1__MASK;
+}
+
+static inline uint32_t __offset_PIPE(enum mdp5_pipe idx)
+{
+	switch (idx) {
+		case SSPP_NONE: return (INVALID_IDX(idx));
+		case SSPP_VIG0: return (mdp5_cfg->pipe_vig.base[0]);
+		case SSPP_VIG1: return (mdp5_cfg->pipe_vig.base[1]);
+		case SSPP_VIG2: return (mdp5_cfg->pipe_vig.base[2]);
+		case SSPP_RGB0: return (mdp5_cfg->pipe_rgb.base[0]);
+		case SSPP_RGB1: return (mdp5_cfg->pipe_rgb.base[1]);
+		case SSPP_RGB2: return (mdp5_cfg->pipe_rgb.base[2]);
+		case SSPP_DMA0: return (mdp5_cfg->pipe_dma.base[0]);
+		case SSPP_DMA1: return (mdp5_cfg->pipe_dma.base[1]);
+		case SSPP_VIG3: return (mdp5_cfg->pipe_vig.base[3]);
+		case SSPP_RGB3: return (mdp5_cfg->pipe_rgb.base[3]);
+		case SSPP_CURSOR0: return (mdp5_cfg->pipe_cursor.base[0]);
+		case SSPP_CURSOR1: return (mdp5_cfg->pipe_cursor.base[1]);
+		default: return INVALID_IDX(idx);
+	}
+}
+static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_OP_MODE(enum mdp5_pipe i0) { return 0x00000200 + __offset_PIPE(i0); }
+#define MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__MASK 0x00080000
+#define MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT 19
+static inline uint32_t MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(enum mdp5_data_format val)
+{
+	return ((val) << MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT) & MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT__MASK;
+}
+#define MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__MASK 0x00040000
+#define MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT 18
+static inline uint32_t MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(enum mdp5_data_format val)
+{
+	return ((val) << MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT) & MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT__MASK;
+}
+#define MDP5_PIPE_OP_MODE_CSC_1_EN 0x00020000
+
+static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000002c4 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000002f0 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00000300 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(enum mdp5_pipe i0) { return 0x00000320 + __offset_PIPE(i0); }
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__MASK 0x00001fff
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11__MASK;
+}
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__MASK 0x1fff0000
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__SHIFT 16
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(enum mdp5_pipe i0) { return 0x00000324 + __offset_PIPE(i0); }
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__MASK 0x00001fff
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13__MASK;
+}
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__MASK 0x1fff0000
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__SHIFT 16
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(enum mdp5_pipe i0) { return 0x00000328 + __offset_PIPE(i0); }
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__MASK 0x00001fff
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22__MASK;
+}
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__MASK 0x1fff0000
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__SHIFT 16
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(enum mdp5_pipe i0) { return 0x0000032c + __offset_PIPE(i0); }
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__MASK 0x00001fff
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31__MASK;
+}
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__MASK 0x1fff0000
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__SHIFT 16
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(enum mdp5_pipe i0) { return 0x00000330 + __offset_PIPE(i0); }
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__MASK 0x00001fff
+#define MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__SHIFT) & MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_CLAMP(enum mdp5_pipe i0, uint32_t i1) { return 0x00000334 + __offset_PIPE(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_CLAMP_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000334 + __offset_PIPE(i0) + 0x4*i1; }
+#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__MASK 0x000000ff
+#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__SHIFT) & MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH__MASK;
+}
+#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__MASK 0x0000ff00
+#define MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__SHIFT 8
+static inline uint32_t MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__SHIFT) & MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_CLAMP(enum mdp5_pipe i0, uint32_t i1) { return 0x00000340 + __offset_PIPE(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_CLAMP_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000340 + __offset_PIPE(i0) + 0x4*i1; }
+#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__MASK 0x000000ff
+#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__SHIFT) & MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH__MASK;
+}
+#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__MASK 0x0000ff00
+#define MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__SHIFT 8
+static inline uint32_t MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__SHIFT) & MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_BIAS(enum mdp5_pipe i0, uint32_t i1) { return 0x0000034c + __offset_PIPE(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_PRE_BIAS_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x0000034c + __offset_PIPE(i0) + 0x4*i1; }
+#define MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__MASK 0x000001ff
+#define MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__SHIFT) & MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_BIAS(enum mdp5_pipe i0, uint32_t i1) { return 0x00000358 + __offset_PIPE(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_PIPE_CSC_1_POST_BIAS_REG(enum mdp5_pipe i0, uint32_t i1) { return 0x00000358 + __offset_PIPE(i0) + 0x4*i1; }
+#define MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__MASK 0x000001ff
+#define MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__SHIFT 0
+static inline uint32_t MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__SHIFT) & MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_SIZE_WIDTH(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00000004 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_WIDTH(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK;
+}
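+/* Editor's sketch (illustrative only): SRC_SIZE packs a full geometry into
+ * one 32-bit write, height in bits [31:16] and width in bits [15:0]:
+ *
+ *	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
+ *		   MDP5_PIPE_SRC_SIZE_HEIGHT(src_h) |
+ *		   MDP5_PIPE_SRC_SIZE_WIDTH(src_w));
+ *
+ * pipe, src_h and src_w are hypothetical locals; SRC_IMG_SIZE above uses
+ * the same layout for the full surface dimensions.
+ */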
+
+static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00000008 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_XY_Y__MASK 0xffff0000
+#define MDP5_PIPE_SRC_XY_Y__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_XY_Y(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_XY_Y__SHIFT) & MDP5_PIPE_SRC_XY_Y__MASK;
+}
+#define MDP5_PIPE_SRC_XY_X__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_XY_X__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_XY_X(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_XY_X__SHIFT) & MDP5_PIPE_SRC_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000000c + __offset_PIPE(i0); }
+#define MDP5_PIPE_OUT_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_PIPE_OUT_SIZE_HEIGHT(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT) & MDP5_PIPE_OUT_SIZE_HEIGHT__MASK;
+}
+#define MDP5_PIPE_OUT_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_PIPE_OUT_SIZE_WIDTH(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT) & MDP5_PIPE_OUT_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00000010 + __offset_PIPE(i0); }
+#define MDP5_PIPE_OUT_XY_Y__MASK 0xffff0000
+#define MDP5_PIPE_OUT_XY_Y__SHIFT 16
+static inline uint32_t MDP5_PIPE_OUT_XY_Y(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_OUT_XY_Y__SHIFT) & MDP5_PIPE_OUT_XY_Y__MASK;
+}
+#define MDP5_PIPE_OUT_XY_X__MASK 0x0000ffff
+#define MDP5_PIPE_OUT_XY_X__SHIFT 0
+static inline uint32_t MDP5_PIPE_OUT_XY_X(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_OUT_XY_X__SHIFT) & MDP5_PIPE_OUT_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00000014 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00000018 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000001c + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00000020 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00000024 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P0(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P0__MASK;
+}
+#define MDP5_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000
+#define MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P1(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P1__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00000028 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
+#define MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P2(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P2__MASK;
+}
+#define MDP5_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000
+#define MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P3(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P3__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000002c + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00000030 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
+#define MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
+{
+	return ((val) << MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_G_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
+#define MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_B_BPC(enum mdp_bpc val)
+{
+	return ((val) << MDP5_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_B_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
+#define MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_R_BPC(enum mdp_bpc val)
+{
+	return ((val) << MDP5_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_R_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
+#define MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_A_BPC(enum mdp_bpc_alpha val)
+{
+	return ((val) << MDP5_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP5_PIPE_SRC_FORMAT_A_BPC__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100
+#define MDP5_PIPE_SRC_FORMAT_CPP__MASK 0x00000600
+#define MDP5_PIPE_SRC_FORMAT_CPP__SHIFT 9
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_CPP(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CPP__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_ROT90 0x00000800
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00003000
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 12
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
+#define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
+#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK 0x00180000
+#define MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT 19
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(enum mdp_fetch_type val)
+{
+	return ((val) << MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__SHIFT) & MDP5_PIPE_SRC_FORMAT_FETCH_TYPE__MASK;
+}
+#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK 0x01800000
+#define MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT 23
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp_chroma_samp_type val)
+{
+	return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK;
+}
+
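+/* Editor's sketch (illustrative only): a packed 32bpp RGBA-style source
+ * format word combines per-component depths with layout flags, roughly:
+ *
+ *	uint32_t fmt = MDP5_PIPE_SRC_FORMAT_A_BPC(BPC8A) |
+ *		       MDP5_PIPE_SRC_FORMAT_R_BPC(BPC8) |
+ *		       MDP5_PIPE_SRC_FORMAT_G_BPC(BPC8) |
+ *		       MDP5_PIPE_SRC_FORMAT_B_BPC(BPC8) |
+ *		       MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE |
+ *		       MDP5_PIPE_SRC_FORMAT_CPP(3) |
+ *		       MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(3) |
+ *		       MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT;
+ *
+ * BPC8/BPC8A are assumed to come from the shared mdp enums, and the exact
+ * CPP/UNPACK_COUNT encoding (count minus one) is an assumption here; the
+ * driver derives all of this from its format tables rather than open-coding
+ * it.
+ */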
+static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00000034 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
+#define MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM0__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00
+#define MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT 8
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM1(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM1__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000
+#define MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT 16
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM2(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM2__MASK;
+}
+#define MDP5_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000
+#define MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT 24
+static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM3__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00000038 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SRC_OP_MODE_BWC_EN 0x00000001
+#define MDP5_PIPE_SRC_OP_MODE_BWC__MASK 0x00000006
+#define MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT 1
+static inline uint32_t MDP5_PIPE_SRC_OP_MODE_BWC(enum mdp5_pipe_bwc val)
+{
+	return ((val) << MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT) & MDP5_PIPE_SRC_OP_MODE_BWC__MASK;
+}
+#define MDP5_PIPE_SRC_OP_MODE_FLIP_LR 0x00002000
+#define MDP5_PIPE_SRC_OP_MODE_FLIP_UD 0x00004000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_EN 0x00010000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_0 0x00020000
+#define MDP5_PIPE_SRC_OP_MODE_IGC_ROM_1 0x00040000
+#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE 0x00400000
+#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD 0x00800000
+#define MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE 0x80000000
+
+static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000003c + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00000048 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000004c + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00000050 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00000054 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00000058 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00000070 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000000a4 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000000a8 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000000ac + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000000b0 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000000b4 + __offset_PIPE(i0); }
+#define MDP5_PIPE_DECIMATION_VERT__MASK 0x000000ff
+#define MDP5_PIPE_DECIMATION_VERT__SHIFT 0
+static inline uint32_t MDP5_PIPE_DECIMATION_VERT(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_DECIMATION_VERT__SHIFT) & MDP5_PIPE_DECIMATION_VERT__MASK;
+}
+#define MDP5_PIPE_DECIMATION_HORZ__MASK 0x0000ff00
+#define MDP5_PIPE_DECIMATION_HORZ__SHIFT 8
+static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK;
+}
+
+static inline uint32_t __offset_SW_PIX_EXT(enum mdp_component_type idx)
+{
+	switch (idx) {
+		case COMP_0: return 0x00000100;
+		case COMP_1_2: return 0x00000110;
+		case COMP_3: return 0x00000120;
+		default: return INVALID_IDX(idx);
+	}
+}
+static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000000 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); }
+
+static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_LR(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000000 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); }
+#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__MASK 0x000000ff
+#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__SHIFT 0
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT__MASK;
+}
+#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__MASK 0x0000ff00
+#define MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__SHIFT 8
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF(int32_t val)
+{
+	return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF__MASK;
+}
+#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__MASK 0x00ff0000
+#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__SHIFT 16
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT__MASK;
+}
+#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__MASK 0xff000000
+#define MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__SHIFT 24
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF(int32_t val)
+{
+	return ((val) << MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_TB(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000004 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); }
+#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__MASK 0x000000ff
+#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__SHIFT 0
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT__MASK;
+}
+#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__MASK 0x0000ff00
+#define MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__SHIFT 8
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF(int32_t val)
+{
+	return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF__MASK;
+}
+#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__MASK 0x00ff0000
+#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__SHIFT 16
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT__MASK;
+}
+#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__MASK 0xff000000
+#define MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__SHIFT 24
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF(int32_t val)
+{
+	return ((val) << MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__SHIFT) & MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(enum mdp5_pipe i0, enum mdp_component_type i1) { return 0x00000008 + __offset_PIPE(i0) + __offset_SW_PIX_EXT(i1); }
+#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__MASK 0x0000ffff
+#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__SHIFT 0
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__SHIFT) & MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT__MASK;
+}
+#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__MASK 0xffff0000
+#define MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__SHIFT 16
+static inline uint32_t MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(uint32_t val)
+{
+	return ((val) << MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__SHIFT) & MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00000204 + __offset_PIPE(i0); }
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK 0x00000300
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT 8
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(enum mdp5_scale_filter val)
+{
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK 0x00000c00
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT 10
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(enum mdp5_scale_filter val)
+{
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK 0x00003000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT 12
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(enum mdp5_scale_filter val)
+{
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK 0x0000c000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT 14
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(enum mdp5_scale_filter val)
+{
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK 0x00030000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT 16
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(enum mdp5_scale_filter val)
+{
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3__MASK;
+}
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK 0x000c0000
+#define MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT 18
+static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(enum mdp5_scale_filter val)
+{
+	return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3__MASK;
+}
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000210 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00000214 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000218 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x0000021c + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00000220 + __offset_PIPE(i0); }
+
+static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00000224 + __offset_PIPE(i0); }
+
+static inline uint32_t __offset_LM(uint32_t idx)
+{
+	switch (idx) {
+		case 0: return (mdp5_cfg->lm.base[0]);
+		case 1: return (mdp5_cfg->lm.base[1]);
+		case 2: return (mdp5_cfg->lm.base[2]);
+		case 3: return (mdp5_cfg->lm.base[3]);
+		case 4: return (mdp5_cfg->lm.base[4]);
+		case 5: return (mdp5_cfg->lm.base[5]);
+		default: return INVALID_IDX(idx);
+	}
+}
+static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00000000 + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00000000 + __offset_LM(i0); }
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA 0x00000002
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA 0x00000020
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA 0x00000040
+#define MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA 0x00000080
+#define MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT 0x80000000
+
+static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00000004 + __offset_LM(i0); }
+#define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP5_LM_OUT_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP5_LM_OUT_SIZE_HEIGHT(uint32_t val)
+{
+	return ((val) << MDP5_LM_OUT_SIZE_HEIGHT__SHIFT) & MDP5_LM_OUT_SIZE_HEIGHT__MASK;
+}
+#define MDP5_LM_OUT_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP5_LM_OUT_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP5_LM_OUT_SIZE_WIDTH(uint32_t val)
+{
+	return ((val) << MDP5_LM_OUT_SIZE_WIDTH__SHIFT) & MDP5_LM_OUT_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00000008 + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00000010 + __offset_LM(i0); }
+
+static inline uint32_t __offset_BLEND(uint32_t idx)
+{
+	switch (idx) {
+		case 0: return 0x00000020;
+		case 1: return 0x00000050;
+		case 2: return 0x00000080;
+		case 3: return 0x000000b0;
+		case 4: return 0x00000230;
+		case 5: return 0x00000260;
+		case 6: return 0x00000290;
+		default: return INVALID_IDX(idx);
+	}
+}
+static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_LM(i0) + __offset_BLEND(i1); }
+#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003
+#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0
+static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val)
+{
+	return ((val) << MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK;
+}
+#define MDP5_LM_BLEND_OP_MODE_FG_INV_ALPHA 0x00000004
+#define MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA 0x00000008
+#define MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA 0x00000010
+#define MDP5_LM_BLEND_OP_MODE_FG_TRANSP_EN 0x00000020
+#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK 0x00000300
+#define MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT 8
+static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val)
+{
+	return ((val) << MDP5_LM_BLEND_OP_MODE_BG_ALPHA__SHIFT) & MDP5_LM_BLEND_OP_MODE_BG_ALPHA__MASK;
+}
+#define MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA 0x00000400
+#define MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA 0x00000800
+#define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000
+#define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000
+
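+/* Editor's sketch (illustrative only): a typical premultiplied-alpha blend
+ * stage uses constant foreground alpha and an inverted, pixel-driven
+ * background alpha, along the lines of:
+ *
+ *	uint32_t op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+ *		      MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
+ *		      MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
+ *
+ * FG_CONST and FG_PIXEL are assumed values of enum mdp_alpha_type; the real
+ * per-stage policy lives in the driver's blend setup, not in this header.
+ */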
+static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000001c + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + __offset_BLEND(i1); }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK 0x0000ffff
+#define MDP5_LM_CURSOR_IMG_SIZE_SRC_W__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_IMG_SIZE_SRC_W(uint32_t val)
+{
+	return ((val) << MDP5_LM_CURSOR_IMG_SIZE_SRC_W__SHIFT) & MDP5_LM_CURSOR_IMG_SIZE_SRC_W__MASK;
+}
+#define MDP5_LM_CURSOR_IMG_SIZE_SRC_H__MASK 0xffff0000
+#define MDP5_LM_CURSOR_IMG_SIZE_SRC_H__SHIFT 16
+static inline uint32_t MDP5_LM_CURSOR_IMG_SIZE_SRC_H(uint32_t val)
+{
+	return ((val) << MDP5_LM_CURSOR_IMG_SIZE_SRC_H__SHIFT) & MDP5_LM_CURSOR_IMG_SIZE_SRC_H__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000000e4 + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_SIZE_ROI_W__MASK 0x0000ffff
+#define MDP5_LM_CURSOR_SIZE_ROI_W__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_SIZE_ROI_W(uint32_t val)
+{
+	return ((val) << MDP5_LM_CURSOR_SIZE_ROI_W__SHIFT) & MDP5_LM_CURSOR_SIZE_ROI_W__MASK;
+}
+#define MDP5_LM_CURSOR_SIZE_ROI_H__MASK 0xffff0000
+#define MDP5_LM_CURSOR_SIZE_ROI_H__SHIFT 16
+static inline uint32_t MDP5_LM_CURSOR_SIZE_ROI_H(uint32_t val)
+{
+	return ((val) << MDP5_LM_CURSOR_SIZE_ROI_H__SHIFT) & MDP5_LM_CURSOR_SIZE_ROI_H__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000000e8 + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_XY_SRC_X__MASK 0x0000ffff
+#define MDP5_LM_CURSOR_XY_SRC_X__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_XY_SRC_X(uint32_t val)
+{
+	return ((val) << MDP5_LM_CURSOR_XY_SRC_X__SHIFT) & MDP5_LM_CURSOR_XY_SRC_X__MASK;
+}
+#define MDP5_LM_CURSOR_XY_SRC_Y__MASK 0xffff0000
+#define MDP5_LM_CURSOR_XY_SRC_Y__SHIFT 16
+static inline uint32_t MDP5_LM_CURSOR_XY_SRC_Y(uint32_t val)
+{
+	return ((val) << MDP5_LM_CURSOR_XY_SRC_Y__SHIFT) & MDP5_LM_CURSOR_XY_SRC_Y__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000000dc + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_STRIDE_STRIDE__MASK 0x0000ffff
+#define MDP5_LM_CURSOR_STRIDE_STRIDE__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_STRIDE_STRIDE(uint32_t val)
+{
+	return ((val) << MDP5_LM_CURSOR_STRIDE_STRIDE__SHIFT) & MDP5_LM_CURSOR_STRIDE_STRIDE__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000000ec + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_FORMAT_FORMAT__MASK 0x00000007
+#define MDP5_LM_CURSOR_FORMAT_FORMAT__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_FORMAT_FORMAT(enum mdp5_cursor_format val)
+{
+	return ((val) << MDP5_LM_CURSOR_FORMAT_FORMAT__SHIFT) & MDP5_LM_CURSOR_FORMAT_FORMAT__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000000f0 + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000000f4 + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_START_XY_X_START__MASK 0x0000ffff
+#define MDP5_LM_CURSOR_START_XY_X_START__SHIFT 0
+static inline uint32_t MDP5_LM_CURSOR_START_XY_X_START(uint32_t val)
+{
+	return ((val) << MDP5_LM_CURSOR_START_XY_X_START__SHIFT) & MDP5_LM_CURSOR_START_XY_X_START__MASK;
+}
+#define MDP5_LM_CURSOR_START_XY_Y_START__MASK 0xffff0000
+#define MDP5_LM_CURSOR_START_XY_Y_START__SHIFT 16
+static inline uint32_t MDP5_LM_CURSOR_START_XY_Y_START(uint32_t val)
+{
+	return ((val) << MDP5_LM_CURSOR_START_XY_Y_START__SHIFT) & MDP5_LM_CURSOR_START_XY_Y_START__MASK;
+}
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000000f8 + __offset_LM(i0); }
+#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN 0x00000001
+#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__MASK 0x00000006
+#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__SHIFT 1
+static inline uint32_t MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(enum mdp5_cursor_alpha val)
+{
+	return ((val) << MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__SHIFT) & MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL__MASK;
+}
+#define MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN 0x00000008
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000000fc + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00000100 + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00000104 + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00000108 + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000010c + __offset_LM(i0); }
+
+static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00000110 + __offset_LM(i0); }
+
+static inline uint32_t __offset_DSPP(uint32_t idx)
+{
+	switch (idx) {
+		case 0: return (mdp5_cfg->dspp.base[0]);
+		case 1: return (mdp5_cfg->dspp.base[1]);
+		case 2: return (mdp5_cfg->dspp.base[2]);
+		case 3: return (mdp5_cfg->dspp.base[3]);
+		default: return INVALID_IDX(idx);
+	}
+}
+static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); }
+#define MDP5_DSPP_OP_MODE_IGC_LUT_EN 0x00000001
+#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK 0x0000000e
+#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT 1
+static inline uint32_t MDP5_DSPP_OP_MODE_IGC_TBL_IDX(uint32_t val)
+{
+	return ((val) << MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT) & MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK;
+}
+#define MDP5_DSPP_OP_MODE_PCC_EN 0x00000010
+#define MDP5_DSPP_OP_MODE_DITHER_EN 0x00000100
+#define MDP5_DSPP_OP_MODE_HIST_EN 0x00010000
+#define MDP5_DSPP_OP_MODE_AUTO_CLEAR 0x00020000
+#define MDP5_DSPP_OP_MODE_HIST_LUT_EN 0x00080000
+#define MDP5_DSPP_OP_MODE_PA_EN 0x00100000
+#define MDP5_DSPP_OP_MODE_GAMUT_EN 0x00800000
+#define MDP5_DSPP_OP_MODE_GAMUT_ORDER 0x01000000
+
+static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00000030 + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00000150 + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00000210 + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00000230 + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00000234 + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00000238 + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000002dc + __offset_DSPP(i0); }
+
+static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000002b0 + __offset_DSPP(i0); }
+
+static inline uint32_t __offset_PP(uint32_t idx)
+{
+	switch (idx) {
+		case 0: return (mdp5_cfg->pp.base[0]);
+		case 1: return (mdp5_cfg->pp.base[1]);
+		case 2: return (mdp5_cfg->pp.base[2]);
+		case 3: return (mdp5_cfg->pp.base[3]);
+		default: return INVALID_IDX(idx);
+	}
+}
+static inline uint32_t REG_MDP5_PP(uint32_t i0) { return 0x00000000 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_TEAR_CHECK_EN(uint32_t i0) { return 0x00000000 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_VSYNC(uint32_t i0) { return 0x00000004 + __offset_PP(i0); }
+#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK 0x0007ffff
+#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT 0
+static inline uint32_t MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(uint32_t val)
+{
+	return ((val) << MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__SHIFT) & MDP5_PP_SYNC_CONFIG_VSYNC_COUNT__MASK;
+}
+#define MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN 0x00080000
+#define MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN 0x00100000
+
+static inline uint32_t REG_MDP5_PP_SYNC_CONFIG_HEIGHT(uint32_t i0) { return 0x00000008 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_SYNC_WRCOUNT(uint32_t i0) { return 0x0000000c + __offset_PP(i0); }
+#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK 0x0000ffff
+#define MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT 0
+static inline uint32_t MDP5_PP_SYNC_WRCOUNT_LINE_COUNT(uint32_t val)
+{
+	return ((val) << MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_LINE_COUNT__MASK;
+}
+#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK 0xffff0000
+#define MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT 16
+static inline uint32_t MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT(uint32_t val)
+{
+	return ((val) << MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__SHIFT) & MDP5_PP_SYNC_WRCOUNT_FRAME_COUNT__MASK;
+}
+
+static inline uint32_t REG_MDP5_PP_VSYNC_INIT_VAL(uint32_t i0) { return 0x00000010 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_INT_COUNT_VAL(uint32_t i0) { return 0x00000014 + __offset_PP(i0); }
+#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK 0x0000ffff
+#define MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT 0
+static inline uint32_t MDP5_PP_INT_COUNT_VAL_LINE_COUNT(uint32_t val)
+{
+	return ((val) << MDP5_PP_INT_COUNT_VAL_LINE_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_LINE_COUNT__MASK;
+}
+#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK 0xffff0000
+#define MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT 16
+static inline uint32_t MDP5_PP_INT_COUNT_VAL_FRAME_COUNT(uint32_t val)
+{
+	return ((val) << MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__SHIFT) & MDP5_PP_INT_COUNT_VAL_FRAME_COUNT__MASK;
+}
+
+static inline uint32_t REG_MDP5_PP_SYNC_THRESH(uint32_t i0) { return 0x00000018 + __offset_PP(i0); }
+#define MDP5_PP_SYNC_THRESH_START__MASK 0x0000ffff
+#define MDP5_PP_SYNC_THRESH_START__SHIFT 0
+static inline uint32_t MDP5_PP_SYNC_THRESH_START(uint32_t val)
+{
+	return ((val) << MDP5_PP_SYNC_THRESH_START__SHIFT) & MDP5_PP_SYNC_THRESH_START__MASK;
+}
+#define MDP5_PP_SYNC_THRESH_CONTINUE__MASK 0xffff0000
+#define MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT 16
+static inline uint32_t MDP5_PP_SYNC_THRESH_CONTINUE(uint32_t val)
+{
+	return ((val) << MDP5_PP_SYNC_THRESH_CONTINUE__SHIFT) & MDP5_PP_SYNC_THRESH_CONTINUE__MASK;
+}
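+/* Editor's sketch (illustrative only): SYNC_THRESH carries two line
+ * thresholds in one word, START in bits [15:0] and CONTINUE in bits [31:16]:
+ *
+ *	mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_THRESH(pp_id),
+ *		   MDP5_PP_SYNC_THRESH_START(start_line) |
+ *		   MDP5_PP_SYNC_THRESH_CONTINUE(cont_line));
+ *
+ * pp_id, start_line and cont_line are hypothetical locals.
+ */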
+
+static inline uint32_t REG_MDP5_PP_START_POS(uint32_t i0) { return 0x0000001c + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_RD_PTR_IRQ(uint32_t i0) { return 0x00000020 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_WR_PTR_IRQ(uint32_t i0) { return 0x00000024 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_OUT_LINE_COUNT(uint32_t i0) { return 0x00000028 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_PP_LINE_COUNT(uint32_t i0) { return 0x0000002c + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_AUTOREFRESH_CONFIG(uint32_t i0) { return 0x00000030 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_FBC_MODE(uint32_t i0) { return 0x00000034 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_FBC_BUDGET_CTL(uint32_t i0) { return 0x00000038 + __offset_PP(i0); }
+
+static inline uint32_t REG_MDP5_PP_FBC_LOSSY_MODE(uint32_t i0) { return 0x0000003c + __offset_PP(i0); }
+
+static inline uint32_t __offset_WB(uint32_t idx)
+{
+	switch (idx) {
+#if 0 /* TEMPORARY until patch that adds wb.base[] is merged */
+		case 0: return (mdp5_cfg->wb.base[0]);
+		case 1: return (mdp5_cfg->wb.base[1]);
+		case 2: return (mdp5_cfg->wb.base[2]);
+		case 3: return (mdp5_cfg->wb.base[3]);
+		case 4: return (mdp5_cfg->wb.base[4]);
+#endif
+		default: return INVALID_IDX(idx);
+	}
+}
+static inline uint32_t REG_MDP5_WB(uint32_t i0) { return 0x00000000 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST_FORMAT(uint32_t i0) { return 0x00000000 + __offset_WB(i0); }
+#define MDP5_WB_DST_FORMAT_DSTC0_OUT__MASK 0x00000003
+#define MDP5_WB_DST_FORMAT_DSTC0_OUT__SHIFT 0
+static inline uint32_t MDP5_WB_DST_FORMAT_DSTC0_OUT(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_FORMAT_DSTC0_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC0_OUT__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DSTC1_OUT__MASK 0x0000000c
+#define MDP5_WB_DST_FORMAT_DSTC1_OUT__SHIFT 2
+static inline uint32_t MDP5_WB_DST_FORMAT_DSTC1_OUT(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_FORMAT_DSTC1_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC1_OUT__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DSTC2_OUT__MASK 0x00000030
+#define MDP5_WB_DST_FORMAT_DSTC2_OUT__SHIFT 4
+static inline uint32_t MDP5_WB_DST_FORMAT_DSTC2_OUT(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_FORMAT_DSTC2_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC2_OUT__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DSTC3_OUT__MASK 0x000000c0
+#define MDP5_WB_DST_FORMAT_DSTC3_OUT__SHIFT 6
+static inline uint32_t MDP5_WB_DST_FORMAT_DSTC3_OUT(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_FORMAT_DSTC3_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC3_OUT__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DSTC3_EN 0x00000100
+#define MDP5_WB_DST_FORMAT_DST_BPP__MASK 0x00000600
+#define MDP5_WB_DST_FORMAT_DST_BPP__SHIFT 9
+static inline uint32_t MDP5_WB_DST_FORMAT_DST_BPP(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_FORMAT_DST_BPP__SHIFT) & MDP5_WB_DST_FORMAT_DST_BPP__MASK;
+}
+#define MDP5_WB_DST_FORMAT_PACK_COUNT__MASK 0x00003000
+#define MDP5_WB_DST_FORMAT_PACK_COUNT__SHIFT 12
+static inline uint32_t MDP5_WB_DST_FORMAT_PACK_COUNT(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_FORMAT_PACK_COUNT__SHIFT) & MDP5_WB_DST_FORMAT_PACK_COUNT__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DST_ALPHA_X 0x00004000
+#define MDP5_WB_DST_FORMAT_PACK_TIGHT 0x00020000
+#define MDP5_WB_DST_FORMAT_PACK_ALIGN_MSB 0x00040000
+#define MDP5_WB_DST_FORMAT_WRITE_PLANES__MASK 0x00180000
+#define MDP5_WB_DST_FORMAT_WRITE_PLANES__SHIFT 19
+static inline uint32_t MDP5_WB_DST_FORMAT_WRITE_PLANES(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_FORMAT_WRITE_PLANES__SHIFT) & MDP5_WB_DST_FORMAT_WRITE_PLANES__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DST_DITHER_EN 0x00400000
+#define MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__MASK 0x03800000
+#define MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__SHIFT 23
+static inline uint32_t MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__SHIFT) & MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__MASK 0x3c000000
+#define MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__SHIFT 26
+static inline uint32_t MDP5_WB_DST_FORMAT_DST_CHROMA_SITE(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__SHIFT) & MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__MASK;
+}
+#define MDP5_WB_DST_FORMAT_FRAME_FORMAT__MASK 0xc0000000
+#define MDP5_WB_DST_FORMAT_FRAME_FORMAT__SHIFT 30
+static inline uint32_t MDP5_WB_DST_FORMAT_FRAME_FORMAT(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_FORMAT_FRAME_FORMAT__SHIFT) & MDP5_WB_DST_FORMAT_FRAME_FORMAT__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_DST_OP_MODE(uint32_t i0) { return 0x00000004 + __offset_WB(i0); }
+#define MDP5_WB_DST_OP_MODE_BWC_ENC_EN 0x00000001
+#define MDP5_WB_DST_OP_MODE_BWC_ENC_OP__MASK 0x00000006
+#define MDP5_WB_DST_OP_MODE_BWC_ENC_OP__SHIFT 1
+static inline uint32_t MDP5_WB_DST_OP_MODE_BWC_ENC_OP(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_OP_MODE_BWC_ENC_OP__SHIFT) & MDP5_WB_DST_OP_MODE_BWC_ENC_OP__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_BLOCK_SIZE__MASK 0x00000010
+#define MDP5_WB_DST_OP_MODE_BLOCK_SIZE__SHIFT 4
+static inline uint32_t MDP5_WB_DST_OP_MODE_BLOCK_SIZE(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_OP_MODE_BLOCK_SIZE__SHIFT) & MDP5_WB_DST_OP_MODE_BLOCK_SIZE__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_ROT_MODE__MASK 0x00000020
+#define MDP5_WB_DST_OP_MODE_ROT_MODE__SHIFT 5
+static inline uint32_t MDP5_WB_DST_OP_MODE_ROT_MODE(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_OP_MODE_ROT_MODE__SHIFT) & MDP5_WB_DST_OP_MODE_ROT_MODE__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_ROT_EN 0x00000040
+#define MDP5_WB_DST_OP_MODE_CSC_EN 0x00000100
+#define MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__MASK 0x00000200
+#define MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT 9
+static inline uint32_t MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__MASK 0x00000400
+#define MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT 10
+static inline uint32_t MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_EN 0x00000800
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__MASK 0x00001000
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__SHIFT 12
+static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__MASK 0x00002000
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__SHIFT 13
+static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__MASK 0x00004000
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__SHIFT 14
+static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_DST_PACK_PATTERN(uint32_t i0) { return 0x00000008 + __offset_WB(i0); }
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT0__MASK 0x00000003
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT0__SHIFT 0
+static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT0(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT0__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT0__MASK;
+}
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT1__MASK 0x00000300
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT1__SHIFT 8
+static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT1(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT1__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT1__MASK;
+}
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT2__MASK 0x00030000
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT2__SHIFT 16
+static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT2(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT2__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT2__MASK;
+}
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT3__MASK 0x03000000
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT3__SHIFT 24
+static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT3(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT3__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT3__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_DST0_ADDR(uint32_t i0) { return 0x0000000c + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST1_ADDR(uint32_t i0) { return 0x00000010 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST2_ADDR(uint32_t i0) { return 0x00000014 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST3_ADDR(uint32_t i0) { return 0x00000018 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST_YSTRIDE0(uint32_t i0) { return 0x0000001c + __offset_WB(i0); }
+#define MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__MASK 0x0000ffff
+#define MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__SHIFT 0
+static inline uint32_t MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__MASK;
+}
+#define MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__MASK 0xffff0000
+#define MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__SHIFT 16
+static inline uint32_t MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_DST_YSTRIDE1(uint32_t i0) { return 0x00000020 + __offset_WB(i0); }
+#define MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__MASK 0x0000ffff
+#define MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__SHIFT 0
+static inline uint32_t MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__MASK;
+}
+#define MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__MASK 0xffff0000
+#define MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__SHIFT 16
+static inline uint32_t MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE(uint32_t val)
+{
+	return ((val) << MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_DST_DITHER_BITDEPTH(uint32_t i0) { return 0x00000024 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW0(uint32_t i0) { return 0x00000030 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW1(uint32_t i0) { return 0x00000034 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW2(uint32_t i0) { return 0x00000038 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW3(uint32_t i0) { return 0x0000003c + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST_WRITE_CONFIG(uint32_t i0) { return 0x00000048 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_ROTATION_DNSCALER(uint32_t i0) { return 0x00000050 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_X_0_3(uint32_t i0) { return 0x00000060 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_X_1_2(uint32_t i0) { return 0x00000064 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_Y_0_3(uint32_t i0) { return 0x00000068 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_Y_1_2(uint32_t i0) { return 0x0000006c + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_OUT_SIZE(uint32_t i0) { return 0x00000074 + __offset_WB(i0); }
+#define MDP5_WB_OUT_SIZE_DST_W__MASK 0x0000ffff
+#define MDP5_WB_OUT_SIZE_DST_W__SHIFT 0
+static inline uint32_t MDP5_WB_OUT_SIZE_DST_W(uint32_t val)
+{
+	return ((val) << MDP5_WB_OUT_SIZE_DST_W__SHIFT) & MDP5_WB_OUT_SIZE_DST_W__MASK;
+}
+#define MDP5_WB_OUT_SIZE_DST_H__MASK 0xffff0000
+#define MDP5_WB_OUT_SIZE_DST_H__SHIFT 16
+static inline uint32_t MDP5_WB_OUT_SIZE_DST_H(uint32_t val)
+{
+	return ((val) << MDP5_WB_OUT_SIZE_DST_H__SHIFT) & MDP5_WB_OUT_SIZE_DST_H__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_ALPHA_X_VALUE(uint32_t i0) { return 0x00000078 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_0(uint32_t i0) { return 0x00000260 + __offset_WB(i0); }
+#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__MASK 0x00001fff
+#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11(uint32_t val)
+{
+	return ((val) << MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__MASK;
+}
+#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__MASK 0x1fff0000
+#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__SHIFT 16
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12(uint32_t val)
+{
+	return ((val) << MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_1(uint32_t i0) { return 0x00000264 + __offset_WB(i0); }
+#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__MASK 0x00001fff
+#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13(uint32_t val)
+{
+	return ((val) << MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__MASK;
+}
+#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__MASK 0x1fff0000
+#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__SHIFT 16
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21(uint32_t val)
+{
+	return ((val) << MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_2(uint32_t i0) { return 0x00000268 + __offset_WB(i0); }
+#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__MASK 0x00001fff
+#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22(uint32_t val)
MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__MASK; +} +#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__MASK 0x1fff0000 +#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__SHIFT 16 +static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__MASK; +} + +static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_3(uint32_t i0) { return 0x0000026c + __offset_WB(i0); } +#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__MASK 0x00001fff +#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__MASK; +} +#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__MASK 0x1fff0000 +#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__SHIFT 16 +static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__MASK; +} + +static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_4(uint32_t i0) { return 0x00000270 + __offset_WB(i0); } +#define MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__MASK 0x00001fff +#define MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__MASK; +} + +static inline uint32_t REG_MDP5_WB_CSC_COMP_PRECLAMP(uint32_t i0, uint32_t i1) { return 0x00000274 + __offset_WB(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_WB_CSC_COMP_PRECLAMP_REG(uint32_t i0, uint32_t i1) { return 0x00000274 + __offset_WB(i0) + 0x4*i1; } +#define MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__MASK 0x000000ff +#define MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__SHIFT) & MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__MASK; +} +#define MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__MASK 0x0000ff00 +#define MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__SHIFT 8 +static inline uint32_t MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__SHIFT) & MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__MASK; +} + +static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTCLAMP(uint32_t i0, uint32_t i1) { return 0x00000280 + __offset_WB(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTCLAMP_REG(uint32_t i0, uint32_t i1) { return 0x00000280 + __offset_WB(i0) + 0x4*i1; } +#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__MASK 0x000000ff +#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__SHIFT) & MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__MASK; +} +#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__MASK 0x0000ff00 +#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__SHIFT 8 +static inline uint32_t MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__SHIFT) & MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__MASK; +} + +static inline uint32_t REG_MDP5_WB_CSC_COMP_PREBIAS(uint32_t i0, uint32_t i1) { return 0x0000028c + __offset_WB(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_WB_CSC_COMP_PREBIAS_REG(uint32_t i0, uint32_t i1) 
{ return 0x0000028c + __offset_WB(i0) + 0x4*i1; } +#define MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__MASK 0x000001ff +#define MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__SHIFT) & MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__MASK; +} + +static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTBIAS(uint32_t i0, uint32_t i1) { return 0x00000298 + __offset_WB(i0) + 0x4*i1; } + +static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTBIAS_REG(uint32_t i0, uint32_t i1) { return 0x00000298 + __offset_WB(i0) + 0x4*i1; } +#define MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__MASK 0x000001ff +#define MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__SHIFT 0 +static inline uint32_t MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE(uint32_t val) +{ + return ((val) << MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__SHIFT) & MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__MASK; +} + +static inline uint32_t __offset_INTF(uint32_t idx) +{ + switch (idx) { + case 0: return (mdp5_cfg->intf.base[0]); + case 1: return (mdp5_cfg->intf.base[1]); + case 2: return (mdp5_cfg->intf.base[2]); + case 3: return (mdp5_cfg->intf.base[3]); + case 4: return (mdp5_cfg->intf.base[4]); + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00000004 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00000008 + __offset_INTF(i0); } +#define MDP5_INTF_HSYNC_CTL_PULSEW__MASK 0x0000ffff +#define MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT 0 +static inline uint32_t MDP5_INTF_HSYNC_CTL_PULSEW(uint32_t val) +{ + return ((val) << MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT) & MDP5_INTF_HSYNC_CTL_PULSEW__MASK; +} +#define MDP5_INTF_HSYNC_CTL_PERIOD__MASK 0xffff0000 +#define MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT 16 +static inline uint32_t MDP5_INTF_HSYNC_CTL_PERIOD(uint32_t val) +{ + return ((val) << MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT) & MDP5_INTF_HSYNC_CTL_PERIOD__MASK; +} + +static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0000000c + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00000010 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00000014 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00000018 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0000001c + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00000020 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00000024 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00000028 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0000002c + __offset_INTF(i0); } +#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK 0x7fffffff +#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT 0 +static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val) +{ + return ((val) << MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK; +} +#define MDP5_INTF_ACTIVE_VSTART_F0_ACTIVE_V_ENABLE 0x80000000 + +static inline 
uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00000030 + __offset_INTF(i0); } +#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK 0x7fffffff +#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT 0 +static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val) +{ + return ((val) << MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK; +} + +static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00000034 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00000038 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0000003c + __offset_INTF(i0); } +#define MDP5_INTF_DISPLAY_HCTL_START__MASK 0x0000ffff +#define MDP5_INTF_DISPLAY_HCTL_START__SHIFT 0 +static inline uint32_t MDP5_INTF_DISPLAY_HCTL_START(uint32_t val) +{ + return ((val) << MDP5_INTF_DISPLAY_HCTL_START__SHIFT) & MDP5_INTF_DISPLAY_HCTL_START__MASK; +} +#define MDP5_INTF_DISPLAY_HCTL_END__MASK 0xffff0000 +#define MDP5_INTF_DISPLAY_HCTL_END__SHIFT 16 +static inline uint32_t MDP5_INTF_DISPLAY_HCTL_END(uint32_t val) +{ + return ((val) << MDP5_INTF_DISPLAY_HCTL_END__SHIFT) & MDP5_INTF_DISPLAY_HCTL_END__MASK; +} + +static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00000040 + __offset_INTF(i0); } +#define MDP5_INTF_ACTIVE_HCTL_START__MASK 0x00007fff +#define MDP5_INTF_ACTIVE_HCTL_START__SHIFT 0 +static inline uint32_t MDP5_INTF_ACTIVE_HCTL_START(uint32_t val) +{ + return ((val) << MDP5_INTF_ACTIVE_HCTL_START__SHIFT) & MDP5_INTF_ACTIVE_HCTL_START__MASK; +} +#define MDP5_INTF_ACTIVE_HCTL_END__MASK 0x7fff0000 +#define MDP5_INTF_ACTIVE_HCTL_END__SHIFT 16 +static inline uint32_t MDP5_INTF_ACTIVE_HCTL_END(uint32_t val) +{ + return ((val) << MDP5_INTF_ACTIVE_HCTL_END__SHIFT) & MDP5_INTF_ACTIVE_HCTL_END__MASK; +} +#define MDP5_INTF_ACTIVE_HCTL_ACTIVE_H_ENABLE 0x80000000 + +static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00000044 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00000048 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0000004c + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00000050 + __offset_INTF(i0); } +#define MDP5_INTF_POLARITY_CTL_HSYNC_LOW 0x00000001 +#define MDP5_INTF_POLARITY_CTL_VSYNC_LOW 0x00000002 +#define MDP5_INTF_POLARITY_CTL_DATA_EN_LOW 0x00000004 + +static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00000054 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00000058 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0000005c + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00000084 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00000090 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000000a8 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000000ac + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000000b0 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000000f0 + __offset_INTF(i0); } + +static inline uint32_t 
REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000000f4 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000000f8 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00000100 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00000104 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00000108 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0000010c + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00000110 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00000114 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00000118 + __offset_INTF(i0); } + +static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0000011c + __offset_INTF(i0); } + +static inline uint32_t __offset_AD(uint32_t idx) +{ + switch (idx) { + case 0: return (mdp5_cfg->ad.base[0]); + case 1: return (mdp5_cfg->ad.base[1]); + default: return INVALID_IDX(idx); + } +} +static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00000000 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00000000 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00000004 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00000008 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0000000c + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00000010 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00000014 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00000018 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0000001c + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00000020 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00000024 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00000028 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0000002c + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00000030 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00000034 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00000038 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0000007c + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000000c8 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000000cc + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000000d0 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000000d4 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000000d8 + __offset_AD(i0); } + +static inline uint32_t 
REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000000dc + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000000e0 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000000e8 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000000ec + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000000f0 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000000f4 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000000f8 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00000100 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00000144 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00000148 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0000014c + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00000154 + __offset_AD(i0); } + +static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00000158 + __offset_AD(i0); } + + +#endif /* MDP5_XML */ diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c new file mode 100644 index 000000000..1f1555aa0 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c @@ -0,0 +1,1333 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved. + */ + +#include "mdp5_kms.h" +#include "mdp5_cfg.h" + +struct mdp5_cfg_handler { + int revision; + struct mdp5_cfg config; +}; + +/* mdp5_cfg must be exposed (used in mdp5.xml.h) */ +const struct mdp5_cfg_hw *mdp5_cfg = NULL; + +static const struct mdp5_cfg_hw msm8x74v1_config = { + .name = "msm8x74v1", + .mdp = { + .count = 1, + .caps = MDP_CAP_SMP | + 0, + }, + .smp = { + .mmb_count = 22, + .mmb_size = 4096, + .clients = { + [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7, + [SSPP_DMA0] = 10, [SSPP_DMA1] = 13, + [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18, + }, + }, + .ctl = { + .count = 5, + .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 }, + .flush_hw_mask = 0x0003ffff, + }, + .pipe_vig = { + .count = 3, + .base = { 0x01100, 0x01500, 0x01900 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_CSC | + 0, + }, + .pipe_rgb = { + .count = 3, + .base = { 0x01d00, 0x02100, 0x02500 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + 0, + }, + .pipe_dma = { + .count = 2, + .base = { 0x02900, 0x02d00 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + 0, + }, + .lm = { + .count = 5, + .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 1, .pp = 1, .dspp = 1, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 2, .pp = 2, .dspp = 2, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 3, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB }, + { .id = 4, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB }, + }, + .nb_stages = 5, + .max_width = 2048, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 3, + .base = { 0x04500, 0x04900, 0x04d00 }, + }, + .pp = { + .count = 3, + .base = { 0x21a00, 0x21b00, 0x21c00 }, + }, + .intf = { + .base = { 0x21000, 0x21200, 0x21400, 0x21600 }, + .connect = { + [0] = INTF_eDP, + [1] = 
INTF_DSI, + [2] = INTF_DSI, + [3] = INTF_HDMI, + }, + }, + .perf = { + .ab_inefficiency = 200, + .ib_inefficiency = 120, + .clk_inefficiency = 125 + }, + .max_clk = 200000000, +}; + +static const struct mdp5_cfg_hw msm8x74v2_config = { + .name = "msm8x74", + .mdp = { + .count = 1, + .caps = MDP_CAP_SMP | + 0, + }, + .smp = { + .mmb_count = 22, + .mmb_size = 4096, + .clients = { + [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, [SSPP_VIG2] = 7, + [SSPP_DMA0] = 10, [SSPP_DMA1] = 13, + [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, [SSPP_RGB2] = 18, + }, + }, + .ctl = { + .count = 5, + .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 }, + .flush_hw_mask = 0x0003ffff, + }, + .pipe_vig = { + .count = 3, + .base = { 0x01100, 0x01500, 0x01900 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION, + }, + .pipe_rgb = { + .count = 3, + .base = { 0x01d00, 0x02100, 0x02500 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, + }, + .pipe_dma = { + .count = 2, + .base = { 0x02900, 0x02d00 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, + }, + .lm = { + .count = 5, + .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 1, .pp = 1, .dspp = 1, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 2, .pp = 2, .dspp = 2, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 3, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 4, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + }, + .nb_stages = 5, + .max_width = 2048, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 3, + .base = { 0x04500, 0x04900, 0x04d00 }, + }, + .ad = { + .count = 2, + .base = { 0x13000, 0x13200 }, + }, + .pp = { + .count = 3, + .base = { 0x12c00, 0x12d00, 0x12e00 }, + }, + .intf = { + .base = { 0x12400, 0x12600, 0x12800, 0x12a00 }, + .connect = { + [0] = INTF_eDP, + [1] = INTF_DSI, + [2] = INTF_DSI, + [3] = INTF_HDMI, + }, + }, + .perf = { + .ab_inefficiency = 200, + .ib_inefficiency = 120, + .clk_inefficiency = 125 + }, + .max_clk = 320000000, +}; + +static const struct mdp5_cfg_hw apq8084_config = { + .name = "apq8084", + .mdp = { + .count = 1, + .caps = MDP_CAP_SMP | + MDP_CAP_SRC_SPLIT | + 0, + }, + .smp = { + .mmb_count = 44, + .mmb_size = 8192, + .clients = { + [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, + [SSPP_VIG2] = 7, [SSPP_VIG3] = 19, + [SSPP_DMA0] = 10, [SSPP_DMA1] = 13, + [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, + [SSPP_RGB2] = 18, [SSPP_RGB3] = 22, + }, + .reserved_state[0] = GENMASK(7, 0), /* first 8 MMBs */ + .reserved = { + /* Two SMP blocks are statically tied to RGB pipes: */ + [16] = 2, [17] = 2, [18] = 2, [22] = 2, + }, + }, + .ctl = { + .count = 5, + .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 }, + .flush_hw_mask = 0x003fffff, + }, + .pipe_vig = { + .count = 4, + .base = { 0x01100, 0x01500, 0x01900, 0x01d00 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION, + }, + .pipe_rgb = { + .count = 4, + .base = { 0x02100, 0x02500, 0x02900, 0x02d00 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, + }, + .pipe_dma = { + .count = 2, + .base = { 0x03100, 0x03500 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, + }, + .lm = { + .count = 6, + .base = { 0x03900, 0x03d00, 0x04100, 0x04500, 0x04900, 0x04d00 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { 
.id = 1, .pp = 1, .dspp = 1, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 2, .pp = 2, .dspp = 2, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { .id = 3, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 4, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 5, .pp = 3, .dspp = 3, + .caps = MDP_LM_CAP_DISPLAY, }, + }, + .nb_stages = 5, + .max_width = 2048, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 4, + .base = { 0x05100, 0x05500, 0x05900, 0x05d00 }, + + }, + .ad = { + .count = 3, + .base = { 0x13400, 0x13600, 0x13800 }, + }, + .pp = { + .count = 4, + .base = { 0x12e00, 0x12f00, 0x13000, 0x13100 }, + }, + .intf = { + .base = { 0x12400, 0x12600, 0x12800, 0x12a00, 0x12c00 }, + .connect = { + [0] = INTF_eDP, + [1] = INTF_DSI, + [2] = INTF_DSI, + [3] = INTF_HDMI, + }, + }, + .perf = { + .ab_inefficiency = 200, + .ib_inefficiency = 120, + .clk_inefficiency = 105 + }, + .max_clk = 320000000, +}; + +static const struct mdp5_cfg_hw msm8x16_config = { + .name = "msm8x16", + .mdp = { + .count = 1, + .base = { 0x0 }, + .caps = MDP_CAP_SMP | + 0, + }, + .smp = { + .mmb_count = 8, + .mmb_size = 8192, + .clients = { + [SSPP_VIG0] = 1, [SSPP_DMA0] = 4, + [SSPP_RGB0] = 7, [SSPP_RGB1] = 8, + }, + }, + .ctl = { + .count = 5, + .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 }, + .flush_hw_mask = 0x4003ffff, + }, + .pipe_vig = { + .count = 1, + .base = { 0x04000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION, + }, + .pipe_rgb = { + .count = 2, + .base = { 0x14000, 0x16000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_DECIMATION, + }, + .pipe_dma = { + .count = 1, + .base = { 0x24000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, + }, + .lm = { + .count = 2, /* LM0 and LM3 */ + .base = { 0x44000, 0x47000 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 3, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB }, + }, + .nb_stages = 8, + .max_width = 2048, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 1, + .base = { 0x54000 }, + + }, + .intf = { + .base = { 0x00000, 0x6a800 }, + .connect = { + [0] = INTF_DISABLED, + [1] = INTF_DSI, + }, + }, + .perf = { + .ab_inefficiency = 100, + .ib_inefficiency = 200, + .clk_inefficiency = 105 + }, + .max_clk = 320000000, +}; + +static const struct mdp5_cfg_hw msm8x36_config = { + .name = "msm8x36", + .mdp = { + .count = 1, + .base = { 0x0 }, + .caps = MDP_CAP_SMP | + 0, + }, + .smp = { + .mmb_count = 8, + .mmb_size = 10240, + .clients = { + [SSPP_VIG0] = 1, [SSPP_DMA0] = 4, + [SSPP_RGB0] = 7, [SSPP_RGB1] = 8, + }, + }, + .ctl = { + .count = 3, + .base = { 0x01000, 0x01200, 0x01400 }, + .flush_hw_mask = 0x4003ffff, + }, + .pipe_vig = { + .count = 1, + .base = { 0x04000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION, + }, + .pipe_rgb = { + .count = 2, + .base = { 0x14000, 0x16000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_DECIMATION, + }, + .pipe_dma = { + .count = 1, + .base = { 0x24000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, + }, + .lm = { + .count = 2, + .base = { 0x44000, 0x47000 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 1, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + }, + .nb_stages = 8, + .max_width = 2560, + .max_height = 0xFFFF, + }, + .pp = { + .count = 1, + .base = { 0x70000 }, + }, + .ad = { + .count = 
1, + .base = { 0x78000 }, + }, + .dspp = { + .count = 1, + .base = { 0x54000 }, + }, + .intf = { + .base = { 0x00000, 0x6a800, 0x6b000 }, + .connect = { + [0] = INTF_DISABLED, + [1] = INTF_DSI, + [2] = INTF_DSI, + }, + }, + .perf = { + .ab_inefficiency = 100, + .ib_inefficiency = 200, + .clk_inefficiency = 105 + }, + .max_clk = 366670000, +}; + +static const struct mdp5_cfg_hw msm8x94_config = { + .name = "msm8x94", + .mdp = { + .count = 1, + .caps = MDP_CAP_SMP | + MDP_CAP_SRC_SPLIT | + 0, + }, + .smp = { + .mmb_count = 44, + .mmb_size = 8192, + .clients = { + [SSPP_VIG0] = 1, [SSPP_VIG1] = 4, + [SSPP_VIG2] = 7, [SSPP_VIG3] = 19, + [SSPP_DMA0] = 10, [SSPP_DMA1] = 13, + [SSPP_RGB0] = 16, [SSPP_RGB1] = 17, + [SSPP_RGB2] = 18, [SSPP_RGB3] = 22, + }, + .reserved_state[0] = GENMASK(23, 0), /* first 24 MMBs */ + .reserved = { + [1] = 1, [4] = 1, [7] = 1, [19] = 1, + [16] = 5, [17] = 5, [18] = 5, [22] = 5, + }, + }, + .ctl = { + .count = 5, + .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 }, + .flush_hw_mask = 0xf0ffffff, + }, + .pipe_vig = { + .count = 4, + .base = { 0x04000, 0x06000, 0x08000, 0x0a000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION, + }, + .pipe_rgb = { + .count = 4, + .base = { 0x14000, 0x16000, 0x18000, 0x1a000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, + }, + .pipe_dma = { + .count = 2, + .base = { 0x24000, 0x26000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, + }, + .lm = { + .count = 6, + .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { .id = 1, .pp = 1, .dspp = 1, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 2, .pp = 2, .dspp = 2, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { .id = 3, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 4, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 5, .pp = 3, .dspp = 3, + .caps = MDP_LM_CAP_DISPLAY, }, + }, + .nb_stages = 8, + .max_width = 2048, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 4, + .base = { 0x54000, 0x56000, 0x58000, 0x5a000 }, + + }, + .ad = { + .count = 3, + .base = { 0x78000, 0x78800, 0x79000 }, + }, + .pp = { + .count = 4, + .base = { 0x70000, 0x70800, 0x71000, 0x71800 }, + }, + .intf = { + .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 }, + .connect = { + [0] = INTF_DISABLED, + [1] = INTF_DSI, + [2] = INTF_DSI, + [3] = INTF_HDMI, + }, + }, + .perf = { + .ab_inefficiency = 100, + .ib_inefficiency = 100, + .clk_inefficiency = 105 + }, + .max_clk = 400000000, +}; + +static const struct mdp5_cfg_hw msm8x96_config = { + .name = "msm8x96", + .mdp = { + .count = 1, + .caps = MDP_CAP_DSC | + MDP_CAP_CDM | + MDP_CAP_SRC_SPLIT | + 0, + }, + .ctl = { + .count = 5, + .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 }, + .flush_hw_mask = 0xf4ffffff, + }, + .pipe_vig = { + .count = 4, + .base = { 0x04000, 0x06000, 0x08000, 0x0a000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_rgb = { + .count = 4, + .base = { 0x14000, 0x16000, 0x18000, 0x1a000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_dma = { + .count = 2, + .base = { 0x24000, 0x26000 }, + .caps = MDP_PIPE_CAP_HFLIP | + 
MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_cursor = { + .count = 2, + .base = { 0x34000, 0x36000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + MDP_PIPE_CAP_CURSOR | + 0, + }, + + .lm = { + .count = 6, + .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { .id = 1, .pp = 1, .dspp = 1, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 2, .pp = 2, .dspp = -1, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { .id = 3, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 4, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 5, .pp = 3, .dspp = -1, + .caps = MDP_LM_CAP_DISPLAY, }, + }, + .nb_stages = 8, + .max_width = 2560, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 2, + .base = { 0x54000, 0x56000 }, + }, + .ad = { + .count = 3, + .base = { 0x78000, 0x78800, 0x79000 }, + }, + .pp = { + .count = 4, + .base = { 0x70000, 0x70800, 0x71000, 0x71800 }, + }, + .cdm = { + .count = 1, + .base = { 0x79200 }, + }, + .dsc = { + .count = 2, + .base = { 0x80000, 0x80400 }, + }, + .intf = { + .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 }, + .connect = { + [0] = INTF_DISABLED, + [1] = INTF_DSI, + [2] = INTF_DSI, + [3] = INTF_HDMI, + }, + }, + .perf = { + .ab_inefficiency = 100, + .ib_inefficiency = 200, + .clk_inefficiency = 105 + }, + .max_clk = 412500000, +}; + +const struct mdp5_cfg_hw msm8x76_config = { + .name = "msm8x76", + .mdp = { + .count = 1, + .caps = MDP_CAP_SMP | + MDP_CAP_DSC | + MDP_CAP_SRC_SPLIT | + 0, + }, + .ctl = { + .count = 3, + .base = { 0x01000, 0x01200, 0x01400 }, + .flush_hw_mask = 0xffffffff, + }, + .smp = { + .mmb_count = 10, + .mmb_size = 10240, + .clients = { + [SSPP_VIG0] = 1, [SSPP_VIG1] = 9, + [SSPP_DMA0] = 4, + [SSPP_RGB0] = 7, [SSPP_RGB1] = 8, + }, + }, + .pipe_vig = { + .count = 2, + .base = { 0x04000, 0x06000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_rgb = { + .count = 2, + .base = { 0x14000, 0x16000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_dma = { + .count = 1, + .base = { 0x24000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_cursor = { + .count = 1, + .base = { 0x440DC }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + MDP_PIPE_CAP_CURSOR | + 0, + }, + .lm = { + .count = 2, + .base = { 0x44000, 0x45000 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 1, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB }, + }, + .nb_stages = 8, + .max_width = 2560, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 1, + .base = { 0x54000 }, + + }, + .pp = { + .count = 3, + .base = { 0x70000, 0x70800, 0x72000 }, + }, + .dsc = { + .count = 2, + .base = { 0x80000, 0x80400 }, + }, + .intf = { + .base = { 0x6a000, 0x6a800, 0x6b000 }, + .connect = { + [0] = INTF_DISABLED, + [1] = INTF_DSI, + [2] = INTF_DSI, + }, + }, + .max_clk = 360000000, +}; + +static const struct mdp5_cfg_hw msm8x53_config = { + .name = "msm8x53", + .mdp = { + .count = 1, + .caps = MDP_CAP_CDM | + MDP_CAP_SRC_SPLIT, + }, + .ctl = { + .count = 3, + .base = { 0x01000, 0x01200, 0x01400 }, + .flush_hw_mask = 0xffffffff, + }, + .pipe_vig = { + .count = 1, + 
.base = { 0x04000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_rgb = { + .count = 2, + .base = { 0x14000, 0x16000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_dma = { + .count = 1, + .base = { 0x24000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_cursor = { + .count = 1, + .base = { 0x34000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + MDP_PIPE_CAP_CURSOR | + 0, + }, + + .lm = { + .count = 3, + .base = { 0x44000, 0x45000 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR }, + { .id = 1, .pp = 1, .dspp = -1, + .caps = MDP_LM_CAP_DISPLAY }, + }, + .nb_stages = 5, + .max_width = 2048, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 1, + .base = { 0x54000 }, + + }, + .pp = { + .count = 2, + .base = { 0x70000, 0x70800 }, + }, + .cdm = { + .count = 1, + .base = { 0x79200 }, + }, + .intf = { + .base = { 0x6a000, 0x6a800, 0x6b000 }, + .connect = { + [0] = INTF_DISABLED, + [1] = INTF_DSI, + [2] = INTF_DSI, + }, + }, + .perf = { + .ab_inefficiency = 100, + .ib_inefficiency = 200, + .clk_inefficiency = 105 + }, + .max_clk = 400000000, +}; + +static const struct mdp5_cfg_hw msm8917_config = { + .name = "msm8917", + .mdp = { + .count = 1, + .caps = MDP_CAP_CDM, + }, + .ctl = { + .count = 3, + .base = { 0x01000, 0x01200, 0x01400 }, + .flush_hw_mask = 0xffffffff, + }, + .pipe_vig = { + .count = 1, + .base = { 0x04000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_rgb = { + .count = 2, + .base = { 0x14000, 0x16000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_dma = { + .count = 1, + .base = { 0x24000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_cursor = { + .count = 1, + .base = { 0x34000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + MDP_PIPE_CAP_CURSOR | + 0, + }, + + .lm = { + .count = 2, + .base = { 0x44000, 0x45000 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 1, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB }, + }, + .nb_stages = 8, + .max_width = 2048, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 1, + .base = { 0x54000 }, + + }, + .pp = { + .count = 1, + .base = { 0x70000 }, + }, + .cdm = { + .count = 1, + .base = { 0x79200 }, + }, + .intf = { + .base = { 0x6a000, 0x6a800 }, + .connect = { + [0] = INTF_DISABLED, + [1] = INTF_DSI, + }, + }, + .max_clk = 320000000, +}; + +static const struct mdp5_cfg_hw msm8998_config = { + .name = "msm8998", + .mdp = { + .count = 1, + .caps = MDP_CAP_DSC | + MDP_CAP_CDM | + MDP_CAP_SRC_SPLIT | + 0, + }, + .ctl = { + .count = 5, + .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 }, + .flush_hw_mask = 0xf7ffffff, + }, + .pipe_vig = { + .count = 4, + .base = { 0x04000, 0x06000, 0x08000, 0x0a000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_rgb = { + .count = 4, + .base = { 0x14000, 0x16000, 0x18000, 0x1a000 }, + 
.caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_dma = { + .count = 2, /* driver supports max of 2 currently */ + .base = { 0x24000, 0x26000, 0x28000, 0x2a000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_cursor = { + .count = 2, + .base = { 0x34000, 0x36000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + MDP_PIPE_CAP_CURSOR | + 0, + }, + + .lm = { + .count = 6, + .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { .id = 1, .pp = 1, .dspp = 1, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 2, .pp = 2, .dspp = -1, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { .id = 3, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 4, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 5, .pp = 3, .dspp = -1, + .caps = MDP_LM_CAP_DISPLAY, }, + }, + .nb_stages = 8, + .max_width = 2560, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 2, + .base = { 0x54000, 0x56000 }, + }, + .ad = { + .count = 3, + .base = { 0x78000, 0x78800, 0x79000 }, + }, + .pp = { + .count = 4, + .base = { 0x70000, 0x70800, 0x71000, 0x71800 }, + }, + .cdm = { + .count = 1, + .base = { 0x79200 }, + }, + .dsc = { + .count = 2, + .base = { 0x80000, 0x80400 }, + }, + .intf = { + .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 }, + .connect = { + [0] = INTF_eDP, + [1] = INTF_DSI, + [2] = INTF_DSI, + [3] = INTF_HDMI, + }, + }, + .max_clk = 412500000, +}; + +static const struct mdp5_cfg_hw sdm630_config = { + .name = "sdm630", + .mdp = { + .count = 1, + .caps = MDP_CAP_CDM | + MDP_CAP_SRC_SPLIT | + 0, + }, + .ctl = { + .count = 5, + .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 }, + .flush_hw_mask = 0xf4ffffff, + }, + .pipe_vig = { + .count = 1, + .base = { 0x04000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_rgb = { + .count = 4, + .base = { 0x14000, 0x16000, 0x18000, 0x1a000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_dma = { + .count = 2, /* driver supports max of 2 currently */ + .base = { 0x24000, 0x26000, 0x28000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_cursor = { + .count = 1, + .base = { 0x34000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + MDP_PIPE_CAP_CURSOR | + 0, + }, + + .lm = { + .count = 2, + .base = { 0x44000, 0x46000 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { .id = 1, .pp = 1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + }, + .nb_stages = 8, + .max_width = 2048, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 1, + .base = { 0x54000 }, + }, + .ad = { + .count = 2, + .base = { 0x78000, 0x78800 }, + }, + .pp = { + .count = 3, + .base = { 0x70000, 0x71000, 0x72000 }, + }, + .cdm = { + .count = 1, + .base = { 0x79200 }, + }, + .intf = { + .base = { 0x6a000, 0x6a800 }, + .connect = { + [0] = INTF_DISABLED, + [1] = INTF_DSI, + }, + }, + .max_clk = 412500000, +}; + +static const struct mdp5_cfg_hw sdm660_config = { + .name = "sdm660", + .mdp = { + .count = 1, + .caps = 
MDP_CAP_DSC | + MDP_CAP_CDM | + MDP_CAP_SRC_SPLIT | + 0, + }, + .ctl = { + .count = 5, + .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 }, + .flush_hw_mask = 0xf4ffffff, + }, + .pipe_vig = { + .count = 2, + .base = { 0x04000, 0x6000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_rgb = { + .count = 4, + .base = { 0x14000, 0x16000, 0x18000, 0x1a000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_dma = { + .count = 2, /* driver supports max of 2 currently */ + .base = { 0x24000, 0x26000, 0x28000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_cursor = { + .count = 1, + .base = { 0x34000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + MDP_PIPE_CAP_CURSOR | + 0, + }, + + .lm = { + .count = 4, + .base = { 0x44000, 0x45000, 0x46000, 0x49000 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { .id = 1, .pp = 1, .dspp = 1, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 2, .pp = 2, .dspp = -1, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { .id = 3, .pp = 3, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + }, + .nb_stages = 8, + .max_width = 2560, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 2, + .base = { 0x54000, 0x56000 }, + }, + .ad = { + .count = 2, + .base = { 0x78000, 0x78800 }, + }, + .pp = { + .count = 5, + .base = { 0x70000, 0x70800, 0x71000, 0x71800, 0x72000 }, + }, + .cdm = { + .count = 1, + .base = { 0x79200 }, + }, + .dsc = { + .count = 2, + .base = { 0x80000, 0x80400 }, + }, + .intf = { + .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800 }, + .connect = { + [0] = INTF_DISABLED, + [1] = INTF_DSI, + [2] = INTF_DSI, + [3] = INTF_HDMI, + }, + }, + .max_clk = 412500000, +}; + +static const struct mdp5_cfg_handler cfg_handlers_v1[] = { + { .revision = 0, .config = { .hw = &msm8x74v1_config } }, + { .revision = 2, .config = { .hw = &msm8x74v2_config } }, + { .revision = 3, .config = { .hw = &apq8084_config } }, + { .revision = 6, .config = { .hw = &msm8x16_config } }, + { .revision = 8, .config = { .hw = &msm8x36_config } }, + { .revision = 9, .config = { .hw = &msm8x94_config } }, + { .revision = 7, .config = { .hw = &msm8x96_config } }, + { .revision = 11, .config = { .hw = &msm8x76_config } }, + { .revision = 15, .config = { .hw = &msm8917_config } }, + { .revision = 16, .config = { .hw = &msm8x53_config } }, +}; + +static const struct mdp5_cfg_handler cfg_handlers_v3[] = { + { .revision = 0, .config = { .hw = &msm8998_config } }, + { .revision = 2, .config = { .hw = &sdm660_config } }, + { .revision = 3, .config = { .hw = &sdm630_config } }, +}; + +const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler) +{ + return cfg_handler->config.hw; +} + +struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_handler) +{ + return &cfg_handler->config; +} + +int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_handler) +{ + return cfg_handler->revision; +} + +void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler) +{ + kfree(cfg_handler); +} + +struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms, + uint32_t major, uint32_t minor) +{ + struct drm_device *dev = mdp5_kms->dev; + struct mdp5_cfg_handler *cfg_handler; + const struct mdp5_cfg_handler 
*cfg_handlers; + int i, ret = 0, num_handlers; + + cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL); + if (unlikely(!cfg_handler)) { + ret = -ENOMEM; + goto fail; + } + + switch (major) { + case 1: + cfg_handlers = cfg_handlers_v1; + num_handlers = ARRAY_SIZE(cfg_handlers_v1); + break; + case 3: + cfg_handlers = cfg_handlers_v3; + num_handlers = ARRAY_SIZE(cfg_handlers_v3); + break; + default: + DRM_DEV_ERROR(dev->dev, "unexpected MDP major version: v%d.%d\n", + major, minor); + ret = -ENXIO; + goto fail; + } + + /* only after mdp5_cfg global pointer's init can we access the hw */ + for (i = 0; i < num_handlers; i++) { + if (cfg_handlers[i].revision != minor) + continue; + mdp5_cfg = cfg_handlers[i].config.hw; + + break; + } + if (unlikely(!mdp5_cfg)) { + DRM_DEV_ERROR(dev->dev, "unexpected MDP minor revision: v%d.%d\n", + major, minor); + ret = -ENXIO; + goto fail; + } + + cfg_handler->revision = minor; + cfg_handler->config.hw = mdp5_cfg; + + DBG("MDP5: %s hw config selected", mdp5_cfg->name); + + return cfg_handler; + +fail: + if (cfg_handler) + mdp5_cfg_destroy(cfg_handler); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h new file mode 100644 index 000000000..c2502cc33 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014 The Linux Foundation. All rights reserved. + */ + +#ifndef __MDP5_CFG_H__ +#define __MDP5_CFG_H__ + +#include "msm_drv.h" + +/* + * mdp5_cfg + * + * This module configures the dynamic offsets used by mdp5.xml.h + * (initialized in mdp5_cfg.c) + */ +extern const struct mdp5_cfg_hw *mdp5_cfg; + +#define MAX_CTL 8 +#define MAX_BASES 8 +#define MAX_SMP_BLOCKS 44 +#define MAX_CLIENTS 32 + +typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS); + +#define MDP5_SUB_BLOCK_DEFINITION \ + unsigned int count; \ + uint32_t base[MAX_BASES] + +struct mdp5_sub_block { + MDP5_SUB_BLOCK_DEFINITION; +}; + +struct mdp5_lm_instance { + int id; + int pp; + int dspp; + uint32_t caps; +}; + +struct mdp5_lm_block { + MDP5_SUB_BLOCK_DEFINITION; + struct mdp5_lm_instance instances[MAX_BASES]; + uint32_t nb_stages; /* number of stages per blender */ + uint32_t max_width; /* Maximum output resolution */ + uint32_t max_height; +}; + +struct mdp5_pipe_block { + MDP5_SUB_BLOCK_DEFINITION; + uint32_t caps; /* pipe capabilities */ +}; + +struct mdp5_ctl_block { + MDP5_SUB_BLOCK_DEFINITION; + uint32_t flush_hw_mask; /* FLUSH register's hardware mask */ +}; + +struct mdp5_smp_block { + int mmb_count; /* number of SMP MMBs */ + int mmb_size; /* MMB: size in bytes */ + uint32_t clients[MAX_CLIENTS]; /* SMP port allocation /pipe */ + mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */ + uint8_t reserved[MAX_CLIENTS]; /* # of MMBs allocated per client */ +}; + +struct mdp5_mdp_block { + MDP5_SUB_BLOCK_DEFINITION; + uint32_t caps; /* MDP capabilities: MDP_CAP_xxx bits */ +}; + +#define MDP5_INTF_NUM_MAX 5 + +struct mdp5_intf_block { + uint32_t base[MAX_BASES]; + u32 connect[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */ +}; + +struct mdp5_perf_block { + u32 ab_inefficiency; + u32 ib_inefficiency; + u32 clk_inefficiency; +}; + +struct mdp5_cfg_hw { + char *name; + + struct mdp5_mdp_block mdp; + struct mdp5_smp_block smp; + struct mdp5_ctl_block ctl; + struct mdp5_pipe_block pipe_vig; + struct mdp5_pipe_block pipe_rgb; + struct mdp5_pipe_block pipe_dma; + struct mdp5_pipe_block pipe_cursor; + struct 
mdp5_lm_block lm; + struct mdp5_sub_block dspp; + struct mdp5_sub_block ad; + struct mdp5_sub_block pp; + struct mdp5_sub_block dsc; + struct mdp5_sub_block cdm; + struct mdp5_intf_block intf; + struct mdp5_perf_block perf; + + uint32_t max_clk; +}; + +struct mdp5_cfg { + const struct mdp5_cfg_hw *hw; +}; + +struct mdp5_kms; +struct mdp5_cfg_handler; + +const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hnd); +struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd); +int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd); + +#define mdp5_cfg_intf_is_virtual(intf_type) ({ \ + typeof(intf_type) __val = (intf_type); \ + (__val) >= INTF_VIRTUAL ? true : false; }) + +struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms, + uint32_t major, uint32_t minor); +void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd); + +#endif /* __MDP5_CFG_H__ */ diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c new file mode 100644 index 000000000..a640af22e --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + */ + +#include <drm/drm_crtc.h> +#include <drm/drm_probe_helper.h> + +#include "mdp5_kms.h" + +#ifdef CONFIG_DRM_MSM_DSI + +static struct mdp5_kms *get_kms(struct drm_encoder *encoder) +{ + struct msm_drm_private *priv = encoder->dev->dev_private; + return to_mdp5_kms(to_mdp_kms(priv->kms)); +} + +#define VSYNC_CLK_RATE 19200000 +static int pingpong_tearcheck_setup(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + struct mdp5_kms *mdp5_kms = get_kms(encoder); + struct device *dev = encoder->dev->dev; + u32 total_lines, vclks_line, cfg; + long vsync_clk_speed; + struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc); + int pp_id = mixer->pp; + + if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) { + DRM_DEV_ERROR(dev, "vsync_clk is not initialized\n"); + return -EINVAL; + } + + total_lines = mode->vtotal * drm_mode_vrefresh(mode); + if (!total_lines) { + DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n", + __func__, mode->vtotal, drm_mode_vrefresh(mode)); + return -EINVAL; + } + + vsync_clk_speed = clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE); + if (vsync_clk_speed <= 0) { + DRM_DEV_ERROR(dev, "vsync_clk round rate failed %ld\n", + vsync_clk_speed); + return -EINVAL; + } + vclks_line = vsync_clk_speed / total_lines; + + cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN + | MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN; + cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line); + + /* + * Tearcheck emits a blanking signal every vclks_line * vtotal * 2 ticks on + * the vsync_clk, equating to roughly half the desired panel refresh rate. + * This is only necessary as a stability fallback if interrupts from the + * panel arrive too late or not at all, but is currently used by default + * because these panel interrupts are not wired up yet.
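+ * + * A worked example, with hypothetical panel timings that are not part of + * this change: vtotal = 1125 at 60 Hz gives total_lines = 67500, so + * vclks_line = 19200000 / 67500 ~= 284; the counter then fires every + * 284 * 1125 * 2 ~= 639000 vsync_clk ticks, i.e. at roughly 30 Hz, half + * the 60 Hz refresh rate.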
+ */ + mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg); + mdp5_write(mdp5_kms, + REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), (2 * mode->vtotal)); + + mdp5_write(mdp5_kms, + REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay); + mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1); + mdp5_write(mdp5_kms, REG_MDP5_PP_START_POS(pp_id), mode->vdisplay); + mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_THRESH(pp_id), + MDP5_PP_SYNC_THRESH_START(4) | + MDP5_PP_SYNC_THRESH_CONTINUE(4)); + mdp5_write(mdp5_kms, REG_MDP5_PP_AUTOREFRESH_CONFIG(pp_id), 0x0); + + return 0; +} + +static int pingpong_tearcheck_enable(struct drm_encoder *encoder) +{ + struct mdp5_kms *mdp5_kms = get_kms(encoder); + struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc); + int pp_id = mixer->pp; + int ret; + + ret = clk_set_rate(mdp5_kms->vsync_clk, + clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE)); + if (ret) { + DRM_DEV_ERROR(encoder->dev->dev, + "vsync_clk clk_set_rate failed, %d\n", ret); + return ret; + } + ret = clk_prepare_enable(mdp5_kms->vsync_clk); + if (ret) { + DRM_DEV_ERROR(encoder->dev->dev, + "vsync_clk clk_prepare_enable failed, %d\n", ret); + return ret; + } + + mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 1); + + return 0; +} + +static void pingpong_tearcheck_disable(struct drm_encoder *encoder) +{ + struct mdp5_kms *mdp5_kms = get_kms(encoder); + struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc); + int pp_id = mixer->pp; + + mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 0); + clk_disable_unprepare(mdp5_kms->vsync_clk); +} + +void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + mode = adjusted_mode; + + DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode)); + pingpong_tearcheck_setup(encoder, mode); + mdp5_crtc_set_pipeline(encoder->crtc); +} + +void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) +{ + struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); + struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; + struct mdp5_interface *intf = mdp5_cmd_enc->intf; + struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc); + + if (WARN_ON(!mdp5_cmd_enc->enabled)) + return; + + pingpong_tearcheck_disable(encoder); + + mdp5_ctl_set_encoder_state(ctl, pipeline, false); + mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true); + + mdp5_cmd_enc->enabled = false; +} + +void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) +{ + struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); + struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; + struct mdp5_interface *intf = mdp5_cmd_enc->intf; + struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc); + + if (WARN_ON(mdp5_cmd_enc->enabled)) + return; + + if (pingpong_tearcheck_enable(encoder)) + return; + + mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true); + + mdp5_ctl_set_encoder_state(ctl, pipeline, true); + + mdp5_cmd_enc->enabled = true; +} + +int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, + struct drm_encoder *slave_encoder) +{ + struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); + struct mdp5_kms *mdp5_kms; + struct device *dev; + int intf_num; + u32 data = 0; + + if (!encoder || !slave_encoder) + return -EINVAL; + + mdp5_kms = get_kms(encoder); + intf_num = mdp5_cmd_enc->intf->num; + + /* Switch slave encoder's trigger MUX, to use the master's + * start signal for the slave encoder + */ + if (intf_num == 1) + data 
|= MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX; + else if (intf_num == 2) + data |= MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX; + else + return -EINVAL; + + /* Smart Panel, Sync mode */ + data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL; + + dev = &mdp5_kms->pdev->dev; + + /* Make sure clocks are on when connectors call this function. */ + pm_runtime_get_sync(dev); + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data); + + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, + MDP5_SPLIT_DPL_LOWER_SMART_PANEL); + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1); + pm_runtime_put_sync(dev); + + return 0; +} +#endif /* CONFIG_DRM_MSM_DSI */ diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c new file mode 100644 index 000000000..86036dd4e --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -0,0 +1,1360 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved. + * Copyright (C) 2013 Red Hat + * Author: Rob Clark <robdclark@gmail.com> + */ + +#include <linux/sort.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_blend.h> +#include <drm/drm_crtc.h> +#include <drm/drm_flip_work.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_mode.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_vblank.h> + +#include "mdp5_kms.h" +#include "msm_gem.h" + +#define CURSOR_WIDTH 64 +#define CURSOR_HEIGHT 64 + +struct mdp5_crtc { + struct drm_crtc base; + int id; + bool enabled; + + spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */ + + /* if there is a pending flip, this will be non-null: */ + struct drm_pending_vblank_event *event; + + /* Bits that have been flushed at the last commit, + * used to decide if a vsync has happened since the last commit. + */ + u32 flushed_mask; + +#define PENDING_CURSOR 0x1 +#define PENDING_FLIP 0x2 + atomic_t pending; + + /* for unref'ing cursor bo's after scanout completes: */ + struct drm_flip_work unref_cursor_work; + + struct mdp_irq vblank; + struct mdp_irq err; + struct mdp_irq pp_done; + + struct completion pp_completion; + + bool lm_cursor_enabled; + + struct { + /* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/ + spinlock_t lock; + + /* current cursor being scanned out: */ + struct drm_gem_object *scanout_bo; + uint64_t iova; + uint32_t width, height; + int x, y; + } cursor; +}; +#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base) + +static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc); + +static struct mdp5_kms *get_kms(struct drm_crtc *crtc) +{ + struct msm_drm_private *priv = crtc->dev->dev_private; + return to_mdp5_kms(to_mdp_kms(priv->kms)); +} + +static void request_pending(struct drm_crtc *crtc, uint32_t pending) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + + atomic_or(pending, &mdp5_crtc->pending); + mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank); +} + +static void request_pp_done_pending(struct drm_crtc *crtc) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + reinit_completion(&mdp5_crtc->pp_completion); +} + +static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_ctl *ctl = mdp5_cstate->ctl; + struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; + bool start = !mdp5_cstate->defer_start; + + mdp5_cstate->defer_start = false; + + DBG("%s: flush=%08x", crtc->name, flush_mask); + + return mdp5_ctl_commit(ctl, pipeline, flush_mask, start); +} + +/* + * flush updates, to make sure hw is updated to new scanout fb, + * so that we can safely queue unref to current fb (i.e. next + * vblank we know hw is done w/ previous scanout_fb).
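+ * + * (A summary of crtc_flush_all() below, not new behavior: the flush mask + * is purely additive, the union of every visible plane's flush bits plus + * the LM flush bit of the mixer, and of the right mixer when the CRTC + * spans two LMs.)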
+ */ +static u32 crtc_flush_all(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_hw_mixer *mixer, *r_mixer; + struct drm_plane *plane; + uint32_t flush_mask = 0; + + /* this should not happen: */ + if (WARN_ON(!mdp5_cstate->ctl)) + return 0; + + drm_atomic_crtc_for_each_plane(plane, crtc) { + if (!plane->state->visible) + continue; + flush_mask |= mdp5_plane_get_flush(plane); + } + + mixer = mdp5_cstate->pipeline.mixer; + flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm); + + r_mixer = mdp5_cstate->pipeline.r_mixer; + if (r_mixer) + flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm); + + return crtc_flush(crtc, flush_mask); +} + +/* if file!=NULL, this is preclose potential cancel-flip path */ +static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_ctl *ctl = mdp5_cstate->ctl; + struct drm_device *dev = crtc->dev; + struct drm_pending_vblank_event *event; + unsigned long flags; + + spin_lock_irqsave(&dev->event_lock, flags); + event = mdp5_crtc->event; + if (event) { + mdp5_crtc->event = NULL; + DBG("%s: send event: %p", crtc->name, event); + drm_crtc_send_vblank_event(crtc, event); + } + spin_unlock_irqrestore(&dev->event_lock, flags); + + if (ctl && !crtc->state->enable) { + /* set STAGE_UNUSED for all layers */ + mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0); + /* XXX: What to do here? */ + /* mdp5_crtc->ctl = NULL; */ + } +} + +static void unref_cursor_worker(struct drm_flip_work *work, void *val) +{ + struct mdp5_crtc *mdp5_crtc = + container_of(work, struct mdp5_crtc, unref_cursor_work); + struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base); + struct msm_kms *kms = &mdp5_kms->base.base; + + msm_gem_unpin_iova(val, kms->aspace); + drm_gem_object_put(val); +} + +static void mdp5_crtc_destroy(struct drm_crtc *crtc) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + + drm_crtc_cleanup(crtc); + drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work); + + kfree(mdp5_crtc); +} + +static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage) +{ + switch (stage) { + case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA; + case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA; + case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA; + case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA; + case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA; + case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA; + case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA; + default: + return 0; + } +} + +/* + * left/right pipe offsets for the stage array used in blend_setup() + */ +#define PIPE_LEFT 0 +#define PIPE_RIGHT 1 + +/* + * blend_setup() - blend all the planes of a CRTC + * + * If no base layer is available, border will be enabled as the base layer. + * Otherwise all layers will be blended based on their stage calculated + * in mdp5_crtc_atomic_check. 
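+ * + * (A summary of the per-stage programming below, not new behavior: the + * constant alphas are fg_alpha = plane alpha >> 8 and bg_alpha = 0xff - + * fg_alpha; DRM_MODE_BLEND_PREMULTI keeps FG at constant alpha and takes + * the BG term from the FG pixel alpha, while DRM_MODE_BLEND_COVERAGE uses + * the FG pixel alpha on both sides.)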
+ */
+static void blend_setup(struct drm_crtc *crtc)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+	struct mdp5_kms *mdp5_kms = get_kms(crtc);
+	struct drm_plane *plane;
+	struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
+	const struct mdp_format *format;
+	struct mdp5_hw_mixer *mixer = pipeline->mixer;
+	uint32_t lm = mixer->lm;
+	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
+	uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
+	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
+	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
+	unsigned long flags;
+	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
+	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
+	int i, plane_cnt = 0;
+	bool bg_alpha_enabled = false;
+	u32 mixer_op_mode = 0;
+	u32 val;
+#define blender(stage)	((stage) - STAGE0)
+
+	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
+
+	/* ctl could be released already when we are shutting down: */
+	/* XXX: Can this happen now? */
+	if (!ctl)
+		goto out;
+
+	/* Collect all plane information */
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		enum mdp5_pipe right_pipe;
+
+		if (!plane->state->visible)
+			continue;
+
+		pstate = to_mdp5_plane_state(plane->state);
+		pstates[pstate->stage] = pstate;
+		stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
+		/*
+		 * if we have a right mixer, stage the same pipe as we
+		 * have on the left mixer
+		 */
+		if (r_mixer)
+			r_stage[pstate->stage][PIPE_LEFT] =
+				mdp5_plane_pipe(plane);
+		/*
+		 * if we have a right pipe (i.e., the plane comprises two
+		 * hwpipes), then stage the right pipe on the right side of
+		 * both the layer mixers
+		 */
+		right_pipe = mdp5_plane_right_pipe(plane);
+		if (right_pipe) {
+			stage[pstate->stage][PIPE_RIGHT] = right_pipe;
+			r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
+		}
+
+		plane_cnt++;
+	}
+
+	if (!pstates[STAGE_BASE]) {
+		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
+		DBG("Border Color is enabled");
+	} else if (plane_cnt) {
+		format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));
+
+		if (format->alpha_enable)
+			bg_alpha_enabled = true;
+	}
+
+	/* Program the blend parameters for each staged plane */
+	for (i = STAGE0; i <= STAGE_MAX; i++) {
+		if (!pstates[i])
+			continue;
+
+		format = to_mdp_format(
+			msm_framebuffer_format(pstates[i]->base.fb));
+		plane = pstates[i]->base.plane;
+		blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
+		fg_alpha = pstates[i]->base.alpha >> 8;
+		bg_alpha = 0xFF - fg_alpha;
+
+		if (!format->alpha_enable && bg_alpha_enabled)
+			mixer_op_mode = 0;
+		else
+			mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);
+
+		DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);
+
+		if (format->alpha_enable &&
+		    pstates[i]->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
+			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
+			if (fg_alpha != 0xff) {
+				bg_alpha = fg_alpha;
+				blend_op |=
+					MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
+					MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
+			} else {
+				blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
+			}
+		} else if (format->alpha_enable &&
+			   pstates[i]->base.pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
+			blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
+				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
+			if (fg_alpha != 0xff) {
+				bg_alpha = fg_alpha;
+				blend_op |=
+
MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA | + MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA | + MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA | + MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA; + } else { + blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA; + } + } + + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm, + blender(i)), blend_op); + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm, + blender(i)), fg_alpha); + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm, + blender(i)), bg_alpha); + if (r_mixer) { + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm, + blender(i)), blend_op); + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm, + blender(i)), fg_alpha); + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm, + blender(i)), bg_alpha); + } + } + + val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm)); + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), + val | mixer_op_mode); + if (r_mixer) { + val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm)); + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), + val | mixer_op_mode); + } + + mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt, + ctl_blend_flags); +out: + spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags); +} + +static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_kms *mdp5_kms = get_kms(crtc); + struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer; + struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer; + uint32_t lm = mixer->lm; + u32 mixer_width, val; + unsigned long flags; + struct drm_display_mode *mode; + + if (WARN_ON(!crtc->state)) + return; + + mode = &crtc->state->adjusted_mode; + + DBG("%s: set mode: " DRM_MODE_FMT, crtc->name, DRM_MODE_ARG(mode)); + + mixer_width = mode->hdisplay; + if (r_mixer) + mixer_width /= 2; + + spin_lock_irqsave(&mdp5_crtc->lm_lock, flags); + mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm), + MDP5_LM_OUT_SIZE_WIDTH(mixer_width) | + MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay)); + + /* Assign mixer to LEFT side in source split mode */ + val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm)); + val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT; + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val); + + if (r_mixer) { + u32 r_lm = r_mixer->lm; + + mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm), + MDP5_LM_OUT_SIZE_WIDTH(mixer_width) | + MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay)); + + /* Assign mixer to RIGHT side in source split mode */ + val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm)); + val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT; + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val); + } + + spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags); +} + +static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_encoder *encoder; + + drm_for_each_encoder(encoder, dev) + if (encoder->crtc == crtc) + return encoder; + + return NULL; +} + +static bool mdp5_crtc_get_scanout_position(struct drm_crtc *crtc, + bool in_vblank_irq, + int *vpos, int *hpos, + ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode) +{ + unsigned int pipe = crtc->index; + struct drm_encoder *encoder; + int line, vsw, vbp, vactive_start, vactive_end, vfp_end; + + + encoder = get_encoder_from_crtc(crtc); + if (!encoder) { + DRM_ERROR("no encoder found for crtc %d\n", pipe); + return false; + } + + vsw = mode->crtc_vsync_end - mode->crtc_vsync_start; + vbp = 
mode->crtc_vtotal - mode->crtc_vsync_end; + + /* + * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at + * the end of VFP. Translate the porch values relative to the line + * counter positions. + */ + + vactive_start = vsw + vbp + 1; + + vactive_end = vactive_start + mode->crtc_vdisplay; + + /* last scan line before VSYNC */ + vfp_end = mode->crtc_vtotal; + + if (stime) + *stime = ktime_get(); + + line = mdp5_encoder_get_linecount(encoder); + + if (line < vactive_start) + line -= vactive_start; + else if (line > vactive_end) + line = line - vfp_end - vactive_start; + else + line -= vactive_start; + + *vpos = line; + *hpos = 0; + + if (etime) + *etime = ktime_get(); + + return true; +} + +static u32 mdp5_crtc_get_vblank_counter(struct drm_crtc *crtc) +{ + struct drm_encoder *encoder; + + encoder = get_encoder_from_crtc(crtc); + if (!encoder) + return 0; + + return mdp5_encoder_get_framecount(encoder); +} + +static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_kms *mdp5_kms = get_kms(crtc); + struct device *dev = &mdp5_kms->pdev->dev; + unsigned long flags; + + DBG("%s", crtc->name); + + if (WARN_ON(!mdp5_crtc->enabled)) + return; + + /* Disable/save vblank irq handling before power is disabled */ + drm_crtc_vblank_off(crtc); + + if (mdp5_cstate->cmd_mode) + mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done); + + mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err); + pm_runtime_put_sync(dev); + + if (crtc->state->event && !crtc->state->active) { + WARN_ON(mdp5_crtc->event); + spin_lock_irqsave(&mdp5_kms->dev->event_lock, flags); + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irqrestore(&mdp5_kms->dev->event_lock, flags); + } + + mdp5_crtc->enabled = false; +} + +static void mdp5_crtc_vblank_on(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_interface *intf = mdp5_cstate->pipeline.intf; + u32 count; + + count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 
0 : 0xffffffff; + drm_crtc_set_max_vblank_count(crtc, count); + + drm_crtc_vblank_on(crtc); +} + +static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_kms *mdp5_kms = get_kms(crtc); + struct device *dev = &mdp5_kms->pdev->dev; + + DBG("%s", crtc->name); + + if (WARN_ON(mdp5_crtc->enabled)) + return; + + pm_runtime_get_sync(dev); + + if (mdp5_crtc->lm_cursor_enabled) { + /* + * Restore LM cursor state, as it might have been lost + * with suspend: + */ + if (mdp5_crtc->cursor.iova) { + unsigned long flags; + + spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); + mdp5_crtc_restore_cursor(crtc); + spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); + + mdp5_ctl_set_cursor(mdp5_cstate->ctl, + &mdp5_cstate->pipeline, 0, true); + } else { + mdp5_ctl_set_cursor(mdp5_cstate->ctl, + &mdp5_cstate->pipeline, 0, false); + } + } + + /* Restore vblank irq handling after power is enabled */ + mdp5_crtc_vblank_on(crtc); + + mdp5_crtc_mode_set_nofb(crtc); + + mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err); + + if (mdp5_cstate->cmd_mode) + mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done); + + mdp5_crtc->enabled = true; +} + +static int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc, + struct drm_crtc_state *new_crtc_state, + bool need_right_mixer) +{ + struct mdp5_crtc_state *mdp5_cstate = + to_mdp5_crtc_state(new_crtc_state); + struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; + struct mdp5_interface *intf; + bool new_mixer = false; + + new_mixer = !pipeline->mixer; + + if ((need_right_mixer && !pipeline->r_mixer) || + (!need_right_mixer && pipeline->r_mixer)) + new_mixer = true; + + if (new_mixer) { + struct mdp5_hw_mixer *old_mixer = pipeline->mixer; + struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer; + u32 caps; + int ret; + + caps = MDP_LM_CAP_DISPLAY; + if (need_right_mixer) + caps |= MDP_LM_CAP_PAIR; + + ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps, + &pipeline->mixer, need_right_mixer ? + &pipeline->r_mixer : NULL); + if (ret) + return ret; + + ret = mdp5_mixer_release(new_crtc_state->state, old_mixer); + if (ret) + return ret; + + if (old_r_mixer) { + ret = mdp5_mixer_release(new_crtc_state->state, old_r_mixer); + if (ret) + return ret; + + if (!need_right_mixer) + pipeline->r_mixer = NULL; + } + } + + /* + * these should have been already set up in the encoder's atomic + * check (called by drm_atomic_helper_check_modeset) + */ + intf = pipeline->intf; + + mdp5_cstate->err_irqmask = intf2err(intf->num); + mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf); + + if ((intf->type == INTF_DSI) && + (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) { + mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer); + mdp5_cstate->cmd_mode = true; + } else { + mdp5_cstate->pp_done_irqmask = 0; + mdp5_cstate->cmd_mode = false; + } + + return 0; +} + +struct plane_state { + struct drm_plane *plane; + struct mdp5_plane_state *state; +}; + +static int pstate_cmp(const void *a, const void *b) +{ + struct plane_state *pa = (struct plane_state *)a; + struct plane_state *pb = (struct plane_state *)b; + return pa->state->base.normalized_zpos - pb->state->base.normalized_zpos; +} + +/* is there a helper for this? 
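+ * (apparently not: the check below simply verifies that the plane rect
+ * fully covers the CRTC mode)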
*/ +static bool is_fullscreen(struct drm_crtc_state *cstate, + struct drm_plane_state *pstate) +{ + return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) && + ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) && + ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay); +} + +static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc, + struct drm_crtc_state *new_crtc_state, + struct drm_plane_state *bpstate) +{ + struct mdp5_crtc_state *mdp5_cstate = + to_mdp5_crtc_state(new_crtc_state); + + /* + * if we're in source split mode, it's mandatory to have + * border out on the base stage + */ + if (mdp5_cstate->pipeline.r_mixer) + return STAGE0; + + /* if the bottom-most layer is not fullscreen, we need to use + * it for solid-color: + */ + if (!is_fullscreen(new_crtc_state, bpstate)) + return STAGE0; + + return STAGE_BASE; +} + +static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc_state); + struct mdp5_interface *intf = mdp5_cstate->pipeline.intf; + struct mdp5_kms *mdp5_kms = get_kms(crtc); + struct drm_plane *plane; + struct drm_device *dev = crtc->dev; + struct plane_state pstates[STAGE_MAX + 1]; + const struct mdp5_cfg_hw *hw_cfg; + const struct drm_plane_state *pstate; + const struct drm_display_mode *mode = &crtc_state->adjusted_mode; + bool cursor_plane = false; + bool need_right_mixer = false; + int cnt = 0, i; + int ret; + enum mdp_mixer_stage_id start; + + DBG("%s: check", crtc->name); + + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { + struct mdp5_plane_state *mdp5_pstate = + to_mdp5_plane_state(pstate); + + if (!pstate->visible) + continue; + + pstates[cnt].plane = plane; + pstates[cnt].state = to_mdp5_plane_state(pstate); + + mdp5_pstate->needs_dirtyfb = + intf->mode == MDP5_INTF_DSI_MODE_COMMAND; + + /* + * if any plane on this crtc uses 2 hwpipes, then we need + * the crtc to have a right hwmixer. + */ + if (pstates[cnt].state->r_hwpipe) + need_right_mixer = true; + cnt++; + + if (plane->type == DRM_PLANE_TYPE_CURSOR) + cursor_plane = true; + } + + /* bail out early if there aren't any planes */ + if (!cnt) + return 0; + + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); + + /* + * we need a right hwmixer if the mode's width is greater than a single + * LM's max width + */ + if (mode->hdisplay > hw_cfg->lm.max_width) + need_right_mixer = true; + + ret = mdp5_crtc_setup_pipeline(crtc, crtc_state, need_right_mixer); + if (ret) { + DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret); + return ret; + } + + /* assign a stage based on sorted zpos property */ + sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL); + + /* trigger a warning if cursor isn't the highest zorder */ + WARN_ON(cursor_plane && + (pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR)); + + start = get_start_stage(crtc, crtc_state, &pstates[0].state->base); + + /* verify that there are not too many planes attached to crtc + * and that we don't have conflicting mixer stages: + */ + if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) { + DRM_DEV_ERROR(dev->dev, "too many planes! 
cnt=%d, start stage=%d\n", + cnt, start); + return -EINVAL; + } + + for (i = 0; i < cnt; i++) { + if (cursor_plane && (i == (cnt - 1))) + pstates[i].state->stage = hw_cfg->lm.nb_stages; + else + pstates[i].state->stage = start + i; + DBG("%s: assign pipe %s on stage=%d", crtc->name, + pstates[i].plane->name, + pstates[i].state->stage); + } + + return 0; +} + +static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + DBG("%s: begin", crtc->name); +} + +static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct drm_device *dev = crtc->dev; + unsigned long flags; + + DBG("%s: event: %p", crtc->name, crtc->state->event); + + WARN_ON(mdp5_crtc->event); + + spin_lock_irqsave(&dev->event_lock, flags); + mdp5_crtc->event = crtc->state->event; + crtc->state->event = NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); + + /* + * If no CTL has been allocated in mdp5_crtc_atomic_check(), + * it means we are trying to flush a CRTC whose state is disabled: + * nothing else needs to be done. + */ + /* XXX: Can this happen now ? */ + if (unlikely(!mdp5_cstate->ctl)) + return; + + blend_setup(crtc); + + /* PP_DONE irq is only used by command mode for now. + * It is better to request pending before FLUSH and START trigger + * to make sure no pp_done irq missed. + * This is safe because no pp_done will happen before SW trigger + * in command mode. + */ + if (mdp5_cstate->cmd_mode) + request_pp_done_pending(crtc); + + mdp5_crtc->flushed_mask = crtc_flush_all(crtc); + + /* XXX are we leaking out state here? */ + mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask; + mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask; + mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask; + + request_pending(crtc, PENDING_FLIP); +} + +static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + uint32_t xres = crtc->mode.hdisplay; + uint32_t yres = crtc->mode.vdisplay; + + /* + * Cursor Region Of Interest (ROI) is a plane read from cursor + * buffer to render. The ROI region is determined by the visibility of + * the cursor point. In the default Cursor image the cursor point will + * be at the top left of the cursor image. + * + * Without rotation: + * If the cursor point reaches the right (xres - x < cursor.width) or + * bottom (yres - y < cursor.height) boundary of the screen, then ROI + * width and ROI height need to be evaluated to crop the cursor image + * accordingly. + * (xres-x) will be new cursor width when x > (xres - cursor.width) + * (yres-y) will be new cursor height when y > (yres - cursor.height) + * + * With rotation: + * We get negative x and/or y coordinates. 
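+ * E.g. a 64x64 cursor at x = -16 keeps roi_w = 64 - 16 = 48 visible
+ * columns, read starting at src_x = 16 inside the cursor buffer while the
+ * on-screen x is clamped to 0 (see mdp5_crtc_restore_cursor()):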
+ * (cursor.width - abs(x)) will be the new cursor width when x < 0
+ * (cursor.height - abs(y)) will be the new cursor height when y < 0
+ */
+	if (mdp5_crtc->cursor.x >= 0)
+		*roi_w = min(mdp5_crtc->cursor.width, xres -
+			mdp5_crtc->cursor.x);
+	else
+		*roi_w = mdp5_crtc->cursor.width - abs(mdp5_crtc->cursor.x);
+	if (mdp5_crtc->cursor.y >= 0)
+		*roi_h = min(mdp5_crtc->cursor.height, yres -
+			mdp5_crtc->cursor.y);
+	else
+		*roi_h = mdp5_crtc->cursor.height - abs(mdp5_crtc->cursor.y);
+}
+
+static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
+{
+	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_ARGB8888);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_kms *mdp5_kms = get_kms(crtc);
+	const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
+	uint32_t blendcfg, stride;
+	uint32_t x, y, src_x, src_y, width, height;
+	uint32_t roi_w, roi_h;
+	int lm;
+
+	assert_spin_locked(&mdp5_crtc->cursor.lock);
+
+	lm = mdp5_cstate->pipeline.mixer->lm;
+
+	x = mdp5_crtc->cursor.x;
+	y = mdp5_crtc->cursor.y;
+	width = mdp5_crtc->cursor.width;
+	height = mdp5_crtc->cursor.height;
+
+	stride = width * info->cpp[0];
+
+	get_roi(crtc, &roi_w, &roi_h);
+
+	/* If the cursor buffer overlaps the upper or left screen border
+	 * due to rotation, the pixel offset of the ROI inside the cursor
+	 * buffer is the positive overlap distance.
+	 */
+	if (mdp5_crtc->cursor.x < 0) {
+		src_x = abs(mdp5_crtc->cursor.x);
+		x = 0;
+	} else {
+		src_x = 0;
+	}
+	if (mdp5_crtc->cursor.y < 0) {
+		src_y = abs(mdp5_crtc->cursor.y);
+		y = 0;
+	} else {
+		src_y = 0;
+	}
+	DBG("%s: x=%d, y=%d roi_w=%d roi_h=%d src_x=%d src_y=%d",
+			crtc->name, x, y, roi_w, roi_h, src_x, src_y);
+
+	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
+	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
+			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
+	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
+			MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
+			MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
+	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
+			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
+			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
+	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
+			MDP5_LM_CURSOR_START_XY_Y_START(y) |
+			MDP5_LM_CURSOR_START_XY_X_START(x));
+	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_XY(lm),
+			MDP5_LM_CURSOR_XY_SRC_Y(src_y) |
+			MDP5_LM_CURSOR_XY_SRC_X(src_x));
+	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
+			mdp5_crtc->cursor.iova);
+
+	blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
+	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
+	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
+}
+
+static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
+		struct drm_file *file, uint32_t handle,
+		uint32_t width, uint32_t height)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+	struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+	struct drm_device *dev = crtc->dev;
+	struct mdp5_kms *mdp5_kms = get_kms(crtc);
+	struct platform_device *pdev = mdp5_kms->pdev;
+	struct msm_kms *kms = &mdp5_kms->base.base;
+	struct drm_gem_object *cursor_bo, *old_bo = NULL;
+	struct mdp5_ctl *ctl;
+	int ret;
+	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
+	bool cursor_enable = true;
+	unsigned long flags;
+
+	if (!mdp5_crtc->lm_cursor_enabled) {
+		dev_warn(dev->dev,
+			 "cursor_set is deprecated with cursor planes\n");
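+		/*
+		 * Note: lm_cursor_enabled is false only when the CRTC was
+		 * created with a dedicated cursor plane, in which case the
+		 * legacy cursor ioctls are not wired up at all
+		 * (mdp5_crtc_no_lm_cursor_funcs); this path is a safety net.
+		 */
+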
return -EINVAL; + } + + if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) { + DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height); + return -EINVAL; + } + + ctl = mdp5_cstate->ctl; + if (!ctl) + return -EINVAL; + + /* don't support LM cursors when we have source split enabled */ + if (mdp5_cstate->pipeline.r_mixer) + return -EINVAL; + + if (!handle) { + DBG("Cursor off"); + cursor_enable = false; + mdp5_crtc->cursor.iova = 0; + pm_runtime_get_sync(&pdev->dev); + goto set_cursor; + } + + cursor_bo = drm_gem_object_lookup(file, handle); + if (!cursor_bo) + return -ENOENT; + + ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, + &mdp5_crtc->cursor.iova); + if (ret) { + drm_gem_object_put(cursor_bo); + return -EINVAL; + } + + pm_runtime_get_sync(&pdev->dev); + + spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); + old_bo = mdp5_crtc->cursor.scanout_bo; + + mdp5_crtc->cursor.scanout_bo = cursor_bo; + mdp5_crtc->cursor.width = width; + mdp5_crtc->cursor.height = height; + + mdp5_crtc_restore_cursor(crtc); + + spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); + +set_cursor: + ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable); + if (ret) { + DRM_DEV_ERROR(dev->dev, "failed to %sable cursor: %d\n", + cursor_enable ? "en" : "dis", ret); + goto end; + } + + crtc_flush(crtc, flush_mask); + +end: + pm_runtime_put_sync(&pdev->dev); + if (old_bo) { + drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo); + /* enable vblank to complete cursor work: */ + request_pending(crtc, PENDING_CURSOR); + } + return ret; +} + +static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) +{ + struct mdp5_kms *mdp5_kms = get_kms(crtc); + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); + struct drm_device *dev = crtc->dev; + uint32_t roi_w; + uint32_t roi_h; + unsigned long flags; + + if (!mdp5_crtc->lm_cursor_enabled) { + dev_warn(dev->dev, + "cursor_move is deprecated with cursor planes\n"); + return -EINVAL; + } + + /* don't support LM cursors when we have source split enabled */ + if (mdp5_cstate->pipeline.r_mixer) + return -EINVAL; + + /* In case the CRTC is disabled, just drop the cursor update */ + if (unlikely(!crtc->state->enable)) + return 0; + + /* accept negative x/y coordinates up to maximum cursor overlap */ + mdp5_crtc->cursor.x = x = max(x, -(int)mdp5_crtc->cursor.width); + mdp5_crtc->cursor.y = y = max(y, -(int)mdp5_crtc->cursor.height); + + get_roi(crtc, &roi_w, &roi_h); + + pm_runtime_get_sync(&mdp5_kms->pdev->dev); + + spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); + mdp5_crtc_restore_cursor(crtc); + spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); + + crtc_flush(crtc, flush_mask); + + pm_runtime_put_sync(&mdp5_kms->pdev->dev); + + return 0; +} + +static void +mdp5_crtc_atomic_print_state(struct drm_printer *p, + const struct drm_crtc_state *state) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state); + struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; + struct mdp5_kms *mdp5_kms = get_kms(state->crtc); + + if (WARN_ON(!pipeline)) + return; + + if (mdp5_cstate->ctl) + drm_printf(p, "\tctl=%d\n", mdp5_ctl_get_ctl_id(mdp5_cstate->ctl)); + + drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ? + pipeline->mixer->name : "(null)"); + + if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT) + drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ? 
+ pipeline->r_mixer->name : "(null)"); + + drm_printf(p, "\tcmd_mode=%d\n", mdp5_cstate->cmd_mode); +} + +static struct drm_crtc_state * +mdp5_crtc_duplicate_state(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate; + + if (WARN_ON(!crtc->state)) + return NULL; + + mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state), + sizeof(*mdp5_cstate), GFP_KERNEL); + if (!mdp5_cstate) + return NULL; + + __drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base); + + return &mdp5_cstate->base; +} + +static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state); + + __drm_atomic_helper_crtc_destroy_state(state); + + kfree(mdp5_cstate); +} + +static void mdp5_crtc_reset(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate = + kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL); + + if (crtc->state) + mdp5_crtc_destroy_state(crtc, crtc->state); + + if (mdp5_cstate) + __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base); + else + __drm_atomic_helper_crtc_reset(crtc, NULL); +} + +static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = { + .set_config = drm_atomic_helper_set_config, + .destroy = mdp5_crtc_destroy, + .page_flip = drm_atomic_helper_page_flip, + .reset = mdp5_crtc_reset, + .atomic_duplicate_state = mdp5_crtc_duplicate_state, + .atomic_destroy_state = mdp5_crtc_destroy_state, + .atomic_print_state = mdp5_crtc_atomic_print_state, + .get_vblank_counter = mdp5_crtc_get_vblank_counter, + .enable_vblank = msm_crtc_enable_vblank, + .disable_vblank = msm_crtc_disable_vblank, + .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, +}; + +static const struct drm_crtc_funcs mdp5_crtc_funcs = { + .set_config = drm_atomic_helper_set_config, + .destroy = mdp5_crtc_destroy, + .page_flip = drm_atomic_helper_page_flip, + .reset = mdp5_crtc_reset, + .atomic_duplicate_state = mdp5_crtc_duplicate_state, + .atomic_destroy_state = mdp5_crtc_destroy_state, + .cursor_set = mdp5_crtc_cursor_set, + .cursor_move = mdp5_crtc_cursor_move, + .atomic_print_state = mdp5_crtc_atomic_print_state, + .get_vblank_counter = mdp5_crtc_get_vblank_counter, + .enable_vblank = msm_crtc_enable_vblank, + .disable_vblank = msm_crtc_disable_vblank, + .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, +}; + +static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { + .mode_set_nofb = mdp5_crtc_mode_set_nofb, + .atomic_check = mdp5_crtc_atomic_check, + .atomic_begin = mdp5_crtc_atomic_begin, + .atomic_flush = mdp5_crtc_atomic_flush, + .atomic_enable = mdp5_crtc_atomic_enable, + .atomic_disable = mdp5_crtc_atomic_disable, + .get_scanout_position = mdp5_crtc_get_scanout_position, +}; + +static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) +{ + struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank); + struct drm_crtc *crtc = &mdp5_crtc->base; + struct msm_drm_private *priv = crtc->dev->dev_private; + unsigned pending; + + mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank); + + pending = atomic_xchg(&mdp5_crtc->pending, 0); + + if (pending & PENDING_FLIP) { + complete_flip(crtc, NULL); + } + + if (pending & PENDING_CURSOR) + drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq); +} + +static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus) +{ + struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err); + + DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus); +} + +static void 
mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus) +{ + struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, + pp_done); + + complete_all(&mdp5_crtc->pp_completion); +} + +static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + int ret; + + ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion, + msecs_to_jiffies(50)); + if (ret == 0) + dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n", + mdp5_cstate->pipeline.mixer->lm); +} + +static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_ctl *ctl = mdp5_cstate->ctl; + int ret; + + /* Should not call this function if crtc is disabled. */ + if (!ctl) + return; + + ret = drm_crtc_vblank_get(crtc); + if (ret) + return; + + ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue, + ((mdp5_ctl_get_commit_status(ctl) & + mdp5_crtc->flushed_mask) == 0), + msecs_to_jiffies(50)); + if (ret <= 0) + dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id); + + mdp5_crtc->flushed_mask = 0; + + drm_crtc_vblank_put(crtc); +} + +uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + return mdp5_crtc->vblank.irqmask; +} + +void mdp5_crtc_set_pipeline(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_kms *mdp5_kms = get_kms(crtc); + + /* should this be done elsewhere ? */ + mdp_irq_update(&mdp5_kms->base); + + mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline); +} + +struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + + return mdp5_cstate->ctl; +} + +struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate; + + if (WARN_ON(!crtc)) + return ERR_PTR(-EINVAL); + + mdp5_cstate = to_mdp5_crtc_state(crtc->state); + + return WARN_ON(!mdp5_cstate->pipeline.mixer) ? + ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer; +} + +struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate; + + if (WARN_ON(!crtc)) + return ERR_PTR(-EINVAL); + + mdp5_cstate = to_mdp5_crtc_state(crtc->state); + + return &mdp5_cstate->pipeline; +} + +void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + + if (mdp5_cstate->cmd_mode) + mdp5_crtc_wait_for_pp_done(crtc); + else + mdp5_crtc_wait_for_flush_done(crtc); +} + +/* initialize crtc */ +struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, + struct drm_plane *plane, + struct drm_plane *cursor_plane, int id) +{ + struct drm_crtc *crtc = NULL; + struct mdp5_crtc *mdp5_crtc; + + mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL); + if (!mdp5_crtc) + return ERR_PTR(-ENOMEM); + + crtc = &mdp5_crtc->base; + + mdp5_crtc->id = id; + + spin_lock_init(&mdp5_crtc->lm_lock); + spin_lock_init(&mdp5_crtc->cursor.lock); + init_completion(&mdp5_crtc->pp_completion); + + mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq; + mdp5_crtc->err.irq = mdp5_crtc_err_irq; + mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq; + + mdp5_crtc->lm_cursor_enabled = cursor_plane ? 
false : true; + + drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane, + cursor_plane ? + &mdp5_crtc_no_lm_cursor_funcs : + &mdp5_crtc_funcs, NULL); + + drm_flip_work_init(&mdp5_crtc->unref_cursor_work, + "unref cursor", unref_cursor_worker); + + drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs); + + return crtc; +} diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c new file mode 100644 index 000000000..1220f2b20 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c @@ -0,0 +1,764 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved. + */ + +#include "mdp5_kms.h" +#include "mdp5_ctl.h" + +/* + * CTL - MDP Control Pool Manager + * + * Controls are shared between all display interfaces. + * + * They are intended to be used for data path configuration. + * The top level register programming describes the complete data path for + * a specific data path ID - REG_MDP5_CTL_*(, ...) + * + * Hardware capabilities determine the number of concurrent data paths + * + * In certain use cases (high-resolution dual pipe), one single CTL can be + * shared across multiple CRTCs. + */ + +#define CTL_STAT_BUSY 0x1 +#define CTL_STAT_BOOKED 0x2 + +struct mdp5_ctl { + struct mdp5_ctl_manager *ctlm; + + u32 id; + + /* CTL status bitmask */ + u32 status; + + bool encoder_enabled; + + /* pending flush_mask bits */ + u32 flush_mask; + + /* REG_MDP5_CTL_*() registers access info + lock: */ + spinlock_t hw_lock; + u32 reg_offset; + + /* when do CTL registers need to be flushed? (mask of trigger bits) */ + u32 pending_ctl_trigger; + + bool cursor_on; + + /* True if the current CTL has FLUSH bits pending for single FLUSH. */ + bool flush_pending; + + struct mdp5_ctl *pair; /* Paired CTL to be flushed together */ +}; + +struct mdp5_ctl_manager { + struct drm_device *dev; + + /* number of CTL / Layer Mixers in this hw config: */ + u32 nlm; + u32 nctl; + + /* to filter out non-present bits in the current hardware config */ + u32 flush_hw_mask; + + /* status for single FLUSH */ + bool single_flush_supported; + u32 single_flush_pending_mask; + + /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */ + spinlock_t pool_lock; + struct mdp5_ctl ctls[MAX_CTL]; +}; + +static inline +struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr) +{ + struct msm_drm_private *priv = ctl_mgr->dev->dev_private; + + return to_mdp5_kms(to_mdp_kms(priv->kms)); +} + +static inline +void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data) +{ + struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm); + + (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */ + mdp5_write(mdp5_kms, reg, data); +} + +static inline +u32 ctl_read(struct mdp5_ctl *ctl, u32 reg) +{ + struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm); + + (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */ + return mdp5_read(mdp5_kms, reg); +} + +static void set_display_intf(struct mdp5_kms *mdp5_kms, + struct mdp5_interface *intf) +{ + unsigned long flags; + u32 intf_sel; + + spin_lock_irqsave(&mdp5_kms->resource_lock, flags); + intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL); + + switch (intf->num) { + case 0: + intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK; + intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type); + break; + case 1: + intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK; + intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type); + break; + case 2: + intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK; + intf_sel |= 
MDP5_DISP_INTF_SEL_INTF2(intf->type); + break; + case 3: + intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK; + intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type); + break; + default: + BUG(); + break; + } + + mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel); + spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags); +} + +static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline) +{ + unsigned long flags; + struct mdp5_interface *intf = pipeline->intf; + u32 ctl_op = 0; + + if (!mdp5_cfg_intf_is_virtual(intf->type)) + ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num); + + switch (intf->type) { + case INTF_DSI: + if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) + ctl_op |= MDP5_CTL_OP_CMD_MODE; + break; + + case INTF_WB: + if (intf->mode == MDP5_INTF_WB_MODE_LINE) + ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE); + break; + + default: + break; + } + + if (pipeline->r_mixer) + ctl_op |= MDP5_CTL_OP_PACK_3D_ENABLE | + MDP5_CTL_OP_PACK_3D(1); + + spin_lock_irqsave(&ctl->hw_lock, flags); + ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op); + spin_unlock_irqrestore(&ctl->hw_lock, flags); +} + +int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline) +{ + struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm); + struct mdp5_interface *intf = pipeline->intf; + + /* Virtual interfaces need not set a display intf (e.g.: Writeback) */ + if (!mdp5_cfg_intf_is_virtual(intf->type)) + set_display_intf(mdp5_kms, intf); + + set_ctl_op(ctl, pipeline); + + return 0; +} + +static bool start_signal_needed(struct mdp5_ctl *ctl, + struct mdp5_pipeline *pipeline) +{ + struct mdp5_interface *intf = pipeline->intf; + + if (!ctl->encoder_enabled) + return false; + + switch (intf->type) { + case INTF_WB: + return true; + case INTF_DSI: + return intf->mode == MDP5_INTF_DSI_MODE_COMMAND; + default: + return false; + } +} + +/* + * send_start_signal() - Overlay Processor Start Signal + * + * For a given control operation (display pipeline), a START signal needs to be + * executed in order to kick off operation and activate all layers. + * e.g.: DSI command mode, Writeback + */ +static void send_start_signal(struct mdp5_ctl *ctl) +{ + unsigned long flags; + + spin_lock_irqsave(&ctl->hw_lock, flags); + ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1); + spin_unlock_irqrestore(&ctl->hw_lock, flags); +} + +/** + * mdp5_ctl_set_encoder_state() - set the encoder state + * + * @ctl: the CTL instance + * @pipeline: the encoder's INTF + MIXER configuration + * @enabled: true, when encoder is ready for data streaming; false, otherwise. + * + * Note: + * This encoder state is needed to trigger START signal (data path kickoff). + */ +int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, + struct mdp5_pipeline *pipeline, + bool enabled) +{ + struct mdp5_interface *intf = pipeline->intf; + + if (WARN_ON(!ctl)) + return -EINVAL; + + ctl->encoder_enabled = enabled; + DBG("intf_%d: %s", intf->num, enabled ? 
"on" : "off"); + + if (start_signal_needed(ctl, pipeline)) { + send_start_signal(ctl); + } + + return 0; +} + +/* + * Note: + * CTL registers need to be flushed after calling this function + * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask) + */ +int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, + int cursor_id, bool enable) +{ + struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; + unsigned long flags; + u32 blend_cfg; + struct mdp5_hw_mixer *mixer = pipeline->mixer; + + if (WARN_ON(!mixer)) { + DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM", + ctl->id); + return -EINVAL; + } + + if (pipeline->r_mixer) { + DRM_DEV_ERROR(ctl_mgr->dev->dev, "unsupported configuration"); + return -EINVAL; + } + + spin_lock_irqsave(&ctl->hw_lock, flags); + + blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm)); + + if (enable) + blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT; + else + blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT; + + ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg); + ctl->cursor_on = enable; + + spin_unlock_irqrestore(&ctl->hw_lock, flags); + + ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id); + + return 0; +} + +static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe, + enum mdp_mixer_stage_id stage) +{ + switch (pipe) { + case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage); + case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage); + case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage); + case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage); + case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage); + case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage); + case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage); + case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage); + case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage); + case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage); + case SSPP_CURSOR0: + case SSPP_CURSOR1: + default: return 0; + } +} + +static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe, + enum mdp_mixer_stage_id stage) +{ + if (stage < STAGE6 && (pipe != SSPP_CURSOR0 && pipe != SSPP_CURSOR1)) + return 0; + + switch (pipe) { + case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3; + case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3; + case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3; + case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3; + case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3; + case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3; + case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3; + case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3; + case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3; + case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3; + case SSPP_CURSOR0: return MDP5_CTL_LAYER_EXT_REG_CURSOR0(stage); + case SSPP_CURSOR1: return MDP5_CTL_LAYER_EXT_REG_CURSOR1(stage); + default: return 0; + } +} + +static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl) +{ + unsigned long flags; + struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; + int i; + + spin_lock_irqsave(&ctl->hw_lock, flags); + + for (i = 0; i < ctl_mgr->nlm; i++) { + ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0); + ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0); + } + + spin_unlock_irqrestore(&ctl->hw_lock, flags); +} + +#define PIPE_LEFT 0 +#define PIPE_RIGHT 1 +int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, + enum mdp5_pipe stage[][MAX_PIPE_STAGE], + enum mdp5_pipe r_stage[][MAX_PIPE_STAGE], + u32 stage_cnt, u32 
ctl_blend_op_flags) +{ + struct mdp5_hw_mixer *mixer = pipeline->mixer; + struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer; + unsigned long flags; + u32 blend_cfg = 0, blend_ext_cfg = 0; + u32 r_blend_cfg = 0, r_blend_ext_cfg = 0; + int i, start_stage; + + mdp5_ctl_reset_blend_regs(ctl); + + if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) { + start_stage = STAGE0; + blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR; + if (r_mixer) + r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR; + } else { + start_stage = STAGE_BASE; + } + + for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) { + blend_cfg |= + mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) | + mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i); + blend_ext_cfg |= + mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) | + mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i); + if (r_mixer) { + r_blend_cfg |= + mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) | + mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i); + r_blend_ext_cfg |= + mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) | + mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i); + } + } + + spin_lock_irqsave(&ctl->hw_lock, flags); + if (ctl->cursor_on) + blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT; + + ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg); + ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm), + blend_ext_cfg); + if (r_mixer) { + ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm), + r_blend_cfg); + ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm), + r_blend_ext_cfg); + } + spin_unlock_irqrestore(&ctl->hw_lock, flags); + + ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm); + if (r_mixer) + ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm); + + DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm, + blend_cfg, blend_ext_cfg); + if (r_mixer) + DBG("lm%d: blend config = 0x%08x. 
ext_cfg = 0x%08x", + r_mixer->lm, r_blend_cfg, r_blend_ext_cfg); + + return 0; +} + +u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf) +{ + if (intf->type == INTF_WB) + return MDP5_CTL_FLUSH_WB; + + switch (intf->num) { + case 0: return MDP5_CTL_FLUSH_TIMING_0; + case 1: return MDP5_CTL_FLUSH_TIMING_1; + case 2: return MDP5_CTL_FLUSH_TIMING_2; + case 3: return MDP5_CTL_FLUSH_TIMING_3; + default: return 0; + } +} + +u32 mdp_ctl_flush_mask_cursor(int cursor_id) +{ + switch (cursor_id) { + case 0: return MDP5_CTL_FLUSH_CURSOR_0; + case 1: return MDP5_CTL_FLUSH_CURSOR_1; + default: return 0; + } +} + +u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe) +{ + switch (pipe) { + case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0; + case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1; + case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2; + case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0; + case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1; + case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2; + case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0; + case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1; + case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3; + case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3; + case SSPP_CURSOR0: return MDP5_CTL_FLUSH_CURSOR_0; + case SSPP_CURSOR1: return MDP5_CTL_FLUSH_CURSOR_1; + default: return 0; + } +} + +u32 mdp_ctl_flush_mask_lm(int lm) +{ + switch (lm) { + case 0: return MDP5_CTL_FLUSH_LM0; + case 1: return MDP5_CTL_FLUSH_LM1; + case 2: return MDP5_CTL_FLUSH_LM2; + case 3: return MDP5_CTL_FLUSH_LM3; + case 4: return MDP5_CTL_FLUSH_LM4; + case 5: return MDP5_CTL_FLUSH_LM5; + default: return 0; + } +} + +static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, + u32 flush_mask) +{ + struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; + u32 sw_mask = 0; +#define BIT_NEEDS_SW_FIX(bit) \ + (!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit)) + + /* for some targets, cursor bit is the same as LM bit */ + if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0)) + sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm); + + return sw_mask; +} + +static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask, + u32 *flush_id) +{ + struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; + + if (ctl->pair) { + DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask); + ctl->flush_pending = true; + ctl_mgr->single_flush_pending_mask |= (*flush_mask); + *flush_mask = 0; + + if (ctl->pair->flush_pending) { + *flush_id = min_t(u32, ctl->id, ctl->pair->id); + *flush_mask = ctl_mgr->single_flush_pending_mask; + + ctl->flush_pending = false; + ctl->pair->flush_pending = false; + ctl_mgr->single_flush_pending_mask = 0; + + DBG("Single FLUSH mask %x,ID %d", *flush_mask, + *flush_id); + } + } +} + +/** + * mdp5_ctl_commit() - Register Flush + * + * @ctl: the CTL instance + * @pipeline: the encoder's INTF + MIXER configuration + * @flush_mask: bitmask of display controller hw blocks to flush + * @start: if true, immediately update flush registers and set START + * bit, otherwise accumulate flush_mask bits until we are + * ready to START + * + * The flush register is used to indicate several registers are all + * programmed, and are safe to update to the back copy of the double + * buffered registers. + * + * Some registers FLUSH bits are shared when the hardware does not have + * dedicated bits for them; handling these is the job of fix_sw_flush(). + * + * CTL registers need to be flushed in some circumstances; if that is the + * case, some trigger bits will be present in both flush mask and + * ctl->pending_ctl_trigger. 
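+ *
+ * Example (a sketch, using the mask helpers defined later in this file):
+ *
+ *	flush_mask = mdp_ctl_flush_mask_pipe(SSPP_VIG0) |
+ *		     mdp_ctl_flush_mask_lm(0);
+ *	mdp5_ctl_commit(ctl, pipeline, flush_mask, true);
+ *
+ * commits MDP5_CTL_FLUSH_VIG0 | MDP5_CTL_FLUSH_LM0, plus MDP5_CTL_FLUSH_CTL
+ * when trigger bits were left pending by an earlier blend/cursor call.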
+ * + * Return H/W flushed bit mask. + */ +u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, + struct mdp5_pipeline *pipeline, + u32 flush_mask, bool start) +{ + struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; + unsigned long flags; + u32 flush_id = ctl->id; + u32 curr_ctl_flush_mask; + + VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger); + + if (ctl->pending_ctl_trigger & flush_mask) { + flush_mask |= MDP5_CTL_FLUSH_CTL; + ctl->pending_ctl_trigger = 0; + } + + flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask); + + flush_mask &= ctl_mgr->flush_hw_mask; + + curr_ctl_flush_mask = flush_mask; + + fix_for_single_flush(ctl, &flush_mask, &flush_id); + + if (!start) { + ctl->flush_mask |= flush_mask; + return curr_ctl_flush_mask; + } else { + flush_mask |= ctl->flush_mask; + ctl->flush_mask = 0; + } + + if (flush_mask) { + spin_lock_irqsave(&ctl->hw_lock, flags); + ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask); + spin_unlock_irqrestore(&ctl->hw_lock, flags); + } + + if (start_signal_needed(ctl, pipeline)) { + send_start_signal(ctl); + } + + return curr_ctl_flush_mask; +} + +u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl) +{ + return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id)); +} + +int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl) +{ + return WARN_ON(!ctl) ? -EINVAL : ctl->id; +} + +/* + * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH + */ +int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable) +{ + struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm; + struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr); + + /* do nothing silently if hw doesn't support */ + if (!ctl_mgr->single_flush_supported) + return 0; + + if (!enable) { + ctlx->pair = NULL; + ctly->pair = NULL; + mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0); + return 0; + } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) { + DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTLs already paired\n"); + return -EINVAL; + } else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) { + DRM_DEV_ERROR(ctl_mgr->dev->dev, "Only pair booked CTLs\n"); + return -EINVAL; + } + + ctlx->pair = ctly; + ctly->pair = ctlx; + + mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, + MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN); + + return 0; +} + +/* + * mdp5_ctl_request() - CTL allocation + * + * Try to return booked CTL for @intf_num is 1 or 2, unbooked for other INTFs. + * If no CTL is available in preferred category, allocate from the other one. + * + * @return fail if no CTL is available. + */ +struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr, + int intf_num) +{ + struct mdp5_ctl *ctl = NULL; + const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED; + u32 match = ((intf_num == 1) || (intf_num == 2)) ? 
CTL_STAT_BOOKED : 0; + unsigned long flags; + int c; + + spin_lock_irqsave(&ctl_mgr->pool_lock, flags); + + /* search the preferred */ + for (c = 0; c < ctl_mgr->nctl; c++) + if ((ctl_mgr->ctls[c].status & checkm) == match) + goto found; + + dev_warn(ctl_mgr->dev->dev, + "fall back to the other CTL category for INTF %d!\n", intf_num); + + match ^= CTL_STAT_BOOKED; + for (c = 0; c < ctl_mgr->nctl; c++) + if ((ctl_mgr->ctls[c].status & checkm) == match) + goto found; + + DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!"); + goto unlock; + +found: + ctl = &ctl_mgr->ctls[c]; + ctl->status |= CTL_STAT_BUSY; + ctl->pending_ctl_trigger = 0; + DBG("CTL %d allocated", ctl->id); + +unlock: + spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); + return ctl; +} + +void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr) +{ + unsigned long flags; + int c; + + for (c = 0; c < ctl_mgr->nctl; c++) { + struct mdp5_ctl *ctl = &ctl_mgr->ctls[c]; + + spin_lock_irqsave(&ctl->hw_lock, flags); + ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0); + spin_unlock_irqrestore(&ctl->hw_lock, flags); + } +} + +void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr) +{ + kfree(ctl_mgr); +} + +struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, + void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd) +{ + struct mdp5_ctl_manager *ctl_mgr; + const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd); + int rev = mdp5_cfg_get_hw_rev(cfg_hnd); + unsigned dsi_cnt = 0; + const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl; + unsigned long flags; + int c, ret; + + ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL); + if (!ctl_mgr) { + DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n"); + ret = -ENOMEM; + goto fail; + } + + if (WARN_ON(ctl_cfg->count > MAX_CTL)) { + DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n", + ctl_cfg->count); + ret = -ENOSPC; + goto fail; + } + + /* initialize the CTL manager: */ + ctl_mgr->dev = dev; + ctl_mgr->nlm = hw_cfg->lm.count; + ctl_mgr->nctl = ctl_cfg->count; + ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask; + spin_lock_init(&ctl_mgr->pool_lock); + + /* initialize each CTL of the pool: */ + spin_lock_irqsave(&ctl_mgr->pool_lock, flags); + for (c = 0; c < ctl_mgr->nctl; c++) { + struct mdp5_ctl *ctl = &ctl_mgr->ctls[c]; + + if (WARN_ON(!ctl_cfg->base[c])) { + DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c); + ret = -EINVAL; + spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); + goto fail; + } + ctl->ctlm = ctl_mgr; + ctl->id = c; + ctl->reg_offset = ctl_cfg->base[c]; + ctl->status = 0; + spin_lock_init(&ctl->hw_lock); + } + + /* + * In bonded DSI case, CTL0 and CTL1 are always assigned to two DSI + * interfaces to support single FLUSH feature (Flush CTL0 and CTL1 when + * only write into CTL0's FLUSH register) to keep two DSI pipes in sync. + * Single FLUSH is supported from hw rev v3.0. 
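+ *
+ * (In that case fix_for_single_flush() parks the first CTL's mask in
+ * single_flush_pending_mask, and the paired commit writes the combined
+ * mask through the FLUSH register of the lower CTL id.)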
+ */ + for (c = 0; c < ARRAY_SIZE(hw_cfg->intf.connect); c++) + if (hw_cfg->intf.connect[c] == INTF_DSI) + dsi_cnt++; + if ((rev >= 3) && (dsi_cnt > 1)) { + ctl_mgr->single_flush_supported = true; + /* Reserve CTL0/1 for INTF1/2 */ + ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED; + ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED; + } + spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); + DBG("Pool of %d CTLs created.", ctl_mgr->nctl); + + return ctl_mgr; + +fail: + if (ctl_mgr) + mdp5_ctlm_destroy(ctl_mgr); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h new file mode 100644 index 000000000..c2af68aa7 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014 The Linux Foundation. All rights reserved. + */ + +#ifndef __MDP5_CTL_H__ +#define __MDP5_CTL_H__ + +#include "msm_drv.h" + +/* + * CTL Manager prototypes: + * mdp5_ctlm_init() returns a ctlm (CTL Manager) handler, + * which is then used to call the other mdp5_ctlm_*(ctlm, ...) functions. + */ +struct mdp5_ctl_manager; +struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, + void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd); +void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm); +void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm); + +/* + * CTL prototypes: + * mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler, + * which is then used to call the other mdp5_ctl_*(ctl, ...) functions. + */ +struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, int intf_num); + +int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl); + +struct mdp5_interface; +struct mdp5_pipeline; +int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *p); +int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, struct mdp5_pipeline *p, + bool enabled); + +int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, + int cursor_id, bool enable); +int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable); + +#define MAX_PIPE_STAGE 2 + +/* + * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM) + * + * @stage: array to contain the pipe num for each stage + * @stage_cnt: valid stage number in stage array + * @ctl_blend_op_flags: blender operation mode flags + * + * Note: + * CTL registers need to be flushed after calling this function + * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask) + */ +#define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0) +int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, + enum mdp5_pipe stage[][MAX_PIPE_STAGE], + enum mdp5_pipe r_stage[][MAX_PIPE_STAGE], + u32 stage_cnt, u32 ctl_blend_op_flags); + +/** + * mdp_ctl_flush_mask...() - Register FLUSH masks + * + * These masks are used to specify which block(s) need to be flushed + * through @flush_mask parameter in mdp5_ctl_commit(.., flush_mask). 
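+ *
+ * E.g. a video mode encoder on INTF1 driving LM0 flushes with
+ *
+ *	mdp_ctl_flush_mask_encoder(intf) | mdp_ctl_flush_mask_lm(0)
+ *
+ * which evaluates to MDP5_CTL_FLUSH_TIMING_1 | MDP5_CTL_FLUSH_LM0.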
+ */ +u32 mdp_ctl_flush_mask_lm(int lm); +u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe); +u32 mdp_ctl_flush_mask_cursor(int cursor_id); +u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf); + +/* @flush_mask: see CTL flush masks definitions below */ +u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, + u32 flush_mask, bool start); +u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl); + + + +#endif /* __MDP5_CTL_H__ */ diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c new file mode 100644 index 000000000..79d67c495 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c @@ -0,0 +1,370 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Copyright (C) 2013 Red Hat + * Author: Rob Clark <robdclark@gmail.com> + */ + +#include <drm/drm_crtc.h> +#include <drm/drm_probe_helper.h> + +#include "mdp5_kms.h" + +static struct mdp5_kms *get_kms(struct drm_encoder *encoder) +{ + struct msm_drm_private *priv = encoder->dev->dev_private; + return to_mdp5_kms(to_mdp_kms(priv->kms)); +} + +static void mdp5_encoder_destroy(struct drm_encoder *encoder) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + drm_encoder_cleanup(encoder); + kfree(mdp5_encoder); +} + +static const struct drm_encoder_funcs mdp5_encoder_funcs = { + .destroy = mdp5_encoder_destroy, +}; + +static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_kms *mdp5_kms = get_kms(encoder); + struct drm_device *dev = encoder->dev; + struct drm_connector *connector; + int intf = mdp5_encoder->intf->num; + uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol; + uint32_t display_v_start, display_v_end; + uint32_t hsync_start_x, hsync_end_x; + uint32_t format = 0x2100; + unsigned long flags; + + mode = adjusted_mode; + + DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode)); + + ctrl_pol = 0; + + /* DSI controller cannot handle active-low sync signals. */ + if (mdp5_encoder->intf->type != INTF_DSI) { + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + ctrl_pol |= MDP5_INTF_POLARITY_CTL_VSYNC_LOW; + } + /* probably need to get DATA_EN polarity from panel.. */ + + dtv_hsync_skew = 0; /* get this from panel?
*/ + + /* Get color format from panel, default is 8bpc */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) { + switch (connector->display_info.bpc) { + case 4: + format |= 0; + break; + case 5: + format |= 0x15; + break; + case 6: + format |= 0x2A; + break; + case 8: + default: + format |= 0x3F; + break; + } + break; + } + } + + hsync_start_x = (mode->htotal - mode->hsync_start); + hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1; + + vsync_period = mode->vtotal * mode->htotal; + vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal; + display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew; + display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1; + + /* + * For edp only: + * DISPLAY_V_START = (VBP * HCYCLE) + HBP + * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP + */ + if (mdp5_encoder->intf->type == INTF_eDP) { + display_v_start += mode->htotal - mode->hsync_start; + display_v_end -= mode->hsync_start - mode->hdisplay; + } + + spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); + + mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf), + MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) | + MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal)); + mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_PERIOD_F0(intf), vsync_period); + mdp5_write(mdp5_kms, REG_MDP5_INTF_VSYNC_LEN_F0(intf), vsync_len); + mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_HCTL(intf), + MDP5_INTF_DISPLAY_HCTL_START(hsync_start_x) | + MDP5_INTF_DISPLAY_HCTL_END(hsync_end_x)); + mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VSTART_F0(intf), display_v_start); + mdp5_write(mdp5_kms, REG_MDP5_INTF_DISPLAY_VEND_F0(intf), display_v_end); + mdp5_write(mdp5_kms, REG_MDP5_INTF_BORDER_COLOR(intf), 0); + mdp5_write(mdp5_kms, REG_MDP5_INTF_UNDERFLOW_COLOR(intf), 0xff); + mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_SKEW(intf), dtv_hsync_skew); + mdp5_write(mdp5_kms, REG_MDP5_INTF_POLARITY_CTL(intf), ctrl_pol); + mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_HCTL(intf), + MDP5_INTF_ACTIVE_HCTL_START(0) | + MDP5_INTF_ACTIVE_HCTL_END(0)); + mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VSTART_F0(intf), 0); + mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0); + mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format); + mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? 
*/ + + spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); + + mdp5_crtc_set_pipeline(encoder->crtc); +} + +static void mdp5_vid_encoder_disable(struct drm_encoder *encoder) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_kms *mdp5_kms = get_kms(encoder); + struct mdp5_ctl *ctl = mdp5_encoder->ctl; + struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc); + struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc); + struct mdp5_interface *intf = mdp5_encoder->intf; + int intfn = mdp5_encoder->intf->num; + unsigned long flags; + + if (WARN_ON(!mdp5_encoder->enabled)) + return; + + mdp5_ctl_set_encoder_state(ctl, pipeline, false); + + spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); + mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0); + spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); + mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true); + + /* + * Wait for a vsync so we know the ENABLE=0 latched before + * the (connector) source of the vsync's gets disabled, + * otherwise we end up in a funny state if we re-enable + * before the disable latches, which results that some of + * the settings changes for the new modeset (like new + * scanout buffer) don't latch properly.. + */ + mdp_irq_wait(&mdp5_kms->base, intf2vblank(mixer, intf)); + + mdp5_encoder->enabled = false; +} + +static void mdp5_vid_encoder_enable(struct drm_encoder *encoder) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_kms *mdp5_kms = get_kms(encoder); + struct mdp5_ctl *ctl = mdp5_encoder->ctl; + struct mdp5_interface *intf = mdp5_encoder->intf; + struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc); + int intfn = intf->num; + unsigned long flags; + + if (WARN_ON(mdp5_encoder->enabled)) + return; + + spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); + mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1); + spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); + mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true); + + mdp5_ctl_set_encoder_state(ctl, pipeline, true); + + mdp5_encoder->enabled = true; +} + +static void mdp5_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_interface *intf = mdp5_encoder->intf; + + if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) + mdp5_cmd_encoder_mode_set(encoder, mode, adjusted_mode); + else + mdp5_vid_encoder_mode_set(encoder, mode, adjusted_mode); +} + +static void mdp5_encoder_disable(struct drm_encoder *encoder) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_interface *intf = mdp5_encoder->intf; + + if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) + mdp5_cmd_encoder_disable(encoder); + else + mdp5_vid_encoder_disable(encoder); +} + +static void mdp5_encoder_enable(struct drm_encoder *encoder) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_interface *intf = mdp5_encoder->intf; + /* this isn't right I think */ + struct drm_crtc_state *cstate = encoder->crtc->state; + + mdp5_encoder_mode_set(encoder, &cstate->mode, &cstate->adjusted_mode); + + if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) + mdp5_cmd_encoder_enable(encoder); + else + mdp5_vid_encoder_enable(encoder); +} + +static int mdp5_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct 
drm_connector_state *conn_state) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc_state); + struct mdp5_interface *intf = mdp5_encoder->intf; + struct mdp5_ctl *ctl = mdp5_encoder->ctl; + + mdp5_cstate->ctl = ctl; + mdp5_cstate->pipeline.intf = intf; + + /* + * This is a bit awkward, but we want to flush the CTL and hit the + * START bit at most once for an atomic update. In the non-full- + * modeset case, this is done from crtc->atomic_flush(), but that + * is too early in the case of full modeset, in which case we + * defer to encoder->enable(). But we need to *know* whether + * encoder->enable() will be called to do this: + */ + if (drm_atomic_crtc_needs_modeset(crtc_state)) + mdp5_cstate->defer_start = true; + + return 0; +} + +static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { + .disable = mdp5_encoder_disable, + .enable = mdp5_encoder_enable, + .atomic_check = mdp5_encoder_atomic_check, +}; + +int mdp5_encoder_get_linecount(struct drm_encoder *encoder) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_kms *mdp5_kms = get_kms(encoder); + int intf = mdp5_encoder->intf->num; + + return mdp5_read(mdp5_kms, REG_MDP5_INTF_LINE_COUNT(intf)); +} + +u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_kms *mdp5_kms = get_kms(encoder); + int intf = mdp5_encoder->intf->num; + + return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf)); +} + +int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder, + struct drm_encoder *slave_encoder) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder); + struct mdp5_kms *mdp5_kms; + struct device *dev; + int intf_num; + u32 data = 0; + + if (!encoder || !slave_encoder) + return -EINVAL; + + mdp5_kms = get_kms(encoder); + intf_num = mdp5_encoder->intf->num; + + /* Switch slave encoder's TimingGen Sync mode, + * to use the master's enable signal for the slave encoder. + */ + if (intf_num == 1) + data |= MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC; + else if (intf_num == 2) + data |= MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC; + else + return -EINVAL; + + dev = &mdp5_kms->pdev->dev; + /* Make sure clocks are on when connectors calling this function. */ + pm_runtime_get_sync(dev); + + /* Dumb Panel, Sync mode */ + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0); + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data); + mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1); + + mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true); + + pm_runtime_put_sync(dev); + + return 0; +} + +void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_interface *intf = mdp5_encoder->intf; + + /* TODO: Expand this to set writeback modes too */ + if (cmd_mode) { + WARN_ON(intf->type != INTF_DSI); + intf->mode = MDP5_INTF_DSI_MODE_COMMAND; + } else { + if (intf->type == INTF_DSI) + intf->mode = MDP5_INTF_DSI_MODE_VIDEO; + else + intf->mode = MDP5_INTF_MODE_NONE; + } +} + +/* initialize encoder */ +struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, + struct mdp5_interface *intf, + struct mdp5_ctl *ctl) +{ + struct drm_encoder *encoder = NULL; + struct mdp5_encoder *mdp5_encoder; + int enc_type = (intf->type == INTF_DSI) ? 
+ DRM_MODE_ENCODER_DSI : DRM_MODE_ENCODER_TMDS; + int ret; + + mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL); + if (!mdp5_encoder) { + ret = -ENOMEM; + goto fail; + } + + encoder = &mdp5_encoder->base; + mdp5_encoder->ctl = ctl; + mdp5_encoder->intf = intf; + + spin_lock_init(&mdp5_encoder->intf_lock); + + drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type, NULL); + + drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs); + + return encoder; + +fail: + if (encoder) + mdp5_encoder_destroy(encoder); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c new file mode 100644 index 000000000..9b4c8d92f --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark <robdclark@gmail.com> + */ + +#include <linux/irq.h> + +#include <drm/drm_print.h> +#include <drm/drm_vblank.h> + +#include "msm_drv.h" +#include "mdp5_kms.h" + +void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, + uint32_t old_irqmask) +{ + mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_CLEAR, + irqmask ^ (irqmask & old_irqmask)); + mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask); +} + +static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) +{ + struct mdp5_kms *mdp5_kms = container_of(irq, struct mdp5_kms, error_handler); + static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1); + extern bool dumpstate; + + DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus); + + if (dumpstate && __ratelimit(&rs)) { + struct drm_printer p = drm_info_printer(mdp5_kms->dev->dev); + drm_state_dump(mdp5_kms->dev, &p); + if (mdp5_kms->smp) + mdp5_smp_dump(mdp5_kms->smp, &p); + } +} + +void mdp5_irq_preinstall(struct msm_kms *kms) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + struct device *dev = &mdp5_kms->pdev->dev; + + pm_runtime_get_sync(dev); + mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff); + mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); + pm_runtime_put_sync(dev); +} + +int mdp5_irq_postinstall(struct msm_kms *kms) +{ + struct mdp_kms *mdp_kms = to_mdp_kms(kms); + struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms); + struct device *dev = &mdp5_kms->pdev->dev; + struct mdp_irq *error_handler = &mdp5_kms->error_handler; + + error_handler->irq = mdp5_irq_error_handler; + error_handler->irqmask = MDP5_IRQ_INTF0_UNDER_RUN | + MDP5_IRQ_INTF1_UNDER_RUN | + MDP5_IRQ_INTF2_UNDER_RUN | + MDP5_IRQ_INTF3_UNDER_RUN; + + pm_runtime_get_sync(dev); + mdp_irq_register(mdp_kms, error_handler); + pm_runtime_put_sync(dev); + + return 0; +} + +void mdp5_irq_uninstall(struct msm_kms *kms) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + struct device *dev = &mdp5_kms->pdev->dev; + + pm_runtime_get_sync(dev); + mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000); + pm_runtime_put_sync(dev); +} + +irqreturn_t mdp5_irq(struct msm_kms *kms) +{ + struct mdp_kms *mdp_kms = to_mdp_kms(kms); + struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms); + struct drm_device *dev = mdp5_kms->dev; + struct msm_drm_private *priv = dev->dev_private; + unsigned int id; + uint32_t status, enable; + + enable = mdp5_read(mdp5_kms, REG_MDP5_INTR_EN); + status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS) & enable; + mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status); + + VERB("status=%08x", status); + + mdp_dispatch_irqs(mdp_kms, status); + + for (id = 0; id < priv->num_crtcs; id++) + if (status & mdp5_crtc_vblank(priv->crtcs[id])) + drm_handle_vblank(dev, id); + + return IRQ_HANDLED;
+} + +int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + struct device *dev = &mdp5_kms->pdev->dev; + + pm_runtime_get_sync(dev); + mdp_update_vblank_mask(to_mdp_kms(kms), + mdp5_crtc_vblank(crtc), true); + pm_runtime_put_sync(dev); + + return 0; +} + +void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + struct device *dev = &mdp5_kms->pdev->dev; + + pm_runtime_get_sync(dev); + mdp_update_vblank_mask(to_mdp_kms(kms), + mdp5_crtc_vblank(crtc), false); + pm_runtime_put_sync(dev); +} diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c new file mode 100644 index 000000000..29ae5c961 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -0,0 +1,1009 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Copyright (C) 2013 Red Hat + * Author: Rob Clark <robdclark@gmail.com> + */ + +#include <linux/delay.h> +#include <linux/interconnect.h> +#include <linux/of_irq.h> + +#include <drm/drm_debugfs.h> +#include <drm/drm_drv.h> +#include <drm/drm_file.h> +#include <drm/drm_vblank.h> + +#include "msm_drv.h" +#include "msm_gem.h" +#include "msm_mmu.h" +#include "mdp5_kms.h" + +static int mdp5_hw_init(struct msm_kms *kms) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + struct device *dev = &mdp5_kms->pdev->dev; + unsigned long flags; + + pm_runtime_get_sync(dev); + + /* Magic unknown register writes: + * + * W VBIF:0x004 00000001 (mdss_mdp.c:839) + * W MDP5:0x2e0 0xe9 (mdss_mdp.c:839) + * W MDP5:0x2e4 0x55 (mdss_mdp.c:839) + * W MDP5:0x3ac 0xc0000ccc (mdss_mdp.c:839) + * W MDP5:0x3b4 0xc0000ccc (mdss_mdp.c:839) + * W MDP5:0x3bc 0xcccccc (mdss_mdp.c:839) + * W MDP5:0x4a8 0xcccc0c0 (mdss_mdp.c:839) + * W MDP5:0x4b0 0xccccc0c0 (mdss_mdp.c:839) + * W MDP5:0x4b8 0xccccc000 (mdss_mdp.c:839) + * + * Downstream fbdev driver gets these register offsets/values + * from DT.. not really sure what these registers are or if + * different values for different boards/SoC's, etc. I guess + * they are the golden registers. + * + * Not setting these does not seem to cause any problem. But + * we may be getting lucky with the bootloader initializing + * them for us. OTOH, if we can always count on the bootloader + * setting the golden registers, then perhaps we don't need to + * care. + */ + + spin_lock_irqsave(&mdp5_kms->resource_lock, flags); + mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0); + spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags); + + mdp5_ctlm_hw_reset(mdp5_kms->ctlm); + + pm_runtime_put_sync(dev); + + return 0; +} + +/* Global/shared object state funcs */ + +/* + * This is a helper that returns the private state currently in operation. + * Note that this would return the "old_state" if called in the atomic check + * path, and the "new_state" after the atomic swap has been done. + */ +struct mdp5_global_state * +mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms) +{ + return to_mdp5_global_state(mdp5_kms->glob_state.state); +} + +/* + * This acquires the modeset lock set aside for global state, creates + * a new duplicated private object state.
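+ * May return an ERR_PTR() (e.g. -EDEADLK from drm_modeset_lock() under + * contention), which callers must propagate so that the atomic framework + * can back off and retry the commit.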
+ */ +struct mdp5_global_state *mdp5_get_global_state(struct drm_atomic_state *s) +{ + struct msm_drm_private *priv = s->dev->dev_private; + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); + struct drm_private_state *priv_state; + int ret; + + ret = drm_modeset_lock(&mdp5_kms->glob_state_lock, s->acquire_ctx); + if (ret) + return ERR_PTR(ret); + + priv_state = drm_atomic_get_private_obj_state(s, &mdp5_kms->glob_state); + if (IS_ERR(priv_state)) + return ERR_CAST(priv_state); + + return to_mdp5_global_state(priv_state); +} + +static struct drm_private_state * +mdp5_global_duplicate_state(struct drm_private_obj *obj) +{ + struct mdp5_global_state *state; + + state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); + + return &state->base; +} + +static void mdp5_global_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state); + + kfree(mdp5_state); +} + +static const struct drm_private_state_funcs mdp5_global_state_funcs = { + .atomic_duplicate_state = mdp5_global_duplicate_state, + .atomic_destroy_state = mdp5_global_destroy_state, +}; + +static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms) +{ + struct mdp5_global_state *state; + + drm_modeset_lock_init(&mdp5_kms->glob_state_lock); + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + state->mdp5_kms = mdp5_kms; + + drm_atomic_private_obj_init(mdp5_kms->dev, &mdp5_kms->glob_state, + &state->base, + &mdp5_global_state_funcs); + return 0; +} + +static void mdp5_enable_commit(struct msm_kms *kms) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + pm_runtime_get_sync(&mdp5_kms->pdev->dev); +} + +static void mdp5_disable_commit(struct msm_kms *kms) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + pm_runtime_put_sync(&mdp5_kms->pdev->dev); +} + +static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + struct mdp5_global_state *global_state; + + global_state = mdp5_get_existing_global_state(mdp5_kms); + + if (mdp5_kms->smp) + mdp5_smp_prepare_commit(mdp5_kms->smp, &global_state->smp); +} + +static void mdp5_flush_commit(struct msm_kms *kms, unsigned crtc_mask) +{ + /* TODO */ +} + +static void mdp5_wait_flush(struct msm_kms *kms, unsigned crtc_mask) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + struct drm_crtc *crtc; + + for_each_crtc_mask(mdp5_kms->dev, crtc, crtc_mask) + mdp5_crtc_wait_for_commit_done(crtc); +} + +static void mdp5_complete_commit(struct msm_kms *kms, unsigned crtc_mask) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + struct mdp5_global_state *global_state; + + global_state = mdp5_get_existing_global_state(mdp5_kms); + + if (mdp5_kms->smp) + mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp); +} + +static int mdp5_set_split_display(struct msm_kms *kms, + struct drm_encoder *encoder, + struct drm_encoder *slave_encoder, + bool is_cmd_mode) +{ + if (is_cmd_mode) + return mdp5_cmd_encoder_set_split_display(encoder, + slave_encoder); + else + return mdp5_vid_encoder_set_split_display(encoder, + slave_encoder); +} + +static void mdp5_destroy(struct mdp5_kms *mdp5_kms); + +static void mdp5_kms_destroy(struct msm_kms *kms) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + struct msm_gem_address_space *aspace = 
kms->aspace; + int i; + + for (i = 0; i < mdp5_kms->num_hwmixers; i++) + mdp5_mixer_destroy(mdp5_kms->hwmixers[i]); + + for (i = 0; i < mdp5_kms->num_hwpipes; i++) + mdp5_pipe_destroy(mdp5_kms->hwpipes[i]); + + if (aspace) { + aspace->mmu->funcs->detach(aspace->mmu); + msm_gem_address_space_put(aspace); + } + + mdp_kms_destroy(&mdp5_kms->base); + mdp5_destroy(mdp5_kms); +} + +#ifdef CONFIG_DEBUG_FS +static int smp_show(struct seq_file *m, void *arg) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct msm_drm_private *priv = dev->dev_private; + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); + struct drm_printer p = drm_seq_file_printer(m); + + if (!mdp5_kms->smp) { + drm_printf(&p, "no SMP pool\n"); + return 0; + } + + mdp5_smp_dump(mdp5_kms->smp, &p); + + return 0; +} + +static struct drm_info_list mdp5_debugfs_list[] = { + {"smp", smp_show }, +}; + +static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) +{ + drm_debugfs_create_files(mdp5_debugfs_list, + ARRAY_SIZE(mdp5_debugfs_list), + minor->debugfs_root, minor); + + return 0; +} +#endif + +static const struct mdp_kms_funcs kms_funcs = { + .base = { + .hw_init = mdp5_hw_init, + .irq_preinstall = mdp5_irq_preinstall, + .irq_postinstall = mdp5_irq_postinstall, + .irq_uninstall = mdp5_irq_uninstall, + .irq = mdp5_irq, + .enable_vblank = mdp5_enable_vblank, + .disable_vblank = mdp5_disable_vblank, + .flush_commit = mdp5_flush_commit, + .enable_commit = mdp5_enable_commit, + .disable_commit = mdp5_disable_commit, + .prepare_commit = mdp5_prepare_commit, + .wait_flush = mdp5_wait_flush, + .complete_commit = mdp5_complete_commit, + .get_format = mdp_get_format, + .set_split_display = mdp5_set_split_display, + .destroy = mdp5_kms_destroy, +#ifdef CONFIG_DEBUG_FS + .debugfs_init = mdp5_kms_debugfs_init, +#endif + }, + .set_irqmask = mdp5_set_irqmask, +}; + +static int mdp5_disable(struct mdp5_kms *mdp5_kms) +{ + DBG(""); + + mdp5_kms->enable_count--; + WARN_ON(mdp5_kms->enable_count < 0); + + clk_disable_unprepare(mdp5_kms->tbu_rt_clk); + clk_disable_unprepare(mdp5_kms->tbu_clk); + clk_disable_unprepare(mdp5_kms->ahb_clk); + clk_disable_unprepare(mdp5_kms->axi_clk); + clk_disable_unprepare(mdp5_kms->core_clk); + clk_disable_unprepare(mdp5_kms->lut_clk); + + return 0; +} + +static int mdp5_enable(struct mdp5_kms *mdp5_kms) +{ + DBG(""); + + mdp5_kms->enable_count++; + + clk_prepare_enable(mdp5_kms->ahb_clk); + clk_prepare_enable(mdp5_kms->axi_clk); + clk_prepare_enable(mdp5_kms->core_clk); + clk_prepare_enable(mdp5_kms->lut_clk); + clk_prepare_enable(mdp5_kms->tbu_clk); + clk_prepare_enable(mdp5_kms->tbu_rt_clk); + + return 0; +} + +static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms, + struct mdp5_interface *intf, + struct mdp5_ctl *ctl) +{ + struct drm_device *dev = mdp5_kms->dev; + struct drm_encoder *encoder; + + encoder = mdp5_encoder_init(dev, intf, ctl); + if (IS_ERR(encoder)) { + DRM_DEV_ERROR(dev->dev, "failed to construct encoder\n"); + return encoder; + } + + return encoder; +} + +static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num) +{ + const enum mdp5_intf_type *intfs = hw_cfg->intf.connect; + const int intf_cnt = ARRAY_SIZE(hw_cfg->intf.connect); + int id = 0, i; + + for (i = 0; i < intf_cnt; i++) { + if (intfs[i] == INTF_DSI) { + if (intf_num == i) + return id; + + id++; + } + } + + return -EINVAL; +} + +static int modeset_init_intf(struct mdp5_kms *mdp5_kms, + struct 
mdp5_interface *intf) +{ + struct drm_device *dev = mdp5_kms->dev; + struct msm_drm_private *priv = dev->dev_private; + struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm; + struct mdp5_ctl *ctl; + struct drm_encoder *encoder; + int ret = 0; + + switch (intf->type) { + case INTF_eDP: + DRM_DEV_INFO(dev->dev, "Skipping eDP interface %d\n", intf->num); + break; + case INTF_HDMI: + if (!priv->hdmi) + break; + + ctl = mdp5_ctlm_request(ctlm, intf->num); + if (!ctl) { + ret = -EINVAL; + break; + } + + encoder = construct_encoder(mdp5_kms, intf, ctl); + if (IS_ERR(encoder)) { + ret = PTR_ERR(encoder); + break; + } + + ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder); + break; + case INTF_DSI: + { + const struct mdp5_cfg_hw *hw_cfg = + mdp5_cfg_get_hw_config(mdp5_kms->cfg); + int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num); + + if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) { + DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n", + intf->num); + ret = -EINVAL; + break; + } + + if (!priv->dsi[dsi_id]) + break; + + ctl = mdp5_ctlm_request(ctlm, intf->num); + if (!ctl) { + ret = -EINVAL; + break; + } + + encoder = construct_encoder(mdp5_kms, intf, ctl); + if (IS_ERR(encoder)) { + ret = PTR_ERR(encoder); + break; + } + + ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder); + if (!ret) + mdp5_encoder_set_intf_mode(encoder, msm_dsi_is_cmd_mode(priv->dsi[dsi_id])); + + break; + } + default: + DRM_DEV_ERROR(dev->dev, "unknown intf: %d\n", intf->type); + ret = -EINVAL; + break; + } + + return ret; +} + +static int modeset_init(struct mdp5_kms *mdp5_kms) +{ + struct drm_device *dev = mdp5_kms->dev; + struct msm_drm_private *priv = dev->dev_private; + unsigned int num_crtcs; + int i, ret, pi = 0, ci = 0; + struct drm_plane *primary[MAX_BASES] = { NULL }; + struct drm_plane *cursor[MAX_BASES] = { NULL }; + struct drm_encoder *encoder; + unsigned int num_encoders; + + /* + * Construct encoders and modeset initialize connector devices + * for each external display interface. + */ + for (i = 0; i < mdp5_kms->num_intfs; i++) { + ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]); + if (ret) + goto fail; + } + + num_encoders = 0; + drm_for_each_encoder(encoder, dev) + num_encoders++; + + /* + * We should ideally have less number of encoders (set up by parsing + * the MDP5 interfaces) than the number of layer mixers present in HW, + * but let's be safe here anyway + */ + num_crtcs = min(num_encoders, mdp5_kms->num_hwmixers); + + /* + * Construct planes equaling the number of hw pipes, and CRTCs for the + * N encoders set up by the driver. 
The first N planes become primary + * planes for the CRTCs, with the remainder as overlay planes: + */ + for (i = 0; i < mdp5_kms->num_hwpipes; i++) { + struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i]; + struct drm_plane *plane; + enum drm_plane_type type; + + if (i < num_crtcs) + type = DRM_PLANE_TYPE_PRIMARY; + else if (hwpipe->caps & MDP_PIPE_CAP_CURSOR) + type = DRM_PLANE_TYPE_CURSOR; + else + type = DRM_PLANE_TYPE_OVERLAY; + + plane = mdp5_plane_init(dev, type); + if (IS_ERR(plane)) { + ret = PTR_ERR(plane); + DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret); + goto fail; + } + + if (type == DRM_PLANE_TYPE_PRIMARY) + primary[pi++] = plane; + if (type == DRM_PLANE_TYPE_CURSOR) + cursor[ci++] = plane; + } + + for (i = 0; i < num_crtcs; i++) { + struct drm_crtc *crtc; + + crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i); + if (IS_ERR(crtc)) { + ret = PTR_ERR(crtc); + DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret); + goto fail; + } + priv->crtcs[priv->num_crtcs++] = crtc; + } + + /* + * Now that we know the number of crtcs we've created, set the possible + * crtcs for the encoders + */ + drm_for_each_encoder(encoder, dev) + encoder->possible_crtcs = (1 << priv->num_crtcs) - 1; + + return 0; + +fail: + return ret; +} + +static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms, + u32 *major, u32 *minor) +{ + struct device *dev = &mdp5_kms->pdev->dev; + u32 version; + + pm_runtime_get_sync(dev); + version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION); + pm_runtime_put_sync(dev); + + *major = FIELD(version, MDP5_HW_VERSION_MAJOR); + *minor = FIELD(version, MDP5_HW_VERSION_MINOR); + + DRM_DEV_INFO(dev, "MDP5 version v%d.%d", *major, *minor); +} + +static int get_clk(struct platform_device *pdev, struct clk **clkp, + const char *name, bool mandatory) +{ + struct device *dev = &pdev->dev; + struct clk *clk = msm_clk_get(pdev, name); + if (IS_ERR(clk) && mandatory) { + DRM_DEV_ERROR(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk)); + return PTR_ERR(clk); + } + if (IS_ERR(clk)) + DBG("skipping %s", name); + else + *clkp = clk; + + return 0; +} + +static int mdp5_init(struct platform_device *pdev, struct drm_device *dev); + +static int mdp5_kms_init(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + struct platform_device *pdev; + struct mdp5_kms *mdp5_kms; + struct mdp5_cfg *config; + struct msm_kms *kms; + struct msm_gem_address_space *aspace; + int irq, i, ret; + + ret = mdp5_init(to_platform_device(dev->dev), dev); + if (ret) + return ret; + + /* priv->kms would have been populated by the MDP5 driver */ + kms = priv->kms; + if (!kms) + return -ENOMEM; + + mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + pdev = mdp5_kms->pdev; + + ret = mdp_kms_init(&mdp5_kms->base, &kms_funcs); + if (ret) { + DRM_DEV_ERROR(&pdev->dev, "failed to init kms\n"); + goto fail; + } + + irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + if (!irq) { + ret = -EINVAL; + DRM_DEV_ERROR(&pdev->dev, "failed to get irq\n"); + goto fail; + } + + kms->irq = irq; + + config = mdp5_cfg_get_config(mdp5_kms->cfg); + + /* make sure things are off before attaching iommu (bootloader could + * have left things on, in which case we'll start getting faults if + * we don't disable): + */ + pm_runtime_get_sync(&pdev->dev); + for (i = 0; i < MDP5_INTF_NUM_MAX; i++) { + if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) || + !config->hw->intf.base[i]) + continue; + mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0); + + mdp5_write(mdp5_kms, 
REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3); + } + mdelay(16); + + aspace = msm_kms_init_aspace(mdp5_kms->dev); + if (IS_ERR(aspace)) { + ret = PTR_ERR(aspace); + goto fail; + } + + kms->aspace = aspace; + + pm_runtime_put_sync(&pdev->dev); + + ret = modeset_init(mdp5_kms); + if (ret) { + DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret); + goto fail; + } + + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.max_width = 0xffff; + dev->mode_config.max_height = 0xffff; + + dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */ + dev->vblank_disable_immediate = true; + + return 0; +fail: + if (kms) + mdp5_kms_destroy(kms); + + return ret; +} + +static void mdp5_destroy(struct mdp5_kms *mdp5_kms) +{ + int i; + + if (mdp5_kms->ctlm) + mdp5_ctlm_destroy(mdp5_kms->ctlm); + if (mdp5_kms->smp) + mdp5_smp_destroy(mdp5_kms->smp); + if (mdp5_kms->cfg) + mdp5_cfg_destroy(mdp5_kms->cfg); + + for (i = 0; i < mdp5_kms->num_intfs; i++) + kfree(mdp5_kms->intfs[i]); + + if (mdp5_kms->rpm_enabled) + pm_runtime_disable(&mdp5_kms->pdev->dev); + + drm_atomic_private_obj_fini(&mdp5_kms->glob_state); + drm_modeset_lock_fini(&mdp5_kms->glob_state_lock); +} + +static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt, + const enum mdp5_pipe *pipes, const uint32_t *offsets, + uint32_t caps) +{ + struct drm_device *dev = mdp5_kms->dev; + int i, ret; + + for (i = 0; i < cnt; i++) { + struct mdp5_hw_pipe *hwpipe; + + hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps); + if (IS_ERR(hwpipe)) { + ret = PTR_ERR(hwpipe); + DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n", + pipe2name(pipes[i]), ret); + return ret; + } + hwpipe->idx = mdp5_kms->num_hwpipes; + mdp5_kms->hwpipes[mdp5_kms->num_hwpipes++] = hwpipe; + } + + return 0; +} + +static int hwpipe_init(struct mdp5_kms *mdp5_kms) +{ + static const enum mdp5_pipe rgb_planes[] = { + SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3, + }; + static const enum mdp5_pipe vig_planes[] = { + SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3, + }; + static const enum mdp5_pipe dma_planes[] = { + SSPP_DMA0, SSPP_DMA1, + }; + static const enum mdp5_pipe cursor_planes[] = { + SSPP_CURSOR0, SSPP_CURSOR1, + }; + const struct mdp5_cfg_hw *hw_cfg; + int ret; + + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); + + /* Construct RGB pipes: */ + ret = construct_pipes(mdp5_kms, hw_cfg->pipe_rgb.count, rgb_planes, + hw_cfg->pipe_rgb.base, hw_cfg->pipe_rgb.caps); + if (ret) + return ret; + + /* Construct video (VIG) pipes: */ + ret = construct_pipes(mdp5_kms, hw_cfg->pipe_vig.count, vig_planes, + hw_cfg->pipe_vig.base, hw_cfg->pipe_vig.caps); + if (ret) + return ret; + + /* Construct DMA pipes: */ + ret = construct_pipes(mdp5_kms, hw_cfg->pipe_dma.count, dma_planes, + hw_cfg->pipe_dma.base, hw_cfg->pipe_dma.caps); + if (ret) + return ret; + + /* Construct cursor pipes: */ + ret = construct_pipes(mdp5_kms, hw_cfg->pipe_cursor.count, + cursor_planes, hw_cfg->pipe_cursor.base, + hw_cfg->pipe_cursor.caps); + if (ret) + return ret; + + return 0; +} + +static int hwmixer_init(struct mdp5_kms *mdp5_kms) +{ + struct drm_device *dev = mdp5_kms->dev; + const struct mdp5_cfg_hw *hw_cfg; + int i, ret; + + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); + + for (i = 0; i < hw_cfg->lm.count; i++) { + struct mdp5_hw_mixer *mixer; + + mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]); + if (IS_ERR(mixer)) { + ret = PTR_ERR(mixer); + DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n", + i, ret); + return ret; + } + + mixer->idx = 
mdp5_kms->num_hwmixers; + mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer; + } + + return 0; +} + +static int interface_init(struct mdp5_kms *mdp5_kms) +{ + struct drm_device *dev = mdp5_kms->dev; + const struct mdp5_cfg_hw *hw_cfg; + const enum mdp5_intf_type *intf_types; + int i; + + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); + intf_types = hw_cfg->intf.connect; + + for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) { + struct mdp5_interface *intf; + + if (intf_types[i] == INTF_DISABLED) + continue; + + intf = kzalloc(sizeof(*intf), GFP_KERNEL); + if (!intf) { + DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i); + return -ENOMEM; + } + + intf->num = i; + intf->type = intf_types[i]; + intf->mode = MDP5_INTF_MODE_NONE; + intf->idx = mdp5_kms->num_intfs; + mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf; + } + + return 0; +} + +static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + struct mdp5_kms *mdp5_kms; + struct mdp5_cfg *config; + u32 major, minor; + int ret; + + mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL); + if (!mdp5_kms) { + ret = -ENOMEM; + goto fail; + } + + spin_lock_init(&mdp5_kms->resource_lock); + + mdp5_kms->dev = dev; + mdp5_kms->pdev = pdev; + + ret = mdp5_global_obj_init(mdp5_kms); + if (ret) + goto fail; + + mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys"); + if (IS_ERR(mdp5_kms->mmio)) { + ret = PTR_ERR(mdp5_kms->mmio); + goto fail; + } + + /* mandatory clocks: */ + ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true); + if (ret) + goto fail; + ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true); + if (ret) + goto fail; + ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true); + if (ret) + goto fail; + ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true); + if (ret) + goto fail; + + /* optional clocks: */ + get_clk(pdev, &mdp5_kms->lut_clk, "lut", false); + get_clk(pdev, &mdp5_kms->tbu_clk, "tbu", false); + get_clk(pdev, &mdp5_kms->tbu_rt_clk, "tbu_rt", false); + + /* we need to set a default rate before enabling. 
Set a safe + * rate first, then figure out hw revision, and then set a + * more optimal rate: + */ + clk_set_rate(mdp5_kms->core_clk, 200000000); + + /* set uninit-ed kms */ + priv->kms = &mdp5_kms->base.base; + + pm_runtime_enable(&pdev->dev); + mdp5_kms->rpm_enabled = true; + + read_mdp_hw_revision(mdp5_kms, &major, &minor); + + mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor); + if (IS_ERR(mdp5_kms->cfg)) { + ret = PTR_ERR(mdp5_kms->cfg); + mdp5_kms->cfg = NULL; + goto fail; + } + + config = mdp5_cfg_get_config(mdp5_kms->cfg); + mdp5_kms->caps = config->hw->mdp.caps; + + /* TODO: compute core clock rate at runtime */ + clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk); + + /* + * Some chipsets have a Shared Memory Pool (SMP), while others + * have dedicated latency buffering per source pipe instead; + * this section initializes the SMP: + */ + if (mdp5_kms->caps & MDP_CAP_SMP) { + mdp5_kms->smp = mdp5_smp_init(mdp5_kms, &config->hw->smp); + if (IS_ERR(mdp5_kms->smp)) { + ret = PTR_ERR(mdp5_kms->smp); + mdp5_kms->smp = NULL; + goto fail; + } + } + + mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg); + if (IS_ERR(mdp5_kms->ctlm)) { + ret = PTR_ERR(mdp5_kms->ctlm); + mdp5_kms->ctlm = NULL; + goto fail; + } + + ret = hwpipe_init(mdp5_kms); + if (ret) + goto fail; + + ret = hwmixer_init(mdp5_kms); + if (ret) + goto fail; + + ret = interface_init(mdp5_kms); + if (ret) + goto fail; + + return 0; +fail: + if (mdp5_kms) + mdp5_destroy(mdp5_kms); + return ret; +} + +static int mdp5_setup_interconnect(struct platform_device *pdev) +{ + struct icc_path *path0 = msm_icc_get(&pdev->dev, "mdp0-mem"); + struct icc_path *path1 = msm_icc_get(&pdev->dev, "mdp1-mem"); + struct icc_path *path_rot = msm_icc_get(&pdev->dev, "rotator-mem"); + + if (IS_ERR(path0)) + return PTR_ERR(path0); + + if (!path0) { + /* no interconnect support is not necessarily a fatal + * condition, the platform may simply not have an + * interconnect driver yet. But warn about it in case + * bootloader didn't setup bus clocks high enough for + * scanout. 
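+ * When the interconnect paths are present, a fixed (generous) bandwidth + * is voted below rather than scaling the vote per-mode.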
+ */ + dev_warn(&pdev->dev, "No interconnect support may cause display underflows!\n"); + return 0; + } + + icc_set_bw(path0, 0, MBps_to_icc(6400)); + + if (!IS_ERR_OR_NULL(path1)) + icc_set_bw(path1, 0, MBps_to_icc(6400)); + if (!IS_ERR_OR_NULL(path_rot)) + icc_set_bw(path_rot, 0, MBps_to_icc(6400)); + + return 0; +} + +static int mdp5_dev_probe(struct platform_device *pdev) +{ + int ret; + + DBG(""); + + ret = mdp5_setup_interconnect(pdev); + if (ret) + return ret; + + return msm_drv_probe(&pdev->dev, mdp5_kms_init); +} + +static int mdp5_dev_remove(struct platform_device *pdev) +{ + DBG(""); + component_master_del(&pdev->dev, &msm_drm_ops); + return 0; +} + +static __maybe_unused int mdp5_runtime_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct msm_drm_private *priv = platform_get_drvdata(pdev); + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); + + DBG(""); + + return mdp5_disable(mdp5_kms); +} + +static __maybe_unused int mdp5_runtime_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct msm_drm_private *priv = platform_get_drvdata(pdev); + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); + + DBG(""); + + return mdp5_enable(mdp5_kms); +} + +static const struct dev_pm_ops mdp5_pm_ops = { + SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL) + .prepare = msm_pm_prepare, + .complete = msm_pm_complete, +}; + +static const struct of_device_id mdp5_dt_match[] = { + { .compatible = "qcom,mdp5", }, + /* to support downstream DT files */ + { .compatible = "qcom,mdss_mdp", }, + {} +}; +MODULE_DEVICE_TABLE(of, mdp5_dt_match); + +static struct platform_driver mdp5_driver = { + .probe = mdp5_dev_probe, + .remove = mdp5_dev_remove, + .shutdown = msm_drv_shutdown, + .driver = { + .name = "msm_mdp", + .of_match_table = mdp5_dt_match, + .pm = &mdp5_pm_ops, + }, +}; + +void __init msm_mdp_register(void) +{ + DBG(""); + platform_driver_register(&mdp5_driver); +} + +void __exit msm_mdp_unregister(void) +{ + DBG(""); + platform_driver_unregister(&mdp5_driver); +} diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h new file mode 100644 index 000000000..29bf11f08 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h @@ -0,0 +1,327 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + */ + +#ifndef __MDP5_KMS_H__ +#define __MDP5_KMS_H__ + +#include "msm_drv.h" +#include "msm_kms.h" +#include "disp/mdp_kms.h" +#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */ +#include "mdp5.xml.h" +#include "mdp5_pipe.h" +#include "mdp5_mixer.h" +#include "mdp5_ctl.h" +#include "mdp5_smp.h" + +struct mdp5_kms { + struct mdp_kms base; + + struct drm_device *dev; + + struct platform_device *pdev; + + unsigned num_hwpipes; + struct mdp5_hw_pipe *hwpipes[SSPP_MAX]; + + unsigned num_hwmixers; + struct mdp5_hw_mixer *hwmixers[8]; + + unsigned num_intfs; + struct mdp5_interface *intfs[5]; + + struct mdp5_cfg_handler *cfg; + uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */ + + /* + * Global private object state, Do not access directly, use + * mdp5_global_get_state() + */ + struct drm_modeset_lock glob_state_lock; + struct drm_private_obj glob_state; + + struct mdp5_smp *smp; + struct mdp5_ctl_manager *ctlm; + + /* io/register spaces: */ + void __iomem *mmio; + + struct clk *axi_clk; + struct clk *ahb_clk; + struct clk *core_clk; + struct clk *lut_clk; + struct clk 
*tbu_clk; + struct clk *tbu_rt_clk; + struct clk *vsync_clk; + + /* + * lock to protect access to global resources: ie., following register: + * - REG_MDP5_DISP_INTF_SEL + */ + spinlock_t resource_lock; + + bool rpm_enabled; + + struct mdp_irq error_handler; + + int enable_count; +}; +#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base) + +/* Global private object state for tracking resources that are shared across + * multiple kms objects (planes/crtcs/etc). + */ +#define to_mdp5_global_state(x) container_of(x, struct mdp5_global_state, base) +struct mdp5_global_state { + struct drm_private_state base; + + struct drm_atomic_state *state; + struct mdp5_kms *mdp5_kms; + + struct mdp5_hw_pipe_state hwpipe; + struct mdp5_hw_mixer_state hwmixer; + struct mdp5_smp_state smp; +}; + +struct mdp5_global_state * mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms); +struct mdp5_global_state *__must_check mdp5_get_global_state(struct drm_atomic_state *s); + +/* Atomic plane state. Subclasses the base drm_plane_state in order to + * track assigned hwpipe and hw specific state. + */ +struct mdp5_plane_state { + struct drm_plane_state base; + + struct mdp5_hw_pipe *hwpipe; + struct mdp5_hw_pipe *r_hwpipe; /* right hwpipe */ + + /* assigned by crtc blender */ + enum mdp_mixer_stage_id stage; + + /* whether attached CRTC needs pixel data explicitly flushed to + * display (ex. DSI command mode display) + */ + bool needs_dirtyfb; +}; +#define to_mdp5_plane_state(x) \ + container_of(x, struct mdp5_plane_state, base) + +struct mdp5_pipeline { + struct mdp5_interface *intf; + struct mdp5_hw_mixer *mixer; + struct mdp5_hw_mixer *r_mixer; /* right mixer */ +}; + +struct mdp5_crtc_state { + struct drm_crtc_state base; + + struct mdp5_ctl *ctl; + struct mdp5_pipeline pipeline; + + /* these are derivatives of intf/mixer state in mdp5_pipeline */ + u32 vblank_irqmask; + u32 err_irqmask; + u32 pp_done_irqmask; + + bool cmd_mode; + + /* should we not write CTL[n].START register on flush? If the + * encoder has changed this is set to true, since encoder->enable() + * is called after crtc state is committed, but we only want to + * write the CTL[n].START register once. 
This lets us defer + * writing CTL[n].START until encoder->enable() + */ + bool defer_start; +}; +#define to_mdp5_crtc_state(x) \ + container_of(x, struct mdp5_crtc_state, base) + +enum mdp5_intf_mode { + MDP5_INTF_MODE_NONE = 0, + + /* Modes used for DSI interface (INTF_DSI type): */ + MDP5_INTF_DSI_MODE_VIDEO, + MDP5_INTF_DSI_MODE_COMMAND, + + /* Modes used for WB interface (INTF_WB type): */ + MDP5_INTF_WB_MODE_BLOCK, + MDP5_INTF_WB_MODE_LINE, +}; + +struct mdp5_interface { + int idx; + int num; /* display interface number */ + enum mdp5_intf_type type; + enum mdp5_intf_mode mode; +}; + +struct mdp5_encoder { + struct drm_encoder base; + spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */ + bool enabled; + uint32_t bsc; + + struct mdp5_interface *intf; + struct mdp5_ctl *ctl; +}; +#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base) + +static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data) +{ + WARN_ON(mdp5_kms->enable_count <= 0); + msm_writel(data, mdp5_kms->mmio + reg); +} + +static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg) +{ + WARN_ON(mdp5_kms->enable_count <= 0); + return msm_readl(mdp5_kms->mmio + reg); +} + +static inline const char *stage2name(enum mdp_mixer_stage_id stage) +{ + static const char *names[] = { +#define NAME(n) [n] = #n + NAME(STAGE_UNUSED), NAME(STAGE_BASE), + NAME(STAGE0), NAME(STAGE1), NAME(STAGE2), + NAME(STAGE3), NAME(STAGE4), NAME(STAGE6), +#undef NAME + }; + return names[stage]; +} + +static inline const char *pipe2name(enum mdp5_pipe pipe) +{ + static const char *names[] = { +#define NAME(n) [SSPP_ ## n] = #n + NAME(VIG0), NAME(VIG1), NAME(VIG2), + NAME(RGB0), NAME(RGB1), NAME(RGB2), + NAME(DMA0), NAME(DMA1), + NAME(VIG3), NAME(RGB3), + NAME(CURSOR0), NAME(CURSOR1), +#undef NAME + }; + return names[pipe]; +} + +static inline int pipe2nclients(enum mdp5_pipe pipe) +{ + switch (pipe) { + case SSPP_RGB0: + case SSPP_RGB1: + case SSPP_RGB2: + case SSPP_RGB3: + return 1; + default: + return 3; + } +} + +static inline uint32_t intf2err(int intf_num) +{ + switch (intf_num) { + case 0: return MDP5_IRQ_INTF0_UNDER_RUN; + case 1: return MDP5_IRQ_INTF1_UNDER_RUN; + case 2: return MDP5_IRQ_INTF2_UNDER_RUN; + case 3: return MDP5_IRQ_INTF3_UNDER_RUN; + default: return 0; + } +} + +static inline uint32_t intf2vblank(struct mdp5_hw_mixer *mixer, + struct mdp5_interface *intf) +{ + /* + * In case of DSI Command Mode, the Ping Pong's read pointer IRQ + * acts as a Vblank signal. The Ping Pong buffer used is bound to + * layer mixer. 
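+ * Hence, for that case, the vblank IRQ bit below is selected by the + * mixer's ping pong index rather than by the interface number.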
+ */ + + if ((intf->type == INTF_DSI) && + (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) + return MDP5_IRQ_PING_PONG_0_RD_PTR << mixer->pp; + + if (intf->type == INTF_WB) + return MDP5_IRQ_WB_2_DONE; + + switch (intf->num) { + case 0: return MDP5_IRQ_INTF0_VSYNC; + case 1: return MDP5_IRQ_INTF1_VSYNC; + case 2: return MDP5_IRQ_INTF2_VSYNC; + case 3: return MDP5_IRQ_INTF3_VSYNC; + default: return 0; + } +} + +static inline uint32_t lm2ppdone(struct mdp5_hw_mixer *mixer) +{ + return MDP5_IRQ_PING_PONG_0_DONE << mixer->pp; +} + +void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, + uint32_t old_irqmask); +void mdp5_irq_preinstall(struct msm_kms *kms); +int mdp5_irq_postinstall(struct msm_kms *kms); +void mdp5_irq_uninstall(struct msm_kms *kms); +irqreturn_t mdp5_irq(struct msm_kms *kms); +int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); +void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); +int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms); +void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); + +uint32_t mdp5_plane_get_flush(struct drm_plane *plane); +enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); +enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane); +struct drm_plane *mdp5_plane_init(struct drm_device *dev, + enum drm_plane_type type); + +struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc); +uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); + +struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc); +struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc); +void mdp5_crtc_set_pipeline(struct drm_crtc *crtc); +void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc); +struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, + struct drm_plane *plane, + struct drm_plane *cursor_plane, int id); + +struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, + struct mdp5_interface *intf, struct mdp5_ctl *ctl); +int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder, + struct drm_encoder *slave_encoder); +void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode); +int mdp5_encoder_get_linecount(struct drm_encoder *encoder); +u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder); + +#ifdef CONFIG_DRM_MSM_DSI +void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +void mdp5_cmd_encoder_disable(struct drm_encoder *encoder); +void mdp5_cmd_encoder_enable(struct drm_encoder *encoder); +int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, + struct drm_encoder *slave_encoder); +#else +static inline void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ +} +static inline void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) +{ +} +static inline void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) +{ +} +static inline int mdp5_cmd_encoder_set_split_display( + struct drm_encoder *encoder, struct drm_encoder *slave_encoder) +{ + return -EINVAL; +} +#endif + +#endif /* __MDP5_KMS_H__ */ diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c new file mode 100644 index 000000000..2536def2a --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2017 The Linux Foundation. All rights reserved. 
+ */ + +#include "mdp5_kms.h" + +/* + * As of now, there are only 2 combinations possible for source split: + * + * Left | Right + * -----|------ + * LM0 | LM1 + * LM2 | LM5 + * + */ +static int lm_right_pair[] = { 1, -1, 5, -1, -1, -1 }; + +static int get_right_pair_idx(struct mdp5_kms *mdp5_kms, int lm) +{ + int i; + int pair_lm; + + pair_lm = lm_right_pair[lm]; + if (pair_lm < 0) + return -EINVAL; + + for (i = 0; i < mdp5_kms->num_hwmixers; i++) { + struct mdp5_hw_mixer *mixer = mdp5_kms->hwmixers[i]; + + if (mixer->lm == pair_lm) + return mixer->idx; + } + + return -1; +} + +int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc, + uint32_t caps, struct mdp5_hw_mixer **mixer, + struct mdp5_hw_mixer **r_mixer) +{ + struct msm_drm_private *priv = s->dev->dev_private; + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); + struct mdp5_global_state *global_state = mdp5_get_global_state(s); + struct mdp5_hw_mixer_state *new_state; + int i; + + if (IS_ERR(global_state)) + return PTR_ERR(global_state); + + new_state = &global_state->hwmixer; + + for (i = 0; i < mdp5_kms->num_hwmixers; i++) { + struct mdp5_hw_mixer *cur = mdp5_kms->hwmixers[i]; + + /* + * skip if already in-use by a different CRTC. If there is a + * mixer already assigned to this CRTC, it means this call is + * a request to get an additional right mixer. Assume that the + * existing mixer is the 'left' one, and try to see if we can + * get its corresponding 'right' pair. + */ + if (new_state->hwmixer_to_crtc[cur->idx] && + new_state->hwmixer_to_crtc[cur->idx] != crtc) + continue; + + /* skip if doesn't support some required caps: */ + if (caps & ~cur->caps) + continue; + + if (r_mixer) { + int pair_idx; + + pair_idx = get_right_pair_idx(mdp5_kms, cur->lm); + if (pair_idx < 0) + return -EINVAL; + + if (new_state->hwmixer_to_crtc[pair_idx]) + continue; + + *r_mixer = mdp5_kms->hwmixers[pair_idx]; + } + + /* + * prefer a pair-able LM over an unpairable one. We can + * switch the CRTC from Normal mode to Source Split mode + * without requiring a full modeset if we had already + * assigned this CRTC a pair-able LM. + * + * TODO: There will be assignment sequences which would + * result in the CRTC requiring a full modeset, even + * if we have the LM resources to prevent it. For a platform + * with a few displays, we don't run out of pair-able LMs + * so easily. For now, ignore the possibility of requiring + * a full modeset. 
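+ * This is why the loop below keeps scanning and lets a pair-able LM + * replace an earlier unpairable candidate.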
+ */ + if (!(*mixer) || cur->caps & MDP_LM_CAP_PAIR) + *mixer = cur; + } + + if (!(*mixer)) + return -ENOMEM; + + if (r_mixer && !(*r_mixer)) + return -ENOMEM; + + DBG("assigning Layer Mixer %d to crtc %s", (*mixer)->lm, crtc->name); + + new_state->hwmixer_to_crtc[(*mixer)->idx] = crtc; + if (r_mixer) { + DBG("assigning Right Layer Mixer %d to crtc %s", (*r_mixer)->lm, + crtc->name); + new_state->hwmixer_to_crtc[(*r_mixer)->idx] = crtc; + } + + return 0; +} + +int mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer) +{ + struct mdp5_global_state *global_state = mdp5_get_global_state(s); + struct mdp5_hw_mixer_state *new_state; + + if (!mixer) + return 0; + + if (IS_ERR(global_state)) + return PTR_ERR(global_state); + + new_state = &global_state->hwmixer; + + if (WARN_ON(!new_state->hwmixer_to_crtc[mixer->idx])) + return -EINVAL; + + DBG("%s: release from crtc %s", mixer->name, + new_state->hwmixer_to_crtc[mixer->idx]->name); + + new_state->hwmixer_to_crtc[mixer->idx] = NULL; + + return 0; +} + +void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer) +{ + kfree(mixer); +} + +static const char * const mixer_names[] = { + "LM0", "LM1", "LM2", "LM3", "LM4", "LM5", +}; + +struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm) +{ + struct mdp5_hw_mixer *mixer; + + mixer = kzalloc(sizeof(*mixer), GFP_KERNEL); + if (!mixer) + return ERR_PTR(-ENOMEM); + + mixer->name = mixer_names[lm->id]; + mixer->lm = lm->id; + mixer->caps = lm->caps; + mixer->pp = lm->pp; + mixer->dspp = lm->dspp; + mixer->flush_mask = mdp_ctl_flush_mask_lm(lm->id); + + return mixer; +} diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h new file mode 100644 index 000000000..545ee223b --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2017 The Linux Foundation. All rights reserved. 
+ */ + +#ifndef __MDP5_LM_H__ +#define __MDP5_LM_H__ + +/* represents a hw Layer Mixer, one (or more) is dynamically assigned to a crtc */ +struct mdp5_hw_mixer { + int idx; + + const char *name; + + int lm; /* the LM instance # */ + uint32_t caps; + int pp; + int dspp; + + uint32_t flush_mask; /* used to commit LM registers */ +}; + +/* global atomic state of assignment between CRTCs and Layer Mixers: */ +struct mdp5_hw_mixer_state { + struct drm_crtc *hwmixer_to_crtc[8]; +}; + +struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm); +void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm); +int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc, + uint32_t caps, struct mdp5_hw_mixer **mixer, + struct mdp5_hw_mixer **r_mixer); +int mdp5_mixer_release(struct drm_atomic_state *s, + struct mdp5_hw_mixer *mixer); + +#endif /* __MDP5_LM_H__ */ diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c new file mode 100644 index 000000000..e4b8a7898 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2016 Red Hat + * Author: Rob Clark + */ + +#include "mdp5_kms.h" + +int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane, + uint32_t caps, uint32_t blkcfg, + struct mdp5_hw_pipe **hwpipe, + struct mdp5_hw_pipe **r_hwpipe) +{ + struct msm_drm_private *priv = s->dev->dev_private; + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); + struct mdp5_global_state *new_global_state, *old_global_state; + struct mdp5_hw_pipe_state *old_state, *new_state; + int i, j; + + new_global_state = mdp5_get_global_state(s); + if (IS_ERR(new_global_state)) + return PTR_ERR(new_global_state); + + /* grab old_state after mdp5_get_global_state(), since now we hold lock: */ + old_global_state = mdp5_get_existing_global_state(mdp5_kms); + + old_state = &old_global_state->hwpipe; + new_state = &new_global_state->hwpipe; + + for (i = 0; i < mdp5_kms->num_hwpipes; i++) { + struct mdp5_hw_pipe *cur = mdp5_kms->hwpipes[i]; + + /* skip if already in-use.. check both new and old state, + * since we cannot immediately re-use a pipe that is + * released in the current update in some cases: + * (1) mdp5 can have SMP (non-double-buffered) + * (2) hw pipe previously assigned to different CRTC + * (vblanks might not be aligned) + */ + if (new_state->hwpipe_to_plane[cur->idx] || + old_state->hwpipe_to_plane[cur->idx]) + continue; + + /* skip if doesn't support some required caps: */ + if (caps & ~cur->caps) + continue; + + /* + * don't assign a cursor pipe to a plane that isn't going to + * be used as a cursor + */ + if (cur->caps & MDP_PIPE_CAP_CURSOR && + plane->type != DRM_PLANE_TYPE_CURSOR) + continue; + + /* possible candidate, take the one with the + * fewest unneeded caps bits set: + */ + if (!(*hwpipe) || (hweight_long(cur->caps & ~caps) < + hweight_long((*hwpipe)->caps & ~caps))) { + bool r_found = false; + + if (r_hwpipe) { + for (j = i + 1; j < mdp5_kms->num_hwpipes; + j++) { + struct mdp5_hw_pipe *r_cur = + mdp5_kms->hwpipes[j]; + + /* reject different types of hwpipes */ + if (r_cur->caps != cur->caps) + continue; + + /* respect priority, eg. 
VIG0 > VIG1 */ + if (cur->pipe > r_cur->pipe) + continue; + + *r_hwpipe = r_cur; + r_found = true; + break; + } + } + + if (!r_hwpipe || r_found) + *hwpipe = cur; + } + } + + if (!(*hwpipe)) + return -ENOMEM; + + if (r_hwpipe && !(*r_hwpipe)) + return -ENOMEM; + + if (mdp5_kms->smp) { + int ret; + + /* We don't support SMP and 2 hwpipes/plane together */ + WARN_ON(r_hwpipe); + + DBG("%s: alloc SMP blocks", (*hwpipe)->name); + ret = mdp5_smp_assign(mdp5_kms->smp, &new_global_state->smp, + (*hwpipe)->pipe, blkcfg); + if (ret) + return -ENOMEM; + + (*hwpipe)->blkcfg = blkcfg; + } + + DBG("%s: assign to plane %s for caps %x", + (*hwpipe)->name, plane->name, caps); + new_state->hwpipe_to_plane[(*hwpipe)->idx] = plane; + + if (r_hwpipe) { + DBG("%s: assign to right of plane %s for caps %x", + (*r_hwpipe)->name, plane->name, caps); + new_state->hwpipe_to_plane[(*r_hwpipe)->idx] = plane; + } + + return 0; +} + +int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe) +{ + struct msm_drm_private *priv = s->dev->dev_private; + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); + struct mdp5_global_state *state; + struct mdp5_hw_pipe_state *new_state; + + if (!hwpipe) + return 0; + + state = mdp5_get_global_state(s); + if (IS_ERR(state)) + return PTR_ERR(state); + + new_state = &state->hwpipe; + + if (WARN_ON(!new_state->hwpipe_to_plane[hwpipe->idx])) + return -EINVAL; + + DBG("%s: release from plane %s", hwpipe->name, + new_state->hwpipe_to_plane[hwpipe->idx]->name); + + if (mdp5_kms->smp) { + DBG("%s: free SMP blocks", hwpipe->name); + mdp5_smp_release(mdp5_kms->smp, &state->smp, hwpipe->pipe); + } + + new_state->hwpipe_to_plane[hwpipe->idx] = NULL; + + return 0; +} + +void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe) +{ + kfree(hwpipe); +} + +struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe, + uint32_t reg_offset, uint32_t caps) +{ + struct mdp5_hw_pipe *hwpipe; + + hwpipe = kzalloc(sizeof(*hwpipe), GFP_KERNEL); + if (!hwpipe) + return ERR_PTR(-ENOMEM); + + hwpipe->name = pipe2name(pipe); + hwpipe->pipe = pipe; + hwpipe->reg_offset = reg_offset; + hwpipe->caps = caps; + hwpipe->flush_mask = mdp_ctl_flush_mask_pipe(pipe); + + return hwpipe; +} diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h new file mode 100644 index 000000000..cca67938c --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2016 Red Hat + * Author: Rob Clark + */ + +#ifndef __MDP5_PIPE_H__ +#define __MDP5_PIPE_H__ + +/* TODO: Add SSPP_MAX in mdp5.xml.h */ +#define SSPP_MAX (SSPP_CURSOR1 + 1) + +/* represents a hw pipe, which is dynamically assigned to a plane */ +struct mdp5_hw_pipe { + int idx; + + const char *name; + enum mdp5_pipe pipe; + + uint32_t reg_offset; + uint32_t caps; + + uint32_t flush_mask; /* used to commit pipe registers */ + + /* number of smp blocks per plane, ie: + * nblks_y | (nblks_u << 8) | (nblks_v << 16) + */ + uint32_t blkcfg; +}; + +/* global atomic state of assignment between pipes and planes: */ +struct mdp5_hw_pipe_state { + struct drm_plane *hwpipe_to_plane[SSPP_MAX]; +}; + +int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane, + uint32_t caps, uint32_t blkcfg, + struct mdp5_hw_pipe **hwpipe, + struct mdp5_hw_pipe **r_hwpipe); +int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe); + +struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe, + uint32_t reg_offset, uint32_t 
caps);
+void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe);
+
+#endif /* __MDP5_PIPE_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
new file mode 100644
index 000000000..0d5ff03cb
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -0,0 +1,1048 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_print.h>
+
+#include "mdp5_kms.h"
+
+struct mdp5_plane {
+ struct drm_plane base;
+
+ uint32_t nformats;
+ uint32_t formats[32];
+};
+#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
+
+static int mdp5_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_rect *src, struct drm_rect *dest);
+
+static struct mdp5_kms *get_kms(struct drm_plane *plane)
+{
+ struct msm_drm_private *priv = plane->dev->dev_private;
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static bool plane_enabled(struct drm_plane_state *state)
+{
+ return state->visible;
+}
+
+static void mdp5_plane_destroy(struct drm_plane *plane)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+ drm_plane_cleanup(plane);
+
+ kfree(mdp5_plane);
+}
+
+/* helper to install properties which are common to planes and crtcs */
+static void mdp5_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj)
+{
+ unsigned int zpos;
+
+ drm_plane_create_rotation_property(plane,
+ DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+ drm_plane_create_alpha_property(plane);
+ drm_plane_create_blend_mode_property(plane,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ zpos = STAGE_BASE;
+ else
+ zpos = STAGE0 + drm_plane_index(plane);
+ drm_plane_create_zpos_property(plane, zpos, 1, 255);
+}
+
+static void
+mdp5_plane_atomic_print_state(struct drm_printer *p,
+ const struct drm_plane_state *state)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
+ struct mdp5_kms *mdp5_kms = get_kms(state->plane);
+
+ drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ?
+ pstate->hwpipe->name : "(null)");
+ if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
+ drm_printf(p, "\tright-hwpipe=%s\n",
+ pstate->r_hwpipe ? 
pstate->r_hwpipe->name : + "(null)"); + drm_printf(p, "\tblend_mode=%u\n", pstate->base.pixel_blend_mode); + drm_printf(p, "\tzpos=%u\n", pstate->base.zpos); + drm_printf(p, "\tnormalized_zpos=%u\n", pstate->base.normalized_zpos); + drm_printf(p, "\talpha=%u\n", pstate->base.alpha); + drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage)); +} + +static void mdp5_plane_reset(struct drm_plane *plane) +{ + struct mdp5_plane_state *mdp5_state; + + if (plane->state) + __drm_atomic_helper_plane_destroy_state(plane->state); + + kfree(to_mdp5_plane_state(plane->state)); + plane->state = NULL; + mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL); + if (!mdp5_state) + return; + __drm_atomic_helper_plane_reset(plane, &mdp5_state->base); +} + +static struct drm_plane_state * +mdp5_plane_duplicate_state(struct drm_plane *plane) +{ + struct mdp5_plane_state *mdp5_state; + + if (WARN_ON(!plane->state)) + return NULL; + + mdp5_state = kmemdup(to_mdp5_plane_state(plane->state), + sizeof(*mdp5_state), GFP_KERNEL); + if (!mdp5_state) + return NULL; + + __drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base); + + return &mdp5_state->base; +} + +static void mdp5_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct mdp5_plane_state *pstate = to_mdp5_plane_state(state); + + __drm_atomic_helper_plane_destroy_state(state); + + kfree(pstate); +} + +static const struct drm_plane_funcs mdp5_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = mdp5_plane_destroy, + .reset = mdp5_plane_reset, + .atomic_duplicate_state = mdp5_plane_duplicate_state, + .atomic_destroy_state = mdp5_plane_destroy_state, + .atomic_print_state = mdp5_plane_atomic_print_state, +}; + +static int mdp5_plane_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct msm_drm_private *priv = plane->dev->dev_private; + struct msm_kms *kms = priv->kms; + bool needs_dirtyfb = to_mdp5_plane_state(new_state)->needs_dirtyfb; + + if (!new_state->fb) + return 0; + + drm_gem_plane_helper_prepare_fb(plane, new_state); + + return msm_framebuffer_prepare(new_state->fb, kms->aspace, needs_dirtyfb); +} + +static void mdp5_plane_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct mdp5_kms *mdp5_kms = get_kms(plane); + struct msm_kms *kms = &mdp5_kms->base.base; + struct drm_framebuffer *fb = old_state->fb; + bool needed_dirtyfb = to_mdp5_plane_state(old_state)->needs_dirtyfb; + + if (!fb) + return; + + DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id); + msm_framebuffer_cleanup(fb, kms->aspace, needed_dirtyfb); +} + +static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state, + struct drm_plane_state *state) +{ + struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state); + struct drm_plane *plane = state->plane; + struct drm_plane_state *old_state = plane->state; + struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg); + bool new_hwpipe = false; + bool need_right_hwpipe = false; + uint32_t max_width, max_height; + bool out_of_bounds = false; + uint32_t caps = 0; + int min_scale, max_scale; + int ret; + + DBG("%s: check (%d -> %d)", plane->name, + plane_enabled(old_state), plane_enabled(state)); + + max_width = config->hw->lm.max_width << 16; + max_height = config->hw->lm.max_height << 16; + + /* Make sure source dimensions are within bounds. 
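A note on units for the check that follows: DRM hands the plane's source rectangle to the driver in Q16.16 fixed point, while the layer-mixer limit in the hw config is a plain pixel count, hence the << 16 promotion above before any comparison. A standalone sketch of that arithmetic, using a made-up 2048-pixel LM limit rather than a real config value:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lm_max_width = 2048;            /* stand-in for config->hw->lm.max_width */
	uint32_t max_width = lm_max_width << 16; /* promote to Q16.16 */

	/* source rect sizes arrive from userspace already in Q16.16 */
	uint32_t src_w = 1920u << 16;
	uint32_t src_w_wide = 3000u << 16;

	bool fits = src_w <= max_width;
	/* the SRC_SPLIT case: too wide for one hwpipe, OK for two */
	bool needs_split = src_w_wide > max_width &&
			   src_w_wide <= 2 * max_width;

	printf("1920px fits one pipe: %d\n", fits);
	printf("3000px needs a right hwpipe: %d\n", needs_split);
	return 0;
}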
*/
+ if (state->src_h > max_height)
+ out_of_bounds = true;
+
+ if (state->src_w > max_width) {
+ /* If source split is supported, we can go up to 2x
+ * the max LM width, but we'd need to stage another
+ * hwpipe to the right LM. So, the drm_plane would
+ * consist of 2 hwpipes.
+ */
+ if (config->hw->mdp.caps & MDP_CAP_SRC_SPLIT &&
+ (state->src_w <= 2 * max_width))
+ need_right_hwpipe = true;
+ else
+ out_of_bounds = true;
+ }
+
+ if (out_of_bounds) {
+ struct drm_rect src = drm_plane_state_src(state);
+ DBG("Invalid source size "DRM_RECT_FP_FMT,
+ DRM_RECT_FP_ARG(&src));
+ return -ERANGE;
+ }
+
+ min_scale = FRAC_16_16(1, 8);
+ max_scale = FRAC_16_16(8, 1);
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ min_scale, max_scale,
+ true, true);
+ if (ret)
+ return ret;
+
+ if (plane_enabled(state)) {
+ unsigned int rotation;
+ const struct mdp_format *format;
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ uint32_t blkcfg = 0;
+
+ format = to_mdp_format(msm_framebuffer_format(state->fb));
+ if (MDP_FORMAT_IS_YUV(format))
+ caps |= MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;
+
+ if (((state->src_w >> 16) != state->crtc_w) ||
+ ((state->src_h >> 16) != state->crtc_h))
+ caps |= MDP_PIPE_CAP_SCALE;
+
+ rotation = drm_rotation_simplify(state->rotation,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+
+ if (rotation & DRM_MODE_REFLECT_X)
+ caps |= MDP_PIPE_CAP_HFLIP;
+
+ if (rotation & DRM_MODE_REFLECT_Y)
+ caps |= MDP_PIPE_CAP_VFLIP;
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ caps |= MDP_PIPE_CAP_CURSOR;
+
+ /* (re)allocate hw pipe if we don't have one or caps-mismatch: */
+ if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps))
+ new_hwpipe = true;
+
+ /*
+ * (re)allocate hw pipe if we're either requesting 2 hw pipes
+ * or we're switching from 2 hw pipes to 1 hw pipe because the
+ * new src_w can be supported by 1 hw pipe itself.
+ */
+ if ((need_right_hwpipe && !mdp5_state->r_hwpipe) ||
+ (!need_right_hwpipe && mdp5_state->r_hwpipe))
+ new_hwpipe = true;
+
+ if (mdp5_kms->smp) {
+ const struct mdp_format *format =
+ to_mdp_format(msm_framebuffer_format(state->fb));
+
+ blkcfg = mdp5_smp_calculate(mdp5_kms->smp, format,
+ state->src_w >> 16, false);
+
+ if (mdp5_state->hwpipe && (mdp5_state->hwpipe->blkcfg != blkcfg))
+ new_hwpipe = true;
+ }
+
+ /* (re)assign hwpipe if needed, otherwise keep old one: */
+ if (new_hwpipe) {
+ /* TODO maybe we want to re-assign hwpipe sometimes
+ * in cases when we no longer need some caps to make
+ * it available for other planes?
+ */
+ struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe;
+ struct mdp5_hw_pipe *old_right_hwpipe =
+ mdp5_state->r_hwpipe;
+ struct mdp5_hw_pipe *new_hwpipe = NULL;
+ struct mdp5_hw_pipe *new_right_hwpipe = NULL;
+
+ ret = mdp5_pipe_assign(state->state, plane, caps,
+ blkcfg, &new_hwpipe,
+ need_right_hwpipe ? 
+ &new_right_hwpipe : NULL);
+ if (ret) {
+ DBG("%s: failed to assign hwpipe(s)!",
+ plane->name);
+ return ret;
+ }
+
+ mdp5_state->hwpipe = new_hwpipe;
+ if (need_right_hwpipe)
+ mdp5_state->r_hwpipe = new_right_hwpipe;
+ else
+ /*
+ * set it to NULL so that the driver knows we
+ * don't have a right hwpipe when committing a
+ * new state
+ */
+ mdp5_state->r_hwpipe = NULL;
+
+
+ ret = mdp5_pipe_release(state->state, old_hwpipe);
+ if (ret)
+ return ret;
+
+ ret = mdp5_pipe_release(state->state, old_right_hwpipe);
+ if (ret)
+ return ret;
+
+ }
+ } else {
+ ret = mdp5_pipe_release(state->state, mdp5_state->hwpipe);
+ if (ret)
+ return ret;
+
+ ret = mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+ if (ret)
+ return ret;
+
+ mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
+ }
+
+ return 0;
+}
+
+static int mdp5_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+
+ crtc = new_plane_state->crtc ? new_plane_state->crtc : old_plane_state->crtc;
+ if (!crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ return mdp5_plane_atomic_check_with_state(crtc_state, new_plane_state);
+}
+
+static void mdp5_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+
+ DBG("%s: update", plane->name);
+
+ if (plane_enabled(new_state)) {
+ int ret;
+
+ ret = mdp5_plane_mode_set(plane,
+ new_state->crtc, new_state->fb,
+ &new_state->src, &new_state->dst);
+ /* atomic_check should have ensured that this doesn't fail */
+ WARN_ON(ret < 0);
+ }
+}
+
+static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(new_plane_state);
+ struct drm_crtc_state *crtc_state;
+ int min_scale, max_scale;
+ int ret;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ new_plane_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ if (!crtc_state->active)
+ return -EINVAL;
+
+ /* don't use fast path if we don't have a hwpipe allocated yet */
+ if (!mdp5_state->hwpipe)
+ return -EINVAL;
+
+ /* only allow changing of position (crtc x/y or src x/y) in fast path */
+ if (plane->state->crtc != new_plane_state->crtc ||
+ plane->state->src_w != new_plane_state->src_w ||
+ plane->state->src_h != new_plane_state->src_h ||
+ plane->state->crtc_w != new_plane_state->crtc_w ||
+ plane->state->crtc_h != new_plane_state->crtc_h ||
+ !plane->state->fb ||
+ plane->state->fb != new_plane_state->fb)
+ return -EINVAL;
+
+ min_scale = FRAC_16_16(1, 8);
+ max_scale = FRAC_16_16(8, 1);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+ min_scale, max_scale,
+ true, true);
+ if (ret)
+ return ret;
+
+ /*
+ * if the visibility of the plane changes (i.e., if the cursor is
+ * clipped out completely), we can't take the async path because
+ * we need to stage/unstage the plane from the Layer Mixer(s). We
+ * also assign/unassign the hwpipe(s) tied to the plane. We avoid
+ * taking the fast path for both these reasons. 
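Taken together, the async (cursor) fast path accepted above reduces to a pure-move predicate: same crtc, same fb, same sizes, same visibility; only the x/y offsets may differ. A standalone model, with a simplified stand-in for the plane state struct (not the drm type):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for the fields async_check compares */
struct toy_plane_state {
	void *crtc, *fb;
	uint32_t src_w, src_h, crtc_w, crtc_h;
	int32_t crtc_x, crtc_y;
	bool visible;
};

static bool can_take_fast_path(const struct toy_plane_state *old_state,
			       const struct toy_plane_state *new_state)
{
	return old_state->crtc == new_state->crtc &&
	       old_state->fb && old_state->fb == new_state->fb &&
	       old_state->src_w == new_state->src_w &&
	       old_state->src_h == new_state->src_h &&
	       old_state->crtc_w == new_state->crtc_w &&
	       old_state->crtc_h == new_state->crtc_h &&
	       old_state->visible == new_state->visible;
}

int main(void)
{
	struct toy_plane_state cur = {
		.crtc = (void *)1, .fb = (void *)2,
		.src_w = 64u << 16, .src_h = 64u << 16,
		.crtc_w = 64, .crtc_h = 64, .visible = true,
	};
	struct toy_plane_state next = cur;

	next.crtc_x += 10;	/* pure cursor move: fast path OK */
	printf("move-only update: %d\n", can_take_fast_path(&cur, &next));

	next.crtc_w = 32;	/* resize: forces the full commit path */
	printf("resize update: %d\n", can_take_fast_path(&cur, &next));
	return 0;
}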
+ */ + if (new_plane_state->visible != plane->state->visible) + return -EINVAL; + + return 0; +} + +static void mdp5_plane_atomic_async_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_framebuffer *old_fb = plane->state->fb; + + plane->state->src_x = new_state->src_x; + plane->state->src_y = new_state->src_y; + plane->state->crtc_x = new_state->crtc_x; + plane->state->crtc_y = new_state->crtc_y; + + if (plane_enabled(new_state)) { + struct mdp5_ctl *ctl; + struct mdp5_pipeline *pipeline = + mdp5_crtc_get_pipeline(new_state->crtc); + int ret; + + ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb, + &new_state->src, &new_state->dst); + WARN_ON(ret < 0); + + ctl = mdp5_crtc_get_ctl(new_state->crtc); + + mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane), true); + } + + *to_mdp5_plane_state(plane->state) = + *to_mdp5_plane_state(new_state); + + new_state->fb = old_fb; +} + +static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = { + .prepare_fb = mdp5_plane_prepare_fb, + .cleanup_fb = mdp5_plane_cleanup_fb, + .atomic_check = mdp5_plane_atomic_check, + .atomic_update = mdp5_plane_atomic_update, + .atomic_async_check = mdp5_plane_atomic_async_check, + .atomic_async_update = mdp5_plane_atomic_async_update, +}; + +static void set_scanout_locked(struct mdp5_kms *mdp5_kms, + enum mdp5_pipe pipe, + struct drm_framebuffer *fb) +{ + struct msm_kms *kms = &mdp5_kms->base.base; + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe), + MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) | + MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1])); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe), + MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) | + MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3])); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), + msm_framebuffer_iova(fb, kms->aspace, 0)); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), + msm_framebuffer_iova(fb, kms->aspace, 1)); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), + msm_framebuffer_iova(fb, kms->aspace, 2)); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), + msm_framebuffer_iova(fb, kms->aspace, 3)); +} + +/* Note: mdp5_plane->pipe_lock must be locked */ +static void csc_disable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe) +{ + uint32_t value = mdp5_read(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe)) & + ~MDP5_PIPE_OP_MODE_CSC_1_EN; + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), value); +} + +/* Note: mdp5_plane->pipe_lock must be locked */ +static void csc_enable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe, + struct csc_cfg *csc) +{ + uint32_t i, mode = 0; /* RGB, no CSC */ + uint32_t *matrix; + + if (unlikely(!csc)) + return; + + if ((csc->type == CSC_YUV2RGB) || (CSC_YUV2YUV == csc->type)) + mode |= MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(DATA_FORMAT_YUV); + if ((csc->type == CSC_RGB2YUV) || (CSC_YUV2YUV == csc->type)) + mode |= MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(DATA_FORMAT_YUV); + mode |= MDP5_PIPE_OP_MODE_CSC_1_EN; + mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), mode); + + matrix = csc->matrix; + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(pipe), + MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(matrix[0]) | + MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(matrix[1])); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(pipe), + MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(matrix[2]) | + MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(matrix[3])); + mdp5_write(mdp5_kms, 
REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(pipe), + MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(matrix[4]) | + MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(matrix[5])); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(pipe), + MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(matrix[6]) | + MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(matrix[7])); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(pipe), + MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(matrix[8])); + + for (i = 0; i < ARRAY_SIZE(csc->pre_bias); i++) { + uint32_t *pre_clamp = csc->pre_clamp; + uint32_t *post_clamp = csc->post_clamp; + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_CLAMP(pipe, i), + MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(pre_clamp[2*i+1]) | + MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(pre_clamp[2*i])); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_CLAMP(pipe, i), + MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(post_clamp[2*i+1]) | + MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(post_clamp[2*i])); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_BIAS(pipe, i), + MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(csc->pre_bias[i])); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_BIAS(pipe, i), + MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(csc->post_bias[i])); + } +} + +#define PHASE_STEP_SHIFT 21 +#define DOWN_SCALE_RATIO_MAX 32 /* 2^(26-21) */ + +static int calc_phase_step(uint32_t src, uint32_t dst, uint32_t *out_phase) +{ + uint32_t unit; + + if (src == 0 || dst == 0) + return -EINVAL; + + /* + * PHASE_STEP_X/Y is coded on 26 bits (25:0), + * where 2^21 represents the unity "1" in fixed-point hardware design. + * This leaves 5 bits for the integer part (downscale case): + * -> maximum downscale ratio = 0b1_1111 = 31 + */ + if (src > (dst * DOWN_SCALE_RATIO_MAX)) + return -EOVERFLOW; + + unit = 1 << PHASE_STEP_SHIFT; + *out_phase = mult_frac(unit, src, dst); + + return 0; +} + +static int calc_scalex_steps(struct drm_plane *plane, + uint32_t pixel_format, uint32_t src, uint32_t dest, + uint32_t phasex_steps[COMP_MAX]) +{ + const struct drm_format_info *info = drm_format_info(pixel_format); + struct mdp5_kms *mdp5_kms = get_kms(plane); + struct device *dev = mdp5_kms->dev->dev; + uint32_t phasex_step; + int ret; + + ret = calc_phase_step(src, dest, &phasex_step); + if (ret) { + DRM_DEV_ERROR(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret); + return ret; + } + + phasex_steps[COMP_0] = phasex_step; + phasex_steps[COMP_3] = phasex_step; + phasex_steps[COMP_1_2] = phasex_step / info->hsub; + + return 0; +} + +static int calc_scaley_steps(struct drm_plane *plane, + uint32_t pixel_format, uint32_t src, uint32_t dest, + uint32_t phasey_steps[COMP_MAX]) +{ + const struct drm_format_info *info = drm_format_info(pixel_format); + struct mdp5_kms *mdp5_kms = get_kms(plane); + struct device *dev = mdp5_kms->dev->dev; + uint32_t phasey_step; + int ret; + + ret = calc_phase_step(src, dest, &phasey_step); + if (ret) { + DRM_DEV_ERROR(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret); + return ret; + } + + phasey_steps[COMP_0] = phasey_step; + phasey_steps[COMP_3] = phasey_step; + phasey_steps[COMP_1_2] = phasey_step / info->vsub; + + return 0; +} + +static uint32_t get_scale_config(const struct mdp_format *format, + uint32_t src, uint32_t dst, bool horz) +{ + const struct drm_format_info *info = drm_format_info(format->base.pixel_format); + bool scaling = format->is_yuv ? true : (src != dst); + uint32_t sub; + uint32_t ya_filter, uv_filter; + bool yuv = format->is_yuv; + + if (!scaling) + return 0; + + if (yuv) { + sub = horz ? 
info->hsub : info->vsub; + uv_filter = ((src / sub) <= dst) ? + SCALE_FILTER_BIL : SCALE_FILTER_PCMN; + } + ya_filter = (src <= dst) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN; + + if (horz) + return MDP5_PIPE_SCALE_CONFIG_SCALEX_EN | + MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(ya_filter) | + MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(ya_filter) | + COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(uv_filter)); + else + return MDP5_PIPE_SCALE_CONFIG_SCALEY_EN | + MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(ya_filter) | + MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(ya_filter) | + COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter)); +} + +static void calc_pixel_ext(const struct mdp_format *format, + uint32_t src, uint32_t dst, uint32_t phase_step[2], + int pix_ext_edge1[COMP_MAX], int pix_ext_edge2[COMP_MAX], + bool horz) +{ + bool scaling = format->is_yuv ? true : (src != dst); + int i; + + /* + * Note: + * We assume here that: + * 1. PCMN filter is used for downscale + * 2. bilinear filter is used for upscale + * 3. we are in a single pipe configuration + */ + + for (i = 0; i < COMP_MAX; i++) { + pix_ext_edge1[i] = 0; + pix_ext_edge2[i] = scaling ? 1 : 0; + } +} + +static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe, + const struct mdp_format *format, + uint32_t src_w, int pe_left[COMP_MAX], int pe_right[COMP_MAX], + uint32_t src_h, int pe_top[COMP_MAX], int pe_bottom[COMP_MAX]) +{ + const struct drm_format_info *info = drm_format_info(format->base.pixel_format); + uint32_t lr, tb, req; + int i; + + for (i = 0; i < COMP_MAX; i++) { + uint32_t roi_w = src_w; + uint32_t roi_h = src_h; + + if (format->is_yuv && i == COMP_1_2) { + roi_w /= info->hsub; + roi_h /= info->vsub; + } + + lr = (pe_left[i] >= 0) ? + MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT(pe_left[i]) : + MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF(pe_left[i]); + + lr |= (pe_right[i] >= 0) ? + MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT(pe_right[i]) : + MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF(pe_right[i]); + + tb = (pe_top[i] >= 0) ? + MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT(pe_top[i]) : + MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF(pe_top[i]); + + tb |= (pe_bottom[i] >= 0) ? 
+ MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT(pe_bottom[i]) : + MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF(pe_bottom[i]); + + req = MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(roi_w + + pe_left[i] + pe_right[i]); + + req |= MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(roi_h + + pe_top[i] + pe_bottom[i]); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_LR(pipe, i), lr); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_TB(pipe, i), tb); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(pipe, i), req); + + DBG("comp-%d (L/R): rpt=%d/%d, ovf=%d/%d, req=%d", i, + FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT), + FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT), + FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF), + FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF), + FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT)); + + DBG("comp-%d (T/B): rpt=%d/%d, ovf=%d/%d, req=%d", i, + FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT), + FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT), + FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF), + FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF), + FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM)); + } +} + +struct pixel_ext { + int left[COMP_MAX]; + int right[COMP_MAX]; + int top[COMP_MAX]; + int bottom[COMP_MAX]; +}; + +struct phase_step { + u32 x[COMP_MAX]; + u32 y[COMP_MAX]; +}; + +static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms, + struct mdp5_hw_pipe *hwpipe, + struct drm_framebuffer *fb, + struct phase_step *step, + struct pixel_ext *pe, + u32 scale_config, u32 hdecm, u32 vdecm, + bool hflip, bool vflip, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + u32 src_img_w, u32 src_img_h, + u32 src_x, u32 src_y, + u32 src_w, u32 src_h) +{ + enum mdp5_pipe pipe = hwpipe->pipe; + bool has_pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT; + const struct mdp_format *format = + to_mdp_format(msm_framebuffer_format(fb)); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe), + MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_img_w) | + MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_img_h)); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe), + MDP5_PIPE_SRC_SIZE_WIDTH(src_w) | + MDP5_PIPE_SRC_SIZE_HEIGHT(src_h)); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe), + MDP5_PIPE_SRC_XY_X(src_x) | + MDP5_PIPE_SRC_XY_Y(src_y)); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe), + MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) | + MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h)); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe), + MDP5_PIPE_OUT_XY_X(crtc_x) | + MDP5_PIPE_OUT_XY_Y(crtc_y)); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe), + MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) | + MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) | + MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) | + MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) | + COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) | + MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) | + MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) | + COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) | + MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) | + MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample)); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe), + MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) | + MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) | + MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) | + MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3])); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe), + (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) | + (vflip ? 
MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) | + COND(has_pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) | + MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS)); + + /* not using secure mode: */ + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0); + + if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) + mdp5_write_pixel_ext(mdp5_kms, pipe, format, + src_w, pe->left, pe->right, + src_h, pe->top, pe->bottom); + + if (hwpipe->caps & MDP_PIPE_CAP_SCALE) { + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), + step->x[COMP_0]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), + step->y[COMP_0]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe), + step->x[COMP_1_2]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe), + step->y[COMP_1_2]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe), + MDP5_PIPE_DECIMATION_VERT(vdecm) | + MDP5_PIPE_DECIMATION_HORZ(hdecm)); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), + scale_config); + } + + if (hwpipe->caps & MDP_PIPE_CAP_CSC) { + if (MDP_FORMAT_IS_YUV(format)) + csc_enable(mdp5_kms, pipe, + mdp_get_default_csc_cfg(CSC_YUV2RGB)); + else + csc_disable(mdp5_kms, pipe); + } + + set_scanout_locked(mdp5_kms, pipe, fb); +} + +static int mdp5_plane_mode_set(struct drm_plane *plane, + struct drm_crtc *crtc, struct drm_framebuffer *fb, + struct drm_rect *src, struct drm_rect *dest) +{ + struct drm_plane_state *pstate = plane->state; + struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe; + struct mdp5_kms *mdp5_kms = get_kms(plane); + enum mdp5_pipe pipe = hwpipe->pipe; + struct mdp5_hw_pipe *right_hwpipe; + const struct mdp_format *format; + uint32_t nplanes, config = 0; + struct phase_step step = { { 0 } }; + struct pixel_ext pe = { { 0 } }; + uint32_t hdecm = 0, vdecm = 0; + uint32_t pix_format; + unsigned int rotation; + bool vflip, hflip; + int crtc_x, crtc_y; + unsigned int crtc_w, crtc_h; + uint32_t src_x, src_y; + uint32_t src_w, src_h; + uint32_t src_img_w, src_img_h; + int ret; + + nplanes = fb->format->num_planes; + + /* bad formats should already be rejected: */ + if (WARN_ON(nplanes > pipe2nclients(pipe))) + return -EINVAL; + + format = to_mdp_format(msm_framebuffer_format(fb)); + pix_format = format->base.pixel_format; + + src_x = src->x1; + src_y = src->y1; + src_w = drm_rect_width(src); + src_h = drm_rect_height(src); + + crtc_x = dest->x1; + crtc_y = dest->y1; + crtc_w = drm_rect_width(dest); + crtc_h = drm_rect_height(dest); + + /* src values are in Q16 fixed point, convert to integer: */ + src_x = src_x >> 16; + src_y = src_y >> 16; + src_w = src_w >> 16; + src_h = src_h >> 16; + + src_img_w = min(fb->width, src_w); + src_img_h = min(fb->height, src_h); + + DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name, + fb->base.id, src_x, src_y, src_w, src_h, + crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h); + + right_hwpipe = to_mdp5_plane_state(pstate)->r_hwpipe; + if (right_hwpipe) { + /* + * if the plane comprises of 2 hw pipes, assume that the width + * is split equally across them. 
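The right_hwpipe handling that follows implements the split this comment describes: both pipes are programmed identically except for the horizontal origins, which the second mode_set call offsets by the halved width. A standalone sketch of that bookkeeping, with plain integers standing in for the driver's state:

#include <stdint.h>
#include <stdio.h>

struct half {
	uint32_t src_x, src_w;
	int32_t crtc_x;
	uint32_t crtc_w;
};

int main(void)
{
	/* already converted from Q16.16 to integer pixels */
	uint32_t src_x = 0, src_w = 3840;
	int32_t crtc_x = 0;
	uint32_t crtc_w = 3840;

	struct half left = {
		.src_x = src_x, .src_w = src_w / 2,
		.crtc_x = crtc_x, .crtc_w = crtc_w / 2,
	};
	struct half right = {
		.src_x = src_x + src_w / 2, .src_w = src_w / 2,
		.crtc_x = crtc_x + (int32_t)(crtc_w / 2), .crtc_w = crtc_w / 2,
	};

	printf("left:  src_x=%u src_w=%u crtc_x=%d crtc_w=%u\n",
	       left.src_x, left.src_w, left.crtc_x, left.crtc_w);
	printf("right: src_x=%u src_w=%u crtc_x=%d crtc_w=%u\n",
	       right.src_x, right.src_w, right.crtc_x, right.crtc_w);
	return 0;
}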
The only parameters that vary
+ between the 2 pipes are src_x and crtc_x
+ */
+ crtc_w /= 2;
+ src_w /= 2;
+ src_img_w /= 2;
+ }
+
+ ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, step.x);
+ if (ret)
+ return ret;
+
+ ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, step.y);
+ if (ret)
+ return ret;
+
+ if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) {
+ calc_pixel_ext(format, src_w, crtc_w, step.x,
+ pe.left, pe.right, true);
+ calc_pixel_ext(format, src_h, crtc_h, step.y,
+ pe.top, pe.bottom, false);
+ }
+
+ /* TODO calc hdecm, vdecm */
+
+ /* SCALE is used to both scale and up-sample chroma components */
+ config |= get_scale_config(format, src_w, crtc_w, true);
+ config |= get_scale_config(format, src_h, crtc_h, false);
+ DBG("scale config = %x", config);
+
+ rotation = drm_rotation_simplify(pstate->rotation,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+ hflip = !!(rotation & DRM_MODE_REFLECT_X);
+ vflip = !!(rotation & DRM_MODE_REFLECT_Y);
+
+ mdp5_hwpipe_mode_set(mdp5_kms, hwpipe, fb, &step, &pe,
+ config, hdecm, vdecm, hflip, vflip,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_img_w, src_img_h,
+ src_x, src_y, src_w, src_h);
+ if (right_hwpipe)
+ mdp5_hwpipe_mode_set(mdp5_kms, right_hwpipe, fb, &step, &pe,
+ config, hdecm, vdecm, hflip, vflip,
+ crtc_x + crtc_w, crtc_y, crtc_w, crtc_h,
+ src_img_w, src_img_h,
+ src_x + src_w, src_y, src_w, src_h);
+
+ return ret;
+}
+
+/*
+ * Use this func and the one below only after the atomic state has been
+ * successfully swapped
+ */
+enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
+
+ if (WARN_ON(!pstate->hwpipe))
+ return SSPP_NONE;
+
+ return pstate->hwpipe->pipe;
+}
+
+enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
+
+ if (!pstate->r_hwpipe)
+ return SSPP_NONE;
+
+ return pstate->r_hwpipe->pipe;
+}
+
+uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
+ u32 mask;
+
+ if (WARN_ON(!pstate->hwpipe))
+ return 0;
+
+ mask = pstate->hwpipe->flush_mask;
+
+ if (pstate->r_hwpipe)
+ mask |= pstate->r_hwpipe->flush_mask;
+
+ return mask;
+}
+
+/* initialize plane */
+struct drm_plane *mdp5_plane_init(struct drm_device *dev,
+ enum drm_plane_type type)
+{
+ struct drm_plane *plane = NULL;
+ struct mdp5_plane *mdp5_plane;
+ int ret;
+
+ mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL);
+ if (!mdp5_plane) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ plane = &mdp5_plane->base;
+
+ mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
+ ARRAY_SIZE(mdp5_plane->formats), false);
+
+ ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
+ mdp5_plane->formats, mdp5_plane->nformats,
+ NULL, type, NULL);
+ if (ret)
+ goto fail;
+
+ drm_plane_helper_add(plane, &mdp5_plane_helper_funcs);
+
+ mdp5_plane_install_properties(plane, &plane->base);
+
+ drm_plane_enable_fb_damage_clips(plane);
+
+ return plane;
+
+fail:
+ if (plane)
+ mdp5_plane_destroy(plane);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
new file mode 100644
index 000000000..56a306354
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_util.h>
+
+#include "mdp5_kms.h"
+#include "mdp5_smp.h"
+
+
+struct mdp5_smp {
+ struct drm_device *dev;
+
+ uint8_t reserved[MAX_CLIENTS]; /* fixed MMBs allocation per client */
+
+ int blk_cnt;
+ int blk_size;
+
+ /* register cache */
+ u32 alloc_w[22];
+ u32 alloc_r[22];
+ u32 pipe_reqprio_fifo_wm0[SSPP_MAX];
+ u32 pipe_reqprio_fifo_wm1[SSPP_MAX];
+ u32 pipe_reqprio_fifo_wm2[SSPP_MAX];
+};
+
+static inline
+struct mdp5_kms *get_kms(struct mdp5_smp *smp)
+{
+ struct msm_drm_private *priv = smp->dev->dev_private;
+
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static inline u32 pipe2client(enum mdp5_pipe pipe, int plane)
+{
+#define CID_UNUSED 0
+
+ if (WARN_ON(plane >= pipe2nclients(pipe)))
+ return CID_UNUSED;
+
+ /*
+ * Note on SMP clients:
+ * For ViG pipes, the Y/Cr/Cb-component fetch clients are always
+ * consecutive, and in that order.
+ *
+ * e.g.:
+ * if mdp5_cfg->smp.clients[SSPP_VIG0] = N,
+ * Y plane's client ID is N
+ * Cr plane's client ID is N + 1
+ * Cb plane's client ID is N + 2
+ */
+
+ return mdp5_cfg->smp.clients[pipe] + plane;
+}
+
+/* allocate blocks for the specified request: */
+static int smp_request_block(struct mdp5_smp *smp,
+ struct mdp5_smp_state *state,
+ u32 cid, int nblks)
+{
+ void *cs = state->client_state[cid];
+ int i, avail, cnt = smp->blk_cnt;
+ uint8_t reserved;
+
+ /* we shouldn't be requesting blocks for an in-use client: */
+ WARN_ON(!bitmap_empty(cs, cnt));
+
+ reserved = smp->reserved[cid];
+
+ if (reserved) {
+ nblks = max(0, nblks - reserved);
+ DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
+ }
+
+ avail = cnt - bitmap_weight(state->state, cnt);
+ if (nblks > avail) {
+ DRM_DEV_ERROR(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
+ nblks, avail);
+ return -ENOSPC;
+ }
+
+ for (i = 0; i < nblks; i++) {
+ int blk = find_first_zero_bit(state->state, cnt);
+ set_bit(blk, cs);
+ set_bit(blk, state->state);
+ }
+
+ return 0;
+}
+
+static void set_fifo_thresholds(struct mdp5_smp *smp,
+ enum mdp5_pipe pipe, int nblks)
+{
+ u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
+ u32 val;
+
+ /* 1/4 of SMP pool that is being fetched */
+ val = (nblks * smp_entries_per_blk) / 4;
+
+ smp->pipe_reqprio_fifo_wm0[pipe] = val * 1;
+ smp->pipe_reqprio_fifo_wm1[pipe] = val * 2;
+ smp->pipe_reqprio_fifo_wm2[pipe] = val * 3;
+}
+
+/*
+ * NOTE: looks like if horizontal decimation is used (if we supported that)
+ * then the width used to calculate SMP block requirements is the post-
+ * decimated width. I.e. SMP buffering sits downstream of decimation (which
+ * presumably happens during the dma from scanout buffer).
+ */
+uint32_t mdp5_smp_calculate(struct mdp5_smp *smp,
+ const struct mdp_format *format,
+ u32 width, bool hdecim)
+{
+ const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
+ int i, hsub, nplanes, nlines;
+ uint32_t blkcfg = 0;
+
+ nplanes = info->num_planes;
+ hsub = info->hsub;
+
+ /* different if BWC (compressed framebuffer?) enabled: */
+ nlines = 2;
+
+ /* Newer MDPs have split/packing logic, which fetches sub-sampled
+ * U and V components (splits them from Y if necessary) and packs
+ * them together, then writes to SMP using a single client. 
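set_fifo_thresholds() above is easy to replay by hand: one MMB of blk_size bytes holds blk_size/16 128-bit entries, and the three REQPRIO watermarks sit at 1/4, 2/4 and 3/4 of the entries allocated to the pipe. A standalone rerun, with 4096 bytes as a made-up MMB size (real values come from the hw config):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

int main(void)
{
	uint32_t blk_size = 4096;	/* hypothetical MMB size in bytes */
	uint32_t nblks = 2;		/* blocks allocated to the pipe */
	uint32_t entries_per_blk = blk_size / (128 / BITS_PER_BYTE);
	uint32_t val = nblks * entries_per_blk / 4;

	/* 4096/16 = 256 entries per block, so 2 blocks -> val = 128 */
	printf("wm0=%u wm1=%u wm2=%u\n", val * 1, val * 2, val * 3);
	/* prints: wm0=128 wm1=256 wm2=384 */
	return 0;
}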
+ */ + if ((rev > 0) && (format->chroma_sample > CHROMA_FULL)) { + nplanes = 2; + + /* if decimation is enabled, HW decimates less on the + * sub sampled chroma components + */ + if (hdecim && (hsub > 1)) + hsub = 1; + } + + for (i = 0; i < nplanes; i++) { + int n, fetch_stride, cpp; + + cpp = info->cpp[i]; + fetch_stride = width * cpp / (i ? hsub : 1); + + n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size); + + /* for hw rev v1.00 */ + if (rev == 0) + n = roundup_pow_of_two(n); + + blkcfg |= (n << (8 * i)); + } + + return blkcfg; +} + +int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state, + enum mdp5_pipe pipe, uint32_t blkcfg) +{ + struct mdp5_kms *mdp5_kms = get_kms(smp); + struct drm_device *dev = mdp5_kms->dev; + int i, ret; + + for (i = 0; i < pipe2nclients(pipe); i++) { + u32 cid = pipe2client(pipe, i); + int n = blkcfg & 0xff; + + if (!n) + continue; + + DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n); + ret = smp_request_block(smp, state, cid, n); + if (ret) { + DRM_DEV_ERROR(dev->dev, "Cannot allocate %d SMP blocks: %d\n", + n, ret); + return ret; + } + + blkcfg >>= 8; + } + + state->assigned |= (1 << pipe); + + return 0; +} + +/* Release SMP blocks for all clients of the pipe */ +void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state, + enum mdp5_pipe pipe) +{ + int i; + int cnt = smp->blk_cnt; + + for (i = 0; i < pipe2nclients(pipe); i++) { + u32 cid = pipe2client(pipe, i); + void *cs = state->client_state[cid]; + + /* update global state: */ + bitmap_andnot(state->state, state->state, cs, cnt); + + /* clear client's state */ + bitmap_zero(cs, cnt); + } + + state->released |= (1 << pipe); +} + +/* NOTE: SMP_ALLOC_* regs are *not* double buffered, so release has to + * happen after scanout completes. 
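A worked example of the blkcfg value built by mdp5_smp_calculate() above and unpacked one byte per client by mdp5_smp_assign(): NV12 at 1920 pixels on a rev > 0, non-BWC device, modeled as the two-plane (Y plus packed CbCr) case the split/pack comment describes. The 4096-byte MMB size is a made-up example value:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	const uint32_t width = 1920, blk_size = 4096, nlines = 2;
	/* NV12 after the split/pack fixup: plane 0 is Y (1 byte/px),
	 * plane 1 is packed CbCr (2 bytes/px, horizontally subsampled) */
	const uint32_t cpp[2] = { 1, 2 };
	const uint32_t hsub = 2;
	uint32_t blkcfg = 0;

	for (int i = 0; i < 2; i++) {
		uint32_t stride = width * cpp[i] / (i ? hsub : 1);
		uint32_t n = DIV_ROUND_UP(stride * nlines, blk_size);

		blkcfg |= n << (8 * i);
		printf("plane %d: fetch stride %u -> %u block(s)\n",
		       i, stride, n);
	}
	printf("blkcfg = 0x%x\n", blkcfg);	/* prints 0x101 */
	return 0;
}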
+ */ +static unsigned update_smp_state(struct mdp5_smp *smp, + u32 cid, mdp5_smp_state_t *assigned) +{ + int cnt = smp->blk_cnt; + unsigned nblks = 0; + u32 blk, val; + + for_each_set_bit(blk, *assigned, cnt) { + int idx = blk / 3; + int fld = blk % 3; + + val = smp->alloc_w[idx]; + + switch (fld) { + case 0: + val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK; + val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid); + break; + case 1: + val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK; + val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid); + break; + case 2: + val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK; + val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid); + break; + } + + smp->alloc_w[idx] = val; + smp->alloc_r[idx] = val; + + nblks++; + } + + return nblks; +} + +static void write_smp_alloc_regs(struct mdp5_smp *smp) +{ + struct mdp5_kms *mdp5_kms = get_kms(smp); + int i, num_regs; + + num_regs = smp->blk_cnt / 3 + 1; + + for (i = 0; i < num_regs; i++) { + mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(i), + smp->alloc_w[i]); + mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(i), + smp->alloc_r[i]); + } +} + +static void write_smp_fifo_regs(struct mdp5_smp *smp) +{ + struct mdp5_kms *mdp5_kms = get_kms(smp); + int i; + + for (i = 0; i < mdp5_kms->num_hwpipes; i++) { + struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i]; + enum mdp5_pipe pipe = hwpipe->pipe; + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), + smp->pipe_reqprio_fifo_wm0[pipe]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), + smp->pipe_reqprio_fifo_wm1[pipe]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), + smp->pipe_reqprio_fifo_wm2[pipe]); + } +} + +void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state) +{ + enum mdp5_pipe pipe; + + for_each_set_bit(pipe, &state->assigned, sizeof(state->assigned) * 8) { + unsigned i, nblks = 0; + + for (i = 0; i < pipe2nclients(pipe); i++) { + u32 cid = pipe2client(pipe, i); + void *cs = state->client_state[cid]; + + nblks += update_smp_state(smp, cid, cs); + + DBG("assign %s:%u, %u blks", + pipe2name(pipe), i, nblks); + } + + set_fifo_thresholds(smp, pipe, nblks); + } + + write_smp_alloc_regs(smp); + write_smp_fifo_regs(smp); + + state->assigned = 0; +} + +void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state) +{ + enum mdp5_pipe pipe; + + for_each_set_bit(pipe, &state->released, sizeof(state->released) * 8) { + DBG("release %s", pipe2name(pipe)); + set_fifo_thresholds(smp, pipe, 0); + } + + write_smp_fifo_regs(smp); + + state->released = 0; +} + +void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p) +{ + struct mdp5_kms *mdp5_kms = get_kms(smp); + struct mdp5_hw_pipe_state *hwpstate; + struct mdp5_smp_state *state; + struct mdp5_global_state *global_state; + int total = 0, i, j; + + drm_printf(p, "name\tinuse\tplane\n"); + drm_printf(p, "----\t-----\t-----\n"); + + if (drm_can_sleep()) + drm_modeset_lock(&mdp5_kms->glob_state_lock, NULL); + + global_state = mdp5_get_existing_global_state(mdp5_kms); + + /* grab these *after* we hold the state_lock */ + hwpstate = &global_state->hwpipe; + state = &global_state->smp; + + for (i = 0; i < mdp5_kms->num_hwpipes; i++) { + struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i]; + struct drm_plane *plane = hwpstate->hwpipe_to_plane[hwpipe->idx]; + enum mdp5_pipe pipe = hwpipe->pipe; + for (j = 0; j < pipe2nclients(pipe); j++) { + u32 cid = pipe2client(pipe, j); + void *cs = state->client_state[cid]; + int inuse = bitmap_weight(cs, smp->blk_cnt); + + drm_printf(p, "%s:%d\t%d\t%s\n", + 
pipe2name(pipe), j, inuse,
+ plane ? plane->name : NULL);
+
+ total += inuse;
+ }
+ }
+
+ drm_printf(p, "TOTAL:\t%d\t(of %d)\n", total, smp->blk_cnt);
+ drm_printf(p, "AVAIL:\t%d\n", smp->blk_cnt -
+ bitmap_weight(state->state, smp->blk_cnt));
+
+ if (drm_can_sleep())
+ drm_modeset_unlock(&mdp5_kms->glob_state_lock);
+}
+
+void mdp5_smp_destroy(struct mdp5_smp *smp)
+{
+ kfree(smp);
+}
+
+struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg)
+{
+ struct mdp5_smp_state *state;
+ struct mdp5_global_state *global_state;
+ struct mdp5_smp *smp = NULL;
+ int ret;
+
+ smp = kzalloc(sizeof(*smp), GFP_KERNEL);
+ if (unlikely(!smp)) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ smp->dev = mdp5_kms->dev;
+ smp->blk_cnt = cfg->mmb_count;
+ smp->blk_size = cfg->mmb_size;
+
+ global_state = mdp5_get_existing_global_state(mdp5_kms);
+ state = &global_state->smp;
+
+ /* statically tied MMBs cannot be re-allocated: */
+ bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt);
+ memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));
+
+ return smp;
+fail:
+ if (smp)
+ mdp5_smp_destroy(smp);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
new file mode 100644
index 000000000..ba5618e13
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __MDP5_SMP_H__
+#define __MDP5_SMP_H__
+
+#include <drm/drm_print.h>
+
+#include "msm_drv.h"
+
+/*
+ * SMP - Shared Memory Pool:
+ *
+ * SMP blocks are shared between all the clients, where each plane in
+ * a scanout buffer is a SMP client. I.e. scanout of 3 plane I420 on
+ * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR.
+ *
+ * Based on the size of the attached scanout buffer, a certain # of
+ * blocks must be allocated to that client out of the shared pool.
+ *
+ * In some hw, some blocks are statically allocated for certain pipes
+ * and CANNOT be re-allocated (e.g.: MMB0 and MMB1 both tied to RGB0).
+ *
+ *
+ * Atomic SMP State:
+ *
+ * On atomic updates that modify SMP configuration, the state is cloned
+ * (copied) and modified. For test-only, or in cases where atomic
+ * update fails (or if we hit ww_mutex deadlock/backoff condition) the
+ * new state is simply thrown away.
+ *
+ * Because the SMP registers are not double buffered, updates are a
+ * two step process:
+ *
+ * 1) in _prepare_commit() we configure things (via read-modify-write)
+ * for the newly assigned pipes, so we don't take away blocks
+ * assigned to pipes that are still scanning out
+ * 2) in _complete_commit(), after vblank/etc, we clear things for the
+ * released clients, since at that point old pipes are no longer
+ * scanning out.
+ */
+struct mdp5_smp_state {
+ /* global state of what blocks are in use: */
+ mdp5_smp_state_t state;
+
+ /* per client state of what blocks they are using: */
+ mdp5_smp_state_t client_state[MAX_CLIENTS];
+
+ /* assigned pipes (hw updated at _prepare_commit()): */
+ unsigned long assigned;
+
+ /* released pipes (hw updated at _complete_commit()): */
+ unsigned long released;
+};
+
+struct mdp5_kms;
+struct mdp5_smp;
+
+/*
+ * SMP module prototypes:
+ * mdp5_smp_init() returns a SMP @handler,
+ * which is then used to call the other mdp5_smp_*(handler, ...) functions. 
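Behind these prototypes, mdp5_smp_prepare_commit() funnels into update_smp_state() above, which packs three client IDs per SMP_ALLOC register: MMB n lands in register n/3, field n%3. A standalone model of that register cache, assuming the 8-bit field width implied by the three-per-register split:

#include <stdint.h>
#include <stdio.h>

#define NBLKS 9	/* toy pool size; real blk_cnt comes from the hw config */

int main(void)
{
	uint32_t alloc_w[(NBLKS + 2) / 3] = { 0 };
	/* pretend MMBs 4 and 5 were just assigned to client id 7 */
	int assigned[] = { 4, 5 };
	uint32_t cid = 7;

	for (unsigned i = 0; i < sizeof(assigned) / sizeof(assigned[0]); i++) {
		int blk = assigned[i];
		int idx = blk / 3;	/* which ALLOC register */
		int fld = blk % 3;	/* which client field in it */

		alloc_w[idx] &= ~(0xffu << (8 * fld));	/* clear old client */
		alloc_w[idx] |= cid << (8 * fld);	/* install new one */
	}

	for (unsigned i = 0; i < (NBLKS + 2) / 3; i++)
		printf("SMP_ALLOC_W[%u] = 0x%08x\n", i, alloc_w[i]);
	/* MMBs 4 and 5 both land in register 1: 0x00070700 */
	return 0;
}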
+ */ + +struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, + const struct mdp5_smp_block *cfg); +void mdp5_smp_destroy(struct mdp5_smp *smp); + +void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p); + +uint32_t mdp5_smp_calculate(struct mdp5_smp *smp, + const struct mdp_format *format, + u32 width, bool hdecim); + +int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state, + enum mdp5_pipe pipe, uint32_t blkcfg); +void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state, + enum mdp5_pipe pipe); + +void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state); +void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state); + +#endif /* __MDP5_SMP_H__ */ diff --git a/drivers/gpu/drm/msm/disp/mdp_common.xml.h b/drivers/gpu/drm/msm/disp/mdp_common.xml.h new file mode 100644 index 000000000..be759106b --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp_common.xml.h @@ -0,0 +1,111 @@ +#ifndef MDP_COMMON_XML +#define MDP_COMMON_XML + +/* Autogenerated file, DO NOT EDIT manually! + +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://github.com/freedreno/envytools/ +git clone https://github.com/freedreno/envytools.git + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13) +- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) +- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22) + +Copyright (C) 2013-2021 by the following authors: +- Rob Clark (robclark) +- Ilia Mirkin (imirkin) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without 
restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ + + +enum mdp_chroma_samp_type { + CHROMA_FULL = 0, + CHROMA_H2V1 = 1, + CHROMA_H1V2 = 2, + CHROMA_420 = 3, +}; + +enum mdp_fetch_type { + MDP_PLANE_INTERLEAVED = 0, + MDP_PLANE_PLANAR = 1, + MDP_PLANE_PSEUDO_PLANAR = 2, +}; + +enum mdp_mixer_stage_id { + STAGE_UNUSED = 0, + STAGE_BASE = 1, + STAGE0 = 2, + STAGE1 = 3, + STAGE2 = 4, + STAGE3 = 5, + STAGE4 = 6, + STAGE5 = 7, + STAGE6 = 8, + STAGE_MAX = 8, +}; + +enum mdp_alpha_type { + FG_CONST = 0, + BG_CONST = 1, + FG_PIXEL = 2, + BG_PIXEL = 3, +}; + +enum mdp_component_type { + COMP_0 = 0, + COMP_1_2 = 1, + COMP_3 = 2, + COMP_MAX = 3, +}; + +enum mdp_bpc { + BPC1 = 0, + BPC5 = 1, + BPC6 = 2, + BPC8 = 3, +}; + +enum mdp_bpc_alpha { + BPC1A = 0, + BPC4A = 1, + BPC6A = 2, + BPC8A = 3, +}; + + +#endif /* MDP_COMMON_XML */ diff --git a/drivers/gpu/drm/msm/disp/mdp_format.c b/drivers/gpu/drm/msm/disp/mdp_format.c new file mode 100644 index 000000000..025595336 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp_format.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014 The Linux Foundation. All rights reserved. 
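One reading aid for the csc_convert table below: the matrix coefficients decode consistently as 16-bit two's-complement Q9 fixed point (0x0200 == 1.0), and under that reading the CSC_YUV2RGB entry lands on the familiar BT.601 limited-range YCbCr-to-RGB constants. That interpretation is inferred from the values rather than stated in the source, but it is easy to check standalone:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* the CSC_YUV2RGB matrix from the table below */
	const uint16_t matrix[9] = {
		0x0254, 0x0000, 0x0331,
		0x0254, 0xff37, 0xfe60,
		0x0254, 0x0409, 0x0000,
	};

	for (int i = 0; i < 9; i++) {
		/* reinterpret as signed, then scale by 1/512 (Q9) */
		double coeff = (int16_t)matrix[i] / 512.0;

		printf("% .3f%s", coeff, (i % 3 == 2) ? "\n" : " ");
	}
	/* prints roughly:
	 *  1.164  0.000  1.596
	 *  1.164 -0.393 -0.812
	 *  1.164  2.018  0.000
	 */
	return 0;
}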
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+
+#include "msm_drv.h"
+#include "mdp_kms.h"
+
+static struct csc_cfg csc_convert[CSC_MAX] = {
+ [CSC_RGB2RGB] = {
+ .type = CSC_RGB2RGB,
+ .matrix = {
+ 0x0200, 0x0000, 0x0000,
+ 0x0000, 0x0200, 0x0000,
+ 0x0000, 0x0000, 0x0200
+ },
+ .pre_bias = { 0x0, 0x0, 0x0 },
+ .post_bias = { 0x0, 0x0, 0x0 },
+ .pre_clamp = { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff },
+ .post_clamp = { 0x0, 0xff, 0x0, 0xff, 0x0, 0xff },
+ },
+ [CSC_YUV2RGB] = {
+ .type = CSC_YUV2RGB,
+ .matrix = {
+ 0x0254, 0x0000, 0x0331,
+ 0x0254, 0xff37, 0xfe60,
+ 0x0254, 0x0409, 0x0000
+ },
+ .pre_bias = { 0xfff0, 0xff80, 0xff80 },
+ .post_bias = { 0x00, 0x00, 0x00 },
+ .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
+ .post_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
+ },
+ [CSC_RGB2YUV] = {
+ .type = CSC_RGB2YUV,
+ .matrix = {
+ 0x0083, 0x0102, 0x0032,
+ 0x1fb5, 0x1f6c, 0x00e1,
+ 0x00e1, 0x1f45, 0x1fdc
+ },
+ .pre_bias = { 0x00, 0x00, 0x00 },
+ .post_bias = { 0x10, 0x80, 0x80 },
+ .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
+ .post_clamp = { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0 },
+ },
+ [CSC_YUV2YUV] = {
+ .type = CSC_YUV2YUV,
+ .matrix = {
+ 0x0200, 0x0000, 0x0000,
+ 0x0000, 0x0200, 0x0000,
+ 0x0000, 0x0000, 0x0200
+ },
+ .pre_bias = { 0x00, 0x00, 0x00 },
+ .post_bias = { 0x00, 0x00, 0x00 },
+ .pre_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
+ .post_clamp = { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff },
+ },
+};
+
+#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt, fp, cs, yuv) { \
+ .base = { .pixel_format = DRM_FORMAT_ ## name }, \
+ .bpc_a = BPC ## a ## A, \
+ .bpc_r = BPC ## r, \
+ .bpc_g = BPC ## g, \
+ .bpc_b = BPC ## b, \
+ .unpack = { e0, e1, e2, e3 }, \
+ .alpha_enable = alpha, \
+ .unpack_tight = tight, \
+ .cpp = c, \
+ .unpack_count = cnt, \
+ .fetch_type = fp, \
+ .chroma_sample = cs, \
+ .is_yuv = yuv, \
+}
+
+#define BPC0A 0
+
+/*
+ * Note: Keep RGB formats 1st, followed by YUV formats to avoid breaking
+ * mdp_get_rgb_formats()'s implementation.
+ */
+static const struct mdp_format formats[] = {
+ /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt ... 
*/ + FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(ABGR8888, 8, 8, 8, 8, 2, 0, 1, 3, true, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(RGBA8888, 8, 8, 8, 8, 3, 1, 0, 2, true, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(BGRA8888, 8, 8, 8, 8, 3, 2, 0, 1, true, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(XBGR8888, 8, 8, 8, 8, 2, 0, 1, 3, false, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(RGBX8888, 8, 8, 8, 8, 3, 1, 0, 2, false, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(BGRX8888, 8, 8, 8, 8, 3, 2, 0, 1, false, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + + /* --- RGB formats above / YUV formats below this line --- */ + + /* 2 plane YUV */ + FMT(NV12, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2, + MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true), + FMT(NV21, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2, + MDP_PLANE_PSEUDO_PLANAR, CHROMA_420, true), + FMT(NV16, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 2, 2, + MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true), + FMT(NV61, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 2, 2, + MDP_PLANE_PSEUDO_PLANAR, CHROMA_H2V1, true), + /* 1 plane YUV */ + FMT(VYUY, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 2, 4, + MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), + FMT(UYVY, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 2, 4, + MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), + FMT(YUYV, 0, 8, 8, 8, 0, 1, 0, 2, false, true, 2, 4, + MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), + FMT(YVYU, 0, 8, 8, 8, 0, 2, 0, 1, false, true, 2, 4, + MDP_PLANE_INTERLEAVED, CHROMA_H2V1, true), + /* 3 plane YUV */ + FMT(YUV420, 0, 8, 8, 8, 2, 1, 0, 0, false, true, 1, 1, + MDP_PLANE_PLANAR, CHROMA_420, true), + FMT(YVU420, 0, 8, 8, 8, 1, 2, 0, 0, false, true, 1, 1, + MDP_PLANE_PLANAR, CHROMA_420, true), +}; + +/* + * Note: + * @rgb_only must be set to true, when requesting + * supported formats for RGB pipes. 
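Because the table above keeps every RGB entry ahead of the YUV entries, mdp_get_formats() below can honor rgb_only by simply stopping at the first YUV format, exactly as its note says. A standalone rerun of that contract against a three-entry mock table (not the driver's format list):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_format { const char *name; bool is_yuv; };

static const struct toy_format table[] = {
	{ "XRGB8888", false },
	{ "RGB565",   false },
	{ "NV12",     true  },	/* first YUV entry ends the RGB scan */
};

static uint32_t get_formats(const char **out, uint32_t max, bool rgb_only)
{
	uint32_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (i == max)
			break;
		if (rgb_only && table[i].is_yuv)
			break;
		out[i] = table[i].name;
	}
	return i;
}

int main(void)
{
	const char *out[8];
	uint32_t n = get_formats(out, 8, true);

	printf("%u RGB formats:", n);
	for (uint32_t i = 0; i < n; i++)
		printf(" %s", out[i]);
	printf("\n");	/* prints: 2 RGB formats: XRGB8888 RGB565 */
	return 0;
}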
+ */ +uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats, + bool rgb_only) +{ + uint32_t i; + for (i = 0; i < ARRAY_SIZE(formats); i++) { + const struct mdp_format *f = &formats[i]; + + if (i == max_formats) + break; + + if (rgb_only && MDP_FORMAT_IS_YUV(f)) + break; + + pixel_formats[i] = f->base.pixel_format; + } + + return i; +} + +const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, + uint64_t modifier) +{ + int i; + for (i = 0; i < ARRAY_SIZE(formats); i++) { + const struct mdp_format *f = &formats[i]; + if (f->base.pixel_format == format) + return &f->base; + } + return NULL; +} + +struct csc_cfg *mdp_get_default_csc_cfg(enum csc_type type) +{ + if (WARN_ON(type >= CSC_MAX)) + return NULL; + + return &csc_convert[type]; +} diff --git a/drivers/gpu/drm/msm/disp/mdp_kms.c b/drivers/gpu/drm/msm/disp/mdp_kms.c new file mode 100644 index 000000000..3c35ccfc7 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp_kms.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + */ + + +#include "msm_drv.h" +#include "mdp_kms.h" + + +struct mdp_irq_wait { + struct mdp_irq irq; + int count; +}; + +static DECLARE_WAIT_QUEUE_HEAD(wait_event); + +static DEFINE_SPINLOCK(list_lock); + +static void update_irq(struct mdp_kms *mdp_kms) +{ + struct mdp_irq *irq; + uint32_t irqmask = mdp_kms->vblank_mask; + + assert_spin_locked(&list_lock); + + list_for_each_entry(irq, &mdp_kms->irq_list, node) + irqmask |= irq->irqmask; + + mdp_kms->funcs->set_irqmask(mdp_kms, irqmask, mdp_kms->cur_irq_mask); + mdp_kms->cur_irq_mask = irqmask; +} + +/* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder + * link changes, this must be called to figure out the new global irqmask + */ +void mdp_irq_update(struct mdp_kms *mdp_kms) +{ + unsigned long flags; + spin_lock_irqsave(&list_lock, flags); + update_irq(mdp_kms); + spin_unlock_irqrestore(&list_lock, flags); +} + +void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status) +{ + struct mdp_irq *handler, *n; + unsigned long flags; + + spin_lock_irqsave(&list_lock, flags); + mdp_kms->in_irq = true; + list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) { + if (handler->irqmask & status) { + spin_unlock_irqrestore(&list_lock, flags); + handler->irq(handler, handler->irqmask & status); + spin_lock_irqsave(&list_lock, flags); + } + } + mdp_kms->in_irq = false; + update_irq(mdp_kms); + spin_unlock_irqrestore(&list_lock, flags); + +} + +void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable) +{ + unsigned long flags; + + spin_lock_irqsave(&list_lock, flags); + if (enable) + mdp_kms->vblank_mask |= mask; + else + mdp_kms->vblank_mask &= ~mask; + update_irq(mdp_kms); + spin_unlock_irqrestore(&list_lock, flags); +} + +static void wait_irq(struct mdp_irq *irq, uint32_t irqstatus) +{ + struct mdp_irq_wait *wait = + container_of(irq, struct mdp_irq_wait, irq); + wait->count--; + wake_up_all(&wait_event); +} + +void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask) +{ + struct mdp_irq_wait wait = { + .irq = { + .irq = wait_irq, + .irqmask = irqmask, + }, + .count = 1, + }; + mdp_irq_register(mdp_kms, &wait.irq); + wait_event_timeout(wait_event, (wait.count <= 0), + msecs_to_jiffies(100)); + mdp_irq_unregister(mdp_kms, &wait.irq); +} + +void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq) +{ + unsigned long flags; + bool needs_update = false; + + spin_lock_irqsave(&list_lock, flags); + + if (!irq->registered) 
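/* first registration: recompute the global irqmask, unless we are inside mdp_dispatch_irqs(), which refreshes it before returning */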
{ + irq->registered = true; + list_add(&irq->node, &mdp_kms->irq_list); + needs_update = !mdp_kms->in_irq; + } + + spin_unlock_irqrestore(&list_lock, flags); + + if (needs_update) + mdp_irq_update(mdp_kms); +} + +void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq) +{ + unsigned long flags; + bool needs_update = false; + + spin_lock_irqsave(&list_lock, flags); + + if (irq->registered) { + irq->registered = false; + list_del(&irq->node); + needs_update = !mdp_kms->in_irq; + } + + spin_unlock_irqrestore(&list_lock, flags); + + if (needs_update) + mdp_irq_update(mdp_kms); +} diff --git a/drivers/gpu/drm/msm/disp/mdp_kms.h b/drivers/gpu/drm/msm/disp/mdp_kms.h new file mode 100644 index 000000000..b0286d5d5 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/mdp_kms.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2013 Red Hat + * Author: Rob Clark + */ + +#ifndef __MDP_KMS_H__ +#define __MDP_KMS_H__ + +#include +#include +#include + +#include "msm_drv.h" +#include "msm_kms.h" +#include "mdp_common.xml.h" + +struct mdp_kms; + +struct mdp_kms_funcs { + struct msm_kms_funcs base; + void (*set_irqmask)(struct mdp_kms *mdp_kms, uint32_t irqmask, + uint32_t old_irqmask); +}; + +struct mdp_kms { + struct msm_kms base; + + const struct mdp_kms_funcs *funcs; + + /* irq handling: */ + bool in_irq; + struct list_head irq_list; /* list of mdp4_irq */ + uint32_t vblank_mask; /* irq bits set for userspace vblank */ + uint32_t cur_irq_mask; /* current irq mask */ +}; +#define to_mdp_kms(x) container_of(x, struct mdp_kms, base) + +static inline int mdp_kms_init(struct mdp_kms *mdp_kms, + const struct mdp_kms_funcs *funcs) +{ + mdp_kms->funcs = funcs; + INIT_LIST_HEAD(&mdp_kms->irq_list); + return msm_kms_init(&mdp_kms->base, &funcs->base); +} + +static inline void mdp_kms_destroy(struct mdp_kms *mdp_kms) +{ + msm_kms_destroy(&mdp_kms->base); +} + +/* + * irq helpers: + */ + +/* For transiently registering for different MDP irqs that various parts + * of the KMS code need during setup/configuration. These are not + * necessarily the same as what drm_vblank_get/put() are requesting, and + * the hysteresis in drm_vblank_put() is not necessarily desirable for + * internal housekeeping related irq usage. 
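mdp_irq_wait() below is a typical consumer: it registers a one-shot handler and then blocks for up to 100ms until the requested irq fires.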
+ */ +struct mdp_irq { + struct list_head node; + uint32_t irqmask; + bool registered; + void (*irq)(struct mdp_irq *irq, uint32_t irqstatus); +}; + +void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status); +void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable); +void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask); +void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq); +void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq); +void mdp_irq_update(struct mdp_kms *mdp_kms); + +/* + * pixel format helpers: + */ + +struct mdp_format { + struct msm_format base; + enum mdp_bpc bpc_r, bpc_g, bpc_b; + enum mdp_bpc_alpha bpc_a; + uint8_t unpack[4]; + bool alpha_enable, unpack_tight; + uint8_t cpp, unpack_count; + enum mdp_fetch_type fetch_type; + enum mdp_chroma_samp_type chroma_sample; + bool is_yuv; +}; +#define to_mdp_format(x) container_of(x, struct mdp_format, base) +#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv) + +uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only); +const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier); + +/* MDP capabilities */ +#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */ +#define MDP_CAP_DSC BIT(1) /* VESA Display Stream Compression */ +#define MDP_CAP_CDM BIT(2) /* Chroma Down Module (HDMI 2.0 YUV) */ +#define MDP_CAP_SRC_SPLIT BIT(3) /* Source Split of SSPPs */ + +/* MDP pipe capabilities */ +#define MDP_PIPE_CAP_HFLIP BIT(0) +#define MDP_PIPE_CAP_VFLIP BIT(1) +#define MDP_PIPE_CAP_SCALE BIT(2) +#define MDP_PIPE_CAP_CSC BIT(3) +#define MDP_PIPE_CAP_DECIMATION BIT(4) +#define MDP_PIPE_CAP_SW_PIX_EXT BIT(5) +#define MDP_PIPE_CAP_CURSOR BIT(6) + +/* MDP layer mixer caps */ +#define MDP_LM_CAP_DISPLAY BIT(0) +#define MDP_LM_CAP_WB BIT(1) +#define MDP_LM_CAP_PAIR BIT(2) + +static inline bool pipe_supports_yuv(uint32_t pipe_caps) +{ + return (pipe_caps & MDP_PIPE_CAP_SCALE) && + (pipe_caps & MDP_PIPE_CAP_CSC); +} + +enum csc_type { + CSC_RGB2RGB = 0, + CSC_YUV2RGB, + CSC_RGB2YUV, + CSC_YUV2YUV, + CSC_MAX +}; + +struct csc_cfg { + enum csc_type type; + uint32_t matrix[9]; + uint32_t pre_bias[3]; + uint32_t post_bias[3]; + uint32_t pre_clamp[6]; + uint32_t post_clamp[6]; +}; + +struct csc_cfg *mdp_get_default_csc_cfg(enum csc_type); + +#endif /* __MDP_KMS_H__ */ diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c new file mode 100644 index 000000000..e75b97127 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. 
+ */ + +#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ + +#include "msm_disp_snapshot.h" + +static ssize_t __maybe_unused disp_devcoredump_read(char *buffer, loff_t offset, + size_t count, void *data, size_t datalen) +{ + struct drm_print_iterator iter; + struct drm_printer p; + struct msm_disp_state *disp_state; + + disp_state = data; + + iter.data = buffer; + iter.offset = 0; + iter.start = offset; + iter.remain = count; + + p = drm_coredump_printer(&iter); + + msm_disp_state_print(disp_state, &p); + + return count - iter.remain; +} + +struct msm_disp_state * +msm_disp_snapshot_state_sync(struct msm_kms *kms) +{ + struct drm_device *drm_dev = kms->dev; + struct msm_disp_state *disp_state; + + WARN_ON(!mutex_is_locked(&kms->dump_mutex)); + + disp_state = kzalloc(sizeof(struct msm_disp_state), GFP_KERNEL); + if (!disp_state) + return ERR_PTR(-ENOMEM); + + disp_state->dev = drm_dev->dev; + disp_state->drm_dev = drm_dev; + + INIT_LIST_HEAD(&disp_state->blocks); + + msm_disp_snapshot_capture_state(disp_state); + + return disp_state; +} + +static void _msm_disp_snapshot_work(struct kthread_work *work) +{ + struct msm_kms *kms = container_of(work, struct msm_kms, dump_work); + struct msm_disp_state *disp_state; + struct drm_printer p; + + /* Serialize dumping here */ + mutex_lock(&kms->dump_mutex); + disp_state = msm_disp_snapshot_state_sync(kms); + mutex_unlock(&kms->dump_mutex); + + if (IS_ERR(disp_state)) + return; + + if (MSM_DISP_SNAPSHOT_DUMP_IN_CONSOLE) { + p = drm_info_printer(disp_state->drm_dev->dev); + msm_disp_state_print(disp_state, &p); + } + + /* + * If COREDUMP is disabled, the stub will call the free function. + * If there is a coredump pending for the device, dev_coredumpm() + * will also free the new coredump state. + */ + dev_coredumpm(disp_state->dev, THIS_MODULE, disp_state, 0, GFP_KERNEL, + disp_devcoredump_read, msm_disp_state_free); +} + +void msm_disp_snapshot_state(struct drm_device *drm_dev) +{ + struct msm_drm_private *priv; + struct msm_kms *kms; + + if (!drm_dev) { + DRM_ERROR("invalid params\n"); + return; + } + + priv = drm_dev->dev_private; + kms = priv->kms; + + kthread_queue_work(kms->dump_worker, &kms->dump_work); +} + +int msm_disp_snapshot_init(struct drm_device *drm_dev) +{ + struct msm_drm_private *priv; + struct msm_kms *kms; + + if (!drm_dev) { + DRM_ERROR("invalid params\n"); + return -EINVAL; + } + + priv = drm_dev->dev_private; + kms = priv->kms; + + mutex_init(&kms->dump_mutex); + + kms->dump_worker = kthread_create_worker(0, "%s", "disp_snapshot"); + if (IS_ERR(kms->dump_worker)) + DRM_ERROR("failed to create disp state task\n"); + + kthread_init_work(&kms->dump_work, _msm_disp_snapshot_work); + + return 0; +} + +void msm_disp_snapshot_destroy(struct drm_device *drm_dev) +{ + struct msm_kms *kms; + struct msm_drm_private *priv; + + if (!drm_dev) { + DRM_ERROR("invalid params\n"); + return; + } + + priv = drm_dev->dev_private; + kms = priv->kms; + + if (kms->dump_worker) + kthread_destroy_worker(kms->dump_worker); + + mutex_destroy(&kms->dump_mutex); +} diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h new file mode 100644 index 000000000..b5f452bd7 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */ + +#ifndef MSM_DISP_SNAPSHOT_H_ +#define MSM_DISP_SNAPSHOT_H_ + +#include +#include +#include "../../../drm_crtc_internal.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "msm_kms.h" + +#define MSM_DISP_SNAPSHOT_MAX_BLKS 10 + +/* debug option to print the registers in logs */ +#define MSM_DISP_SNAPSHOT_DUMP_IN_CONSOLE 0 + +/* print debug ranges in groups of 4 u32s */ +#define REG_DUMP_ALIGN 16 + +/** + * struct msm_disp_state - structure to store current dpu state + * @dev: device pointer + * @drm_dev: drm device pointer + * @atomic_state: atomic state duplicated at the time of the error + * @time: timestamp at which the coredump was captured + */ +struct msm_disp_state { + struct device *dev; + struct drm_device *drm_dev; + + struct list_head blocks; + + struct drm_atomic_state *atomic_state; + + struct timespec64 time; +}; + +/** + * struct msm_disp_state_block - structure to store each hardware block state + * @name: name of the block + * @node: handle to the linked list head + * @size: size of the register space of this hardware block + * @state: array holding the register dump of this hardware block + * @base_addr: starting address of this hardware block's register space + */ +struct msm_disp_state_block { + char name[SZ_128]; + struct list_head node; + unsigned int size; + u32 *state; + void __iomem *base_addr; +}; + +/** + * msm_disp_snapshot_init - initialize display snapshot + * @drm_dev: drm device handle + * + * Returns: 0 or -ERROR + */ +int msm_disp_snapshot_init(struct drm_device *drm_dev); + +/** + * msm_disp_snapshot_destroy - destroy the display snapshot + * @drm_dev: drm device handle + * + * Returns: none + */ +void msm_disp_snapshot_destroy(struct drm_device *drm_dev); + +/** + * msm_disp_snapshot_state_sync - synchronously snapshot display state + * @kms: the kms object + * + * Returns state or error + * + * Must be called with &kms->dump_mutex held + */ +struct msm_disp_state *msm_disp_snapshot_state_sync(struct msm_kms *kms); + +/** + * msm_disp_snapshot_state - trigger to dump the display snapshot + * @drm_dev: handle to drm device + * + * Returns: none + */ +void msm_disp_snapshot_state(struct drm_device *drm_dev); + +/** + * msm_disp_state_print - print out the current dpu state + * @disp_state: handle to struct msm_disp_state + * @p: handle to drm printer + * + * Returns: none + */ +void msm_disp_state_print(struct msm_disp_state *disp_state, struct drm_printer *p); + +/** + * msm_disp_snapshot_capture_state - utility to capture atomic state and hw registers + * @disp_state: handle to msm_disp_state struct + * + * Returns: none + */ +void msm_disp_snapshot_capture_state(struct msm_disp_state *disp_state); + +/** + * msm_disp_state_free - free the memory after the coredump has been read + * @data: handle to struct msm_disp_state + * + * Returns: none + */ +void msm_disp_state_free(void *data); + +/** + * msm_disp_snapshot_add_block - add a hardware block with its register dump + * @disp_state: handle to struct msm_disp_state + * @len: size of the register space of the hardware block + * @base_addr: starting address of the register space of the hardware block + * @fmt: format string from which the block name is built + * + * Returns: none + */ +__printf(4, 5) +void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len, + void __iomem *base_addr, const char *fmt, ...); + +#endif /* MSM_DISP_SNAPSHOT_H_ */
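A minimal usage sketch (illustrative only, not part of the patch): a hardware block hands its MMIO range to the snapshot core from its snapshot hook, just as dp_catalog_snapshot() in dp_catalog.c later in this patch does. The struct my_hw and my_hw_snapshot() names below are hypothetical; only msm_disp_snapshot_add_block() comes from the header above, and the length passed in is rounded up to REG_DUMP_ALIGN internally.

struct my_hw {
	void __iomem *base;	/* ioremapped register space */
	u32 len;		/* size of that space, in bytes */
	int id;			/* instance number */
};

static void my_hw_snapshot(struct msm_disp_state *disp_state, struct my_hw *hw)
{
	/*
	 * Copies hw->len bytes of registers starting at hw->base into the
	 * snapshot; the block name is built from the format string, so each
	 * instance shows up as "my_hw_0", "my_hw_1", ... in the coredump.
	 */
	msm_disp_snapshot_add_block(disp_state, hw->len, hw->base,
				    "my_hw_%d", hw->id);
}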
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c new file mode 100644 index 000000000..add72bbc2 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ + +#include + +#include "msm_disp_snapshot.h" + +static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *base_addr) +{ + u32 len_padded; + u32 num_rows; + u32 x0, x4, x8, xc; + void __iomem *addr; + u32 *dump_addr = NULL; + void __iomem *end_addr; + int i; + + len_padded = aligned_len * REG_DUMP_ALIGN; + num_rows = aligned_len / REG_DUMP_ALIGN; + + addr = base_addr; + end_addr = base_addr + aligned_len; + + if (!(*reg)) + *reg = kzalloc(len_padded, GFP_KERNEL); + + if (*reg) + dump_addr = *reg; + + for (i = 0; i < num_rows; i++) { + x0 = (addr < end_addr) ? readl_relaxed(addr + 0x0) : 0; + x4 = (addr + 0x4 < end_addr) ? readl_relaxed(addr + 0x4) : 0; + x8 = (addr + 0x8 < end_addr) ? readl_relaxed(addr + 0x8) : 0; + xc = (addr + 0xc < end_addr) ? readl_relaxed(addr + 0xc) : 0; + + if (dump_addr) { + dump_addr[i * 4] = x0; + dump_addr[i * 4 + 1] = x4; + dump_addr[i * 4 + 2] = x8; + dump_addr[i * 4 + 3] = xc; + } + + addr += REG_DUMP_ALIGN; + } +} + +static void msm_disp_state_print_regs(u32 **reg, u32 len, void __iomem *base_addr, + struct drm_printer *p) +{ + int i; + u32 *dump_addr = NULL; + void __iomem *addr; + u32 num_rows; + + addr = base_addr; + num_rows = len / REG_DUMP_ALIGN; + + if (*reg) + dump_addr = *reg; + + if (!dump_addr) { + /* the capture-side allocation may have failed */ + drm_printf(p, "Registers not stored\n"); + return; + } + + for (i = 0; i < num_rows; i++) { + drm_printf(p, "0x%lx : %08x %08x %08x %08x\n", + (unsigned long)(addr - base_addr), + dump_addr[i * 4], dump_addr[i * 4 + 1], + dump_addr[i * 4 + 2], dump_addr[i * 4 + 3]); + addr += REG_DUMP_ALIGN; + } +} + +void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p) +{ + struct msm_disp_state_block *block, *tmp; + + if (!p) { + DRM_ERROR("invalid drm printer\n"); + return; + } + + drm_printf(p, "---\n"); + drm_printf(p, "kernel: " UTS_RELEASE "\n"); + drm_printf(p, "module: " KBUILD_MODNAME "\n"); + drm_printf(p, "dpu devcoredump\n"); + drm_printf(p, "time: %lld.%09ld\n", + state->time.tv_sec, state->time.tv_nsec); + + list_for_each_entry_safe(block, tmp, &state->blocks, node) { + drm_printf(p, "====================%s================\n", block->name); + msm_disp_state_print_regs(&block->state, block->size, block->base_addr, p); + } + + drm_printf(p, "===================dpu drm state================\n"); + + if (state->atomic_state) + drm_atomic_print_new_state(state->atomic_state, p); +} + +static void msm_disp_capture_atomic_state(struct msm_disp_state *disp_state) +{ + struct drm_device *ddev; + struct drm_modeset_acquire_ctx ctx; + + ktime_get_real_ts64(&disp_state->time); + + ddev = disp_state->drm_dev; + + drm_modeset_acquire_init(&ctx, 0); + + while (drm_modeset_lock_all_ctx(ddev, &ctx) != 0) + drm_modeset_backoff(&ctx); + + disp_state->atomic_state = drm_atomic_helper_duplicate_state(ddev, + &ctx); + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); +} + +void msm_disp_snapshot_capture_state(struct msm_disp_state *disp_state) +{ + struct msm_drm_private *priv; + struct drm_device *drm_dev; + struct msm_kms *kms; + int i; + + drm_dev = disp_state->drm_dev; + priv = drm_dev->dev_private; + kms = priv->kms; + + for (i = 0; i < ARRAY_SIZE(priv->dp);
i++) { + if (!priv->dp[i]) + continue; + + msm_dp_snapshot(disp_state, priv->dp[i]); + } + + for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) { + if (!priv->dsi[i]) + continue; + + msm_dsi_snapshot(disp_state, priv->dsi[i]); + } + + if (kms->funcs->snapshot) + kms->funcs->snapshot(disp_state, kms); + + msm_disp_capture_atomic_state(disp_state); +} + +void msm_disp_state_free(void *data) +{ + struct msm_disp_state *disp_state = data; + struct msm_disp_state_block *block, *tmp; + + if (disp_state->atomic_state) { + drm_atomic_state_put(disp_state->atomic_state); + disp_state->atomic_state = NULL; + } + + list_for_each_entry_safe(block, tmp, &disp_state->blocks, node) { + list_del(&block->node); + kfree(block->state); + kfree(block); + } + + kfree(disp_state); +} + +void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len, + void __iomem *base_addr, const char *fmt, ...) +{ + struct msm_disp_state_block *new_blk; + struct va_format vaf; + va_list va; + + new_blk = kzalloc(sizeof(struct msm_disp_state_block), GFP_KERNEL); + if (!new_blk) + return; + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + snprintf(new_blk->name, sizeof(new_blk->name), "%pV", &vaf); + + va_end(va); + + INIT_LIST_HEAD(&new_blk->node); + new_blk->size = ALIGN(len, REG_DUMP_ALIGN); + new_blk->base_addr = base_addr; + + msm_disp_state_dump_regs(&new_blk->state, new_blk->size, base_addr); + list_add_tail(&new_blk->node, &disp_state->blocks); +} diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c new file mode 100644 index 000000000..1245c7aa4 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_audio.c @@ -0,0 +1,667 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + */ + + +#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ + +#include + +#include +#include + +#include "dp_catalog.h" +#include "dp_audio.h" +#include "dp_panel.h" +#include "dp_display.h" + +#define HEADER_BYTE_2_BIT 0 +#define PARITY_BYTE_2_BIT 8 +#define HEADER_BYTE_1_BIT 16 +#define PARITY_BYTE_1_BIT 24 +#define HEADER_BYTE_3_BIT 16 +#define PARITY_BYTE_3_BIT 24 + +struct dp_audio_private { + struct platform_device *audio_pdev; + struct platform_device *pdev; + struct drm_device *drm_dev; + struct dp_catalog *catalog; + struct dp_panel *panel; + + bool engine_on; + u32 channels; + + struct dp_audio dp_audio; +}; + +static u8 dp_audio_get_g0_value(u8 data) +{ + u8 c[4]; + u8 g[4]; + u8 ret_data = 0; + u8 i; + + for (i = 0; i < 4; i++) + c[i] = (data >> i) & 0x01; + + g[0] = c[3]; + g[1] = c[0] ^ c[3]; + g[2] = c[1]; + g[3] = c[2]; + + for (i = 0; i < 4; i++) + ret_data = ((g[i] & 0x01) << i) | ret_data; + + return ret_data; +} + +static u8 dp_audio_get_g1_value(u8 data) +{ + u8 c[4]; + u8 g[4]; + u8 ret_data = 0; + u8 i; + + for (i = 0; i < 4; i++) + c[i] = (data >> i) & 0x01; + + g[0] = c[0] ^ c[3]; + g[1] = c[0] ^ c[1] ^ c[3]; + g[2] = c[1] ^ c[2]; + g[3] = c[2] ^ c[3]; + + for (i = 0; i < 4; i++) + ret_data = ((g[i] & 0x01) << i) | ret_data; + + return ret_data; +} + +static u8 dp_audio_calculate_parity(u32 data) +{ + u8 x0 = 0; + u8 x1 = 0; + u8 ci = 0; + u8 iData = 0; + u8 i = 0; + u8 parity_byte; + u8 num_byte = (data & 0xFF00) > 0 ? 
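/* nibble count: walk all eight 4-bit nibbles when a second byte is present, otherwise just the low byte's two */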
8 : 2; + + for (i = 0; i < num_byte; i++) { + iData = (data >> i*4) & 0xF; + + ci = iData ^ x1; + x1 = x0 ^ dp_audio_get_g1_value(ci); + x0 = dp_audio_get_g0_value(ci); + } + + parity_byte = x1 | (x0 << 4); + + return parity_byte; +} + +static u32 dp_audio_get_header(struct dp_catalog *catalog, + enum dp_catalog_audio_sdp_type sdp, + enum dp_catalog_audio_header_type header) +{ + catalog->sdp_type = sdp; + catalog->sdp_header = header; + dp_catalog_audio_get_header(catalog); + + return catalog->audio_data; +} + +static void dp_audio_set_header(struct dp_catalog *catalog, + u32 data, + enum dp_catalog_audio_sdp_type sdp, + enum dp_catalog_audio_header_type header) +{ + catalog->sdp_type = sdp; + catalog->sdp_header = header; + catalog->audio_data = data; + dp_catalog_audio_set_header(catalog); +} + +static void dp_audio_stream_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1); + + new_value = 0x02; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + drm_dbg_dp(audio->drm_dev, + "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2); + new_value = value; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + drm_dbg_dp(audio->drm_dev, + "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2); + + /* Config header and parity byte 3 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3); + + new_value = audio->channels - 1; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_3_BIT) + | (parity_byte << PARITY_BYTE_3_BIT)); + drm_dbg_dp(audio->drm_dev, + "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3); +} + +static void dp_audio_timestamp_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1); + + new_value = 0x1; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + drm_dbg_dp(audio->drm_dev, + "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2); + + new_value = 0x17; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + drm_dbg_dp(audio->drm_dev, + "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_TIMESTAMP, 
DP_AUDIO_SDP_HEADER_2); + + /* Config header and parity byte 3 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3); + + new_value = (0x0 | (0x11 << 2)); + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_3_BIT) + | (parity_byte << PARITY_BYTE_3_BIT)); + drm_dbg_dp(audio->drm_dev, + "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3); +} + +static void dp_audio_infoframe_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1); + + new_value = 0x84; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + drm_dbg_dp(audio->drm_dev, + "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2); + + new_value = 0x1b; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + drm_dbg_dp(audio->drm_dev, + "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2); + + /* Config header and parity byte 3 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3); + + new_value = (0x0 | (0x11 << 2)); + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_3_BIT) + | (parity_byte << PARITY_BYTE_3_BIT)); + drm_dbg_dp(audio->drm_dev, + "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", + new_value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3); +} + +static void dp_audio_copy_management_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1); + + new_value = 0x05; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + drm_dbg_dp(audio->drm_dev, + "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2); + + new_value = 0x0F; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + drm_dbg_dp(audio->drm_dev, + "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2); + + /* Config header and parity byte 3 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3); + + new_value = 0x0; + parity_byte = dp_audio_calculate_parity(new_value); + 
value |= ((new_value << HEADER_BYTE_3_BIT) + | (parity_byte << PARITY_BYTE_3_BIT)); + drm_dbg_dp(audio->drm_dev, + "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3); +} + +static void dp_audio_isrc_sdp(struct dp_audio_private *audio) +{ + struct dp_catalog *catalog = audio->catalog; + u32 value, new_value; + u8 parity_byte; + + /* Config header and parity byte 1 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1); + + new_value = 0x06; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_1_BIT) + | (parity_byte << PARITY_BYTE_1_BIT)); + drm_dbg_dp(audio->drm_dev, + "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1); + + /* Config header and parity byte 2 */ + value = dp_audio_get_header(catalog, + DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2); + + new_value = 0x0F; + parity_byte = dp_audio_calculate_parity(new_value); + value |= ((new_value << HEADER_BYTE_2_BIT) + | (parity_byte << PARITY_BYTE_2_BIT)); + drm_dbg_dp(audio->drm_dev, + "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n", + value, parity_byte); + dp_audio_set_header(catalog, value, + DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2); +} + +static void dp_audio_setup_sdp(struct dp_audio_private *audio) +{ + dp_catalog_audio_config_sdp(audio->catalog); + + dp_audio_stream_sdp(audio); + dp_audio_timestamp_sdp(audio); + dp_audio_infoframe_sdp(audio); + dp_audio_copy_management_sdp(audio); + dp_audio_isrc_sdp(audio); +} + +static void dp_audio_setup_acr(struct dp_audio_private *audio) +{ + u32 select = 0; + struct dp_catalog *catalog = audio->catalog; + + switch (audio->dp_audio.bw_code) { + case DP_LINK_BW_1_62: + select = 0; + break; + case DP_LINK_BW_2_7: + select = 1; + break; + case DP_LINK_BW_5_4: + select = 2; + break; + case DP_LINK_BW_8_1: + select = 3; + break; + default: + drm_dbg_dp(audio->drm_dev, "Unknown link rate\n"); + select = 0; + break; + } + + catalog->audio_data = select; + dp_catalog_audio_config_acr(catalog); +} + +static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio) +{ + struct dp_catalog *catalog = audio->catalog; + u32 safe_to_exit_level = 0; + + switch (audio->dp_audio.lane_count) { + case 1: + safe_to_exit_level = 14; + break; + case 2: + safe_to_exit_level = 8; + break; + case 4: + safe_to_exit_level = 5; + break; + default: + drm_dbg_dp(audio->drm_dev, + "setting the default safe_to_exit_level = %u\n", + safe_to_exit_level); + safe_to_exit_level = 14; + break; + } + + catalog->audio_data = safe_to_exit_level; + dp_catalog_audio_sfe_level(catalog); +} + +static void dp_audio_enable(struct dp_audio_private *audio, bool enable) +{ + struct dp_catalog *catalog = audio->catalog; + + catalog->audio_data = enable; + dp_catalog_audio_enable(catalog); + + audio->engine_on = enable; +} + +static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev) +{ + struct dp_audio *dp_audio; + struct msm_dp *dp_display; + + if (!pdev) { + DRM_ERROR("invalid input\n"); + return ERR_PTR(-ENODEV); + } + + dp_display = platform_get_drvdata(pdev); + if (!dp_display) { + DRM_ERROR("invalid input\n"); + return ERR_PTR(-ENODEV); + } + + dp_audio = dp_display->dp_audio; + + if (!dp_audio) { + DRM_ERROR("invalid dp_audio data\n"); + return ERR_PTR(-EINVAL); + } + + return container_of(dp_audio, struct 
dp_audio_private, dp_audio); +} + +static int dp_audio_hook_plugged_cb(struct device *dev, void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + + struct platform_device *pdev; + struct msm_dp *dp_display; + + pdev = to_platform_device(dev); + if (!pdev) { + pr_err("invalid input\n"); + return -ENODEV; + } + + dp_display = platform_get_drvdata(pdev); + if (!dp_display) { + pr_err("invalid input\n"); + return -ENODEV; + } + + return dp_display_set_plugged_cb(dp_display, fn, codec_dev); +} + +static int dp_audio_get_eld(struct device *dev, + void *data, uint8_t *buf, size_t len) +{ + struct platform_device *pdev; + struct msm_dp *dp_display; + + pdev = to_platform_device(dev); + + if (!pdev) { + DRM_ERROR("invalid input\n"); + return -ENODEV; + } + + dp_display = platform_get_drvdata(pdev); + if (!dp_display) { + DRM_ERROR("invalid input\n"); + return -ENODEV; + } + + memcpy(buf, dp_display->connector->eld, + min(sizeof(dp_display->connector->eld), len)); + + return 0; +} + +int dp_audio_hw_params(struct device *dev, + void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + int rc = 0; + struct dp_audio_private *audio; + struct platform_device *pdev; + struct msm_dp *dp_display; + + pdev = to_platform_device(dev); + dp_display = platform_get_drvdata(pdev); + + /* + * there could be cases where the sound card can be opened even + * before OR even when DP is not connected. This can cause + * unclocked access as the audio subsystem relies on the DP + * driver to maintain the correct state of clocks. To protect against + * such cases, check for connection status and bail out if not + * connected. + */ + if (!dp_display->power_on) { + rc = -EINVAL; + goto end; + } + + audio = dp_audio_get_data(pdev); + if (IS_ERR(audio)) { + rc = PTR_ERR(audio); + goto end; + } + + audio->channels = params->channels; + + dp_audio_setup_sdp(audio); + dp_audio_setup_acr(audio); + dp_audio_safe_to_exit_level(audio); + dp_audio_enable(audio, true); + dp_display_signal_audio_start(dp_display); + dp_display->audio_enabled = true; + +end: + return rc; +} + +static void dp_audio_shutdown(struct device *dev, void *data) +{ + struct dp_audio_private *audio; + struct platform_device *pdev; + struct msm_dp *dp_display; + + pdev = to_platform_device(dev); + dp_display = platform_get_drvdata(pdev); + audio = dp_audio_get_data(pdev); + if (IS_ERR(audio)) { + DRM_ERROR("failed to get audio data\n"); + return; + } + + /* + * if audio was not enabled there is no need + * to execute the shutdown and we can bail out early. + * This also makes sure that we don't cause an unclocked + * access when the audio subsystem calls this without DP being + * connected. is_connected cannot be used here as it's set + * to false earlier than this call. */ + if (!dp_display->audio_enabled) + return; + + dp_audio_enable(audio, false); + /* signal the dp display to safely shutdown clocks */ + dp_display_signal_audio_complete(dp_display); +} + +static const struct hdmi_codec_ops dp_audio_codec_ops = { + .hw_params = dp_audio_hw_params, + .audio_shutdown = dp_audio_shutdown, + .get_eld = dp_audio_get_eld, + .hook_plugged_cb = dp_audio_hook_plugged_cb, +}; + +static struct hdmi_codec_pdata codec_data = { + .ops = &dp_audio_codec_ops, + .max_i2s_channels = 8, + .i2s = 1, +}; + +void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio) +{ + struct dp_audio_private *audio_priv; + + audio_priv = container_of(dp_audio, struct dp_audio_private, dp_audio); + + if (audio_priv->audio_pdev) { + platform_device_unregister(audio_priv->audio_pdev); + audio_priv->audio_pdev = NULL; + } +} + +int dp_register_audio_driver(struct device *dev, + struct dp_audio *dp_audio) +{ + struct dp_audio_private *audio_priv; + + audio_priv = container_of(dp_audio, + struct dp_audio_private, dp_audio); + + audio_priv->audio_pdev = platform_device_register_data(dev, + HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &codec_data, + sizeof(codec_data)); + return PTR_ERR_OR_ZERO(audio_priv->audio_pdev); +} + +struct dp_audio *dp_audio_get(struct platform_device *pdev, + struct dp_panel *panel, + struct dp_catalog *catalog) +{ + int rc = 0; + struct dp_audio_private *audio; + struct dp_audio *dp_audio; + + if (!pdev || !panel || !catalog) { + DRM_ERROR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + audio = devm_kzalloc(&pdev->dev, sizeof(*audio), GFP_KERNEL); + if (!audio) { + rc = -ENOMEM; + goto error; + } + + audio->pdev = pdev; + audio->panel = panel; + audio->catalog = catalog; + + dp_audio = &audio->dp_audio; + + dp_catalog_audio_init(catalog); + + return dp_audio; +error: + return ERR_PTR(rc); +} + +void dp_audio_put(struct dp_audio *dp_audio) +{ + struct dp_audio_private *audio; + + if (!dp_audio) + return; + + audio = container_of(dp_audio, struct dp_audio_private, dp_audio); + + devm_kfree(&audio->pdev->dev, audio); +} diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h new file mode 100644 index 000000000..4ab78880a --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_audio.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_AUDIO_H_ +#define _DP_AUDIO_H_ + +#include + +#include "dp_panel.h" +#include "dp_catalog.h" +#include + +/** + * struct dp_audio + * @lane_count: number of lanes configured in current session + * @bw_code: link rate's bandwidth code for current session + */ +struct dp_audio { + u32 lane_count; + u32 bw_code; +}; + +/** + * dp_audio_get() + * + * Creates an instance of dp audio. + * + * @pdev: caller's platform device instance. + * @panel: an instance of dp_panel module. + * @catalog: an instance of dp_catalog module. + * + * Returns the error code in case of failure, otherwise + * an instance of the newly created dp_audio module. + */ +struct dp_audio *dp_audio_get(struct platform_device *pdev, + struct dp_panel *panel, + struct dp_catalog *catalog); + +/** + * dp_register_audio_driver() + * + * Registers DP device with hdmi_codec interface. + * + * @dev: DP device instance. + * @dp_audio: an instance of dp_audio module. + * + * + * Returns the error code in case of failure, otherwise + * zero on success.
+ */ +int dp_register_audio_driver(struct device *dev, + struct dp_audio *dp_audio); + +void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio); + +/** + * dp_audio_put() + * + * Cleans the dp_audio instance. + * + * @dp_audio: an instance of dp_audio. + */ +void dp_audio_put(struct dp_audio *dp_audio); + +int dp_audio_hw_params(struct device *dev, + void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params); + +#endif /* _DP_AUDIO_H_ */ + + diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c new file mode 100644 index 000000000..84f9e3e5f --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_aux.c @@ -0,0 +1,541 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#include +#include + +#include "dp_reg.h" +#include "dp_aux.h" + +enum msm_dp_aux_err { + DP_AUX_ERR_NONE, + DP_AUX_ERR_ADDR, + DP_AUX_ERR_TOUT, + DP_AUX_ERR_NACK, + DP_AUX_ERR_DEFER, + DP_AUX_ERR_NACK_DEFER, + DP_AUX_ERR_PHY, +}; + +struct dp_aux_private { + struct device *dev; + struct dp_catalog *catalog; + + struct mutex mutex; + struct completion comp; + + enum msm_dp_aux_err aux_error_num; + u32 retry_cnt; + bool cmd_busy; + bool native; + bool read; + bool no_send_addr; + bool no_send_stop; + bool initted; + bool is_edp; + u32 offset; + u32 segment; + + struct drm_dp_aux dp_aux; +}; + +#define MAX_AUX_RETRIES 5 + +static ssize_t dp_aux_write(struct dp_aux_private *aux, + struct drm_dp_aux_msg *msg) +{ + u8 data[4]; + u32 reg; + ssize_t len; + u8 *msgdata = msg->buffer; + int const AUX_CMD_FIFO_LEN = 128; + int i = 0; + + if (aux->read) + len = 0; + else + len = msg->size; + + /* + * cmd fifo only has depth of 144 bytes + * limit buf length to 128 bytes here + */ + if (len > AUX_CMD_FIFO_LEN - 4) { + DRM_ERROR("buf size greater than allowed size of 128 bytes\n"); + return -EINVAL; + } + + /* Pack cmd and write to HW */ + data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */ + if (aux->read) + data[0] |= BIT(4); /* R/W */ + + data[1] = msg->address >> 8; /* addr[15:8] */ + data[2] = msg->address; /* addr[7:0] */ + data[3] = msg->size - 1; /* len[7:0] */ + + for (i = 0; i < len + 4; i++) { + reg = (i < 4) ? 
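/* the first four FIFO writes carry the packed command header, the rest the payload */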
data[i] : msgdata[i - 4]; + reg <<= DP_AUX_DATA_OFFSET; + reg &= DP_AUX_DATA_MASK; + reg |= DP_AUX_DATA_WRITE; + /* index = 0, write */ + if (i == 0) + reg |= DP_AUX_DATA_INDEX_WRITE; + aux->catalog->aux_data = reg; + dp_catalog_aux_write_data(aux->catalog); + } + + dp_catalog_aux_clear_trans(aux->catalog, false); + dp_catalog_aux_clear_hw_interrupts(aux->catalog); + + reg = 0; /* Transaction number == 1 */ + if (!aux->native) { /* i2c */ + reg |= DP_AUX_TRANS_CTRL_I2C; + + if (aux->no_send_addr) + reg |= DP_AUX_TRANS_CTRL_NO_SEND_ADDR; + + if (aux->no_send_stop) + reg |= DP_AUX_TRANS_CTRL_NO_SEND_STOP; + } + + reg |= DP_AUX_TRANS_CTRL_GO; + aux->catalog->aux_data = reg; + dp_catalog_aux_write_trans(aux->catalog); + + return len; +} + +static ssize_t dp_aux_cmd_fifo_tx(struct dp_aux_private *aux, + struct drm_dp_aux_msg *msg) +{ + ssize_t ret; + unsigned long time_left; + + reinit_completion(&aux->comp); + + ret = dp_aux_write(aux, msg); + if (ret < 0) + return ret; + + time_left = wait_for_completion_timeout(&aux->comp, + msecs_to_jiffies(250)); + if (!time_left) + return -ETIMEDOUT; + + return ret; +} + +static ssize_t dp_aux_cmd_fifo_rx(struct dp_aux_private *aux, + struct drm_dp_aux_msg *msg) +{ + u32 data; + u8 *dp; + u32 i, actual_i; + u32 len = msg->size; + + dp_catalog_aux_clear_trans(aux->catalog, true); + + data = DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */ + data |= DP_AUX_DATA_READ; /* read */ + + aux->catalog->aux_data = data; + dp_catalog_aux_write_data(aux->catalog); + + dp = msg->buffer; + + /* discard first byte */ + data = dp_catalog_aux_read_data(aux->catalog); + + for (i = 0; i < len; i++) { + data = dp_catalog_aux_read_data(aux->catalog); + *dp++ = (u8)((data >> DP_AUX_DATA_OFFSET) & 0xff); + + actual_i = (data >> DP_AUX_DATA_INDEX_OFFSET) & 0xFF; + if (i != actual_i) + break; + } + + return i; +} + +static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux, + struct drm_dp_aux_msg *input_msg) +{ + u32 edid_address = 0x50; + u32 segment_address = 0x30; + bool i2c_read = input_msg->request & + (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); + u8 *data; + + if (aux->native || i2c_read || ((input_msg->address != edid_address) && + (input_msg->address != segment_address))) + return; + + + data = input_msg->buffer; + if (input_msg->address == segment_address) + aux->segment = *data; + else + aux->offset = *data; +} + +/** + * dp_aux_transfer_helper() - helper function for EDID read transactions + * + * @aux: DP AUX private structure + * @input_msg: input message from DRM upstream APIs + * @send_seg: send the segment to sink + * + * return: void + * + * This helper function is used to fix EDID reads for non-compliant + * sinks that do not handle the i2c middle-of-transaction flag correctly. + */ +static void dp_aux_transfer_helper(struct dp_aux_private *aux, + struct drm_dp_aux_msg *input_msg, + bool send_seg) +{ + struct drm_dp_aux_msg helper_msg; + u32 message_size = 0x10; + u32 segment_address = 0x30; + u32 const edid_block_length = 0x80; + bool i2c_mot = input_msg->request & DP_AUX_I2C_MOT; + bool i2c_read = input_msg->request & + (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); + + if (!i2c_mot || !i2c_read || (input_msg->size == 0)) + return; + + /* + * Sending the segment value and EDID offset will be performed + * from the DRM upstream EDID driver for each block. Avoid + * duplicate AUX transactions related to this while reading the + * first 16 bytes of each block. 
+ */ + if (!(aux->offset % edid_block_length) || !send_seg) + goto end; + + aux->read = false; + aux->cmd_busy = true; + aux->no_send_addr = true; + aux->no_send_stop = true; + + /* + * Send the segment address for every i2c read in which the + * middle-of-transaction flag is set. This is required to support EDID + * reads of more than 2 blocks as the segment address is reset to 0 + * since we are overriding the middle-of-transaction flag for read + * transactions. + */ + + if (aux->segment) { + memset(&helper_msg, 0, sizeof(helper_msg)); + helper_msg.address = segment_address; + helper_msg.buffer = &aux->segment; + helper_msg.size = 1; + dp_aux_cmd_fifo_tx(aux, &helper_msg); + } + + /* + * Send the offset address for every i2c read in which the + * middle-of-transaction flag is set. This will ensure that the sink + * will update its read pointer and return the correct portion of the + * EDID buffer in the subsequent i2c read transaction triggered in the + * native AUX transfer function. + */ + memset(&helper_msg, 0, sizeof(helper_msg)); + helper_msg.address = input_msg->address; + helper_msg.buffer = &aux->offset; + helper_msg.size = 1; + dp_aux_cmd_fifo_tx(aux, &helper_msg); + +end: + aux->offset += message_size; + if (aux->offset == 0x80 || aux->offset == 0x100) + aux->segment = 0x0; /* reset segment at end of block */ +} + +/* + * This function does the real job to process an AUX transaction. + * It will call the aux_reset() function to reset the AUX channel + * if the wait times out. + */ +static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, + struct drm_dp_aux_msg *msg) +{ + ssize_t ret; + int const aux_cmd_native_max = 16; + int const aux_cmd_i2c_max = 128; + struct dp_aux_private *aux; + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ); + + /* Ignore address-only messages */ + if (msg->size == 0 || !msg->buffer) { + msg->reply = aux->native ? + DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK; + return msg->size; + } + + /* msg sanity check */ + if ((aux->native && msg->size > aux_cmd_native_max) || + msg->size > aux_cmd_i2c_max) { + DRM_ERROR("%s: invalid msg: size(%zu), request(%x)\n", + __func__, msg->size, msg->request); + return -EINVAL; + } + + mutex_lock(&aux->mutex); + if (!aux->initted) { + ret = -EIO; + goto exit; + } + + /* + * For eDP it's important to give a reasonably long wait here for HPD + * to be asserted. This is because the panel driver may have _just_ + * turned on the panel and then tried to do an AUX transfer. The panel + * driver has no way of knowing when the panel is ready, so it's up + * to us to wait. For DP we never get into this situation so let's + * avoid ever doing the extra long wait for DP.
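(dp_catalog_aux_wait_for_hpd_connect_state() bounds this wait at 500ms; see dp_catalog.c later in this patch.)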
+ */ + if (aux->is_edp) { + ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog); + if (ret) { + DRM_DEBUG_DP("Panel not ready for aux transactions\n"); + goto exit; + } + } + + dp_aux_update_offset_and_segment(aux, msg); + dp_aux_transfer_helper(aux, msg, true); + + aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ); + aux->cmd_busy = true; + + if (aux->read) { + aux->no_send_addr = true; + aux->no_send_stop = false; + } else { + aux->no_send_addr = true; + aux->no_send_stop = true; + } + + ret = dp_aux_cmd_fifo_tx(aux, msg); + if (ret < 0) { + if (aux->native) { + aux->retry_cnt++; + if (!(aux->retry_cnt % MAX_AUX_RETRIES)) + dp_catalog_aux_update_cfg(aux->catalog); + } + /* reset aux if link is in connected state */ + if (dp_catalog_link_is_connected(aux->catalog)) + dp_catalog_aux_reset(aux->catalog); + } else { + aux->retry_cnt = 0; + switch (aux->aux_error_num) { + case DP_AUX_ERR_NONE: + if (aux->read) + ret = dp_aux_cmd_fifo_rx(aux, msg); + msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK; + break; + case DP_AUX_ERR_DEFER: + msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER; + break; + case DP_AUX_ERR_PHY: + case DP_AUX_ERR_ADDR: + case DP_AUX_ERR_NACK: + case DP_AUX_ERR_NACK_DEFER: + msg->reply = aux->native ? DP_AUX_NATIVE_REPLY_NACK : DP_AUX_I2C_REPLY_NACK; + break; + case DP_AUX_ERR_TOUT: + ret = -ETIMEDOUT; + break; + } + } + + aux->cmd_busy = false; + +exit: + mutex_unlock(&aux->mutex); + + return ret; +} + +void dp_aux_isr(struct drm_dp_aux *dp_aux) +{ + u32 isr; + struct dp_aux_private *aux; + + if (!dp_aux) { + DRM_ERROR("invalid input\n"); + return; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + isr = dp_catalog_aux_get_irq(aux->catalog); + + /* no interrupts pending, return immediately */ + if (!isr) + return; + + if (!aux->cmd_busy) { + DRM_ERROR("Unexpected DP AUX IRQ %#010x when not busy\n", isr); + return; + } + + /* + * The logic below assumes only one error bit is set (other than "done" + * which can apparently be set at the same time as some of the other + * bits). Warn if more than one get set so we know we need to improve + * the logic. 
+ */ + if (hweight32(isr & ~DP_INTR_AUX_XFER_DONE) > 1) + DRM_WARN("Some DP AUX interrupts unhandled: %#010x\n", isr); + + if (isr & DP_INTR_AUX_ERROR) { + aux->aux_error_num = DP_AUX_ERR_PHY; + dp_catalog_aux_clear_hw_interrupts(aux->catalog); + } else if (isr & DP_INTR_NACK_DEFER) { + aux->aux_error_num = DP_AUX_ERR_NACK_DEFER; + } else if (isr & DP_INTR_WRONG_ADDR) { + aux->aux_error_num = DP_AUX_ERR_ADDR; + } else if (isr & DP_INTR_TIMEOUT) { + aux->aux_error_num = DP_AUX_ERR_TOUT; + } else if (!aux->native && (isr & DP_INTR_I2C_NACK)) { + aux->aux_error_num = DP_AUX_ERR_NACK; + } else if (!aux->native && (isr & DP_INTR_I2C_DEFER)) { + if (isr & DP_INTR_AUX_XFER_DONE) + aux->aux_error_num = DP_AUX_ERR_NACK; + else + aux->aux_error_num = DP_AUX_ERR_DEFER; + } else if (isr & DP_INTR_AUX_XFER_DONE) { + aux->aux_error_num = DP_AUX_ERR_NONE; + } else { + DRM_WARN("Unexpected interrupt: %#010x\n", isr); + return; + } + + complete(&aux->comp); +} + +void dp_aux_reconfig(struct drm_dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + dp_catalog_aux_update_cfg(aux->catalog); + dp_catalog_aux_reset(aux->catalog); +} + +void dp_aux_init(struct drm_dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + if (!dp_aux) { + DRM_ERROR("invalid input\n"); + return; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + mutex_lock(&aux->mutex); + + dp_catalog_aux_enable(aux->catalog, true); + aux->retry_cnt = 0; + aux->initted = true; + + mutex_unlock(&aux->mutex); +} + +void dp_aux_deinit(struct drm_dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + mutex_lock(&aux->mutex); + + aux->initted = false; + dp_catalog_aux_enable(aux->catalog, false); + + mutex_unlock(&aux->mutex); +} + +int dp_aux_register(struct drm_dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + int ret; + + if (!dp_aux) { + DRM_ERROR("invalid input\n"); + return -EINVAL; + } + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + aux->dp_aux.name = "dpu_dp_aux"; + aux->dp_aux.dev = aux->dev; + aux->dp_aux.transfer = dp_aux_transfer; + ret = drm_dp_aux_register(&aux->dp_aux); + if (ret) { + DRM_ERROR("%s: failed to register drm aux: %d\n", __func__, + ret); + return ret; + } + + return 0; +} + +void dp_aux_unregister(struct drm_dp_aux *dp_aux) +{ + drm_dp_aux_unregister(dp_aux); +} + +struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog, + bool is_edp) +{ + struct dp_aux_private *aux; + + if (!catalog) { + DRM_ERROR("invalid input\n"); + return ERR_PTR(-ENODEV); + } + + aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL); + if (!aux) + return ERR_PTR(-ENOMEM); + + init_completion(&aux->comp); + aux->cmd_busy = false; + aux->is_edp = is_edp; + mutex_init(&aux->mutex); + + aux->dev = dev; + aux->catalog = catalog; + aux->retry_cnt = 0; + + return &aux->dp_aux; +} + +void dp_aux_put(struct drm_dp_aux *dp_aux) +{ + struct dp_aux_private *aux; + + if (!dp_aux) + return; + + aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + + mutex_destroy(&aux->mutex); + + devm_kfree(aux->dev, aux); +} diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h new file mode 100644 index 000000000..e930974bc --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_aux.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DP_AUX_H_ +#define _DP_AUX_H_ + +#include "dp_catalog.h" +#include + +int dp_aux_register(struct drm_dp_aux *dp_aux); +void dp_aux_unregister(struct drm_dp_aux *dp_aux); +void dp_aux_isr(struct drm_dp_aux *dp_aux); +void dp_aux_init(struct drm_dp_aux *dp_aux); +void dp_aux_deinit(struct drm_dp_aux *dp_aux); +void dp_aux_reconfig(struct drm_dp_aux *dp_aux); + +struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog, + bool is_edp); +void dp_aux_put(struct drm_dp_aux *aux); + +#endif /*__DP_AUX_H_*/ diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c new file mode 100644 index 000000000..421391755 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_catalog.c @@ -0,0 +1,1096 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include + +#include "dp_catalog.h" +#include "dp_reg.h" + +#define POLLING_SLEEP_US 1000 +#define POLLING_TIMEOUT_US 10000 + +#define SCRAMBLER_RESET_COUNT_VALUE 0xFC + +#define DP_INTERRUPT_STATUS_ACK_SHIFT 1 +#define DP_INTERRUPT_STATUS_MASK_SHIFT 2 + +#define DP_INTF_CONFIG_DATABUS_WIDEN BIT(4) + +#define DP_INTERRUPT_STATUS1 \ + (DP_INTR_AUX_XFER_DONE| \ + DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \ + DP_INTR_NACK_DEFER | DP_INTR_WRONG_DATA_CNT | \ + DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER | \ + DP_INTR_PLL_UNLOCKED | DP_INTR_AUX_ERROR) + +#define DP_INTERRUPT_STATUS1_ACK \ + (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_ACK_SHIFT) +#define DP_INTERRUPT_STATUS1_MASK \ + (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_MASK_SHIFT) + +#define DP_INTERRUPT_STATUS2 \ + (DP_INTR_READY_FOR_VIDEO | DP_INTR_IDLE_PATTERN_SENT | \ + DP_INTR_FRAME_END | DP_INTR_CRC_UPDATED) + +#define DP_INTERRUPT_STATUS2_ACK \ + (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_ACK_SHIFT) +#define DP_INTERRUPT_STATUS2_MASK \ + (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_MASK_SHIFT) + +struct dp_catalog_private { + struct device *dev; + struct drm_device *drm_dev; + struct dp_io *io; + u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX]; + struct dp_catalog dp_catalog; + u8 aux_lut_cfg_index[PHY_AUX_CFG_MAX]; +}; + +void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *disp_state) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + struct dss_io_data *dss = &catalog->io->dp_controller; + + msm_disp_snapshot_add_block(disp_state, dss->ahb.len, dss->ahb.base, "dp_ahb"); + msm_disp_snapshot_add_block(disp_state, dss->aux.len, dss->aux.base, "dp_aux"); + msm_disp_snapshot_add_block(disp_state, dss->link.len, dss->link.base, "dp_link"); + msm_disp_snapshot_add_block(disp_state, dss->p0.len, dss->p0.base, "dp_p0"); +} + +static inline u32 dp_read_aux(struct dp_catalog_private *catalog, u32 offset) +{ + return readl_relaxed(catalog->io->dp_controller.aux.base + offset); +} + +static inline void dp_write_aux(struct dp_catalog_private *catalog, + u32 offset, u32 data) +{ + /* + * To make sure aux reg writes happens before any other operation, + * this function uses writel() instread of writel_relaxed() + */ + writel(data, catalog->io->dp_controller.aux.base + offset); +} + +static inline u32 dp_read_ahb(const struct dp_catalog_private *catalog, u32 offset) +{ + return readl_relaxed(catalog->io->dp_controller.ahb.base + offset); +} + +static inline void dp_write_ahb(struct dp_catalog_private 
*catalog, + u32 offset, u32 data) +{ + /* + * To make sure AHB reg writes happen before any other operation, + * this function uses writel() instead of writel_relaxed() + */ + writel(data, catalog->io->dp_controller.ahb.base + offset); +} + +static inline void dp_write_p0(struct dp_catalog_private *catalog, + u32 offset, u32 data) +{ + /* + * To make sure interface reg writes happen before any other operation, + * this function uses writel() instead of writel_relaxed() + */ + writel(data, catalog->io->dp_controller.p0.base + offset); +} + +static inline u32 dp_read_p0(struct dp_catalog_private *catalog, + u32 offset) +{ + return readl_relaxed(catalog->io->dp_controller.p0.base + offset); +} + +static inline u32 dp_read_link(struct dp_catalog_private *catalog, u32 offset) +{ + return readl_relaxed(catalog->io->dp_controller.link.base + offset); +} + +static inline void dp_write_link(struct dp_catalog_private *catalog, + u32 offset, u32 data) +{ + /* + * To make sure link reg writes happen before any other operation, + * this function uses writel() instead of writel_relaxed() + */ + writel(data, catalog->io->dp_controller.link.base + offset); +} + +/* aux related catalog functions */ +u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + return dp_read_aux(catalog, REG_DP_AUX_DATA); +} + +int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_aux(catalog, REG_DP_AUX_DATA, dp_catalog->aux_data); + return 0; +} + +int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, dp_catalog->aux_data); + return 0; +} + +int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read) +{ + u32 data; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + if (read) { + data = dp_read_aux(catalog, REG_DP_AUX_TRANS_CTRL); + data &= ~DP_AUX_TRANS_CTRL_GO; + dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data); + } else { + dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, 0); + } + return 0; +} + +int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_read_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_STATUS); + dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f); + dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f); + dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0); + return 0; +} + +/** + * dp_catalog_aux_reset() - reset AUX controller + * + * @dp_catalog: DP catalog structure + * + * Return: void + * + * This function resets the AUX controller + * + * NOTE: resetting the AUX controller will also clear any pending HPD related interrupts + * + */ +void dp_catalog_aux_reset(struct dp_catalog *dp_catalog) +{ + u32 aux_ctrl; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL); + + aux_ctrl |= DP_AUX_CTRL_RESET; + dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); +
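/* the AUX block is held in reset for as long as DP_AUX_CTRL_RESET stays set */ +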
usleep_range(1000, 1100); /* h/w recommended delay */ + + aux_ctrl &= ~DP_AUX_CTRL_RESET; + dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); +} + +void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable) +{ + u32 aux_ctrl; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL); + + if (enable) { + dp_write_aux(catalog, REG_DP_TIMEOUT_COUNT, 0xffff); + dp_write_aux(catalog, REG_DP_AUX_LIMITS, 0xffff); + aux_ctrl |= DP_AUX_CTRL_ENABLE; + } else { + aux_ctrl &= ~DP_AUX_CTRL_ENABLE; + } + + dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl); +} + +void dp_catalog_aux_update_cfg(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + struct dp_io *dp_io = catalog->io; + struct phy *phy = dp_io->phy; + + phy_calibrate(phy); +} + +int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog) +{ + u32 state; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + /* poll for hpd connected status every 2ms and timeout after 500ms */ + return readl_poll_timeout(catalog->io->dp_controller.aux.base + + REG_DP_DP_HPD_INT_STATUS, + state, state & DP_DP_HPD_STATE_STATUS_CONNECTED, + 2000, 500000); +} + +static void dump_regs(void __iomem *base, int len) +{ + int i; + u32 x0, x4, x8, xc; + u32 addr_off = 0; + + len = DIV_ROUND_UP(len, 16); + for (i = 0; i < len; i++) { + x0 = readl_relaxed(base + addr_off); + x4 = readl_relaxed(base + addr_off + 0x04); + x8 = readl_relaxed(base + addr_off + 0x08); + xc = readl_relaxed(base + addr_off + 0x0c); + + pr_info("%08x: %08x %08x %08x %08x", addr_off, x0, x4, x8, xc); + addr_off += 16; + } +} + +void dp_catalog_dump_regs(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + struct dss_io_data *io = &catalog->io->dp_controller; + + pr_info("AHB regs\n"); + dump_regs(io->ahb.base, io->ahb.len); + + pr_info("AUXCLK regs\n"); + dump_regs(io->aux.base, io->aux.len); + + pr_info("LCLK regs\n"); + dump_regs(io->link.base, io->link.len); + + pr_info("P0CLK regs\n"); + dump_regs(io->p0.base, io->p0.len); +} + +u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + u32 intr, intr_ack; + + intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS); + intr &= ~DP_INTERRUPT_STATUS1_MASK; + intr_ack = (intr & DP_INTERRUPT_STATUS1) + << DP_INTERRUPT_STATUS_ACK_SHIFT; + dp_write_ahb(catalog, REG_DP_INTR_STATUS, intr_ack | + DP_INTERRUPT_STATUS1_MASK); + + return intr; + +} + +/* controller related catalog functions */ +void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog, + u32 dp_tu, u32 valid_boundary, + u32 valid_boundary2) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_link(catalog, REG_DP_VALID_BOUNDARY, valid_boundary); + dp_write_link(catalog, REG_DP_TU, dp_tu); + dp_write_link(catalog, REG_DP_VALID_BOUNDARY_2, valid_boundary2); +} + +void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_link(catalog, REG_DP_STATE_CTRL, state); +} + +void dp_catalog_ctrl_config_ctrl(struct dp_catalog 
*dp_catalog, u32 cfg) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + drm_dbg_dp(catalog->drm_dev, "DP_CONFIGURATION_CTRL=0x%x\n", cfg); + + dp_write_link(catalog, REG_DP_CONFIGURATION_CTRL, cfg); +} + +void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + u32 ln_0 = 0, ln_1 = 1, ln_2 = 2, ln_3 = 3; /* One-to-One mapping */ + u32 ln_mapping; + + ln_mapping = ln_0 << LANE0_MAPPING_SHIFT; + ln_mapping |= ln_1 << LANE1_MAPPING_SHIFT; + ln_mapping |= ln_2 << LANE2_MAPPING_SHIFT; + ln_mapping |= ln_3 << LANE3_MAPPING_SHIFT; + + dp_write_link(catalog, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING, + ln_mapping); +} + +void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, + bool enable) +{ + u32 mainlink_ctrl; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + drm_dbg_dp(catalog->drm_dev, "enable=%d\n", enable); + if (enable) { + /* + * To make sure link reg writes happens before other operation, + * dp_write_link() function uses writel() + */ + mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + + mainlink_ctrl &= ~(DP_MAINLINK_CTRL_RESET | + DP_MAINLINK_CTRL_ENABLE); + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + + mainlink_ctrl |= DP_MAINLINK_CTRL_RESET; + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + + mainlink_ctrl &= ~DP_MAINLINK_CTRL_RESET; + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + + mainlink_ctrl |= (DP_MAINLINK_CTRL_ENABLE | + DP_MAINLINK_FB_BOUNDARY_SEL); + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + } else { + mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + mainlink_ctrl &= ~DP_MAINLINK_CTRL_ENABLE; + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl); + } +} + +void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, + u32 colorimetry_cfg, + u32 test_bits_depth) +{ + u32 misc_val; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + misc_val = dp_read_link(catalog, REG_DP_MISC1_MISC0); + + /* clear bpp bits */ + misc_val &= ~(0x07 << DP_MISC0_TEST_BITS_DEPTH_SHIFT); + misc_val |= colorimetry_cfg << DP_MISC0_COLORIMETRY_CFG_SHIFT; + misc_val |= test_bits_depth << DP_MISC0_TEST_BITS_DEPTH_SHIFT; + /* Configure clock to synchronous mode */ + misc_val |= DP_MISC0_SYNCHRONOUS_CLK; + + drm_dbg_dp(catalog->drm_dev, "misc settings = 0x%x\n", misc_val); + dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val); +} + +void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, + u32 rate, u32 stream_rate_khz, + bool fixed_nvid) +{ + u32 pixel_m, pixel_n; + u32 mvid, nvid, pixel_div = 0, dispcc_input_rate; + u32 const nvid_fixed = DP_LINK_CONSTANT_N_VALUE; + u32 const link_rate_hbr2 = 540000; + u32 const link_rate_hbr3 = 810000; + unsigned long den, num; + + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + if (rate == link_rate_hbr3) + pixel_div = 6; + else if (rate == 162000 || rate == 270000) + pixel_div = 2; + else if (rate == link_rate_hbr2) + pixel_div = 4; + else + DRM_ERROR("Invalid pixel mux divider\n"); + + dispcc_input_rate = (rate * 10) / pixel_div; + + rational_best_approximation(dispcc_input_rate, stream_rate_khz, + (unsigned long)(1 << 16) - 1, + (unsigned long)(1 << 16) - 1, &den, &num); + + den = ~(den - 
num); + den = den & 0xFFFF; + pixel_m = num; + pixel_n = den; + + mvid = (pixel_m & 0xFFFF) * 5; + nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF); + + if (nvid < nvid_fixed) { + u32 temp; + + temp = (nvid_fixed / nvid) * nvid; + mvid = (nvid_fixed / nvid) * mvid; + nvid = temp; + } + + if (link_rate_hbr2 == rate) + nvid *= 2; + + if (link_rate_hbr3 == rate) + nvid *= 3; + + drm_dbg_dp(catalog->drm_dev, "mvid=0x%x, nvid=0x%x\n", mvid, nvid); + dp_write_link(catalog, REG_DP_SOFTWARE_MVID, mvid); + dp_write_link(catalog, REG_DP_SOFTWARE_NVID, nvid); + dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0); +} + +int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog, + u32 state_bit) +{ + int bit, ret; + u32 data; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + bit = BIT(state_bit - 1); + drm_dbg_dp(catalog->drm_dev, "hw: bit=%d train=%d\n", bit, state_bit); + dp_catalog_ctrl_state_ctrl(dp_catalog, bit); + + bit = BIT(state_bit - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT; + + /* Poll for mainlink ready status */ + ret = readx_poll_timeout(readl, catalog->io->dp_controller.link.base + + REG_DP_MAINLINK_READY, + data, data & bit, + POLLING_SLEEP_US, POLLING_TIMEOUT_US); + if (ret < 0) { + DRM_ERROR("set state_bit for link_train=%d failed\n", state_bit); + return ret; + } + return 0; +} + +/** + * dp_catalog_hw_revision() - retrieve DP hw revision + * + * @dp_catalog: DP catalog structure + * + * Return: DP controller hw revision + * + */ +u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog) +{ + const struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + return dp_read_ahb(catalog, REG_DP_HW_VERSION); +} + +/** + * dp_catalog_ctrl_reset() - reset DP controller + * + * @dp_catalog: DP catalog structure + * + * Return: void + * + * This function resets the DP controller + * + * NOTE: resetting the DP controller will also clear any pending HPD related interrupts + * + */ +void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog) +{ + u32 sw_reset; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + sw_reset = dp_read_ahb(catalog, REG_DP_SW_RESET); + + sw_reset |= DP_SW_RESET; + dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset); + usleep_range(1000, 1100); /* h/w recommended delay */ + + sw_reset &= ~DP_SW_RESET; + dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset); +} + +bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog) +{ + u32 data; + int ret; + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + /* Poll for mainlink ready status */ + ret = readl_poll_timeout(catalog->io->dp_controller.link.base + + REG_DP_MAINLINK_READY, + data, data & DP_MAINLINK_READY_FOR_VIDEO, + POLLING_SLEEP_US, POLLING_TIMEOUT_US); + if (ret < 0) { + DRM_ERROR("mainlink not ready\n"); + return false; + } + + return true; +} + +void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, + bool enable) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + if (enable) { + dp_write_ahb(catalog, REG_DP_INTR_STATUS, + DP_INTERRUPT_STATUS1_MASK); + dp_write_ahb(catalog, REG_DP_INTR_STATUS2, + DP_INTERRUPT_STATUS2_MASK); + } else { + dp_write_ahb(catalog, REG_DP_INTR_STATUS, 0x00); + dp_write_ahb(catalog, REG_DP_INTR_STATUS2, 0x00); + } +} + +void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog, + u32
intr_mask, bool en) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + u32 config = dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK); + + config = (en ? config | intr_mask : config & ~intr_mask); + + drm_dbg_dp(catalog->drm_dev, "intr_mask=%#x config=%#x\n", + intr_mask, config); + dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK, + config & DP_DP_HPD_INT_MASK); +} + +void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER); + + /* Configure REFTIMER and enable it */ + reftimer |= DP_DP_HPD_REFTIMER_ENABLE; + dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer); + + /* Enable HPD */ + dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN); +} + +u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + u32 status; + + status = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS); + drm_dbg_dp(catalog->drm_dev, "aux status: %#x\n", status); + status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT; + status &= DP_DP_HPD_STATE_STATUS_BITS_MASK; + + return status; +} + +u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + int isr, mask; + + isr = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS); + dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK, + (isr & DP_DP_HPD_INT_MASK)); + mask = dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK); + + /* + * We only want to return interrupts that are unmasked to the caller. + * However, the interrupt status field also contains other + * informational bits about the HPD state status, so we only mask + * out the part of the register that tells us about which interrupts + * are pending. 
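+ * In other words, (isr & mask) keeps the pending unmasked interrupt + * bits, while (isr & ~DP_DP_HPD_INT_MASK) passes the informational + * state bits through unchanged.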
+ */ + return isr & (mask | ~DP_DP_HPD_INT_MASK); +} + +int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + u32 intr, intr_ack; + + intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS2); + intr &= ~DP_INTERRUPT_STATUS2_MASK; + intr_ack = (intr & DP_INTERRUPT_STATUS2) + << DP_INTERRUPT_STATUS_ACK_SHIFT; + dp_write_ahb(catalog, REG_DP_INTR_STATUS2, + intr_ack | DP_INTERRUPT_STATUS2_MASK); + + return intr; +} + +void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_ahb(catalog, REG_DP_PHY_CTRL, + DP_PHY_CTRL_SW_RESET | DP_PHY_CTRL_SW_RESET_PLL); + usleep_range(1000, 1100); /* h/w recommended delay */ + dp_write_ahb(catalog, REG_DP_PHY_CTRL, 0x0); +} + +int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog, + u8 v_level, u8 p_level) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + struct dp_io *dp_io = catalog->io; + struct phy *phy = dp_io->phy; + struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp; + + /* TODO: Update for all lanes instead of just first one */ + opts_dp->voltage[0] = v_level; + opts_dp->pre[0] = p_level; + opts_dp->set_voltages = 1; + phy_configure(phy, &dp_io->phy_opts); + opts_dp->set_voltages = 0; + + return 0; +} + +void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog, + u32 pattern) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + u32 value = 0x0; + + /* Make sure to clear the current pattern before starting a new one */ + dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0); + + drm_dbg_dp(catalog->drm_dev, "pattern: %#x\n", pattern); + switch (pattern) { + case DP_PHY_TEST_PATTERN_D10_2: + dp_write_link(catalog, REG_DP_STATE_CTRL, + DP_STATE_CTRL_LINK_TRAINING_PATTERN1); + break; + case DP_PHY_TEST_PATTERN_ERROR_COUNT: + value &= ~(1 << 16); + dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + value); + value |= SCRAMBLER_RESET_COUNT_VALUE; + dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + value); + dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, + DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2); + dp_write_link(catalog, REG_DP_STATE_CTRL, + DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE); + break; + case DP_PHY_TEST_PATTERN_PRBS7: + dp_write_link(catalog, REG_DP_STATE_CTRL, + DP_STATE_CTRL_LINK_PRBS7); + break; + case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: + dp_write_link(catalog, REG_DP_STATE_CTRL, + DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN); + /* 00111110000011111000001111100000 */ + dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0, + 0x3E0F83E0); + /* 00001111100000111110000011111000 */ + dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1, + 0x0F83E0F8); + /* 1111100000111110 */ + dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2, + 0x0000F83E); + break; + case DP_PHY_TEST_PATTERN_CP2520: + value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + value &= ~DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER; + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value); + + value = DP_HBR2_ERM_PATTERN; + dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + value); + value |= SCRAMBLER_RESET_COUNT_VALUE; + dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET, + value); + dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, + 
DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2); + dp_write_link(catalog, REG_DP_STATE_CTRL, + DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE); + value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL); + value |= DP_MAINLINK_CTRL_ENABLE; + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value); + break; + case DP_PHY_TEST_PATTERN_SEL_MASK: + dp_write_link(catalog, REG_DP_MAINLINK_CTRL, + DP_MAINLINK_CTRL_ENABLE); + dp_write_link(catalog, REG_DP_STATE_CTRL, + DP_STATE_CTRL_LINK_TRAINING_PATTERN4); + break; + default: + drm_dbg_dp(catalog->drm_dev, + "No valid test pattern requested: %#x\n", pattern); + break; + } +} + +u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + return dp_read_link(catalog, REG_DP_MAINLINK_READY); +} + +/* panel related catalog functions */ +int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + u32 reg; + + dp_write_link(catalog, REG_DP_TOTAL_HOR_VER, + dp_catalog->total); + dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC, + dp_catalog->sync_start); + dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY, + dp_catalog->width_blanking); + dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_catalog->dp_active); + + reg = dp_read_p0(catalog, MMSS_DP_INTF_CONFIG); + + if (dp_catalog->wide_bus_en) + reg |= DP_INTF_CONFIG_DATABUS_WIDEN; + else + reg &= ~DP_INTF_CONFIG_DATABUS_WIDEN; + + + DRM_DEBUG_DP("wide_bus_en=%d reg=%#x\n", dp_catalog->wide_bus_en, reg); + + dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, reg); + return 0; +} + +void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog, + struct drm_display_mode *drm_mode) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + u32 hsync_period, vsync_period; + u32 display_v_start, display_v_end; + u32 hsync_start_x, hsync_end_x; + u32 v_sync_width; + u32 hsync_ctl; + u32 display_hctl; + + /* TPG config parameters*/ + hsync_period = drm_mode->htotal; + vsync_period = drm_mode->vtotal; + + display_v_start = ((drm_mode->vtotal - drm_mode->vsync_start) * + hsync_period); + display_v_end = ((vsync_period - (drm_mode->vsync_start - + drm_mode->vdisplay)) + * hsync_period) - 1; + + display_v_start += drm_mode->htotal - drm_mode->hsync_start; + display_v_end -= (drm_mode->hsync_start - drm_mode->hdisplay); + + hsync_start_x = drm_mode->htotal - drm_mode->hsync_start; + hsync_end_x = hsync_period - (drm_mode->hsync_start - + drm_mode->hdisplay) - 1; + + v_sync_width = drm_mode->vsync_end - drm_mode->vsync_start; + + hsync_ctl = (hsync_period << 16) | + (drm_mode->hsync_end - drm_mode->hsync_start); + display_hctl = (hsync_end_x << 16) | hsync_start_x; + + + dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0x0); + dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl); + dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period * + hsync_period); + dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width * + hsync_period); + dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0); + dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0); + dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl); + dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_HCTL, 0); + dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F0, display_v_start); + dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end); + dp_write_p0(catalog, 
MMSS_INTF_DISPLAY_V_START_F1, 0); + dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F1, 0); + dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F0, 0); + dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F0, 0); + dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F1, 0); + dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F1, 0); + dp_write_p0(catalog, MMSS_DP_INTF_POLARITY_CTL, 0); + + dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, + DP_TPG_CHECKERED_RECT_PATTERN); + dp_write_p0(catalog, MMSS_DP_TPG_VIDEO_CONFIG, + DP_TPG_VIDEO_CONFIG_BPP_8BIT | + DP_TPG_VIDEO_CONFIG_RGB); + dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, + DP_BIST_ENABLE_DPBIST_EN); + dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, + DP_TIMING_ENGINE_EN_EN); + drm_dbg_dp(catalog->drm_dev, "%s: enabled tpg\n", __func__); +} + +void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, 0x0); + dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, 0x0); + dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, 0x0); +} + +struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io) +{ + struct dp_catalog_private *catalog; + + if (!io) { + DRM_ERROR("invalid input\n"); + return ERR_PTR(-EINVAL); + } + + catalog = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL); + if (!catalog) + return ERR_PTR(-ENOMEM); + + catalog->dev = dev; + catalog->io = io; + + return &catalog->dp_catalog; +} + +void dp_catalog_audio_get_header(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX]; + enum dp_catalog_audio_sdp_type sdp; + enum dp_catalog_audio_header_type header; + + if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + sdp_map = catalog->audio_map; + sdp = dp_catalog->sdp_type; + header = dp_catalog->sdp_header; + + dp_catalog->audio_data = dp_read_link(catalog, + sdp_map[sdp][header]); +} + +void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX]; + enum dp_catalog_audio_sdp_type sdp; + enum dp_catalog_audio_header_type header; + u32 data; + + if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + sdp_map = catalog->audio_map; + sdp = dp_catalog->sdp_type; + header = dp_catalog->sdp_header; + data = dp_catalog->audio_data; + + dp_write_link(catalog, sdp_map[sdp][header], data); +} + +void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + u32 acr_ctrl, select; + + if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + select = dp_catalog->audio_data; + acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14); + + drm_dbg_dp(catalog->drm_dev, "select: %#x, acr_ctrl: %#x\n", + select, acr_ctrl); + + dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl); +} + +void dp_catalog_audio_enable(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + bool enable; + u32 audio_ctrl; + + if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + enable = !!dp_catalog->audio_data; + audio_ctrl = dp_read_link(catalog, MMSS_DP_AUDIO_CFG); + + if (enable) + audio_ctrl |= BIT(0); + else + audio_ctrl &= ~BIT(0); + + drm_dbg_dp(catalog->drm_dev, "dp_audio_cfg = 0x%x\n", 
audio_ctrl); + + dp_write_link(catalog, MMSS_DP_AUDIO_CFG, audio_ctrl); + /* make sure audio engine is disabled */ + wmb(); +} + +void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + u32 sdp_cfg = 0; + u32 sdp_cfg2 = 0; + + if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + sdp_cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG); + /* AUDIO_TIMESTAMP_SDP_EN */ + sdp_cfg |= BIT(1); + /* AUDIO_STREAM_SDP_EN */ + sdp_cfg |= BIT(2); + /* AUDIO_COPY_MANAGEMENT_SDP_EN */ + sdp_cfg |= BIT(5); + /* AUDIO_ISRC_SDP_EN */ + sdp_cfg |= BIT(6); + /* AUDIO_INFOFRAME_SDP_EN */ + sdp_cfg |= BIT(20); + + drm_dbg_dp(catalog->drm_dev, "sdp_cfg = 0x%x\n", sdp_cfg); + + dp_write_link(catalog, MMSS_DP_SDP_CFG, sdp_cfg); + + sdp_cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2); + /* IFRM_REGSRC -> Do not use reg values */ + sdp_cfg2 &= ~BIT(0); + /* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */ + sdp_cfg2 &= ~BIT(1); + + drm_dbg_dp(catalog->drm_dev, "sdp_cfg2 = 0x%x\n", sdp_cfg2); + + dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2); +} + +void dp_catalog_audio_init(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + + static u32 sdp_map[][DP_AUDIO_SDP_HEADER_MAX] = { + { + MMSS_DP_AUDIO_STREAM_0, + MMSS_DP_AUDIO_STREAM_1, + MMSS_DP_AUDIO_STREAM_1, + }, + { + MMSS_DP_AUDIO_TIMESTAMP_0, + MMSS_DP_AUDIO_TIMESTAMP_1, + MMSS_DP_AUDIO_TIMESTAMP_1, + }, + { + MMSS_DP_AUDIO_INFOFRAME_0, + MMSS_DP_AUDIO_INFOFRAME_1, + MMSS_DP_AUDIO_INFOFRAME_1, + }, + { + MMSS_DP_AUDIO_COPYMANAGEMENT_0, + MMSS_DP_AUDIO_COPYMANAGEMENT_1, + MMSS_DP_AUDIO_COPYMANAGEMENT_1, + }, + { + MMSS_DP_AUDIO_ISRC_0, + MMSS_DP_AUDIO_ISRC_1, + MMSS_DP_AUDIO_ISRC_1, + }, + }; + + if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + catalog->audio_map = sdp_map; +} + +void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog) +{ + struct dp_catalog_private *catalog; + u32 mainlink_levels, safe_to_exit_level; + + if (!dp_catalog) + return; + + catalog = container_of(dp_catalog, + struct dp_catalog_private, dp_catalog); + + safe_to_exit_level = dp_catalog->audio_data; + mainlink_levels = dp_read_link(catalog, REG_DP_MAINLINK_LEVELS); + mainlink_levels &= 0xFE0; + mainlink_levels |= safe_to_exit_level; + + drm_dbg_dp(catalog->drm_dev, + "mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n", + mainlink_levels, safe_to_exit_level); + + dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, mainlink_levels); +} diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h new file mode 100644 index 000000000..f36b7b372 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_catalog.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DP_CATALOG_H_ +#define _DP_CATALOG_H_ + +#include <drm/drm_modes.h> + +#include "dp_parser.h" +#include "disp/msm_disp_snapshot.h" + +/* interrupts */ +#define DP_INTR_HPD BIT(0) +#define DP_INTR_AUX_XFER_DONE BIT(3) +#define DP_INTR_WRONG_ADDR BIT(6) +#define DP_INTR_TIMEOUT BIT(9) +#define DP_INTR_NACK_DEFER BIT(12) +#define DP_INTR_WRONG_DATA_CNT BIT(15) +#define DP_INTR_I2C_NACK BIT(18) +#define DP_INTR_I2C_DEFER BIT(21) +#define DP_INTR_PLL_UNLOCKED BIT(24) +#define DP_INTR_AUX_ERROR BIT(27) + +#define DP_INTR_READY_FOR_VIDEO BIT(0) +#define DP_INTR_IDLE_PATTERN_SENT BIT(3) +#define DP_INTR_FRAME_END BIT(6) +#define DP_INTR_CRC_UPDATED BIT(9) + +#define DP_AUX_CFG_MAX_VALUE_CNT 3 + +/* PHY AUX config registers */ +enum dp_phy_aux_config_type { + PHY_AUX_CFG0, + PHY_AUX_CFG1, + PHY_AUX_CFG2, + PHY_AUX_CFG3, + PHY_AUX_CFG4, + PHY_AUX_CFG5, + PHY_AUX_CFG6, + PHY_AUX_CFG7, + PHY_AUX_CFG8, + PHY_AUX_CFG9, + PHY_AUX_CFG_MAX, +}; + +enum dp_catalog_audio_sdp_type { + DP_AUDIO_SDP_STREAM, + DP_AUDIO_SDP_TIMESTAMP, + DP_AUDIO_SDP_INFOFRAME, + DP_AUDIO_SDP_COPYMANAGEMENT, + DP_AUDIO_SDP_ISRC, + DP_AUDIO_SDP_MAX, +}; + +enum dp_catalog_audio_header_type { + DP_AUDIO_SDP_HEADER_1, + DP_AUDIO_SDP_HEADER_2, + DP_AUDIO_SDP_HEADER_3, + DP_AUDIO_SDP_HEADER_MAX, +}; + +struct dp_catalog { + u32 aux_data; + u32 total; + u32 sync_start; + u32 width_blanking; + u32 dp_active; + enum dp_catalog_audio_sdp_type sdp_type; + enum dp_catalog_audio_header_type sdp_header; + u32 audio_data; + bool wide_bus_en; +}; + +/* Debug module */ +void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *disp_state); + +/* AUX APIs */ +u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog); +int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog); +int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog); +int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read); +int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog); +void dp_catalog_aux_reset(struct dp_catalog *dp_catalog); +void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable); +void dp_catalog_aux_update_cfg(struct dp_catalog *dp_catalog); +int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog); +u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog); + +/* DP Controller APIs */ +void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state); +void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 config); +void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog); +void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, bool enable); +void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, u32 cc, u32 tb); +void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 rate, + u32 stream_rate_khz, bool fixed_nvid); +int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog, u32 pattern); +u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog); +void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog); +bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog); +void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable); +void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog, + u32 intr_mask, bool en); +void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog); +u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog); +u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog); +void dp_catalog_ctrl_phy_reset(struct dp_catalog
*dp_catalog); +int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog, u8 v_level, + u8 p_level); +int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog); +void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog, + u32 dp_tu, u32 valid_boundary, + u32 valid_boundary2); +void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog, + u32 pattern); +u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog); + +/* DP Panel APIs */ +int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog); +void dp_catalog_dump_regs(struct dp_catalog *dp_catalog); +void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog, + struct drm_display_mode *drm_mode); +void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog); + +struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io); + +/* DP Audio APIs */ +void dp_catalog_audio_get_header(struct dp_catalog *catalog); +void dp_catalog_audio_set_header(struct dp_catalog *catalog); +void dp_catalog_audio_config_acr(struct dp_catalog *catalog); +void dp_catalog_audio_enable(struct dp_catalog *catalog); +void dp_catalog_audio_config_sdp(struct dp_catalog *catalog); +void dp_catalog_audio_init(struct dp_catalog *catalog); +void dp_catalog_audio_sfe_level(struct dp_catalog *catalog); + +#endif /* _DP_CATALOG_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c new file mode 100644 index 000000000..103eef9f0 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -0,0 +1,2049 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ + +#include <linux/types.h> +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/phy/phy.h> +#include <linux/phy/phy-dp.h> +#include <linux/pm_opp.h> + +#include <drm/display/drm_dp_helper.h> +#include <drm/drm_fixed.h> +#include <drm/drm_print.h> + +#include "dp_reg.h" +#include "dp_ctrl.h" +#include "dp_link.h" + +#define DP_KHZ_TO_HZ 1000 +#define IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES (30 * HZ / 1000) /* 30 ms */ +#define WAIT_FOR_VIDEO_READY_TIMEOUT_JIFFIES (HZ / 2) + +#define DP_CTRL_INTR_READY_FOR_VIDEO BIT(0) +#define DP_CTRL_INTR_IDLE_PATTERN_SENT BIT(3) + +#define MR_LINK_TRAINING1 0x8 +#define MR_LINK_SYMBOL_ERM 0x80 +#define MR_LINK_PRBS7 0x100 +#define MR_LINK_CUSTOM80 0x200 +#define MR_LINK_TRAINING4 0x40 + +enum { + DP_TRAINING_NONE, + DP_TRAINING_1, + DP_TRAINING_2, +}; + +struct dp_tu_calc_input { + u64 lclk; /* 162, 270, 540 and 810 */ + u64 pclk_khz; /* in KHz */ + u64 hactive; /* active h-width */ + u64 hporch; /* bp + fp + pulse */ + int nlanes; /* no.of.lanes */ + int bpp; /* bits */ + int pixel_enc; /* 444, 420, 422 */ + int dsc_en; /* dsc on/off */ + int async_en; /* async mode */ + int fec_en; /* fec */ + int compress_ratio; /* 2:1 = 200, 3:1 = 300, 3.75:1 = 375 */ + int num_of_dsc_slices; /* number of slices per line */ +}; + +struct dp_vc_tu_mapping_table { + u32 vic; + u8 lanes; + u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20), 810 (30) */ + u8 bpp; + u8 valid_boundary_link; + u16 delay_start_link; + bool boundary_moderation_en; + u8 valid_lower_boundary_link; + u8 upper_boundary_count; + u8 lower_boundary_count; + u8 tu_size_minus1; +}; + +struct dp_ctrl_private { + struct dp_ctrl dp_ctrl; + struct drm_device *drm_dev; + struct device *dev; + struct drm_dp_aux *aux; + struct dp_panel *panel; + struct dp_link *link; + struct dp_power *power; + struct dp_parser *parser; + struct dp_catalog *catalog; + + struct completion idle_comp; + struct completion video_comp; +}; + +static int dp_aux_link_configure(struct drm_dp_aux *aux,
+ struct dp_link_info *link) +{ + u8 values[2]; + int err; + + values[0] = drm_dp_link_rate_to_bw_code(link->rate); + values[1] = link->num_lanes; + + if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING) + values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values)); + if (err < 0) + return err; + + return 0; +} + +void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + reinit_completion(&ctrl->idle_comp); + dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_PUSH_IDLE); + + if (!wait_for_completion_timeout(&ctrl->idle_comp, + IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES)) + pr_warn("PUSH_IDLE pattern timed out\n"); + + drm_dbg_dp(ctrl->drm_dev, "mainlink off\n"); +} + +static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl) +{ + u32 config = 0, tbd; + const u8 *dpcd = ctrl->panel->dpcd; + + /* Default-> LSCLK DIV: 1/4 LCLK */ + config |= (2 << DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT); + + /* Scrambler reset enable */ + if (drm_dp_alternate_scrambler_reset_cap(dpcd)) + config |= DP_CONFIGURATION_CTRL_ASSR; + + tbd = dp_link_get_test_bits_depth(ctrl->link, + ctrl->panel->dp_mode.bpp); + + if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN) { + pr_debug("BIT_DEPTH not set. Configure default\n"); + tbd = DP_TEST_BIT_DEPTH_8; + } + + config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT; + + /* Num of Lanes */ + config |= ((ctrl->link->link_params.num_lanes - 1) + << DP_CONFIGURATION_CTRL_NUM_OF_LANES_SHIFT); + + if (drm_dp_enhanced_frame_cap(dpcd)) + config |= DP_CONFIGURATION_CTRL_ENHANCED_FRAMING; + + config |= DP_CONFIGURATION_CTRL_P_INTERLACED; /* progressive video */ + + /* sync clock & static Mvid */ + config |= DP_CONFIGURATION_CTRL_STATIC_DYNAMIC_CN; + config |= DP_CONFIGURATION_CTRL_SYNC_ASYNC_CLK; + + dp_catalog_ctrl_config_ctrl(ctrl->catalog, config); +} + +static void dp_ctrl_configure_source_params(struct dp_ctrl_private *ctrl) +{ + u32 cc, tb; + + dp_catalog_ctrl_lane_mapping(ctrl->catalog); + dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true); + + dp_ctrl_config_ctrl(ctrl); + + tb = dp_link_get_test_bits_depth(ctrl->link, + ctrl->panel->dp_mode.bpp); + cc = dp_link_get_colorimetry_config(ctrl->link); + dp_catalog_ctrl_config_misc(ctrl->catalog, cc, tb); + dp_panel_timing_cfg(ctrl->panel); +} + +/* + * The structure and a few functions below are an IP/hardware- + * specific implementation.
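The transfer unit (TU) sizing below is done with the kernel's 32.32 fixed-point helpers (drm_fixp_*).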
Most of the implementation will not + * have coding comments + */ +struct tu_algo_data { + s64 lclk_fp; + s64 pclk_fp; + s64 lwidth; + s64 lwidth_fp; + s64 hbp_relative_to_pclk; + s64 hbp_relative_to_pclk_fp; + int nlanes; + int bpp; + int pixelEnc; + int dsc_en; + int async_en; + int bpc; + + uint delay_start_link_extra_pixclk; + int extra_buffer_margin; + s64 ratio_fp; + s64 original_ratio_fp; + + s64 err_fp; + s64 n_err_fp; + s64 n_n_err_fp; + int tu_size; + int tu_size_desired; + int tu_size_minus1; + + int valid_boundary_link; + s64 resulting_valid_fp; + s64 total_valid_fp; + s64 effective_valid_fp; + s64 effective_valid_recorded_fp; + int n_tus; + int n_tus_per_lane; + int paired_tus; + int remainder_tus; + int remainder_tus_upper; + int remainder_tus_lower; + int extra_bytes; + int filler_size; + int delay_start_link; + + int extra_pclk_cycles; + int extra_pclk_cycles_in_link_clk; + s64 ratio_by_tu_fp; + s64 average_valid2_fp; + int new_valid_boundary_link; + int remainder_symbols_exist; + int n_symbols; + s64 n_remainder_symbols_per_lane_fp; + s64 last_partial_tu_fp; + s64 TU_ratio_err_fp; + + int n_tus_incl_last_incomplete_tu; + int extra_pclk_cycles_tmp; + int extra_pclk_cycles_in_link_clk_tmp; + int extra_required_bytes_new_tmp; + int filler_size_tmp; + int lower_filler_size_tmp; + int delay_start_link_tmp; + + bool boundary_moderation_en; + int boundary_mod_lower_err; + int upper_boundary_count; + int lower_boundary_count; + int i_upper_boundary_count; + int i_lower_boundary_count; + int valid_lower_boundary_link; + int even_distribution_BF; + int even_distribution_legacy; + int even_distribution; + int min_hblank_violated; + s64 delay_start_time_fp; + s64 hbp_time_fp; + s64 hactive_time_fp; + s64 diff_abs_fp; + + s64 ratio; +}; + +static int _tu_param_compare(s64 a, s64 b) +{ + u32 a_sign; + u32 b_sign; + s64 a_temp, b_temp, minus_1; + + if (a == b) + return 0; + + minus_1 = drm_fixp_from_fraction(-1, 1); + + a_sign = (a >> 32) & 0x80000000 ? 1 : 0; + + b_sign = (b >> 32) & 0x80000000 ? 
1 : 0; + + if (a_sign > b_sign) + return 2; + else if (b_sign > a_sign) + return 1; + + if (!a_sign && !b_sign) { /* positive */ + if (a > b) + return 1; + else + return 2; + } else { /* negative */ + a_temp = drm_fixp_mul(a, minus_1); + b_temp = drm_fixp_mul(b, minus_1); + + if (a_temp > b_temp) + return 2; + else + return 1; + } +} + +static void dp_panel_update_tu_timings(struct dp_tu_calc_input *in, + struct tu_algo_data *tu) +{ + int nlanes = in->nlanes; + int dsc_num_slices = in->num_of_dsc_slices; + int dsc_num_bytes = 0; + int numerator; + s64 pclk_dsc_fp; + s64 dwidth_dsc_fp; + s64 hbp_dsc_fp; + + int tot_num_eoc_symbols = 0; + int tot_num_hor_bytes = 0; + int tot_num_dummy_bytes = 0; + int dwidth_dsc_bytes = 0; + int eoc_bytes = 0; + + s64 temp1_fp, temp2_fp, temp3_fp; + + tu->lclk_fp = drm_fixp_from_fraction(in->lclk, 1); + tu->pclk_fp = drm_fixp_from_fraction(in->pclk_khz, 1000); + tu->lwidth = in->hactive; + tu->hbp_relative_to_pclk = in->hporch; + tu->nlanes = in->nlanes; + tu->bpp = in->bpp; + tu->pixelEnc = in->pixel_enc; + tu->dsc_en = in->dsc_en; + tu->async_en = in->async_en; + tu->lwidth_fp = drm_fixp_from_fraction(in->hactive, 1); + tu->hbp_relative_to_pclk_fp = drm_fixp_from_fraction(in->hporch, 1); + + if (tu->pixelEnc == 420) { + temp1_fp = drm_fixp_from_fraction(2, 1); + tu->pclk_fp = drm_fixp_div(tu->pclk_fp, temp1_fp); + tu->lwidth_fp = drm_fixp_div(tu->lwidth_fp, temp1_fp); + tu->hbp_relative_to_pclk_fp = + drm_fixp_div(tu->hbp_relative_to_pclk_fp, 2); + } + + if (tu->pixelEnc == 422) { + switch (tu->bpp) { + case 24: + tu->bpp = 16; + tu->bpc = 8; + break; + case 30: + tu->bpp = 20; + tu->bpc = 10; + break; + default: + tu->bpp = 16; + tu->bpc = 8; + break; + } + } else { + tu->bpc = tu->bpp/3; + } + + if (!in->dsc_en) + goto fec_check; + + temp1_fp = drm_fixp_from_fraction(in->compress_ratio, 100); + temp2_fp = drm_fixp_from_fraction(in->bpp, 1); + temp3_fp = drm_fixp_div(temp2_fp, temp1_fp); + temp2_fp = drm_fixp_mul(tu->lwidth_fp, temp3_fp); + + temp1_fp = drm_fixp_from_fraction(8, 1); + temp3_fp = drm_fixp_div(temp2_fp, temp1_fp); + + numerator = drm_fixp2int(temp3_fp); + + dsc_num_bytes = numerator / dsc_num_slices; + eoc_bytes = dsc_num_bytes % nlanes; + tot_num_eoc_symbols = nlanes * dsc_num_slices; + tot_num_hor_bytes = dsc_num_bytes * dsc_num_slices; + tot_num_dummy_bytes = (nlanes - eoc_bytes) * dsc_num_slices; + + if (dsc_num_bytes == 0) + pr_info("incorrect no of bytes per slice=%d\n", dsc_num_bytes); + + dwidth_dsc_bytes = (tot_num_hor_bytes + + tot_num_eoc_symbols + + (eoc_bytes == 0 ? 
0 : tot_num_dummy_bytes)); + + dwidth_dsc_fp = drm_fixp_from_fraction(dwidth_dsc_bytes, 3); + + temp2_fp = drm_fixp_mul(tu->pclk_fp, dwidth_dsc_fp); + temp1_fp = drm_fixp_div(temp2_fp, tu->lwidth_fp); + pclk_dsc_fp = temp1_fp; + + temp1_fp = drm_fixp_div(pclk_dsc_fp, tu->pclk_fp); + temp2_fp = drm_fixp_mul(tu->hbp_relative_to_pclk_fp, temp1_fp); + hbp_dsc_fp = temp2_fp; + + /* output */ + tu->pclk_fp = pclk_dsc_fp; + tu->lwidth_fp = dwidth_dsc_fp; + tu->hbp_relative_to_pclk_fp = hbp_dsc_fp; + +fec_check: + if (in->fec_en) { + temp1_fp = drm_fixp_from_fraction(976, 1000); /* 0.976 */ + tu->lclk_fp = drm_fixp_mul(tu->lclk_fp, temp1_fp); + } +} + +static void _tu_valid_boundary_calc(struct tu_algo_data *tu) +{ + s64 temp1_fp, temp2_fp, temp, temp1, temp2; + int compare_result_1, compare_result_2, compare_result_3; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp); + + tu->new_valid_boundary_link = drm_fixp2int_ceil(temp2_fp); + + temp = (tu->i_upper_boundary_count * + tu->new_valid_boundary_link + + tu->i_lower_boundary_count * + (tu->new_valid_boundary_link-1)); + tu->average_valid2_fp = drm_fixp_from_fraction(temp, + (tu->i_upper_boundary_count + + tu->i_lower_boundary_count)); + + temp1_fp = drm_fixp_from_fraction(tu->bpp, 8); + temp2_fp = tu->lwidth_fp; + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + temp2_fp = drm_fixp_div(temp1_fp, tu->average_valid2_fp); + tu->n_tus = drm_fixp2int(temp2_fp); + if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000) + tu->n_tus += 1; + + temp1_fp = drm_fixp_from_fraction(tu->n_tus, 1); + temp2_fp = drm_fixp_mul(temp1_fp, tu->average_valid2_fp); + temp1_fp = drm_fixp_from_fraction(tu->n_symbols, 1); + temp2_fp = temp1_fp - temp2_fp; + temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1); + temp2_fp = drm_fixp_div(temp2_fp, temp1_fp); + tu->n_remainder_symbols_per_lane_fp = temp2_fp; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + tu->last_partial_tu_fp = + drm_fixp_div(tu->n_remainder_symbols_per_lane_fp, + temp1_fp); + + if (tu->n_remainder_symbols_per_lane_fp != 0) + tu->remainder_symbols_exist = 1; + else + tu->remainder_symbols_exist = 0; + + temp1_fp = drm_fixp_from_fraction(tu->n_tus, tu->nlanes); + tu->n_tus_per_lane = drm_fixp2int(temp1_fp); + + tu->paired_tus = (int)((tu->n_tus_per_lane) / + (tu->i_upper_boundary_count + + tu->i_lower_boundary_count)); + + tu->remainder_tus = tu->n_tus_per_lane - tu->paired_tus * + (tu->i_upper_boundary_count + + tu->i_lower_boundary_count); + + if ((tu->remainder_tus - tu->i_upper_boundary_count) > 0) { + tu->remainder_tus_upper = tu->i_upper_boundary_count; + tu->remainder_tus_lower = tu->remainder_tus - + tu->i_upper_boundary_count; + } else { + tu->remainder_tus_upper = tu->remainder_tus; + tu->remainder_tus_lower = 0; + } + + temp = tu->paired_tus * (tu->i_upper_boundary_count * + tu->new_valid_boundary_link + + tu->i_lower_boundary_count * + (tu->new_valid_boundary_link - 1)) + + (tu->remainder_tus_upper * + tu->new_valid_boundary_link) + + (tu->remainder_tus_lower * + (tu->new_valid_boundary_link - 1)); + tu->total_valid_fp = drm_fixp_from_fraction(temp, 1); + + if (tu->remainder_symbols_exist) { + temp1_fp = tu->total_valid_fp + + tu->n_remainder_symbols_per_lane_fp; + temp2_fp = drm_fixp_from_fraction(tu->n_tus_per_lane, 1); + temp2_fp = temp2_fp + tu->last_partial_tu_fp; + temp1_fp = drm_fixp_div(temp1_fp, temp2_fp); + } else { + temp2_fp = drm_fixp_from_fraction(tu->n_tus_per_lane, 1); + temp1_fp = drm_fixp_div(tu->total_valid_fp, temp2_fp); + } + 
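/* average valid symbols per TU actually sent on a lane, including any partial TU at the end of the active line */ +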
tu->effective_valid_fp = temp1_fp; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp); + tu->n_n_err_fp = tu->effective_valid_fp - temp2_fp; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp); + tu->n_err_fp = tu->average_valid2_fp - temp2_fp; + + tu->even_distribution = tu->n_tus % tu->nlanes == 0 ? 1 : 0; + + temp1_fp = drm_fixp_from_fraction(tu->bpp, 8); + temp2_fp = tu->lwidth_fp; + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + temp2_fp = drm_fixp_div(temp1_fp, tu->average_valid2_fp); + + if (temp2_fp) + tu->n_tus_incl_last_incomplete_tu = drm_fixp2int_ceil(temp2_fp); + else + tu->n_tus_incl_last_incomplete_tu = 0; + + temp1 = 0; + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp); + temp1_fp = tu->average_valid2_fp - temp2_fp; + temp2_fp = drm_fixp_from_fraction(tu->n_tus_incl_last_incomplete_tu, 1); + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + + if (temp1_fp) + temp1 = drm_fixp2int_ceil(temp1_fp); + + temp = tu->i_upper_boundary_count * tu->nlanes; + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu->new_valid_boundary_link, 1); + temp2_fp = temp1_fp - temp2_fp; + temp1_fp = drm_fixp_from_fraction(temp, 1); + temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp); + + if (temp2_fp) + temp2 = drm_fixp2int_ceil(temp2_fp); + else + temp2 = 0; + tu->extra_required_bytes_new_tmp = (int)(temp1 + temp2); + + temp1_fp = drm_fixp_from_fraction(8, tu->bpp); + temp2_fp = drm_fixp_from_fraction( + tu->extra_required_bytes_new_tmp, 1); + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + + if (temp1_fp) + tu->extra_pclk_cycles_tmp = drm_fixp2int_ceil(temp1_fp); + else + tu->extra_pclk_cycles_tmp = 0; + + temp1_fp = drm_fixp_from_fraction(tu->extra_pclk_cycles_tmp, 1); + temp2_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp); + temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp); + + if (temp1_fp) + tu->extra_pclk_cycles_in_link_clk_tmp = + drm_fixp2int_ceil(temp1_fp); + else + tu->extra_pclk_cycles_in_link_clk_tmp = 0; + + tu->filler_size_tmp = tu->tu_size - tu->new_valid_boundary_link; + + tu->lower_filler_size_tmp = tu->filler_size_tmp + 1; + + tu->delay_start_link_tmp = tu->extra_pclk_cycles_in_link_clk_tmp + + tu->lower_filler_size_tmp + + tu->extra_buffer_margin; + + temp1_fp = drm_fixp_from_fraction(tu->delay_start_link_tmp, 1); + tu->delay_start_time_fp = drm_fixp_div(temp1_fp, tu->lclk_fp); + + compare_result_1 = _tu_param_compare(tu->n_n_err_fp, tu->diff_abs_fp); + if (compare_result_1 == 2) + compare_result_1 = 1; + else + compare_result_1 = 0; + + compare_result_2 = _tu_param_compare(tu->n_n_err_fp, tu->err_fp); + if (compare_result_2 == 2) + compare_result_2 = 1; + else + compare_result_2 = 0; + + compare_result_3 = _tu_param_compare(tu->hbp_time_fp, + tu->delay_start_time_fp); + if (compare_result_3 == 2) + compare_result_3 = 0; + else + compare_result_3 = 1; + + if (((tu->even_distribution == 1) || + ((tu->even_distribution_BF == 0) && + (tu->even_distribution_legacy == 0))) && + tu->n_err_fp >= 0 && tu->n_n_err_fp >= 0 && + compare_result_2 && + (compare_result_1 || (tu->min_hblank_violated == 1)) && + (tu->new_valid_boundary_link - 1) > 0 && + compare_result_3 && + (tu->delay_start_link_tmp <= 1023)) { + tu->upper_boundary_count = tu->i_upper_boundary_count; + tu->lower_boundary_count = tu->i_lower_boundary_count; + tu->err_fp = 
tu->n_n_err_fp; + tu->boundary_moderation_en = true; + tu->tu_size_desired = tu->tu_size; + tu->valid_boundary_link = tu->new_valid_boundary_link; + tu->effective_valid_recorded_fp = tu->effective_valid_fp; + tu->even_distribution_BF = 1; + tu->delay_start_link = tu->delay_start_link_tmp; + } else if (tu->boundary_mod_lower_err == 0) { + compare_result_1 = _tu_param_compare(tu->n_n_err_fp, + tu->diff_abs_fp); + if (compare_result_1 == 2) + tu->boundary_mod_lower_err = 1; + } +} + +static void _dp_ctrl_calc_tu(struct dp_ctrl_private *ctrl, + struct dp_tu_calc_input *in, + struct dp_vc_tu_mapping_table *tu_table) +{ + struct tu_algo_data *tu; + int compare_result_1, compare_result_2; + u64 temp = 0; + s64 temp_fp = 0, temp1_fp = 0, temp2_fp = 0; + + s64 LCLK_FAST_SKEW_fp = drm_fixp_from_fraction(6, 10000); /* 0.0006 */ + s64 const_p49_fp = drm_fixp_from_fraction(49, 100); /* 0.49 */ + s64 const_p56_fp = drm_fixp_from_fraction(56, 100); /* 0.56 */ + s64 RATIO_SCALE_fp = drm_fixp_from_fraction(1001, 1000); + + u8 DP_BRUTE_FORCE = 1; + s64 BRUTE_FORCE_THRESHOLD_fp = drm_fixp_from_fraction(1, 10); /* 0.1 */ + uint EXTRA_PIXCLK_CYCLE_DELAY = 4; + uint HBLANK_MARGIN = 4; + + tu = kzalloc(sizeof(*tu), GFP_KERNEL); + if (!tu) + return; + + dp_panel_update_tu_timings(in, tu); + + tu->err_fp = drm_fixp_from_fraction(1000, 1); /* 1000 */ + + temp1_fp = drm_fixp_from_fraction(4, 1); + temp2_fp = drm_fixp_mul(temp1_fp, tu->lclk_fp); + temp_fp = drm_fixp_div(temp2_fp, tu->pclk_fp); + tu->extra_buffer_margin = drm_fixp2int_ceil(temp_fp); + + temp1_fp = drm_fixp_from_fraction(tu->bpp, 8); + temp2_fp = drm_fixp_mul(tu->pclk_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1); + temp2_fp = drm_fixp_div(temp2_fp, temp1_fp); + tu->ratio_fp = drm_fixp_div(temp2_fp, tu->lclk_fp); + + tu->original_ratio_fp = tu->ratio_fp; + tu->boundary_moderation_en = false; + tu->upper_boundary_count = 0; + tu->lower_boundary_count = 0; + tu->i_upper_boundary_count = 0; + tu->i_lower_boundary_count = 0; + tu->valid_lower_boundary_link = 0; + tu->even_distribution_BF = 0; + tu->even_distribution_legacy = 0; + tu->even_distribution = 0; + tu->delay_start_time_fp = 0; + + tu->err_fp = drm_fixp_from_fraction(1000, 1); + tu->n_err_fp = 0; + tu->n_n_err_fp = 0; + + tu->ratio = drm_fixp2int(tu->ratio_fp); + temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1); + div64_u64_rem(tu->lwidth_fp, temp1_fp, &temp2_fp); + if (temp2_fp != 0 && + !tu->ratio && tu->dsc_en == 0) { + tu->ratio_fp = drm_fixp_mul(tu->ratio_fp, RATIO_SCALE_fp); + tu->ratio = drm_fixp2int(tu->ratio_fp); + if (tu->ratio) + tu->ratio_fp = drm_fixp_from_fraction(1, 1); + } + + if (tu->ratio > 1) + tu->ratio = 1; + + if (tu->ratio == 1) + goto tu_size_calc; + + compare_result_1 = _tu_param_compare(tu->ratio_fp, const_p49_fp); + if (!compare_result_1 || compare_result_1 == 1) + compare_result_1 = 1; + else + compare_result_1 = 0; + + compare_result_2 = _tu_param_compare(tu->ratio_fp, const_p56_fp); + if (!compare_result_2 || compare_result_2 == 2) + compare_result_2 = 1; + else + compare_result_2 = 0; + + if (tu->dsc_en && compare_result_1 && compare_result_2) { + HBLANK_MARGIN += 4; + drm_dbg_dp(ctrl->drm_dev, + "increase HBLANK_MARGIN to %d\n", HBLANK_MARGIN); + } + +tu_size_calc: + for (tu->tu_size = 32; tu->tu_size <= 64; tu->tu_size++) { + temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1); + temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp); + temp = drm_fixp2int_ceil(temp2_fp); + temp1_fp = drm_fixp_from_fraction(temp, 1); + tu->n_err_fp = temp1_fp - temp2_fp; 
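+ /* keep the TU size that gives the smallest rounding error */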
+ + if (tu->n_err_fp < tu->err_fp) { + tu->err_fp = tu->n_err_fp; + tu->tu_size_desired = tu->tu_size; + } + } + + tu->tu_size_minus1 = tu->tu_size_desired - 1; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1); + temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp); + tu->valid_boundary_link = drm_fixp2int_ceil(temp2_fp); + + temp1_fp = drm_fixp_from_fraction(tu->bpp, 8); + temp2_fp = tu->lwidth_fp; + temp2_fp = drm_fixp_mul(temp2_fp, temp1_fp); + + temp1_fp = drm_fixp_from_fraction(tu->valid_boundary_link, 1); + temp2_fp = drm_fixp_div(temp2_fp, temp1_fp); + tu->n_tus = drm_fixp2int(temp2_fp); + if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000) + tu->n_tus += 1; + + tu->even_distribution_legacy = tu->n_tus % tu->nlanes == 0 ? 1 : 0; + + drm_dbg_dp(ctrl->drm_dev, + "n_sym = %d, num_of_tus = %d\n", + tu->valid_boundary_link, tu->n_tus); + + temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1); + temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu->valid_boundary_link, 1); + temp2_fp = temp1_fp - temp2_fp; + temp1_fp = drm_fixp_from_fraction(tu->n_tus + 1, 1); + temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp); + + temp = drm_fixp2int(temp2_fp); + if (temp && temp2_fp) + tu->extra_bytes = drm_fixp2int_ceil(temp2_fp); + else + tu->extra_bytes = 0; + + temp1_fp = drm_fixp_from_fraction(tu->extra_bytes, 1); + temp2_fp = drm_fixp_from_fraction(8, tu->bpp); + temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp); + + if (temp && temp1_fp) + tu->extra_pclk_cycles = drm_fixp2int_ceil(temp1_fp); + else + tu->extra_pclk_cycles = drm_fixp2int(temp1_fp); + + temp1_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp); + temp2_fp = drm_fixp_from_fraction(tu->extra_pclk_cycles, 1); + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + + if (temp1_fp) + tu->extra_pclk_cycles_in_link_clk = drm_fixp2int_ceil(temp1_fp); + else + tu->extra_pclk_cycles_in_link_clk = drm_fixp2int(temp1_fp); + + tu->filler_size = tu->tu_size_desired - tu->valid_boundary_link; + + temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1); + tu->ratio_by_tu_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp); + + tu->delay_start_link = tu->extra_pclk_cycles_in_link_clk + + tu->filler_size + tu->extra_buffer_margin; + + tu->resulting_valid_fp = + drm_fixp_from_fraction(tu->valid_boundary_link, 1); + + temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1); + temp2_fp = drm_fixp_div(tu->resulting_valid_fp, temp1_fp); + tu->TU_ratio_err_fp = temp2_fp - tu->original_ratio_fp; + + temp1_fp = drm_fixp_from_fraction(HBLANK_MARGIN, 1); + temp1_fp = tu->hbp_relative_to_pclk_fp - temp1_fp; + tu->hbp_time_fp = drm_fixp_div(temp1_fp, tu->pclk_fp); + + temp1_fp = drm_fixp_from_fraction(tu->delay_start_link, 1); + tu->delay_start_time_fp = drm_fixp_div(temp1_fp, tu->lclk_fp); + + compare_result_1 = _tu_param_compare(tu->hbp_time_fp, + tu->delay_start_time_fp); + if (compare_result_1 == 2) /* if (hbp_time_fp < delay_start_time_fp) */ + tu->min_hblank_violated = 1; + + tu->hactive_time_fp = drm_fixp_div(tu->lwidth_fp, tu->pclk_fp); + + compare_result_2 = _tu_param_compare(tu->hactive_time_fp, + tu->delay_start_time_fp); + if (compare_result_2 == 2) + tu->min_hblank_violated = 1; + + tu->delay_start_time_fp = 0; + + /* brute force */ + + tu->delay_start_link_extra_pixclk = EXTRA_PIXCLK_CYCLE_DELAY; + tu->diff_abs_fp = tu->resulting_valid_fp - tu->ratio_by_tu_fp; + + temp = drm_fixp2int(tu->diff_abs_fp); + if (!temp && tu->diff_abs_fp <= 0xffff) + tu->diff_abs_fp = 0; + + /* if(diff_abs < 0) diff_abs *= -1 */ + if (tu->diff_abs_fp < 0) 
+ tu->diff_abs_fp = drm_fixp_mul(tu->diff_abs_fp, -1); + + tu->boundary_mod_lower_err = 0; + if ((tu->diff_abs_fp != 0 && + ((tu->diff_abs_fp > BRUTE_FORCE_THRESHOLD_fp) || + (tu->even_distribution_legacy == 0) || + (DP_BRUTE_FORCE == 1))) || + (tu->min_hblank_violated == 1)) { + do { + tu->err_fp = drm_fixp_from_fraction(1000, 1); + + temp1_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp); + temp2_fp = drm_fixp_from_fraction( + tu->delay_start_link_extra_pixclk, 1); + temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp); + + if (temp1_fp) + tu->extra_buffer_margin = + drm_fixp2int_ceil(temp1_fp); + else + tu->extra_buffer_margin = 0; + + temp1_fp = drm_fixp_from_fraction(tu->bpp, 8); + temp1_fp = drm_fixp_mul(tu->lwidth_fp, temp1_fp); + + if (temp1_fp) + tu->n_symbols = drm_fixp2int_ceil(temp1_fp); + else + tu->n_symbols = 0; + + for (tu->tu_size = 32; tu->tu_size <= 64; tu->tu_size++) { + for (tu->i_upper_boundary_count = 1; + tu->i_upper_boundary_count <= 15; + tu->i_upper_boundary_count++) { + for (tu->i_lower_boundary_count = 1; + tu->i_lower_boundary_count <= 15; + tu->i_lower_boundary_count++) { + _tu_valid_boundary_calc(tu); + } + } + } + tu->delay_start_link_extra_pixclk--; + } while (tu->boundary_moderation_en != true && + tu->boundary_mod_lower_err == 1 && + tu->delay_start_link_extra_pixclk != 0); + + if (tu->boundary_moderation_en == true) { + temp1_fp = drm_fixp_from_fraction( + (tu->upper_boundary_count * + tu->valid_boundary_link + + tu->lower_boundary_count * + (tu->valid_boundary_link - 1)), 1); + temp2_fp = drm_fixp_from_fraction( + (tu->upper_boundary_count + + tu->lower_boundary_count), 1); + tu->resulting_valid_fp = + drm_fixp_div(temp1_fp, temp2_fp); + + temp1_fp = drm_fixp_from_fraction( + tu->tu_size_desired, 1); + tu->ratio_by_tu_fp = + drm_fixp_mul(tu->original_ratio_fp, temp1_fp); + + tu->valid_lower_boundary_link = + tu->valid_boundary_link - 1; + + temp1_fp = drm_fixp_from_fraction(tu->bpp, 8); + temp1_fp = drm_fixp_mul(tu->lwidth_fp, temp1_fp); + temp2_fp = drm_fixp_div(temp1_fp, + tu->resulting_valid_fp); + tu->n_tus = drm_fixp2int(temp2_fp); + + tu->tu_size_minus1 = tu->tu_size_desired - 1; + tu->even_distribution_BF = 1; + + temp1_fp = + drm_fixp_from_fraction(tu->tu_size_desired, 1); + temp2_fp = + drm_fixp_div(tu->resulting_valid_fp, temp1_fp); + tu->TU_ratio_err_fp = temp2_fp - tu->original_ratio_fp; + } + } + + temp2_fp = drm_fixp_mul(LCLK_FAST_SKEW_fp, tu->lwidth_fp); + + if (temp2_fp) + temp = drm_fixp2int_ceil(temp2_fp); + else + temp = 0; + + temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1); + temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp); + temp1_fp = drm_fixp_from_fraction(tu->bpp, 8); + temp2_fp = drm_fixp_div(temp1_fp, temp2_fp); + temp1_fp = drm_fixp_from_fraction(temp, 1); + temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp); + temp = drm_fixp2int(temp2_fp); + + if (tu->async_en) + tu->delay_start_link += (int)temp; + + temp1_fp = drm_fixp_from_fraction(tu->delay_start_link, 1); + tu->delay_start_time_fp = drm_fixp_div(temp1_fp, tu->lclk_fp); + + /* OUTPUTS */ + tu_table->valid_boundary_link = tu->valid_boundary_link; + tu_table->delay_start_link = tu->delay_start_link; + tu_table->boundary_moderation_en = tu->boundary_moderation_en; + tu_table->valid_lower_boundary_link = tu->valid_lower_boundary_link; + tu_table->upper_boundary_count = tu->upper_boundary_count; + tu_table->lower_boundary_count = tu->lower_boundary_count; + tu_table->tu_size_minus1 = tu->tu_size_minus1; + + drm_dbg_dp(ctrl->drm_dev, "TU: valid_boundary_link: %d\n", + 
tu_table->valid_boundary_link);
+	drm_dbg_dp(ctrl->drm_dev, "TU: delay_start_link: %d\n",
+			tu_table->delay_start_link);
+	drm_dbg_dp(ctrl->drm_dev, "TU: boundary_moderation_en: %d\n",
+			tu_table->boundary_moderation_en);
+	drm_dbg_dp(ctrl->drm_dev, "TU: valid_lower_boundary_link: %d\n",
+			tu_table->valid_lower_boundary_link);
+	drm_dbg_dp(ctrl->drm_dev, "TU: upper_boundary_count: %d\n",
+			tu_table->upper_boundary_count);
+	drm_dbg_dp(ctrl->drm_dev, "TU: lower_boundary_count: %d\n",
+			tu_table->lower_boundary_count);
+	drm_dbg_dp(ctrl->drm_dev, "TU: tu_size_minus1: %d\n",
+			tu_table->tu_size_minus1);
+
+	kfree(tu);
+}
+
+static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl,
+		struct dp_vc_tu_mapping_table *tu_table)
+{
+	struct dp_tu_calc_input in;
+	struct drm_display_mode *drm_mode;
+
+	drm_mode = &ctrl->panel->dp_mode.drm_mode;
+
+	in.lclk = ctrl->link->link_params.rate / 1000;
+	in.pclk_khz = drm_mode->clock;
+	in.hactive = drm_mode->hdisplay;
+	in.hporch = drm_mode->htotal - drm_mode->hdisplay;
+	in.nlanes = ctrl->link->link_params.num_lanes;
+	in.bpp = ctrl->panel->dp_mode.bpp;
+	in.pixel_enc = 444;
+	in.dsc_en = 0;
+	in.async_en = 0;
+	in.fec_en = 0;
+	in.num_of_dsc_slices = 0;
+	in.compress_ratio = 100;
+
+	_dp_ctrl_calc_tu(ctrl, &in, tu_table);
+}
+
+static void dp_ctrl_setup_tr_unit(struct dp_ctrl_private *ctrl)
+{
+	u32 dp_tu = 0x0;
+	u32 valid_boundary = 0x0;
+	u32 valid_boundary2 = 0x0;
+	struct dp_vc_tu_mapping_table tu_calc_table;
+
+	dp_ctrl_calc_tu_parameters(ctrl, &tu_calc_table);
+
+	dp_tu |= tu_calc_table.tu_size_minus1;
+	valid_boundary |= tu_calc_table.valid_boundary_link;
+	valid_boundary |= (tu_calc_table.delay_start_link << 16);
+
+	valid_boundary2 |= (tu_calc_table.valid_lower_boundary_link << 1);
+	valid_boundary2 |= (tu_calc_table.upper_boundary_count << 16);
+	valid_boundary2 |= (tu_calc_table.lower_boundary_count << 20);
+
+	if (tu_calc_table.boundary_moderation_en)
+		valid_boundary2 |= BIT(0);
+
+	pr_debug("dp_tu=0x%x, valid_boundary=0x%x, valid_boundary2=0x%x\n",
+			dp_tu, valid_boundary, valid_boundary2);
+
+	dp_catalog_ctrl_update_transfer_unit(ctrl->catalog,
+			dp_tu, valid_boundary, valid_boundary2);
+}
+
+static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl)
+{
+	int ret = 0;
+
+	if (!wait_for_completion_timeout(&ctrl->video_comp,
+			WAIT_FOR_VIDEO_READY_TIMEOUT_JIFFIES)) {
+		DRM_ERROR("wait4video timed out\n");
+		ret = -ETIMEDOUT;
+	}
+	return ret;
+}
+
+static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
+{
+	struct dp_link *link = ctrl->link;
+	int ret = 0, lane, lane_cnt;
+	u8 buf[4];
+	u32 max_level_reached = 0;
+	u32 voltage_swing_level = link->phy_params.v_level;
+	u32 pre_emphasis_level = link->phy_params.p_level;
+
+	drm_dbg_dp(ctrl->drm_dev,
+		"voltage level: %d emphasis level: %d\n",
+		voltage_swing_level, pre_emphasis_level);
+	ret = dp_catalog_ctrl_update_vx_px(ctrl->catalog,
+		voltage_swing_level, pre_emphasis_level);
+
+	if (ret)
+		return ret;
+
+	if (voltage_swing_level >= DP_TRAIN_VOLTAGE_SWING_MAX) {
+		drm_dbg_dp(ctrl->drm_dev,
+				"max. voltage swing level reached %d\n",
+				voltage_swing_level);
+		max_level_reached |= DP_TRAIN_MAX_SWING_REACHED;
+	}
+
+	if (pre_emphasis_level >= DP_TRAIN_PRE_EMPHASIS_MAX) {
+		drm_dbg_dp(ctrl->drm_dev,
+				"max. 
pre-emphasis level reached %d\n", + pre_emphasis_level); + max_level_reached |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + } + + pre_emphasis_level <<= DP_TRAIN_PRE_EMPHASIS_SHIFT; + + lane_cnt = ctrl->link->link_params.num_lanes; + for (lane = 0; lane < lane_cnt; lane++) + buf[lane] = voltage_swing_level | pre_emphasis_level + | max_level_reached; + + drm_dbg_dp(ctrl->drm_dev, "sink: p|v=0x%x\n", + voltage_swing_level | pre_emphasis_level); + ret = drm_dp_dpcd_write(ctrl->aux, DP_TRAINING_LANE0_SET, + buf, lane_cnt); + if (ret == lane_cnt) + ret = 0; + + return ret; +} + +static bool dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl, + u8 pattern) +{ + u8 buf; + int ret = 0; + + drm_dbg_dp(ctrl->drm_dev, "sink: pattern=%x\n", pattern); + + buf = pattern; + + if (pattern && pattern != DP_TRAINING_PATTERN_4) + buf |= DP_LINK_SCRAMBLING_DISABLE; + + ret = drm_dp_dpcd_writeb(ctrl->aux, DP_TRAINING_PATTERN_SET, buf); + return ret == 1; +} + +static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl, + u8 *link_status) +{ + int ret = 0, len; + + len = drm_dp_dpcd_read_link_status(ctrl->aux, link_status); + if (len != DP_LINK_STATUS_SIZE) { + DRM_ERROR("DP link status read failed, err: %d\n", len); + ret = -EINVAL; + } + + return ret; +} + +static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl, + int *training_step) +{ + int tries, old_v_level, ret = 0; + u8 link_status[DP_LINK_STATUS_SIZE]; + int const maximum_retries = 4; + + dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + + *training_step = DP_TRAINING_1; + + ret = dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, 1); + if (ret) + return ret; + dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 | + DP_LINK_SCRAMBLING_DISABLE); + + ret = dp_ctrl_update_vx_px(ctrl); + if (ret) + return ret; + + tries = 0; + old_v_level = ctrl->link->phy_params.v_level; + for (tries = 0; tries < maximum_retries; tries++) { + drm_dp_link_train_clock_recovery_delay(ctrl->aux, ctrl->panel->dpcd); + + ret = dp_ctrl_read_link_status(ctrl, link_status); + if (ret) + return ret; + + if (drm_dp_clock_recovery_ok(link_status, + ctrl->link->link_params.num_lanes)) { + return 0; + } + + if (ctrl->link->phy_params.v_level >= + DP_TRAIN_VOLTAGE_SWING_MAX) { + DRM_ERROR_RATELIMITED("max v_level reached\n"); + return -EAGAIN; + } + + if (old_v_level != ctrl->link->phy_params.v_level) { + tries = 0; + old_v_level = ctrl->link->phy_params.v_level; + } + + dp_link_adjust_levels(ctrl->link, link_status); + ret = dp_ctrl_update_vx_px(ctrl); + if (ret) + return ret; + } + + DRM_ERROR("max tries reached\n"); + return -ETIMEDOUT; +} + +static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + + switch (ctrl->link->link_params.rate) { + case 810000: + ctrl->link->link_params.rate = 540000; + break; + case 540000: + ctrl->link->link_params.rate = 270000; + break; + case 270000: + ctrl->link->link_params.rate = 162000; + break; + case 162000: + default: + ret = -EINVAL; + break; + } + + if (!ret) { + drm_dbg_dp(ctrl->drm_dev, "new rate=0x%x\n", + ctrl->link->link_params.rate); + } + + return ret; +} + +static int dp_ctrl_link_lane_down_shift(struct dp_ctrl_private *ctrl) +{ + + if (ctrl->link->link_params.num_lanes == 1) + return -1; + + ctrl->link->link_params.num_lanes /= 2; + ctrl->link->link_params.rate = ctrl->panel->link_info.rate; + + ctrl->link->phy_params.p_level = 0; + ctrl->link->phy_params.v_level = 0; + + return 0; +} + +static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl) +{ + 
dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE); + drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd); +} + +static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, + int *training_step) +{ + int tries = 0, ret = 0; + u8 pattern; + u32 state_ctrl_bit; + int const maximum_retries = 5; + u8 link_status[DP_LINK_STATUS_SIZE]; + + dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + + *training_step = DP_TRAINING_2; + + if (drm_dp_tps4_supported(ctrl->panel->dpcd)) { + pattern = DP_TRAINING_PATTERN_4; + state_ctrl_bit = 4; + } else if (drm_dp_tps3_supported(ctrl->panel->dpcd)) { + pattern = DP_TRAINING_PATTERN_3; + state_ctrl_bit = 3; + } else { + pattern = DP_TRAINING_PATTERN_2; + state_ctrl_bit = 2; + } + + ret = dp_catalog_ctrl_set_pattern_state_bit(ctrl->catalog, state_ctrl_bit); + if (ret) + return ret; + + dp_ctrl_train_pattern_set(ctrl, pattern); + + for (tries = 0; tries <= maximum_retries; tries++) { + drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd); + + ret = dp_ctrl_read_link_status(ctrl, link_status); + if (ret) + return ret; + + if (drm_dp_channel_eq_ok(link_status, + ctrl->link->link_params.num_lanes)) { + return 0; + } + + dp_link_adjust_levels(ctrl->link, link_status); + ret = dp_ctrl_update_vx_px(ctrl); + if (ret) + return ret; + + } + + return -ETIMEDOUT; +} + +static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl, + int *training_step) +{ + int ret = 0; + const u8 *dpcd = ctrl->panel->dpcd; + u8 encoding[] = { 0, DP_SET_ANSI_8B10B }; + u8 assr; + struct dp_link_info link_info = {0}; + + dp_ctrl_config_ctrl(ctrl); + + link_info.num_lanes = ctrl->link->link_params.num_lanes; + link_info.rate = ctrl->link->link_params.rate; + link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING; + + dp_aux_link_configure(ctrl->aux, &link_info); + + if (drm_dp_max_downspread(dpcd)) + encoding[0] |= DP_SPREAD_AMP_0_5; + + /* config DOWNSPREAD_CTRL and MAIN_LINK_CHANNEL_CODING_SET */ + drm_dp_dpcd_write(ctrl->aux, DP_DOWNSPREAD_CTRL, encoding, 2); + + if (drm_dp_alternate_scrambler_reset_cap(dpcd)) { + assr = DP_ALTERNATE_SCRAMBLER_RESET_ENABLE; + drm_dp_dpcd_write(ctrl->aux, DP_EDP_CONFIGURATION_SET, + &assr, 1); + } + + ret = dp_ctrl_link_train_1(ctrl, training_step); + if (ret) { + DRM_ERROR("link training #1 failed. ret=%d\n", ret); + goto end; + } + + /* print success info as this is a result of user initiated action */ + drm_dbg_dp(ctrl->drm_dev, "link training #1 successful\n"); + + ret = dp_ctrl_link_train_2(ctrl, training_step); + if (ret) { + DRM_ERROR("link training #2 failed. ret=%d\n", ret); + goto end; + } + + /* print success info as this is a result of user initiated action */ + drm_dbg_dp(ctrl->drm_dev, "link training #2 successful\n"); + +end: + dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0); + + return ret; +} + +static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl, + int *training_step) +{ + int ret = 0; + + dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true); + + if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) + return ret; + + /* + * As part of previous calls, DP controller state might have + * transitioned to PUSH_IDLE. In order to start transmitting + * a link training pattern, we have to first do soft reset. 
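+	 * Both training phases in dp_ctrl_link_train() begin by writing 0
+	 * to the state control register via dp_catalog_ctrl_state_ctrl(),
+	 * which clears the PUSH_IDLE state before a new pattern is requested.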
+	 */
+
+	ret = dp_ctrl_link_train(ctrl, training_step);
+
+	return ret;
+}
+
+static void dp_ctrl_set_clock_rate(struct dp_ctrl_private *ctrl,
+		enum dp_pm_type module, char *name, unsigned long rate)
+{
+	u32 num = ctrl->parser->mp[module].num_clk;
+	struct clk_bulk_data *cfg = ctrl->parser->mp[module].clocks;
+
+	while (num && strcmp(cfg->id, name)) {
+		num--;
+		cfg++;
+	}
+
+	drm_dbg_dp(ctrl->drm_dev, "setting rate=%lu on clk=%s\n",
+			rate, name);
+
+	if (num)
+		clk_set_rate(cfg->clk, rate);
+	else
+		DRM_ERROR("%s clock doesn't exist to set rate %lu\n",
+				name, rate);
+}
+
+static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
+{
+	int ret = 0;
+	struct dp_io *dp_io = &ctrl->parser->io;
+	struct phy *phy = dp_io->phy;
+	struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
+	const u8 *dpcd = ctrl->panel->dpcd;
+
+	opts_dp->lanes = ctrl->link->link_params.num_lanes;
+	opts_dp->link_rate = ctrl->link->link_params.rate / 100;
+	opts_dp->ssc = drm_dp_max_downspread(dpcd);
+
+	phy_configure(phy, &dp_io->phy_opts);
+	phy_power_on(phy);
+
+	dev_pm_opp_set_rate(ctrl->dev, ctrl->link->link_params.rate * 1000);
+	ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, true);
+	if (ret)
+		DRM_ERROR("Unable to start link clocks. ret=%d\n", ret);
+
+	drm_dbg_dp(ctrl->drm_dev, "link rate=%d\n", ctrl->link->link_params.rate);
+
+	return ret;
+}
+
+void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable)
+{
+	struct dp_ctrl_private *ctrl;
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	dp_catalog_ctrl_reset(ctrl->catalog);
+
+	/*
+	 * All DP controller programmable registers will not
+	 * be reset to default value after DP_SW_RESET;
+	 * therefore interrupt mask bits have to be updated
+	 * to enable/disable interrupts.
+	 */
+	dp_catalog_ctrl_enable_irq(ctrl->catalog, enable);
+}
+
+void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl)
+{
+	struct dp_ctrl_private *ctrl;
+	struct dp_io *dp_io;
+	struct phy *phy;
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+	dp_io = &ctrl->parser->io;
+	phy = dp_io->phy;
+
+	dp_catalog_ctrl_phy_reset(ctrl->catalog);
+	phy_init(phy);
+
+	drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
+			phy, phy->init_count, phy->power_count);
+}
+
+void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl)
+{
+	struct dp_ctrl_private *ctrl;
+	struct dp_io *dp_io;
+	struct phy *phy;
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+	dp_io = &ctrl->parser->io;
+	phy = dp_io->phy;
+
+	dp_catalog_ctrl_phy_reset(ctrl->catalog);
+	phy_exit(phy);
+	drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
+			phy, phy->init_count, phy->power_count);
+}
+
+static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl)
+{
+	const u8 *dpcd = ctrl->panel->dpcd;
+
+	/*
+	 * For better interop experience, use a fixed NVID=0x8000
+	 * whenever connected to a VGA dongle downstream.
+	 */
+	if (drm_dp_is_branch(dpcd))
+		return (drm_dp_has_quirk(&ctrl->panel->desc,
+					 DP_DPCD_QUIRK_CONSTANT_N));
+
+	return false;
+}
+
+static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl)
+{
+	int ret = 0;
+	struct dp_io *dp_io = &ctrl->parser->io;
+	struct phy *phy = dp_io->phy;
+	struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
+
+	dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+	opts_dp->lanes = ctrl->link->link_params.num_lanes;
+	phy_configure(phy, &dp_io->phy_opts);
+	/*
+	 * Disable and re-enable the mainlink clock since the
+	 * link clock might have been adjusted as part of the
+	 * link maintenance.
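+	 * The sequence below drops the OPP vote to zero, gates the link
+	 * clocks and powers the PHY off before dp_ctrl_enable_mainlink_clocks()
+	 * reconfigures and re-enables everything at the new rate.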
+ */ + dev_pm_opp_set_rate(ctrl->dev, 0); + ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false); + if (ret) { + DRM_ERROR("Failed to disable clocks. ret=%d\n", ret); + return ret; + } + phy_power_off(phy); + /* hw recommended delay before re-enabling clocks */ + msleep(20); + + ret = dp_ctrl_enable_mainlink_clocks(ctrl); + if (ret) { + DRM_ERROR("Failed to enable mainlink clks. ret=%d\n", ret); + return ret; + } + + return ret; +} + +static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl) +{ + struct dp_io *dp_io; + struct phy *phy; + int ret; + + dp_io = &ctrl->parser->io; + phy = dp_io->phy; + + dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + + dp_catalog_ctrl_reset(ctrl->catalog); + + dev_pm_opp_set_rate(ctrl->dev, 0); + ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false); + if (ret) { + DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret); + } + + phy_power_off(phy); + + /* aux channel down, reinit phy */ + phy_exit(phy); + phy_init(phy); + + drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n", + phy, phy->init_count, phy->power_count); + return 0; +} + +static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl) +{ + int ret = 0; + int training_step = DP_TRAINING_NONE; + + dp_ctrl_push_idle(&ctrl->dp_ctrl); + + ctrl->link->phy_params.p_level = 0; + ctrl->link->phy_params.v_level = 0; + + ret = dp_ctrl_setup_main_link(ctrl, &training_step); + if (ret) + goto end; + + dp_ctrl_clear_training_pattern(ctrl); + + dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); + + ret = dp_ctrl_wait4video_ready(ctrl); +end: + return ret; +} + +static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl) +{ + bool success = false; + u32 pattern_sent = 0x0; + u32 pattern_requested = ctrl->link->phy_params.phy_test_pattern_sel; + + drm_dbg_dp(ctrl->drm_dev, "request: 0x%x\n", pattern_requested); + + if (dp_catalog_ctrl_update_vx_px(ctrl->catalog, + ctrl->link->phy_params.v_level, + ctrl->link->phy_params.p_level)) { + DRM_ERROR("Failed to set v/p levels\n"); + return false; + } + dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested); + dp_ctrl_update_vx_px(ctrl); + dp_link_send_test_response(ctrl->link); + + pattern_sent = dp_catalog_ctrl_read_phy_pattern(ctrl->catalog); + + switch (pattern_sent) { + case MR_LINK_TRAINING1: + success = (pattern_requested == + DP_PHY_TEST_PATTERN_D10_2); + break; + case MR_LINK_SYMBOL_ERM: + success = ((pattern_requested == + DP_PHY_TEST_PATTERN_ERROR_COUNT) || + (pattern_requested == + DP_PHY_TEST_PATTERN_CP2520)); + break; + case MR_LINK_PRBS7: + success = (pattern_requested == + DP_PHY_TEST_PATTERN_PRBS7); + break; + case MR_LINK_CUSTOM80: + success = (pattern_requested == + DP_PHY_TEST_PATTERN_80BIT_CUSTOM); + break; + case MR_LINK_TRAINING4: + success = (pattern_requested == + DP_PHY_TEST_PATTERN_SEL_MASK); + break; + default: + success = false; + } + + drm_dbg_dp(ctrl->drm_dev, "%s: test->0x%x\n", + success ? "success" : "failed", pattern_requested); + return success; +} + +static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl) +{ + int ret; + unsigned long pixel_rate; + + if (!ctrl->link->phy_params.phy_test_pattern_sel) { + drm_dbg_dp(ctrl->drm_dev, + "no test pattern selected by sink\n"); + return 0; + } + + /* + * The global reset will need DP link related clocks to be + * running. Add the global reset just before disabling the + * link clocks and core clocks. 
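+	 * That is why a full dp_ctrl_off()/dp_ctrl_on_link() cycle is used
+	 * below rather than a plain retrain: dp_ctrl_off() resets the
+	 * controller while the link clocks are still running.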
+ */ + ret = dp_ctrl_off(&ctrl->dp_ctrl); + if (ret) { + DRM_ERROR("failed to disable DP controller\n"); + return ret; + } + + ret = dp_ctrl_on_link(&ctrl->dp_ctrl); + if (ret) { + DRM_ERROR("failed to enable DP link controller\n"); + return ret; + } + + pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; + dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel", pixel_rate * 1000); + + ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true); + if (ret) { + DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret); + return ret; + } + + dp_ctrl_send_phy_test_pattern(ctrl); + + return 0; +} + +void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + u32 sink_request = 0x0; + + if (!dp_ctrl) { + DRM_ERROR("invalid input\n"); + return; + } + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + sink_request = ctrl->link->sink_request; + + if (sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { + drm_dbg_dp(ctrl->drm_dev, "PHY_TEST_PATTERN request\n"); + if (dp_ctrl_process_phy_test_request(ctrl)) { + DRM_ERROR("process phy_test_req failed\n"); + return; + } + } + + if (sink_request & DP_LINK_STATUS_UPDATED) { + if (dp_ctrl_link_maintenance(ctrl)) { + DRM_ERROR("LM failed: TEST_LINK_TRAINING\n"); + return; + } + } + + if (sink_request & DP_TEST_LINK_TRAINING) { + dp_link_send_test_response(ctrl->link); + if (dp_ctrl_link_maintenance(ctrl)) { + DRM_ERROR("LM failed: TEST_LINK_TRAINING\n"); + return; + } + } +} + +static bool dp_ctrl_clock_recovery_any_ok( + const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count) +{ + int reduced_cnt; + + if (lane_count <= 1) + return false; + + /* + * only interested in the lane number after reduced + * lane_count = 4, then only interested in 2 lanes + * lane_count = 2, then only interested in 1 lane + */ + reduced_cnt = lane_count >> 1; + + return drm_dp_clock_recovery_ok(link_status, reduced_cnt); +} + +static bool dp_ctrl_channel_eq_ok(struct dp_ctrl_private *ctrl) +{ + u8 link_status[DP_LINK_STATUS_SIZE]; + int num_lanes = ctrl->link->link_params.num_lanes; + + dp_ctrl_read_link_status(ctrl, link_status); + + return drm_dp_channel_eq_ok(link_status, num_lanes); +} + +int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) +{ + int rc = 0; + struct dp_ctrl_private *ctrl; + u32 rate; + int link_train_max_retries = 5; + u32 const phy_cts_pixel_clk_khz = 148500; + u8 link_status[DP_LINK_STATUS_SIZE]; + unsigned int training_step; + unsigned long pixel_rate; + + if (!dp_ctrl) + return -EINVAL; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + rate = ctrl->panel->link_info.rate; + pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; + + dp_power_clk_enable(ctrl->power, DP_CORE_PM, true); + + if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { + drm_dbg_dp(ctrl->drm_dev, + "using phy test link parameters\n"); + if (!pixel_rate) + pixel_rate = phy_cts_pixel_clk_khz; + } else { + ctrl->link->link_params.rate = rate; + ctrl->link->link_params.num_lanes = + ctrl->panel->link_info.num_lanes; + } + + drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n", + ctrl->link->link_params.rate, ctrl->link->link_params.num_lanes, + pixel_rate); + + rc = dp_ctrl_enable_mainlink_clocks(ctrl); + if (rc) + return rc; + + while (--link_train_max_retries) { + training_step = DP_TRAINING_NONE; + rc = dp_ctrl_setup_main_link(ctrl, &training_step); + if (rc == 0) { + /* training completed successfully */ + break; + } else if (training_step == DP_TRAINING_1) { + /* link train_1 failed */ + if 
(!dp_catalog_link_is_connected(ctrl->catalog)) + break; + + dp_ctrl_read_link_status(ctrl, link_status); + + rc = dp_ctrl_link_rate_down_shift(ctrl); + if (rc < 0) { /* already in RBR = 1.6G */ + if (dp_ctrl_clock_recovery_any_ok(link_status, + ctrl->link->link_params.num_lanes)) { + /* + * some lanes are ready, + * reduce lane number + */ + rc = dp_ctrl_link_lane_down_shift(ctrl); + if (rc < 0) { /* lane == 1 already */ + /* end with failure */ + break; + } + } else { + /* end with failure */ + break; /* lane == 1 already */ + } + } + } else if (training_step == DP_TRAINING_2) { + /* link train_2 failed */ + if (!dp_catalog_link_is_connected(ctrl->catalog)) + break; + + dp_ctrl_read_link_status(ctrl, link_status); + + if (!drm_dp_clock_recovery_ok(link_status, + ctrl->link->link_params.num_lanes)) + rc = dp_ctrl_link_rate_down_shift(ctrl); + else + rc = dp_ctrl_link_lane_down_shift(ctrl); + + if (rc < 0) { + /* end with failure */ + break; /* lane == 1 already */ + } + + /* stop link training before start re training */ + dp_ctrl_clear_training_pattern(ctrl); + } + + rc = dp_ctrl_reinitialize_mainlink(ctrl); + if (rc) { + DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n", rc); + break; + } + } + + if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) + return rc; + + if (rc == 0) { /* link train successfully */ + /* + * do not stop train pattern here + * stop link training at on_stream + * to pass compliance test + */ + } else { + /* + * link training failed + * end txing train pattern here + */ + dp_ctrl_clear_training_pattern(ctrl); + + dp_ctrl_deinitialize_mainlink(ctrl); + rc = -ECONNRESET; + } + + return rc; +} + +static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl) +{ + int training_step = DP_TRAINING_NONE; + + return dp_ctrl_setup_main_link(ctrl, &training_step); +} + +int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train) +{ + int ret = 0; + bool mainlink_ready = false; + struct dp_ctrl_private *ctrl; + unsigned long pixel_rate; + unsigned long pixel_rate_orig; + + if (!dp_ctrl) + return -EINVAL; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + pixel_rate = pixel_rate_orig = ctrl->panel->dp_mode.drm_mode.clock; + + if (dp_ctrl->wide_bus_en) + pixel_rate >>= 1; + + drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n", + ctrl->link->link_params.rate, + ctrl->link->link_params.num_lanes, pixel_rate); + + if (!dp_power_clk_status(ctrl->power, DP_CTRL_PM)) { /* link clk is off */ + ret = dp_ctrl_enable_mainlink_clocks(ctrl); + if (ret) { + DRM_ERROR("Failed to start link clocks. ret=%d\n", ret); + goto end; + } + } + + dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel", pixel_rate * 1000); + + ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true); + if (ret) { + DRM_ERROR("Unable to start pixel clocks. ret=%d\n", ret); + goto end; + } + + if (force_link_train || !dp_ctrl_channel_eq_ok(ctrl)) + dp_ctrl_link_retrain(ctrl); + + /* stop txing train pattern to end link training */ + dp_ctrl_clear_training_pattern(ctrl); + + /* + * Set up transfer unit values and set controller state to send + * video. 
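+	 * The TU values programmed below come from _dp_ctrl_calc_tu() via
+	 * dp_ctrl_setup_tr_unit(), DP_STATE_CTRL_SEND_VIDEO starts the
+	 * stream, and dp_ctrl_wait4video_ready() then blocks on video_comp,
+	 * which dp_ctrl_isr() completes on DP_CTRL_INTR_READY_FOR_VIDEO.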
+ */ + reinit_completion(&ctrl->video_comp); + + dp_ctrl_configure_source_params(ctrl); + + dp_catalog_ctrl_config_msa(ctrl->catalog, + ctrl->link->link_params.rate, + pixel_rate_orig, dp_ctrl_use_fixed_nvid(ctrl)); + + dp_ctrl_setup_tr_unit(ctrl); + + dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO); + + ret = dp_ctrl_wait4video_ready(ctrl); + if (ret) + return ret; + + mainlink_ready = dp_catalog_ctrl_mainlink_ready(ctrl->catalog); + drm_dbg_dp(ctrl->drm_dev, + "mainlink %s\n", mainlink_ready ? "READY" : "NOT READY"); + +end: + return ret; +} + +int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + struct dp_io *dp_io; + struct phy *phy; + int ret; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + dp_io = &ctrl->parser->io; + phy = dp_io->phy; + + /* set dongle to D3 (power off) mode */ + dp_link_psm_config(ctrl->link, &ctrl->panel->link_info, true); + + dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + + if (dp_power_clk_status(ctrl->power, DP_STREAM_PM)) { + ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, false); + if (ret) { + DRM_ERROR("Failed to disable pclk. ret=%d\n", ret); + return ret; + } + } + + dev_pm_opp_set_rate(ctrl->dev, 0); + ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false); + if (ret) { + DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret); + return ret; + } + + phy_power_off(phy); + + /* aux channel down, reinit phy */ + phy_exit(phy); + phy_init(phy); + + drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n", + phy, phy->init_count, phy->power_count); + return ret; +} + +int dp_ctrl_off_link(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + struct dp_io *dp_io; + struct phy *phy; + int ret; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + dp_io = &ctrl->parser->io; + phy = dp_io->phy; + + dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + + ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false); + if (ret) { + DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret); + } + + DRM_DEBUG_DP("Before, phy=%p init_count=%d power_on=%d\n", + phy, phy->init_count, phy->power_count); + + phy_power_off(phy); + + DRM_DEBUG_DP("After, phy=%p init_count=%d power_on=%d\n", + phy, phy->init_count, phy->power_count); + + return ret; +} + +int dp_ctrl_off(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + struct dp_io *dp_io; + struct phy *phy; + int ret = 0; + + if (!dp_ctrl) + return -EINVAL; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + dp_io = &ctrl->parser->io; + phy = dp_io->phy; + + dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false); + + dp_catalog_ctrl_reset(ctrl->catalog); + + ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, false); + if (ret) + DRM_ERROR("Failed to disable pixel clocks. ret=%d\n", ret); + + dev_pm_opp_set_rate(ctrl->dev, 0); + ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false); + if (ret) { + DRM_ERROR("Failed to disable link clocks. 
ret=%d\n", ret); + } + + phy_power_off(phy); + drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n", + phy, phy->init_count, phy->power_count); + + return ret; +} + +void dp_ctrl_isr(struct dp_ctrl *dp_ctrl) +{ + struct dp_ctrl_private *ctrl; + u32 isr; + + if (!dp_ctrl) + return; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + isr = dp_catalog_ctrl_get_interrupt(ctrl->catalog); + + if (isr & DP_CTRL_INTR_READY_FOR_VIDEO) { + drm_dbg_dp(ctrl->drm_dev, "dp_video_ready\n"); + complete(&ctrl->video_comp); + } + + if (isr & DP_CTRL_INTR_IDLE_PATTERN_SENT) { + drm_dbg_dp(ctrl->drm_dev, "idle_patterns_sent\n"); + complete(&ctrl->idle_comp); + } +} + +struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link, + struct dp_panel *panel, struct drm_dp_aux *aux, + struct dp_power *power, struct dp_catalog *catalog, + struct dp_parser *parser) +{ + struct dp_ctrl_private *ctrl; + int ret; + + if (!dev || !panel || !aux || + !link || !catalog) { + DRM_ERROR("invalid input\n"); + return ERR_PTR(-EINVAL); + } + + ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) { + DRM_ERROR("Mem allocation failure\n"); + return ERR_PTR(-ENOMEM); + } + + ret = devm_pm_opp_set_clkname(dev, "ctrl_link"); + if (ret) { + dev_err(dev, "invalid DP OPP table in device tree\n"); + /* caller do PTR_ERR(opp_table) */ + return (struct dp_ctrl *)ERR_PTR(ret); + } + + /* OPP table is optional */ + ret = devm_pm_opp_of_add_table(dev); + if (ret) + dev_err(dev, "failed to add DP OPP table\n"); + + init_completion(&ctrl->idle_comp); + init_completion(&ctrl->video_comp); + + /* in parameters */ + ctrl->parser = parser; + ctrl->panel = panel; + ctrl->power = power; + ctrl->aux = aux; + ctrl->link = link; + ctrl->catalog = catalog; + ctrl->dev = dev; + + return &ctrl->dp_ctrl; +} diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h new file mode 100644 index 000000000..9f29734af --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */
+
+#ifndef _DP_CTRL_H_
+#define _DP_CTRL_H_
+
+#include "dp_aux.h"
+#include "dp_panel.h"
+#include "dp_link.h"
+#include "dp_parser.h"
+#include "dp_power.h"
+#include "dp_catalog.h"
+
+struct dp_ctrl {
+	bool orientation;
+	atomic_t aborted;
+	bool wide_bus_en;
+};
+
+int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train);
+int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_off_link(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_isr(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl);
+struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
+			struct dp_panel *panel, struct drm_dp_aux *aux,
+			struct dp_power *power, struct dp_catalog *catalog,
+			struct dp_parser *parser);
+
+void dp_ctrl_reset_irq_ctrl(struct dp_ctrl *dp_ctrl, bool enable);
+void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_irq_phy_exit(struct dp_ctrl *dp_ctrl);
+
+#endif /* _DP_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
new file mode 100644
index 000000000..5e35033ba
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
+
+#include <linux/debugfs.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_file.h>
+
+#include "dp_parser.h"
+#include "dp_catalog.h"
+#include "dp_aux.h"
+#include "dp_ctrl.h"
+#include "dp_debug.h"
+#include "dp_display.h"
+
+#define DEBUG_NAME "msm_dp"
+
+struct dp_debug_private {
+	struct dentry *root;
+
+	struct dp_usbpd *usbpd;
+	struct dp_link *link;
+	struct dp_panel *panel;
+	struct drm_connector *connector;
+	struct device *dev;
+	struct drm_device *drm_dev;
+
+	struct dp_debug dp_debug;
+};
+
+static int dp_debug_show(struct seq_file *seq, void *p)
+{
+	struct dp_debug_private *debug = seq->private;
+	u64 lclk = 0;
+	u32 link_params_rate;
+	const struct drm_display_mode *drm_mode;
+
+	if (!debug)
+		return -ENODEV;
+
+	drm_mode = &debug->panel->dp_mode.drm_mode;
+
+	seq_printf(seq, "\tname = %s\n", DEBUG_NAME);
+	seq_printf(seq, "\tdrm_dp_link\n\t\trate = %u\n",
+			debug->panel->link_info.rate);
+	seq_printf(seq, "\t\tnum_lanes = %u\n",
+			debug->panel->link_info.num_lanes);
+	seq_printf(seq, "\t\tcapabilities = %lu\n",
+			debug->panel->link_info.capabilities);
+	seq_printf(seq, "\tdp_panel_info:\n\t\tactive = %dx%d\n",
+			drm_mode->hdisplay,
+			drm_mode->vdisplay);
+	seq_printf(seq, "\t\tback_porch = %dx%d\n",
+			drm_mode->htotal - drm_mode->hsync_end,
+			drm_mode->vtotal - drm_mode->vsync_end);
+	seq_printf(seq, "\t\tfront_porch = %dx%d\n",
+			drm_mode->hsync_start - drm_mode->hdisplay,
+			drm_mode->vsync_start - drm_mode->vdisplay);
+	seq_printf(seq, "\t\tsync_width = %dx%d\n",
+			drm_mode->hsync_end - drm_mode->hsync_start,
+			drm_mode->vsync_end - drm_mode->vsync_start);
+	seq_printf(seq, "\t\tactive_low = %dx%d\n",
+			debug->panel->dp_mode.h_active_low,
+			debug->panel->dp_mode.v_active_low);
+	seq_printf(seq, "\t\th_skew = %d\n",
+			drm_mode->hskew);
+	seq_printf(seq, "\t\trefresh rate = %d\n",
+			drm_mode_vrefresh(drm_mode));
+	seq_printf(seq, "\t\tpixel clock khz = %d\n",
+			drm_mode->clock);
+	seq_printf(seq, "\t\tbpp = %d\n",
+			debug->panel->dp_mode.bpp);
+
+	/* Link Information */
+	seq_printf(seq, "\tdp_link:\n\t\ttest_requested = %d\n",
+ debug->link->sink_request); + seq_printf(seq, "\t\tnum_lanes = %d\n", + debug->link->link_params.num_lanes); + link_params_rate = debug->link->link_params.rate; + seq_printf(seq, "\t\tbw_code = %d\n", + drm_dp_link_rate_to_bw_code(link_params_rate)); + lclk = debug->link->link_params.rate * 1000; + seq_printf(seq, "\t\tlclk = %lld\n", lclk); + seq_printf(seq, "\t\tv_level = %d\n", + debug->link->phy_params.v_level); + seq_printf(seq, "\t\tp_level = %d\n", + debug->link->phy_params.p_level); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(dp_debug); + +static int dp_test_data_show(struct seq_file *m, void *data) +{ + const struct dp_debug_private *debug = m->private; + const struct drm_connector *connector = debug->connector; + u32 bpc; + + if (connector->status == connector_status_connected) { + bpc = debug->link->test_video.test_bit_depth; + seq_printf(m, "hdisplay: %d\n", + debug->link->test_video.test_h_width); + seq_printf(m, "vdisplay: %d\n", + debug->link->test_video.test_v_height); + seq_printf(m, "bpc: %u\n", + dp_link_bit_depth_to_bpc(bpc)); + } else { + seq_puts(m, "0"); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(dp_test_data); + +static int dp_test_type_show(struct seq_file *m, void *data) +{ + const struct dp_debug_private *debug = m->private; + const struct drm_connector *connector = debug->connector; + + if (connector->status == connector_status_connected) + seq_printf(m, "%02x", DP_TEST_LINK_VIDEO_PATTERN); + else + seq_puts(m, "0"); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(dp_test_type); + +static ssize_t dp_test_active_write(struct file *file, + const char __user *ubuf, + size_t len, loff_t *offp) +{ + char *input_buffer; + int status = 0; + const struct dp_debug_private *debug; + const struct drm_connector *connector; + int val = 0; + + debug = ((struct seq_file *)file->private_data)->private; + connector = debug->connector; + + if (len == 0) + return 0; + + input_buffer = memdup_user_nul(ubuf, len); + if (IS_ERR(input_buffer)) + return PTR_ERR(input_buffer); + + DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len); + + if (connector->status == connector_status_connected) { + status = kstrtoint(input_buffer, 10, &val); + if (status < 0) { + kfree(input_buffer); + return status; + } + DRM_DEBUG_DRIVER("Got %d for test active\n", val); + /* To prevent erroneous activation of the compliance + * testing code, only accept an actual value of 1 here + */ + if (val == 1) + debug->panel->video_test = true; + else + debug->panel->video_test = false; + } + kfree(input_buffer); + + *offp += len; + return len; +} + +static int dp_test_active_show(struct seq_file *m, void *data) +{ + struct dp_debug_private *debug = m->private; + struct drm_connector *connector = debug->connector; + + if (connector->status == connector_status_connected) { + if (debug->panel->video_test) + seq_puts(m, "1"); + else + seq_puts(m, "0"); + } else { + seq_puts(m, "0"); + } + + return 0; +} + +static int dp_test_active_open(struct inode *inode, + struct file *file) +{ + return single_open(file, dp_test_active_show, + inode->i_private); +} + +static const struct file_operations test_active_fops = { + .owner = THIS_MODULE, + .open = dp_test_active_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = dp_test_active_write +}; + +static void dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor) +{ + char path[64]; + struct dp_debug_private *debug = container_of(dp_debug, + struct dp_debug_private, dp_debug); + + snprintf(path, sizeof(path), "msm_dp-%s", 
debug->connector->name); + + debug->root = debugfs_create_dir(path, minor->debugfs_root); + + debugfs_create_file("dp_debug", 0444, debug->root, + debug, &dp_debug_fops); + + debugfs_create_file("msm_dp_test_active", 0444, + debug->root, + debug, &test_active_fops); + + debugfs_create_file("msm_dp_test_data", 0444, + debug->root, + debug, &dp_test_data_fops); + + debugfs_create_file("msm_dp_test_type", 0444, + debug->root, + debug, &dp_test_type_fops); +} + +struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, + struct dp_usbpd *usbpd, struct dp_link *link, + struct drm_connector *connector, struct drm_minor *minor) +{ + struct dp_debug_private *debug; + struct dp_debug *dp_debug; + int rc; + + if (!dev || !panel || !usbpd || !link) { + DRM_ERROR("invalid input\n"); + rc = -EINVAL; + goto error; + } + + debug = devm_kzalloc(dev, sizeof(*debug), GFP_KERNEL); + if (!debug) { + rc = -ENOMEM; + goto error; + } + + debug->dp_debug.debug_en = false; + debug->usbpd = usbpd; + debug->link = link; + debug->panel = panel; + debug->dev = dev; + debug->drm_dev = minor->dev; + debug->connector = connector; + + dp_debug = &debug->dp_debug; + dp_debug->vdisplay = 0; + dp_debug->hdisplay = 0; + dp_debug->vrefresh = 0; + + dp_debug_init(dp_debug, minor); + + return dp_debug; + error: + return ERR_PTR(rc); +} + +static int dp_debug_deinit(struct dp_debug *dp_debug) +{ + struct dp_debug_private *debug; + + if (!dp_debug) + return -EINVAL; + + debug = container_of(dp_debug, struct dp_debug_private, dp_debug); + + debugfs_remove_recursive(debug->root); + + return 0; +} + +void dp_debug_put(struct dp_debug *dp_debug) +{ + struct dp_debug_private *debug; + + if (!dp_debug) + return; + + debug = container_of(dp_debug, struct dp_debug_private, dp_debug); + + dp_debug_deinit(dp_debug); + + devm_kfree(debug->dev, debug); +} diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h new file mode 100644 index 000000000..8c0d0b517 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_debug.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+ */
+
+#ifndef _DP_DEBUG_H_
+#define _DP_DEBUG_H_
+
+#include "dp_panel.h"
+#include "dp_link.h"
+
+/**
+ * struct dp_debug
+ * @debug_en: specifies whether debug mode enabled
+ * @aspect_ratio: used to filter out aspect ratio value
+ * @vdisplay: used to filter out vdisplay value
+ * @hdisplay: used to filter out hdisplay value
+ * @vrefresh: used to filter out vrefresh value
+ */
+struct dp_debug {
+	bool debug_en;
+	int aspect_ratio;
+	int vdisplay;
+	int hdisplay;
+	int vrefresh;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * dp_debug_get() - configure and get the DisplayPort debug module data
+ *
+ * @dev: device instance of the caller
+ * @panel: instance of panel module
+ * @usbpd: instance of usbpd module
+ * @link: instance of link module
+ * @connector: pointer to display connector
+ * @minor: pointer to drm minor number after device registration
+ * return: pointer to allocated debug module data
+ *
+ * This function sets up the debug module and provides a way
+ * for debugfs input to be communicated with existing modules
+ */
+struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
+		struct dp_usbpd *usbpd, struct dp_link *link,
+		struct drm_connector *connector,
+		struct drm_minor *minor);
+
+/**
+ * dp_debug_put()
+ *
+ * Cleans up dp_debug instance
+ *
+ * @dp_debug: instance of dp_debug
+ */
+void dp_debug_put(struct dp_debug *dp_debug);
+
+#else
+
+static inline
+struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
+		struct dp_usbpd *usbpd, struct dp_link *link,
+		struct drm_connector *connector, struct drm_minor *minor)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline void dp_debug_put(struct dp_debug *dp_debug)
+{
+}
+
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* _DP_DEBUG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
new file mode 100644
index 000000000..d16c12351
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -0,0 +1,1784 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "msm_drv.h" +#include "msm_kms.h" +#include "dp_hpd.h" +#include "dp_parser.h" +#include "dp_power.h" +#include "dp_catalog.h" +#include "dp_aux.h" +#include "dp_reg.h" +#include "dp_link.h" +#include "dp_panel.h" +#include "dp_ctrl.h" +#include "dp_display.h" +#include "dp_drm.h" +#include "dp_audio.h" +#include "dp_debug.h" + +#define HPD_STRING_SIZE 30 + +enum { + ISR_DISCONNECTED, + ISR_CONNECT_PENDING, + ISR_CONNECTED, + ISR_HPD_REPLUG_COUNT, + ISR_IRQ_HPD_PULSE_COUNT, + ISR_HPD_LO_GLITH_COUNT, +}; + +/* event thread connection state */ +enum { + ST_DISCONNECTED, + ST_MAINLINK_READY, + ST_CONNECTED, + ST_DISCONNECT_PENDING, + ST_DISPLAY_OFF, + ST_SUSPENDED, +}; + +enum { + EV_NO_EVENT, + /* hpd events */ + EV_HPD_INIT_SETUP, + EV_HPD_PLUG_INT, + EV_IRQ_HPD_INT, + EV_HPD_UNPLUG_INT, + EV_USER_NOTIFICATION, +}; + +#define EVENT_TIMEOUT (HZ/10) /* 100ms */ +#define DP_EVENT_Q_MAX 8 + +#define DP_TIMEOUT_NONE 0 + +#define WAIT_FOR_RESUME_TIMEOUT_JIFFIES (HZ / 2) + +struct dp_event { + u32 event_id; + u32 data; + u32 delay; +}; + +struct dp_display_private { + char *name; + int irq; + + unsigned int id; + + /* state variables */ + bool core_initialized; + bool phy_initialized; + bool hpd_irq_on; + bool audio_supported; + + struct drm_device *drm_dev; + struct platform_device *pdev; + struct dentry *root; + + struct dp_usbpd *usbpd; + struct dp_parser *parser; + struct dp_power *power; + struct dp_catalog *catalog; + struct drm_dp_aux *aux; + struct dp_link *link; + struct dp_panel *panel; + struct dp_ctrl *ctrl; + struct dp_debug *debug; + + struct dp_usbpd_cb usbpd_cb; + struct dp_display_mode dp_mode; + struct msm_dp dp_display; + + /* wait for audio signaling */ + struct completion audio_comp; + + /* event related only access by event thread */ + struct mutex event_mutex; + wait_queue_head_t event_q; + u32 hpd_state; + u32 event_pndx; + u32 event_gndx; + struct task_struct *ev_tsk; + struct dp_event event_list[DP_EVENT_Q_MAX]; + spinlock_t event_lock; + + bool wide_bus_en; + + struct dp_audio *audio; +}; + +struct msm_dp_desc { + phys_addr_t io_start; + unsigned int connector_type; + bool wide_bus_en; +}; + +struct msm_dp_config { + const struct msm_dp_desc *descs; + size_t num_descs; +}; + +static const struct msm_dp_desc sc7180_dp_descs[] = { + [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort }, +}; + +static const struct msm_dp_config sc7180_dp_cfg = { + .descs = sc7180_dp_descs, + .num_descs = ARRAY_SIZE(sc7180_dp_descs), +}; + +static const struct msm_dp_desc sc7280_dp_descs[] = { + [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true }, + [MSM_DP_CONTROLLER_1] = { .io_start = 0x0aea0000, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_en = true }, +}; + +static const struct msm_dp_config sc7280_dp_cfg = { + .descs = sc7280_dp_descs, + .num_descs = ARRAY_SIZE(sc7280_dp_descs), +}; + +static const struct msm_dp_desc sc8180x_dp_descs[] = { + [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort }, + [MSM_DP_CONTROLLER_1] = { .io_start = 0x0ae98000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort }, + [MSM_DP_CONTROLLER_2] = { .io_start = 0x0ae9a000, .connector_type = DRM_MODE_CONNECTOR_eDP }, +}; + +static const struct msm_dp_config sc8180x_dp_cfg = { + .descs = sc8180x_dp_descs, + .num_descs = 
ARRAY_SIZE(sc8180x_dp_descs), +}; + +static const struct msm_dp_desc sm8350_dp_descs[] = { + [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort }, +}; + +static const struct msm_dp_config sm8350_dp_cfg = { + .descs = sm8350_dp_descs, + .num_descs = ARRAY_SIZE(sm8350_dp_descs), +}; + +static const struct of_device_id dp_dt_match[] = { + { .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_cfg }, + { .compatible = "qcom,sc7280-dp", .data = &sc7280_dp_cfg }, + { .compatible = "qcom,sc7280-edp", .data = &sc7280_dp_cfg }, + { .compatible = "qcom,sc8180x-dp", .data = &sc8180x_dp_cfg }, + { .compatible = "qcom,sc8180x-edp", .data = &sc8180x_dp_cfg }, + { .compatible = "qcom,sm8350-dp", .data = &sm8350_dp_cfg }, + {} +}; + +static struct dp_display_private *dev_get_dp_display_private(struct device *dev) +{ + struct msm_dp *dp = dev_get_drvdata(dev); + + return container_of(dp, struct dp_display_private, dp_display); +} + +static int dp_add_event(struct dp_display_private *dp_priv, u32 event, + u32 data, u32 delay) +{ + unsigned long flag; + struct dp_event *todo; + int pndx; + + spin_lock_irqsave(&dp_priv->event_lock, flag); + pndx = dp_priv->event_pndx + 1; + pndx %= DP_EVENT_Q_MAX; + if (pndx == dp_priv->event_gndx) { + pr_err("event_q is full: pndx=%d gndx=%d\n", + dp_priv->event_pndx, dp_priv->event_gndx); + spin_unlock_irqrestore(&dp_priv->event_lock, flag); + return -EPERM; + } + todo = &dp_priv->event_list[dp_priv->event_pndx++]; + dp_priv->event_pndx %= DP_EVENT_Q_MAX; + todo->event_id = event; + todo->data = data; + todo->delay = delay; + wake_up(&dp_priv->event_q); + spin_unlock_irqrestore(&dp_priv->event_lock, flag); + + return 0; +} + +static int dp_del_event(struct dp_display_private *dp_priv, u32 event) +{ + unsigned long flag; + struct dp_event *todo; + u32 gndx; + + spin_lock_irqsave(&dp_priv->event_lock, flag); + if (dp_priv->event_pndx == dp_priv->event_gndx) { + spin_unlock_irqrestore(&dp_priv->event_lock, flag); + return -ENOENT; + } + + gndx = dp_priv->event_gndx; + while (dp_priv->event_pndx != gndx) { + todo = &dp_priv->event_list[gndx]; + if (todo->event_id == event) { + todo->event_id = EV_NO_EVENT; /* deleted */ + todo->delay = 0; + } + gndx++; + gndx %= DP_EVENT_Q_MAX; + } + spin_unlock_irqrestore(&dp_priv->event_lock, flag); + + return 0; +} + +void dp_display_signal_audio_start(struct msm_dp *dp_display) +{ + struct dp_display_private *dp; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + reinit_completion(&dp->audio_comp); +} + +void dp_display_signal_audio_complete(struct msm_dp *dp_display) +{ + struct dp_display_private *dp; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + complete_all(&dp->audio_comp); +} + +static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv); + +static int dp_display_bind(struct device *dev, struct device *master, + void *data) +{ + int rc = 0; + struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_drm_private *priv = dev_get_drvdata(master); + struct drm_device *drm = priv->dev; + + dp->dp_display.drm_dev = drm; + priv->dp[dp->id] = &dp->dp_display; + + rc = dp->parser->parse(dp->parser); + if (rc) { + DRM_ERROR("device tree parsing failed\n"); + goto end; + } + + + dp->drm_dev = drm; + dp->aux->drm_dev = drm; + rc = dp_aux_register(dp->aux); + if (rc) { + DRM_ERROR("DRM DP AUX register failed\n"); + goto end; + } + + rc = dp_power_client_init(dp->power); + if (rc) { + DRM_ERROR("Power client 
create failed\n"); + goto end; + } + + rc = dp_register_audio_driver(dev, dp->audio); + if (rc) { + DRM_ERROR("Audio registration Dp failed\n"); + goto end; + } + + rc = dp_hpd_event_thread_start(dp); + if (rc) { + DRM_ERROR("Event thread create failed\n"); + goto end; + } + + return 0; +end: + return rc; +} + +static void dp_display_unbind(struct device *dev, struct device *master, + void *data) +{ + struct dp_display_private *dp = dev_get_dp_display_private(dev); + struct msm_drm_private *priv = dev_get_drvdata(master); + + /* disable all HPD interrupts */ + if (dp->core_initialized) + dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false); + + kthread_stop(dp->ev_tsk); + + dp_power_client_deinit(dp->power); + dp_unregister_audio_driver(dev, dp->audio); + dp_aux_unregister(dp->aux); + dp->drm_dev = NULL; + dp->aux->drm_dev = NULL; + priv->dp[dp->id] = NULL; +} + +static const struct component_ops dp_display_comp_ops = { + .bind = dp_display_bind, + .unbind = dp_display_unbind, +}; + +static bool dp_display_is_ds_bridge(struct dp_panel *panel) +{ + return (panel->dpcd[DP_DOWNSTREAMPORT_PRESENT] & + DP_DWN_STRM_PORT_PRESENT); +} + +static bool dp_display_is_sink_count_zero(struct dp_display_private *dp) +{ + drm_dbg_dp(dp->drm_dev, "present=%#x sink_count=%d\n", + dp->panel->dpcd[DP_DOWNSTREAMPORT_PRESENT], + dp->link->sink_count); + return dp_display_is_ds_bridge(dp->panel) && + (dp->link->sink_count == 0); +} + +static void dp_display_send_hpd_event(struct msm_dp *dp_display) +{ + struct dp_display_private *dp; + struct drm_connector *connector; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + connector = dp->dp_display.connector; + drm_helper_hpd_irq_event(connector->dev); +} + + +static int dp_display_send_hpd_notification(struct dp_display_private *dp, + bool hpd) +{ + if ((hpd && dp->dp_display.is_connected) || + (!hpd && !dp->dp_display.is_connected)) { + drm_dbg_dp(dp->drm_dev, "HPD already %s\n", + (hpd ? 
"on" : "off")); + return 0; + } + + /* reset video pattern flag on disconnect */ + if (!hpd) + dp->panel->video_test = false; + + dp->dp_display.is_connected = hpd; + + drm_dbg_dp(dp->drm_dev, "type=%d hpd=%d\n", + dp->dp_display.connector_type, hpd); + dp_display_send_hpd_event(&dp->dp_display); + + return 0; +} + +static int dp_display_process_hpd_high(struct dp_display_private *dp) +{ + int rc = 0; + struct edid *edid; + + dp->panel->max_dp_lanes = dp->parser->max_dp_lanes; + + rc = dp_panel_read_sink_caps(dp->panel, dp->dp_display.connector); + if (rc) + goto end; + + dp_link_process_request(dp->link); + + edid = dp->panel->edid; + + dp->audio_supported = drm_detect_monitor_audio(edid); + dp_panel_handle_sink_request(dp->panel); + + dp->dp_display.max_dp_lanes = dp->parser->max_dp_lanes; + + /* + * set sink to normal operation mode -- D0 + * before dpcd read + */ + dp_link_psm_config(dp->link, &dp->panel->link_info, false); + + dp_link_reset_phy_params_vx_px(dp->link); + rc = dp_ctrl_on_link(dp->ctrl); + if (rc) { + DRM_ERROR("failed to complete DP link training\n"); + goto end; + } + + dp_add_event(dp, EV_USER_NOTIFICATION, true, 0); + +end: + return rc; +} + +static void dp_display_host_phy_init(struct dp_display_private *dp) +{ + drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n", + dp->dp_display.connector_type, dp->core_initialized, + dp->phy_initialized); + + if (!dp->phy_initialized) { + dp_ctrl_phy_init(dp->ctrl); + dp->phy_initialized = true; + } +} + +static void dp_display_host_phy_exit(struct dp_display_private *dp) +{ + drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n", + dp->dp_display.connector_type, dp->core_initialized, + dp->phy_initialized); + + if (dp->phy_initialized) { + dp_ctrl_phy_exit(dp->ctrl); + dp->phy_initialized = false; + } +} + +static void dp_display_host_init(struct dp_display_private *dp) +{ + drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n", + dp->dp_display.connector_type, dp->core_initialized, + dp->phy_initialized); + + dp_power_init(dp->power, false); + dp_ctrl_reset_irq_ctrl(dp->ctrl, true); + dp_aux_init(dp->aux); + dp->core_initialized = true; +} + +static void dp_display_host_deinit(struct dp_display_private *dp) +{ + drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n", + dp->dp_display.connector_type, dp->core_initialized, + dp->phy_initialized); + + dp_ctrl_reset_irq_ctrl(dp->ctrl, false); + dp_aux_deinit(dp->aux); + dp_power_deinit(dp->power); + dp->core_initialized = false; +} + +static int dp_display_usbpd_configure_cb(struct device *dev) +{ + struct dp_display_private *dp = dev_get_dp_display_private(dev); + + dp_display_host_phy_init(dp); + + return dp_display_process_hpd_high(dp); +} + +static int dp_display_usbpd_disconnect_cb(struct device *dev) +{ + return 0; +} + +static int dp_display_notify_disconnect(struct device *dev) +{ + struct dp_display_private *dp = dev_get_dp_display_private(dev); + + dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); + + return 0; +} + +static void dp_display_handle_video_request(struct dp_display_private *dp) +{ + if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) { + dp->panel->video_test = true; + dp_link_send_test_response(dp->link); + } +} + +static int dp_display_handle_port_ststus_changed(struct dp_display_private *dp) +{ + int rc = 0; + + if (dp_display_is_sink_count_zero(dp)) { + drm_dbg_dp(dp->drm_dev, "sink count is zero, nothing to do\n"); + if (dp->hpd_state != ST_DISCONNECTED) { + dp->hpd_state = ST_DISCONNECT_PENDING; + dp_add_event(dp, 
EV_USER_NOTIFICATION, false, 0);
+		}
+	} else {
+		if (dp->hpd_state == ST_DISCONNECTED) {
+			dp->hpd_state = ST_MAINLINK_READY;
+			rc = dp_display_process_hpd_high(dp);
+			if (rc)
+				dp->hpd_state = ST_DISCONNECTED;
+		}
+	}
+
+	return rc;
+}
+
+static int dp_display_handle_irq_hpd(struct dp_display_private *dp)
+{
+	u32 sink_request = dp->link->sink_request;
+
+	drm_dbg_dp(dp->drm_dev, "%d\n", sink_request);
+	if (dp->hpd_state == ST_DISCONNECTED) {
+		if (sink_request & DP_LINK_STATUS_UPDATED) {
+			drm_dbg_dp(dp->drm_dev, "Disconnected sink_request: %d\n",
+				sink_request);
+			DRM_ERROR("Disconnected, no DP_LINK_STATUS_UPDATED\n");
+			return -EINVAL;
+		}
+	}
+
+	dp_ctrl_handle_sink_request(dp->ctrl);
+
+	if (sink_request & DP_TEST_LINK_VIDEO_PATTERN)
+		dp_display_handle_video_request(dp);
+
+	return 0;
+}
+
+static int dp_display_usbpd_attention_cb(struct device *dev)
+{
+	int rc = 0;
+	u32 sink_request;
+	struct dp_display_private *dp = dev_get_dp_display_private(dev);
+
+	/* check for any test request issued by sink */
+	rc = dp_link_process_request(dp->link);
+	if (!rc) {
+		sink_request = dp->link->sink_request;
+		drm_dbg_dp(dp->drm_dev, "hpd_state=%d sink_request=%d\n",
+					dp->hpd_state, sink_request);
+		if (sink_request & DS_PORT_STATUS_CHANGED)
+			rc = dp_display_handle_port_status_changed(dp);
+		else
+			rc = dp_display_handle_irq_hpd(dp);
+	}
+
+	return rc;
+}
+
+static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
+{
+	struct dp_usbpd *hpd = dp->usbpd;
+	u32 state;
+	int ret;
+
+	if (!hpd)
+		return 0;
+
+	mutex_lock(&dp->event_mutex);
+
+	state = dp->hpd_state;
+	drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
+			dp->dp_display.connector_type, state);
+
+	if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
+		mutex_unlock(&dp->event_mutex);
+		return 0;
+	}
+
+	if (state == ST_MAINLINK_READY || state == ST_CONNECTED) {
+		mutex_unlock(&dp->event_mutex);
+		return 0;
+	}
+
+	if (state == ST_DISCONNECT_PENDING) {
+		/* wait until ST_DISCONNECTED */
+		dp_add_event(dp, EV_HPD_PLUG_INT, 0, 1); /* delay = 1 */
+		mutex_unlock(&dp->event_mutex);
+		return 0;
+	}
+
+	ret = dp_display_usbpd_configure_cb(&dp->pdev->dev);
+	if (ret) {	/* link train failed */
+		dp->hpd_state = ST_DISCONNECTED;
+	} else {
+		dp->hpd_state = ST_MAINLINK_READY;
+	}
+
+	/* enable HPD irq_hpd/replug interrupt */
+	dp_catalog_hpd_config_intr(dp->catalog,
+			DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, true);
+
+	drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
+			dp->dp_display.connector_type, state);
+	mutex_unlock(&dp->event_mutex);
+
+	/* uevent will complete connection part */
+	return 0;
+}
+
+static void dp_display_handle_plugged_change(struct msm_dp *dp_display,
+		bool plugged)
+{
+	struct dp_display_private *dp;
+
+	dp = container_of(dp_display,
+			struct dp_display_private, dp_display);
+
+	/* notify audio subsystem only if sink supports audio */
+	if (dp_display->plugged_cb && dp_display->codec_dev &&
+			dp->audio_supported)
+		dp_display->plugged_cb(dp_display->codec_dev, plugged);
+}
+
+static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
+{
+	struct dp_usbpd *hpd = dp->usbpd;
+	u32 state;
+
+	if (!hpd)
+		return 0;
+
+	mutex_lock(&dp->event_mutex);
+
+	state = dp->hpd_state;
+
+	drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
+			dp->dp_display.connector_type, state);
+
+	/* disable irq_hpd/replug interrupts */
+	dp_catalog_hpd_config_intr(dp->catalog,
+			DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, false);
+
+	/* unplugged, no more irq_hpd handling 
*/ + dp_del_event(dp, EV_IRQ_HPD_INT); + + if (state == ST_DISCONNECTED) { + /* triggered by irq_hpd with sink_count = 0 */ + if (dp->link->sink_count == 0) { + dp_display_host_phy_exit(dp); + } + dp_display_notify_disconnect(&dp->pdev->dev); + mutex_unlock(&dp->event_mutex); + return 0; + } else if (state == ST_DISCONNECT_PENDING) { + mutex_unlock(&dp->event_mutex); + return 0; + } else if (state == ST_MAINLINK_READY) { + dp_ctrl_off_link(dp->ctrl); + dp_display_host_phy_exit(dp); + dp->hpd_state = ST_DISCONNECTED; + dp_display_notify_disconnect(&dp->pdev->dev); + mutex_unlock(&dp->event_mutex); + return 0; + } + + /* disable HPD plug interrupts */ + dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, false); + + /* + * We don't need separate work for disconnect as + * connect/attention interrupts are disabled + */ + dp_display_notify_disconnect(&dp->pdev->dev); + + if (state == ST_DISPLAY_OFF) { + dp->hpd_state = ST_DISCONNECTED; + } else { + dp->hpd_state = ST_DISCONNECT_PENDING; + } + + /* signal the disconnect event early to ensure proper teardown */ + dp_display_handle_plugged_change(&dp->dp_display, false); + + /* enable HPD plug interrupt to prepare for the next plug-in */ + if (!dp->dp_display.is_edp) + dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, true); + + drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n", + dp->dp_display.connector_type, state); + + /* uevent will complete disconnection part */ + mutex_unlock(&dp->event_mutex); + return 0; +} + +static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data) +{ + u32 state; + + mutex_lock(&dp->event_mutex); + + /* irq_hpd can happen at either connected or disconnected state */ + state = dp->hpd_state; + drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n", + dp->dp_display.connector_type, state); + + if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) { + mutex_unlock(&dp->event_mutex); + return 0; + } + + if (state == ST_MAINLINK_READY || state == ST_DISCONNECT_PENDING) { + /* wait until ST_CONNECTED */ + dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */ + mutex_unlock(&dp->event_mutex); + return 0; + } + + dp_display_usbpd_attention_cb(&dp->pdev->dev); + + drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n", + dp->dp_display.connector_type, state); + + mutex_unlock(&dp->event_mutex); + + return 0; +} + +static void dp_display_deinit_sub_modules(struct dp_display_private *dp) +{ + dp_debug_put(dp->debug); + dp_audio_put(dp->audio); + dp_panel_put(dp->panel); + dp_aux_put(dp->aux); +} + +static int dp_init_sub_modules(struct dp_display_private *dp) +{ + int rc = 0; + struct device *dev = &dp->pdev->dev; + struct dp_usbpd_cb *cb = &dp->usbpd_cb; + struct dp_panel_in panel_in = { + .dev = dev, + }; + + /* Callback APIs used for cable status change event */ + cb->configure = dp_display_usbpd_configure_cb; + cb->disconnect = dp_display_usbpd_disconnect_cb; + cb->attention = dp_display_usbpd_attention_cb; + + dp->usbpd = dp_hpd_get(dev, cb); + if (IS_ERR(dp->usbpd)) { + rc = PTR_ERR(dp->usbpd); + DRM_ERROR("failed to initialize hpd, rc = %d\n", rc); + dp->usbpd = NULL; + goto error; + } + + dp->parser = dp_parser_get(dp->pdev); + if (IS_ERR(dp->parser)) { + rc = PTR_ERR(dp->parser); + DRM_ERROR("failed to initialize parser, rc = %d\n", rc); + dp->parser = NULL; + goto error; + } + + dp->catalog = dp_catalog_get(dev, &dp->parser->io); + if (IS_ERR(dp->catalog)) { + rc = PTR_ERR(dp->catalog); + DRM_ERROR("failed to initialize catalog, rc = %d\n", rc); + dp->catalog = NULL; + goto 
error; + } + + dp->power = dp_power_get(dev, dp->parser); + if (IS_ERR(dp->power)) { + rc = PTR_ERR(dp->power); + DRM_ERROR("failed to initialize power, rc = %d\n", rc); + dp->power = NULL; + goto error; + } + + dp->aux = dp_aux_get(dev, dp->catalog, dp->dp_display.is_edp); + if (IS_ERR(dp->aux)) { + rc = PTR_ERR(dp->aux); + DRM_ERROR("failed to initialize aux, rc = %d\n", rc); + dp->aux = NULL; + goto error; + } + + dp->link = dp_link_get(dev, dp->aux); + if (IS_ERR(dp->link)) { + rc = PTR_ERR(dp->link); + DRM_ERROR("failed to initialize link, rc = %d\n", rc); + dp->link = NULL; + goto error_link; + } + + panel_in.aux = dp->aux; + panel_in.catalog = dp->catalog; + panel_in.link = dp->link; + + dp->panel = dp_panel_get(&panel_in); + if (IS_ERR(dp->panel)) { + rc = PTR_ERR(dp->panel); + DRM_ERROR("failed to initialize panel, rc = %d\n", rc); + dp->panel = NULL; + goto error_link; + } + + dp->ctrl = dp_ctrl_get(dev, dp->link, dp->panel, dp->aux, + dp->power, dp->catalog, dp->parser); + if (IS_ERR(dp->ctrl)) { + rc = PTR_ERR(dp->ctrl); + DRM_ERROR("failed to initialize ctrl, rc = %d\n", rc); + dp->ctrl = NULL; + goto error_ctrl; + } + + dp->audio = dp_audio_get(dp->pdev, dp->panel, dp->catalog); + if (IS_ERR(dp->audio)) { + rc = PTR_ERR(dp->audio); + pr_err("failed to initialize audio, rc = %d\n", rc); + dp->audio = NULL; + goto error_ctrl; + } + + /* populate wide_bus_en to the different layers */ + dp->ctrl->wide_bus_en = dp->wide_bus_en; + dp->catalog->wide_bus_en = dp->wide_bus_en; + + return rc; + +error_ctrl: + dp_panel_put(dp->panel); +error_link: + dp_aux_put(dp->aux); +error: + return rc; +} + +static int dp_display_set_mode(struct msm_dp *dp_display, + struct dp_display_mode *mode) +{ + struct dp_display_private *dp; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + drm_mode_copy(&dp->panel->dp_mode.drm_mode, &mode->drm_mode); + dp->panel->dp_mode.bpp = mode->bpp; + dp->panel->dp_mode.capabilities = mode->capabilities; + dp_panel_init_panel_info(dp->panel); + return 0; +} + +static int dp_display_enable(struct dp_display_private *dp, bool force_link_train) +{ + int rc = 0; + struct msm_dp *dp_display = &dp->dp_display; + + drm_dbg_dp(dp->drm_dev, "sink_count=%d\n", dp->link->sink_count); + if (dp_display->power_on) { + drm_dbg_dp(dp->drm_dev, "Link already setup, return\n"); + return 0; + } + + rc = dp_ctrl_on_stream(dp->ctrl, force_link_train); + if (!rc) + dp_display->power_on = true; + + return rc; +} + +static int dp_display_post_enable(struct msm_dp *dp_display) +{ + struct dp_display_private *dp; + u32 rate; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + rate = dp->link->link_params.rate; + + if (dp->audio_supported) { + dp->audio->bw_code = drm_dp_link_rate_to_bw_code(rate); + dp->audio->lane_count = dp->link->link_params.num_lanes; + } + + /* signal the connect event late to synchronize video and display */ + dp_display_handle_plugged_change(dp_display, true); + return 0; +} + +static int dp_display_disable(struct dp_display_private *dp) +{ + struct msm_dp *dp_display = &dp->dp_display; + + if (!dp_display->power_on) + return 0; + + /* wait only if audio was enabled */ + if (dp_display->audio_enabled) { + /* signal the disconnect event */ + dp_display_handle_plugged_change(dp_display, false); + if (!wait_for_completion_timeout(&dp->audio_comp, + HZ * 5)) + DRM_ERROR("audio comp timeout\n"); + } + + dp_display->audio_enabled = false; + + if (dp->link->sink_count == 0) { + /* + * irq_hpd with sink_count = 0: + * the HDMI sink was 
unplugged from the dongle + */ + dp_ctrl_off_link_stream(dp->ctrl); + } else { + /* + * unplug interrupt: + * the dongle was unplugged from the DUT + */ + dp_ctrl_off(dp->ctrl); + dp_display_host_phy_exit(dp); + } + + dp_display->power_on = false; + + drm_dbg_dp(dp->drm_dev, "sink count: %d\n", dp->link->sink_count); + return 0; +} + +int dp_display_set_plugged_cb(struct msm_dp *dp_display, + hdmi_codec_plugged_cb fn, struct device *codec_dev) +{ + bool plugged; + + dp_display->plugged_cb = fn; + dp_display->codec_dev = codec_dev; + plugged = dp_display->is_connected; + dp_display_handle_plugged_change(dp_display, plugged); + + return 0; +} + +/** + * dp_bridge_mode_valid - callback to determine if specified mode is valid + * @bridge: Pointer to drm bridge structure + * @info: display info + * @mode: Pointer to drm mode structure + * Returns: Validity status for specified mode + */ +enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + const u32 num_components = 3, default_bpp = 24; + struct dp_display_private *dp_display; + struct dp_link_info *link_info; + u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0; + struct msm_dp *dp; + int mode_pclk_khz = mode->clock; + + dp = to_dp_bridge(bridge)->dp_display; + + if (!dp || !mode_pclk_khz || !dp->connector) { + DRM_ERROR("invalid params\n"); + return -EINVAL; + } + + /* + * The eDP controller currently does not have a reliable way of + * enabling panel power to read sink capabilities. So, we rely + * on the panel driver to populate only supported modes for now. + */ + if (dp->is_edp) + return MODE_OK; + + if (mode->clock > DP_MAX_PIXEL_CLK_KHZ) + return MODE_CLOCK_HIGH; + + dp_display = container_of(dp, struct dp_display_private, dp_display); + link_info = &dp_display->panel->link_info; + + mode_bpp = dp->connector->display_info.bpc * num_components; + if (!mode_bpp) + mode_bpp = default_bpp; + + mode_bpp = dp_panel_get_mode_bpp(dp_display->panel, + mode_bpp, mode_pclk_khz); + + mode_rate_khz = mode_pclk_khz * mode_bpp; + supported_rate_khz = link_info->num_lanes * link_info->rate * 8; + + if (mode_rate_khz > supported_rate_khz) + return MODE_BAD; + + return MODE_OK; +} + +int dp_display_get_modes(struct msm_dp *dp) +{ + struct dp_display_private *dp_display; + + if (!dp) { + DRM_ERROR("invalid params\n"); + return 0; + } + + dp_display = container_of(dp, struct dp_display_private, dp_display); + + return dp_panel_get_modes(dp_display->panel, + dp->connector); +} + +bool dp_display_check_video_test(struct msm_dp *dp) +{ + struct dp_display_private *dp_display; + + dp_display = container_of(dp, struct dp_display_private, dp_display); + + return dp_display->panel->video_test; +} + +int dp_display_get_test_bpp(struct msm_dp *dp) +{ + struct dp_display_private *dp_display; + + if (!dp) { + DRM_ERROR("invalid params\n"); + return 0; + } + + dp_display = container_of(dp, struct dp_display_private, dp_display); + + return dp_link_bit_depth_to_bpp( + dp_display->link->test_video.test_bit_depth); +} + +void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp) +{ + struct dp_display_private *dp_display; + + dp_display = container_of(dp, struct dp_display_private, dp_display); + + /* + * To read registers, the link clocks need to be on; however, + * until a DP cable is connected this will not happen, as we + * do not know the resolution to power up with. Hence, check the 
+ * power_on status before dumping the DP registers, to avoid a + * crash due to unclocked access. + */ + mutex_lock(&dp_display->event_mutex); + + if (!dp->power_on) { + mutex_unlock(&dp_display->event_mutex); + return; + } + + dp_catalog_snapshot(dp_display->catalog, disp_state); + + mutex_unlock(&dp_display->event_mutex); +} + +static void dp_display_config_hpd(struct dp_display_private *dp) +{ + dp_display_host_init(dp); + dp_catalog_ctrl_hpd_config(dp->catalog); + + /* Enable plug and unplug interrupts only for external DisplayPort */ + if (!dp->dp_display.is_edp) + dp_catalog_hpd_config_intr(dp->catalog, + DP_DP_HPD_PLUG_INT_MASK | + DP_DP_HPD_UNPLUG_INT_MASK, + true); + + /* Enable the interrupt once at setup time; + * we leave the DP clocks on during disconnect + * and never disable the interrupt. + */ + enable_irq(dp->irq); +} + +static int hpd_event_thread(void *data) +{ + struct dp_display_private *dp_priv; + unsigned long flag; + struct dp_event *todo; + int timeout_mode = 0; + + dp_priv = (struct dp_display_private *)data; + + while (1) { + if (timeout_mode) { + wait_event_timeout(dp_priv->event_q, + (dp_priv->event_pndx == dp_priv->event_gndx) || + kthread_should_stop(), EVENT_TIMEOUT); + } else { + wait_event_interruptible(dp_priv->event_q, + (dp_priv->event_pndx != dp_priv->event_gndx) || + kthread_should_stop()); + } + + if (kthread_should_stop()) + break; + + spin_lock_irqsave(&dp_priv->event_lock, flag); + todo = &dp_priv->event_list[dp_priv->event_gndx]; + if (todo->delay) { + struct dp_event *todo_next; + + dp_priv->event_gndx++; + dp_priv->event_gndx %= DP_EVENT_Q_MAX; + + /* re-enter the delayed event into the queue */ + todo_next = &dp_priv->event_list[dp_priv->event_pndx++]; + dp_priv->event_pndx %= DP_EVENT_Q_MAX; + todo_next->event_id = todo->event_id; + todo_next->data = todo->data; + todo_next->delay = todo->delay - 1; + + /* clean up the older event */ + todo->event_id = EV_NO_EVENT; + todo->delay = 0; + + /* switch to timeout mode */ + timeout_mode = 1; + spin_unlock_irqrestore(&dp_priv->event_lock, flag); + continue; + } + + /* timed out with no events in the queue */ + if (dp_priv->event_pndx == dp_priv->event_gndx) { + spin_unlock_irqrestore(&dp_priv->event_lock, flag); + continue; + } + + dp_priv->event_gndx++; + dp_priv->event_gndx %= DP_EVENT_Q_MAX; + timeout_mode = 0; + spin_unlock_irqrestore(&dp_priv->event_lock, flag); + + switch (todo->event_id) { + case EV_HPD_INIT_SETUP: + dp_display_config_hpd(dp_priv); + break; + case EV_HPD_PLUG_INT: + dp_hpd_plug_handle(dp_priv, todo->data); + break; + case EV_HPD_UNPLUG_INT: + dp_hpd_unplug_handle(dp_priv, todo->data); + break; + case EV_IRQ_HPD_INT: + dp_irq_hpd_handle(dp_priv, todo->data); + break; + case EV_USER_NOTIFICATION: + dp_display_send_hpd_notification(dp_priv, + todo->data); + break; + default: + break; + } + } + + return 0; +} + +static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv) +{ + /* set event q to empty */ + dp_priv->event_gndx = 0; + dp_priv->event_pndx = 0; + + dp_priv->ev_tsk = kthread_run(hpd_event_thread, dp_priv, "dp_hpd_handler"); + if (IS_ERR(dp_priv->ev_tsk)) + return PTR_ERR(dp_priv->ev_tsk); + + return 0; +} + +static irqreturn_t dp_display_irq_handler(int irq, void *dev_id) +{ + struct dp_display_private *dp = dev_id; + irqreturn_t ret = IRQ_HANDLED; + u32 hpd_isr_status; + + if (!dp) { + DRM_ERROR("invalid data\n"); + return IRQ_NONE; + } + + hpd_isr_status = dp_catalog_hpd_get_intr_status(dp->catalog); + + if (hpd_isr_status & 0x0F) { + drm_dbg_dp(dp->drm_dev, "type=%d isr=0x%x\n", + 
dp->dp_display.connector_type, hpd_isr_status); + /* hpd related interrupts */ + if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK) + dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0); + + if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) { + dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0); + } + + if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) { + dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); + dp_add_event(dp, EV_HPD_PLUG_INT, 0, 3); + } + + if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK) + dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0); + } + + /* DP controller isr */ + dp_ctrl_isr(dp->ctrl); + + /* DP aux isr */ + dp_aux_isr(dp->aux); + + return ret; +} + +int dp_display_request_irq(struct msm_dp *dp_display) +{ + int rc = 0; + struct dp_display_private *dp; + + if (!dp_display) { + DRM_ERROR("invalid input\n"); + return -EINVAL; + } + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + dp->irq = irq_of_parse_and_map(dp->pdev->dev.of_node, 0); + if (!dp->irq) { + DRM_ERROR("failed to get irq\n"); + return -EINVAL; + } + + rc = devm_request_irq(dp_display->drm_dev->dev, dp->irq, + dp_display_irq_handler, + IRQF_TRIGGER_HIGH, "dp_display_isr", dp); + if (rc < 0) { + DRM_ERROR("failed to request IRQ%u: %d\n", + dp->irq, rc); + return rc; + } + disable_irq(dp->irq); + + return 0; +} + +static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pdev, + unsigned int *id) +{ + const struct msm_dp_config *cfg = of_device_get_match_data(&pdev->dev); + struct resource *res; + int i; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return NULL; + + for (i = 0; i < cfg->num_descs; i++) { + if (cfg->descs[i].io_start == res->start) { + *id = i; + return &cfg->descs[i]; + } + } + + dev_err(&pdev->dev, "unknown displayport instance\n"); + return NULL; +} + +static int dp_display_probe(struct platform_device *pdev) +{ + int rc = 0; + struct dp_display_private *dp; + const struct msm_dp_desc *desc; + + if (!pdev || !pdev->dev.of_node) { + DRM_ERROR("pdev not found\n"); + return -ENODEV; + } + + dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL); + if (!dp) + return -ENOMEM; + + desc = dp_display_get_desc(pdev, &dp->id); + if (!desc) + return -EINVAL; + + dp->pdev = pdev; + dp->name = "drm_dp"; + dp->dp_display.connector_type = desc->connector_type; + dp->wide_bus_en = desc->wide_bus_en; + dp->dp_display.is_edp = + (dp->dp_display.connector_type == DRM_MODE_CONNECTOR_eDP); + + rc = dp_init_sub_modules(dp); + if (rc) { + DRM_ERROR("init sub module failed\n"); + return -EPROBE_DEFER; + } + + /* setup event q */ + mutex_init(&dp->event_mutex); + init_waitqueue_head(&dp->event_q); + spin_lock_init(&dp->event_lock); + + /* Store DP audio handle inside DP display */ + dp->dp_display.dp_audio = dp->audio; + + init_completion(&dp->audio_comp); + + platform_set_drvdata(pdev, &dp->dp_display); + + rc = component_add(&pdev->dev, &dp_display_comp_ops); + if (rc) { + DRM_ERROR("component add failed, rc=%d\n", rc); + dp_display_deinit_sub_modules(dp); + } + + return rc; +} + +static int dp_display_remove(struct platform_device *pdev) +{ + struct dp_display_private *dp = dev_get_dp_display_private(&pdev->dev); + + component_del(&pdev->dev, &dp_display_comp_ops); + dp_display_deinit_sub_modules(dp); + + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static int dp_pm_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct msm_dp *dp_display = platform_get_drvdata(pdev); + struct dp_display_private *dp; + int sink_count = 0; + + dp = 
container_of(dp_display, struct dp_display_private, dp_display); + + mutex_lock(&dp->event_mutex); + + drm_dbg_dp(dp->drm_dev, + "Before, type=%d core_inited=%d phy_inited=%d power_on=%d\n", + dp->dp_display.connector_type, dp->core_initialized, + dp->phy_initialized, dp_display->power_on); + + /* start from disconnected state */ + dp->hpd_state = ST_DISCONNECTED; + + /* turn on dp ctrl/phy */ + dp_display_host_init(dp); + + dp_catalog_ctrl_hpd_config(dp->catalog); + + if (!dp->dp_display.is_edp) + dp_catalog_hpd_config_intr(dp->catalog, + DP_DP_HPD_PLUG_INT_MASK | + DP_DP_HPD_UNPLUG_INT_MASK, + true); + + if (dp_catalog_link_is_connected(dp->catalog)) { + /* + * set sink to normal operation mode -- D0 + * before dpcd read + */ + dp_display_host_phy_init(dp); + dp_link_psm_config(dp->link, &dp->panel->link_info, false); + sink_count = drm_dp_read_sink_count(dp->aux); + if (sink_count < 0) + sink_count = 0; + + dp_display_host_phy_exit(dp); + } + + dp->link->sink_count = sink_count; + /* + * The display cannot be declared connected unless the HDMI + * cable is plugged into the dongle and the dongle's + * sink_count becomes 1; + * also, the audio subsystem is only signalled on disconnect + */ + if (dp->link->sink_count) { + dp->dp_display.is_connected = true; + } else { + dp->dp_display.is_connected = false; + dp_display_handle_plugged_change(dp_display, false); + } + + drm_dbg_dp(dp->drm_dev, + "After, type=%d sink=%d conn=%d core_init=%d phy_init=%d power=%d\n", + dp->dp_display.connector_type, dp->link->sink_count, + dp->dp_display.is_connected, dp->core_initialized, + dp->phy_initialized, dp_display->power_on); + + mutex_unlock(&dp->event_mutex); + + return 0; +} + +static int dp_pm_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct msm_dp *dp_display = platform_get_drvdata(pdev); + struct dp_display_private *dp; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + mutex_lock(&dp->event_mutex); + + drm_dbg_dp(dp->drm_dev, + "Before, type=%d core_inited=%d phy_inited=%d power_on=%d\n", + dp->dp_display.connector_type, dp->core_initialized, + dp->phy_initialized, dp_display->power_on); + + /* mainlink enabled */ + if (dp_power_clk_status(dp->power, DP_CTRL_PM)) + dp_ctrl_off_link_stream(dp->ctrl); + + dp_display_host_phy_exit(dp); + + /* host_init will be called at pm_resume */ + dp_display_host_deinit(dp); + + dp->hpd_state = ST_SUSPENDED; + + drm_dbg_dp(dp->drm_dev, + "After, type=%d core_inited=%d phy_inited=%d power_on=%d\n", + dp->dp_display.connector_type, dp->core_initialized, + dp->phy_initialized, dp_display->power_on); + + mutex_unlock(&dp->event_mutex); + + return 0; +} + +static const struct dev_pm_ops dp_pm_ops = { + .suspend = dp_pm_suspend, + .resume = dp_pm_resume, +}; + +static struct platform_driver dp_display_driver = { + .probe = dp_display_probe, + .remove = dp_display_remove, + .driver = { + .name = "msm-dp-display", + .of_match_table = dp_dt_match, + .suppress_bind_attrs = true, + .pm = &dp_pm_ops, + }, +}; + +int __init msm_dp_register(void) +{ + int ret; + + ret = platform_driver_register(&dp_display_driver); + if (ret) + DRM_ERROR("DP display driver register failed\n"); + + return ret; +} + +void __exit msm_dp_unregister(void) +{ + platform_driver_unregister(&dp_display_driver); +} + +void msm_dp_irq_postinstall(struct msm_dp *dp_display) +{ + struct dp_display_private *dp; + + if (!dp_display) + return; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + if (!dp_display->is_edp) + dp_add_event(dp, 
EV_HPD_INIT_SETUP, 0, 100); +} + +bool msm_dp_wide_bus_available(const struct msm_dp *dp_display) +{ + struct dp_display_private *dp; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + + return dp->wide_bus_en; +} + +void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor) +{ + struct dp_display_private *dp; + struct device *dev; + int rc; + + dp = container_of(dp_display, struct dp_display_private, dp_display); + dev = &dp->pdev->dev; + + dp->debug = dp_debug_get(dev, dp->panel, dp->usbpd, + dp->link, dp->dp_display.connector, + minor); + if (IS_ERR(dp->debug)) { + rc = PTR_ERR(dp->debug); + DRM_ERROR("failed to initialize debug, rc = %d\n", rc); + dp->debug = NULL; + } +} + +static void of_dp_aux_depopulate_bus_void(void *data) +{ + of_dp_aux_depopulate_bus(data); +} + +static int dp_display_get_next_bridge(struct msm_dp *dp) +{ + int rc; + struct dp_display_private *dp_priv; + struct device_node *aux_bus; + struct device *dev; + + dp_priv = container_of(dp, struct dp_display_private, dp_display); + dev = &dp_priv->pdev->dev; + aux_bus = of_get_child_by_name(dev->of_node, "aux-bus"); + + if (aux_bus && dp->is_edp) { + dp_display_host_init(dp_priv); + dp_catalog_ctrl_hpd_config(dp_priv->catalog); + dp_display_host_phy_init(dp_priv); + enable_irq(dp_priv->irq); + + /* + * The code below assumes that the panel will finish probing + * by the time of_dp_aux_populate_bus() returns. + * This isn't a great assumption since it will fail if the + * panel driver is probed asynchronously but is the best we + * can do without a bigger driver reorganization. + */ + rc = of_dp_aux_populate_bus(dp_priv->aux, NULL); + of_node_put(aux_bus); + if (rc) + goto error; + + rc = devm_add_action_or_reset(dp->drm_dev->dev, + of_dp_aux_depopulate_bus_void, + dp_priv->aux); + if (rc) + goto error; + } else if (dp->is_edp) { + DRM_ERROR("eDP aux_bus not found\n"); + return -ENODEV; + } + + /* + * External bridges are mandatory for eDP interfaces: one has to + * provide at least an eDP panel (which gets wrapped into panel-bridge). + * + * For DisplayPort interfaces external bridges are optional, so + * silently ignore an error if one is not present (-ENODEV). 
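+ * In that case the DP interface simply comes up without a next bridge.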
+ */ + rc = devm_dp_parser_find_next_bridge(dp->drm_dev->dev, dp_priv->parser); + if (!dp->is_edp && rc == -ENODEV) + return 0; + + if (!rc) { + dp->next_bridge = dp_priv->parser->next_bridge; + return 0; + } + +error: + if (dp->is_edp) { + disable_irq(dp_priv->irq); + dp_display_host_phy_exit(dp_priv); + dp_display_host_deinit(dp_priv); + } + return rc; +} + +int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, + struct drm_encoder *encoder) +{ + struct msm_drm_private *priv; + struct dp_display_private *dp_priv; + int ret; + + if (WARN_ON(!encoder) || WARN_ON(!dp_display) || WARN_ON(!dev)) + return -EINVAL; + + priv = dev->dev_private; + + if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) { + DRM_DEV_ERROR(dev->dev, "too many bridges\n"); + return -ENOSPC; + } + + dp_display->drm_dev = dev; + + dp_priv = container_of(dp_display, struct dp_display_private, dp_display); + + ret = dp_display_request_irq(dp_display); + if (ret) { + DRM_ERROR("request_irq failed, ret=%d\n", ret); + return ret; + } + + ret = dp_display_get_next_bridge(dp_display); + if (ret) + return ret; + + dp_display->bridge = dp_bridge_init(dp_display, dev, encoder); + if (IS_ERR(dp_display->bridge)) { + ret = PTR_ERR(dp_display->bridge); + DRM_DEV_ERROR(dev->dev, + "failed to create dp bridge: %d\n", ret); + dp_display->bridge = NULL; + return ret; + } + + priv->bridges[priv->num_bridges++] = dp_display->bridge; + + dp_display->connector = dp_drm_connector_init(dp_display, encoder); + if (IS_ERR(dp_display->connector)) { + ret = PTR_ERR(dp_display->connector); + DRM_DEV_ERROR(dev->dev, + "failed to create dp connector: %d\n", ret); + dp_display->connector = NULL; + return ret; + } + + dp_priv->panel->connector = dp_display->connector; + + return 0; +} + +void dp_bridge_enable(struct drm_bridge *drm_bridge) +{ + struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = dp_bridge->dp_display; + int rc = 0; + struct dp_display_private *dp_display; + u32 state; + bool force_link_train = false; + + dp_display = container_of(dp, struct dp_display_private, dp_display); + if (!dp_display->dp_mode.drm_mode.clock) { + DRM_ERROR("invalid params\n"); + return; + } + + if (dp->is_edp) + dp_hpd_plug_handle(dp_display, 0); + + mutex_lock(&dp_display->event_mutex); + + state = dp_display->hpd_state; + if (state != ST_DISPLAY_OFF && state != ST_MAINLINK_READY) { + mutex_unlock(&dp_display->event_mutex); + return; + } + + rc = dp_display_set_mode(dp, &dp_display->dp_mode); + if (rc) { + DRM_ERROR("Failed to perform a mode set, rc=%d\n", rc); + mutex_unlock(&dp_display->event_mutex); + return; + } + + state = dp_display->hpd_state; + + if (state == ST_DISPLAY_OFF) { + dp_display_host_phy_init(dp_display); + force_link_train = true; + } + + dp_display_enable(dp_display, force_link_train); + + rc = dp_display_post_enable(dp); + if (rc) { + DRM_ERROR("DP display post enable failed, rc=%d\n", rc); + dp_display_disable(dp_display); + } + + /* completed connection */ + dp_display->hpd_state = ST_CONNECTED; + + drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type); + mutex_unlock(&dp_display->event_mutex); +} + +void dp_bridge_disable(struct drm_bridge *drm_bridge) +{ + struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = dp_bridge->dp_display; + struct dp_display_private *dp_display; + + dp_display = container_of(dp, struct dp_display_private, dp_display); + + dp_ctrl_push_idle(dp_display->ctrl); +} + +void dp_bridge_post_disable(struct drm_bridge *drm_bridge) +{ 
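+ /* + * For eDP, simulate an unplug first; then, under event_mutex, tear + * down the stream and move the HPD state machine to ST_DISCONNECTED + * (if a disconnect was pending) or ST_DISPLAY_OFF. + */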
+ struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = dp_bridge->dp_display; + u32 state; + struct dp_display_private *dp_display; + + dp_display = container_of(dp, struct dp_display_private, dp_display); + + if (dp->is_edp) + dp_hpd_unplug_handle(dp_display, 0); + + mutex_lock(&dp_display->event_mutex); + + state = dp_display->hpd_state; + if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED) { + mutex_unlock(&dp_display->event_mutex); + return; + } + + dp_display_disable(dp_display); + + state = dp_display->hpd_state; + if (state == ST_DISCONNECT_PENDING) { + /* completed disconnection */ + dp_display->hpd_state = ST_DISCONNECTED; + } else { + dp_display->hpd_state = ST_DISPLAY_OFF; + } + + drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type); + mutex_unlock(&dp_display->event_mutex); +} + +void dp_bridge_mode_set(struct drm_bridge *drm_bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode) +{ + struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge); + struct msm_dp *dp = dp_bridge->dp_display; + struct dp_display_private *dp_display; + + dp_display = container_of(dp, struct dp_display_private, dp_display); + + memset(&dp_display->dp_mode, 0x0, sizeof(struct dp_display_mode)); + + if (dp_display_check_video_test(dp)) + dp_display->dp_mode.bpp = dp_display_get_test_bpp(dp); + else /* Default num_components per pixel = 3 */ + dp_display->dp_mode.bpp = dp->connector->display_info.bpc * 3; + + if (!dp_display->dp_mode.bpp) + dp_display->dp_mode.bpp = 24; /* Default bpp */ + + drm_mode_copy(&dp_display->dp_mode.drm_mode, adjusted_mode); + + dp_display->dp_mode.v_active_low = + !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NVSYNC); + + dp_display->dp_mode.h_active_low = + !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NHSYNC); +} diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h new file mode 100644 index 000000000..dcedf021f --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_display.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_DISPLAY_H_ +#define _DP_DISPLAY_H_ + +#include "dp_panel.h" +#include <sound/hdmi-codec.h> +#include "disp/msm_disp_snapshot.h" + +struct msm_dp { + struct drm_device *drm_dev; + struct device *codec_dev; + struct drm_bridge *bridge; + struct drm_connector *connector; + struct drm_bridge *next_bridge; + bool is_connected; + bool audio_enabled; + bool power_on; + unsigned int connector_type; + bool is_edp; + + hdmi_codec_plugged_cb plugged_cb; + + bool wide_bus_en; + + u32 max_dp_lanes; + struct dp_audio *dp_audio; +}; + +int dp_display_set_plugged_cb(struct msm_dp *dp_display, + hdmi_codec_plugged_cb fn, struct device *codec_dev); +int dp_display_get_modes(struct msm_dp *dp_display); +int dp_display_request_irq(struct msm_dp *dp_display); +bool dp_display_check_video_test(struct msm_dp *dp_display); +int dp_display_get_test_bpp(struct msm_dp *dp_display); +void dp_display_signal_audio_start(struct msm_dp *dp_display); +void dp_display_signal_audio_complete(struct msm_dp *dp_display); + +#endif /* _DP_DISPLAY_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c new file mode 100644 index 000000000..6db82f9b0 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_drm.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+ */ + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_atomic.h> +#include <drm/drm_bridge.h> +#include <drm/drm_bridge_connector.h> +#include <drm/drm_crtc.h> + +#include "msm_drv.h" +#include "msm_kms.h" +#include "dp_drm.h" + +/** + * dp_bridge_detect - callback to determine if connector is connected + * @bridge: Pointer to drm bridge structure + * Returns: Bridge's 'is connected' status + */ +static enum drm_connector_status dp_bridge_detect(struct drm_bridge *bridge) +{ + struct msm_dp *dp; + + dp = to_dp_bridge(bridge)->dp_display; + + drm_dbg_dp(dp->drm_dev, "is_connected = %s\n", + (dp->is_connected) ? "true" : "false"); + + return (dp->is_connected) ? connector_status_connected : + connector_status_disconnected; +} + +static int dp_bridge_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct msm_dp *dp; + + dp = to_dp_bridge(bridge)->dp_display; + + drm_dbg_dp(dp->drm_dev, "is_connected = %s\n", + (dp->is_connected) ? "true" : "false"); + + /* + * There is no protection in the DRM framework to check if the display + * pipeline has already been disabled before trying to disable it again. + * Hence if the sink is unplugged, the pipeline gets disabled, but the + * crtc->active is still true. Any attempt to set the mode or manually + * disable this encoder will result in a crash. + * + * TODO: add support for telling the DRM subsystem that the pipeline is + * disabled by the hardware and thus all access to it should be forbidden. + * After that this piece of code can be removed. + */ + if (bridge->ops & DRM_BRIDGE_OP_HPD) + return (dp->is_connected) ? 0 : -ENOTCONN; + + return 0; +} + + +/** + * dp_bridge_get_modes - callback to add drm modes via drm_mode_probed_add() + * @bridge: Pointer to drm bridge + * @connector: Pointer to drm connector structure + * Returns: Number of modes added + */ +static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector) +{ + int rc = 0; + struct msm_dp *dp; + + if (!connector) + return 0; + + dp = to_dp_bridge(bridge)->dp_display; + + /* the pluggable case assumes the EDID is read when HPD is asserted */ + if (dp->is_connected) { + rc = dp_display_get_modes(dp); + if (rc <= 0) { + DRM_ERROR("failed to get DP sink modes, rc=%d\n", rc); + return rc; + } + } else { + drm_dbg_dp(connector->dev, "No sink connected\n"); + } + return rc; +} + +static const struct drm_bridge_funcs dp_bridge_ops = { + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .enable = dp_bridge_enable, + .disable = dp_bridge_disable, + .post_disable = dp_bridge_post_disable, + .mode_set = dp_bridge_mode_set, + .mode_valid = dp_bridge_mode_valid, + .get_modes = dp_bridge_get_modes, + .detect = dp_bridge_detect, + .atomic_check = dp_bridge_atomic_check, +}; + +struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, + struct drm_encoder *encoder) +{ + int rc; + struct msm_dp_bridge *dp_bridge; + struct drm_bridge *bridge; + + dp_bridge = devm_kzalloc(dev->dev, sizeof(*dp_bridge), GFP_KERNEL); + if (!dp_bridge) + return ERR_PTR(-ENOMEM); + + dp_bridge->dp_display = dp_display; + + bridge = &dp_bridge->bridge; + bridge->funcs = &dp_bridge_ops; + bridge->type = dp_display->connector_type; + + /* + * Many ops only make sense for DP. Why? + * - Detect/HPD are used by DRM to know if a display is _physically_ + * there, not whether the display is powered on / finished initting. 
+ * On eDP we assume the display is always there because you can't + * know until power is applied. If we don't implement the ops DRM will + * assume our display is always there. + * - Currently eDP mode reading is driven by the panel driver. This + * allows the panel driver to properly power itself on to read the + * modes. + */ + if (!dp_display->is_edp) { + bridge->ops = + DRM_BRIDGE_OP_DETECT | + DRM_BRIDGE_OP_HPD | + DRM_BRIDGE_OP_MODES; + } + + drm_bridge_add(bridge); + + rc = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (rc) { + DRM_ERROR("failed to attach bridge, rc=%d\n", rc); + drm_bridge_remove(bridge); + + return ERR_PTR(rc); + } + + if (dp_display->next_bridge) { + rc = drm_bridge_attach(encoder, + dp_display->next_bridge, bridge, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (rc < 0) { + DRM_ERROR("failed to attach panel bridge: %d\n", rc); + drm_bridge_remove(bridge); + return ERR_PTR(rc); + } + } + + return bridge; +} + +/* connector initialization */ +struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder) +{ + struct drm_connector *connector = NULL; + + connector = drm_bridge_connector_init(dp_display->drm_dev, encoder); + if (IS_ERR(connector)) + return connector; + + drm_connector_attach_encoder(connector, encoder); + + return connector; +} diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h new file mode 100644 index 000000000..82035dbb0 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_drm.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_DRM_H_ +#define _DP_DRM_H_ + +#include <linux/of.h> +#include <drm/drm_bridge.h> + +#include "msm_drv.h" +#include "dp_display.h" + +struct msm_dp_bridge { + struct drm_bridge bridge; + struct msm_dp *dp_display; +}; + +#define to_dp_bridge(x) container_of((x), struct msm_dp_bridge, bridge) + +struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder); +struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, + struct drm_encoder *encoder); + +void dp_bridge_enable(struct drm_bridge *drm_bridge); +void dp_bridge_disable(struct drm_bridge *drm_bridge); +void dp_bridge_post_disable(struct drm_bridge *drm_bridge); +enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode); +void dp_bridge_mode_set(struct drm_bridge *drm_bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode); + +#endif /* _DP_DRM_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c new file mode 100644 index 000000000..db98a1d43 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_hpd.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */ + +#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ + +#include <linux/slab.h> +#include <linux/device.h> + +#include "dp_hpd.h" + +/* DP specific VDM commands */ +#define DP_USBPD_VDM_STATUS 0x10 +#define DP_USBPD_VDM_CONFIGURE 0x11 + +/* USBPD-TypeC specific Macros */ +#define VDM_VERSION 0x0 +#define USB_C_DP_SID 0xFF01 + +struct dp_hpd_private { + struct device *dev; + struct dp_usbpd_cb *dp_cb; + struct dp_usbpd dp_usbpd; +}; + +int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd) +{ + int rc = 0; + struct dp_hpd_private *hpd_priv; + + hpd_priv = container_of(dp_usbpd, struct dp_hpd_private, + dp_usbpd); + + if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure + || !hpd_priv->dp_cb->disconnect) { + pr_err("hpd dp_cb not initialized\n"); + return -EINVAL; + } + if (hpd) + hpd_priv->dp_cb->configure(hpd_priv->dev); + else + hpd_priv->dp_cb->disconnect(hpd_priv->dev); + + return rc; +} + +struct dp_usbpd *dp_hpd_get(struct device *dev, struct dp_usbpd_cb *cb) +{ + struct dp_hpd_private *dp_hpd; + + if (!cb) { + pr_err("invalid cb data\n"); + return ERR_PTR(-EINVAL); + } + + dp_hpd = devm_kzalloc(dev, sizeof(*dp_hpd), GFP_KERNEL); + if (!dp_hpd) + return ERR_PTR(-ENOMEM); + + dp_hpd->dev = dev; + dp_hpd->dp_cb = cb; + + dp_hpd->dp_usbpd.connect = dp_hpd_connect; + + return &dp_hpd->dp_usbpd; +} diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.h b/drivers/gpu/drm/msm/dp/dp_hpd.h new file mode 100644 index 000000000..8feec5aa5 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_hpd.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_HPD_H_ +#define _DP_HPD_H_ + +//#include <linux/usb/usbpd.h> + +#include <linux/types.h> +#include <linux/device.h> + +enum plug_orientation { + ORIENTATION_NONE, + ORIENTATION_CC1, + ORIENTATION_CC2, +}; + +/** + * struct dp_usbpd - DisplayPort status + * + * @orientation: plug orientation configuration + * @low_pow_st: low power state + * @adaptor_dp_en: adaptor functionality enabled + * @multi_func: multi-function preferred + * @usb_config_req: request to switch to usb + * @exit_dp_mode: request exit from displayport mode + * @hpd_irq: Change in the status since last message + * @alt_mode_cfg_done: bool to specify alt mode status + * @debug_en: bool to specify debug mode + * @connect: simulate disconnect or connect for debug mode + */ +struct dp_usbpd { + enum plug_orientation orientation; + bool low_pow_st; + bool adaptor_dp_en; + bool multi_func; + bool usb_config_req; + bool exit_dp_mode; + bool hpd_irq; + bool alt_mode_cfg_done; + bool debug_en; + + int (*connect)(struct dp_usbpd *dp_usbpd, bool hpd); +}; + +/** + * struct dp_usbpd_cb - callback functions provided by the client + * + * @configure: called by usbpd module when PD communication has + * been completed and the usb peripheral has been configured on + * dp mode. + * @disconnect: notify the cable disconnect issued by usb. + * @attention: notify any attention message issued by usb. + */ +struct dp_usbpd_cb { + int (*configure)(struct device *dev); + int (*disconnect)(struct device *dev); + int (*attention)(struct device *dev); +}; + +/** + * dp_hpd_get() - setup hpd module + * + * @dev: device instance of the caller + * @cb: struct containing callback function pointers. + * + * This function allows the client to initialize the usbpd + * module. The module will communicate with the HPD module. 
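+ * + * Return: handle to &struct dp_usbpd on success, or an error pointer.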
+ */ +struct dp_usbpd *dp_hpd_get(struct device *dev, struct dp_usbpd_cb *cb); + +int dp_hpd_register(struct dp_usbpd *dp_usbpd); +void dp_hpd_unregister(struct dp_usbpd *dp_usbpd); +int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd); + +#endif /* _DP_HPD_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c new file mode 100644 index 000000000..cb66d1126 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_link.c @@ -0,0 +1,1225 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ + +#include <drm/drm_print.h> + +#include "dp_link.h" +#include "dp_panel.h" + +#define DP_TEST_REQUEST_MASK 0x7F + +enum audio_sample_rate { + AUDIO_SAMPLE_RATE_32_KHZ = 0x00, + AUDIO_SAMPLE_RATE_44_1_KHZ = 0x01, + AUDIO_SAMPLE_RATE_48_KHZ = 0x02, + AUDIO_SAMPLE_RATE_88_2_KHZ = 0x03, + AUDIO_SAMPLE_RATE_96_KHZ = 0x04, + AUDIO_SAMPLE_RATE_176_4_KHZ = 0x05, + AUDIO_SAMPLE_RATE_192_KHZ = 0x06, +}; + +enum audio_pattern_type { + AUDIO_TEST_PATTERN_OPERATOR_DEFINED = 0x00, + AUDIO_TEST_PATTERN_SAWTOOTH = 0x01, +}; + +struct dp_link_request { + u32 test_requested; + u32 test_link_rate; + u32 test_lane_count; +}; + +struct dp_link_private { + u32 prev_sink_count; + struct device *dev; + struct drm_device *drm_dev; + struct drm_dp_aux *aux; + struct dp_link dp_link; + + struct dp_link_request request; + struct mutex psm_mutex; + u8 link_status[DP_LINK_STATUS_SIZE]; +}; + +static int dp_aux_link_power_up(struct drm_dp_aux *aux, + struct dp_link_info *link) +{ + u8 value; + int err; + + if (link->revision < 0x11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D0; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + usleep_range(1000, 2000); + + return 0; +} + +static int dp_aux_link_power_down(struct drm_dp_aux *aux, + struct dp_link_info *link) +{ + u8 value; + int err; + + if (link->revision < 0x11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D3; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + return 0; +} + +static int dp_link_get_period(struct dp_link_private *link, int const addr) +{ + int ret = 0; + u8 data; + u32 const max_audio_period = 0xA; + + /* TEST_AUDIO_PERIOD_CH_XX */ + if (drm_dp_dpcd_readb(link->aux, addr, &data) < 0) { + DRM_ERROR("failed to read test_audio_period (0x%x)\n", addr); + ret = -EINVAL; + goto exit; + } + + /* Period - Bits 3:0 */ + data = data & 0xF; + if ((int)data > max_audio_period) { + DRM_ERROR("invalid test_audio_period = 0x%x\n", data); + ret = -EINVAL; + goto exit; + } + + ret = data; +exit: + return ret; +} + +static int dp_link_parse_audio_channel_period(struct dp_link_private *link) +{ + int ret = 0; + struct dp_link_test_audio *req = &link->dp_link.test_audio; + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH1); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_1 = ret; + drm_dbg_dp(link->drm_dev, "test_audio_period_ch_1 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH2); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_2 = ret; + drm_dbg_dp(link->drm_dev, "test_audio_period_ch_2 = 0x%x\n", ret); + + /* TEST_AUDIO_PERIOD_CH_3 (Byte 0x275) */ + ret = dp_link_get_period(link, 
DP_TEST_AUDIO_PERIOD_CH3); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_3 = ret; + drm_dbg_dp(link->drm_dev, "test_audio_period_ch_3 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH4); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_4 = ret; + drm_dbg_dp(link->drm_dev, "test_audio_period_ch_4 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH5); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_5 = ret; + drm_dbg_dp(link->drm_dev, "test_audio_period_ch_5 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH6); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_6 = ret; + drm_dbg_dp(link->drm_dev, "test_audio_period_ch_6 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH7); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_7 = ret; + drm_dbg_dp(link->drm_dev, "test_audio_period_ch_7 = 0x%x\n", ret); + + ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH8); + if (ret == -EINVAL) + goto exit; + + req->test_audio_period_ch_8 = ret; + drm_dbg_dp(link->drm_dev, "test_audio_period_ch_8 = 0x%x\n", ret); +exit: + return ret; +} + +static int dp_link_parse_audio_pattern_type(struct dp_link_private *link) +{ + int ret = 0; + u8 data; + ssize_t rlen; + int const max_audio_pattern_type = 0x1; + + rlen = drm_dp_dpcd_readb(link->aux, + DP_TEST_AUDIO_PATTERN_TYPE, &data); + if (rlen < 0) { + DRM_ERROR("failed to read audio pattern type. rlen=%zd\n", rlen); + return rlen; + } + + /* Audio Pattern Type - Bits 7:0 */ + if ((int)data > max_audio_pattern_type) { + DRM_ERROR("invalid audio pattern type = 0x%x\n", data); + ret = -EINVAL; + goto exit; + } + + link->dp_link.test_audio.test_audio_pattern_type = data; + drm_dbg_dp(link->drm_dev, "audio pattern type = 0x%x\n", data); +exit: + return ret; +} + +static int dp_link_parse_audio_mode(struct dp_link_private *link) +{ + int ret = 0; + u8 data; + ssize_t rlen; + int const max_audio_sampling_rate = 0x6; + int const max_audio_channel_count = 0x8; + int sampling_rate = 0x0; + int channel_count = 0x0; + + rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_AUDIO_MODE, &data); + if (rlen < 0) { + DRM_ERROR("failed to read link audio mode. 
rlen=%zd\n", rlen); + return rlen; + } + + /* Sampling Rate - Bits 3:0 */ + sampling_rate = data & 0xF; + if (sampling_rate > max_audio_sampling_rate) { + DRM_ERROR("sampling rate (0x%x) greater than max (0x%x)\n", + sampling_rate, max_audio_sampling_rate); + ret = -EINVAL; + goto exit; + } + + /* Channel Count - Bits 7:4 */ + channel_count = ((data & 0xF0) >> 4) + 1; + if (channel_count > max_audio_channel_count) { + DRM_ERROR("channel_count (0x%x) greater than max (0x%x)\n", + channel_count, max_audio_channel_count); + ret = -EINVAL; + goto exit; + } + + link->dp_link.test_audio.test_audio_sampling_rate = sampling_rate; + link->dp_link.test_audio.test_audio_channel_count = channel_count; + drm_dbg_dp(link->drm_dev, + "sampling_rate = 0x%x, channel_count = 0x%x\n", + sampling_rate, channel_count); +exit: + return ret; +} + +static int dp_link_parse_audio_pattern_params(struct dp_link_private *link) +{ + int ret = 0; + + ret = dp_link_parse_audio_mode(link); + if (ret) + goto exit; + + ret = dp_link_parse_audio_pattern_type(link); + if (ret) + goto exit; + + ret = dp_link_parse_audio_channel_period(link); + +exit: + return ret; +} + +static bool dp_link_is_video_pattern_valid(u32 pattern) +{ + switch (pattern) { + case DP_NO_TEST_PATTERN: + case DP_COLOR_RAMP: + case DP_BLACK_AND_WHITE_VERTICAL_LINES: + case DP_COLOR_SQUARE: + return true; + default: + return false; + } +} + +/** + * dp_link_is_bit_depth_valid() - validates the bit depth requested + * @tbd: bit depth requested by the sink + * + * Returns true if the requested bit depth is supported. + */ +static bool dp_link_is_bit_depth_valid(u32 tbd) +{ + /* DP_TEST_VIDEO_PATTERN_NONE is treated as invalid */ + switch (tbd) { + case DP_TEST_BIT_DEPTH_6: + case DP_TEST_BIT_DEPTH_8: + case DP_TEST_BIT_DEPTH_10: + return true; + default: + return false; + } +} + +static int dp_link_parse_timing_params1(struct dp_link_private *link, + int addr, int len, u32 *val) +{ + u8 bp[2]; + int rlen; + + if (len != 2) + return -EINVAL; + + /* Read the two-byte timing parameter at the given DPCD address. */ + rlen = drm_dp_dpcd_read(link->aux, addr, bp, len); + if (rlen < len) { + DRM_ERROR("failed to read 0x%x\n", addr); + return -EINVAL; + } + + *val = bp[1] | (bp[0] << 8); + + return 0; +} + +static int dp_link_parse_timing_params2(struct dp_link_private *link, + int addr, int len, + u32 *val1, u32 *val2) +{ + u8 bp[2]; + int rlen; + + if (len != 2) + return -EINVAL; + + /* Read the two-byte timing parameter at the given DPCD address. */ + rlen = drm_dp_dpcd_read(link->aux, addr, bp, len); + if (rlen < len) { + DRM_ERROR("failed to read 0x%x\n", addr); + return -EINVAL; + } + + *val1 = (bp[0] & BIT(7)) >> 7; + *val2 = bp[1] | ((bp[0] & 0x7F) << 8); + + return 0; +} + +static int dp_link_parse_timing_params3(struct dp_link_private *link, + int addr, u32 *val) +{ + u8 bp; + u32 len = 1; + int rlen; + + rlen = drm_dp_dpcd_read(link->aux, addr, &bp, len); + if (rlen < 1) { + DRM_ERROR("failed to read 0x%x\n", addr); + return -EINVAL; + } + *val = bp; + + return 0; +} + +/** + * dp_link_parse_video_pattern_params() - parses video pattern parameters from DPCD + * @link: Display Port Driver data + * + * Returns 0 if it successfully parses the video link pattern and the link + * bit depth requested by the sink, and if the parsed values are valid. 
+ */ +static int dp_link_parse_video_pattern_params(struct dp_link_private *link) +{ + int ret = 0; + ssize_t rlen; + u8 bp; + + rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_PATTERN, &bp); + if (rlen < 0) { + DRM_ERROR("failed to read link video pattern. rlen=%zd\n", + rlen); + return rlen; + } + + if (!dp_link_is_video_pattern_valid(bp)) { + DRM_ERROR("invalid link video pattern = 0x%x\n", bp); + ret = -EINVAL; + return ret; + } + + link->dp_link.test_video.test_video_pattern = bp; + + /* Read the requested color bit depth and dynamic range (Byte 0x232) */ + rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_MISC0, &bp); + if (rlen < 0) { + DRM_ERROR("failed to read link bit depth. rlen=%zd\n", rlen); + return rlen; + } + + /* Dynamic Range */ + link->dp_link.test_video.test_dyn_range = + (bp & DP_TEST_DYNAMIC_RANGE_CEA); + + /* Color bit depth */ + bp &= DP_TEST_BIT_DEPTH_MASK; + if (!dp_link_is_bit_depth_valid(bp)) { + DRM_ERROR("invalid link bit depth = 0x%x\n", bp); + ret = -EINVAL; + return ret; + } + + link->dp_link.test_video.test_bit_depth = bp; + + /* resolution timing params */ + ret = dp_link_parse_timing_params1(link, DP_TEST_H_TOTAL_HI, 2, + &link->dp_link.test_video.test_h_total); + if (ret) { + DRM_ERROR("failed to parse test_htotal(DP_TEST_H_TOTAL_HI)\n"); + return ret; + } + + ret = dp_link_parse_timing_params1(link, DP_TEST_V_TOTAL_HI, 2, + &link->dp_link.test_video.test_v_total); + if (ret) { + DRM_ERROR("failed to parse test_v_total(DP_TEST_V_TOTAL_HI)\n"); + return ret; + } + + ret = dp_link_parse_timing_params1(link, DP_TEST_H_START_HI, 2, + &link->dp_link.test_video.test_h_start); + if (ret) { + DRM_ERROR("failed to parse test_h_start(DP_TEST_H_START_HI)\n"); + return ret; + } + + ret = dp_link_parse_timing_params1(link, DP_TEST_V_START_HI, 2, + &link->dp_link.test_video.test_v_start); + if (ret) { + DRM_ERROR("failed to parse test_v_start(DP_TEST_V_START_HI)\n"); + return ret; + } + + ret = dp_link_parse_timing_params2(link, DP_TEST_HSYNC_HI, 2, + &link->dp_link.test_video.test_hsync_pol, + &link->dp_link.test_video.test_hsync_width); + if (ret) { + DRM_ERROR("failed to parse (DP_TEST_HSYNC_HI)\n"); + return ret; + } + + ret = dp_link_parse_timing_params2(link, DP_TEST_VSYNC_HI, 2, + &link->dp_link.test_video.test_vsync_pol, + &link->dp_link.test_video.test_vsync_width); + if (ret) { + DRM_ERROR("failed to parse (DP_TEST_VSYNC_HI)\n"); + return ret; + } + + ret = dp_link_parse_timing_params1(link, DP_TEST_H_WIDTH_HI, 2, + &link->dp_link.test_video.test_h_width); + if (ret) { + DRM_ERROR("failed to parse test_h_width(DP_TEST_H_WIDTH_HI)\n"); + return ret; + } + + ret = dp_link_parse_timing_params1(link, DP_TEST_V_HEIGHT_HI, 2, + &link->dp_link.test_video.test_v_height); + if (ret) { + DRM_ERROR("failed to parse test_v_height\n"); + return ret; + } + + ret = dp_link_parse_timing_params3(link, DP_TEST_MISC1, + &link->dp_link.test_video.test_rr_d); + link->dp_link.test_video.test_rr_d &= DP_TEST_REFRESH_DENOMINATOR; + if (ret) { + DRM_ERROR("failed to parse test_rr_d (DP_TEST_MISC1)\n"); + return ret; + } + + ret = dp_link_parse_timing_params3(link, DP_TEST_REFRESH_RATE_NUMERATOR, + &link->dp_link.test_video.test_rr_n); + if (ret) { + DRM_ERROR("failed to parse test_rr_n\n"); + return ret; + } + + drm_dbg_dp(link->drm_dev, + "link video pattern = 0x%x\n" + "link dynamic range = 0x%x\n" + "link bit depth = 0x%x\n" + "TEST_H_TOTAL = %d, TEST_V_TOTAL = %d\n" + "TEST_H_START = %d, TEST_V_START = %d\n" + "TEST_HSYNC_POL = %d\n" + "TEST_HSYNC_WIDTH = %d\n" + "TEST_VSYNC_POL = %d\n" 
+ "TEST_VSYNC_WIDTH = %d\n" + "TEST_H_WIDTH = %d\n" + "TEST_V_HEIGHT = %d\n" + "TEST_REFRESH_DENOMINATOR = %d\n" + "TEST_REFRESH_NUMERATOR = %d\n", + link->dp_link.test_video.test_video_pattern, + link->dp_link.test_video.test_dyn_range, + link->dp_link.test_video.test_bit_depth, + link->dp_link.test_video.test_h_total, + link->dp_link.test_video.test_v_total, + link->dp_link.test_video.test_h_start, + link->dp_link.test_video.test_v_start, + link->dp_link.test_video.test_hsync_pol, + link->dp_link.test_video.test_hsync_width, + link->dp_link.test_video.test_vsync_pol, + link->dp_link.test_video.test_vsync_width, + link->dp_link.test_video.test_h_width, + link->dp_link.test_video.test_v_height, + link->dp_link.test_video.test_rr_d, + link->dp_link.test_video.test_rr_n); + + return ret; +} + +/** + * dp_link_parse_link_training_params() - parses link training parameters from + * DPCD + * @link: Display Port Driver data + * + * Returns 0 if it successfully parses the link rate (Byte 0x219) and lane + * count (Byte 0x220), and if these values parse are valid. + */ +static int dp_link_parse_link_training_params(struct dp_link_private *link) +{ + u8 bp; + ssize_t rlen; + + rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_LINK_RATE, &bp); + if (rlen < 0) { + DRM_ERROR("failed to read link rate. rlen=%zd\n", rlen); + return rlen; + } + + if (!is_link_rate_valid(bp)) { + DRM_ERROR("invalid link rate = 0x%x\n", bp); + return -EINVAL; + } + + link->request.test_link_rate = bp; + drm_dbg_dp(link->drm_dev, "link rate = 0x%x\n", + link->request.test_link_rate); + + rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_LANE_COUNT, &bp); + if (rlen < 0) { + DRM_ERROR("failed to read lane count. rlen=%zd\n", rlen); + return rlen; + } + bp &= DP_MAX_LANE_COUNT_MASK; + + if (!is_lane_count_valid(bp)) { + DRM_ERROR("invalid lane count = 0x%x\n", bp); + return -EINVAL; + } + + link->request.test_lane_count = bp; + drm_dbg_dp(link->drm_dev, "lane count = 0x%x\n", + link->request.test_lane_count); + return 0; +} + +/** + * dp_link_parse_phy_test_params() - parses the phy link parameters + * @link: Display Port Driver data + * + * Parses the DPCD (Byte 0x248) for the DP PHY link pattern that is being + * requested. + */ +static int dp_link_parse_phy_test_params(struct dp_link_private *link) +{ + u8 data; + ssize_t rlen; + + rlen = drm_dp_dpcd_readb(link->aux, DP_PHY_TEST_PATTERN, + &data); + if (rlen < 0) { + DRM_ERROR("failed to read phy link pattern. rlen=%zd\n", rlen); + return rlen; + } + + link->dp_link.phy_params.phy_test_pattern_sel = data & 0x07; + + drm_dbg_dp(link->drm_dev, "phy_test_pattern_sel = 0x%x\n", data); + + switch (data) { + case DP_PHY_TEST_PATTERN_SEL_MASK: + case DP_PHY_TEST_PATTERN_NONE: + case DP_PHY_TEST_PATTERN_D10_2: + case DP_PHY_TEST_PATTERN_ERROR_COUNT: + case DP_PHY_TEST_PATTERN_PRBS7: + case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: + case DP_PHY_TEST_PATTERN_CP2520: + return 0; + default: + return -EINVAL; + } +} + +/** + * dp_link_is_video_audio_test_requested() - checks for audio/video link request + * @link: link requested by the sink + * + * Returns true if the requested link is a permitted audio/video link. 
+ */ +static bool dp_link_is_video_audio_test_requested(u32 link) +{ + u8 video_audio_test = (DP_TEST_LINK_VIDEO_PATTERN | + DP_TEST_LINK_AUDIO_PATTERN | + DP_TEST_LINK_AUDIO_DISABLED_VIDEO); + + return ((link & video_audio_test) && + !(link & ~video_audio_test)); +} + +/** + * dp_link_parse_request() - parses link request parameters from sink + * @link: Display Port Driver data + * + * Parses the DPCD to check if an automated link is requested (Byte 0x201), + * and what type of link automation is being requested (Byte 0x218). + */ +static int dp_link_parse_request(struct dp_link_private *link) +{ + int ret = 0; + u8 data; + ssize_t rlen; + + /** + * Read the device service IRQ vector (Byte 0x201) to determine + * whether an automated link has been requested by the sink. + */ + rlen = drm_dp_dpcd_readb(link->aux, + DP_DEVICE_SERVICE_IRQ_VECTOR, &data); + if (rlen < 0) { + DRM_ERROR("aux read failed. rlen=%zd\n", rlen); + return rlen; + } + + drm_dbg_dp(link->drm_dev, "device service irq vector = 0x%x\n", data); + + if (!(data & DP_AUTOMATED_TEST_REQUEST)) { + drm_dbg_dp(link->drm_dev, "no test requested\n"); + return 0; + } + + /** + * Read the link request byte (Byte 0x218) to determine what type + * of automated link has been requested by the sink. + */ + rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_REQUEST, &data); + if (rlen < 0) { + DRM_ERROR("aux read failed. rlen=%zd\n", rlen); + return rlen; + } + + if (!data || (data == DP_TEST_LINK_FAUX_PATTERN)) { + drm_dbg_dp(link->drm_dev, "link 0x%x not supported\n", data); + goto end; + } + + drm_dbg_dp(link->drm_dev, "Test:(0x%x) requested\n", data); + link->request.test_requested = data; + if (link->request.test_requested == DP_TEST_LINK_PHY_TEST_PATTERN) { + ret = dp_link_parse_phy_test_params(link); + if (ret) + goto end; + ret = dp_link_parse_link_training_params(link); + if (ret) + goto end; + } + + if (link->request.test_requested == DP_TEST_LINK_TRAINING) { + ret = dp_link_parse_link_training_params(link); + if (ret) + goto end; + } + + if (dp_link_is_video_audio_test_requested( + link->request.test_requested)) { + ret = dp_link_parse_video_pattern_params(link); + if (ret) + goto end; + + ret = dp_link_parse_audio_pattern_params(link); + } +end: + /* + * Send a DP_TEST_ACK if all link parameters are valid, otherwise send + * a DP_TEST_NAK. + */ + if (ret) { + link->dp_link.test_response = DP_TEST_NAK; + } else { + if (link->request.test_requested != DP_TEST_LINK_EDID_READ) + link->dp_link.test_response = DP_TEST_ACK; + else + link->dp_link.test_response = + DP_TEST_EDID_CHECKSUM_WRITE; + } + + return ret; +} + +/** + * dp_link_parse_sink_count() - parses the sink count + * @dp_link: pointer to link module data + * + * Parses the DPCD to check if there is an update to the sink count + * (Byte 0x200), and whether all the sink devices connected have Content + * Protection enabled. + */ +static int dp_link_parse_sink_count(struct dp_link *dp_link) +{ + ssize_t rlen; + bool cp_ready; + + struct dp_link_private *link = container_of(dp_link, + struct dp_link_private, dp_link); + + rlen = drm_dp_dpcd_readb(link->aux, DP_SINK_COUNT, + &link->dp_link.sink_count); + if (rlen < 0) { + DRM_ERROR("sink count read failed. 
+/**
+ * dp_link_parse_sink_count() - parses the sink count
+ * @dp_link: pointer to link module data
+ *
+ * Parses the DPCD to check if there is an update to the sink count
+ * (Byte 0x200), and whether all the sink devices connected have Content
+ * Protection enabled.
+ */
+static int dp_link_parse_sink_count(struct dp_link *dp_link)
+{
+	ssize_t rlen;
+	bool cp_ready;
+
+	struct dp_link_private *link = container_of(dp_link,
+			struct dp_link_private, dp_link);
+
+	rlen = drm_dp_dpcd_readb(link->aux, DP_SINK_COUNT,
+				 &link->dp_link.sink_count);
+	if (rlen < 0) {
+		DRM_ERROR("sink count read failed. rlen=%zd\n", rlen);
+		return rlen;
+	}
+
+	cp_ready = link->dp_link.sink_count & DP_SINK_CP_READY;
+
+	link->dp_link.sink_count =
+		DP_GET_SINK_COUNT(link->dp_link.sink_count);
+
+	drm_dbg_dp(link->drm_dev, "sink_count = 0x%x, cp_ready = 0x%x\n",
+				link->dp_link.sink_count, cp_ready);
+	return 0;
+}
+
+static int dp_link_parse_sink_status_field(struct dp_link_private *link)
+{
+	int len = 0;
+
+	link->prev_sink_count = link->dp_link.sink_count;
+	len = dp_link_parse_sink_count(&link->dp_link);
+	if (len < 0) {
+		DRM_ERROR("DP parse sink count failed\n");
+		return len;
+	}
+
+	len = drm_dp_dpcd_read_link_status(link->aux,
+		link->link_status);
+	if (len < DP_LINK_STATUS_SIZE) {
+		DRM_ERROR("DP link status read failed\n");
+		return len;
+	}
+
+	return dp_link_parse_request(link);
+}
+
+/**
+ * dp_link_process_link_training_request() - processes new training requests
+ * @link: Display Port link data
+ *
+ * This function will handle new link training requests that are initiated by
+ * the sink. In particular, it will update the requested lane count and link
+ * rate so that the caller can trigger the link retraining procedure.
+ *
+ * The function will return 0 if a link training request has been processed,
+ * otherwise it will return -EINVAL.
+ */
+static int dp_link_process_link_training_request(struct dp_link_private *link)
+{
+	if (link->request.test_requested != DP_TEST_LINK_TRAINING)
+		return -EINVAL;
+
+	drm_dbg_dp(link->drm_dev,
+			"Test:0x%x link rate = 0x%x, lane count = 0x%x\n",
+			DP_TEST_LINK_TRAINING,
+			link->request.test_link_rate,
+			link->request.test_lane_count);
+
+	link->dp_link.link_params.num_lanes = link->request.test_lane_count;
+	link->dp_link.link_params.rate =
+		drm_dp_bw_code_to_link_rate(link->request.test_link_rate);
+
+	return 0;
+}
+
+bool dp_link_send_test_response(struct dp_link *dp_link)
+{
+	struct dp_link_private *link = NULL;
+	int ret = 0;
+
+	if (!dp_link) {
+		DRM_ERROR("invalid input\n");
+		return false;
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_RESPONSE,
+			dp_link->test_response);
+
+	return ret == 1;
+}
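+
+/*
+ * Editor's example: DP_SINK_COUNT mixes the sink count (bits 5:0, plus
+ * bit 7 on newer sinks, which DP_GET_SINK_COUNT() handles) with the
+ * CP_READY flag in bit 6, which is why dp_link_parse_sink_count() above
+ * masks the raw byte before storing it.
+ */
+#if 0	/* illustrative sketch only */
+	u8 raw = 0x41;				/* CP_READY | count of 1 */
+	bool cp_ready = raw & DP_SINK_CP_READY;	/* true */
+	u8 count = DP_GET_SINK_COUNT(raw);	/* 1 */
+#endif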
+ "enter" : "exit"); + else + dp_link->psm_enabled = enable; + + mutex_unlock(&link->psm_mutex); + return ret; +} + +bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum) +{ + struct dp_link_private *link = NULL; + int ret = 0; + + if (!dp_link) { + DRM_ERROR("invalid input\n"); + return false; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_EDID_CHECKSUM, + checksum); + return ret == 1; +} + +static void dp_link_parse_vx_px(struct dp_link_private *link) +{ + drm_dbg_dp(link->drm_dev, "vx: 0=%d, 1=%d, 2=%d, 3=%d\n", + drm_dp_get_adjust_request_voltage(link->link_status, 0), + drm_dp_get_adjust_request_voltage(link->link_status, 1), + drm_dp_get_adjust_request_voltage(link->link_status, 2), + drm_dp_get_adjust_request_voltage(link->link_status, 3)); + + drm_dbg_dp(link->drm_dev, "px: 0=%d, 1=%d, 2=%d, 3=%d\n", + drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0), + drm_dp_get_adjust_request_pre_emphasis(link->link_status, 1), + drm_dp_get_adjust_request_pre_emphasis(link->link_status, 2), + drm_dp_get_adjust_request_pre_emphasis(link->link_status, 3)); + + /** + * Update the voltage and pre-emphasis levels as per DPCD request + * vector. + */ + drm_dbg_dp(link->drm_dev, + "Current: v_level = 0x%x, p_level = 0x%x\n", + link->dp_link.phy_params.v_level, + link->dp_link.phy_params.p_level); + link->dp_link.phy_params.v_level = + drm_dp_get_adjust_request_voltage(link->link_status, 0); + link->dp_link.phy_params.p_level = + drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0); + + link->dp_link.phy_params.p_level >>= DP_TRAIN_PRE_EMPHASIS_SHIFT; + + drm_dbg_dp(link->drm_dev, + "Requested: v_level = 0x%x, p_level = 0x%x\n", + link->dp_link.phy_params.v_level, + link->dp_link.phy_params.p_level); +} + +/** + * dp_link_process_phy_test_pattern_request() - process new phy link requests + * @link: Display Port Driver data + * + * This function will handle new phy link pattern requests that are initiated + * by the sink. The function will return 0 if a phy link pattern has been + * processed, otherwise it will return -EINVAL. + */ +static int dp_link_process_phy_test_pattern_request( + struct dp_link_private *link) +{ + if (!(link->request.test_requested & DP_TEST_LINK_PHY_TEST_PATTERN)) { + drm_dbg_dp(link->drm_dev, "no phy test\n"); + return -EINVAL; + } + + if (!is_link_rate_valid(link->request.test_link_rate) || + !is_lane_count_valid(link->request.test_lane_count)) { + DRM_ERROR("Invalid: link rate = 0x%x,lane count = 0x%x\n", + link->request.test_link_rate, + link->request.test_lane_count); + return -EINVAL; + } + + drm_dbg_dp(link->drm_dev, + "Current: rate = 0x%x, lane count = 0x%x\n", + link->dp_link.link_params.rate, + link->dp_link.link_params.num_lanes); + + drm_dbg_dp(link->drm_dev, + "Requested: rate = 0x%x, lane count = 0x%x\n", + link->request.test_link_rate, + link->request.test_lane_count); + + link->dp_link.link_params.num_lanes = link->request.test_lane_count; + link->dp_link.link_params.rate = + drm_dp_bw_code_to_link_rate(link->request.test_link_rate); + + dp_link_parse_vx_px(link); + + return 0; +} + +static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r) +{ + return link_status[r - DP_LANE0_1_STATUS]; +} + +/** + * dp_link_process_link_status_update() - processes link status updates + * @link: Display Port link module data + * + * This function will check for changes in the link status, e.g. 
+/**
+ * dp_link_process_link_status_update() - processes link status updates
+ * @link: Display Port link module data
+ *
+ * This function will check for changes in the link status, e.g. clock
+ * recovery done on all lanes, and trigger link training if there is a
+ * failure/error on the link.
+ *
+ * The function will return 0 if a link status update has been processed,
+ * otherwise it will return -EINVAL.
+ */
+static int dp_link_process_link_status_update(struct dp_link_private *link)
+{
+	bool channel_eq_done = drm_dp_channel_eq_ok(link->link_status,
+			link->dp_link.link_params.num_lanes);
+
+	bool clock_recovery_done = drm_dp_clock_recovery_ok(link->link_status,
+			link->dp_link.link_params.num_lanes);
+
+	drm_dbg_dp(link->drm_dev,
+			"channel_eq_done = %d, clock_recovery_done = %d\n",
+			channel_eq_done, clock_recovery_done);
+
+	if (channel_eq_done && clock_recovery_done)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * dp_link_process_ds_port_status_change() - process port status changes
+ * @link: Display Port Driver data
+ *
+ * This function will handle downstream port updates that are initiated by
+ * the sink. If the downstream port status has changed, the EDID is read via
+ * AUX.
+ *
+ * The function will return 0 if a downstream port update has been
+ * processed, otherwise it will return -EINVAL.
+ */
+static int dp_link_process_ds_port_status_change(struct dp_link_private *link)
+{
+	if (get_link_status(link->link_status, DP_LANE_ALIGN_STATUS_UPDATED) &
+					DP_DOWNSTREAM_PORT_STATUS_CHANGED)
+		goto reset;
+
+	if (link->prev_sink_count == link->dp_link.sink_count)
+		return -EINVAL;
+
+reset:
+	/* reset prev_sink_count */
+	link->prev_sink_count = link->dp_link.sink_count;
+
+	return 0;
+}
+
+static bool dp_link_is_video_pattern_requested(struct dp_link_private *link)
+{
+	return (link->request.test_requested & DP_TEST_LINK_VIDEO_PATTERN)
+		&& !(link->request.test_requested &
+				DP_TEST_LINK_AUDIO_DISABLED_VIDEO);
+}
+
+static bool dp_link_is_audio_pattern_requested(struct dp_link_private *link)
+{
+	return (link->request.test_requested & DP_TEST_LINK_AUDIO_PATTERN);
+}
+
+static void dp_link_reset_data(struct dp_link_private *link)
+{
+	link->request = (const struct dp_link_request){ 0 };
+	link->dp_link.test_video = (const struct dp_link_test_video){ 0 };
+	link->dp_link.test_video.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN;
+	link->dp_link.test_audio = (const struct dp_link_test_audio){ 0 };
+	link->dp_link.phy_params.phy_test_pattern_sel = 0;
+	link->dp_link.sink_request = 0;
+	link->dp_link.test_response = 0;
+}
+
+/**
+ * dp_link_process_request() - handle HPD IRQ transition to HIGH
+ * @dp_link: pointer to link module data
+ *
+ * This function will handle the HPD IRQ state transitions from LOW to HIGH
+ * (including cases when there are back to back HPD IRQ HIGH) indicating
+ * the start of a new link training request or sink status update.
+ */ +int dp_link_process_request(struct dp_link *dp_link) +{ + int ret = 0; + struct dp_link_private *link; + + if (!dp_link) { + DRM_ERROR("invalid input\n"); + return -EINVAL; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + dp_link_reset_data(link); + + ret = dp_link_parse_sink_status_field(link); + if (ret) + return ret; + + if (link->request.test_requested == DP_TEST_LINK_EDID_READ) { + dp_link->sink_request |= DP_TEST_LINK_EDID_READ; + } else if (!dp_link_process_ds_port_status_change(link)) { + dp_link->sink_request |= DS_PORT_STATUS_CHANGED; + } else if (!dp_link_process_link_training_request(link)) { + dp_link->sink_request |= DP_TEST_LINK_TRAINING; + } else if (!dp_link_process_phy_test_pattern_request(link)) { + dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN; + } else { + ret = dp_link_process_link_status_update(link); + if (!ret) { + dp_link->sink_request |= DP_LINK_STATUS_UPDATED; + } else { + if (dp_link_is_video_pattern_requested(link)) { + ret = 0; + dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN; + } + if (dp_link_is_audio_pattern_requested(link)) { + dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN; + ret = -EINVAL; + } + } + } + + drm_dbg_dp(link->drm_dev, "sink request=%#x\n", + dp_link->sink_request); + return ret; +} + +int dp_link_get_colorimetry_config(struct dp_link *dp_link) +{ + u32 cc; + struct dp_link_private *link; + + if (!dp_link) { + DRM_ERROR("invalid input\n"); + return -EINVAL; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + /* + * Unless a video pattern CTS test is ongoing, use RGB_VESA + * Only RGB_VESA and RGB_CEA supported for now + */ + if (dp_link_is_video_pattern_requested(link)) + cc = link->dp_link.test_video.test_dyn_range; + else + cc = DP_TEST_DYNAMIC_RANGE_VESA; + + return cc; +} + +int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status) +{ + int i; + int v_max = 0, p_max = 0; + struct dp_link_private *link; + + if (!dp_link) { + DRM_ERROR("invalid input\n"); + return -EINVAL; + } + + link = container_of(dp_link, struct dp_link_private, dp_link); + + /* use the max level across lanes */ + for (i = 0; i < dp_link->link_params.num_lanes; i++) { + u8 data_v = drm_dp_get_adjust_request_voltage(link_status, i); + u8 data_p = drm_dp_get_adjust_request_pre_emphasis(link_status, + i); + drm_dbg_dp(link->drm_dev, + "lane=%d req_vol_swing=%d req_pre_emphasis=%d\n", + i, data_v, data_p); + if (v_max < data_v) + v_max = data_v; + if (p_max < data_p) + p_max = data_p; + } + + dp_link->phy_params.v_level = v_max >> DP_TRAIN_VOLTAGE_SWING_SHIFT; + dp_link->phy_params.p_level = p_max >> DP_TRAIN_PRE_EMPHASIS_SHIFT; + + /** + * Adjust the voltage swing and pre-emphasis level combination to within + * the allowable range. 
+ */ + if (dp_link->phy_params.v_level > DP_TRAIN_VOLTAGE_SWING_MAX) { + drm_dbg_dp(link->drm_dev, + "Requested vSwingLevel=%d, change to %d\n", + dp_link->phy_params.v_level, + DP_TRAIN_VOLTAGE_SWING_MAX); + dp_link->phy_params.v_level = DP_TRAIN_VOLTAGE_SWING_MAX; + } + + if (dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_MAX) { + drm_dbg_dp(link->drm_dev, + "Requested preEmphasisLevel=%d, change to %d\n", + dp_link->phy_params.p_level, + DP_TRAIN_PRE_EMPHASIS_MAX); + dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_MAX; + } + + if ((dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_LVL_1) + && (dp_link->phy_params.v_level == + DP_TRAIN_VOLTAGE_SWING_LVL_2)) { + drm_dbg_dp(link->drm_dev, + "Requested preEmphasisLevel=%d, change to %d\n", + dp_link->phy_params.p_level, + DP_TRAIN_PRE_EMPHASIS_LVL_1); + dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_LVL_1; + } + + drm_dbg_dp(link->drm_dev, "adjusted: v_level=%d, p_level=%d\n", + dp_link->phy_params.v_level, dp_link->phy_params.p_level); + + return 0; +} + +void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link) +{ + dp_link->phy_params.v_level = 0; + dp_link->phy_params.p_level = 0; +} + +u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp) +{ + u32 tbd; + + /* + * Few simplistic rules and assumptions made here: + * 1. Test bit depth is bit depth per color component + * 2. Assume 3 color components + */ + switch (bpp) { + case 18: + tbd = DP_TEST_BIT_DEPTH_6; + break; + case 24: + tbd = DP_TEST_BIT_DEPTH_8; + break; + case 30: + tbd = DP_TEST_BIT_DEPTH_10; + break; + default: + tbd = DP_TEST_BIT_DEPTH_UNKNOWN; + break; + } + + if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN) + tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT); + + return tbd; +} + +struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux) +{ + struct dp_link_private *link; + struct dp_link *dp_link; + + if (!dev || !aux) { + DRM_ERROR("invalid input\n"); + return ERR_PTR(-EINVAL); + } + + link = devm_kzalloc(dev, sizeof(*link), GFP_KERNEL); + if (!link) + return ERR_PTR(-ENOMEM); + + link->dev = dev; + link->aux = aux; + + mutex_init(&link->psm_mutex); + dp_link = &link->dp_link; + + return dp_link; +} diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h new file mode 100644 index 000000000..9dd4dd926 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_link.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */
+
+#ifndef _DP_LINK_H_
+#define _DP_LINK_H_
+
+#include "dp_aux.h"
+
+#define DS_PORT_STATUS_CHANGED 0x200
+#define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF
+#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0)
+
+struct dp_link_info {
+	unsigned char revision;
+	unsigned int rate;
+	unsigned int num_lanes;
+	unsigned long capabilities;
+};
+
+enum dp_link_voltage_level {
+	DP_TRAIN_VOLTAGE_SWING_LVL_0	= 0,
+	DP_TRAIN_VOLTAGE_SWING_LVL_1	= 1,
+	DP_TRAIN_VOLTAGE_SWING_LVL_2	= 2,
+	DP_TRAIN_VOLTAGE_SWING_MAX	= DP_TRAIN_VOLTAGE_SWING_LVL_2,
+};
+
+enum dp_link_preemaphasis_level {
+	DP_TRAIN_PRE_EMPHASIS_LVL_0	= 0,
+	DP_TRAIN_PRE_EMPHASIS_LVL_1	= 1,
+	DP_TRAIN_PRE_EMPHASIS_LVL_2	= 2,
+	DP_TRAIN_PRE_EMPHASIS_MAX	= DP_TRAIN_PRE_EMPHASIS_LVL_2,
+};
+
+struct dp_link_test_video {
+	u32 test_video_pattern;
+	u32 test_bit_depth;
+	u32 test_dyn_range;
+	u32 test_h_total;
+	u32 test_v_total;
+	u32 test_h_start;
+	u32 test_v_start;
+	u32 test_hsync_pol;
+	u32 test_hsync_width;
+	u32 test_vsync_pol;
+	u32 test_vsync_width;
+	u32 test_h_width;
+	u32 test_v_height;
+	u32 test_rr_d;
+	u32 test_rr_n;
+};
+
+struct dp_link_test_audio {
+	u32 test_audio_sampling_rate;
+	u32 test_audio_channel_count;
+	u32 test_audio_pattern_type;
+	u32 test_audio_period_ch_1;
+	u32 test_audio_period_ch_2;
+	u32 test_audio_period_ch_3;
+	u32 test_audio_period_ch_4;
+	u32 test_audio_period_ch_5;
+	u32 test_audio_period_ch_6;
+	u32 test_audio_period_ch_7;
+	u32 test_audio_period_ch_8;
+};
+
+struct dp_link_phy_params {
+	u32 phy_test_pattern_sel;
+	u8 v_level;
+	u8 p_level;
+};
+
+struct dp_link {
+	u32 sink_request;
+	u32 test_response;
+	bool psm_enabled;
+
+	u8 sink_count;
+	struct dp_link_test_video test_video;
+	struct dp_link_test_audio test_audio;
+	struct dp_link_phy_params phy_params;
+	struct dp_link_info link_params;
+};
+
+/**
+ * dp_link_bit_depth_to_bpp() - convert test bit depth to bpp
+ * @tbd: test bit depth
+ *
+ * Returns the bits per pixel (bpp) to be used corresponding to the
+ * test bit depth value. This function assumes that bit depth has
+ * already been validated.
+ */
+static inline u32 dp_link_bit_depth_to_bpp(u32 tbd)
+{
+	/*
+	 * Few simplistic rules and assumptions made here:
+	 *    1. Bit depth is per color component
+	 *    2. If bit depth is unknown return 0
+	 *    3. Assume 3 color components
+	 */
+	switch (tbd) {
+	case DP_TEST_BIT_DEPTH_6:
+		return 18;
+	case DP_TEST_BIT_DEPTH_8:
+		return 24;
+	case DP_TEST_BIT_DEPTH_10:
+		return 30;
+	case DP_TEST_BIT_DEPTH_UNKNOWN:
+	default:
+		return 0;
+	}
+}
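+
+/*
+ * Editor's example of the mapping implemented above: with three color
+ * components, DP_TEST_BIT_DEPTH_8 corresponds to 24 bpp, and (via the
+ * helper below) to 8 bits per component.
+ */
+#if 0	/* illustrative sketch only */
+	u32 bpp = dp_link_bit_depth_to_bpp(DP_TEST_BIT_DEPTH_8);	/* 24 */
+	u32 bpc = dp_link_bit_depth_to_bpc(DP_TEST_BIT_DEPTH_8);	/* 8 */
+#endif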
+/**
+ * dp_link_bit_depth_to_bpc() - convert test bit depth to bpc
+ * @tbd: test bit depth
+ *
+ * Returns the bits per component (bpc) to be used corresponding to the
+ * bit depth value. This function assumes that bit depth has
+ * already been validated.
+ */
+static inline u32 dp_link_bit_depth_to_bpc(u32 tbd)
+{
+	switch (tbd) {
+	case DP_TEST_BIT_DEPTH_6:
+		return 6;
+	case DP_TEST_BIT_DEPTH_8:
+		return 8;
+	case DP_TEST_BIT_DEPTH_10:
+		return 10;
+	case DP_TEST_BIT_DEPTH_UNKNOWN:
+	default:
+		return 0;
+	}
+}
+
+void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link);
+u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp);
+int dp_link_process_request(struct dp_link *dp_link);
+int dp_link_get_colorimetry_config(struct dp_link *dp_link);
+int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status);
+bool dp_link_send_test_response(struct dp_link *dp_link);
+int dp_link_psm_config(struct dp_link *dp_link,
+		struct dp_link_info *link_info, bool enable);
+bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum);
+
+/**
+ * dp_link_get() - get the functionalities of dp test module
+ * @dev: device instance of the caller
+ * @aux: DP AUX channel used for DPCD transactions
+ *
+ * return: a pointer to dp_link struct
+ */
+struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux);
+
+#endif /* _DP_LINK_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
new file mode 100644
index 000000000..d38086650
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -0,0 +1,464 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "dp_panel.h"
+
+#include
+#include
+#include
+
+struct dp_panel_private {
+	struct device *dev;
+	struct drm_device *drm_dev;
+	struct dp_panel dp_panel;
+	struct drm_dp_aux *aux;
+	struct dp_link *link;
+	struct dp_catalog *catalog;
+	bool panel_on;
+	bool aux_cfg_update_done;
+};
+
+static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
+{
+	int rc = 0;
+	size_t len;
+	ssize_t rlen;
+	struct dp_panel_private *panel;
+	struct dp_link_info *link_info;
+	u8 *dpcd, major = 0, minor = 0, temp;
+	u32 offset = DP_DPCD_REV;
+
+	dpcd = dp_panel->dpcd;
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+	link_info = &dp_panel->link_info;
+
+	rlen = drm_dp_dpcd_read(panel->aux, offset,
+			dpcd, (DP_RECEIVER_CAP_SIZE + 1));
+	if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
+		DRM_ERROR("dpcd read failed, rlen=%zd\n", rlen);
+		if (rlen == -ETIMEDOUT)
+			rc = rlen;
+		else
+			rc = -EINVAL;
+
+		goto end;
+	}
+
+	temp = dpcd[DP_TRAINING_AUX_RD_INTERVAL];
+
+	/* check for EXTENDED_RECEIVER_CAPABILITY_FIELD_PRESENT */
+	if (temp & BIT(7)) {
+		drm_dbg_dp(panel->drm_dev,
+				"using EXTENDED_RECEIVER_CAPABILITY_FIELD\n");
+		offset = DPRX_EXTENDED_DPCD_FIELD;
+	}
+
+	rlen = drm_dp_dpcd_read(panel->aux, offset,
+			dpcd, (DP_RECEIVER_CAP_SIZE + 1));
+	if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
+		DRM_ERROR("dpcd read failed, rlen=%zd\n", rlen);
+		if (rlen == -ETIMEDOUT)
+			rc = rlen;
+		else
+			rc = -EINVAL;
+
+		goto end;
+	}
+
+	link_info->revision = dpcd[DP_DPCD_REV];
+	major = (link_info->revision >> 4) & 0x0f;
+	minor = link_info->revision & 0x0f;
+
+	link_info->rate = drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
+	link_info->num_lanes = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+
+	if (link_info->num_lanes > dp_panel->max_dp_lanes)
+		link_info->num_lanes = dp_panel->max_dp_lanes;
+
+	/* Limit support up to HBR2 until HBR3 support is added */
+	if (link_info->rate >= (drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4)))
+		link_info->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
+
+	drm_dbg_dp(panel->drm_dev, "version: %d.%d\n", major, minor);
+	drm_dbg_dp(panel->drm_dev, "link_rate=%d\n", link_info->rate);
"lane_count=%d\n", link_info->num_lanes); + + if (drm_dp_enhanced_frame_cap(dpcd)) + link_info->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING; + + dp_panel->dfp_present = dpcd[DP_DOWNSTREAMPORT_PRESENT]; + dp_panel->dfp_present &= DP_DWN_STRM_PORT_PRESENT; + + if (dp_panel->dfp_present && (dpcd[DP_DPCD_REV] > 0x10)) { + dp_panel->ds_port_cnt = dpcd[DP_DOWN_STREAM_PORT_COUNT]; + dp_panel->ds_port_cnt &= DP_PORT_COUNT_MASK; + len = DP_DOWNSTREAM_PORTS * DP_DOWNSTREAM_CAP_SIZE; + + rlen = drm_dp_dpcd_read(panel->aux, + DP_DOWNSTREAM_PORT_0, dp_panel->ds_cap_info, len); + if (rlen < len) { + DRM_ERROR("ds port status failed, rlen=%zd\n", rlen); + rc = -EINVAL; + goto end; + } + } + +end: + return rc; +} + +static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel, + u32 mode_edid_bpp, u32 mode_pclk_khz) +{ + struct dp_link_info *link_info; + const u32 max_supported_bpp = 30, min_supported_bpp = 18; + u32 bpp = 0, data_rate_khz = 0; + + bpp = min_t(u32, mode_edid_bpp, max_supported_bpp); + + link_info = &dp_panel->link_info; + data_rate_khz = link_info->num_lanes * link_info->rate * 8; + + while (bpp > min_supported_bpp) { + if (mode_pclk_khz * bpp <= data_rate_khz) + break; + bpp -= 6; + } + + return bpp; +} + +static int dp_panel_update_modes(struct drm_connector *connector, + struct edid *edid) +{ + int rc = 0; + + if (edid) { + rc = drm_connector_update_edid_property(connector, edid); + if (rc) { + DRM_ERROR("failed to update edid property %d\n", rc); + return rc; + } + rc = drm_add_edid_modes(connector, edid); + return rc; + } + + rc = drm_connector_update_edid_property(connector, NULL); + if (rc) + DRM_ERROR("failed to update edid property %d\n", rc); + + return rc; +} + +int dp_panel_read_sink_caps(struct dp_panel *dp_panel, + struct drm_connector *connector) +{ + int rc = 0, bw_code; + int rlen, count; + struct dp_panel_private *panel; + + if (!dp_panel || !connector) { + DRM_ERROR("invalid input\n"); + return -EINVAL; + } + + panel = container_of(dp_panel, struct dp_panel_private, dp_panel); + + rc = dp_panel_read_dpcd(dp_panel); + if (rc) { + DRM_ERROR("read dpcd failed %d\n", rc); + return rc; + } + + bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate); + if (!is_link_rate_valid(bw_code) || + !is_lane_count_valid(dp_panel->link_info.num_lanes) || + (bw_code > dp_panel->max_bw_code)) { + DRM_ERROR("Illegal link rate=%d lane=%d\n", dp_panel->link_info.rate, + dp_panel->link_info.num_lanes); + return -EINVAL; + } + + if (dp_panel->dfp_present) { + rlen = drm_dp_dpcd_read(panel->aux, DP_SINK_COUNT, + &count, 1); + if (rlen == 1) { + count = DP_GET_SINK_COUNT(count); + if (!count) { + DRM_ERROR("no downstream ports connected\n"); + panel->link->sink_count = 0; + rc = -ENOTCONN; + goto end; + } + } + } + + kfree(dp_panel->edid); + dp_panel->edid = NULL; + + dp_panel->edid = drm_get_edid(connector, + &panel->aux->ddc); + if (!dp_panel->edid) { + DRM_ERROR("panel edid read failed\n"); + /* check edid read fail is due to unplug */ + if (!dp_catalog_link_is_connected(panel->catalog)) { + rc = -ETIMEDOUT; + goto end; + } + } + + if (panel->aux_cfg_update_done) { + drm_dbg_dp(panel->drm_dev, + "read DPCD with updated AUX config\n"); + rc = dp_panel_read_dpcd(dp_panel); + bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate); + if (rc || !is_link_rate_valid(bw_code) || + !is_lane_count_valid(dp_panel->link_info.num_lanes) + || (bw_code > dp_panel->max_bw_code)) { + DRM_ERROR("read dpcd failed %d\n", rc); + return rc; + } + panel->aux_cfg_update_done = false; + } +end: + 
+u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel,
+		u32 mode_edid_bpp, u32 mode_pclk_khz)
+{
+	struct dp_panel_private *panel;
+	u32 bpp;
+
+	if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) {
+		DRM_ERROR("invalid input\n");
+		return 0;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	if (dp_panel->video_test)
+		bpp = dp_link_bit_depth_to_bpp(
+				panel->link->test_video.test_bit_depth);
+	else
+		bpp = dp_panel_get_supported_bpp(dp_panel, mode_edid_bpp,
+				mode_pclk_khz);
+
+	return bpp;
+}
+
+int dp_panel_get_modes(struct dp_panel *dp_panel,
+	struct drm_connector *connector)
+{
+	if (!dp_panel) {
+		DRM_ERROR("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (dp_panel->edid)
+		return dp_panel_update_modes(connector, dp_panel->edid);
+
+	return 0;
+}
+
+static u8 dp_panel_get_edid_checksum(struct edid *edid)
+{
+	/* use the checksum of the last EDID block */
+	edid += edid->extensions;
+
+	return edid->checksum;
+}
+
+void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
+{
+	struct dp_panel_private *panel;
+
+	if (!dp_panel) {
+		DRM_ERROR("invalid input\n");
+		return;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) {
+		u8 checksum;
+
+		if (dp_panel->edid)
+			checksum = dp_panel_get_edid_checksum(dp_panel->edid);
+		else
+			checksum = dp_panel->connector->real_edid_checksum;
+
+		dp_link_send_edid_checksum(panel->link, checksum);
+		dp_link_send_test_response(panel->link);
+	}
+}
+
+void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable)
+{
+	struct dp_catalog *catalog;
+	struct dp_panel_private *panel;
+
+	if (!dp_panel) {
+		DRM_ERROR("invalid input\n");
+		return;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+	catalog = panel->catalog;
+
+	if (!panel->panel_on) {
+		drm_dbg_dp(panel->drm_dev,
+				"DP panel not enabled, handle TPG on next on\n");
+		return;
+	}
+
+	if (!enable) {
+		dp_catalog_panel_tpg_disable(catalog);
+		return;
+	}
+
+	drm_dbg_dp(panel->drm_dev, "calling catalog tpg_enable\n");
+	dp_catalog_panel_tpg_enable(catalog, &panel->dp_panel.dp_mode.drm_mode);
+}
+
+void dp_panel_dump_regs(struct dp_panel *dp_panel)
+{
+	struct dp_catalog *catalog;
+	struct dp_panel_private *panel;
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+	catalog = panel->catalog;
+
+	dp_catalog_dump_regs(catalog);
+}
+
+int dp_panel_timing_cfg(struct dp_panel *dp_panel)
+{
+	u32 data, total_ver, total_hor;
+	struct dp_catalog *catalog;
+	struct dp_panel_private *panel;
+	struct drm_display_mode *drm_mode;
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+	catalog = panel->catalog;
+	drm_mode = &panel->dp_panel.dp_mode.drm_mode;
+
+	drm_dbg_dp(panel->drm_dev, "width=%d hporch= %d %d %d\n",
+		drm_mode->hdisplay, drm_mode->htotal - drm_mode->hsync_end,
+		drm_mode->hsync_start - drm_mode->hdisplay,
+		drm_mode->hsync_end - drm_mode->hsync_start);
+
+	drm_dbg_dp(panel->drm_dev, "height=%d vporch= %d %d %d\n",
+		drm_mode->vdisplay, drm_mode->vtotal - drm_mode->vsync_end,
+		drm_mode->vsync_start - drm_mode->vdisplay,
+		drm_mode->vsync_end - drm_mode->vsync_start);
+
+	total_hor = drm_mode->htotal;
+
+	total_ver = drm_mode->vtotal;
+
+	data = total_ver;
+	data <<= 16;
+	data |= total_hor;
+
+	catalog->total = data;
+
+	data = (drm_mode->vtotal - drm_mode->vsync_start);
+	data <<= 16;
+	data |= (drm_mode->htotal - drm_mode->hsync_start);
+
+	catalog->sync_start = data;
+
+	data = drm_mode->vsync_end - drm_mode->vsync_start;
+	data <<= 16;
+	data |= (panel->dp_panel.dp_mode.v_active_low << 31);
+	data |= drm_mode->hsync_end - drm_mode->hsync_start;
+	data |= (panel->dp_panel.dp_mode.h_active_low << 15);
+
+	catalog->width_blanking = data;
+
+	data = drm_mode->vdisplay;
+	data <<= 16;
+	data |= drm_mode->hdisplay;
+
+	catalog->dp_active = data;
+
+	dp_catalog_panel_timing_cfg(catalog);
+	panel->panel_on = true;
+
+	return 0;
+}
+
+int dp_panel_init_panel_info(struct dp_panel *dp_panel)
+{
+	struct drm_display_mode *drm_mode;
+	struct dp_panel_private *panel;
+
+	drm_mode = &dp_panel->dp_mode.drm_mode;
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	/*
+	 * Print the resolution info as this is a result of a user-initiated
+	 * action of cable connection.
+	 */
+	drm_dbg_dp(panel->drm_dev, "SET NEW RESOLUTION:\n");
+	drm_dbg_dp(panel->drm_dev, "%dx%d@%dfps\n",
+		drm_mode->hdisplay, drm_mode->vdisplay, drm_mode_vrefresh(drm_mode));
+	drm_dbg_dp(panel->drm_dev,
+			"h_porches(back|front|width) = (%d|%d|%d)\n",
+			drm_mode->htotal - drm_mode->hsync_end,
+			drm_mode->hsync_start - drm_mode->hdisplay,
+			drm_mode->hsync_end - drm_mode->hsync_start);
+	drm_dbg_dp(panel->drm_dev,
+			"v_porches(back|front|width) = (%d|%d|%d)\n",
+			drm_mode->vtotal - drm_mode->vsync_end,
+			drm_mode->vsync_start - drm_mode->vdisplay,
+			drm_mode->vsync_end - drm_mode->vsync_start);
+	drm_dbg_dp(panel->drm_dev, "pixel clock (KHz)=(%d)\n",
+			drm_mode->clock);
+	drm_dbg_dp(panel->drm_dev, "bpp = %d\n", dp_panel->dp_mode.bpp);
+
+	dp_panel->dp_mode.bpp = max_t(u32, 18,
+				min_t(u32, dp_panel->dp_mode.bpp, 30));
+	drm_dbg_dp(panel->drm_dev, "updated bpp = %d\n",
+				dp_panel->dp_mode.bpp);
+
+	return 0;
+}
+
+struct dp_panel *dp_panel_get(struct dp_panel_in *in)
+{
+	struct dp_panel_private *panel;
+	struct dp_panel *dp_panel;
+
+	if (!in->dev || !in->catalog || !in->aux || !in->link) {
+		DRM_ERROR("invalid input\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	panel = devm_kzalloc(in->dev, sizeof(*panel), GFP_KERNEL);
+	if (!panel)
+		return ERR_PTR(-ENOMEM);
+
+	panel->dev = in->dev;
+	panel->aux = in->aux;
+	panel->catalog = in->catalog;
+	panel->link = in->link;
+
+	dp_panel = &panel->dp_panel;
+	dp_panel->max_bw_code = DP_LINK_BW_8_1;
+	panel->aux_cfg_update_done = false;
+
+	return dp_panel;
+}
+
+void dp_panel_put(struct dp_panel *dp_panel)
+{
+	if (!dp_panel)
+		return;
+
+	kfree(dp_panel->edid);
+}
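+
+/*
+ * Editor's worked example of the register packing done in
+ * dp_panel_timing_cfg() above, for a CEA 1080p60 mode (active 1920x1080,
+ * htotal 2200, vtotal 1125, hsync 2008-2052, vsync 1084-1089, both sync
+ * polarities active-high):
+ */
+#if 0	/* illustrative sketch only */
+	u32 total = (1125 << 16) | 2200;	/* vtotal | htotal */
+	u32 sync_start = (41 << 16) | 192;	/* vtotal-vsync_start | htotal-hsync_start */
+	u32 width_blanking = (5 << 16) | 44;	/* vsync width | hsync width */
+	u32 dp_active = (1080 << 16) | 1920;	/* vdisplay | hdisplay */
+#endif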
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
new file mode 100644
index 000000000..d861197ac
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_PANEL_H_
+#define _DP_PANEL_H_
+
+#include
+
+#include "dp_aux.h"
+#include "dp_link.h"
+#include "dp_hpd.h"
+
+struct edid;
+
+#define DPRX_EXTENDED_DPCD_FIELD	0x2200
+
+#define DP_DOWNSTREAM_PORTS		4
+#define DP_DOWNSTREAM_CAP_SIZE		4
+
+struct dp_display_mode {
+	struct drm_display_mode drm_mode;
+	u32 capabilities;
+	u32 bpp;
+	u32 h_active_low;
+	u32 v_active_low;
+};
+
+struct dp_panel_in {
+	struct device *dev;
+	struct drm_dp_aux *aux;
+	struct dp_link *link;
+	struct dp_catalog *catalog;
+};
+
+struct dp_panel {
+	/* dpcd raw data */
+	u8 dpcd[DP_RECEIVER_CAP_SIZE + 1];
+	u8 ds_cap_info[DP_DOWNSTREAM_PORTS * DP_DOWNSTREAM_CAP_SIZE];
+	u32 ds_port_cnt;
+	u32 dfp_present;
+
+	struct dp_link_info link_info;
+	struct drm_dp_desc desc;
+	struct edid *edid;
+	struct drm_connector *connector;
+	struct dp_display_mode dp_mode;
+	bool video_test;
+
+	u32 vic;
+	u32 max_dp_lanes;
+
+	u32 max_bw_code;
+};
+
+int dp_panel_init_panel_info(struct dp_panel *dp_panel);
+int dp_panel_deinit(struct dp_panel *dp_panel);
+int dp_panel_timing_cfg(struct dp_panel *dp_panel);
+void dp_panel_dump_regs(struct dp_panel *dp_panel);
+int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
+		struct drm_connector *connector);
+u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,
+		u32 mode_pclk_khz);
+int dp_panel_get_modes(struct dp_panel *dp_panel,
+		struct drm_connector *connector);
+void dp_panel_handle_sink_request(struct dp_panel *dp_panel);
+void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable);
+
+/**
+ * is_link_rate_valid() - validates the link rate
+ * @bw_code: link rate bandwidth code requested by the sink
+ *
+ * Returns true if the requested link rate is supported.
+ */
+static inline bool is_link_rate_valid(u32 bw_code)
+{
+	return (bw_code == DP_LINK_BW_1_62 ||
+		bw_code == DP_LINK_BW_2_7 ||
+		bw_code == DP_LINK_BW_5_4 ||
+		bw_code == DP_LINK_BW_8_1);
+}
+
+/**
+ * is_lane_count_valid() - validates the lane count
+ * @lane_count: lane count requested by the sink
+ *
+ * Returns true if the requested lane count is supported.
+ */
+static inline bool is_lane_count_valid(u32 lane_count)
+{
+	return (lane_count == 1 ||
+		lane_count == 2 ||
+		lane_count == 4);
+}
+
+struct dp_panel *dp_panel_get(struct dp_panel_in *in);
+void dp_panel_put(struct dp_panel *dp_panel);
+#endif /* _DP_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
new file mode 100644
index 000000000..dcbe893d6
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_parser.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */ + +#include +#include + +#include +#include +#include + +#include "dp_parser.h" +#include "dp_reg.h" + +#define DP_DEFAULT_AHB_OFFSET 0x0000 +#define DP_DEFAULT_AHB_SIZE 0x0200 +#define DP_DEFAULT_AUX_OFFSET 0x0200 +#define DP_DEFAULT_AUX_SIZE 0x0200 +#define DP_DEFAULT_LINK_OFFSET 0x0400 +#define DP_DEFAULT_LINK_SIZE 0x0C00 +#define DP_DEFAULT_P0_OFFSET 0x1000 +#define DP_DEFAULT_P0_SIZE 0x0400 + +static void __iomem *dp_ioremap(struct platform_device *pdev, int idx, size_t *len) +{ + struct resource *res; + void __iomem *base; + + base = devm_platform_get_and_ioremap_resource(pdev, idx, &res); + if (!IS_ERR(base)) + *len = resource_size(res); + + return base; +} + +static int dp_parser_ctrl_res(struct dp_parser *parser) +{ + struct platform_device *pdev = parser->pdev; + struct dp_io *io = &parser->io; + struct dss_io_data *dss = &io->dp_controller; + + dss->ahb.base = dp_ioremap(pdev, 0, &dss->ahb.len); + if (IS_ERR(dss->ahb.base)) + return PTR_ERR(dss->ahb.base); + + dss->aux.base = dp_ioremap(pdev, 1, &dss->aux.len); + if (IS_ERR(dss->aux.base)) { + /* + * The initial binding had a single reg, but in order to + * support variation in the sub-region sizes this was split. + * dp_ioremap() will fail with -EINVAL here if only a single + * reg is specified, so fill in the sub-region offsets and + * lengths based on this single region. + */ + if (PTR_ERR(dss->aux.base) == -EINVAL) { + if (dss->ahb.len < DP_DEFAULT_P0_OFFSET + DP_DEFAULT_P0_SIZE) { + DRM_ERROR("legacy memory region not large enough\n"); + return -EINVAL; + } + + dss->ahb.len = DP_DEFAULT_AHB_SIZE; + dss->aux.base = dss->ahb.base + DP_DEFAULT_AUX_OFFSET; + dss->aux.len = DP_DEFAULT_AUX_SIZE; + dss->link.base = dss->ahb.base + DP_DEFAULT_LINK_OFFSET; + dss->link.len = DP_DEFAULT_LINK_SIZE; + dss->p0.base = dss->ahb.base + DP_DEFAULT_P0_OFFSET; + dss->p0.len = DP_DEFAULT_P0_SIZE; + } else { + DRM_ERROR("unable to remap aux region: %pe\n", dss->aux.base); + return PTR_ERR(dss->aux.base); + } + } else { + dss->link.base = dp_ioremap(pdev, 2, &dss->link.len); + if (IS_ERR(dss->link.base)) { + DRM_ERROR("unable to remap link region: %pe\n", dss->link.base); + return PTR_ERR(dss->link.base); + } + + dss->p0.base = dp_ioremap(pdev, 3, &dss->p0.len); + if (IS_ERR(dss->p0.base)) { + DRM_ERROR("unable to remap p0 region: %pe\n", dss->p0.base); + return PTR_ERR(dss->p0.base); + } + } + + io->phy = devm_phy_get(&pdev->dev, "dp"); + if (IS_ERR(io->phy)) + return PTR_ERR(io->phy); + + return 0; +} + +static int dp_parser_misc(struct dp_parser *parser) +{ + struct device_node *of_node = parser->pdev->dev.of_node; + int len; + + len = drm_of_get_data_lanes_count(of_node, 1, DP_MAX_NUM_DP_LANES); + if (len < 0) { + DRM_WARN("Invalid property \"data-lanes\", default max DP lanes = %d\n", + DP_MAX_NUM_DP_LANES); + len = DP_MAX_NUM_DP_LANES; + } + + parser->max_dp_lanes = len; + return 0; +} + +static inline bool dp_parser_check_prefix(const char *clk_prefix, + const char *clk_name) +{ + return !strncmp(clk_prefix, clk_name, strlen(clk_prefix)); +} + +static int dp_parser_init_clk_data(struct dp_parser *parser) +{ + int num_clk, i, rc; + int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0; + const char *clk_name; + struct device *dev = &parser->pdev->dev; + struct dss_module_power *core_power = &parser->mp[DP_CORE_PM]; + struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM]; + struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM]; + + num_clk = of_property_count_strings(dev->of_node, "clock-names"); + if 
(num_clk <= 0) { + DRM_ERROR("no clocks are defined\n"); + return -EINVAL; + } + + for (i = 0; i < num_clk; i++) { + rc = of_property_read_string_index(dev->of_node, + "clock-names", i, &clk_name); + if (rc < 0) + return rc; + + if (dp_parser_check_prefix("core", clk_name)) + core_clk_count++; + + if (dp_parser_check_prefix("ctrl", clk_name)) + ctrl_clk_count++; + + if (dp_parser_check_prefix("stream", clk_name)) + stream_clk_count++; + } + + /* Initialize the CORE power module */ + if (core_clk_count == 0) { + DRM_ERROR("no core clocks are defined\n"); + return -EINVAL; + } + + core_power->num_clk = core_clk_count; + core_power->clocks = devm_kcalloc(dev, + core_power->num_clk, sizeof(struct clk_bulk_data), + GFP_KERNEL); + if (!core_power->clocks) + return -ENOMEM; + + /* Initialize the CTRL power module */ + if (ctrl_clk_count == 0) { + DRM_ERROR("no ctrl clocks are defined\n"); + return -EINVAL; + } + + ctrl_power->num_clk = ctrl_clk_count; + ctrl_power->clocks = devm_kcalloc(dev, + ctrl_power->num_clk, sizeof(struct clk_bulk_data), + GFP_KERNEL); + if (!ctrl_power->clocks) { + ctrl_power->num_clk = 0; + return -ENOMEM; + } + + /* Initialize the STREAM power module */ + if (stream_clk_count == 0) { + DRM_ERROR("no stream (pixel) clocks are defined\n"); + return -EINVAL; + } + + stream_power->num_clk = stream_clk_count; + stream_power->clocks = devm_kcalloc(dev, + stream_power->num_clk, sizeof(struct clk_bulk_data), + GFP_KERNEL); + if (!stream_power->clocks) { + stream_power->num_clk = 0; + return -ENOMEM; + } + + return 0; +} + +static int dp_parser_clock(struct dp_parser *parser) +{ + int rc = 0, i = 0; + int num_clk = 0; + int core_clk_index = 0, ctrl_clk_index = 0, stream_clk_index = 0; + int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0; + const char *clk_name; + struct device *dev = &parser->pdev->dev; + struct dss_module_power *core_power = &parser->mp[DP_CORE_PM]; + struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM]; + struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM]; + + rc = dp_parser_init_clk_data(parser); + if (rc) { + DRM_ERROR("failed to initialize power data %d\n", rc); + return -EINVAL; + } + + core_clk_count = core_power->num_clk; + ctrl_clk_count = ctrl_power->num_clk; + stream_clk_count = stream_power->num_clk; + + num_clk = core_clk_count + ctrl_clk_count + stream_clk_count; + + for (i = 0; i < num_clk; i++) { + rc = of_property_read_string_index(dev->of_node, "clock-names", + i, &clk_name); + if (rc) { + DRM_ERROR("error reading clock-names %d\n", rc); + return rc; + } + if (dp_parser_check_prefix("core", clk_name) && + core_clk_index < core_clk_count) { + core_power->clocks[core_clk_index].id = devm_kstrdup(dev, clk_name, GFP_KERNEL); + core_clk_index++; + } else if (dp_parser_check_prefix("stream", clk_name) && + stream_clk_index < stream_clk_count) { + stream_power->clocks[stream_clk_index].id = devm_kstrdup(dev, clk_name, GFP_KERNEL); + stream_clk_index++; + } else if (dp_parser_check_prefix("ctrl", clk_name) && + ctrl_clk_index < ctrl_clk_count) { + ctrl_power->clocks[ctrl_clk_index].id = devm_kstrdup(dev, clk_name, GFP_KERNEL); + ctrl_clk_index++; + } + } + + return 0; +} + +int devm_dp_parser_find_next_bridge(struct device *dev, struct dp_parser *parser) +{ + struct platform_device *pdev = parser->pdev; + struct drm_bridge *bridge; + + bridge = devm_drm_of_get_bridge(dev, pdev->dev.of_node, 1, 0); + if (IS_ERR(bridge)) + return PTR_ERR(bridge); + + parser->next_bridge = bridge; + + return 0; +} + +static int 
dp_parser_parse(struct dp_parser *parser)
+{
+	int rc = 0;
+
+	if (!parser) {
+		DRM_ERROR("invalid input\n");
+		return -EINVAL;
+	}
+
+	rc = dp_parser_ctrl_res(parser);
+	if (rc)
+		return rc;
+
+	rc = dp_parser_misc(parser);
+	if (rc)
+		return rc;
+
+	rc = dp_parser_clock(parser);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+struct dp_parser *dp_parser_get(struct platform_device *pdev)
+{
+	struct dp_parser *parser;
+
+	parser = devm_kzalloc(&pdev->dev, sizeof(*parser), GFP_KERNEL);
+	if (!parser)
+		return ERR_PTR(-ENOMEM);
+
+	parser->parse = dp_parser_parse;
+	parser->pdev = pdev;
+
+	return parser;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
new file mode 100644
index 000000000..d30ab773d
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_parser.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_PARSER_H_
+#define _DP_PARSER_H_
+
+#include
+#include
+#include
+
+#include "msm_drv.h"
+
+#define DP_LABEL "MDSS DP DISPLAY"
+#define DP_MAX_PIXEL_CLK_KHZ	675000
+#define DP_MAX_NUM_DP_LANES	4
+
+enum dp_pm_type {
+	DP_CORE_PM,
+	DP_CTRL_PM,
+	DP_STREAM_PM,
+	DP_PHY_PM,
+	DP_MAX_PM
+};
+
+struct dss_io_region {
+	size_t len;
+	void __iomem *base;
+};
+
+struct dss_io_data {
+	struct dss_io_region ahb;
+	struct dss_io_region aux;
+	struct dss_io_region link;
+	struct dss_io_region p0;
+};
+
+static inline const char *dp_parser_pm_name(enum dp_pm_type module)
+{
+	switch (module) {
+	case DP_CORE_PM:	return "DP_CORE_PM";
+	case DP_CTRL_PM:	return "DP_CTRL_PM";
+	case DP_STREAM_PM:	return "DP_STREAM_PM";
+	case DP_PHY_PM:		return "DP_PHY_PM";
+	default:		return "???";
+	}
+}
+
+/**
+ * struct dp_display_data - display related device tree data.
+ *
+ * @ctrl_node: reference to controller device
+ * @phy_node: reference to phy device
+ * @is_active: is the controller currently active
+ * @name: name of the display
+ * @display_type: type of the display
+ */
+struct dp_display_data {
+	struct device_node *ctrl_node;
+	struct device_node *phy_node;
+	bool is_active;
+	const char *name;
+	const char *display_type;
+};
+
+/**
+ * struct dp_io - controller's IO related data
+ *
+ * @dp_controller: Display Port controller mapped memory address
+ * @phy: PHY handle for the DP link
+ * @phy_opts: PHY configuration options
+ */
+struct dp_io {
+	struct dss_io_data dp_controller;
+	struct phy *phy;
+	union phy_configure_opts phy_opts;
+};
+
+/**
+ * struct dp_pinctrl - DP's pin control
+ *
+ * @pin: pin-controller's instance
+ * @state_active: active state pin control
+ * @state_hpd_active: hpd active state pin control
+ * @state_suspend: suspend state pin control
+ */
+struct dp_pinctrl {
+	struct pinctrl *pin;
+	struct pinctrl_state *state_active;
+	struct pinctrl_state *state_hpd_active;
+	struct pinctrl_state *state_suspend;
+};
+
+/* Regulators for DP devices */
+struct dp_reg_entry {
+	char name[32];
+	int enable_load;
+	int disable_load;
+};
+
+struct dss_module_power {
+	unsigned int num_clk;
+	struct clk_bulk_data *clocks;
+};
+
+/**
+ * struct dp_parser - DP parser's data exposed to clients
+ *
+ * @pdev: platform data of the client
+ * @mp: gpio, regulator and clock related data
+ * @pinctrl: pin-control related data
+ * @disp_data: controller's display related data
+ * @parse: function to be called by client to parse device tree.
+ */ +struct dp_parser { + struct platform_device *pdev; + struct dss_module_power mp[DP_MAX_PM]; + struct dp_pinctrl pinctrl; + struct dp_io io; + struct dp_display_data disp_data; + u32 max_dp_lanes; + struct drm_bridge *next_bridge; + + int (*parse)(struct dp_parser *parser); +}; + +/** + * dp_parser_get() - get the DP's device tree parser module + * + * @pdev: platform data of the client + * return: pointer to dp_parser structure. + * + * This function provides client capability to parse the + * device tree and populate the data structures. The data + * related to clock, regulators, pin-control and other + * can be parsed using this module. + */ +struct dp_parser *dp_parser_get(struct platform_device *pdev); + +/** + * devm_dp_parser_find_next_bridge() - find an additional bridge to DP + * + * @dev: device to tie bridge lifetime to + * @parser: dp_parser data from client + * + * This function is used to find any additional bridge attached to + * the DP controller. The eDP interface requires a panel bridge. + * + * Return: 0 if able to get the bridge, otherwise negative errno for failure. + */ +int devm_dp_parser_find_next_bridge(struct device *dev, struct dp_parser *parser); + +#endif diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c new file mode 100644 index 000000000..c0aaabb03 --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_power.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__ + +#include +#include +#include +#include +#include "dp_power.h" +#include "msm_drv.h" + +struct dp_power_private { + struct dp_parser *parser; + struct platform_device *pdev; + struct device *dev; + struct drm_device *drm_dev; + struct clk *link_clk_src; + struct clk *pixel_provider; + struct clk *link_provider; + + struct dp_power dp_power; +}; + +static int dp_power_clk_init(struct dp_power_private *power) +{ + int rc = 0; + struct dss_module_power *core, *ctrl, *stream; + struct device *dev = &power->pdev->dev; + + core = &power->parser->mp[DP_CORE_PM]; + ctrl = &power->parser->mp[DP_CTRL_PM]; + stream = &power->parser->mp[DP_STREAM_PM]; + + rc = devm_clk_bulk_get(dev, core->num_clk, core->clocks); + if (rc) { + DRM_ERROR("failed to get %s clk. err=%d\n", + dp_parser_pm_name(DP_CORE_PM), rc); + return rc; + } + + rc = devm_clk_bulk_get(dev, ctrl->num_clk, ctrl->clocks); + if (rc) { + DRM_ERROR("failed to get %s clk. err=%d\n", + dp_parser_pm_name(DP_CTRL_PM), rc); + return -ENODEV; + } + + rc = devm_clk_bulk_get(dev, stream->num_clk, stream->clocks); + if (rc) { + DRM_ERROR("failed to get %s clk. 
err=%d\n",
+			dp_parser_pm_name(DP_STREAM_PM), rc);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type)
+{
+	struct dp_power_private *power;
+
+	power = container_of(dp_power, struct dp_power_private, dp_power);
+
+	drm_dbg_dp(power->drm_dev,
+		"core_clk_on=%d link_clk_on=%d stream_clk_on=%d\n",
+		dp_power->core_clks_on, dp_power->link_clks_on, dp_power->stream_clks_on);
+
+	if (pm_type == DP_CORE_PM)
+		return dp_power->core_clks_on;
+
+	if (pm_type == DP_CTRL_PM)
+		return dp_power->link_clks_on;
+
+	if (pm_type == DP_STREAM_PM)
+		return dp_power->stream_clks_on;
+
+	return 0;
+}
+
+int dp_power_clk_enable(struct dp_power *dp_power,
+		enum dp_pm_type pm_type, bool enable)
+{
+	int rc = 0;
+	struct dp_power_private *power;
+	struct dss_module_power *mp;
+
+	power = container_of(dp_power, struct dp_power_private, dp_power);
+
+	if (pm_type != DP_CORE_PM && pm_type != DP_CTRL_PM &&
+			pm_type != DP_STREAM_PM) {
+		DRM_ERROR("unsupported power module: %s\n",
+				dp_parser_pm_name(pm_type));
+		return -EINVAL;
+	}
+
+	if (enable) {
+		if (pm_type == DP_CORE_PM && dp_power->core_clks_on) {
+			drm_dbg_dp(power->drm_dev,
+					"core clks already enabled\n");
+			return 0;
+		}
+
+		if (pm_type == DP_CTRL_PM && dp_power->link_clks_on) {
+			drm_dbg_dp(power->drm_dev,
+					"link clks already enabled\n");
+			return 0;
+		}
+
+		if (pm_type == DP_STREAM_PM && dp_power->stream_clks_on) {
+			drm_dbg_dp(power->drm_dev,
+					"pixel clks already enabled\n");
+			return 0;
+		}
+
+		if ((pm_type == DP_CTRL_PM) && (!dp_power->core_clks_on)) {
+			drm_dbg_dp(power->drm_dev,
+					"Enable core clks before link clks\n");
+			mp = &power->parser->mp[DP_CORE_PM];
+
+			rc = clk_bulk_prepare_enable(mp->num_clk, mp->clocks);
+			if (rc) {
+				DRM_ERROR("fail to enable clks: %s. err=%d\n",
+					dp_parser_pm_name(DP_CORE_PM), rc);
+				return rc;
+			}
+			dp_power->core_clks_on = true;
+		}
+	}
+
+	mp = &power->parser->mp[pm_type];
+	if (enable) {
+		rc = clk_bulk_prepare_enable(mp->num_clk, mp->clocks);
+		if (rc) {
+			DRM_ERROR("failed to enable clks, err: %d\n", rc);
+			return rc;
+		}
+	} else {
+		clk_bulk_disable_unprepare(mp->num_clk, mp->clocks);
+	}
+
+	if (pm_type == DP_CORE_PM)
+		dp_power->core_clks_on = enable;
+	else if (pm_type == DP_STREAM_PM)
+		dp_power->stream_clks_on = enable;
+	else
+		dp_power->link_clks_on = enable;
+
+	drm_dbg_dp(power->drm_dev, "%s clocks for %s\n",
+			enable ? "enable" : "disable",
+			dp_parser_pm_name(pm_type));
+	drm_dbg_dp(power->drm_dev,
+		"stream_clks:%s link_clks:%s core_clks:%s\n",
+		dp_power->stream_clks_on ? "on" : "off",
+		dp_power->link_clks_on ? "on" : "off",
+		dp_power->core_clks_on ? "on" : "off");
+
+	return 0;
+}
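+
+/*
+ * Editor's usage sketch: a caller brings the clock domains up in
+ * dependency order; as shown above, dp_power_clk_enable() also
+ * force-enables the core clocks if the link clocks are requested first.
+ */
+#if 0	/* illustrative sketch only */
+	dp_power_clk_enable(dp_power, DP_CORE_PM, true);
+	dp_power_clk_enable(dp_power, DP_CTRL_PM, true);	/* link */
+	dp_power_clk_enable(dp_power, DP_STREAM_PM, true);	/* pixel */
+#endif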
"on" : "off"); + + return 0; +} + +int dp_power_client_init(struct dp_power *dp_power) +{ + int rc = 0; + struct dp_power_private *power; + + if (!dp_power) { + DRM_ERROR("invalid power data\n"); + return -EINVAL; + } + + power = container_of(dp_power, struct dp_power_private, dp_power); + + pm_runtime_enable(&power->pdev->dev); + + rc = dp_power_clk_init(power); + if (rc) + DRM_ERROR("failed to init clocks %d\n", rc); + + return rc; +} + +void dp_power_client_deinit(struct dp_power *dp_power) +{ + struct dp_power_private *power; + + if (!dp_power) { + DRM_ERROR("invalid power data\n"); + return; + } + + power = container_of(dp_power, struct dp_power_private, dp_power); + + pm_runtime_disable(&power->pdev->dev); +} + +int dp_power_init(struct dp_power *dp_power, bool flip) +{ + int rc = 0; + struct dp_power_private *power = NULL; + + if (!dp_power) { + DRM_ERROR("invalid power data\n"); + return -EINVAL; + } + + power = container_of(dp_power, struct dp_power_private, dp_power); + + pm_runtime_get_sync(&power->pdev->dev); + + rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true); + if (rc) { + DRM_ERROR("failed to enable DP core clocks, %d\n", rc); + goto exit; + } + + return 0; + +exit: + pm_runtime_put_sync(&power->pdev->dev); + return rc; +} + +int dp_power_deinit(struct dp_power *dp_power) +{ + struct dp_power_private *power; + + power = container_of(dp_power, struct dp_power_private, dp_power); + + dp_power_clk_enable(dp_power, DP_CORE_PM, false); + pm_runtime_put_sync(&power->pdev->dev); + return 0; +} + +struct dp_power *dp_power_get(struct device *dev, struct dp_parser *parser) +{ + struct dp_power_private *power; + struct dp_power *dp_power; + + if (!parser) { + DRM_ERROR("invalid input\n"); + return ERR_PTR(-EINVAL); + } + + power = devm_kzalloc(&parser->pdev->dev, sizeof(*power), GFP_KERNEL); + if (!power) + return ERR_PTR(-ENOMEM); + + power->parser = parser; + power->pdev = parser->pdev; + power->dev = dev; + + dp_power = &power->dp_power; + + return dp_power; +} diff --git a/drivers/gpu/drm/msm/dp/dp_power.h b/drivers/gpu/drm/msm/dp/dp_power.h new file mode 100644 index 000000000..e3f959ffa --- /dev/null +++ b/drivers/gpu/drm/msm/dp/dp_power.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DP_POWER_H_ +#define _DP_POWER_H_ + +#include "dp_parser.h" + +/** + * sruct dp_power - DisplayPort's power related data + * + * @init: initializes the regulators/core clocks/GPIOs/pinctrl + * @deinit: turns off the regulators/core clocks/GPIOs/pinctrl + * @clk_enable: enable/disable the DP clocks + * @set_pixel_clk_parent: set the parent of DP pixel clock + */ +struct dp_power { + bool core_clks_on; + bool link_clks_on; + bool stream_clks_on; +}; + +/** + * dp_power_init() - enable power supplies for display controller + * + * @power: instance of power module + * @flip: bool for flipping gpio direction + * return: 0 if success or error if failure. + * + * This API will turn on the regulators and configures gpio's + * aux/hpd. + */ +int dp_power_init(struct dp_power *power, bool flip); + +/** + * dp_power_deinit() - turn off regulators and gpios. + * + * @power: instance of power module + * return: 0 for success + * + * This API turns off power and regulators. 
+ */
+int dp_power_deinit(struct dp_power *power);
+
+/**
+ * dp_power_clk_status() - display controller clocks status
+ *
+ * @power: instance of power module
+ * @pm_type: type of pm, core/ctrl/phy
+ * return: status of power clocks
+ *
+ * This API returns the status of the DP clocks
+ */
+
+int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type);
+
+/**
+ * dp_power_clk_enable() - enable display controller clocks
+ *
+ * @power: instance of power module
+ * @pm_type: type of pm, core/ctrl/phy
+ * @enable: enables or disables
+ * return: 0 for success, error for failure.
+ *
+ * This API enables or disables the given module's DP clocks
+ */
+
+int dp_power_clk_enable(struct dp_power *power, enum dp_pm_type pm_type,
+				bool enable);
+
+/**
+ * dp_power_client_init() - initialize clock and regulator modules
+ *
+ * @power: instance of power module
+ * return: 0 for success, error for failure.
+ *
+ * This API will configure the DisplayPort's clocks and regulator
+ * modules.
+ */
+int dp_power_client_init(struct dp_power *power);
+
+/**
+ * dp_power_client_deinit() - de-initialize clock and regulator modules
+ *
+ * @power: instance of power module
+ * return: 0 for success, error for failure.
+ *
+ * This API will de-initialize the DisplayPort's clocks and regulator
+ * modules.
+ */
+void dp_power_client_deinit(struct dp_power *power);
+
+/**
+ * dp_power_get() - configure and get the DisplayPort power module data
+ *
+ * @dev: device instance of the caller
+ * @parser: instance of parser module
+ * return: pointer to allocated power module data
+ *
+ * This API will configure the DisplayPort's power module and provides
+ * methods to be called by the client to configure the power related
+ * modules.
+ */
+struct dp_power *dp_power_get(struct device *dev, struct dp_parser *parser);
+
+#endif /* _DP_POWER_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
new file mode 100644
index 000000000..268602803
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_reg.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */ + +#ifndef _DP_REG_H_ +#define _DP_REG_H_ + +/* DP_TX Registers */ +#define REG_DP_HW_VERSION (0x00000000) + +#define REG_DP_SW_RESET (0x00000010) +#define DP_SW_RESET (0x00000001) + +#define REG_DP_PHY_CTRL (0x00000014) +#define DP_PHY_CTRL_SW_RESET_PLL (0x00000001) +#define DP_PHY_CTRL_SW_RESET (0x00000004) + +#define REG_DP_CLK_CTRL (0x00000018) +#define REG_DP_CLK_ACTIVE (0x0000001C) +#define REG_DP_INTR_STATUS (0x00000020) +#define REG_DP_INTR_STATUS2 (0x00000024) +#define REG_DP_INTR_STATUS3 (0x00000028) + +#define REG_DP_DP_HPD_CTRL (0x00000000) +#define DP_DP_HPD_CTRL_HPD_EN (0x00000001) + +#define REG_DP_DP_HPD_INT_STATUS (0x00000004) + +#define REG_DP_DP_HPD_INT_ACK (0x00000008) +#define DP_DP_HPD_PLUG_INT_ACK (0x00000001) +#define DP_DP_IRQ_HPD_INT_ACK (0x00000002) +#define DP_DP_HPD_REPLUG_INT_ACK (0x00000004) +#define DP_DP_HPD_UNPLUG_INT_ACK (0x00000008) +#define DP_DP_HPD_STATE_STATUS_BITS_MASK (0x0000000F) +#define DP_DP_HPD_STATE_STATUS_BITS_SHIFT (0x1C) + +#define REG_DP_DP_HPD_INT_MASK (0x0000000C) +#define DP_DP_HPD_PLUG_INT_MASK (0x00000001) +#define DP_DP_IRQ_HPD_INT_MASK (0x00000002) +#define DP_DP_HPD_REPLUG_INT_MASK (0x00000004) +#define DP_DP_HPD_UNPLUG_INT_MASK (0x00000008) +#define DP_DP_HPD_INT_MASK (DP_DP_HPD_PLUG_INT_MASK | \ + DP_DP_IRQ_HPD_INT_MASK | \ + DP_DP_HPD_REPLUG_INT_MASK | \ + DP_DP_HPD_UNPLUG_INT_MASK) +#define DP_DP_HPD_STATE_STATUS_CONNECTED (0x40000000) +#define DP_DP_HPD_STATE_STATUS_PENDING (0x20000000) +#define DP_DP_HPD_STATE_STATUS_DISCONNECTED (0x00000000) +#define DP_DP_HPD_STATE_STATUS_MASK (0xE0000000) + +#define REG_DP_DP_HPD_REFTIMER (0x00000018) +#define DP_DP_HPD_REFTIMER_ENABLE (1 << 16) + +#define REG_DP_DP_HPD_EVENT_TIME_0 (0x0000001C) +#define REG_DP_DP_HPD_EVENT_TIME_1 (0x00000020) +#define DP_DP_HPD_EVENT_TIME_0_VAL (0x3E800FA) +#define DP_DP_HPD_EVENT_TIME_1_VAL (0x1F407D0) + +#define REG_DP_AUX_CTRL (0x00000030) +#define DP_AUX_CTRL_ENABLE (0x00000001) +#define DP_AUX_CTRL_RESET (0x00000002) + +#define REG_DP_AUX_DATA (0x00000034) +#define DP_AUX_DATA_READ (0x00000001) +#define DP_AUX_DATA_WRITE (0x00000000) +#define DP_AUX_DATA_OFFSET (0x00000008) +#define DP_AUX_DATA_INDEX_OFFSET (0x00000010) +#define DP_AUX_DATA_MASK (0x0000ff00) +#define DP_AUX_DATA_INDEX_WRITE (0x80000000) + +#define REG_DP_AUX_TRANS_CTRL (0x00000038) +#define DP_AUX_TRANS_CTRL_I2C (0x00000100) +#define DP_AUX_TRANS_CTRL_GO (0x00000200) +#define DP_AUX_TRANS_CTRL_NO_SEND_ADDR (0x00000400) +#define DP_AUX_TRANS_CTRL_NO_SEND_STOP (0x00000800) + +#define REG_DP_TIMEOUT_COUNT (0x0000003C) +#define REG_DP_AUX_LIMITS (0x00000040) +#define REG_DP_AUX_STATUS (0x00000044) + +#define DP_DPCD_CP_IRQ (0x201) +#define DP_DPCD_RXSTATUS (0x69493) + +#define DP_INTERRUPT_TRANS_NUM (0x000000A0) + +#define REG_DP_MAINLINK_CTRL (0x00000000) +#define DP_MAINLINK_CTRL_ENABLE (0x00000001) +#define DP_MAINLINK_CTRL_RESET (0x00000002) +#define DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER (0x00000010) +#define DP_MAINLINK_FB_BOUNDARY_SEL (0x02000000) + +#define REG_DP_STATE_CTRL (0x00000004) +#define DP_STATE_CTRL_LINK_TRAINING_PATTERN1 (0x00000001) +#define DP_STATE_CTRL_LINK_TRAINING_PATTERN2 (0x00000002) +#define DP_STATE_CTRL_LINK_TRAINING_PATTERN3 (0x00000004) +#define DP_STATE_CTRL_LINK_TRAINING_PATTERN4 (0x00000008) +#define DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE (0x00000010) +#define DP_STATE_CTRL_LINK_PRBS7 (0x00000020) +#define DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN (0x00000040) +#define DP_STATE_CTRL_SEND_VIDEO (0x00000080) +#define DP_STATE_CTRL_PUSH_IDLE (0x00000100) 
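+
+/*
+ * Editor's example: the HPD state machine status sits in the top bits of
+ * REG_DP_DP_HPD_INT_STATUS and is extracted with the mask/shift
+ * definitions above; the register read helper is hypothetical.
+ */
+#if 0	/* illustrative sketch only */
+	u32 status = example_dp_read(REG_DP_DP_HPD_INT_STATUS);
+	u32 state = (status >> DP_DP_HPD_STATE_STATUS_BITS_SHIFT) &
+			DP_DP_HPD_STATE_STATUS_BITS_MASK;
+	bool connected = (status & DP_DP_HPD_STATE_STATUS_MASK) ==
+			DP_DP_HPD_STATE_STATUS_CONNECTED;
+#endif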
+ +#define REG_DP_CONFIGURATION_CTRL (0x00000008) +#define DP_CONFIGURATION_CTRL_SYNC_ASYNC_CLK (0x00000001) +#define DP_CONFIGURATION_CTRL_STATIC_DYNAMIC_CN (0x00000002) +#define DP_CONFIGURATION_CTRL_P_INTERLACED (0x00000004) +#define DP_CONFIGURATION_CTRL_INTERLACED_BTF (0x00000008) +#define DP_CONFIGURATION_CTRL_NUM_OF_LANES (0x00000010) +#define DP_CONFIGURATION_CTRL_ENHANCED_FRAMING (0x00000040) +#define DP_CONFIGURATION_CTRL_SEND_VSC (0x00000080) +#define DP_CONFIGURATION_CTRL_BPC (0x00000100) +#define DP_CONFIGURATION_CTRL_ASSR (0x00000400) +#define DP_CONFIGURATION_CTRL_RGB_YUV (0x00000800) +#define DP_CONFIGURATION_CTRL_LSCLK_DIV (0x00002000) +#define DP_CONFIGURATION_CTRL_NUM_OF_LANES_SHIFT (0x04) +#define DP_CONFIGURATION_CTRL_BPC_SHIFT (0x08) +#define DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT (0x0D) + +#define REG_DP_SOFTWARE_MVID (0x00000010) +#define REG_DP_SOFTWARE_NVID (0x00000018) +#define REG_DP_TOTAL_HOR_VER (0x0000001C) +#define REG_DP_START_HOR_VER_FROM_SYNC (0x00000020) +#define REG_DP_HSYNC_VSYNC_WIDTH_POLARITY (0x00000024) +#define REG_DP_ACTIVE_HOR_VER (0x00000028) + +#define REG_DP_MISC1_MISC0 (0x0000002C) +#define DP_MISC0_SYNCHRONOUS_CLK (0x00000001) +#define DP_MISC0_COLORIMETRY_CFG_SHIFT (0x00000001) +#define DP_MISC0_TEST_BITS_DEPTH_SHIFT (0x00000005) + +#define REG_DP_VALID_BOUNDARY (0x00000030) +#define REG_DP_VALID_BOUNDARY_2 (0x00000034) + +#define REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING (0x00000038) +#define LANE0_MAPPING_SHIFT (0x00000000) +#define LANE1_MAPPING_SHIFT (0x00000002) +#define LANE2_MAPPING_SHIFT (0x00000004) +#define LANE3_MAPPING_SHIFT (0x00000006) + +#define REG_DP_MAINLINK_READY (0x00000040) +#define DP_MAINLINK_READY_FOR_VIDEO (0x00000001) +#define DP_MAINLINK_READY_LINK_TRAINING_SHIFT (0x00000003) + +#define REG_DP_MAINLINK_LEVELS (0x00000044) +#define DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2 (0x00000002) + + +#define REG_DP_TU (0x0000004C) + +#define REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET (0x00000054) +#define DP_HBR2_ERM_PATTERN (0x00010000) + +#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0 (0x000000C0) +#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1 (0x000000C4) +#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2 (0x000000C8) + +#define MMSS_DP_MISC1_MISC0 (0x0000002C) +#define MMSS_DP_AUDIO_TIMING_GEN (0x00000080) +#define MMSS_DP_AUDIO_TIMING_RBR_32 (0x00000084) +#define MMSS_DP_AUDIO_TIMING_HBR_32 (0x00000088) +#define MMSS_DP_AUDIO_TIMING_RBR_44 (0x0000008C) +#define MMSS_DP_AUDIO_TIMING_HBR_44 (0x00000090) +#define MMSS_DP_AUDIO_TIMING_RBR_48 (0x00000094) +#define MMSS_DP_AUDIO_TIMING_HBR_48 (0x00000098) + +#define MMSS_DP_PSR_CRC_RG (0x00000154) +#define MMSS_DP_PSR_CRC_B (0x00000158) + +#define REG_DP_COMPRESSION_MODE_CTRL (0x00000180) + +#define MMSS_DP_AUDIO_CFG (0x00000200) +#define MMSS_DP_AUDIO_STATUS (0x00000204) +#define MMSS_DP_AUDIO_PKT_CTRL (0x00000208) +#define MMSS_DP_AUDIO_PKT_CTRL2 (0x0000020C) +#define MMSS_DP_AUDIO_ACR_CTRL (0x00000210) +#define MMSS_DP_AUDIO_CTRL_RESET (0x00000214) + +#define MMSS_DP_SDP_CFG (0x00000228) +#define MMSS_DP_SDP_CFG2 (0x0000022C) +#define MMSS_DP_AUDIO_TIMESTAMP_0 (0x00000230) +#define MMSS_DP_AUDIO_TIMESTAMP_1 (0x00000234) + +#define MMSS_DP_AUDIO_STREAM_0 (0x00000240) +#define MMSS_DP_AUDIO_STREAM_1 (0x00000244) + +#define MMSS_DP_EXTENSION_0 (0x00000250) +#define MMSS_DP_EXTENSION_1 (0x00000254) +#define MMSS_DP_EXTENSION_2 (0x00000258) +#define MMSS_DP_EXTENSION_3 (0x0000025C) +#define MMSS_DP_EXTENSION_4 (0x00000260) +#define MMSS_DP_EXTENSION_5 (0x00000264) +#define MMSS_DP_EXTENSION_6 
(0x00000268) +#define MMSS_DP_EXTENSION_7 (0x0000026C) +#define MMSS_DP_EXTENSION_8 (0x00000270) +#define MMSS_DP_EXTENSION_9 (0x00000274) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_0 (0x00000278) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_1 (0x0000027C) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_2 (0x00000280) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_3 (0x00000284) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_4 (0x00000288) +#define MMSS_DP_AUDIO_COPYMANAGEMENT_5 (0x0000028C) +#define MMSS_DP_AUDIO_ISRC_0 (0x00000290) +#define MMSS_DP_AUDIO_ISRC_1 (0x00000294) +#define MMSS_DP_AUDIO_ISRC_2 (0x00000298) +#define MMSS_DP_AUDIO_ISRC_3 (0x0000029C) +#define MMSS_DP_AUDIO_ISRC_4 (0x000002A0) +#define MMSS_DP_AUDIO_ISRC_5 (0x000002A4) +#define MMSS_DP_AUDIO_INFOFRAME_0 (0x000002A8) +#define MMSS_DP_AUDIO_INFOFRAME_1 (0x000002AC) +#define MMSS_DP_AUDIO_INFOFRAME_2 (0x000002B0) + +#define MMSS_DP_GENERIC0_0 (0x00000300) +#define MMSS_DP_GENERIC0_1 (0x00000304) +#define MMSS_DP_GENERIC0_2 (0x00000308) +#define MMSS_DP_GENERIC0_3 (0x0000030C) +#define MMSS_DP_GENERIC0_4 (0x00000310) +#define MMSS_DP_GENERIC0_5 (0x00000314) +#define MMSS_DP_GENERIC0_6 (0x00000318) +#define MMSS_DP_GENERIC0_7 (0x0000031C) +#define MMSS_DP_GENERIC0_8 (0x00000320) +#define MMSS_DP_GENERIC0_9 (0x00000324) +#define MMSS_DP_GENERIC1_0 (0x00000328) +#define MMSS_DP_GENERIC1_1 (0x0000032C) +#define MMSS_DP_GENERIC1_2 (0x00000330) +#define MMSS_DP_GENERIC1_3 (0x00000334) +#define MMSS_DP_GENERIC1_4 (0x00000338) +#define MMSS_DP_GENERIC1_5 (0x0000033C) +#define MMSS_DP_GENERIC1_6 (0x00000340) +#define MMSS_DP_GENERIC1_7 (0x00000344) +#define MMSS_DP_GENERIC1_8 (0x00000348) +#define MMSS_DP_GENERIC1_9 (0x0000034C) + +#define MMSS_DP_VSCEXT_0 (0x000002D0) +#define MMSS_DP_VSCEXT_1 (0x000002D4) +#define MMSS_DP_VSCEXT_2 (0x000002D8) +#define MMSS_DP_VSCEXT_3 (0x000002DC) +#define MMSS_DP_VSCEXT_4 (0x000002E0) +#define MMSS_DP_VSCEXT_5 (0x000002E4) +#define MMSS_DP_VSCEXT_6 (0x000002E8) +#define MMSS_DP_VSCEXT_7 (0x000002EC) +#define MMSS_DP_VSCEXT_8 (0x000002F0) +#define MMSS_DP_VSCEXT_9 (0x000002F4) + +#define MMSS_DP_BIST_ENABLE (0x00000000) +#define DP_BIST_ENABLE_DPBIST_EN (0x00000001) + +#define MMSS_DP_TIMING_ENGINE_EN (0x00000010) +#define DP_TIMING_ENGINE_EN_EN (0x00000001) + +#define MMSS_DP_INTF_CONFIG (0x00000014) +#define MMSS_DP_INTF_HSYNC_CTL (0x00000018) +#define MMSS_DP_INTF_VSYNC_PERIOD_F0 (0x0000001C) +#define MMSS_DP_INTF_VSYNC_PERIOD_F1 (0x00000020) +#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0 (0x00000024) +#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1 (0x00000028) +#define MMSS_INTF_DISPLAY_V_START_F0 (0x0000002C) +#define MMSS_INTF_DISPLAY_V_START_F1 (0x00000030) +#define MMSS_DP_INTF_DISPLAY_V_END_F0 (0x00000034) +#define MMSS_DP_INTF_DISPLAY_V_END_F1 (0x00000038) +#define MMSS_DP_INTF_ACTIVE_V_START_F0 (0x0000003C) +#define MMSS_DP_INTF_ACTIVE_V_START_F1 (0x00000040) +#define MMSS_DP_INTF_ACTIVE_V_END_F0 (0x00000044) +#define MMSS_DP_INTF_ACTIVE_V_END_F1 (0x00000048) +#define MMSS_DP_INTF_DISPLAY_HCTL (0x0000004C) +#define MMSS_DP_INTF_ACTIVE_HCTL (0x00000050) +#define MMSS_DP_INTF_POLARITY_CTL (0x00000058) + +#define MMSS_DP_TPG_MAIN_CONTROL (0x00000060) +#define MMSS_DP_DSC_DTO (0x0000007C) +#define DP_TPG_CHECKERED_RECT_PATTERN (0x00000100) + +#define MMSS_DP_TPG_VIDEO_CONFIG (0x00000064) +#define DP_TPG_VIDEO_CONFIG_BPP_8BIT (0x00000001) +#define DP_TPG_VIDEO_CONFIG_RGB (0x00000004) + +#define MMSS_DP_ASYNC_FIFO_CONFIG (0x00000088) + +#define REG_DP_PHY_AUX_INTERRUPT_CLEAR (0x0000004C) +#define REG_DP_PHY_AUX_BIST_CFG (0x00000050) 
+#define REG_DP_PHY_AUX_INTERRUPT_STATUS (0x000000BC) + +/* DP HDCP 1.3 registers */ +#define DP_HDCP_CTRL (0x0A0) +#define DP_HDCP_STATUS (0x0A4) +#define DP_HDCP_SW_UPPER_AKSV (0x098) +#define DP_HDCP_SW_LOWER_AKSV (0x09C) +#define DP_HDCP_ENTROPY_CTRL0 (0x350) +#define DP_HDCP_ENTROPY_CTRL1 (0x35C) +#define DP_HDCP_SHA_STATUS (0x0C8) +#define DP_HDCP_RCVPORT_DATA2_0 (0x0B0) +#define DP_HDCP_RCVPORT_DATA3 (0x0A4) +#define DP_HDCP_RCVPORT_DATA4 (0x0A8) +#define DP_HDCP_RCVPORT_DATA5 (0x0C0) +#define DP_HDCP_RCVPORT_DATA6 (0x0C4) + +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_CTRL (0x024) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_DATA (0x028) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA0 (0x004) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA1 (0x008) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA7 (0x00C) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA8 (0x010) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA9 (0x014) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA10 (0x018) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA11 (0x01C) +#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA12 (0x020) + +#endif /* _DP_REG_H_ */ diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c new file mode 100644 index 000000000..e9036e403 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + */ + +#include "dsi.h" +#include "dsi_cfg.h" + +bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi) +{ + unsigned long host_flags = msm_dsi_host_get_mode_flags(msm_dsi->host); + + return !(host_flags & MIPI_DSI_MODE_VIDEO); +} + +struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi) +{ + return msm_dsi_host_get_dsc_config(msm_dsi->host); +} + +static int dsi_get_phy(struct msm_dsi *msm_dsi) +{ + struct platform_device *pdev = msm_dsi->pdev; + struct platform_device *phy_pdev; + struct device_node *phy_node; + + phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0); + if (!phy_node) { + DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n"); + return -ENXIO; + } + + phy_pdev = of_find_device_by_node(phy_node); + if (phy_pdev) { + msm_dsi->phy = platform_get_drvdata(phy_pdev); + msm_dsi->phy_dev = &phy_pdev->dev; + } + + of_node_put(phy_node); + + if (!phy_pdev) { + DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__); + return -EPROBE_DEFER; + } + if (!msm_dsi->phy) { + put_device(&phy_pdev->dev); + DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__); + return -EPROBE_DEFER; + } + + return 0; +} + +static void dsi_destroy(struct msm_dsi *msm_dsi) +{ + if (!msm_dsi) + return; + + msm_dsi_manager_unregister(msm_dsi); + + if (msm_dsi->phy_dev) { + put_device(msm_dsi->phy_dev); + msm_dsi->phy = NULL; + msm_dsi->phy_dev = NULL; + } + + if (msm_dsi->host) { + msm_dsi_host_destroy(msm_dsi->host); + msm_dsi->host = NULL; + } + + platform_set_drvdata(msm_dsi->pdev, NULL); +} + +static struct msm_dsi *dsi_init(struct platform_device *pdev) +{ + struct msm_dsi *msm_dsi; + int ret; + + if (!pdev) + return ERR_PTR(-ENXIO); + + msm_dsi = devm_kzalloc(&pdev->dev, sizeof(*msm_dsi), GFP_KERNEL); + if (!msm_dsi) + return ERR_PTR(-ENOMEM); + DBG("dsi probed=%p", msm_dsi); + + msm_dsi->id = -1; + msm_dsi->pdev = pdev; + platform_set_drvdata(pdev, msm_dsi); + + /* Init dsi host */ + ret = msm_dsi_host_init(msm_dsi); + if (ret) + goto destroy_dsi; + + /* GET dsi PHY */ + ret = dsi_get_phy(msm_dsi); + if (ret) + goto destroy_dsi; + + /* 
Register to dsi manager */ + ret = msm_dsi_manager_register(msm_dsi); + if (ret) + goto destroy_dsi; + + return msm_dsi; + +destroy_dsi: + dsi_destroy(msm_dsi); + return ERR_PTR(ret); +} + +static int dsi_bind(struct device *dev, struct device *master, void *data) +{ + struct msm_drm_private *priv = dev_get_drvdata(master); + struct msm_dsi *msm_dsi = dev_get_drvdata(dev); + + priv->dsi[msm_dsi->id] = msm_dsi; + + return 0; +} + +static void dsi_unbind(struct device *dev, struct device *master, + void *data) +{ + struct msm_drm_private *priv = dev_get_drvdata(master); + struct msm_dsi *msm_dsi = dev_get_drvdata(dev); + + msm_dsi_tx_buf_free(msm_dsi->host); + priv->dsi[msm_dsi->id] = NULL; +} + +static const struct component_ops dsi_ops = { + .bind = dsi_bind, + .unbind = dsi_unbind, +}; + +int dsi_dev_attach(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &dsi_ops); +} + +void dsi_dev_detach(struct platform_device *pdev) +{ + component_del(&pdev->dev, &dsi_ops); +} + +static int dsi_dev_probe(struct platform_device *pdev) +{ + struct msm_dsi *msm_dsi; + + DBG(""); + msm_dsi = dsi_init(pdev); + if (IS_ERR(msm_dsi)) { + /* Don't fail the bind if the dsi port is not connected */ + if (PTR_ERR(msm_dsi) == -ENODEV) + return 0; + else + return PTR_ERR(msm_dsi); + } + + return 0; +} + +static int dsi_dev_remove(struct platform_device *pdev) +{ + struct msm_dsi *msm_dsi = platform_get_drvdata(pdev); + + DBG(""); + dsi_destroy(msm_dsi); + + return 0; +} + +static const struct of_device_id dt_match[] = { + { .compatible = "qcom,mdss-dsi-ctrl", .data = NULL /* autodetect cfg */ }, + { .compatible = "qcom,dsi-ctrl-6g-qcm2290", .data = &qcm2290_dsi_cfg_handler }, + {} +}; + +static const struct dev_pm_ops dsi_pm_ops = { + SET_RUNTIME_PM_OPS(msm_dsi_runtime_suspend, msm_dsi_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) +}; + +static struct platform_driver dsi_driver = { + .probe = dsi_dev_probe, + .remove = dsi_dev_remove, + .driver = { + .name = "msm_dsi", + .of_match_table = dt_match, + .pm = &dsi_pm_ops, + }, +}; + +void __init msm_dsi_register(void) +{ + DBG(""); + msm_dsi_phy_driver_register(); + platform_driver_register(&dsi_driver); +} + +void __exit msm_dsi_unregister(void) +{ + DBG(""); + msm_dsi_phy_driver_unregister(); + platform_driver_unregister(&dsi_driver); +} + +int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, + struct drm_encoder *encoder) +{ + struct msm_drm_private *priv; + int ret; + + if (WARN_ON(!encoder) || WARN_ON(!msm_dsi) || WARN_ON(!dev)) + return -EINVAL; + + priv = dev->dev_private; + + if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) { + DRM_DEV_ERROR(dev->dev, "too many bridges\n"); + return -ENOSPC; + } + + msm_dsi->dev = dev; + + ret = msm_dsi_host_modeset_init(msm_dsi->host, dev); + if (ret) { + DRM_DEV_ERROR(dev->dev, "failed to modeset init host: %d\n", ret); + goto fail; + } + + if (msm_dsi_is_bonded_dsi(msm_dsi) && + !msm_dsi_is_master_dsi(msm_dsi)) { + /* + * Do not return an error here, + * just skip creating encoder/connector for the slave-DSI.
+ */ + return 0; + } + + msm_dsi->encoder = encoder; + + msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id); + if (IS_ERR(msm_dsi->bridge)) { + ret = PTR_ERR(msm_dsi->bridge); + DRM_DEV_ERROR(dev->dev, "failed to create dsi bridge: %d\n", ret); + msm_dsi->bridge = NULL; + goto fail; + } + + ret = msm_dsi_manager_ext_bridge_init(msm_dsi->id); + if (ret) { + DRM_DEV_ERROR(dev->dev, + "failed to create dsi connector: %d\n", ret); + goto fail; + } + + priv->bridges[priv->num_bridges++] = msm_dsi->bridge; + + return 0; +fail: + /* bridge/connector are normally destroyed by drm: */ + if (msm_dsi->bridge) { + msm_dsi_manager_bridge_destroy(msm_dsi->bridge); + msm_dsi->bridge = NULL; + } + + return ret; +} + +void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi) +{ + msm_dsi_host_snapshot(disp_state, msm_dsi->host); + msm_dsi_phy_snapshot(disp_state, msm_dsi->phy); +} + diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h new file mode 100644 index 000000000..6b239f77f --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + */ + +#ifndef __DSI_CONNECTOR_H__ +#define __DSI_CONNECTOR_H__ + +#include <linux/of_platform.h> +#include <linux/platform_device.h> + +#include <drm/drm_bridge.h> +#include <drm/drm_crtc.h> +#include <drm/drm_mipi_dsi.h> + +#include "msm_drv.h" +#include "disp/msm_disp_snapshot.h" + +#define DSI_0 0 +#define DSI_1 1 +#define DSI_MAX 2 + +struct msm_dsi_phy_shared_timings; +struct msm_dsi_phy_clk_request; + +enum msm_dsi_phy_usecase { + MSM_DSI_PHY_STANDALONE, + MSM_DSI_PHY_MASTER, + MSM_DSI_PHY_SLAVE, +}; + +#define DSI_BUS_CLK_MAX 4 + +struct msm_dsi { + struct drm_device *dev; + struct platform_device *pdev; + + /* internal dsi bridge attached to MDP interface */ + struct drm_bridge *bridge; + + struct mipi_dsi_host *host; + struct msm_dsi_phy *phy; + + /* + * external_bridge connected to dsi bridge output + */ + struct drm_bridge *external_bridge; + + struct device *phy_dev; + bool phy_enabled; + + /* the encoder we are hooked to (outside of dsi block) */ + struct drm_encoder *encoder; + + int id; +}; + +/* dsi manager */ +struct drm_bridge *msm_dsi_manager_bridge_init(u8 id); +void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge); +int msm_dsi_manager_ext_bridge_init(u8 id); +int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg); +bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len); +int msm_dsi_manager_register(struct msm_dsi *msm_dsi); +void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi); +void msm_dsi_manager_tpg_enable(void); + +/* msm dsi */ +static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi) +{ + return msm_dsi->external_bridge; +} + +/* dsi host */ +struct msm_dsi_host; +int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg); +void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg); +int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg); +int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg); +void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, + u32 dma_base, u32 len); +int msm_dsi_host_enable(struct mipi_dsi_host *host); +int msm_dsi_host_disable(struct mipi_dsi_host *host); +void msm_dsi_host_enable_irq(struct mipi_dsi_host *host); +void msm_dsi_host_disable_irq(struct mipi_dsi_host *host); +int msm_dsi_host_power_on(struct mipi_dsi_host *host, + struct 
msm_dsi_phy_shared_timings *phy_shared_timings, + bool is_bonded_dsi, struct msm_dsi_phy *phy); +int msm_dsi_host_power_off(struct mipi_dsi_host *host); +int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, + const struct drm_display_mode *mode); +enum drm_mode_status msm_dsi_host_check_dsc(struct mipi_dsi_host *host, + const struct drm_display_mode *mode); +unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host); +int msm_dsi_host_register(struct mipi_dsi_host *host); +void msm_dsi_host_unregister(struct mipi_dsi_host *host); +void msm_dsi_host_set_phy_mode(struct mipi_dsi_host *host, + struct msm_dsi_phy *src_phy); +int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host, + struct msm_dsi_phy *src_phy); +void msm_dsi_host_reset_phy(struct mipi_dsi_host *host); +void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host, + struct msm_dsi_phy_clk_request *clk_req, + bool is_bonded_dsi); +void msm_dsi_host_destroy(struct mipi_dsi_host *host); +int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, + struct drm_device *dev); +int msm_dsi_host_init(struct msm_dsi *msm_dsi); +int msm_dsi_runtime_suspend(struct device *dev); +int msm_dsi_runtime_resume(struct device *dev); +int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host); +int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host); +int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host); +int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host); +void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host); +void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host); +int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size); +int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size); +void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host); +void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host); +void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host); +void msm_dsi_tx_buf_free(struct mipi_dsi_host *mipi_host); +int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova); +int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova); +int dsi_clk_init_v2(struct msm_dsi_host *msm_host); +int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host); +int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi); +int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi); +void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host); +void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host); +struct drm_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host); + +/* dsi phy */ +struct msm_dsi_phy; +struct msm_dsi_phy_shared_timings { + u32 clk_post; + u32 clk_pre; + bool clk_pre_inc_by_2; +}; + +struct msm_dsi_phy_clk_request { + unsigned long bitclk_rate; + unsigned long escclk_rate; +}; + +void msm_dsi_phy_driver_register(void); +void msm_dsi_phy_driver_unregister(void); +int msm_dsi_phy_enable(struct msm_dsi_phy *phy, + struct msm_dsi_phy_clk_request *clk_req, + struct msm_dsi_phy_shared_timings *shared_timings); +void msm_dsi_phy_disable(struct msm_dsi_phy *phy); +void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy, + enum msm_dsi_phy_usecase uc); +void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy); +int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy); +void msm_dsi_phy_snapshot(struct msm_disp_state *disp_state, struct msm_dsi_phy *phy); +bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable); + +#endif /* __DSI_CONNECTOR_H__ */ + diff --git 
a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h new file mode 100644 index 000000000..d1b2a17b0 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h @@ -0,0 +1,788 @@ +#ifndef DSI_XML +#define DSI_XML + +/* Autogenerated file, DO NOT EDIT manually! + +This file was generated by the rules-ng-ng headergen tool in this git repository: +http://github.com/freedreno/envytools/ +git clone https://github.com/freedreno/envytools.git + +The rules-ng-ng source files this header was generated from are: +- /home/robclark/tmp/mesa/src/freedreno/registers/msm.xml ( 944 bytes, from 2022-03-03 01:18:13) +- /home/robclark/tmp/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2020-12-31 19:26:32) +- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi.xml ( 17560 bytes, from 2021-09-16 22:37:02) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-07-22 15:21:56) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 11007 bytes, from 2022-03-03 01:18:13) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-01-30 18:25:22) +- /home/robclark/tmp/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-01-30 18:25:22) + +Copyright (C) 2013-2021 by the following authors: +- Rob Clark (robclark) +- Ilia Mirkin (imirkin) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ + + +enum dsi_traffic_mode { + NON_BURST_SYNCH_PULSE = 0, + NON_BURST_SYNCH_EVENT = 1, + BURST_MODE = 2, +}; + +enum dsi_vid_dst_format { + VID_DST_FORMAT_RGB565 = 0, + VID_DST_FORMAT_RGB666 = 1, + VID_DST_FORMAT_RGB666_LOOSE = 2, + VID_DST_FORMAT_RGB888 = 3, +}; + +enum dsi_rgb_swap { + SWAP_RGB = 0, + SWAP_RBG = 1, + SWAP_BGR = 2, + SWAP_BRG = 3, + SWAP_GRB = 4, + SWAP_GBR = 5, +}; + +enum dsi_cmd_trigger { + TRIGGER_NONE = 0, + TRIGGER_SEOF = 1, + TRIGGER_TE = 2, + TRIGGER_SW = 4, + TRIGGER_SW_SEOF = 5, + TRIGGER_SW_TE = 6, +}; + +enum dsi_cmd_dst_format { + CMD_DST_FORMAT_RGB111 = 0, + CMD_DST_FORMAT_RGB332 = 3, + CMD_DST_FORMAT_RGB444 = 4, + CMD_DST_FORMAT_RGB565 = 6, + CMD_DST_FORMAT_RGB666 = 7, + CMD_DST_FORMAT_RGB888 = 8, +}; + +enum dsi_lane_swap { + LANE_SWAP_0123 = 0, + LANE_SWAP_3012 = 1, + LANE_SWAP_2301 = 2, + LANE_SWAP_1230 = 3, + LANE_SWAP_0321 = 4, + LANE_SWAP_1032 = 5, + LANE_SWAP_2103 = 6, + LANE_SWAP_3210 = 7, +}; + +enum video_config_bpp { + VIDEO_CONFIG_18BPP = 0, + VIDEO_CONFIG_24BPP = 1, +}; + +enum video_pattern_sel { + VID_PRBS = 0, + VID_INCREMENTAL = 1, + VID_FIXED = 2, + VID_MDSS_GENERAL_PATTERN = 3, +}; + +enum cmd_mdp_stream0_pattern_sel { + CMD_MDP_PRBS = 0, + CMD_MDP_INCREMENTAL = 1, + CMD_MDP_FIXED = 2, + CMD_MDP_MDSS_GENERAL_PATTERN = 3, +}; + +enum cmd_dma_pattern_sel { + CMD_DMA_PRBS = 0, + CMD_DMA_INCREMENTAL = 1, + CMD_DMA_FIXED = 2, + CMD_DMA_CUSTOM_PATTERN_DMA_FIFO = 3, +}; + +#define DSI_IRQ_CMD_DMA_DONE 0x00000001 +#define DSI_IRQ_MASK_CMD_DMA_DONE 0x00000002 +#define DSI_IRQ_CMD_MDP_DONE 0x00000100 +#define DSI_IRQ_MASK_CMD_MDP_DONE 0x00000200 +#define DSI_IRQ_VIDEO_DONE 0x00010000 +#define DSI_IRQ_MASK_VIDEO_DONE 0x00020000 +#define DSI_IRQ_BTA_DONE 0x00100000 +#define DSI_IRQ_MASK_BTA_DONE 0x00200000 +#define DSI_IRQ_ERROR 0x01000000 +#define DSI_IRQ_MASK_ERROR 0x02000000 +#define REG_DSI_6G_HW_VERSION 0x00000000 +#define DSI_6G_HW_VERSION_MAJOR__MASK 0xf0000000 +#define DSI_6G_HW_VERSION_MAJOR__SHIFT 28 +static inline uint32_t DSI_6G_HW_VERSION_MAJOR(uint32_t val) +{ + return ((val) << DSI_6G_HW_VERSION_MAJOR__SHIFT) & DSI_6G_HW_VERSION_MAJOR__MASK; +} +#define DSI_6G_HW_VERSION_MINOR__MASK 0x0fff0000 +#define DSI_6G_HW_VERSION_MINOR__SHIFT 16 +static inline uint32_t DSI_6G_HW_VERSION_MINOR(uint32_t val) +{ + return ((val) << DSI_6G_HW_VERSION_MINOR__SHIFT) & DSI_6G_HW_VERSION_MINOR__MASK; +} +#define DSI_6G_HW_VERSION_STEP__MASK 0x0000ffff +#define DSI_6G_HW_VERSION_STEP__SHIFT 0 +static inline uint32_t DSI_6G_HW_VERSION_STEP(uint32_t val) +{ + return ((val) << DSI_6G_HW_VERSION_STEP__SHIFT) & DSI_6G_HW_VERSION_STEP__MASK; +} + +#define REG_DSI_CTRL 0x00000000 +#define DSI_CTRL_ENABLE 0x00000001 +#define DSI_CTRL_VID_MODE_EN 0x00000002 +#define DSI_CTRL_CMD_MODE_EN 0x00000004 +#define DSI_CTRL_LANE0 0x00000010 +#define DSI_CTRL_LANE1 0x00000020 +#define DSI_CTRL_LANE2 0x00000040 +#define DSI_CTRL_LANE3 0x00000080 +#define DSI_CTRL_CLK_EN 0x00000100 +#define DSI_CTRL_ECC_CHECK 0x00100000 +#define DSI_CTRL_CRC_CHECK 0x01000000 + +#define REG_DSI_STATUS0 0x00000004 +#define DSI_STATUS0_CMD_MODE_ENGINE_BUSY 0x00000001 +#define DSI_STATUS0_CMD_MODE_DMA_BUSY 0x00000002 +#define DSI_STATUS0_CMD_MODE_MDP_BUSY 0x00000004 +#define DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY 0x00000008 
+#define DSI_STATUS0_DSI_BUSY 0x00000010 +#define DSI_STATUS0_INTERLEAVE_OP_CONTENTION 0x80000000 + +#define REG_DSI_FIFO_STATUS 0x00000008 +#define DSI_FIFO_STATUS_VIDEO_MDP_FIFO_OVERFLOW 0x00000001 +#define DSI_FIFO_STATUS_VIDEO_MDP_FIFO_UNDERFLOW 0x00000008 +#define DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW 0x00000080 +#define DSI_FIFO_STATUS_CMD_DMA_FIFO_RD_WATERMARK_REACH 0x00000100 +#define DSI_FIFO_STATUS_CMD_DMA_FIFO_WR_WATERMARK_REACH 0x00000200 +#define DSI_FIFO_STATUS_CMD_DMA_FIFO_UNDERFLOW 0x00000400 +#define DSI_FIFO_STATUS_DLN0_LP_FIFO_EMPTY 0x00001000 +#define DSI_FIFO_STATUS_DLN0_LP_FIFO_FULL 0x00002000 +#define DSI_FIFO_STATUS_DLN0_LP_FIFO_OVERFLOW 0x00004000 +#define DSI_FIFO_STATUS_DLN0_HS_FIFO_EMPTY 0x00010000 +#define DSI_FIFO_STATUS_DLN0_HS_FIFO_FULL 0x00020000 +#define DSI_FIFO_STATUS_DLN0_HS_FIFO_OVERFLOW 0x00040000 +#define DSI_FIFO_STATUS_DLN0_HS_FIFO_UNDERFLOW 0x00080000 +#define DSI_FIFO_STATUS_DLN1_HS_FIFO_EMPTY 0x00100000 +#define DSI_FIFO_STATUS_DLN1_HS_FIFO_FULL 0x00200000 +#define DSI_FIFO_STATUS_DLN1_HS_FIFO_OVERFLOW 0x00400000 +#define DSI_FIFO_STATUS_DLN1_HS_FIFO_UNDERFLOW 0x00800000 +#define DSI_FIFO_STATUS_DLN2_HS_FIFO_EMPTY 0x01000000 +#define DSI_FIFO_STATUS_DLN2_HS_FIFO_FULL 0x02000000 +#define DSI_FIFO_STATUS_DLN2_HS_FIFO_OVERFLOW 0x04000000 +#define DSI_FIFO_STATUS_DLN2_HS_FIFO_UNDERFLOW 0x08000000 +#define DSI_FIFO_STATUS_DLN3_HS_FIFO_EMPTY 0x10000000 +#define DSI_FIFO_STATUS_DLN3_HS_FIFO_FULL 0x20000000 +#define DSI_FIFO_STATUS_DLN3_HS_FIFO_OVERFLOW 0x40000000 +#define DSI_FIFO_STATUS_DLN3_HS_FIFO_UNDERFLOW 0x80000000 + +#define REG_DSI_VID_CFG0 0x0000000c +#define DSI_VID_CFG0_VIRT_CHANNEL__MASK 0x00000003 +#define DSI_VID_CFG0_VIRT_CHANNEL__SHIFT 0 +static inline uint32_t DSI_VID_CFG0_VIRT_CHANNEL(uint32_t val) +{ + return ((val) << DSI_VID_CFG0_VIRT_CHANNEL__SHIFT) & DSI_VID_CFG0_VIRT_CHANNEL__MASK; +} +#define DSI_VID_CFG0_DST_FORMAT__MASK 0x00000030 +#define DSI_VID_CFG0_DST_FORMAT__SHIFT 4 +static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_vid_dst_format val) +{ + return ((val) << DSI_VID_CFG0_DST_FORMAT__SHIFT) & DSI_VID_CFG0_DST_FORMAT__MASK; +} +#define DSI_VID_CFG0_TRAFFIC_MODE__MASK 0x00000300 +#define DSI_VID_CFG0_TRAFFIC_MODE__SHIFT 8 +static inline uint32_t DSI_VID_CFG0_TRAFFIC_MODE(enum dsi_traffic_mode val) +{ + return ((val) << DSI_VID_CFG0_TRAFFIC_MODE__SHIFT) & DSI_VID_CFG0_TRAFFIC_MODE__MASK; +} +#define DSI_VID_CFG0_BLLP_POWER_STOP 0x00001000 +#define DSI_VID_CFG0_EOF_BLLP_POWER_STOP 0x00008000 +#define DSI_VID_CFG0_HSA_POWER_STOP 0x00010000 +#define DSI_VID_CFG0_HBP_POWER_STOP 0x00100000 +#define DSI_VID_CFG0_HFP_POWER_STOP 0x01000000 +#define DSI_VID_CFG0_PULSE_MODE_HSA_HE 0x10000000 + +#define REG_DSI_VID_CFG1 0x0000001c +#define DSI_VID_CFG1_R_SEL 0x00000001 +#define DSI_VID_CFG1_G_SEL 0x00000010 +#define DSI_VID_CFG1_B_SEL 0x00000100 +#define DSI_VID_CFG1_RGB_SWAP__MASK 0x00007000 +#define DSI_VID_CFG1_RGB_SWAP__SHIFT 12 +static inline uint32_t DSI_VID_CFG1_RGB_SWAP(enum dsi_rgb_swap val) +{ + return ((val) << DSI_VID_CFG1_RGB_SWAP__SHIFT) & DSI_VID_CFG1_RGB_SWAP__MASK; +} + +#define REG_DSI_ACTIVE_H 0x00000020 +#define DSI_ACTIVE_H_START__MASK 0x00000fff +#define DSI_ACTIVE_H_START__SHIFT 0 +static inline uint32_t DSI_ACTIVE_H_START(uint32_t val) +{ + return ((val) << DSI_ACTIVE_H_START__SHIFT) & DSI_ACTIVE_H_START__MASK; +} +#define DSI_ACTIVE_H_END__MASK 0x0fff0000 +#define DSI_ACTIVE_H_END__SHIFT 16 +static inline uint32_t DSI_ACTIVE_H_END(uint32_t val) +{ + return ((val) << DSI_ACTIVE_H_END__SHIFT) & 
DSI_ACTIVE_H_END__MASK; +} + +#define REG_DSI_ACTIVE_V 0x00000024 +#define DSI_ACTIVE_V_START__MASK 0x00000fff +#define DSI_ACTIVE_V_START__SHIFT 0 +static inline uint32_t DSI_ACTIVE_V_START(uint32_t val) +{ + return ((val) << DSI_ACTIVE_V_START__SHIFT) & DSI_ACTIVE_V_START__MASK; +} +#define DSI_ACTIVE_V_END__MASK 0x0fff0000 +#define DSI_ACTIVE_V_END__SHIFT 16 +static inline uint32_t DSI_ACTIVE_V_END(uint32_t val) +{ + return ((val) << DSI_ACTIVE_V_END__SHIFT) & DSI_ACTIVE_V_END__MASK; +} + +#define REG_DSI_TOTAL 0x00000028 +#define DSI_TOTAL_H_TOTAL__MASK 0x00000fff +#define DSI_TOTAL_H_TOTAL__SHIFT 0 +static inline uint32_t DSI_TOTAL_H_TOTAL(uint32_t val) +{ + return ((val) << DSI_TOTAL_H_TOTAL__SHIFT) & DSI_TOTAL_H_TOTAL__MASK; +} +#define DSI_TOTAL_V_TOTAL__MASK 0x0fff0000 +#define DSI_TOTAL_V_TOTAL__SHIFT 16 +static inline uint32_t DSI_TOTAL_V_TOTAL(uint32_t val) +{ + return ((val) << DSI_TOTAL_V_TOTAL__SHIFT) & DSI_TOTAL_V_TOTAL__MASK; +} + +#define REG_DSI_ACTIVE_HSYNC 0x0000002c +#define DSI_ACTIVE_HSYNC_START__MASK 0x00000fff +#define DSI_ACTIVE_HSYNC_START__SHIFT 0 +static inline uint32_t DSI_ACTIVE_HSYNC_START(uint32_t val) +{ + return ((val) << DSI_ACTIVE_HSYNC_START__SHIFT) & DSI_ACTIVE_HSYNC_START__MASK; +} +#define DSI_ACTIVE_HSYNC_END__MASK 0x0fff0000 +#define DSI_ACTIVE_HSYNC_END__SHIFT 16 +static inline uint32_t DSI_ACTIVE_HSYNC_END(uint32_t val) +{ + return ((val) << DSI_ACTIVE_HSYNC_END__SHIFT) & DSI_ACTIVE_HSYNC_END__MASK; +} + +#define REG_DSI_ACTIVE_VSYNC_HPOS 0x00000030 +#define DSI_ACTIVE_VSYNC_HPOS_START__MASK 0x00000fff +#define DSI_ACTIVE_VSYNC_HPOS_START__SHIFT 0 +static inline uint32_t DSI_ACTIVE_VSYNC_HPOS_START(uint32_t val) +{ + return ((val) << DSI_ACTIVE_VSYNC_HPOS_START__SHIFT) & DSI_ACTIVE_VSYNC_HPOS_START__MASK; +} +#define DSI_ACTIVE_VSYNC_HPOS_END__MASK 0x0fff0000 +#define DSI_ACTIVE_VSYNC_HPOS_END__SHIFT 16 +static inline uint32_t DSI_ACTIVE_VSYNC_HPOS_END(uint32_t val) +{ + return ((val) << DSI_ACTIVE_VSYNC_HPOS_END__SHIFT) & DSI_ACTIVE_VSYNC_HPOS_END__MASK; +} + +#define REG_DSI_ACTIVE_VSYNC_VPOS 0x00000034 +#define DSI_ACTIVE_VSYNC_VPOS_START__MASK 0x00000fff +#define DSI_ACTIVE_VSYNC_VPOS_START__SHIFT 0 +static inline uint32_t DSI_ACTIVE_VSYNC_VPOS_START(uint32_t val) +{ + return ((val) << DSI_ACTIVE_VSYNC_VPOS_START__SHIFT) & DSI_ACTIVE_VSYNC_VPOS_START__MASK; +} +#define DSI_ACTIVE_VSYNC_VPOS_END__MASK 0x0fff0000 +#define DSI_ACTIVE_VSYNC_VPOS_END__SHIFT 16 +static inline uint32_t DSI_ACTIVE_VSYNC_VPOS_END(uint32_t val) +{ + return ((val) << DSI_ACTIVE_VSYNC_VPOS_END__SHIFT) & DSI_ACTIVE_VSYNC_VPOS_END__MASK; +} + +#define REG_DSI_CMD_DMA_CTRL 0x00000038 +#define DSI_CMD_DMA_CTRL_BROADCAST_EN 0x80000000 +#define DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER 0x10000000 +#define DSI_CMD_DMA_CTRL_LOW_POWER 0x04000000 + +#define REG_DSI_CMD_CFG0 0x0000003c +#define DSI_CMD_CFG0_DST_FORMAT__MASK 0x0000000f +#define DSI_CMD_CFG0_DST_FORMAT__SHIFT 0 +static inline uint32_t DSI_CMD_CFG0_DST_FORMAT(enum dsi_cmd_dst_format val) +{ + return ((val) << DSI_CMD_CFG0_DST_FORMAT__SHIFT) & DSI_CMD_CFG0_DST_FORMAT__MASK; +} +#define DSI_CMD_CFG0_R_SEL 0x00000010 +#define DSI_CMD_CFG0_G_SEL 0x00000100 +#define DSI_CMD_CFG0_B_SEL 0x00001000 +#define DSI_CMD_CFG0_INTERLEAVE_MAX__MASK 0x00f00000 +#define DSI_CMD_CFG0_INTERLEAVE_MAX__SHIFT 20 +static inline uint32_t DSI_CMD_CFG0_INTERLEAVE_MAX(uint32_t val) +{ + return ((val) << DSI_CMD_CFG0_INTERLEAVE_MAX__SHIFT) & DSI_CMD_CFG0_INTERLEAVE_MAX__MASK; +} +#define DSI_CMD_CFG0_RGB_SWAP__MASK 0x00070000 +#define 
DSI_CMD_CFG0_RGB_SWAP__SHIFT 16 +static inline uint32_t DSI_CMD_CFG0_RGB_SWAP(enum dsi_rgb_swap val) +{ + return ((val) << DSI_CMD_CFG0_RGB_SWAP__SHIFT) & DSI_CMD_CFG0_RGB_SWAP__MASK; +} + +#define REG_DSI_CMD_CFG1 0x00000040 +#define DSI_CMD_CFG1_WR_MEM_START__MASK 0x000000ff +#define DSI_CMD_CFG1_WR_MEM_START__SHIFT 0 +static inline uint32_t DSI_CMD_CFG1_WR_MEM_START(uint32_t val) +{ + return ((val) << DSI_CMD_CFG1_WR_MEM_START__SHIFT) & DSI_CMD_CFG1_WR_MEM_START__MASK; +} +#define DSI_CMD_CFG1_WR_MEM_CONTINUE__MASK 0x0000ff00 +#define DSI_CMD_CFG1_WR_MEM_CONTINUE__SHIFT 8 +static inline uint32_t DSI_CMD_CFG1_WR_MEM_CONTINUE(uint32_t val) +{ + return ((val) << DSI_CMD_CFG1_WR_MEM_CONTINUE__SHIFT) & DSI_CMD_CFG1_WR_MEM_CONTINUE__MASK; +} +#define DSI_CMD_CFG1_INSERT_DCS_COMMAND 0x00010000 + +#define REG_DSI_DMA_BASE 0x00000044 + +#define REG_DSI_DMA_LEN 0x00000048 + +#define REG_DSI_CMD_MDP_STREAM0_CTRL 0x00000054 +#define DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__MASK 0x0000003f +#define DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__SHIFT 0 +static inline uint32_t DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE(uint32_t val) +{ + return ((val) << DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE__MASK; +} +#define DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__MASK 0x00000300 +#define DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__SHIFT 8 +static inline uint32_t DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL(uint32_t val) +{ + return ((val) << DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL__MASK; +} +#define DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__MASK 0xffff0000 +#define DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__SHIFT 16 +static inline uint32_t DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT(uint32_t val) +{ + return ((val) << DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT__MASK; +} + +#define REG_DSI_CMD_MDP_STREAM0_TOTAL 0x00000058 +#define DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__MASK 0x00000fff +#define DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__SHIFT 0 +static inline uint32_t DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL(uint32_t val) +{ + return ((val) << DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL__MASK; +} +#define DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__MASK 0x0fff0000 +#define DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__SHIFT 16 +static inline uint32_t DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL(uint32_t val) +{ + return ((val) << DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL__MASK; +} + +#define REG_DSI_CMD_MDP_STREAM1_CTRL 0x0000005c +#define DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__MASK 0x0000003f +#define DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__SHIFT 0 +static inline uint32_t DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE(uint32_t val) +{ + return ((val) << DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MDP_STREAM1_CTRL_DATA_TYPE__MASK; +} +#define DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__MASK 0x00000300 +#define DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__SHIFT 8 +static inline uint32_t DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL(uint32_t val) +{ + return ((val) << DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MDP_STREAM1_CTRL_VIRTUAL_CHANNEL__MASK; +} +#define DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__MASK 0xffff0000 +#define DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__SHIFT 16 +static inline uint32_t DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT(uint32_t val) +{ + return ((val) << DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MDP_STREAM1_CTRL_WORD_COUNT__MASK; +} + +#define REG_DSI_CMD_MDP_STREAM1_TOTAL 0x00000060 +#define 
DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__MASK 0x0000ffff +#define DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__SHIFT 0 +static inline uint32_t DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL(uint32_t val) +{ + return ((val) << DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM1_TOTAL_H_TOTAL__MASK; +} +#define DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__MASK 0xffff0000 +#define DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__SHIFT 16 +static inline uint32_t DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL(uint32_t val) +{ + return ((val) << DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__SHIFT) & DSI_CMD_MDP_STREAM1_TOTAL_V_TOTAL__MASK; +} + +#define REG_DSI_ACK_ERR_STATUS 0x00000064 + +static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; } + +static inline uint32_t REG_DSI_RDBK_DATA(uint32_t i0) { return 0x00000068 + 0x4*i0; } + +#define REG_DSI_TRIG_CTRL 0x00000080 +#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK 0x00000007 +#define DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT 0 +static inline uint32_t DSI_TRIG_CTRL_DMA_TRIGGER(enum dsi_cmd_trigger val) +{ + return ((val) << DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT) & DSI_TRIG_CTRL_DMA_TRIGGER__MASK; +} +#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK 0x00000070 +#define DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT 4 +static inline uint32_t DSI_TRIG_CTRL_MDP_TRIGGER(enum dsi_cmd_trigger val) +{ + return ((val) << DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT) & DSI_TRIG_CTRL_MDP_TRIGGER__MASK; +} +#define DSI_TRIG_CTRL_STREAM__MASK 0x00000300 +#define DSI_TRIG_CTRL_STREAM__SHIFT 8 +static inline uint32_t DSI_TRIG_CTRL_STREAM(uint32_t val) +{ + return ((val) << DSI_TRIG_CTRL_STREAM__SHIFT) & DSI_TRIG_CTRL_STREAM__MASK; +} +#define DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME 0x00001000 +#define DSI_TRIG_CTRL_TE 0x80000000 + +#define REG_DSI_TRIG_DMA 0x0000008c + +#define REG_DSI_DLN0_PHY_ERR 0x000000b0 +#define DSI_DLN0_PHY_ERR_DLN0_ERR_ESC 0x00000001 +#define DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC 0x00000010 +#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL 0x00000100 +#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 0x00001000 +#define DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1 0x00010000 + +#define REG_DSI_LP_TIMER_CTRL 0x000000b4 +#define DSI_LP_TIMER_CTRL_LP_RX_TO__MASK 0x0000ffff +#define DSI_LP_TIMER_CTRL_LP_RX_TO__SHIFT 0 +static inline uint32_t DSI_LP_TIMER_CTRL_LP_RX_TO(uint32_t val) +{ + return ((val) << DSI_LP_TIMER_CTRL_LP_RX_TO__SHIFT) & DSI_LP_TIMER_CTRL_LP_RX_TO__MASK; +} +#define DSI_LP_TIMER_CTRL_BTA_TO__MASK 0xffff0000 +#define DSI_LP_TIMER_CTRL_BTA_TO__SHIFT 16 +static inline uint32_t DSI_LP_TIMER_CTRL_BTA_TO(uint32_t val) +{ + return ((val) << DSI_LP_TIMER_CTRL_BTA_TO__SHIFT) & DSI_LP_TIMER_CTRL_BTA_TO__MASK; +} + +#define REG_DSI_HS_TIMER_CTRL 0x000000b8 +#define DSI_HS_TIMER_CTRL_HS_TX_TO__MASK 0x0000ffff +#define DSI_HS_TIMER_CTRL_HS_TX_TO__SHIFT 0 +static inline uint32_t DSI_HS_TIMER_CTRL_HS_TX_TO(uint32_t val) +{ + return ((val) << DSI_HS_TIMER_CTRL_HS_TX_TO__SHIFT) & DSI_HS_TIMER_CTRL_HS_TX_TO__MASK; +} +#define DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__MASK 0x000f0000 +#define DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__SHIFT 16 +static inline uint32_t DSI_HS_TIMER_CTRL_TIMER_RESOLUTION(uint32_t val) +{ + return ((val) << DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__SHIFT) & DSI_HS_TIMER_CTRL_TIMER_RESOLUTION__MASK; +} +#define DSI_HS_TIMER_CTRL_HS_TX_TO_STOP_EN 0x10000000 + +#define REG_DSI_TIMEOUT_STATUS 0x000000bc + +#define REG_DSI_CLKOUT_TIMING_CTRL 0x000000c0 +#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK 0x0000003f +#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT 0 +static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(uint32_t val) +{ + return 
((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK; +} +#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK 0x00003f00 +#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT 8 +static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val) +{ + return ((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK; +} + +#define REG_DSI_EOT_PACKET_CTRL 0x000000c8 +#define DSI_EOT_PACKET_CTRL_TX_EOT_APPEND 0x00000001 +#define DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE 0x00000010 + +#define REG_DSI_LANE_STATUS 0x000000a4 +#define DSI_LANE_STATUS_DLN0_STOPSTATE 0x00000001 +#define DSI_LANE_STATUS_DLN1_STOPSTATE 0x00000002 +#define DSI_LANE_STATUS_DLN2_STOPSTATE 0x00000004 +#define DSI_LANE_STATUS_DLN3_STOPSTATE 0x00000008 +#define DSI_LANE_STATUS_CLKLN_STOPSTATE 0x00000010 +#define DSI_LANE_STATUS_DLN0_ULPS_ACTIVE_NOT 0x00000100 +#define DSI_LANE_STATUS_DLN1_ULPS_ACTIVE_NOT 0x00000200 +#define DSI_LANE_STATUS_DLN2_ULPS_ACTIVE_NOT 0x00000400 +#define DSI_LANE_STATUS_DLN3_ULPS_ACTIVE_NOT 0x00000800 +#define DSI_LANE_STATUS_CLKLN_ULPS_ACTIVE_NOT 0x00001000 +#define DSI_LANE_STATUS_DLN0_DIRECTION 0x00010000 + +#define REG_DSI_LANE_CTRL 0x000000a8 +#define DSI_LANE_CTRL_HS_REQ_SEL_PHY 0x01000000 +#define DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST 0x10000000 + +#define REG_DSI_LANE_SWAP_CTRL 0x000000ac +#define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK 0x00000007 +#define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT 0 +static inline uint32_t DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(enum dsi_lane_swap val) +{ + return ((val) << DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT) & DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK; +} + +#define REG_DSI_ERR_INT_MASK0 0x00000108 + +#define REG_DSI_INTR_CTRL 0x0000010c + +#define REG_DSI_RESET 0x00000114 + +#define REG_DSI_CLK_CTRL 0x00000118 +#define DSI_CLK_CTRL_AHBS_HCLK_ON 0x00000001 +#define DSI_CLK_CTRL_AHBM_SCLK_ON 0x00000002 +#define DSI_CLK_CTRL_PCLK_ON 0x00000004 +#define DSI_CLK_CTRL_DSICLK_ON 0x00000008 +#define DSI_CLK_CTRL_BYTECLK_ON 0x00000010 +#define DSI_CLK_CTRL_ESCCLK_ON 0x00000020 +#define DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK 0x00000200 + +#define REG_DSI_CLK_STATUS 0x0000011c +#define DSI_CLK_STATUS_DSI_AON_AHBM_HCLK_ACTIVE 0x00000001 +#define DSI_CLK_STATUS_DSI_DYN_AHBM_HCLK_ACTIVE 0x00000002 +#define DSI_CLK_STATUS_DSI_AON_AHBS_HCLK_ACTIVE 0x00000004 +#define DSI_CLK_STATUS_DSI_DYN_AHBS_HCLK_ACTIVE 0x00000008 +#define DSI_CLK_STATUS_DSI_AON_DSICLK_ACTIVE 0x00000010 +#define DSI_CLK_STATUS_DSI_DYN_DSICLK_ACTIVE 0x00000020 +#define DSI_CLK_STATUS_DSI_AON_BYTECLK_ACTIVE 0x00000040 +#define DSI_CLK_STATUS_DSI_DYN_BYTECLK_ACTIVE 0x00000080 +#define DSI_CLK_STATUS_DSI_AON_ESCCLK_ACTIVE 0x00000100 +#define DSI_CLK_STATUS_DSI_AON_PCLK_ACTIVE 0x00000200 +#define DSI_CLK_STATUS_DSI_DYN_PCLK_ACTIVE 0x00000400 +#define DSI_CLK_STATUS_DSI_DYN_CMD_PCLK_ACTIVE 0x00001000 +#define DSI_CLK_STATUS_DSI_CMD_PCLK_ACTIVE 0x00002000 +#define DSI_CLK_STATUS_DSI_VID_PCLK_ACTIVE 0x00004000 +#define DSI_CLK_STATUS_DSI_CAM_BIST_PCLK_ACT 0x00008000 +#define DSI_CLK_STATUS_PLL_UNLOCKED 0x00010000 + +#define REG_DSI_PHY_RESET 0x00000128 +#define DSI_PHY_RESET_RESET 0x00000001 + +#define REG_DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL 0x00000160 + +#define REG_DSI_TPG_MAIN_CONTROL 0x00000198 +#define DSI_TPG_MAIN_CONTROL_CHECKERED_RECTANGLE_PATTERN 0x00000100 + +#define REG_DSI_TPG_VIDEO_CONFIG 0x000001a0 +#define DSI_TPG_VIDEO_CONFIG_BPP__MASK 0x00000003 +#define DSI_TPG_VIDEO_CONFIG_BPP__SHIFT 0 +static inline uint32_t DSI_TPG_VIDEO_CONFIG_BPP(enum 
video_config_bpp val) +{ + return ((val) << DSI_TPG_VIDEO_CONFIG_BPP__SHIFT) & DSI_TPG_VIDEO_CONFIG_BPP__MASK; +} +#define DSI_TPG_VIDEO_CONFIG_RGB 0x00000004 + +#define REG_DSI_TEST_PATTERN_GEN_CTRL 0x00000158 +#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__MASK 0x00030000 +#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__SHIFT 16 +static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL(enum cmd_dma_pattern_sel val) +{ + return ((val) << DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_PATTERN_SEL__MASK; +} +#define DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__MASK 0x00000300 +#define DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__SHIFT 8 +static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL(enum cmd_mdp_stream0_pattern_sel val) +{ + return ((val) << DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL__MASK; +} +#define DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__MASK 0x00000030 +#define DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__SHIFT 4 +static inline uint32_t DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL(enum video_pattern_sel val) +{ + return ((val) << DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__SHIFT) & DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL__MASK; +} +#define DSI_TEST_PATTERN_GEN_CTRL_TPG_DMA_FIFO_MODE 0x00000004 +#define DSI_TEST_PATTERN_GEN_CTRL_CMD_DMA_TPG_EN 0x00000002 +#define DSI_TEST_PATTERN_GEN_CTRL_EN 0x00000001 + +#define REG_DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0 0x00000168 + +#define REG_DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER 0x00000180 +#define DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER 0x00000001 + +#define REG_DSI_TPG_MAIN_CONTROL2 0x0000019c +#define DSI_TPG_MAIN_CONTROL2_CMD_MDP0_CHECKERED_RECTANGLE_PATTERN 0x00000080 +#define DSI_TPG_MAIN_CONTROL2_CMD_MDP1_CHECKERED_RECTANGLE_PATTERN 0x00010000 +#define DSI_TPG_MAIN_CONTROL2_CMD_MDP2_CHECKERED_RECTANGLE_PATTERN 0x02000000 + +#define REG_DSI_T_CLK_PRE_EXTEND 0x0000017c +#define DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK 0x00000001 + +#define REG_DSI_CMD_MODE_MDP_CTRL2 0x000001b4 +#define DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__MASK 0x0000000f +#define DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__SHIFT 0 +static inline uint32_t DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2(enum dsi_cmd_dst_format val) +{ + return ((val) << DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__SHIFT) & DSI_CMD_MODE_MDP_CTRL2_DST_FORMAT2__MASK; +} +#define DSI_CMD_MODE_MDP_CTRL2_R_SEL 0x00000010 +#define DSI_CMD_MODE_MDP_CTRL2_G_SEL 0x00000020 +#define DSI_CMD_MODE_MDP_CTRL2_B_SEL 0x00000040 +#define DSI_CMD_MODE_MDP_CTRL2_BYTE_MSB_LSB_FLIP 0x00000080 +#define DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__MASK 0x00000700 +#define DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__SHIFT 8 +static inline uint32_t DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP(enum dsi_rgb_swap val) +{ + return ((val) << DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__SHIFT) & DSI_CMD_MODE_MDP_CTRL2_RGB_SWAP__MASK; +} +#define DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__MASK 0x00007000 +#define DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__SHIFT 12 +static inline uint32_t DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP(enum dsi_rgb_swap val) +{ + return ((val) << DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__SHIFT) & DSI_CMD_MODE_MDP_CTRL2_INPUT_RGB_SWAP__MASK; +} +#define DSI_CMD_MODE_MDP_CTRL2_BURST_MODE 0x00010000 + +#define REG_DSI_CMD_MODE_MDP_STREAM2_CTRL 0x000001b8 +#define DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__MASK 0x0000003f +#define DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__SHIFT 0 
+static inline uint32_t DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE(uint32_t val) +{ + return ((val) << DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__SHIFT) & DSI_CMD_MODE_MDP_STREAM2_CTRL_DATA_TYPE__MASK; +} +#define DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__MASK 0x00000300 +#define DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__SHIFT 8 +static inline uint32_t DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL(uint32_t val) +{ + return ((val) << DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__SHIFT) & DSI_CMD_MODE_MDP_STREAM2_CTRL_VIRTUAL_CHANNEL__MASK; +} +#define DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__MASK 0xffff0000 +#define DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__SHIFT 16 +static inline uint32_t DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT(uint32_t val) +{ + return ((val) << DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__SHIFT) & DSI_CMD_MODE_MDP_STREAM2_CTRL_WORD_COUNT__MASK; +} + +#define REG_DSI_RDBK_DATA_CTRL 0x000001d0 +#define DSI_RDBK_DATA_CTRL_COUNT__MASK 0x00ff0000 +#define DSI_RDBK_DATA_CTRL_COUNT__SHIFT 16 +static inline uint32_t DSI_RDBK_DATA_CTRL_COUNT(uint32_t val) +{ + return ((val) << DSI_RDBK_DATA_CTRL_COUNT__SHIFT) & DSI_RDBK_DATA_CTRL_COUNT__MASK; +} +#define DSI_RDBK_DATA_CTRL_CLR 0x00000001 + +#define REG_DSI_VERSION 0x000001f0 +#define DSI_VERSION_MAJOR__MASK 0xff000000 +#define DSI_VERSION_MAJOR__SHIFT 24 +static inline uint32_t DSI_VERSION_MAJOR(uint32_t val) +{ + return ((val) << DSI_VERSION_MAJOR__SHIFT) & DSI_VERSION_MAJOR__MASK; +} + +#define REG_DSI_CPHY_MODE_CTRL 0x000002d4 + +#define REG_DSI_VIDEO_COMPRESSION_MODE_CTRL 0x0000029c +#define DSI_VIDEO_COMPRESSION_MODE_CTRL_WC__MASK 0xffff0000 +#define DSI_VIDEO_COMPRESSION_MODE_CTRL_WC__SHIFT 16 +static inline uint32_t DSI_VIDEO_COMPRESSION_MODE_CTRL_WC(uint32_t val) +{ + return ((val) << DSI_VIDEO_COMPRESSION_MODE_CTRL_WC__SHIFT) & DSI_VIDEO_COMPRESSION_MODE_CTRL_WC__MASK; +} +#define DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE__MASK 0x00003f00 +#define DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE__SHIFT 8 +static inline uint32_t DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE(uint32_t val) +{ + return ((val) << DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE__SHIFT) & DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE__MASK; +} +#define DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE__MASK 0x000000c0 +#define DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE__SHIFT 6 +static inline uint32_t DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE(uint32_t val) +{ + return ((val) << DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE__SHIFT) & DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE__MASK; +} +#define DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM__MASK 0x00000030 +#define DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM__SHIFT 4 +static inline uint32_t DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM(uint32_t val) +{ + return ((val) << DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM__SHIFT) & DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM__MASK; +} +#define DSI_VIDEO_COMPRESSION_MODE_CTRL_EN 0x00000001 + +#define REG_DSI_COMMAND_COMPRESSION_MODE_CTRL 0x000002a4 +#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE__MASK 0x3f000000 +#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE__SHIFT 24 +static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE(uint32_t val) +{ + return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE__MASK; +} +#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE__MASK 0x00c00000 +#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE__SHIFT 22 
+static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE(uint32_t val) +{ + return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE__MASK; +} +#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM__MASK 0x00300000 +#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM__SHIFT 20 +static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM(uint32_t val) +{ + return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM__MASK; +} +#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EN 0x00010000 +#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE__MASK 0x00003f00 +#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE__SHIFT 8 +static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE(uint32_t val) +{ + return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE__MASK; +} +#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE__MASK 0x000000c0 +#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE__SHIFT 6 +static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE(uint32_t val) +{ + return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE__MASK; +} +#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM__MASK 0x00000030 +#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM__SHIFT 4 +static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM(uint32_t val) +{ + return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM__MASK; +} +#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EN 0x00000001 + +#define REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2 0x000002a8 +#define DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH__MASK 0xffff0000 +#define DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH__SHIFT 16 +static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH(uint32_t val) +{ + return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH__MASK; +} +#define DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__MASK 0x0000ffff +#define DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__SHIFT 0 +static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH(uint32_t val) +{ + return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__MASK; +} + +#endif /* DSI_XML */ diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c new file mode 100644 index 000000000..e0bd452a9 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -0,0 +1,325 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. 
+ */ + +#include "dsi_cfg.h" + +static const char * const dsi_v2_bus_clk_names[] = { + "core_mmss", "iface", "bus", +}; + +static const struct regulator_bulk_data apq8064_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */ + { .supply = "avdd", .init_load_uA = 10000 }, /* 3.0 V */ + { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */ +}; + +static const struct msm_dsi_config apq8064_dsi_cfg = { + .io_offset = 0, + .regulator_data = apq8064_dsi_regulators, + .num_regulators = ARRAY_SIZE(apq8064_dsi_regulators), + .bus_clk_names = dsi_v2_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names), + .io_start = { 0x4700000, 0x5800000 }, + .num_dsi = 2, +}; + +static const char * const dsi_6g_bus_clk_names[] = { + "mdp_core", "iface", "bus", "core_mmss", +}; + +static const struct regulator_bulk_data msm8974_apq8084_regulators[] = { + { .supply = "vdd", .init_load_uA = 150000 }, /* 3.0 V */ + { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */ + { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */ +}; + +static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = msm8974_apq8084_regulators, + .num_regulators = ARRAY_SIZE(msm8974_apq8084_regulators), + .bus_clk_names = dsi_6g_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names), + .io_start = { 0xfd922800, 0xfd922b00 }, + .num_dsi = 2, +}; + +static const char * const dsi_8916_bus_clk_names[] = { + "mdp_core", "iface", "bus", +}; + +static const struct regulator_bulk_data msm8916_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */ + { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */ +}; + +static const struct msm_dsi_config msm8916_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = msm8916_dsi_regulators, + .num_regulators = ARRAY_SIZE(msm8916_dsi_regulators), + .bus_clk_names = dsi_8916_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_8916_bus_clk_names), + .io_start = { 0x1a98000 }, + .num_dsi = 1, +}; + +static const char * const dsi_8976_bus_clk_names[] = { + "mdp_core", "iface", "bus", +}; + +static const struct regulator_bulk_data msm8976_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */ + { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */ +}; + +static const struct msm_dsi_config msm8976_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = msm8976_dsi_regulators, + .num_regulators = ARRAY_SIZE(msm8976_dsi_regulators), + .bus_clk_names = dsi_8976_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_8976_bus_clk_names), + .io_start = { 0x1a94000, 0x1a96000 }, + .num_dsi = 2, +}; + +static const struct regulator_bulk_data msm8994_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 100000 }, /* 1.25 V */ + { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */ + { .supply = "vcca", .init_load_uA = 10000 }, /* 1.0 V */ + { .supply = "vdd", .init_load_uA = 100000 }, /* 1.8 V */ + { .supply = "lab_reg", .init_load_uA = -1 }, + { .supply = "ibb_reg", .init_load_uA = -1 }, +}; + +static const struct msm_dsi_config msm8994_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = msm8994_dsi_regulators, + .num_regulators = ARRAY_SIZE(msm8994_dsi_regulators), + .bus_clk_names = dsi_6g_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names), + .io_start = { 0xfd998000, 0xfd9a0000 }, + .num_dsi = 2, +}; + +static const char * const dsi_8996_bus_clk_names[] = { + "mdp_core", "iface", "bus", "core_mmss", +}; 
+ +static const struct regulator_bulk_data msm8996_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 18160 }, /* 1.25 V */ + { .supply = "vcca", .init_load_uA = 17000 }, /* 0.925 V */ + { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */ +}; + +static const struct msm_dsi_config msm8996_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = msm8996_dsi_regulators, + .num_regulators = ARRAY_SIZE(msm8996_dsi_regulators), + .bus_clk_names = dsi_8996_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_8996_bus_clk_names), + .io_start = { 0x994000, 0x996000 }, + .num_dsi = 2, +}; + +static const char * const dsi_msm8998_bus_clk_names[] = { + "iface", "bus", "core", +}; + +static const struct regulator_bulk_data msm8998_dsi_regulators[] = { + { .supply = "vdd", .init_load_uA = 367000 }, /* 0.9 V */ + { .supply = "vdda", .init_load_uA = 62800 }, /* 1.2 V */ +}; + +static const struct msm_dsi_config msm8998_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = msm8998_dsi_regulators, + .num_regulators = ARRAY_SIZE(msm8998_dsi_regulators), + .bus_clk_names = dsi_msm8998_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_msm8998_bus_clk_names), + .io_start = { 0xc994000, 0xc996000 }, + .num_dsi = 2, +}; + +static const char * const dsi_sdm660_bus_clk_names[] = { + "iface", "bus", "core", "core_mmss", +}; + +static const struct regulator_bulk_data sdm660_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 12560 }, /* 1.2 V */ +}; + +static const struct msm_dsi_config sdm660_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = sdm660_dsi_regulators, + .num_regulators = ARRAY_SIZE(sdm660_dsi_regulators), + .bus_clk_names = dsi_sdm660_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_sdm660_bus_clk_names), + .io_start = { 0xc994000, 0xc996000 }, + .num_dsi = 2, +}; + +static const char * const dsi_sdm845_bus_clk_names[] = { + "iface", "bus", +}; + +static const char * const dsi_sc7180_bus_clk_names[] = { + "iface", "bus", +}; + +static const struct regulator_bulk_data sdm845_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */ +}; + +static const struct msm_dsi_config sdm845_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = sdm845_dsi_regulators, + .num_regulators = ARRAY_SIZE(sdm845_dsi_regulators), + .bus_clk_names = dsi_sdm845_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_sdm845_bus_clk_names), + .io_start = { 0xae94000, 0xae96000 }, + .num_dsi = 2, +}; + +static const struct regulator_bulk_data sc7180_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */ +}; + +static const struct msm_dsi_config sc7180_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = sc7180_dsi_regulators, + .num_regulators = ARRAY_SIZE(sc7180_dsi_regulators), + .bus_clk_names = dsi_sc7180_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_sc7180_bus_clk_names), + .io_start = { 0xae94000 }, + .num_dsi = 1, +}; + +static const char * const dsi_sc7280_bus_clk_names[] = { + "iface", "bus", +}; + +static const struct regulator_bulk_data sc7280_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 8350 }, /* 1.2 V */ +}; + +static const struct msm_dsi_config sc7280_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = sc7280_dsi_regulators, + .num_regulators = ARRAY_SIZE(sc7280_dsi_regulators), + .bus_clk_names = dsi_sc7280_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_sc7280_bus_clk_names), + .io_start = { 0xae94000, 0xae96000 }, + .num_dsi = 2, +}; + +static const char * const 
dsi_qcm2290_bus_clk_names[] = { + "iface", "bus", +}; + +static const struct regulator_bulk_data qcm2290_dsi_cfg_regulators[] = { + { .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */ +}; + +static const struct msm_dsi_config qcm2290_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .regulator_data = qcm2290_dsi_cfg_regulators, + .num_regulators = ARRAY_SIZE(qcm2290_dsi_cfg_regulators), + .bus_clk_names = dsi_qcm2290_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_qcm2290_bus_clk_names), + .io_start = { 0x5e94000 }, + .num_dsi = 1, +}; + +static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = { + .link_clk_set_rate = dsi_link_clk_set_rate_v2, + .link_clk_enable = dsi_link_clk_enable_v2, + .link_clk_disable = dsi_link_clk_disable_v2, + .clk_init_ver = dsi_clk_init_v2, + .tx_buf_alloc = dsi_tx_buf_alloc_v2, + .tx_buf_get = dsi_tx_buf_get_v2, + .tx_buf_put = NULL, + .dma_base_get = dsi_dma_base_get_v2, + .calc_clk_rate = dsi_calc_clk_rate_v2, +}; + +static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = { + .link_clk_set_rate = dsi_link_clk_set_rate_6g, + .link_clk_enable = dsi_link_clk_enable_6g, + .link_clk_disable = dsi_link_clk_disable_6g, + .clk_init_ver = NULL, + .tx_buf_alloc = dsi_tx_buf_alloc_6g, + .tx_buf_get = dsi_tx_buf_get_6g, + .tx_buf_put = dsi_tx_buf_put_6g, + .dma_base_get = dsi_dma_base_get_6g, + .calc_clk_rate = dsi_calc_clk_rate_6g, +}; + +static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = { + .link_clk_set_rate = dsi_link_clk_set_rate_6g, + .link_clk_enable = dsi_link_clk_enable_6g, + .link_clk_disable = dsi_link_clk_disable_6g, + .clk_init_ver = dsi_clk_init_6g_v2, + .tx_buf_alloc = dsi_tx_buf_alloc_6g, + .tx_buf_get = dsi_tx_buf_get_6g, + .tx_buf_put = dsi_tx_buf_put_6g, + .dma_base_get = dsi_dma_base_get_6g, + .calc_clk_rate = dsi_calc_clk_rate_6g, +}; + +static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { + {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, + &apq8064_dsi_cfg, &msm_dsi_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0, + &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1, + &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1_1, + &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_2, + &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, + &msm8994_dsi_cfg, &msm_dsi_6g_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, + &msm8916_dsi_cfg, &msm_dsi_6g_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, + &msm8996_dsi_cfg, &msm_dsi_6g_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_2, + &msm8976_dsi_cfg, &msm_dsi_6g_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_1_0, + &sdm660_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_0, + &msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1, + &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_3_0, + &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_0, + &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1, + &sc7180_dsi_cfg, &msm_dsi_6g_v2_host_ops}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_5_0, + &sc7280_dsi_cfg, &msm_dsi_6g_v2_host_ops}, +}; + +const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 
major, u32 minor) +{ + const struct msm_dsi_cfg_handler *cfg_hnd = NULL; + int i; + + for (i = ARRAY_SIZE(dsi_cfg_handlers) - 1; i >= 0; i--) { + if ((dsi_cfg_handlers[i].major == major) && + (dsi_cfg_handlers[i].minor == minor)) { + cfg_hnd = &dsi_cfg_handlers[i]; + break; + } + } + + return cfg_hnd; +} + +/* Non autodetect configs */ +const struct msm_dsi_cfg_handler qcm2290_dsi_cfg_handler = { + .cfg = &qcm2290_dsi_cfg, + .ops = &msm_dsi_6g_v2_host_ops, +}; diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h new file mode 100644 index 000000000..8f04e685a --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + */ + +#ifndef __MSM_DSI_CFG_H__ +#define __MSM_DSI_CFG_H__ + +#include "dsi.h" + +#define MSM_DSI_VER_MAJOR_V2 0x02 +#define MSM_DSI_VER_MAJOR_6G 0x03 +#define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000 +#define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000 +#define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001 +#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000 +#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000 +#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001 +#define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001 +#define MSM_DSI_6G_VER_MINOR_V1_4_2 0x10040002 +#define MSM_DSI_6G_VER_MINOR_V2_1_0 0x20010000 +#define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000 +#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001 +#define MSM_DSI_6G_VER_MINOR_V2_3_0 0x20030000 +#define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000 +#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001 +#define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000 + +#define MSM_DSI_V2_VER_MINOR_8064 0x0 + +#define DSI_6G_REG_SHIFT 4 + +struct msm_dsi_config { + u32 io_offset; + const struct regulator_bulk_data *regulator_data; + int num_regulators; + const char * const *bus_clk_names; + const int num_bus_clks; + const resource_size_t io_start[DSI_MAX]; + const int num_dsi; +}; + +struct msm_dsi_host_cfg_ops { + int (*link_clk_set_rate)(struct msm_dsi_host *msm_host); + int (*link_clk_enable)(struct msm_dsi_host *msm_host); + void (*link_clk_disable)(struct msm_dsi_host *msm_host); + int (*clk_init_ver)(struct msm_dsi_host *msm_host); + int (*tx_buf_alloc)(struct msm_dsi_host *msm_host, int size); + void* (*tx_buf_get)(struct msm_dsi_host *msm_host); + void (*tx_buf_put)(struct msm_dsi_host *msm_host); + int (*dma_base_get)(struct msm_dsi_host *msm_host, uint64_t *iova); + int (*calc_clk_rate)(struct msm_dsi_host *msm_host, bool is_bonded_dsi); +}; + +struct msm_dsi_cfg_handler { + u32 major; + u32 minor; + const struct msm_dsi_config *cfg; + const struct msm_dsi_host_cfg_ops *ops; +}; + +const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor); + +/* Non autodetect configs */ +extern const struct msm_dsi_cfg_handler qcm2290_dsi_cfg_handler; + +#endif /* __MSM_DSI_CFG_H__ */ + diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c new file mode 100644 index 000000000..a7c6e8a17 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -0,0 +1,2630 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include