Diffstat (limited to 'drivers/gpu/drm/msm/adreno')
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx.xml.h            3236
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx_gpu.c             569
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx_gpu.h              22
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx.xml.h            3247
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c             615
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.h              26
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx.xml.h            4361
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c             741
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.h              23
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx.xml.h            5498
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_debugfs.c         159
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c            1789
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.h             174
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_power.c           390
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_preempt.c         302
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx.xml.h            8256
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c            1738
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.h             199
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h         485
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c            2381
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.h             110
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c      1401
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h       482
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_hfi.c             767
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_hfi.h             184
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_common.xml.h    697
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c        889
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c          1131
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h           562
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h      2444
30 files changed, 42878 insertions, 0 deletions
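
Each register described in the generated a2xx.xml.h below follows the same pattern: a REG_A2XX_* word offset, single-bit #defines for flag fields, and a __MASK/__SHIFT pair plus a static inline packing helper for each multi-bit field. A minimal sketch of how a caller might compose a register value from those helpers, assuming only the definitions in this header (the function name and the chosen field values are purely illustrative, not a recommended configuration, and the actual register write is omitted):

#include <stdint.h>
#include "a2xx.xml.h"

/* Illustrative only: build a value for REG_A2XX_MH_MMU_CONFIG. */
static uint32_t example_mh_mmu_config(void)
{
	uint32_t val = 0;

	/* Single-bit fields are plain #defines and can be OR'd in directly. */
	val |= A2XX_MH_MMU_CONFIG_MMU_ENABLE;

	/* Multi-bit fields use the inline helpers, which shift the enum into
	 * place and mask off anything that would overflow the field. */
	val |= A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(BEH_TRAN_RNG);
	val |= A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(BEH_TRAN_RNG);
	val |= A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(BEH_NEVR);

	return val;
}
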
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
new file mode 100644
index 0000000000..f87a1312f5
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -0,0 +1,3236 @@
+#ifndef A2XX_XML
+#define A2XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52)
+
+Copyright (C) 2013-2023 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a2xx_rb_dither_type {
+ DITHER_PIXEL = 0,
+ DITHER_SUBPIXEL = 1,
+};
+
+enum a2xx_colorformatx {
+ COLORX_4_4_4_4 = 0,
+ COLORX_1_5_5_5 = 1,
+ COLORX_5_6_5 = 2,
+ COLORX_8 = 3,
+ COLORX_8_8 = 4,
+ COLORX_8_8_8_8 = 5,
+ COLORX_S8_8_8_8 = 6,
+ COLORX_16_FLOAT = 7,
+ COLORX_16_16_FLOAT = 8,
+ COLORX_16_16_16_16_FLOAT = 9,
+ COLORX_32_FLOAT = 10,
+ COLORX_32_32_FLOAT = 11,
+ COLORX_32_32_32_32_FLOAT = 12,
+ COLORX_2_3_3 = 13,
+ COLORX_8_8_8 = 14,
+};
+
+enum a2xx_sq_surfaceformat {
+ FMT_1_REVERSE = 0,
+ FMT_1 = 1,
+ FMT_8 = 2,
+ FMT_1_5_5_5 = 3,
+ FMT_5_6_5 = 4,
+ FMT_6_5_5 = 5,
+ FMT_8_8_8_8 = 6,
+ FMT_2_10_10_10 = 7,
+ FMT_8_A = 8,
+ FMT_8_B = 9,
+ FMT_8_8 = 10,
+ FMT_Cr_Y1_Cb_Y0 = 11,
+ FMT_Y1_Cr_Y0_Cb = 12,
+ FMT_5_5_5_1 = 13,
+ FMT_8_8_8_8_A = 14,
+ FMT_4_4_4_4 = 15,
+ FMT_8_8_8 = 16,
+ FMT_DXT1 = 18,
+ FMT_DXT2_3 = 19,
+ FMT_DXT4_5 = 20,
+ FMT_10_10_10_2 = 21,
+ FMT_24_8 = 22,
+ FMT_16 = 24,
+ FMT_16_16 = 25,
+ FMT_16_16_16_16 = 26,
+ FMT_16_EXPAND = 27,
+ FMT_16_16_EXPAND = 28,
+ FMT_16_16_16_16_EXPAND = 29,
+ FMT_16_FLOAT = 30,
+ FMT_16_16_FLOAT = 31,
+ FMT_16_16_16_16_FLOAT = 32,
+ FMT_32 = 33,
+ FMT_32_32 = 34,
+ FMT_32_32_32_32 = 35,
+ FMT_32_FLOAT = 36,
+ FMT_32_32_FLOAT = 37,
+ FMT_32_32_32_32_FLOAT = 38,
+ FMT_ATI_TC_RGB = 39,
+ FMT_ATI_TC_RGBA = 40,
+ FMT_ATI_TC_555_565_RGB = 41,
+ FMT_ATI_TC_555_565_RGBA = 42,
+ FMT_ATI_TC_RGBA_INTERP = 43,
+ FMT_ATI_TC_555_565_RGBA_INTERP = 44,
+ FMT_ETC1_RGBA_INTERP = 46,
+ FMT_ETC1_RGB = 47,
+ FMT_ETC1_RGBA = 48,
+ FMT_DXN = 49,
+ FMT_2_3_3 = 51,
+ FMT_2_10_10_10_AS_16_16_16_16 = 54,
+ FMT_10_10_10_2_AS_16_16_16_16 = 55,
+ FMT_32_32_32_FLOAT = 57,
+ FMT_DXT3A = 58,
+ FMT_DXT5A = 59,
+ FMT_CTX1 = 60,
+};
+
+enum a2xx_sq_ps_vtx_mode {
+ POSITION_1_VECTOR = 0,
+ POSITION_2_VECTORS_UNUSED = 1,
+ POSITION_2_VECTORS_SPRITE = 2,
+ POSITION_2_VECTORS_EDGE = 3,
+ POSITION_2_VECTORS_KILL = 4,
+ POSITION_2_VECTORS_SPRITE_KILL = 5,
+ POSITION_2_VECTORS_EDGE_KILL = 6,
+ MULTIPASS = 7,
+};
+
+enum a2xx_sq_sample_cntl {
+ CENTROIDS_ONLY = 0,
+ CENTERS_ONLY = 1,
+ CENTROIDS_AND_CENTERS = 2,
+};
+
+enum a2xx_dx_clip_space {
+ DXCLIP_OPENGL = 0,
+ DXCLIP_DIRECTX = 1,
+};
+
+enum a2xx_pa_su_sc_polymode {
+ POLY_DISABLED = 0,
+ POLY_DUALMODE = 1,
+};
+
+enum a2xx_rb_edram_mode {
+ EDRAM_NOP = 0,
+ COLOR_DEPTH = 4,
+ DEPTH_ONLY = 5,
+ EDRAM_COPY = 6,
+};
+
+enum a2xx_pa_sc_pattern_bit_order {
+ LITTLE = 0,
+ BIG = 1,
+};
+
+enum a2xx_pa_sc_auto_reset_cntl {
+ NEVER = 0,
+ EACH_PRIMITIVE = 1,
+ EACH_PACKET = 2,
+};
+
+enum a2xx_pa_pixcenter {
+ PIXCENTER_D3D = 0,
+ PIXCENTER_OGL = 1,
+};
+
+enum a2xx_pa_roundmode {
+ TRUNCATE = 0,
+ ROUND = 1,
+ ROUNDTOEVEN = 2,
+ ROUNDTOODD = 3,
+};
+
+enum a2xx_pa_quantmode {
+ ONE_SIXTEENTH = 0,
+ ONE_EIGTH = 1,
+ ONE_QUARTER = 2,
+ ONE_HALF = 3,
+ ONE = 4,
+};
+
+enum a2xx_rb_copy_sample_select {
+ SAMPLE_0 = 0,
+ SAMPLE_1 = 1,
+ SAMPLE_2 = 2,
+ SAMPLE_3 = 3,
+ SAMPLE_01 = 4,
+ SAMPLE_23 = 5,
+ SAMPLE_0123 = 6,
+};
+
+enum a2xx_rb_blend_opcode {
+ BLEND2_DST_PLUS_SRC = 0,
+ BLEND2_SRC_MINUS_DST = 1,
+ BLEND2_MIN_DST_SRC = 2,
+ BLEND2_MAX_DST_SRC = 3,
+ BLEND2_DST_MINUS_SRC = 4,
+ BLEND2_DST_PLUS_SRC_BIAS = 5,
+};
+
+enum a2xx_su_perfcnt_select {
+ PERF_PAPC_PASX_REQ = 0,
+ PERF_PAPC_PASX_FIRST_VECTOR = 2,
+ PERF_PAPC_PASX_SECOND_VECTOR = 3,
+ PERF_PAPC_PASX_FIRST_DEAD = 4,
+ PERF_PAPC_PASX_SECOND_DEAD = 5,
+ PERF_PAPC_PASX_VTX_KILL_DISCARD = 6,
+ PERF_PAPC_PASX_VTX_NAN_DISCARD = 7,
+ PERF_PAPC_PA_INPUT_PRIM = 8,
+ PERF_PAPC_PA_INPUT_NULL_PRIM = 9,
+ PERF_PAPC_PA_INPUT_EVENT_FLAG = 10,
+ PERF_PAPC_PA_INPUT_FIRST_PRIM_SLOT = 11,
+ PERF_PAPC_PA_INPUT_END_OF_PACKET = 12,
+ PERF_PAPC_CLPR_CULL_PRIM = 13,
+ PERF_PAPC_CLPR_VV_CULL_PRIM = 15,
+ PERF_PAPC_CLPR_VTX_KILL_CULL_PRIM = 17,
+ PERF_PAPC_CLPR_VTX_NAN_CULL_PRIM = 18,
+ PERF_PAPC_CLPR_CULL_TO_NULL_PRIM = 19,
+ PERF_PAPC_CLPR_VV_CLIP_PRIM = 21,
+ PERF_PAPC_CLPR_POINT_CLIP_CANDIDATE = 23,
+ PERF_PAPC_CLPR_CLIP_PLANE_CNT_1 = 24,
+ PERF_PAPC_CLPR_CLIP_PLANE_CNT_2 = 25,
+ PERF_PAPC_CLPR_CLIP_PLANE_CNT_3 = 26,
+ PERF_PAPC_CLPR_CLIP_PLANE_CNT_4 = 27,
+ PERF_PAPC_CLPR_CLIP_PLANE_CNT_5 = 28,
+ PERF_PAPC_CLPR_CLIP_PLANE_CNT_6 = 29,
+ PERF_PAPC_CLPR_CLIP_PLANE_NEAR = 30,
+ PERF_PAPC_CLPR_CLIP_PLANE_FAR = 31,
+ PERF_PAPC_CLPR_CLIP_PLANE_LEFT = 32,
+ PERF_PAPC_CLPR_CLIP_PLANE_RIGHT = 33,
+ PERF_PAPC_CLPR_CLIP_PLANE_TOP = 34,
+ PERF_PAPC_CLPR_CLIP_PLANE_BOTTOM = 35,
+ PERF_PAPC_CLSM_NULL_PRIM = 36,
+ PERF_PAPC_CLSM_TOTALLY_VISIBLE_PRIM = 37,
+ PERF_PAPC_CLSM_CLIP_PRIM = 38,
+ PERF_PAPC_CLSM_CULL_TO_NULL_PRIM = 39,
+ PERF_PAPC_CLSM_OUT_PRIM_CNT_1 = 40,
+ PERF_PAPC_CLSM_OUT_PRIM_CNT_2 = 41,
+ PERF_PAPC_CLSM_OUT_PRIM_CNT_3 = 42,
+ PERF_PAPC_CLSM_OUT_PRIM_CNT_4 = 43,
+ PERF_PAPC_CLSM_OUT_PRIM_CNT_5 = 44,
+ PERF_PAPC_CLSM_OUT_PRIM_CNT_6_7 = 45,
+ PERF_PAPC_CLSM_NON_TRIVIAL_CULL = 46,
+ PERF_PAPC_SU_INPUT_PRIM = 47,
+ PERF_PAPC_SU_INPUT_CLIP_PRIM = 48,
+ PERF_PAPC_SU_INPUT_NULL_PRIM = 49,
+ PERF_PAPC_SU_ZERO_AREA_CULL_PRIM = 50,
+ PERF_PAPC_SU_BACK_FACE_CULL_PRIM = 51,
+ PERF_PAPC_SU_FRONT_FACE_CULL_PRIM = 52,
+ PERF_PAPC_SU_POLYMODE_FACE_CULL = 53,
+ PERF_PAPC_SU_POLYMODE_BACK_CULL = 54,
+ PERF_PAPC_SU_POLYMODE_FRONT_CULL = 55,
+ PERF_PAPC_SU_POLYMODE_INVALID_FILL = 56,
+ PERF_PAPC_SU_OUTPUT_PRIM = 57,
+ PERF_PAPC_SU_OUTPUT_CLIP_PRIM = 58,
+ PERF_PAPC_SU_OUTPUT_NULL_PRIM = 59,
+ PERF_PAPC_SU_OUTPUT_EVENT_FLAG = 60,
+ PERF_PAPC_SU_OUTPUT_FIRST_PRIM_SLOT = 61,
+ PERF_PAPC_SU_OUTPUT_END_OF_PACKET = 62,
+ PERF_PAPC_SU_OUTPUT_POLYMODE_FACE = 63,
+ PERF_PAPC_SU_OUTPUT_POLYMODE_BACK = 64,
+ PERF_PAPC_SU_OUTPUT_POLYMODE_FRONT = 65,
+ PERF_PAPC_SU_OUT_CLIP_POLYMODE_FACE = 66,
+ PERF_PAPC_SU_OUT_CLIP_POLYMODE_BACK = 67,
+ PERF_PAPC_SU_OUT_CLIP_POLYMODE_FRONT = 68,
+ PERF_PAPC_PASX_REQ_IDLE = 69,
+ PERF_PAPC_PASX_REQ_BUSY = 70,
+ PERF_PAPC_PASX_REQ_STALLED = 71,
+ PERF_PAPC_PASX_REC_IDLE = 72,
+ PERF_PAPC_PASX_REC_BUSY = 73,
+ PERF_PAPC_PASX_REC_STARVED_SX = 74,
+ PERF_PAPC_PASX_REC_STALLED = 75,
+ PERF_PAPC_PASX_REC_STALLED_POS_MEM = 76,
+ PERF_PAPC_PASX_REC_STALLED_CCGSM_IN = 77,
+ PERF_PAPC_CCGSM_IDLE = 78,
+ PERF_PAPC_CCGSM_BUSY = 79,
+ PERF_PAPC_CCGSM_STALLED = 80,
+ PERF_PAPC_CLPRIM_IDLE = 81,
+ PERF_PAPC_CLPRIM_BUSY = 82,
+ PERF_PAPC_CLPRIM_STALLED = 83,
+ PERF_PAPC_CLPRIM_STARVED_CCGSM = 84,
+ PERF_PAPC_CLIPSM_IDLE = 85,
+ PERF_PAPC_CLIPSM_BUSY = 86,
+ PERF_PAPC_CLIPSM_WAIT_CLIP_VERT_ENGH = 87,
+ PERF_PAPC_CLIPSM_WAIT_HIGH_PRI_SEQ = 88,
+ PERF_PAPC_CLIPSM_WAIT_CLIPGA = 89,
+ PERF_PAPC_CLIPSM_WAIT_AVAIL_VTE_CLIP = 90,
+ PERF_PAPC_CLIPSM_WAIT_CLIP_OUTSM = 91,
+ PERF_PAPC_CLIPGA_IDLE = 92,
+ PERF_PAPC_CLIPGA_BUSY = 93,
+ PERF_PAPC_CLIPGA_STARVED_VTE_CLIP = 94,
+ PERF_PAPC_CLIPGA_STALLED = 95,
+ PERF_PAPC_CLIP_IDLE = 96,
+ PERF_PAPC_CLIP_BUSY = 97,
+ PERF_PAPC_SU_IDLE = 98,
+ PERF_PAPC_SU_BUSY = 99,
+ PERF_PAPC_SU_STARVED_CLIP = 100,
+ PERF_PAPC_SU_STALLED_SC = 101,
+ PERF_PAPC_SU_FACENESS_CULL = 102,
+};
+
+enum a2xx_sc_perfcnt_select {
+ SC_SR_WINDOW_VALID = 0,
+ SC_CW_WINDOW_VALID = 1,
+ SC_QM_WINDOW_VALID = 2,
+ SC_FW_WINDOW_VALID = 3,
+ SC_EZ_WINDOW_VALID = 4,
+ SC_IT_WINDOW_VALID = 5,
+ SC_STARVED_BY_PA = 6,
+ SC_STALLED_BY_RB_TILE = 7,
+ SC_STALLED_BY_RB_SAMP = 8,
+ SC_STARVED_BY_RB_EZ = 9,
+ SC_STALLED_BY_SAMPLE_FF = 10,
+ SC_STALLED_BY_SQ = 11,
+ SC_STALLED_BY_SP = 12,
+ SC_TOTAL_NO_PRIMS = 13,
+ SC_NON_EMPTY_PRIMS = 14,
+ SC_NO_TILES_PASSING_QM = 15,
+ SC_NO_PIXELS_PRE_EZ = 16,
+ SC_NO_PIXELS_POST_EZ = 17,
+};
+
+enum a2xx_vgt_perfcount_select {
+ VGT_SQ_EVENT_WINDOW_ACTIVE = 0,
+ VGT_SQ_SEND = 1,
+ VGT_SQ_STALLED = 2,
+ VGT_SQ_STARVED_BUSY = 3,
+ VGT_SQ_STARVED_IDLE = 4,
+ VGT_SQ_STATIC = 5,
+ VGT_PA_EVENT_WINDOW_ACTIVE = 6,
+ VGT_PA_CLIP_V_SEND = 7,
+ VGT_PA_CLIP_V_STALLED = 8,
+ VGT_PA_CLIP_V_STARVED_BUSY = 9,
+ VGT_PA_CLIP_V_STARVED_IDLE = 10,
+ VGT_PA_CLIP_V_STATIC = 11,
+ VGT_PA_CLIP_P_SEND = 12,
+ VGT_PA_CLIP_P_STALLED = 13,
+ VGT_PA_CLIP_P_STARVED_BUSY = 14,
+ VGT_PA_CLIP_P_STARVED_IDLE = 15,
+ VGT_PA_CLIP_P_STATIC = 16,
+ VGT_PA_CLIP_S_SEND = 17,
+ VGT_PA_CLIP_S_STALLED = 18,
+ VGT_PA_CLIP_S_STARVED_BUSY = 19,
+ VGT_PA_CLIP_S_STARVED_IDLE = 20,
+ VGT_PA_CLIP_S_STATIC = 21,
+ RBIU_FIFOS_EVENT_WINDOW_ACTIVE = 22,
+ RBIU_IMMED_DATA_FIFO_STARVED = 23,
+ RBIU_IMMED_DATA_FIFO_STALLED = 24,
+ RBIU_DMA_REQUEST_FIFO_STARVED = 25,
+ RBIU_DMA_REQUEST_FIFO_STALLED = 26,
+ RBIU_DRAW_INITIATOR_FIFO_STARVED = 27,
+ RBIU_DRAW_INITIATOR_FIFO_STALLED = 28,
+ BIN_PRIM_NEAR_CULL = 29,
+ BIN_PRIM_ZERO_CULL = 30,
+ BIN_PRIM_FAR_CULL = 31,
+ BIN_PRIM_BIN_CULL = 32,
+ BIN_PRIM_FACE_CULL = 33,
+ SPARE34 = 34,
+ SPARE35 = 35,
+ SPARE36 = 36,
+ SPARE37 = 37,
+ SPARE38 = 38,
+ SPARE39 = 39,
+ TE_SU_IN_VALID = 40,
+ TE_SU_IN_READ = 41,
+ TE_SU_IN_PRIM = 42,
+ TE_SU_IN_EOP = 43,
+ TE_SU_IN_NULL_PRIM = 44,
+ TE_WK_IN_VALID = 45,
+ TE_WK_IN_READ = 46,
+ TE_OUT_PRIM_VALID = 47,
+ TE_OUT_PRIM_READ = 48,
+};
+
+enum a2xx_tcr_perfcount_select {
+ DGMMPD_IPMUX0_STALL = 0,
+ DGMMPD_IPMUX_ALL_STALL = 4,
+ OPMUX0_L2_WRITES = 5,
+};
+
+enum a2xx_tp_perfcount_select {
+ POINT_QUADS = 0,
+ BILIN_QUADS = 1,
+ ANISO_QUADS = 2,
+ MIP_QUADS = 3,
+ VOL_QUADS = 4,
+ MIP_VOL_QUADS = 5,
+ MIP_ANISO_QUADS = 6,
+ VOL_ANISO_QUADS = 7,
+ ANISO_2_1_QUADS = 8,
+ ANISO_4_1_QUADS = 9,
+ ANISO_6_1_QUADS = 10,
+ ANISO_8_1_QUADS = 11,
+ ANISO_10_1_QUADS = 12,
+ ANISO_12_1_QUADS = 13,
+ ANISO_14_1_QUADS = 14,
+ ANISO_16_1_QUADS = 15,
+ MIP_VOL_ANISO_QUADS = 16,
+ ALIGN_2_QUADS = 17,
+ ALIGN_4_QUADS = 18,
+ PIX_0_QUAD = 19,
+ PIX_1_QUAD = 20,
+ PIX_2_QUAD = 21,
+ PIX_3_QUAD = 22,
+ PIX_4_QUAD = 23,
+ TP_MIPMAP_LOD0 = 24,
+ TP_MIPMAP_LOD1 = 25,
+ TP_MIPMAP_LOD2 = 26,
+ TP_MIPMAP_LOD3 = 27,
+ TP_MIPMAP_LOD4 = 28,
+ TP_MIPMAP_LOD5 = 29,
+ TP_MIPMAP_LOD6 = 30,
+ TP_MIPMAP_LOD7 = 31,
+ TP_MIPMAP_LOD8 = 32,
+ TP_MIPMAP_LOD9 = 33,
+ TP_MIPMAP_LOD10 = 34,
+ TP_MIPMAP_LOD11 = 35,
+ TP_MIPMAP_LOD12 = 36,
+ TP_MIPMAP_LOD13 = 37,
+ TP_MIPMAP_LOD14 = 38,
+};
+
+enum a2xx_tcm_perfcount_select {
+ QUAD0_RD_LAT_FIFO_EMPTY = 0,
+ QUAD0_RD_LAT_FIFO_4TH_FULL = 3,
+ QUAD0_RD_LAT_FIFO_HALF_FULL = 4,
+ QUAD0_RD_LAT_FIFO_FULL = 5,
+ QUAD0_RD_LAT_FIFO_LT_4TH_FULL = 6,
+ READ_STARVED_QUAD0 = 28,
+ READ_STARVED = 32,
+ READ_STALLED_QUAD0 = 33,
+ READ_STALLED = 37,
+ VALID_READ_QUAD0 = 38,
+ TC_TP_STARVED_QUAD0 = 42,
+ TC_TP_STARVED = 46,
+};
+
+enum a2xx_tcf_perfcount_select {
+ VALID_CYCLES = 0,
+ SINGLE_PHASES = 1,
+ ANISO_PHASES = 2,
+ MIP_PHASES = 3,
+ VOL_PHASES = 4,
+ MIP_VOL_PHASES = 5,
+ MIP_ANISO_PHASES = 6,
+ VOL_ANISO_PHASES = 7,
+ ANISO_2_1_PHASES = 8,
+ ANISO_4_1_PHASES = 9,
+ ANISO_6_1_PHASES = 10,
+ ANISO_8_1_PHASES = 11,
+ ANISO_10_1_PHASES = 12,
+ ANISO_12_1_PHASES = 13,
+ ANISO_14_1_PHASES = 14,
+ ANISO_16_1_PHASES = 15,
+ MIP_VOL_ANISO_PHASES = 16,
+ ALIGN_2_PHASES = 17,
+ ALIGN_4_PHASES = 18,
+ TPC_BUSY = 19,
+ TPC_STALLED = 20,
+ TPC_STARVED = 21,
+ TPC_WORKING = 22,
+ TPC_WALKER_BUSY = 23,
+ TPC_WALKER_STALLED = 24,
+ TPC_WALKER_WORKING = 25,
+ TPC_ALIGNER_BUSY = 26,
+ TPC_ALIGNER_STALLED = 27,
+ TPC_ALIGNER_STALLED_BY_BLEND = 28,
+ TPC_ALIGNER_STALLED_BY_CACHE = 29,
+ TPC_ALIGNER_WORKING = 30,
+ TPC_BLEND_BUSY = 31,
+ TPC_BLEND_SYNC = 32,
+ TPC_BLEND_STARVED = 33,
+ TPC_BLEND_WORKING = 34,
+ OPCODE_0x00 = 35,
+ OPCODE_0x01 = 36,
+ OPCODE_0x04 = 37,
+ OPCODE_0x10 = 38,
+ OPCODE_0x11 = 39,
+ OPCODE_0x12 = 40,
+ OPCODE_0x13 = 41,
+ OPCODE_0x18 = 42,
+ OPCODE_0x19 = 43,
+ OPCODE_0x1A = 44,
+ OPCODE_OTHER = 45,
+ IN_FIFO_0_EMPTY = 56,
+ IN_FIFO_0_LT_HALF_FULL = 57,
+ IN_FIFO_0_HALF_FULL = 58,
+ IN_FIFO_0_FULL = 59,
+ IN_FIFO_TPC_EMPTY = 72,
+ IN_FIFO_TPC_LT_HALF_FULL = 73,
+ IN_FIFO_TPC_HALF_FULL = 74,
+ IN_FIFO_TPC_FULL = 75,
+ TPC_TC_XFC = 76,
+ TPC_TC_STATE = 77,
+ TC_STALL = 78,
+ QUAD0_TAPS = 79,
+ QUADS = 83,
+ TCA_SYNC_STALL = 84,
+ TAG_STALL = 85,
+ TCB_SYNC_STALL = 88,
+ TCA_VALID = 89,
+ PROBES_VALID = 90,
+ MISS_STALL = 91,
+ FETCH_FIFO_STALL = 92,
+ TCO_STALL = 93,
+ ANY_STALL = 94,
+ TAG_MISSES = 95,
+ TAG_HITS = 96,
+ SUB_TAG_MISSES = 97,
+ SET0_INVALIDATES = 98,
+ SET1_INVALIDATES = 99,
+ SET2_INVALIDATES = 100,
+ SET3_INVALIDATES = 101,
+ SET0_TAG_MISSES = 102,
+ SET1_TAG_MISSES = 103,
+ SET2_TAG_MISSES = 104,
+ SET3_TAG_MISSES = 105,
+ SET0_TAG_HITS = 106,
+ SET1_TAG_HITS = 107,
+ SET2_TAG_HITS = 108,
+ SET3_TAG_HITS = 109,
+ SET0_SUB_TAG_MISSES = 110,
+ SET1_SUB_TAG_MISSES = 111,
+ SET2_SUB_TAG_MISSES = 112,
+ SET3_SUB_TAG_MISSES = 113,
+ SET0_EVICT1 = 114,
+ SET0_EVICT2 = 115,
+ SET0_EVICT3 = 116,
+ SET0_EVICT4 = 117,
+ SET0_EVICT5 = 118,
+ SET0_EVICT6 = 119,
+ SET0_EVICT7 = 120,
+ SET0_EVICT8 = 121,
+ SET1_EVICT1 = 130,
+ SET1_EVICT2 = 131,
+ SET1_EVICT3 = 132,
+ SET1_EVICT4 = 133,
+ SET1_EVICT5 = 134,
+ SET1_EVICT6 = 135,
+ SET1_EVICT7 = 136,
+ SET1_EVICT8 = 137,
+ SET2_EVICT1 = 146,
+ SET2_EVICT2 = 147,
+ SET2_EVICT3 = 148,
+ SET2_EVICT4 = 149,
+ SET2_EVICT5 = 150,
+ SET2_EVICT6 = 151,
+ SET2_EVICT7 = 152,
+ SET2_EVICT8 = 153,
+ SET3_EVICT1 = 162,
+ SET3_EVICT2 = 163,
+ SET3_EVICT3 = 164,
+ SET3_EVICT4 = 165,
+ SET3_EVICT5 = 166,
+ SET3_EVICT6 = 167,
+ SET3_EVICT7 = 168,
+ SET3_EVICT8 = 169,
+ FF_EMPTY = 178,
+ FF_LT_HALF_FULL = 179,
+ FF_HALF_FULL = 180,
+ FF_FULL = 181,
+ FF_XFC = 182,
+ FF_STALLED = 183,
+ FG_MASKS = 184,
+ FG_LEFT_MASKS = 185,
+ FG_LEFT_MASK_STALLED = 186,
+ FG_LEFT_NOT_DONE_STALL = 187,
+ FG_LEFT_FG_STALL = 188,
+ FG_LEFT_SECTORS = 189,
+ FG0_REQUESTS = 195,
+ FG0_STALLED = 196,
+ MEM_REQ512 = 199,
+ MEM_REQ_SENT = 200,
+ MEM_LOCAL_READ_REQ = 202,
+ TC0_MH_STALLED = 203,
+};
+
+enum a2xx_sq_perfcnt_select {
+ SQ_PIXEL_VECTORS_SUB = 0,
+ SQ_VERTEX_VECTORS_SUB = 1,
+ SQ_ALU0_ACTIVE_VTX_SIMD0 = 2,
+ SQ_ALU1_ACTIVE_VTX_SIMD0 = 3,
+ SQ_ALU0_ACTIVE_PIX_SIMD0 = 4,
+ SQ_ALU1_ACTIVE_PIX_SIMD0 = 5,
+ SQ_ALU0_ACTIVE_VTX_SIMD1 = 6,
+ SQ_ALU1_ACTIVE_VTX_SIMD1 = 7,
+ SQ_ALU0_ACTIVE_PIX_SIMD1 = 8,
+ SQ_ALU1_ACTIVE_PIX_SIMD1 = 9,
+ SQ_EXPORT_CYCLES = 10,
+ SQ_ALU_CST_WRITTEN = 11,
+ SQ_TEX_CST_WRITTEN = 12,
+ SQ_ALU_CST_STALL = 13,
+ SQ_ALU_TEX_STALL = 14,
+ SQ_INST_WRITTEN = 15,
+ SQ_BOOLEAN_WRITTEN = 16,
+ SQ_LOOPS_WRITTEN = 17,
+ SQ_PIXEL_SWAP_IN = 18,
+ SQ_PIXEL_SWAP_OUT = 19,
+ SQ_VERTEX_SWAP_IN = 20,
+ SQ_VERTEX_SWAP_OUT = 21,
+ SQ_ALU_VTX_INST_ISSUED = 22,
+ SQ_TEX_VTX_INST_ISSUED = 23,
+ SQ_VC_VTX_INST_ISSUED = 24,
+ SQ_CF_VTX_INST_ISSUED = 25,
+ SQ_ALU_PIX_INST_ISSUED = 26,
+ SQ_TEX_PIX_INST_ISSUED = 27,
+ SQ_VC_PIX_INST_ISSUED = 28,
+ SQ_CF_PIX_INST_ISSUED = 29,
+ SQ_ALU0_FIFO_EMPTY_SIMD0 = 30,
+ SQ_ALU1_FIFO_EMPTY_SIMD0 = 31,
+ SQ_ALU0_FIFO_EMPTY_SIMD1 = 32,
+ SQ_ALU1_FIFO_EMPTY_SIMD1 = 33,
+ SQ_ALU_NOPS = 34,
+ SQ_PRED_SKIP = 35,
+ SQ_SYNC_ALU_STALL_SIMD0_VTX = 36,
+ SQ_SYNC_ALU_STALL_SIMD1_VTX = 37,
+ SQ_SYNC_TEX_STALL_VTX = 38,
+ SQ_SYNC_VC_STALL_VTX = 39,
+ SQ_CONSTANTS_USED_SIMD0 = 40,
+ SQ_CONSTANTS_SENT_SP_SIMD0 = 41,
+ SQ_GPR_STALL_VTX = 42,
+ SQ_GPR_STALL_PIX = 43,
+ SQ_VTX_RS_STALL = 44,
+ SQ_PIX_RS_STALL = 45,
+ SQ_SX_PC_FULL = 46,
+ SQ_SX_EXP_BUFF_FULL = 47,
+ SQ_SX_POS_BUFF_FULL = 48,
+ SQ_INTERP_QUADS = 49,
+ SQ_INTERP_ACTIVE = 50,
+ SQ_IN_PIXEL_STALL = 51,
+ SQ_IN_VTX_STALL = 52,
+ SQ_VTX_CNT = 53,
+ SQ_VTX_VECTOR2 = 54,
+ SQ_VTX_VECTOR3 = 55,
+ SQ_VTX_VECTOR4 = 56,
+ SQ_PIXEL_VECTOR1 = 57,
+ SQ_PIXEL_VECTOR23 = 58,
+ SQ_PIXEL_VECTOR4 = 59,
+ SQ_CONSTANTS_USED_SIMD1 = 60,
+ SQ_CONSTANTS_SENT_SP_SIMD1 = 61,
+ SQ_SX_MEM_EXP_FULL = 62,
+ SQ_ALU0_ACTIVE_VTX_SIMD2 = 63,
+ SQ_ALU1_ACTIVE_VTX_SIMD2 = 64,
+ SQ_ALU0_ACTIVE_PIX_SIMD2 = 65,
+ SQ_ALU1_ACTIVE_PIX_SIMD2 = 66,
+ SQ_ALU0_ACTIVE_VTX_SIMD3 = 67,
+ SQ_PERFCOUNT_VTX_QUAL_TP_DONE = 68,
+ SQ_ALU0_ACTIVE_PIX_SIMD3 = 69,
+ SQ_PERFCOUNT_PIX_QUAL_TP_DONE = 70,
+ SQ_ALU0_FIFO_EMPTY_SIMD2 = 71,
+ SQ_ALU1_FIFO_EMPTY_SIMD2 = 72,
+ SQ_ALU0_FIFO_EMPTY_SIMD3 = 73,
+ SQ_ALU1_FIFO_EMPTY_SIMD3 = 74,
+ SQ_SYNC_ALU_STALL_SIMD2_VTX = 75,
+ SQ_PERFCOUNT_VTX_POP_THREAD = 76,
+ SQ_SYNC_ALU_STALL_SIMD0_PIX = 77,
+ SQ_SYNC_ALU_STALL_SIMD1_PIX = 78,
+ SQ_SYNC_ALU_STALL_SIMD2_PIX = 79,
+ SQ_PERFCOUNT_PIX_POP_THREAD = 80,
+ SQ_SYNC_TEX_STALL_PIX = 81,
+ SQ_SYNC_VC_STALL_PIX = 82,
+ SQ_CONSTANTS_USED_SIMD2 = 83,
+ SQ_CONSTANTS_SENT_SP_SIMD2 = 84,
+ SQ_PERFCOUNT_VTX_DEALLOC_ACK = 85,
+ SQ_PERFCOUNT_PIX_DEALLOC_ACK = 86,
+ SQ_ALU0_FIFO_FULL_SIMD0 = 87,
+ SQ_ALU1_FIFO_FULL_SIMD0 = 88,
+ SQ_ALU0_FIFO_FULL_SIMD1 = 89,
+ SQ_ALU1_FIFO_FULL_SIMD1 = 90,
+ SQ_ALU0_FIFO_FULL_SIMD2 = 91,
+ SQ_ALU1_FIFO_FULL_SIMD2 = 92,
+ SQ_ALU0_FIFO_FULL_SIMD3 = 93,
+ SQ_ALU1_FIFO_FULL_SIMD3 = 94,
+ VC_PERF_STATIC = 95,
+ VC_PERF_STALLED = 96,
+ VC_PERF_STARVED = 97,
+ VC_PERF_SEND = 98,
+ VC_PERF_ACTUAL_STARVED = 99,
+ PIXEL_THREAD_0_ACTIVE = 100,
+ VERTEX_THREAD_0_ACTIVE = 101,
+ PIXEL_THREAD_0_NUMBER = 102,
+ VERTEX_THREAD_0_NUMBER = 103,
+ VERTEX_EVENT_NUMBER = 104,
+ PIXEL_EVENT_NUMBER = 105,
+ PTRBUFF_EF_PUSH = 106,
+ PTRBUFF_EF_POP_EVENT = 107,
+ PTRBUFF_EF_POP_NEW_VTX = 108,
+ PTRBUFF_EF_POP_DEALLOC = 109,
+ PTRBUFF_EF_POP_PVECTOR = 110,
+ PTRBUFF_EF_POP_PVECTOR_X = 111,
+ PTRBUFF_EF_POP_PVECTOR_VNZ = 112,
+ PTRBUFF_PB_DEALLOC = 113,
+ PTRBUFF_PI_STATE_PPB_POP = 114,
+ PTRBUFF_PI_RTR = 115,
+ PTRBUFF_PI_READ_EN = 116,
+ PTRBUFF_PI_BUFF_SWAP = 117,
+ PTRBUFF_SQ_FREE_BUFF = 118,
+ PTRBUFF_SQ_DEC = 119,
+ PTRBUFF_SC_VALID_CNTL_EVENT = 120,
+ PTRBUFF_SC_VALID_IJ_XFER = 121,
+ PTRBUFF_SC_NEW_VECTOR_1_Q = 122,
+ PTRBUFF_QUAL_NEW_VECTOR = 123,
+ PTRBUFF_QUAL_EVENT = 124,
+ PTRBUFF_END_BUFFER = 125,
+ PTRBUFF_FILL_QUAD = 126,
+ VERTS_WRITTEN_SPI = 127,
+ TP_FETCH_INSTR_EXEC = 128,
+ TP_FETCH_INSTR_REQ = 129,
+ TP_DATA_RETURN = 130,
+ SPI_WRITE_CYCLES_SP = 131,
+ SPI_WRITES_SP = 132,
+ SP_ALU_INSTR_EXEC = 133,
+ SP_CONST_ADDR_TO_SQ = 134,
+ SP_PRED_KILLS_TO_SQ = 135,
+ SP_EXPORT_CYCLES_TO_SX = 136,
+ SP_EXPORTS_TO_SX = 137,
+ SQ_CYCLES_ELAPSED = 138,
+ SQ_TCFS_OPT_ALLOC_EXEC = 139,
+ SQ_TCFS_NO_OPT_ALLOC = 140,
+ SQ_ALU0_NO_OPT_ALLOC = 141,
+ SQ_ALU1_NO_OPT_ALLOC = 142,
+ SQ_TCFS_ARB_XFC_CNT = 143,
+ SQ_ALU0_ARB_XFC_CNT = 144,
+ SQ_ALU1_ARB_XFC_CNT = 145,
+ SQ_TCFS_CFS_UPDATE_CNT = 146,
+ SQ_ALU0_CFS_UPDATE_CNT = 147,
+ SQ_ALU1_CFS_UPDATE_CNT = 148,
+ SQ_VTX_PUSH_THREAD_CNT = 149,
+ SQ_VTX_POP_THREAD_CNT = 150,
+ SQ_PIX_PUSH_THREAD_CNT = 151,
+ SQ_PIX_POP_THREAD_CNT = 152,
+ SQ_PIX_TOTAL = 153,
+ SQ_PIX_KILLED = 154,
+};
+
+enum a2xx_sx_perfcnt_select {
+ SX_EXPORT_VECTORS = 0,
+ SX_DUMMY_QUADS = 1,
+ SX_ALPHA_FAIL = 2,
+ SX_RB_QUAD_BUSY = 3,
+ SX_RB_COLOR_BUSY = 4,
+ SX_RB_QUAD_STALL = 5,
+ SX_RB_COLOR_STALL = 6,
+};
+
+enum a2xx_rbbm_perfcount1_sel {
+ RBBM1_COUNT = 0,
+ RBBM1_NRT_BUSY = 1,
+ RBBM1_RB_BUSY = 2,
+ RBBM1_SQ_CNTX0_BUSY = 3,
+ RBBM1_SQ_CNTX17_BUSY = 4,
+ RBBM1_VGT_BUSY = 5,
+ RBBM1_VGT_NODMA_BUSY = 6,
+ RBBM1_PA_BUSY = 7,
+ RBBM1_SC_CNTX_BUSY = 8,
+ RBBM1_TPC_BUSY = 9,
+ RBBM1_TC_BUSY = 10,
+ RBBM1_SX_BUSY = 11,
+ RBBM1_CP_COHER_BUSY = 12,
+ RBBM1_CP_NRT_BUSY = 13,
+ RBBM1_GFX_IDLE_STALL = 14,
+ RBBM1_INTERRUPT = 15,
+};
+
+enum a2xx_cp_perfcount_sel {
+ ALWAYS_COUNT = 0,
+ TRANS_FIFO_FULL = 1,
+ TRANS_FIFO_AF = 2,
+ RCIU_PFPTRANS_WAIT = 3,
+ RCIU_NRTTRANS_WAIT = 6,
+ CSF_NRT_READ_WAIT = 8,
+ CSF_I1_FIFO_FULL = 9,
+ CSF_I2_FIFO_FULL = 10,
+ CSF_ST_FIFO_FULL = 11,
+ CSF_RING_ROQ_FULL = 13,
+ CSF_I1_ROQ_FULL = 14,
+ CSF_I2_ROQ_FULL = 15,
+ CSF_ST_ROQ_FULL = 16,
+ MIU_TAG_MEM_FULL = 18,
+ MIU_WRITECLEAN = 19,
+ MIU_NRT_WRITE_STALLED = 22,
+ MIU_NRT_READ_STALLED = 23,
+ ME_WRITE_CONFIRM_FIFO_FULL = 24,
+ ME_VS_DEALLOC_FIFO_FULL = 25,
+ ME_PS_DEALLOC_FIFO_FULL = 26,
+ ME_REGS_VS_EVENT_FIFO_FULL = 27,
+ ME_REGS_PS_EVENT_FIFO_FULL = 28,
+ ME_REGS_CF_EVENT_FIFO_FULL = 29,
+ ME_MICRO_RB_STARVED = 30,
+ ME_MICRO_I1_STARVED = 31,
+ ME_MICRO_I2_STARVED = 32,
+ ME_MICRO_ST_STARVED = 33,
+ RCIU_RBBM_DWORD_SENT = 40,
+ ME_BUSY_CLOCKS = 41,
+ ME_WAIT_CONTEXT_AVAIL = 42,
+ PFP_TYPE0_PACKET = 43,
+ PFP_TYPE3_PACKET = 44,
+ CSF_RB_WPTR_NEQ_RPTR = 45,
+ CSF_I1_SIZE_NEQ_ZERO = 46,
+ CSF_I2_SIZE_NEQ_ZERO = 47,
+ CSF_RBI1I2_FETCHING = 48,
+};
+
+enum a2xx_rb_perfcnt_select {
+ RBPERF_CNTX_BUSY = 0,
+ RBPERF_CNTX_BUSY_MAX = 1,
+ RBPERF_SX_QUAD_STARVED = 2,
+ RBPERF_SX_QUAD_STARVED_MAX = 3,
+ RBPERF_GA_GC_CH0_SYS_REQ = 4,
+ RBPERF_GA_GC_CH0_SYS_REQ_MAX = 5,
+ RBPERF_GA_GC_CH1_SYS_REQ = 6,
+ RBPERF_GA_GC_CH1_SYS_REQ_MAX = 7,
+ RBPERF_MH_STARVED = 8,
+ RBPERF_MH_STARVED_MAX = 9,
+ RBPERF_AZ_BC_COLOR_BUSY = 10,
+ RBPERF_AZ_BC_COLOR_BUSY_MAX = 11,
+ RBPERF_AZ_BC_Z_BUSY = 12,
+ RBPERF_AZ_BC_Z_BUSY_MAX = 13,
+ RBPERF_RB_SC_TILE_RTR_N = 14,
+ RBPERF_RB_SC_TILE_RTR_N_MAX = 15,
+ RBPERF_RB_SC_SAMP_RTR_N = 16,
+ RBPERF_RB_SC_SAMP_RTR_N_MAX = 17,
+ RBPERF_RB_SX_QUAD_RTR_N = 18,
+ RBPERF_RB_SX_QUAD_RTR_N_MAX = 19,
+ RBPERF_RB_SX_COLOR_RTR_N = 20,
+ RBPERF_RB_SX_COLOR_RTR_N_MAX = 21,
+ RBPERF_RB_SC_SAMP_LZ_BUSY = 22,
+ RBPERF_RB_SC_SAMP_LZ_BUSY_MAX = 23,
+ RBPERF_ZXP_STALL = 24,
+ RBPERF_ZXP_STALL_MAX = 25,
+ RBPERF_EVENT_PENDING = 26,
+ RBPERF_EVENT_PENDING_MAX = 27,
+ RBPERF_RB_MH_VALID = 28,
+ RBPERF_RB_MH_VALID_MAX = 29,
+ RBPERF_SX_RB_QUAD_SEND = 30,
+ RBPERF_SX_RB_COLOR_SEND = 31,
+ RBPERF_SC_RB_TILE_SEND = 32,
+ RBPERF_SC_RB_SAMPLE_SEND = 33,
+ RBPERF_SX_RB_MEM_EXPORT = 34,
+ RBPERF_SX_RB_QUAD_EVENT = 35,
+ RBPERF_SC_RB_TILE_EVENT_FILTERED = 36,
+ RBPERF_SC_RB_TILE_EVENT_ALL = 37,
+ RBPERF_RB_SC_EZ_SEND = 38,
+ RBPERF_RB_SX_INDEX_SEND = 39,
+ RBPERF_GMEM_INTFO_RD = 40,
+ RBPERF_GMEM_INTF1_RD = 41,
+ RBPERF_GMEM_INTFO_WR = 42,
+ RBPERF_GMEM_INTF1_WR = 43,
+ RBPERF_RB_CP_CONTEXT_DONE = 44,
+ RBPERF_RB_CP_CACHE_FLUSH = 45,
+ RBPERF_ZPASS_DONE = 46,
+ RBPERF_ZCMD_VALID = 47,
+ RBPERF_CCMD_VALID = 48,
+ RBPERF_ACCUM_GRANT = 49,
+ RBPERF_ACCUM_C0_GRANT = 50,
+ RBPERF_ACCUM_C1_GRANT = 51,
+ RBPERF_ACCUM_FULL_BE_WR = 52,
+ RBPERF_ACCUM_REQUEST_NO_GRANT = 53,
+ RBPERF_ACCUM_TIMEOUT_PULSE = 54,
+ RBPERF_ACCUM_LIN_TIMEOUT_PULSE = 55,
+ RBPERF_ACCUM_CAM_HIT_FLUSHING = 56,
+};
+
+enum a2xx_mh_perfcnt_select {
+ CP_R0_REQUESTS = 0,
+ CP_R1_REQUESTS = 1,
+ CP_R2_REQUESTS = 2,
+ CP_R3_REQUESTS = 3,
+ CP_R4_REQUESTS = 4,
+ CP_TOTAL_READ_REQUESTS = 5,
+ CP_TOTAL_WRITE_REQUESTS = 6,
+ CP_TOTAL_REQUESTS = 7,
+ CP_DATA_BYTES_WRITTEN = 8,
+ CP_WRITE_CLEAN_RESPONSES = 9,
+ CP_R0_READ_BURSTS_RECEIVED = 10,
+ CP_R1_READ_BURSTS_RECEIVED = 11,
+ CP_R2_READ_BURSTS_RECEIVED = 12,
+ CP_R3_READ_BURSTS_RECEIVED = 13,
+ CP_R4_READ_BURSTS_RECEIVED = 14,
+ CP_TOTAL_READ_BURSTS_RECEIVED = 15,
+ CP_R0_DATA_BEATS_READ = 16,
+ CP_R1_DATA_BEATS_READ = 17,
+ CP_R2_DATA_BEATS_READ = 18,
+ CP_R3_DATA_BEATS_READ = 19,
+ CP_R4_DATA_BEATS_READ = 20,
+ CP_TOTAL_DATA_BEATS_READ = 21,
+ VGT_R0_REQUESTS = 22,
+ VGT_R1_REQUESTS = 23,
+ VGT_TOTAL_REQUESTS = 24,
+ VGT_R0_READ_BURSTS_RECEIVED = 25,
+ VGT_R1_READ_BURSTS_RECEIVED = 26,
+ VGT_TOTAL_READ_BURSTS_RECEIVED = 27,
+ VGT_R0_DATA_BEATS_READ = 28,
+ VGT_R1_DATA_BEATS_READ = 29,
+ VGT_TOTAL_DATA_BEATS_READ = 30,
+ TC_TOTAL_REQUESTS = 31,
+ TC_ROQ_REQUESTS = 32,
+ TC_INFO_SENT = 33,
+ TC_READ_BURSTS_RECEIVED = 34,
+ TC_DATA_BEATS_READ = 35,
+ TCD_BURSTS_READ = 36,
+ RB_REQUESTS = 37,
+ RB_DATA_BYTES_WRITTEN = 38,
+ RB_WRITE_CLEAN_RESPONSES = 39,
+ AXI_READ_REQUESTS_ID_0 = 40,
+ AXI_READ_REQUESTS_ID_1 = 41,
+ AXI_READ_REQUESTS_ID_2 = 42,
+ AXI_READ_REQUESTS_ID_3 = 43,
+ AXI_READ_REQUESTS_ID_4 = 44,
+ AXI_READ_REQUESTS_ID_5 = 45,
+ AXI_READ_REQUESTS_ID_6 = 46,
+ AXI_READ_REQUESTS_ID_7 = 47,
+ AXI_TOTAL_READ_REQUESTS = 48,
+ AXI_WRITE_REQUESTS_ID_0 = 49,
+ AXI_WRITE_REQUESTS_ID_1 = 50,
+ AXI_WRITE_REQUESTS_ID_2 = 51,
+ AXI_WRITE_REQUESTS_ID_3 = 52,
+ AXI_WRITE_REQUESTS_ID_4 = 53,
+ AXI_WRITE_REQUESTS_ID_5 = 54,
+ AXI_WRITE_REQUESTS_ID_6 = 55,
+ AXI_WRITE_REQUESTS_ID_7 = 56,
+ AXI_TOTAL_WRITE_REQUESTS = 57,
+ AXI_TOTAL_REQUESTS_ID_0 = 58,
+ AXI_TOTAL_REQUESTS_ID_1 = 59,
+ AXI_TOTAL_REQUESTS_ID_2 = 60,
+ AXI_TOTAL_REQUESTS_ID_3 = 61,
+ AXI_TOTAL_REQUESTS_ID_4 = 62,
+ AXI_TOTAL_REQUESTS_ID_5 = 63,
+ AXI_TOTAL_REQUESTS_ID_6 = 64,
+ AXI_TOTAL_REQUESTS_ID_7 = 65,
+ AXI_TOTAL_REQUESTS = 66,
+ AXI_READ_CHANNEL_BURSTS_ID_0 = 67,
+ AXI_READ_CHANNEL_BURSTS_ID_1 = 68,
+ AXI_READ_CHANNEL_BURSTS_ID_2 = 69,
+ AXI_READ_CHANNEL_BURSTS_ID_3 = 70,
+ AXI_READ_CHANNEL_BURSTS_ID_4 = 71,
+ AXI_READ_CHANNEL_BURSTS_ID_5 = 72,
+ AXI_READ_CHANNEL_BURSTS_ID_6 = 73,
+ AXI_READ_CHANNEL_BURSTS_ID_7 = 74,
+ AXI_READ_CHANNEL_TOTAL_BURSTS = 75,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_0 = 76,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_1 = 77,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_2 = 78,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_3 = 79,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_4 = 80,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_5 = 81,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_6 = 82,
+ AXI_READ_CHANNEL_DATA_BEATS_READ_ID_7 = 83,
+ AXI_READ_CHANNEL_TOTAL_DATA_BEATS_READ = 84,
+ AXI_WRITE_CHANNEL_BURSTS_ID_0 = 85,
+ AXI_WRITE_CHANNEL_BURSTS_ID_1 = 86,
+ AXI_WRITE_CHANNEL_BURSTS_ID_2 = 87,
+ AXI_WRITE_CHANNEL_BURSTS_ID_3 = 88,
+ AXI_WRITE_CHANNEL_BURSTS_ID_4 = 89,
+ AXI_WRITE_CHANNEL_BURSTS_ID_5 = 90,
+ AXI_WRITE_CHANNEL_BURSTS_ID_6 = 91,
+ AXI_WRITE_CHANNEL_BURSTS_ID_7 = 92,
+ AXI_WRITE_CHANNEL_TOTAL_BURSTS = 93,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_0 = 94,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_1 = 95,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_2 = 96,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_3 = 97,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_4 = 98,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_5 = 99,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_6 = 100,
+ AXI_WRITE_CHANNEL_DATA_BYTES_WRITTEN_ID_7 = 101,
+ AXI_WRITE_CHANNEL_TOTAL_DATA_BYTES_WRITTEN = 102,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_0 = 103,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_1 = 104,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_2 = 105,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_3 = 106,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_4 = 107,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_5 = 108,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_6 = 109,
+ AXI_WRITE_RESPONSE_CHANNEL_RESPONSES_ID_7 = 110,
+ AXI_WRITE_RESPONSE_CHANNEL_TOTAL_RESPONSES = 111,
+ TOTAL_MMU_MISSES = 112,
+ MMU_READ_MISSES = 113,
+ MMU_WRITE_MISSES = 114,
+ TOTAL_MMU_HITS = 115,
+ MMU_READ_HITS = 116,
+ MMU_WRITE_HITS = 117,
+ SPLIT_MODE_TC_HITS = 118,
+ SPLIT_MODE_TC_MISSES = 119,
+ SPLIT_MODE_NON_TC_HITS = 120,
+ SPLIT_MODE_NON_TC_MISSES = 121,
+ STALL_AWAITING_TLB_MISS_FETCH = 122,
+ MMU_TLB_MISS_READ_BURSTS_RECEIVED = 123,
+ MMU_TLB_MISS_DATA_BEATS_READ = 124,
+ CP_CYCLES_HELD_OFF = 125,
+ VGT_CYCLES_HELD_OFF = 126,
+ TC_CYCLES_HELD_OFF = 127,
+ TC_ROQ_CYCLES_HELD_OFF = 128,
+ TC_CYCLES_HELD_OFF_TCD_FULL = 129,
+ RB_CYCLES_HELD_OFF = 130,
+ TOTAL_CYCLES_ANY_CLNT_HELD_OFF = 131,
+ TLB_MISS_CYCLES_HELD_OFF = 132,
+ AXI_READ_REQUEST_HELD_OFF = 133,
+ AXI_WRITE_REQUEST_HELD_OFF = 134,
+ AXI_REQUEST_HELD_OFF = 135,
+ AXI_REQUEST_HELD_OFF_INFLIGHT_LIMIT = 136,
+ AXI_WRITE_DATA_HELD_OFF = 137,
+ CP_SAME_PAGE_BANK_REQUESTS = 138,
+ VGT_SAME_PAGE_BANK_REQUESTS = 139,
+ TC_SAME_PAGE_BANK_REQUESTS = 140,
+ TC_ARB_HOLD_SAME_PAGE_BANK_REQUESTS = 141,
+ RB_SAME_PAGE_BANK_REQUESTS = 142,
+ TOTAL_SAME_PAGE_BANK_REQUESTS = 143,
+ CP_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 144,
+ VGT_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 145,
+ TC_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 146,
+ RB_SAME_PAGE_BANK_REQUESTS_KILLED_FAIRNESS_LIMIT = 147,
+ TOTAL_SAME_PAGE_BANK_KILLED_FAIRNESS_LIMIT = 148,
+ TOTAL_MH_READ_REQUESTS = 149,
+ TOTAL_MH_WRITE_REQUESTS = 150,
+ TOTAL_MH_REQUESTS = 151,
+ MH_BUSY = 152,
+ CP_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 153,
+ VGT_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 154,
+ TC_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 155,
+ RB_NTH_ACCESS_SAME_PAGE_BANK_SEQUENCE = 156,
+ TC_ROQ_N_VALID_ENTRIES = 157,
+ ARQ_N_ENTRIES = 158,
+ WDB_N_ENTRIES = 159,
+ MH_READ_LATENCY_OUTST_REQ_SUM = 160,
+ MC_READ_LATENCY_OUTST_REQ_SUM = 161,
+ MC_TOTAL_READ_REQUESTS = 162,
+ ELAPSED_CYCLES_MH_GATED_CLK = 163,
+ ELAPSED_CLK_CYCLES = 164,
+ CP_W_16B_REQUESTS = 165,
+ CP_W_32B_REQUESTS = 166,
+ TC_16B_REQUESTS = 167,
+ TC_32B_REQUESTS = 168,
+ PA_REQUESTS = 169,
+ PA_DATA_BYTES_WRITTEN = 170,
+ PA_WRITE_CLEAN_RESPONSES = 171,
+ PA_CYCLES_HELD_OFF = 172,
+ AXI_READ_REQUEST_DATA_BEATS_ID_0 = 173,
+ AXI_READ_REQUEST_DATA_BEATS_ID_1 = 174,
+ AXI_READ_REQUEST_DATA_BEATS_ID_2 = 175,
+ AXI_READ_REQUEST_DATA_BEATS_ID_3 = 176,
+ AXI_READ_REQUEST_DATA_BEATS_ID_4 = 177,
+ AXI_READ_REQUEST_DATA_BEATS_ID_5 = 178,
+ AXI_READ_REQUEST_DATA_BEATS_ID_6 = 179,
+ AXI_READ_REQUEST_DATA_BEATS_ID_7 = 180,
+ AXI_TOTAL_READ_REQUEST_DATA_BEATS = 181,
+};
+
+enum perf_mode_cnt {
+ PERF_STATE_RESET = 0,
+ PERF_STATE_ENABLE = 1,
+ PERF_STATE_FREEZE = 2,
+};
+
+enum adreno_mmu_clnt_beh {
+ BEH_NEVR = 0,
+ BEH_TRAN_RNG = 1,
+ BEH_TRAN_FLT = 2,
+};
+
+enum sq_tex_clamp {
+ SQ_TEX_WRAP = 0,
+ SQ_TEX_MIRROR = 1,
+ SQ_TEX_CLAMP_LAST_TEXEL = 2,
+ SQ_TEX_MIRROR_ONCE_LAST_TEXEL = 3,
+ SQ_TEX_CLAMP_HALF_BORDER = 4,
+ SQ_TEX_MIRROR_ONCE_HALF_BORDER = 5,
+ SQ_TEX_CLAMP_BORDER = 6,
+ SQ_TEX_MIRROR_ONCE_BORDER = 7,
+};
+
+enum sq_tex_swiz {
+ SQ_TEX_X = 0,
+ SQ_TEX_Y = 1,
+ SQ_TEX_Z = 2,
+ SQ_TEX_W = 3,
+ SQ_TEX_ZERO = 4,
+ SQ_TEX_ONE = 5,
+};
+
+enum sq_tex_filter {
+ SQ_TEX_FILTER_POINT = 0,
+ SQ_TEX_FILTER_BILINEAR = 1,
+ SQ_TEX_FILTER_BASEMAP = 2,
+ SQ_TEX_FILTER_USE_FETCH_CONST = 3,
+};
+
+enum sq_tex_aniso_filter {
+ SQ_TEX_ANISO_FILTER_DISABLED = 0,
+ SQ_TEX_ANISO_FILTER_MAX_1_1 = 1,
+ SQ_TEX_ANISO_FILTER_MAX_2_1 = 2,
+ SQ_TEX_ANISO_FILTER_MAX_4_1 = 3,
+ SQ_TEX_ANISO_FILTER_MAX_8_1 = 4,
+ SQ_TEX_ANISO_FILTER_MAX_16_1 = 5,
+ SQ_TEX_ANISO_FILTER_USE_FETCH_CONST = 7,
+};
+
+enum sq_tex_dimension {
+ SQ_TEX_DIMENSION_1D = 0,
+ SQ_TEX_DIMENSION_2D = 1,
+ SQ_TEX_DIMENSION_3D = 2,
+ SQ_TEX_DIMENSION_CUBE = 3,
+};
+
+enum sq_tex_border_color {
+ SQ_TEX_BORDER_COLOR_BLACK = 0,
+ SQ_TEX_BORDER_COLOR_WHITE = 1,
+ SQ_TEX_BORDER_COLOR_ACBYCR_BLACK = 2,
+ SQ_TEX_BORDER_COLOR_ACBCRY_BLACK = 3,
+};
+
+enum sq_tex_sign {
+ SQ_TEX_SIGN_UNSIGNED = 0,
+ SQ_TEX_SIGN_SIGNED = 1,
+ SQ_TEX_SIGN_UNSIGNED_BIASED = 2,
+ SQ_TEX_SIGN_GAMMA = 3,
+};
+
+enum sq_tex_endian {
+ SQ_TEX_ENDIAN_NONE = 0,
+ SQ_TEX_ENDIAN_8IN16 = 1,
+ SQ_TEX_ENDIAN_8IN32 = 2,
+ SQ_TEX_ENDIAN_16IN32 = 3,
+};
+
+enum sq_tex_clamp_policy {
+ SQ_TEX_CLAMP_POLICY_D3D = 0,
+ SQ_TEX_CLAMP_POLICY_OGL = 1,
+};
+
+enum sq_tex_num_format {
+ SQ_TEX_NUM_FORMAT_FRAC = 0,
+ SQ_TEX_NUM_FORMAT_INT = 1,
+};
+
+enum sq_tex_type {
+ SQ_TEX_TYPE_0 = 0,
+ SQ_TEX_TYPE_1 = 1,
+ SQ_TEX_TYPE_2 = 2,
+ SQ_TEX_TYPE_3 = 3,
+};
+
+#define REG_A2XX_RBBM_PATCH_RELEASE 0x00000001
+
+#define REG_A2XX_RBBM_CNTL 0x0000003b
+
+#define REG_A2XX_RBBM_SOFT_RESET 0x0000003c
+
+#define REG_A2XX_CP_PFP_UCODE_ADDR 0x000000c0
+
+#define REG_A2XX_CP_PFP_UCODE_DATA 0x000000c1
+
+#define REG_A2XX_MH_MMU_CONFIG 0x00000040
+#define A2XX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001
+#define A2XX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002
+#define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030
+#define A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4
+static inline uint32_t A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0
+#define A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300
+#define A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00
+#define A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000
+#define A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000
+#define A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000
+#define A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16
+static inline uint32_t A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000
+#define A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18
+static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000
+#define A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20
+static inline uint32_t A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000
+#define A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22
+static inline uint32_t A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK;
+}
+#define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000
+#define A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24
+static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK;
+}
+
+#define REG_A2XX_MH_MMU_VA_RANGE 0x00000041
+#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK 0x00000fff
+#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT 0
+static inline uint32_t A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(uint32_t val)
+{
+ return ((val) << A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT) & A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK;
+}
+#define A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK 0xfffff000
+#define A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT 12
+static inline uint32_t A2XX_MH_MMU_VA_RANGE_VA_BASE(uint32_t val)
+{
+ return ((val) << A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT) & A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK;
+}
+
+#define REG_A2XX_MH_MMU_PT_BASE 0x00000042
+
+#define REG_A2XX_MH_MMU_PAGE_FAULT 0x00000043
+
+#define REG_A2XX_MH_MMU_TRAN_ERROR 0x00000044
+
+#define REG_A2XX_MH_MMU_INVALIDATE 0x00000045
+#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL 0x00000001
+#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC 0x00000002
+
+#define REG_A2XX_MH_MMU_MPU_BASE 0x00000046
+
+#define REG_A2XX_MH_MMU_MPU_END 0x00000047
+
+#define REG_A2XX_NQWAIT_UNTIL 0x00000394
+
+#define REG_A2XX_RBBM_PERFCOUNTER0_SELECT 0x00000395
+
+#define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000396
+
+#define REG_A2XX_RBBM_PERFCOUNTER0_LO 0x00000397
+
+#define REG_A2XX_RBBM_PERFCOUNTER0_HI 0x00000398
+
+#define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000399
+
+#define REG_A2XX_RBBM_PERFCOUNTER1_HI 0x0000039a
+
+#define REG_A2XX_RBBM_DEBUG 0x0000039b
+
+#define REG_A2XX_RBBM_PM_OVERRIDE1 0x0000039c
+#define A2XX_RBBM_PM_OVERRIDE1_RBBM_AHBCLK_PM_OVERRIDE 0x00000001
+#define A2XX_RBBM_PM_OVERRIDE1_SC_REG_SCLK_PM_OVERRIDE 0x00000002
+#define A2XX_RBBM_PM_OVERRIDE1_SC_SCLK_PM_OVERRIDE 0x00000004
+#define A2XX_RBBM_PM_OVERRIDE1_SP_TOP_SCLK_PM_OVERRIDE 0x00000008
+#define A2XX_RBBM_PM_OVERRIDE1_SP_V0_SCLK_PM_OVERRIDE 0x00000010
+#define A2XX_RBBM_PM_OVERRIDE1_SQ_REG_SCLK_PM_OVERRIDE 0x00000020
+#define A2XX_RBBM_PM_OVERRIDE1_SQ_REG_FIFOS_SCLK_PM_OVERRIDE 0x00000040
+#define A2XX_RBBM_PM_OVERRIDE1_SQ_CONST_MEM_SCLK_PM_OVERRIDE 0x00000080
+#define A2XX_RBBM_PM_OVERRIDE1_SQ_SQ_SCLK_PM_OVERRIDE 0x00000100
+#define A2XX_RBBM_PM_OVERRIDE1_SX_SCLK_PM_OVERRIDE 0x00000200
+#define A2XX_RBBM_PM_OVERRIDE1_SX_REG_SCLK_PM_OVERRIDE 0x00000400
+#define A2XX_RBBM_PM_OVERRIDE1_TCM_TCO_SCLK_PM_OVERRIDE 0x00000800
+#define A2XX_RBBM_PM_OVERRIDE1_TCM_TCM_SCLK_PM_OVERRIDE 0x00001000
+#define A2XX_RBBM_PM_OVERRIDE1_TCM_TCD_SCLK_PM_OVERRIDE 0x00002000
+#define A2XX_RBBM_PM_OVERRIDE1_TCM_REG_SCLK_PM_OVERRIDE 0x00004000
+#define A2XX_RBBM_PM_OVERRIDE1_TPC_TPC_SCLK_PM_OVERRIDE 0x00008000
+#define A2XX_RBBM_PM_OVERRIDE1_TPC_REG_SCLK_PM_OVERRIDE 0x00010000
+#define A2XX_RBBM_PM_OVERRIDE1_TCF_TCA_SCLK_PM_OVERRIDE 0x00020000
+#define A2XX_RBBM_PM_OVERRIDE1_TCF_TCB_SCLK_PM_OVERRIDE 0x00040000
+#define A2XX_RBBM_PM_OVERRIDE1_TCF_TCB_READ_SCLK_PM_OVERRIDE 0x00080000
+#define A2XX_RBBM_PM_OVERRIDE1_TP_TP_SCLK_PM_OVERRIDE 0x00100000
+#define A2XX_RBBM_PM_OVERRIDE1_TP_REG_SCLK_PM_OVERRIDE 0x00200000
+#define A2XX_RBBM_PM_OVERRIDE1_CP_G_SCLK_PM_OVERRIDE 0x00400000
+#define A2XX_RBBM_PM_OVERRIDE1_CP_REG_SCLK_PM_OVERRIDE 0x00800000
+#define A2XX_RBBM_PM_OVERRIDE1_CP_G_REG_SCLK_PM_OVERRIDE 0x01000000
+#define A2XX_RBBM_PM_OVERRIDE1_SPI_SCLK_PM_OVERRIDE 0x02000000
+#define A2XX_RBBM_PM_OVERRIDE1_RB_REG_SCLK_PM_OVERRIDE 0x04000000
+#define A2XX_RBBM_PM_OVERRIDE1_RB_SCLK_PM_OVERRIDE 0x08000000
+#define A2XX_RBBM_PM_OVERRIDE1_MH_MH_SCLK_PM_OVERRIDE 0x10000000
+#define A2XX_RBBM_PM_OVERRIDE1_MH_REG_SCLK_PM_OVERRIDE 0x20000000
+#define A2XX_RBBM_PM_OVERRIDE1_MH_MMU_SCLK_PM_OVERRIDE 0x40000000
+#define A2XX_RBBM_PM_OVERRIDE1_MH_TCROQ_SCLK_PM_OVERRIDE 0x80000000
+
+#define REG_A2XX_RBBM_PM_OVERRIDE2 0x0000039d
+#define A2XX_RBBM_PM_OVERRIDE2_PA_REG_SCLK_PM_OVERRIDE 0x00000001
+#define A2XX_RBBM_PM_OVERRIDE2_PA_PA_SCLK_PM_OVERRIDE 0x00000002
+#define A2XX_RBBM_PM_OVERRIDE2_PA_AG_SCLK_PM_OVERRIDE 0x00000004
+#define A2XX_RBBM_PM_OVERRIDE2_VGT_REG_SCLK_PM_OVERRIDE 0x00000008
+#define A2XX_RBBM_PM_OVERRIDE2_VGT_FIFOS_SCLK_PM_OVERRIDE 0x00000010
+#define A2XX_RBBM_PM_OVERRIDE2_VGT_VGT_SCLK_PM_OVERRIDE 0x00000020
+#define A2XX_RBBM_PM_OVERRIDE2_DEBUG_PERF_SCLK_PM_OVERRIDE 0x00000040
+#define A2XX_RBBM_PM_OVERRIDE2_PERM_SCLK_PM_OVERRIDE 0x00000080
+#define A2XX_RBBM_PM_OVERRIDE2_GC_GA_GMEM0_PM_OVERRIDE 0x00000100
+#define A2XX_RBBM_PM_OVERRIDE2_GC_GA_GMEM1_PM_OVERRIDE 0x00000200
+#define A2XX_RBBM_PM_OVERRIDE2_GC_GA_GMEM2_PM_OVERRIDE 0x00000400
+#define A2XX_RBBM_PM_OVERRIDE2_GC_GA_GMEM3_PM_OVERRIDE 0x00000800
+
+#define REG_A2XX_RBBM_DEBUG_OUT 0x000003a0
+
+#define REG_A2XX_RBBM_DEBUG_CNTL 0x000003a1
+
+#define REG_A2XX_RBBM_READ_ERROR 0x000003b3
+
+#define REG_A2XX_RBBM_INT_CNTL 0x000003b4
+#define A2XX_RBBM_INT_CNTL_RDERR_INT_MASK 0x00000001
+#define A2XX_RBBM_INT_CNTL_DISPLAY_UPDATE_INT_MASK 0x00000002
+#define A2XX_RBBM_INT_CNTL_GUI_IDLE_INT_MASK 0x00080000
+
+#define REG_A2XX_RBBM_INT_STATUS 0x000003b5
+
+#define REG_A2XX_RBBM_INT_ACK 0x000003b6
+
+#define REG_A2XX_MASTER_INT_SIGNAL 0x000003b7
+#define A2XX_MASTER_INT_SIGNAL_MH_INT_STAT 0x00000020
+#define A2XX_MASTER_INT_SIGNAL_SQ_INT_STAT 0x04000000
+#define A2XX_MASTER_INT_SIGNAL_CP_INT_STAT 0x40000000
+#define A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT 0x80000000
+
+#define REG_A2XX_RBBM_PERIPHID1 0x000003f9
+
+#define REG_A2XX_RBBM_PERIPHID2 0x000003fa
+
+#define REG_A2XX_CP_PERFMON_CNTL 0x00000444
+#define A2XX_CP_PERFMON_CNTL_PERF_MODE_CNT__MASK 0x00000007
+#define A2XX_CP_PERFMON_CNTL_PERF_MODE_CNT__SHIFT 0
+static inline uint32_t A2XX_CP_PERFMON_CNTL_PERF_MODE_CNT(enum perf_mode_cnt val)
+{
+ return ((val) << A2XX_CP_PERFMON_CNTL_PERF_MODE_CNT__SHIFT) & A2XX_CP_PERFMON_CNTL_PERF_MODE_CNT__MASK;
+}
+
+#define REG_A2XX_CP_PERFCOUNTER_SELECT 0x00000445
+
+#define REG_A2XX_CP_PERFCOUNTER_LO 0x00000446
+
+#define REG_A2XX_CP_PERFCOUNTER_HI 0x00000447
+
+#define REG_A2XX_RBBM_STATUS 0x000005d0
+#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK 0x0000001f
+#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT 0
+static inline uint32_t A2XX_RBBM_STATUS_CMDFIFO_AVAIL(uint32_t val)
+{
+ return ((val) << A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT) & A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK;
+}
+#define A2XX_RBBM_STATUS_TC_BUSY 0x00000020
+#define A2XX_RBBM_STATUS_HIRQ_PENDING 0x00000100
+#define A2XX_RBBM_STATUS_CPRQ_PENDING 0x00000200
+#define A2XX_RBBM_STATUS_CFRQ_PENDING 0x00000400
+#define A2XX_RBBM_STATUS_PFRQ_PENDING 0x00000800
+#define A2XX_RBBM_STATUS_VGT_BUSY_NO_DMA 0x00001000
+#define A2XX_RBBM_STATUS_RBBM_WU_BUSY 0x00004000
+#define A2XX_RBBM_STATUS_CP_NRT_BUSY 0x00010000
+#define A2XX_RBBM_STATUS_MH_BUSY 0x00040000
+#define A2XX_RBBM_STATUS_MH_COHERENCY_BUSY 0x00080000
+#define A2XX_RBBM_STATUS_SX_BUSY 0x00200000
+#define A2XX_RBBM_STATUS_TPC_BUSY 0x00400000
+#define A2XX_RBBM_STATUS_SC_CNTX_BUSY 0x01000000
+#define A2XX_RBBM_STATUS_PA_BUSY 0x02000000
+#define A2XX_RBBM_STATUS_VGT_BUSY 0x04000000
+#define A2XX_RBBM_STATUS_SQ_CNTX17_BUSY 0x08000000
+#define A2XX_RBBM_STATUS_SQ_CNTX0_BUSY 0x10000000
+#define A2XX_RBBM_STATUS_RB_CNTX_BUSY 0x40000000
+#define A2XX_RBBM_STATUS_GUI_ACTIVE 0x80000000
+
+#define REG_A2XX_MH_ARBITER_CONFIG 0x00000a40
+#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK 0x0000003f
+#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT 0
+static inline uint32_t A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(uint32_t val)
+{
+ return ((val) << A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__SHIFT) & A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT__MASK;
+}
+#define A2XX_MH_ARBITER_CONFIG_SAME_PAGE_GRANULARITY 0x00000040
+#define A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE 0x00000080
+#define A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE 0x00000100
+#define A2XX_MH_ARBITER_CONFIG_L2_ARB_CONTROL 0x00000200
+#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK 0x00001c00
+#define A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT 10
+static inline uint32_t A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__SHIFT) & A2XX_MH_ARBITER_CONFIG_PAGE_SIZE__MASK;
+}
+#define A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE 0x00002000
+#define A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE 0x00004000
+#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE 0x00008000
+#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK 0x003f0000
+#define A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT 16
+static inline uint32_t A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(uint32_t val)
+{
+ return ((val) << A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__SHIFT) & A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT__MASK;
+}
+#define A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE 0x00400000
+#define A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE 0x00800000
+#define A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE 0x01000000
+#define A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE 0x02000000
+#define A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE 0x04000000
+
+#define REG_A2XX_MH_INTERRUPT_MASK 0x00000a42
+#define A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR 0x00000001
+#define A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR 0x00000002
+#define A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT 0x00000004
+
+#define REG_A2XX_MH_INTERRUPT_STATUS 0x00000a43
+
+#define REG_A2XX_MH_INTERRUPT_CLEAR 0x00000a44
+
+#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1 0x00000a54
+
+#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG2 0x00000a55
+
+#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01
+#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
+#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A2XX_A220_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT) & A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
+#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT 5
+static inline uint32_t A2XX_A220_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT) & A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
+static inline uint32_t REG_A2XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+
+static inline uint32_t REG_A2XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+
+static inline uint32_t REG_A2XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; }
+
+static inline uint32_t REG_A2XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
+
+#define REG_A2XX_PC_DEBUG_CNTL 0x00000c38
+
+#define REG_A2XX_PC_DEBUG_DATA 0x00000c39
+
+#define REG_A2XX_PA_SC_VIZ_QUERY_STATUS 0x00000c44
+
+#define REG_A2XX_GRAS_DEBUG_CNTL 0x00000c80
+
+#define REG_A2XX_PA_SU_DEBUG_CNTL 0x00000c80
+
+#define REG_A2XX_GRAS_DEBUG_DATA 0x00000c81
+
+#define REG_A2XX_PA_SU_DEBUG_DATA 0x00000c81
+
+#define REG_A2XX_PA_SU_FACE_DATA 0x00000c86
+#define A2XX_PA_SU_FACE_DATA_BASE_ADDR__MASK 0xffffffe0
+#define A2XX_PA_SU_FACE_DATA_BASE_ADDR__SHIFT 5
+static inline uint32_t A2XX_PA_SU_FACE_DATA_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A2XX_PA_SU_FACE_DATA_BASE_ADDR__SHIFT) & A2XX_PA_SU_FACE_DATA_BASE_ADDR__MASK;
+}
+
+#define REG_A2XX_SQ_GPR_MANAGEMENT 0x00000d00
+#define A2XX_SQ_GPR_MANAGEMENT_REG_DYNAMIC 0x00000001
+#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__MASK 0x00000ff0
+#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__SHIFT 4
+static inline uint32_t A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX(uint32_t val)
+{
+ return ((val) << A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__SHIFT) & A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_PIX__MASK;
+}
+#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__MASK 0x000ff000
+#define A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__SHIFT 12
+static inline uint32_t A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX(uint32_t val)
+{
+ return ((val) << A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__SHIFT) & A2XX_SQ_GPR_MANAGEMENT_REG_SIZE_VTX__MASK;
+}
+
+#define REG_A2XX_SQ_FLOW_CONTROL 0x00000d01
+
+#define REG_A2XX_SQ_INST_STORE_MANAGMENT 0x00000d02
+#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__MASK 0x00000fff
+#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__SHIFT 0
+static inline uint32_t A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX(uint32_t val)
+{
+ return ((val) << A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__SHIFT) & A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_PIX__MASK;
+}
+#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__MASK 0x0fff0000
+#define A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__SHIFT 16
+static inline uint32_t A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX(uint32_t val)
+{
+ return ((val) << A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__SHIFT) & A2XX_SQ_INST_STORE_MANAGMENT_INST_BASE_VTX__MASK;
+}
+
+#define REG_A2XX_SQ_DEBUG_MISC 0x00000d05
+
+#define REG_A2XX_SQ_INT_CNTL 0x00000d34
+
+#define REG_A2XX_SQ_INT_STATUS 0x00000d35
+
+#define REG_A2XX_SQ_INT_ACK 0x00000d36
+
+#define REG_A2XX_SQ_DEBUG_INPUT_FSM 0x00000dae
+
+#define REG_A2XX_SQ_DEBUG_CONST_MGR_FSM 0x00000daf
+
+#define REG_A2XX_SQ_DEBUG_TP_FSM 0x00000db0
+
+#define REG_A2XX_SQ_DEBUG_FSM_ALU_0 0x00000db1
+
+#define REG_A2XX_SQ_DEBUG_FSM_ALU_1 0x00000db2
+
+#define REG_A2XX_SQ_DEBUG_EXP_ALLOC 0x00000db3
+
+#define REG_A2XX_SQ_DEBUG_PTR_BUFF 0x00000db4
+
+#define REG_A2XX_SQ_DEBUG_GPR_VTX 0x00000db5
+
+#define REG_A2XX_SQ_DEBUG_GPR_PIX 0x00000db6
+
+#define REG_A2XX_SQ_DEBUG_TB_STATUS_SEL 0x00000db7
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_0 0x00000db8
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_1 0x00000db9
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_STATUS_REG 0x00000dba
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_STATE_MEM 0x00000dbb
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_0 0x00000dbc
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_0 0x00000dbd
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_1 0x00000dbe
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_2 0x00000dbf
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_3 0x00000dc0
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATE_MEM 0x00000dc1
+
+#define REG_A2XX_TC_CNTL_STATUS 0x00000e00
+#define A2XX_TC_CNTL_STATUS_L2_INVALIDATE 0x00000001
+
+#define REG_A2XX_TP0_CHICKEN 0x00000e1e
+
+#define REG_A2XX_RB_BC_CONTROL 0x00000f01
+#define A2XX_RB_BC_CONTROL_ACCUM_LINEAR_MODE_ENABLE 0x00000001
+#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK 0x00000006
+#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT 1
+static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_DISABLE_EDRAM_CAM 0x00000008
+#define A2XX_RB_BC_CONTROL_DISABLE_EZ_FAST_CONTEXT_SWITCH 0x00000010
+#define A2XX_RB_BC_CONTROL_DISABLE_EZ_NULL_ZCMD_DROP 0x00000020
+#define A2XX_RB_BC_CONTROL_DISABLE_LZ_NULL_ZCMD_DROP 0x00000040
+#define A2XX_RB_BC_CONTROL_ENABLE_AZ_THROTTLE 0x00000080
+#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK 0x00001f00
+#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT 8
+static inline uint32_t A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT) & A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_ENABLE_CRC_UPDATE 0x00004000
+#define A2XX_RB_BC_CONTROL_CRC_MODE 0x00008000
+#define A2XX_RB_BC_CONTROL_DISABLE_SAMPLE_COUNTERS 0x00010000
+#define A2XX_RB_BC_CONTROL_DISABLE_ACCUM 0x00020000
+#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK 0x003c0000
+#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT 18
+static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK;
+}
+#define A2XX_RB_BC_CONTROL_LINEAR_PERFORMANCE_ENABLE 0x00400000
+#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK 0x07800000
+#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT 23
+static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK 0x18000000
+#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT 27
+static inline uint32_t A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_MEM_EXPORT_LINEAR_MODE_ENABLE 0x20000000
+#define A2XX_RB_BC_CONTROL_CRC_SYSTEM 0x40000000
+#define A2XX_RB_BC_CONTROL_RESERVED6 0x80000000
+
+#define REG_A2XX_RB_EDRAM_INFO 0x00000f02
+
+#define REG_A2XX_RB_DEBUG_CNTL 0x00000f26
+
+#define REG_A2XX_RB_DEBUG_DATA 0x00000f27
+
+#define REG_A2XX_RB_SURFACE_INFO 0x00002000
+#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK 0x00003fff
+#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT 0
+static inline uint32_t A2XX_RB_SURFACE_INFO_SURFACE_PITCH(uint32_t val)
+{
+ return ((val) << A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT) & A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK;
+}
+#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK 0x0000c000
+#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT 14
+static inline uint32_t A2XX_RB_SURFACE_INFO_MSAA_SAMPLES(uint32_t val)
+{
+ return ((val) << A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT) & A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK;
+}
+
+#define REG_A2XX_RB_COLOR_INFO 0x00002001
+#define A2XX_RB_COLOR_INFO_FORMAT__MASK 0x0000000f
+#define A2XX_RB_COLOR_INFO_FORMAT__SHIFT 0
+static inline uint32_t A2XX_RB_COLOR_INFO_FORMAT(enum a2xx_colorformatx val)
+{
+ return ((val) << A2XX_RB_COLOR_INFO_FORMAT__SHIFT) & A2XX_RB_COLOR_INFO_FORMAT__MASK;
+}
+#define A2XX_RB_COLOR_INFO_ROUND_MODE__MASK 0x00000030
+#define A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT 4
+static inline uint32_t A2XX_RB_COLOR_INFO_ROUND_MODE(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT) & A2XX_RB_COLOR_INFO_ROUND_MODE__MASK;
+}
+#define A2XX_RB_COLOR_INFO_LINEAR 0x00000040
+#define A2XX_RB_COLOR_INFO_ENDIAN__MASK 0x00000180
+#define A2XX_RB_COLOR_INFO_ENDIAN__SHIFT 7
+static inline uint32_t A2XX_RB_COLOR_INFO_ENDIAN(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLOR_INFO_ENDIAN__SHIFT) & A2XX_RB_COLOR_INFO_ENDIAN__MASK;
+}
+#define A2XX_RB_COLOR_INFO_SWAP__MASK 0x00000600
+#define A2XX_RB_COLOR_INFO_SWAP__SHIFT 9
+static inline uint32_t A2XX_RB_COLOR_INFO_SWAP(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLOR_INFO_SWAP__SHIFT) & A2XX_RB_COLOR_INFO_SWAP__MASK;
+}
+#define A2XX_RB_COLOR_INFO_BASE__MASK 0xfffff000
+#define A2XX_RB_COLOR_INFO_BASE__SHIFT 12
+static inline uint32_t A2XX_RB_COLOR_INFO_BASE(uint32_t val)
+{
+ return ((val >> 12) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
+}
+
+#define REG_A2XX_RB_DEPTH_INFO 0x00002002
+#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
+#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
+{
+ return ((val) << A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
+}
+#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff000
+#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
+static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
+{
+ return ((val >> 12) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+}
+
+#define REG_A2XX_A225_RB_COLOR_INFO3 0x00002005
+
+#define REG_A2XX_COHER_DEST_BASE_0 0x00002006
+
+#define REG_A2XX_PA_SC_SCREEN_SCISSOR_TL 0x0000200e
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK;
+}
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A2XX_PA_SC_SCREEN_SCISSOR_BR 0x0000200f
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK;
+}
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A2XX_PA_SC_WINDOW_OFFSET 0x00002080
+#define A2XX_PA_SC_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_X(int32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_X__MASK;
+}
+#define A2XX_PA_SC_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_Y(int32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_Y__MASK;
+}
+#define A2XX_PA_SC_WINDOW_OFFSET_DISABLE 0x80000000
+
+#define REG_A2XX_PA_SC_WINDOW_SCISSOR_TL 0x00002081
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A2XX_PA_SC_WINDOW_SCISSOR_BR 0x00002082
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A2XX_UNKNOWN_2010 0x00002010
+
+#define REG_A2XX_VGT_MAX_VTX_INDX 0x00002100
+
+#define REG_A2XX_VGT_MIN_VTX_INDX 0x00002101
+
+#define REG_A2XX_VGT_INDX_OFFSET 0x00002102
+
+#define REG_A2XX_A225_PC_MULTI_PRIM_IB_RESET_INDX 0x00002103
+
+#define REG_A2XX_RB_COLOR_MASK 0x00002104
+#define A2XX_RB_COLOR_MASK_WRITE_RED 0x00000001
+#define A2XX_RB_COLOR_MASK_WRITE_GREEN 0x00000002
+#define A2XX_RB_COLOR_MASK_WRITE_BLUE 0x00000004
+#define A2XX_RB_COLOR_MASK_WRITE_ALPHA 0x00000008
+
+#define REG_A2XX_RB_BLEND_RED 0x00002105
+
+#define REG_A2XX_RB_BLEND_GREEN 0x00002106
+
+#define REG_A2XX_RB_BLEND_BLUE 0x00002107
+
+#define REG_A2XX_RB_BLEND_ALPHA 0x00002108
+
+#define REG_A2XX_RB_FOG_COLOR 0x00002109
+#define A2XX_RB_FOG_COLOR_FOG_RED__MASK 0x000000ff
+#define A2XX_RB_FOG_COLOR_FOG_RED__SHIFT 0
+static inline uint32_t A2XX_RB_FOG_COLOR_FOG_RED(uint32_t val)
+{
+ return ((val) << A2XX_RB_FOG_COLOR_FOG_RED__SHIFT) & A2XX_RB_FOG_COLOR_FOG_RED__MASK;
+}
+#define A2XX_RB_FOG_COLOR_FOG_GREEN__MASK 0x0000ff00
+#define A2XX_RB_FOG_COLOR_FOG_GREEN__SHIFT 8
+static inline uint32_t A2XX_RB_FOG_COLOR_FOG_GREEN(uint32_t val)
+{
+ return ((val) << A2XX_RB_FOG_COLOR_FOG_GREEN__SHIFT) & A2XX_RB_FOG_COLOR_FOG_GREEN__MASK;
+}
+#define A2XX_RB_FOG_COLOR_FOG_BLUE__MASK 0x00ff0000
+#define A2XX_RB_FOG_COLOR_FOG_BLUE__SHIFT 16
+static inline uint32_t A2XX_RB_FOG_COLOR_FOG_BLUE(uint32_t val)
+{
+ return ((val) << A2XX_RB_FOG_COLOR_FOG_BLUE__SHIFT) & A2XX_RB_FOG_COLOR_FOG_BLUE__MASK;
+}
+
+#define REG_A2XX_RB_STENCILREFMASK_BF 0x0000210c
+#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
+#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
+static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
+}
+#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
+#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
+static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
+}
+#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
+#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A2XX_RB_STENCILREFMASK 0x0000210d
+#define A2XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A2XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A2XX_RB_ALPHA_REF 0x0000210e
+
+#define REG_A2XX_PA_CL_VPORT_XSCALE 0x0000210f
+#define A2XX_PA_CL_VPORT_XSCALE__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_XSCALE__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_XSCALE(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_XSCALE__SHIFT) & A2XX_PA_CL_VPORT_XSCALE__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_XOFFSET 0x00002110
+#define A2XX_PA_CL_VPORT_XOFFSET__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_XOFFSET__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_XOFFSET(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_XOFFSET__SHIFT) & A2XX_PA_CL_VPORT_XOFFSET__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_YSCALE 0x00002111
+#define A2XX_PA_CL_VPORT_YSCALE__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_YSCALE__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_YSCALE(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_YSCALE__SHIFT) & A2XX_PA_CL_VPORT_YSCALE__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_YOFFSET 0x00002112
+#define A2XX_PA_CL_VPORT_YOFFSET__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_YOFFSET__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_YOFFSET(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_YOFFSET__SHIFT) & A2XX_PA_CL_VPORT_YOFFSET__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_ZSCALE 0x00002113
+#define A2XX_PA_CL_VPORT_ZSCALE__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_ZSCALE__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_ZSCALE(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_ZSCALE__SHIFT) & A2XX_PA_CL_VPORT_ZSCALE__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_ZOFFSET 0x00002114
+#define A2XX_PA_CL_VPORT_ZOFFSET__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_ZOFFSET__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_ZOFFSET(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_ZOFFSET__SHIFT) & A2XX_PA_CL_VPORT_ZOFFSET__MASK;
+}
+
+#define REG_A2XX_SQ_PROGRAM_CNTL 0x00002180
+#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK 0x000000ff
+#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT 0
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_REGS(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK 0x0000ff00
+#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT 8
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_REGS(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_VS_RESOURCE 0x00010000
+#define A2XX_SQ_PROGRAM_CNTL_PS_RESOURCE 0x00020000
+#define A2XX_SQ_PROGRAM_CNTL_PARAM_GEN 0x00040000
+#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_PIX 0x00080000
+#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK 0x00f00000
+#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT 20
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK 0x07000000
+#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT 24
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE(enum a2xx_sq_ps_vtx_mode val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK 0x78000000
+#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT 27
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_VTX 0x80000000
+
+#define REG_A2XX_SQ_CONTEXT_MISC 0x00002181
+#define A2XX_SQ_CONTEXT_MISC_INST_PRED_OPTIMIZE 0x00000001
+#define A2XX_SQ_CONTEXT_MISC_SC_OUTPUT_SCREEN_XY 0x00000002
+#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK 0x0000000c
+#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT 2
+static inline uint32_t A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL(enum a2xx_sq_sample_cntl val)
+{
+ return ((val) << A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT) & A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK;
+}
+#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK 0x0000ff00
+#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT 8
+static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val)
+{
+ return ((val) << A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT) & A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK;
+}
+#define A2XX_SQ_CONTEXT_MISC_PERFCOUNTER_REF 0x00010000
+#define A2XX_SQ_CONTEXT_MISC_YEILD_OPTIMIZE 0x00020000
+#define A2XX_SQ_CONTEXT_MISC_TX_CACHE_SEL 0x00040000
+
+#define REG_A2XX_SQ_INTERPOLATOR_CNTL 0x00002182
+#define A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__MASK 0x0000ffff
+#define A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__SHIFT 0
+static inline uint32_t A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__SHIFT) & A2XX_SQ_INTERPOLATOR_CNTL_PARAM_SHADE__MASK;
+}
+#define A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__MASK 0xffff0000
+#define A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__SHIFT 16
+static inline uint32_t A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN(uint32_t val)
+{
+ return ((val) << A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__SHIFT) & A2XX_SQ_INTERPOLATOR_CNTL_SAMPLING_PATTERN__MASK;
+}
+
+#define REG_A2XX_SQ_WRAPPING_0 0x00002183
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__MASK 0x0000000f
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__SHIFT 0
+static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_0(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_0__MASK;
+}
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__MASK 0x000000f0
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__SHIFT 4
+static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_1(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_1__MASK;
+}
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__MASK 0x00000f00
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__SHIFT 8
+static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_2(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_2__MASK;
+}
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__MASK 0x0000f000
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__SHIFT 12
+static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_3(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_3__MASK;
+}
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__MASK 0x000f0000
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__SHIFT 16
+static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_4(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_4__MASK;
+}
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__MASK 0x00f00000
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__SHIFT 20
+static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_5(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_5__MASK;
+}
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__MASK 0x0f000000
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__SHIFT 24
+static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_6(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_6__MASK;
+}
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__MASK 0xf0000000
+#define A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__SHIFT 28
+static inline uint32_t A2XX_SQ_WRAPPING_0_PARAM_WRAP_7(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__SHIFT) & A2XX_SQ_WRAPPING_0_PARAM_WRAP_7__MASK;
+}
+
+#define REG_A2XX_SQ_WRAPPING_1 0x00002184
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__MASK 0x0000000f
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__SHIFT 0
+static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_8(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_8__MASK;
+}
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__MASK 0x000000f0
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__SHIFT 4
+static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_9(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_9__MASK;
+}
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__MASK 0x00000f00
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__SHIFT 8
+static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_10(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_10__MASK;
+}
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__MASK 0x0000f000
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__SHIFT 12
+static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_11(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_11__MASK;
+}
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__MASK 0x000f0000
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__SHIFT 16
+static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_12(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_12__MASK;
+}
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__MASK 0x00f00000
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__SHIFT 20
+static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_13(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_13__MASK;
+}
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__MASK 0x0f000000
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__SHIFT 24
+static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_14(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_14__MASK;
+}
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__MASK 0xf0000000
+#define A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__SHIFT 28
+static inline uint32_t A2XX_SQ_WRAPPING_1_PARAM_WRAP_15(uint32_t val)
+{
+ return ((val) << A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__SHIFT) & A2XX_SQ_WRAPPING_1_PARAM_WRAP_15__MASK;
+}
+
+#define REG_A2XX_SQ_PS_PROGRAM 0x000021f6
+#define A2XX_SQ_PS_PROGRAM_BASE__MASK 0x00000fff
+#define A2XX_SQ_PS_PROGRAM_BASE__SHIFT 0
+static inline uint32_t A2XX_SQ_PS_PROGRAM_BASE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PS_PROGRAM_BASE__SHIFT) & A2XX_SQ_PS_PROGRAM_BASE__MASK;
+}
+#define A2XX_SQ_PS_PROGRAM_SIZE__MASK 0x00fff000
+#define A2XX_SQ_PS_PROGRAM_SIZE__SHIFT 12
+static inline uint32_t A2XX_SQ_PS_PROGRAM_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PS_PROGRAM_SIZE__SHIFT) & A2XX_SQ_PS_PROGRAM_SIZE__MASK;
+}
+
+#define REG_A2XX_SQ_VS_PROGRAM 0x000021f7
+#define A2XX_SQ_VS_PROGRAM_BASE__MASK 0x00000fff
+#define A2XX_SQ_VS_PROGRAM_BASE__SHIFT 0
+static inline uint32_t A2XX_SQ_VS_PROGRAM_BASE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_VS_PROGRAM_BASE__SHIFT) & A2XX_SQ_VS_PROGRAM_BASE__MASK;
+}
+#define A2XX_SQ_VS_PROGRAM_SIZE__MASK 0x00fff000
+#define A2XX_SQ_VS_PROGRAM_SIZE__SHIFT 12
+static inline uint32_t A2XX_SQ_VS_PROGRAM_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_VS_PROGRAM_SIZE__SHIFT) & A2XX_SQ_VS_PROGRAM_SIZE__MASK;
+}
+
+#define REG_A2XX_VGT_EVENT_INITIATOR 0x000021f9
+
+#define REG_A2XX_VGT_DRAW_INITIATOR 0x000021fc
+#define A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f
+#define A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK;
+}
+#define A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0
+#define A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK;
+}
+#define A2XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK 0x00000600
+#define A2XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT 9
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT) & A2XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK;
+}
+#define A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000800
+#define A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT 11
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK;
+}
+#define A2XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000
+#define A2XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000
+#define A2XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK 0xff000000
+#define A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT 24
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT) & A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK;
+}
+
+#define REG_A2XX_VGT_IMMED_DATA 0x000021fd
+
+#define REG_A2XX_RB_DEPTHCONTROL 0x00002200
+#define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE 0x00000001
+#define A2XX_RB_DEPTHCONTROL_Z_ENABLE 0x00000002
+#define A2XX_RB_DEPTHCONTROL_Z_WRITE_ENABLE 0x00000004
+#define A2XX_RB_DEPTHCONTROL_EARLY_Z_ENABLE 0x00000008
+#define A2XX_RB_DEPTHCONTROL_ZFUNC__MASK 0x00000070
+#define A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT 4
+static inline uint32_t A2XX_RB_DEPTHCONTROL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_ZFUNC__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_BACKFACE_ENABLE 0x00000080
+#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK 0x00000700
+#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT 8
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK 0x00003800
+#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT 11
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK 0x0001c000
+#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT 14
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK 0x000e0000
+#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT 17
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK 0x00700000
+#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT 20
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK 0x03800000
+#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT 23
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK 0x1c000000
+#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT 26
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK 0xe0000000
+#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT 29
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK;
+}
+
+#define REG_A2XX_RB_BLEND_CONTROL 0x00002201
+#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK 0x0000001f
+#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT 0
+static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK 0x000000e0
+#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT 5
+static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum a2xx_rb_blend_opcode val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK 0x00001f00
+#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT 8
+static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK 0x001f0000
+#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT 16
+static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK 0x00e00000
+#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT 21
+static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum a2xx_rb_blend_opcode val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK 0x1f000000
+#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT 24
+static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE_ENABLE 0x20000000
+#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE 0x40000000
+
+#define REG_A2XX_RB_COLORCONTROL 0x00002202
+#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK 0x00000007
+#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT 0
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK;
+}
+#define A2XX_RB_COLORCONTROL_ALPHA_TEST_ENABLE 0x00000008
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_ENABLE 0x00000010
+#define A2XX_RB_COLORCONTROL_BLEND_DISABLE 0x00000020
+#define A2XX_RB_COLORCONTROL_VOB_ENABLE 0x00000040
+#define A2XX_RB_COLORCONTROL_VS_EXPORTS_FOG 0x00000080
+#define A2XX_RB_COLORCONTROL_ROP_CODE__MASK 0x00000f00
+#define A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT 8
+static inline uint32_t A2XX_RB_COLORCONTROL_ROP_CODE(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT) & A2XX_RB_COLORCONTROL_ROP_CODE__MASK;
+}
+#define A2XX_RB_COLORCONTROL_DITHER_MODE__MASK 0x00003000
+#define A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT 12
+static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_MODE__MASK;
+}
+#define A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK 0x0000c000
+#define A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT 14
+static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_TYPE(enum a2xx_rb_dither_type val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK;
+}
+#define A2XX_RB_COLORCONTROL_PIXEL_FOG 0x00010000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK 0x03000000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT 24
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK;
+}
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK 0x0c000000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT 26
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK;
+}
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK 0x30000000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT 28
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK;
+}
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK 0xc0000000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT 30
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK;
+}
+
+#define REG_A2XX_VGT_CURRENT_BIN_ID_MAX 0x00002203
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK 0x00000007
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT 0
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK;
+}
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK 0x00000038
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT 3
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_ROW(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK;
+}
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK 0x000001c0
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT 6
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK;
+}
+
+#define REG_A2XX_PA_CL_CLIP_CNTL 0x00002204
+#define A2XX_PA_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
+#define A2XX_PA_CL_CLIP_CNTL_BOUNDARY_EDGE_FLAG_ENA 0x00040000
+#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK 0x00080000
+#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT 19
+static inline uint32_t A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF(enum a2xx_dx_clip_space val)
+{
+ return ((val) << A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT) & A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK;
+}
+#define A2XX_PA_CL_CLIP_CNTL_DIS_CLIP_ERR_DETECT 0x00100000
+#define A2XX_PA_CL_CLIP_CNTL_VTX_KILL_OR 0x00200000
+#define A2XX_PA_CL_CLIP_CNTL_XY_NAN_RETAIN 0x00400000
+#define A2XX_PA_CL_CLIP_CNTL_Z_NAN_RETAIN 0x00800000
+#define A2XX_PA_CL_CLIP_CNTL_W_NAN_RETAIN 0x01000000
+
+#define REG_A2XX_PA_SU_SC_MODE_CNTL 0x00002205
+#define A2XX_PA_SU_SC_MODE_CNTL_CULL_FRONT 0x00000001
+#define A2XX_PA_SU_SC_MODE_CNTL_CULL_BACK 0x00000002
+#define A2XX_PA_SU_SC_MODE_CNTL_FACE 0x00000004
+#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK 0x00000018
+#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT 3
+static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_POLYMODE(enum a2xx_pa_su_sc_polymode val)
+{
+ return ((val) << A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK;
+}
+#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK 0x000000e0
+#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT 5
+static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK;
+}
+#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK 0x00000700
+#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT 8
+static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK;
+}
+#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_FRONT_ENABLE 0x00000800
+#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_BACK_ENABLE 0x00001000
+#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_PARA_ENABLE 0x00002000
+#define A2XX_PA_SU_SC_MODE_CNTL_MSAA_ENABLE 0x00008000
+#define A2XX_PA_SU_SC_MODE_CNTL_VTX_WINDOW_OFFSET_ENABLE 0x00010000
+#define A2XX_PA_SU_SC_MODE_CNTL_LINE_STIPPLE_ENABLE 0x00040000
+#define A2XX_PA_SU_SC_MODE_CNTL_PROVOKING_VTX_LAST 0x00080000
+#define A2XX_PA_SU_SC_MODE_CNTL_PERSP_CORR_DIS 0x00100000
+#define A2XX_PA_SU_SC_MODE_CNTL_MULTI_PRIM_IB_ENA 0x00200000
+#define A2XX_PA_SU_SC_MODE_CNTL_QUAD_ORDER_ENABLE 0x00800000
+#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_ALL_TRI 0x02000000
+#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_FIRST_TRI_NEW_STATE 0x04000000
+#define A2XX_PA_SU_SC_MODE_CNTL_CLAMPED_FACENESS 0x10000000
+#define A2XX_PA_SU_SC_MODE_CNTL_ZERO_AREA_FACENESS 0x20000000
+#define A2XX_PA_SU_SC_MODE_CNTL_FACE_KILL_ENABLE 0x40000000
+#define A2XX_PA_SU_SC_MODE_CNTL_FACE_WRITE_ENABLE 0x80000000
+
+#define REG_A2XX_PA_CL_VTE_CNTL 0x00002206
+#define A2XX_PA_CL_VTE_CNTL_VPORT_X_SCALE_ENA 0x00000001
+#define A2XX_PA_CL_VTE_CNTL_VPORT_X_OFFSET_ENA 0x00000002
+#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_SCALE_ENA 0x00000004
+#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_OFFSET_ENA 0x00000008
+#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_SCALE_ENA 0x00000010
+#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_OFFSET_ENA 0x00000020
+#define A2XX_PA_CL_VTE_CNTL_VTX_XY_FMT 0x00000100
+#define A2XX_PA_CL_VTE_CNTL_VTX_Z_FMT 0x00000200
+#define A2XX_PA_CL_VTE_CNTL_VTX_W0_FMT 0x00000400
+#define A2XX_PA_CL_VTE_CNTL_PERFCOUNTER_REF 0x00000800
+
+#define REG_A2XX_VGT_CURRENT_BIN_ID_MIN 0x00002207
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK 0x00000007
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT 0
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK;
+}
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK 0x00000038
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT 3
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_ROW(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK;
+}
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK 0x000001c0
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT 6
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK;
+}
+
+#define REG_A2XX_RB_MODECONTROL 0x00002208
+#define A2XX_RB_MODECONTROL_EDRAM_MODE__MASK 0x00000007
+#define A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT 0
+static inline uint32_t A2XX_RB_MODECONTROL_EDRAM_MODE(enum a2xx_rb_edram_mode val)
+{
+ return ((val) << A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT) & A2XX_RB_MODECONTROL_EDRAM_MODE__MASK;
+}
+
+#define REG_A2XX_A220_RB_LRZ_VSC_CONTROL 0x00002209
+
+#define REG_A2XX_RB_SAMPLE_POS 0x0000220a
+
+#define REG_A2XX_CLEAR_COLOR 0x0000220b
+#define A2XX_CLEAR_COLOR_RED__MASK 0x000000ff
+#define A2XX_CLEAR_COLOR_RED__SHIFT 0
+static inline uint32_t A2XX_CLEAR_COLOR_RED(uint32_t val)
+{
+ return ((val) << A2XX_CLEAR_COLOR_RED__SHIFT) & A2XX_CLEAR_COLOR_RED__MASK;
+}
+#define A2XX_CLEAR_COLOR_GREEN__MASK 0x0000ff00
+#define A2XX_CLEAR_COLOR_GREEN__SHIFT 8
+static inline uint32_t A2XX_CLEAR_COLOR_GREEN(uint32_t val)
+{
+ return ((val) << A2XX_CLEAR_COLOR_GREEN__SHIFT) & A2XX_CLEAR_COLOR_GREEN__MASK;
+}
+#define A2XX_CLEAR_COLOR_BLUE__MASK 0x00ff0000
+#define A2XX_CLEAR_COLOR_BLUE__SHIFT 16
+static inline uint32_t A2XX_CLEAR_COLOR_BLUE(uint32_t val)
+{
+ return ((val) << A2XX_CLEAR_COLOR_BLUE__SHIFT) & A2XX_CLEAR_COLOR_BLUE__MASK;
+}
+#define A2XX_CLEAR_COLOR_ALPHA__MASK 0xff000000
+#define A2XX_CLEAR_COLOR_ALPHA__SHIFT 24
+static inline uint32_t A2XX_CLEAR_COLOR_ALPHA(uint32_t val)
+{
+ return ((val) << A2XX_CLEAR_COLOR_ALPHA__SHIFT) & A2XX_CLEAR_COLOR_ALPHA__MASK;
+}
+
+#define REG_A2XX_A220_GRAS_CONTROL 0x00002210
+
+#define REG_A2XX_PA_SU_POINT_SIZE 0x00002280
+#define A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK 0x0000ffff
+#define A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT 0
+static inline uint32_t A2XX_PA_SU_POINT_SIZE_HEIGHT(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK;
+}
+#define A2XX_PA_SU_POINT_SIZE_WIDTH__MASK 0xffff0000
+#define A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT 16
+static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK;
+}
+
+#define REG_A2XX_PA_SU_POINT_MINMAX 0x00002281
+#define A2XX_PA_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A2XX_PA_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A2XX_PA_SU_LINE_CNTL 0x00002282
+#define A2XX_PA_SU_LINE_CNTL_WIDTH__MASK 0x0000ffff
+#define A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT 0
+static inline uint32_t A2XX_PA_SU_LINE_CNTL_WIDTH(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK;
+}
+
+#define REG_A2XX_PA_SC_LINE_STIPPLE 0x00002283
+#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK 0x0000ffff
+#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT 0
+static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK;
+}
+#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK 0x00ff0000
+#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT 16
+static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK;
+}
+#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK 0x10000000
+#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT 28
+static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER(enum a2xx_pa_sc_pattern_bit_order val)
+{
+ return ((val) << A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK;
+}
+#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK 0x60000000
+#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT 29
+static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL(enum a2xx_pa_sc_auto_reset_cntl val)
+{
+ return ((val) << A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK;
+}
+
+#define REG_A2XX_PA_SC_VIZ_QUERY 0x00002293
+#define A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ENA 0x00000001
+#define A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__MASK 0x0000007e
+#define A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__SHIFT 1
+static inline uint32_t A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__SHIFT) & A2XX_PA_SC_VIZ_QUERY_VIZ_QUERY_ID__MASK;
+}
+#define A2XX_PA_SC_VIZ_QUERY_KILL_PIX_POST_EARLY_Z 0x00000100
+
+#define REG_A2XX_VGT_ENHANCE 0x00002294
+
+#define REG_A2XX_PA_SC_LINE_CNTL 0x00002300
+#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK 0x0000ffff
+#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT 0
+static inline uint32_t A2XX_PA_SC_LINE_CNTL_BRES_CNTL(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT) & A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK;
+}
+#define A2XX_PA_SC_LINE_CNTL_USE_BRES_CNTL 0x00000100
+#define A2XX_PA_SC_LINE_CNTL_EXPAND_LINE_WIDTH 0x00000200
+#define A2XX_PA_SC_LINE_CNTL_LAST_PIXEL 0x00000400
+
+#define REG_A2XX_PA_SC_AA_CONFIG 0x00002301
+#define A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__MASK 0x00000007
+#define A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__SHIFT 0
+static inline uint32_t A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__SHIFT) & A2XX_PA_SC_AA_CONFIG_MSAA_NUM_SAMPLES__MASK;
+}
+#define A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__MASK 0x0001e000
+#define A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__SHIFT 13
+static inline uint32_t A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__SHIFT) & A2XX_PA_SC_AA_CONFIG_MAX_SAMPLE_DIST__MASK;
+}
+
+#define REG_A2XX_PA_SU_VTX_CNTL 0x00002302
+#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK 0x00000001
+#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT 0
+static inline uint32_t A2XX_PA_SU_VTX_CNTL_PIX_CENTER(enum a2xx_pa_pixcenter val)
+{
+ return ((val) << A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT) & A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK;
+}
+#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK 0x00000006
+#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT 1
+static inline uint32_t A2XX_PA_SU_VTX_CNTL_ROUND_MODE(enum a2xx_pa_roundmode val)
+{
+ return ((val) << A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK;
+}
+#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK 0x00000380
+#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT 7
+static inline uint32_t A2XX_PA_SU_VTX_CNTL_QUANT_MODE(enum a2xx_pa_quantmode val)
+{
+ return ((val) << A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK;
+}
+
+#define REG_A2XX_PA_CL_GB_VERT_CLIP_ADJ 0x00002303
+#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK 0xffffffff
+#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT 0
+static inline uint32_t A2XX_PA_CL_GB_VERT_CLIP_ADJ(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK;
+}
+
+#define REG_A2XX_PA_CL_GB_VERT_DISC_ADJ 0x00002304
+#define A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK 0xffffffff
+#define A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT 0
+static inline uint32_t A2XX_PA_CL_GB_VERT_DISC_ADJ(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK;
+}
+
+#define REG_A2XX_PA_CL_GB_HORZ_CLIP_ADJ 0x00002305
+#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK 0xffffffff
+#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT 0
+static inline uint32_t A2XX_PA_CL_GB_HORZ_CLIP_ADJ(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK;
+}
+
+#define REG_A2XX_PA_CL_GB_HORZ_DISC_ADJ 0x00002306
+#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK 0xffffffff
+#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT 0
+static inline uint32_t A2XX_PA_CL_GB_HORZ_DISC_ADJ(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK;
+}
+
+#define REG_A2XX_SQ_VS_CONST 0x00002307
+#define A2XX_SQ_VS_CONST_BASE__MASK 0x000001ff
+#define A2XX_SQ_VS_CONST_BASE__SHIFT 0
+static inline uint32_t A2XX_SQ_VS_CONST_BASE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_VS_CONST_BASE__SHIFT) & A2XX_SQ_VS_CONST_BASE__MASK;
+}
+#define A2XX_SQ_VS_CONST_SIZE__MASK 0x001ff000
+#define A2XX_SQ_VS_CONST_SIZE__SHIFT 12
+static inline uint32_t A2XX_SQ_VS_CONST_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_VS_CONST_SIZE__SHIFT) & A2XX_SQ_VS_CONST_SIZE__MASK;
+}
+
+#define REG_A2XX_SQ_PS_CONST 0x00002308
+#define A2XX_SQ_PS_CONST_BASE__MASK 0x000001ff
+#define A2XX_SQ_PS_CONST_BASE__SHIFT 0
+static inline uint32_t A2XX_SQ_PS_CONST_BASE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PS_CONST_BASE__SHIFT) & A2XX_SQ_PS_CONST_BASE__MASK;
+}
+#define A2XX_SQ_PS_CONST_SIZE__MASK 0x001ff000
+#define A2XX_SQ_PS_CONST_SIZE__SHIFT 12
+static inline uint32_t A2XX_SQ_PS_CONST_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PS_CONST_SIZE__SHIFT) & A2XX_SQ_PS_CONST_SIZE__MASK;
+}
+
+#define REG_A2XX_SQ_DEBUG_MISC_0 0x00002309
+
+#define REG_A2XX_SQ_DEBUG_MISC_1 0x0000230a
+
+#define REG_A2XX_PA_SC_AA_MASK 0x00002312
+
+#define REG_A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL 0x00002316
+#define A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__MASK 0x00000007
+#define A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__SHIFT 0
+static inline uint32_t A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH(uint32_t val)
+{
+ return ((val) << A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__SHIFT) & A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL_VTX_REUSE_DEPTH__MASK;
+}
+
+#define REG_A2XX_VGT_OUT_DEALLOC_CNTL 0x00002317
+#define A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__MASK 0x00000003
+#define A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__SHIFT 0
+static inline uint32_t A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST(uint32_t val)
+{
+ return ((val) << A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__SHIFT) & A2XX_VGT_OUT_DEALLOC_CNTL_DEALLOC_DIST__MASK;
+}
+
+#define REG_A2XX_RB_COPY_CONTROL 0x00002318
+#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK 0x00000007
+#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT 0
+static inline uint32_t A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT(enum a2xx_rb_copy_sample_select val)
+{
+ return ((val) << A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT) & A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK;
+}
+#define A2XX_RB_COPY_CONTROL_DEPTH_CLEAR_ENABLE 0x00000008
+#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK 0x000000f0
+#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT 4
+static inline uint32_t A2XX_RB_COPY_CONTROL_CLEAR_MASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT) & A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK;
+}
+
+#define REG_A2XX_RB_COPY_DEST_BASE 0x00002319
+
+#define REG_A2XX_RB_COPY_DEST_PITCH 0x0000231a
+#define A2XX_RB_COPY_DEST_PITCH__MASK 0xffffffff
+#define A2XX_RB_COPY_DEST_PITCH__SHIFT 0
+static inline uint32_t A2XX_RB_COPY_DEST_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A2XX_RB_COPY_DEST_PITCH__SHIFT) & A2XX_RB_COPY_DEST_PITCH__MASK;
+}
+
+#define REG_A2XX_RB_COPY_DEST_INFO 0x0000231b
+#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK 0x00000007
+#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT 0
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN(enum adreno_rb_surface_endian val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT) & A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_LINEAR 0x00000008
+#define A2XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000f0
+#define A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 4
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_FORMAT(enum a2xx_colorformatx val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A2XX_RB_COPY_DEST_INFO_FORMAT__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
+#define A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_SWAP(uint32_t val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A2XX_RB_COPY_DEST_INFO_SWAP__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
+#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK 0x00003000
+#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT 12
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_TYPE(enum a2xx_rb_dither_type val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_WRITE_RED 0x00004000
+#define A2XX_RB_COPY_DEST_INFO_WRITE_GREEN 0x00008000
+#define A2XX_RB_COPY_DEST_INFO_WRITE_BLUE 0x00010000
+#define A2XX_RB_COPY_DEST_INFO_WRITE_ALPHA 0x00020000
+
+#define REG_A2XX_RB_COPY_DEST_OFFSET 0x0000231c
+#define A2XX_RB_COPY_DEST_OFFSET_X__MASK 0x00001fff
+#define A2XX_RB_COPY_DEST_OFFSET_X__SHIFT 0
+static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_X(uint32_t val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_OFFSET_X__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_X__MASK;
+}
+#define A2XX_RB_COPY_DEST_OFFSET_Y__MASK 0x03ffe000
+#define A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT 13
+static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_Y__MASK;
+}
+
+#define REG_A2XX_RB_DEPTH_CLEAR 0x0000231d
+
+#define REG_A2XX_RB_SAMPLE_COUNT_CTL 0x00002324
+
+#define REG_A2XX_RB_COLOR_DEST_MASK 0x00002326
+
+#define REG_A2XX_A225_GRAS_UCP0X 0x00002340
+
+#define REG_A2XX_A225_GRAS_UCP5W 0x00002357
+
+#define REG_A2XX_A225_GRAS_UCP_ENABLED 0x00002360
+
+#define REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE 0x00002380
+
+#define REG_A2XX_PA_SU_POLY_OFFSET_FRONT_OFFSET 0x00002381
+
+#define REG_A2XX_PA_SU_POLY_OFFSET_BACK_SCALE 0x00002382
+
+#define REG_A2XX_PA_SU_POLY_OFFSET_BACK_OFFSET 0x00002383
+
+#define REG_A2XX_SQ_CONSTANT_0 0x00004000
+
+#define REG_A2XX_SQ_FETCH_0 0x00004800
+
+#define REG_A2XX_SQ_CF_BOOLEANS 0x00004900
+
+#define REG_A2XX_SQ_CF_LOOP 0x00004908
+
+#define REG_A2XX_COHER_SIZE_PM4 0x00000a29
+
+#define REG_A2XX_COHER_BASE_PM4 0x00000a2a
+
+#define REG_A2XX_COHER_STATUS_PM4 0x00000a2b
+
+#define REG_A2XX_PA_SU_PERFCOUNTER0_SELECT 0x00000c88
+
+#define REG_A2XX_PA_SU_PERFCOUNTER1_SELECT 0x00000c89
+
+#define REG_A2XX_PA_SU_PERFCOUNTER2_SELECT 0x00000c8a
+
+#define REG_A2XX_PA_SU_PERFCOUNTER3_SELECT 0x00000c8b
+
+#define REG_A2XX_PA_SU_PERFCOUNTER0_LOW 0x00000c8c
+
+#define REG_A2XX_PA_SU_PERFCOUNTER0_HI 0x00000c8d
+
+#define REG_A2XX_PA_SU_PERFCOUNTER1_LOW 0x00000c8e
+
+#define REG_A2XX_PA_SU_PERFCOUNTER1_HI 0x00000c8f
+
+#define REG_A2XX_PA_SU_PERFCOUNTER2_LOW 0x00000c90
+
+#define REG_A2XX_PA_SU_PERFCOUNTER2_HI 0x00000c91
+
+#define REG_A2XX_PA_SU_PERFCOUNTER3_LOW 0x00000c92
+
+#define REG_A2XX_PA_SU_PERFCOUNTER3_HI 0x00000c93
+
+#define REG_A2XX_PA_SC_PERFCOUNTER0_SELECT 0x00000c98
+
+#define REG_A2XX_PA_SC_PERFCOUNTER0_LOW 0x00000c99
+
+#define REG_A2XX_PA_SC_PERFCOUNTER0_HI 0x00000c9a
+
+#define REG_A2XX_VGT_PERFCOUNTER0_SELECT 0x00000c48
+
+#define REG_A2XX_VGT_PERFCOUNTER1_SELECT 0x00000c49
+
+#define REG_A2XX_VGT_PERFCOUNTER2_SELECT 0x00000c4a
+
+#define REG_A2XX_VGT_PERFCOUNTER3_SELECT 0x00000c4b
+
+#define REG_A2XX_VGT_PERFCOUNTER0_LOW 0x00000c4c
+
+#define REG_A2XX_VGT_PERFCOUNTER1_LOW 0x00000c4e
+
+#define REG_A2XX_VGT_PERFCOUNTER2_LOW 0x00000c50
+
+#define REG_A2XX_VGT_PERFCOUNTER3_LOW 0x00000c52
+
+#define REG_A2XX_VGT_PERFCOUNTER0_HI 0x00000c4d
+
+#define REG_A2XX_VGT_PERFCOUNTER1_HI 0x00000c4f
+
+#define REG_A2XX_VGT_PERFCOUNTER2_HI 0x00000c51
+
+#define REG_A2XX_VGT_PERFCOUNTER3_HI 0x00000c53
+
+#define REG_A2XX_TCR_PERFCOUNTER0_SELECT 0x00000e05
+
+#define REG_A2XX_TCR_PERFCOUNTER1_SELECT 0x00000e08
+
+#define REG_A2XX_TCR_PERFCOUNTER0_HI 0x00000e06
+
+#define REG_A2XX_TCR_PERFCOUNTER1_HI 0x00000e09
+
+#define REG_A2XX_TCR_PERFCOUNTER0_LOW 0x00000e07
+
+#define REG_A2XX_TCR_PERFCOUNTER1_LOW 0x00000e0a
+
+#define REG_A2XX_TP0_PERFCOUNTER0_SELECT 0x00000e1f
+
+#define REG_A2XX_TP0_PERFCOUNTER0_HI 0x00000e20
+
+#define REG_A2XX_TP0_PERFCOUNTER0_LOW 0x00000e21
+
+#define REG_A2XX_TP0_PERFCOUNTER1_SELECT 0x00000e22
+
+#define REG_A2XX_TP0_PERFCOUNTER1_HI 0x00000e23
+
+#define REG_A2XX_TP0_PERFCOUNTER1_LOW 0x00000e24
+
+#define REG_A2XX_TCM_PERFCOUNTER0_SELECT 0x00000e54
+
+#define REG_A2XX_TCM_PERFCOUNTER1_SELECT 0x00000e57
+
+#define REG_A2XX_TCM_PERFCOUNTER0_HI 0x00000e55
+
+#define REG_A2XX_TCM_PERFCOUNTER1_HI 0x00000e58
+
+#define REG_A2XX_TCM_PERFCOUNTER0_LOW 0x00000e56
+
+#define REG_A2XX_TCM_PERFCOUNTER1_LOW 0x00000e59
+
+#define REG_A2XX_TCF_PERFCOUNTER0_SELECT 0x00000e5a
+
+#define REG_A2XX_TCF_PERFCOUNTER1_SELECT 0x00000e5d
+
+#define REG_A2XX_TCF_PERFCOUNTER2_SELECT 0x00000e60
+
+#define REG_A2XX_TCF_PERFCOUNTER3_SELECT 0x00000e63
+
+#define REG_A2XX_TCF_PERFCOUNTER4_SELECT 0x00000e66
+
+#define REG_A2XX_TCF_PERFCOUNTER5_SELECT 0x00000e69
+
+#define REG_A2XX_TCF_PERFCOUNTER6_SELECT 0x00000e6c
+
+#define REG_A2XX_TCF_PERFCOUNTER7_SELECT 0x00000e6f
+
+#define REG_A2XX_TCF_PERFCOUNTER8_SELECT 0x00000e72
+
+#define REG_A2XX_TCF_PERFCOUNTER9_SELECT 0x00000e75
+
+#define REG_A2XX_TCF_PERFCOUNTER10_SELECT 0x00000e78
+
+#define REG_A2XX_TCF_PERFCOUNTER11_SELECT 0x00000e7b
+
+#define REG_A2XX_TCF_PERFCOUNTER0_HI 0x00000e5b
+
+#define REG_A2XX_TCF_PERFCOUNTER1_HI 0x00000e5e
+
+#define REG_A2XX_TCF_PERFCOUNTER2_HI 0x00000e61
+
+#define REG_A2XX_TCF_PERFCOUNTER3_HI 0x00000e64
+
+#define REG_A2XX_TCF_PERFCOUNTER4_HI 0x00000e67
+
+#define REG_A2XX_TCF_PERFCOUNTER5_HI 0x00000e6a
+
+#define REG_A2XX_TCF_PERFCOUNTER6_HI 0x00000e6d
+
+#define REG_A2XX_TCF_PERFCOUNTER7_HI 0x00000e70
+
+#define REG_A2XX_TCF_PERFCOUNTER8_HI 0x00000e73
+
+#define REG_A2XX_TCF_PERFCOUNTER9_HI 0x00000e76
+
+#define REG_A2XX_TCF_PERFCOUNTER10_HI 0x00000e79
+
+#define REG_A2XX_TCF_PERFCOUNTER11_HI 0x00000e7c
+
+#define REG_A2XX_TCF_PERFCOUNTER0_LOW 0x00000e5c
+
+#define REG_A2XX_TCF_PERFCOUNTER1_LOW 0x00000e5f
+
+#define REG_A2XX_TCF_PERFCOUNTER2_LOW 0x00000e62
+
+#define REG_A2XX_TCF_PERFCOUNTER3_LOW 0x00000e65
+
+#define REG_A2XX_TCF_PERFCOUNTER4_LOW 0x00000e68
+
+#define REG_A2XX_TCF_PERFCOUNTER5_LOW 0x00000e6b
+
+#define REG_A2XX_TCF_PERFCOUNTER6_LOW 0x00000e6e
+
+#define REG_A2XX_TCF_PERFCOUNTER7_LOW 0x00000e71
+
+#define REG_A2XX_TCF_PERFCOUNTER8_LOW 0x00000e74
+
+#define REG_A2XX_TCF_PERFCOUNTER9_LOW 0x00000e77
+
+#define REG_A2XX_TCF_PERFCOUNTER10_LOW 0x00000e7a
+
+#define REG_A2XX_TCF_PERFCOUNTER11_LOW 0x00000e7d
+
+#define REG_A2XX_SQ_PERFCOUNTER0_SELECT 0x00000dc8
+
+#define REG_A2XX_SQ_PERFCOUNTER1_SELECT 0x00000dc9
+
+#define REG_A2XX_SQ_PERFCOUNTER2_SELECT 0x00000dca
+
+#define REG_A2XX_SQ_PERFCOUNTER3_SELECT 0x00000dcb
+
+#define REG_A2XX_SQ_PERFCOUNTER0_LOW 0x00000dcc
+
+#define REG_A2XX_SQ_PERFCOUNTER0_HI 0x00000dcd
+
+#define REG_A2XX_SQ_PERFCOUNTER1_LOW 0x00000dce
+
+#define REG_A2XX_SQ_PERFCOUNTER1_HI 0x00000dcf
+
+#define REG_A2XX_SQ_PERFCOUNTER2_LOW 0x00000dd0
+
+#define REG_A2XX_SQ_PERFCOUNTER2_HI 0x00000dd1
+
+#define REG_A2XX_SQ_PERFCOUNTER3_LOW 0x00000dd2
+
+#define REG_A2XX_SQ_PERFCOUNTER3_HI 0x00000dd3
+
+#define REG_A2XX_SX_PERFCOUNTER0_SELECT 0x00000dd4
+
+#define REG_A2XX_SX_PERFCOUNTER0_LOW 0x00000dd8
+
+#define REG_A2XX_SX_PERFCOUNTER0_HI 0x00000dd9
+
+#define REG_A2XX_MH_PERFCOUNTER0_SELECT 0x00000a46
+
+#define REG_A2XX_MH_PERFCOUNTER1_SELECT 0x00000a4a
+
+#define REG_A2XX_MH_PERFCOUNTER0_CONFIG 0x00000a47
+
+#define REG_A2XX_MH_PERFCOUNTER1_CONFIG 0x00000a4b
+
+#define REG_A2XX_MH_PERFCOUNTER0_LOW 0x00000a48
+
+#define REG_A2XX_MH_PERFCOUNTER1_LOW 0x00000a4c
+
+#define REG_A2XX_MH_PERFCOUNTER0_HI 0x00000a49
+
+#define REG_A2XX_MH_PERFCOUNTER1_HI 0x00000a4d
+
+#define REG_A2XX_RB_PERFCOUNTER0_SELECT 0x00000f04
+
+#define REG_A2XX_RB_PERFCOUNTER1_SELECT 0x00000f05
+
+#define REG_A2XX_RB_PERFCOUNTER2_SELECT 0x00000f06
+
+#define REG_A2XX_RB_PERFCOUNTER3_SELECT 0x00000f07
+
+#define REG_A2XX_RB_PERFCOUNTER0_LOW 0x00000f08
+
+#define REG_A2XX_RB_PERFCOUNTER0_HI 0x00000f09
+
+#define REG_A2XX_RB_PERFCOUNTER1_LOW 0x00000f0a
+
+#define REG_A2XX_RB_PERFCOUNTER1_HI 0x00000f0b
+
+#define REG_A2XX_RB_PERFCOUNTER2_LOW 0x00000f0c
+
+#define REG_A2XX_RB_PERFCOUNTER2_HI 0x00000f0d
+
+#define REG_A2XX_RB_PERFCOUNTER3_LOW 0x00000f0e
+
+#define REG_A2XX_RB_PERFCOUNTER3_HI 0x00000f0f
+
+#define REG_A2XX_SQ_TEX_0 0x00000000
+#define A2XX_SQ_TEX_0_TYPE__MASK 0x00000003
+#define A2XX_SQ_TEX_0_TYPE__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_0_TYPE(enum sq_tex_type val)
+{
+ return ((val) << A2XX_SQ_TEX_0_TYPE__SHIFT) & A2XX_SQ_TEX_0_TYPE__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_X__MASK 0x0000000c
+#define A2XX_SQ_TEX_0_SIGN_X__SHIFT 2
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_X(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_X__SHIFT) & A2XX_SQ_TEX_0_SIGN_X__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_Y__MASK 0x00000030
+#define A2XX_SQ_TEX_0_SIGN_Y__SHIFT 4
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_Y(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_Y__SHIFT) & A2XX_SQ_TEX_0_SIGN_Y__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_Z__MASK 0x000000c0
+#define A2XX_SQ_TEX_0_SIGN_Z__SHIFT 6
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_Z(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_Z__SHIFT) & A2XX_SQ_TEX_0_SIGN_Z__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_W__MASK 0x00000300
+#define A2XX_SQ_TEX_0_SIGN_W__SHIFT 8
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_W(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_W__SHIFT) & A2XX_SQ_TEX_0_SIGN_W__MASK;
+}
+#define A2XX_SQ_TEX_0_CLAMP_X__MASK 0x00001c00
+#define A2XX_SQ_TEX_0_CLAMP_X__SHIFT 10
+static inline uint32_t A2XX_SQ_TEX_0_CLAMP_X(enum sq_tex_clamp val)
+{
+ return ((val) << A2XX_SQ_TEX_0_CLAMP_X__SHIFT) & A2XX_SQ_TEX_0_CLAMP_X__MASK;
+}
+#define A2XX_SQ_TEX_0_CLAMP_Y__MASK 0x0000e000
+#define A2XX_SQ_TEX_0_CLAMP_Y__SHIFT 13
+static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Y(enum sq_tex_clamp val)
+{
+ return ((val) << A2XX_SQ_TEX_0_CLAMP_Y__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Y__MASK;
+}
+#define A2XX_SQ_TEX_0_CLAMP_Z__MASK 0x00070000
+#define A2XX_SQ_TEX_0_CLAMP_Z__SHIFT 16
+static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Z(enum sq_tex_clamp val)
+{
+ return ((val) << A2XX_SQ_TEX_0_CLAMP_Z__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Z__MASK;
+}
+#define A2XX_SQ_TEX_0_PITCH__MASK 0x7fc00000
+#define A2XX_SQ_TEX_0_PITCH__SHIFT 22
+static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
+}
+#define A2XX_SQ_TEX_0_TILED 0x80000000
+
+#define REG_A2XX_SQ_TEX_1 0x00000001
+#define A2XX_SQ_TEX_1_FORMAT__MASK 0x0000003f
+#define A2XX_SQ_TEX_1_FORMAT__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_1_FORMAT(enum a2xx_sq_surfaceformat val)
+{
+ return ((val) << A2XX_SQ_TEX_1_FORMAT__SHIFT) & A2XX_SQ_TEX_1_FORMAT__MASK;
+}
+#define A2XX_SQ_TEX_1_ENDIANNESS__MASK 0x000000c0
+#define A2XX_SQ_TEX_1_ENDIANNESS__SHIFT 6
+static inline uint32_t A2XX_SQ_TEX_1_ENDIANNESS(enum sq_tex_endian val)
+{
+ return ((val) << A2XX_SQ_TEX_1_ENDIANNESS__SHIFT) & A2XX_SQ_TEX_1_ENDIANNESS__MASK;
+}
+#define A2XX_SQ_TEX_1_REQUEST_SIZE__MASK 0x00000300
+#define A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT 8
+static inline uint32_t A2XX_SQ_TEX_1_REQUEST_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT) & A2XX_SQ_TEX_1_REQUEST_SIZE__MASK;
+}
+#define A2XX_SQ_TEX_1_STACKED 0x00000400
+#define A2XX_SQ_TEX_1_CLAMP_POLICY__MASK 0x00000800
+#define A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT 11
+static inline uint32_t A2XX_SQ_TEX_1_CLAMP_POLICY(enum sq_tex_clamp_policy val)
+{
+ return ((val) << A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT) & A2XX_SQ_TEX_1_CLAMP_POLICY__MASK;
+}
+#define A2XX_SQ_TEX_1_BASE_ADDRESS__MASK 0xfffff000
+#define A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT 12
+static inline uint32_t A2XX_SQ_TEX_1_BASE_ADDRESS(uint32_t val)
+{
+ return ((val >> 12) << A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT) & A2XX_SQ_TEX_1_BASE_ADDRESS__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_2 0x00000002
+#define A2XX_SQ_TEX_2_WIDTH__MASK 0x00001fff
+#define A2XX_SQ_TEX_2_WIDTH__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_2_WIDTH(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_2_WIDTH__SHIFT) & A2XX_SQ_TEX_2_WIDTH__MASK;
+}
+#define A2XX_SQ_TEX_2_HEIGHT__MASK 0x03ffe000
+#define A2XX_SQ_TEX_2_HEIGHT__SHIFT 13
+static inline uint32_t A2XX_SQ_TEX_2_HEIGHT(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_2_HEIGHT__SHIFT) & A2XX_SQ_TEX_2_HEIGHT__MASK;
+}
+#define A2XX_SQ_TEX_2_DEPTH__MASK 0xfc000000
+#define A2XX_SQ_TEX_2_DEPTH__SHIFT 26
+static inline uint32_t A2XX_SQ_TEX_2_DEPTH(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_2_DEPTH__SHIFT) & A2XX_SQ_TEX_2_DEPTH__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_3 0x00000003
+#define A2XX_SQ_TEX_3_NUM_FORMAT__MASK 0x00000001
+#define A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_3_NUM_FORMAT(enum sq_tex_num_format val)
+{
+ return ((val) << A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT) & A2XX_SQ_TEX_3_NUM_FORMAT__MASK;
+}
+#define A2XX_SQ_TEX_3_SWIZ_X__MASK 0x0000000e
+#define A2XX_SQ_TEX_3_SWIZ_X__SHIFT 1
+static inline uint32_t A2XX_SQ_TEX_3_SWIZ_X(enum sq_tex_swiz val)
+{
+ return ((val) << A2XX_SQ_TEX_3_SWIZ_X__SHIFT) & A2XX_SQ_TEX_3_SWIZ_X__MASK;
+}
+#define A2XX_SQ_TEX_3_SWIZ_Y__MASK 0x00000070
+#define A2XX_SQ_TEX_3_SWIZ_Y__SHIFT 4
+static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Y(enum sq_tex_swiz val)
+{
+ return ((val) << A2XX_SQ_TEX_3_SWIZ_Y__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Y__MASK;
+}
+#define A2XX_SQ_TEX_3_SWIZ_Z__MASK 0x00000380
+#define A2XX_SQ_TEX_3_SWIZ_Z__SHIFT 7
+static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Z(enum sq_tex_swiz val)
+{
+ return ((val) << A2XX_SQ_TEX_3_SWIZ_Z__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Z__MASK;
+}
+#define A2XX_SQ_TEX_3_SWIZ_W__MASK 0x00001c00
+#define A2XX_SQ_TEX_3_SWIZ_W__SHIFT 10
+static inline uint32_t A2XX_SQ_TEX_3_SWIZ_W(enum sq_tex_swiz val)
+{
+ return ((val) << A2XX_SQ_TEX_3_SWIZ_W__SHIFT) & A2XX_SQ_TEX_3_SWIZ_W__MASK;
+}
+#define A2XX_SQ_TEX_3_EXP_ADJUST__MASK 0x0007e000
+#define A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT 13
+static inline uint32_t A2XX_SQ_TEX_3_EXP_ADJUST(int32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT) & A2XX_SQ_TEX_3_EXP_ADJUST__MASK;
+}
+#define A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK 0x00180000
+#define A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT 19
+static inline uint32_t A2XX_SQ_TEX_3_XY_MAG_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK 0x00600000
+#define A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT 21
+static inline uint32_t A2XX_SQ_TEX_3_XY_MIN_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_MIP_FILTER__MASK 0x01800000
+#define A2XX_SQ_TEX_3_MIP_FILTER__SHIFT 23
+static inline uint32_t A2XX_SQ_TEX_3_MIP_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_3_MIP_FILTER__SHIFT) & A2XX_SQ_TEX_3_MIP_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_ANISO_FILTER__MASK 0x0e000000
+#define A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT 25
+static inline uint32_t A2XX_SQ_TEX_3_ANISO_FILTER(enum sq_tex_aniso_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT) & A2XX_SQ_TEX_3_ANISO_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_BORDER_SIZE__MASK 0x80000000
+#define A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT 31
+static inline uint32_t A2XX_SQ_TEX_3_BORDER_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT) & A2XX_SQ_TEX_3_BORDER_SIZE__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_4 0x00000004
+#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK 0x00000001
+#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_4_VOL_MAG_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK 0x00000002
+#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT 1
+static inline uint32_t A2XX_SQ_TEX_4_VOL_MIN_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK 0x0000003c
+#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT 2
+static inline uint32_t A2XX_SQ_TEX_4_MIP_MIN_LEVEL(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK;
+}
+#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK 0x000003c0
+#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT 6
+static inline uint32_t A2XX_SQ_TEX_4_MIP_MAX_LEVEL(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK;
+}
+#define A2XX_SQ_TEX_4_MAX_ANISO_WALK 0x00000400
+#define A2XX_SQ_TEX_4_MIN_ANISO_WALK 0x00000800
+#define A2XX_SQ_TEX_4_LOD_BIAS__MASK 0x003ff000
+#define A2XX_SQ_TEX_4_LOD_BIAS__SHIFT 12
+static inline uint32_t A2XX_SQ_TEX_4_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 32.0))) << A2XX_SQ_TEX_4_LOD_BIAS__SHIFT) & A2XX_SQ_TEX_4_LOD_BIAS__MASK;
+}
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK 0x07c00000
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT 22
+static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK;
+}
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK 0xf8000000
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT 27
+static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_5 0x00000005
+#define A2XX_SQ_TEX_5_BORDER_COLOR__MASK 0x00000003
+#define A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_5_BORDER_COLOR(enum sq_tex_border_color val)
+{
+ return ((val) << A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT) & A2XX_SQ_TEX_5_BORDER_COLOR__MASK;
+}
+#define A2XX_SQ_TEX_5_FORCE_BCW_MAX 0x00000004
+#define A2XX_SQ_TEX_5_TRI_CLAMP__MASK 0x00000018
+#define A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT 3
+static inline uint32_t A2XX_SQ_TEX_5_TRI_CLAMP(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT) & A2XX_SQ_TEX_5_TRI_CLAMP__MASK;
+}
+#define A2XX_SQ_TEX_5_ANISO_BIAS__MASK 0x000001e0
+#define A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT 5
+static inline uint32_t A2XX_SQ_TEX_5_ANISO_BIAS(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT) & A2XX_SQ_TEX_5_ANISO_BIAS__MASK;
+}
+#define A2XX_SQ_TEX_5_DIMENSION__MASK 0x00000600
+#define A2XX_SQ_TEX_5_DIMENSION__SHIFT 9
+static inline uint32_t A2XX_SQ_TEX_5_DIMENSION(enum sq_tex_dimension val)
+{
+ return ((val) << A2XX_SQ_TEX_5_DIMENSION__SHIFT) & A2XX_SQ_TEX_5_DIMENSION__MASK;
+}
+#define A2XX_SQ_TEX_5_PACKED_MIPS 0x00000800
+#define A2XX_SQ_TEX_5_MIP_ADDRESS__MASK 0xfffff000
+#define A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT 12
+static inline uint32_t A2XX_SQ_TEX_5_MIP_ADDRESS(uint32_t val)
+{
+ return ((val >> 12) << A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT) & A2XX_SQ_TEX_5_MIP_ADDRESS__MASK;
+}
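+
+/* Usage note (illustrative only, not part of the generated register database):
+ * the SQ_TEX_0..SQ_TEX_5 helpers above pack a texture state record one dword
+ * at a time.  The PITCH, BASE_ADDRESS and MIP_ADDRESS helpers pre-shift their
+ * argument (>> 5 and >> 12), so the values passed in are expected to be
+ * multiples of 32 and 4096 respectively.  A minimal sketch:
+ *
+ *	texsamp[0] = A2XX_SQ_TEX_0_PITCH(pitch) | A2XX_SQ_TEX_0_TILED;
+ *	texsamp[1] = A2XX_SQ_TEX_1_BASE_ADDRESS(iova);
+ *	texsamp[2] = A2XX_SQ_TEX_2_WIDTH(width) | A2XX_SQ_TEX_2_HEIGHT(height);
+ */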
+
+
+#endif /* A2XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
new file mode 100644
index 0000000000..0d8133f317
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -0,0 +1,569 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include "a2xx_gpu.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+extern bool hang_debug;
+
+static void a2xx_dump(struct msm_gpu *gpu);
+static bool a2xx_idle(struct msm_gpu *gpu);
+
+static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+ struct msm_ringbuffer *ring = submit->ring;
+ unsigned int i;
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ /* ignore IB-targets */
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ /* ignore if there has not been a ctx switch: */
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ break;
+ fallthrough;
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ OUT_PKT2(ring);
+ break;
+ }
+ }
+
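+	/* stash seqno in a CP scratch reg (dumped by a2xx_recover() on hang) */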
+ OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
+ OUT_RING(ring, submit->seqno);
+
+ /* wait for idle before cache flush/interrupt */
+ OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+ OUT_RING(ring, 0x00000000);
+
+ OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+ OUT_RING(ring, CACHE_FLUSH_TS);
+ OUT_RING(ring, rbmemptr(ring, fence));
+ OUT_RING(ring, submit->seqno);
+ OUT_PKT3(ring, CP_INTERRUPT, 1);
+ OUT_RING(ring, 0x80000000);
+
+ adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
+}
+
+static bool a2xx_me_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a2xx_gpu *a2xx_gpu = to_a2xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT3(ring, CP_ME_INIT, 18);
+
+ /* All fields present (bits 9:0) */
+ OUT_RING(ring, 0x000003ff);
+ /* Disable/Enable Real-Time Stream processing (present but ignored) */
+ OUT_RING(ring, 0x00000000);
+ /* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
+ OUT_RING(ring, 0x00000000);
+
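+	/* register block base offsets passed to the microcode, relative to 0x2000 */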
+ OUT_RING(ring, REG_A2XX_RB_SURFACE_INFO - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SC_WINDOW_OFFSET - 0x2000);
+ OUT_RING(ring, REG_A2XX_VGT_MAX_VTX_INDX - 0x2000);
+ OUT_RING(ring, REG_A2XX_SQ_PROGRAM_CNTL - 0x2000);
+ OUT_RING(ring, REG_A2XX_RB_DEPTHCONTROL - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SU_POINT_SIZE - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SC_LINE_CNTL - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE - 0x2000);
+
+ /* Vertex and Pixel Shader Start Addresses in instructions
+ * (3 DWORDS per instruction) */
+ OUT_RING(ring, 0x80000180);
+ /* Maximum Contexts */
+ OUT_RING(ring, 0x00000001);
+	/* Write Confirm Interval: the CP will wait wait_interval * 16 clocks
+	 * between polls */
+ OUT_RING(ring, 0x00000000);
+ /* NQ and External Memory Swap */
+ OUT_RING(ring, 0x00000000);
+ /* protected mode error checking (0x1f2 is REG_AXXX_CP_INT_CNTL) */
+ if (a2xx_gpu->protection_disabled)
+ OUT_RING(ring, 0x00000000);
+ else
+ OUT_RING(ring, 0x200001f2);
+ /* Disable header dumping and Header dump address */
+ OUT_RING(ring, 0x00000000);
+ /* Header dump size */
+ OUT_RING(ring, 0x00000000);
+
+ if (!a2xx_gpu->protection_disabled) {
+ /* enable protected mode */
+ OUT_PKT3(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+ }
+
+ adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
+ return a2xx_idle(gpu);
+}
+
+static int a2xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a2xx_gpu *a2xx_gpu = to_a2xx_gpu(adreno_gpu);
+ dma_addr_t pt_base, tran_error;
+ uint32_t *ptr, len;
+ int i, ret;
+
+ msm_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
+
+ DBG("%s", gpu->name);
+
+ /* halt ME to avoid ucode upload issues on a20x */
+ gpu_write(gpu, REG_AXXX_CP_ME_CNTL, AXXX_CP_ME_CNTL_HALT);
+
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0xfffffffe);
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0xffffffff);
+
+ /* note: kgsl uses 0x00000001 after first reset on a22x */
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0xffffffff);
+ msleep(30);
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0x00000000);
+
+ if (adreno_is_a225(adreno_gpu))
+ gpu_write(gpu, REG_A2XX_SQ_FLOW_CONTROL, 0x18000000);
+
+ /* note: kgsl uses 0x0000ffff for a20x */
+ gpu_write(gpu, REG_A2XX_RBBM_CNTL, 0x00004442);
+
+ /* MPU: physical range */
+ gpu_write(gpu, REG_A2XX_MH_MMU_MPU_BASE, 0x00000000);
+ gpu_write(gpu, REG_A2XX_MH_MMU_MPU_END, 0xfffff000);
+
+ gpu_write(gpu, REG_A2XX_MH_MMU_CONFIG, A2XX_MH_MMU_CONFIG_MMU_ENABLE |
+ A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(BEH_TRAN_RNG));
+
+ /* same as parameters in adreno_gpu */
+ gpu_write(gpu, REG_A2XX_MH_MMU_VA_RANGE, SZ_16M |
+ A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(0xfff));
+
+ gpu_write(gpu, REG_A2XX_MH_MMU_PT_BASE, pt_base);
+ gpu_write(gpu, REG_A2XX_MH_MMU_TRAN_ERROR, tran_error);
+
+ gpu_write(gpu, REG_A2XX_MH_MMU_INVALIDATE,
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+
+ gpu_write(gpu, REG_A2XX_MH_ARBITER_CONFIG,
+ A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(16) |
+ A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(1) |
+ A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(8) |
+ A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE);
+ if (!adreno_is_a20x(adreno_gpu))
+ gpu_write(gpu, REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1, 0x00032f07);
+
+ gpu_write(gpu, REG_A2XX_SQ_VS_PROGRAM, 0x00000000);
+ gpu_write(gpu, REG_A2XX_SQ_PS_PROGRAM, 0x00000000);
+
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0); /* 0x200 for msm8960? */
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0); /* 0x80/0x1a0 for a22x? */
+
+ /* note: gsl doesn't set this */
+ gpu_write(gpu, REG_A2XX_RBBM_DEBUG, 0x00080000);
+
+ gpu_write(gpu, REG_A2XX_RBBM_INT_CNTL,
+ A2XX_RBBM_INT_CNTL_RDERR_INT_MASK);
+ gpu_write(gpu, REG_AXXX_CP_INT_CNTL,
+ AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK |
+ AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK |
+ AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK |
+ AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK |
+ AXXX_CP_INT_CNTL_IB_ERROR_MASK |
+ AXXX_CP_INT_CNTL_IB1_INT_MASK |
+ AXXX_CP_INT_CNTL_RB_INT_MASK);
+ gpu_write(gpu, REG_A2XX_SQ_INT_CNTL, 0);
+ gpu_write(gpu, REG_A2XX_MH_INTERRUPT_MASK,
+ A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR |
+ A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR |
+ A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT);
+
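+	/* encode GMEM size in RB_EDRAM_INFO: SZ_16K << i, i.e. i=3 for 128K up to i=5 for 512K */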
+ for (i = 3; i <= 5; i++)
+ if ((SZ_16K << i) == adreno_gpu->info->gmem)
+ break;
+ gpu_write(gpu, REG_A2XX_RB_EDRAM_INFO, i);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
+ /* NOTE: PM4/micro-engine firmware registers look to be the same
+ * for a2xx and a3xx.. we could possibly push that part down to
+ * adreno_gpu base class. Or push both PM4 and PFP but
+ * parameterize the pfp ucode addr/data registers..
+ */
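+	/*
+	 * Purely illustrative sketch of such a shared helper (not an existing
+	 * function, names made up):
+	 *
+	 *	static void load_ucode(struct msm_gpu *gpu, u32 addr_reg,
+	 *			       u32 data_reg, const struct firmware *fw)
+	 *	{
+	 *		const u32 *p = (const u32 *)fw->data;
+	 *		int i;
+	 *
+	 *		gpu_write(gpu, addr_reg, 0);
+	 *		for (i = 1; i < fw->size / 4; i++)
+	 *			gpu_write(gpu, data_reg, p[i]);
+	 *	}
+	 */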
+
+ /* Load PM4: */
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
+ DBG("loading PM4 ucode version: %x", ptr[1]);
+
+ /*
+ * New firmware files seem to have GPU and firmware version in this
+ * word (0x20xxxx for A200, 0x220xxx for A220, 0x225xxx for A225).
+ * Older firmware files, which lack protection support, have 0 instead.
+ */
+ if (ptr[1] == 0) {
+ dev_warn(gpu->dev->dev,
+ "Legacy firmware detected, disabling protection support\n");
+ a2xx_gpu->protection_disabled = true;
+ }
+
+ gpu_write(gpu, REG_AXXX_CP_DEBUG,
+ AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
+ gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
+
+ /* Load PFP: */
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
+ DBG("loading PFP ucode version: %x", ptr[5]);
+
+ gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_ADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_DATA, ptr[i]);
+
+ gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x000C0804);
+
+ /* clear ME_HALT to start micro engine */
+ gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
+
+ return a2xx_me_init(gpu) ? 0 : -EINVAL;
+}
+
+static void a2xx_recover(struct msm_gpu *gpu)
+{
+ int i;
+
+ adreno_dump_info(gpu);
+
+ for (i = 0; i < 8; i++) {
+ printk("CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
+ }
+
+ /* dump registers before resetting gpu, if enabled: */
+ if (hang_debug)
+ a2xx_dump(gpu);
+
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 1);
+ gpu_read(gpu, REG_A2XX_RBBM_SOFT_RESET);
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0);
+ adreno_recover(gpu);
+}
+
+static void a2xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a2xx_gpu *a2xx_gpu = to_a2xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ adreno_gpu_cleanup(adreno_gpu);
+
+ kfree(a2xx_gpu);
+}
+
+static bool a2xx_idle(struct msm_gpu *gpu)
+{
+ /* wait for ringbuffer to drain: */
+ if (!adreno_idle(gpu, gpu->rb[0]))
+ return false;
+
+ /* then wait for GPU to finish: */
+ if (spin_until(!(gpu_read(gpu, REG_A2XX_RBBM_STATUS) &
+ A2XX_RBBM_STATUS_GUI_ACTIVE))) {
+ DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ return false;
+ }
+
+ return true;
+}
+
+static irqreturn_t a2xx_irq(struct msm_gpu *gpu)
+{
+ uint32_t mstatus, status;
+
+ mstatus = gpu_read(gpu, REG_A2XX_MASTER_INT_SIGNAL);
+
+ if (mstatus & A2XX_MASTER_INT_SIGNAL_MH_INT_STAT) {
+ status = gpu_read(gpu, REG_A2XX_MH_INTERRUPT_STATUS);
+
+ dev_warn(gpu->dev->dev, "MH_INT: %08X\n", status);
+ dev_warn(gpu->dev->dev, "MMU_PAGE_FAULT: %08X\n",
+ gpu_read(gpu, REG_A2XX_MH_MMU_PAGE_FAULT));
+
+ gpu_write(gpu, REG_A2XX_MH_INTERRUPT_CLEAR, status);
+ }
+
+ if (mstatus & A2XX_MASTER_INT_SIGNAL_CP_INT_STAT) {
+ status = gpu_read(gpu, REG_AXXX_CP_INT_STATUS);
+
+ /* only RB_INT is expected */
+ if (status & ~AXXX_CP_INT_CNTL_RB_INT_MASK)
+ dev_warn(gpu->dev->dev, "CP_INT: %08X\n", status);
+
+ gpu_write(gpu, REG_AXXX_CP_INT_ACK, status);
+ }
+
+ if (mstatus & A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT) {
+ status = gpu_read(gpu, REG_A2XX_RBBM_INT_STATUS);
+
+ dev_warn(gpu->dev->dev, "RBBM_INT: %08X\n", status);
+
+ gpu_write(gpu, REG_A2XX_RBBM_INT_ACK, status);
+ }
+
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
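+/*
+ * Inclusive (start, end) register ranges, terminated by a ~0 sentinel; these
+ * are hooked up to adreno_gpu->registers below and walked by the common
+ * adreno dump/show code.
+ */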
+static const unsigned int a200_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
+ 0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
+ 0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
+ 0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
+ 0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
+ 0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
+ 0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
+ 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A43, 0x0A45, 0x0A45,
+ 0x0A4E, 0x0A4F, 0x0C2C, 0x0C2C, 0x0C30, 0x0C30, 0x0C38, 0x0C3C,
+ 0x0C40, 0x0C40, 0x0C44, 0x0C44, 0x0C80, 0x0C86, 0x0C88, 0x0C94,
+ 0x0C99, 0x0C9A, 0x0CA4, 0x0CA5, 0x0D00, 0x0D03, 0x0D06, 0x0D06,
+ 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
+ 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
+ 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
+ 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x0F0C, 0x0F0C, 0x0F0E, 0x0F12,
+ 0x0F26, 0x0F2A, 0x0F2C, 0x0F2C, 0x2000, 0x2002, 0x2006, 0x200F,
+ 0x2080, 0x2082, 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184,
+ 0x21F5, 0x21F7, 0x2200, 0x2208, 0x2280, 0x2283, 0x2293, 0x2294,
+ 0x2300, 0x2308, 0x2312, 0x2312, 0x2316, 0x231D, 0x2324, 0x2326,
+ 0x2380, 0x2383, 0x2400, 0x2402, 0x2406, 0x240F, 0x2480, 0x2482,
+ 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7,
+ 0x2600, 0x2608, 0x2680, 0x2683, 0x2693, 0x2694, 0x2700, 0x2708,
+ 0x2712, 0x2712, 0x2716, 0x271D, 0x2724, 0x2726, 0x2780, 0x2783,
+ 0x4000, 0x4003, 0x4800, 0x4805, 0x4900, 0x4900, 0x4908, 0x4908,
+ ~0 /* sentinel */
+};
+
+static const unsigned int a220_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
+ 0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
+ 0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
+ 0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
+ 0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
+ 0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
+ 0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
+ 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A40, 0x0A42, 0x0A43,
+ 0x0A45, 0x0A45, 0x0A4E, 0x0A4F, 0x0C30, 0x0C30, 0x0C38, 0x0C39,
+ 0x0C3C, 0x0C3C, 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03,
+ 0x0D05, 0x0D06, 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1,
+ 0x0DC8, 0x0DD4, 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04,
+ 0x0E17, 0x0E1E, 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0,
+ 0x0ED4, 0x0ED7, 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x2002,
+ 0x2006, 0x200F, 0x2080, 0x2082, 0x2100, 0x2102, 0x2104, 0x2109,
+ 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7, 0x2200, 0x2202,
+ 0x2204, 0x2204, 0x2208, 0x2208, 0x2280, 0x2282, 0x2294, 0x2294,
+ 0x2300, 0x2308, 0x2309, 0x230A, 0x2312, 0x2312, 0x2316, 0x2316,
+ 0x2318, 0x231D, 0x2324, 0x2326, 0x2380, 0x2383, 0x2400, 0x2402,
+ 0x2406, 0x240F, 0x2480, 0x2482, 0x2500, 0x2502, 0x2504, 0x2509,
+ 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7, 0x2600, 0x2602,
+ 0x2604, 0x2606, 0x2608, 0x2608, 0x2680, 0x2682, 0x2694, 0x2694,
+ 0x2700, 0x2708, 0x2712, 0x2712, 0x2716, 0x2716, 0x2718, 0x271D,
+ 0x2724, 0x2726, 0x2780, 0x2783, 0x4000, 0x4003, 0x4800, 0x4805,
+ 0x4900, 0x4900, 0x4908, 0x4908,
+ ~0 /* sentinel */
+};
+
+static const unsigned int a225_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x013C, 0x013C, 0x0140, 0x014F, 0x01C0, 0x01C1,
+ 0x01C3, 0x01C8, 0x01D5, 0x01D9, 0x01DC, 0x01DD, 0x01EA, 0x01EA,
+ 0x01EE, 0x01F3, 0x01F6, 0x01F7, 0x01FC, 0x01FF, 0x0391, 0x0392,
+ 0x039B, 0x039E, 0x03B2, 0x03B5, 0x03B7, 0x03B7, 0x03F8, 0x03FB,
+ 0x0440, 0x0440, 0x0443, 0x0444, 0x044B, 0x044B, 0x044D, 0x044F,
+ 0x0452, 0x0452, 0x0454, 0x045B, 0x047F, 0x047F, 0x0578, 0x0587,
+ 0x05C9, 0x05C9, 0x05D0, 0x05D0, 0x0601, 0x0604, 0x0606, 0x0609,
+ 0x060B, 0x060E, 0x0613, 0x0614, 0x0A29, 0x0A2B, 0x0A2F, 0x0A31,
+ 0x0A40, 0x0A40, 0x0A42, 0x0A43, 0x0A45, 0x0A45, 0x0A4E, 0x0A4F,
+ 0x0C01, 0x0C1D, 0x0C30, 0x0C30, 0x0C38, 0x0C39, 0x0C3C, 0x0C3C,
+ 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03, 0x0D05, 0x0D06,
+ 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
+ 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
+ 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
+ 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x200F, 0x2080, 0x2082,
+ 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7,
+ 0x2200, 0x2202, 0x2204, 0x2206, 0x2208, 0x2210, 0x2220, 0x2222,
+ 0x2280, 0x2282, 0x2294, 0x2294, 0x2297, 0x2297, 0x2300, 0x230A,
+ 0x2312, 0x2312, 0x2315, 0x2316, 0x2318, 0x231D, 0x2324, 0x2326,
+ 0x2340, 0x2357, 0x2360, 0x2360, 0x2380, 0x2383, 0x2400, 0x240F,
+ 0x2480, 0x2482, 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584,
+ 0x25F5, 0x25F7, 0x2600, 0x2602, 0x2604, 0x2606, 0x2608, 0x2610,
+ 0x2620, 0x2622, 0x2680, 0x2682, 0x2694, 0x2694, 0x2697, 0x2697,
+ 0x2700, 0x270A, 0x2712, 0x2712, 0x2715, 0x2716, 0x2718, 0x271D,
+ 0x2724, 0x2726, 0x2740, 0x2757, 0x2760, 0x2760, 0x2780, 0x2783,
+ 0x4000, 0x4003, 0x4800, 0x4806, 0x4808, 0x4808, 0x4900, 0x4900,
+ 0x4908, 0x4908,
+ ~0 /* sentinel */
+};
+
+/* would be nice to not have to duplicate the _show() stuff with printk(): */
+static void a2xx_dump(struct msm_gpu *gpu)
+{
+ printk("status: %08x\n",
+ gpu_read(gpu, REG_A2XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu_state_get(gpu, state);
+
+ state->rbbm_status = gpu_read(gpu, REG_A2XX_RBBM_STATUS);
+
+ return state;
+}
+
+static struct msm_gem_address_space *
+a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+{
+ struct msm_mmu *mmu = msm_gpummu_new(&pdev->dev, gpu);
+ struct msm_gem_address_space *aspace;
+
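+	/* same VA window as the MH_MMU_VA_RANGE setup in a2xx_hw_init() */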
+ aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
+ 0xfff * SZ_64K);
+
+ if (IS_ERR(aspace) && !IS_ERR(mmu))
+ mmu->funcs->destroy(mmu);
+
+ return aspace;
+}
+
+static u32 a2xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR);
+ return ring->memptrs->rptr;
+}
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a2xx_hw_init,
+ .pm_suspend = msm_gpu_pm_suspend,
+ .pm_resume = msm_gpu_pm_resume,
+ .recover = a2xx_recover,
+ .submit = a2xx_submit,
+ .active_ring = adreno_active_ring,
+ .irq = a2xx_irq,
+ .destroy = a2xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
+#endif
+ .gpu_state_get = a2xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
+ .create_address_space = a2xx_create_address_space,
+ .get_rptr = a2xx_get_rptr,
+ },
+};
+
+static const struct msm_gpu_perfcntr perfcntrs[] = {
+/* TODO */
+};
+
+struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
+{
+ struct a2xx_gpu *a2xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ int ret;
+
+ if (!pdev) {
+ dev_err(dev->dev, "no a2xx device\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ a2xx_gpu = kzalloc(sizeof(*a2xx_gpu), GFP_KERNEL);
+ if (!a2xx_gpu) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ adreno_gpu = &a2xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ gpu->perfcntrs = perfcntrs;
+ gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ if (ret)
+ goto fail;
+
+ if (adreno_is_a20x(adreno_gpu))
+ adreno_gpu->registers = a200_registers;
+ else if (adreno_is_a225(adreno_gpu))
+ adreno_gpu->registers = a225_registers;
+ else
+ adreno_gpu->registers = a220_registers;
+
+ if (!gpu->aspace) {
+ dev_err(dev->dev, "No memory protection without MMU\n");
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
+ }
+
+ return gpu;
+
+fail:
+ if (a2xx_gpu)
+ a2xx_destroy(&a2xx_gpu->base.base);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.h b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
new file mode 100644
index 0000000000..161a075f94
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#ifndef __A2XX_GPU_H__
+#define __A2XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a2xx.xml.h"
+
+struct a2xx_gpu {
+ struct adreno_gpu base;
+ bool pm_enabled;
+ bool protection_disabled;
+};
+#define to_a2xx_gpu(x) container_of(x, struct a2xx_gpu, base)
+
+#endif /* __A2XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
new file mode 100644
index 0000000000..237b564445
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -0,0 +1,3247 @@
+#ifndef A3XX_XML
+#define A3XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52)
+
+Copyright (C) 2013-2022 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a3xx_tile_mode {
+ LINEAR = 0,
+ TILE_4X4 = 1,
+ TILE_32X32 = 2,
+ TILE_4X2 = 3,
+};
+
+enum a3xx_state_block_id {
+ HLSQ_BLOCK_ID_TP_TEX = 2,
+ HLSQ_BLOCK_ID_TP_MIPMAP = 3,
+ HLSQ_BLOCK_ID_SP_VS = 4,
+ HLSQ_BLOCK_ID_SP_FS = 6,
+};
+
+enum a3xx_cache_opcode {
+ INVALIDATE = 1,
+};
+
+enum a3xx_vtx_fmt {
+ VFMT_32_FLOAT = 0,
+ VFMT_32_32_FLOAT = 1,
+ VFMT_32_32_32_FLOAT = 2,
+ VFMT_32_32_32_32_FLOAT = 3,
+ VFMT_16_FLOAT = 4,
+ VFMT_16_16_FLOAT = 5,
+ VFMT_16_16_16_FLOAT = 6,
+ VFMT_16_16_16_16_FLOAT = 7,
+ VFMT_32_FIXED = 8,
+ VFMT_32_32_FIXED = 9,
+ VFMT_32_32_32_FIXED = 10,
+ VFMT_32_32_32_32_FIXED = 11,
+ VFMT_16_SINT = 16,
+ VFMT_16_16_SINT = 17,
+ VFMT_16_16_16_SINT = 18,
+ VFMT_16_16_16_16_SINT = 19,
+ VFMT_16_UINT = 20,
+ VFMT_16_16_UINT = 21,
+ VFMT_16_16_16_UINT = 22,
+ VFMT_16_16_16_16_UINT = 23,
+ VFMT_16_SNORM = 24,
+ VFMT_16_16_SNORM = 25,
+ VFMT_16_16_16_SNORM = 26,
+ VFMT_16_16_16_16_SNORM = 27,
+ VFMT_16_UNORM = 28,
+ VFMT_16_16_UNORM = 29,
+ VFMT_16_16_16_UNORM = 30,
+ VFMT_16_16_16_16_UNORM = 31,
+ VFMT_32_UINT = 32,
+ VFMT_32_32_UINT = 33,
+ VFMT_32_32_32_UINT = 34,
+ VFMT_32_32_32_32_UINT = 35,
+ VFMT_32_SINT = 36,
+ VFMT_32_32_SINT = 37,
+ VFMT_32_32_32_SINT = 38,
+ VFMT_32_32_32_32_SINT = 39,
+ VFMT_8_UINT = 40,
+ VFMT_8_8_UINT = 41,
+ VFMT_8_8_8_UINT = 42,
+ VFMT_8_8_8_8_UINT = 43,
+ VFMT_8_UNORM = 44,
+ VFMT_8_8_UNORM = 45,
+ VFMT_8_8_8_UNORM = 46,
+ VFMT_8_8_8_8_UNORM = 47,
+ VFMT_8_SINT = 48,
+ VFMT_8_8_SINT = 49,
+ VFMT_8_8_8_SINT = 50,
+ VFMT_8_8_8_8_SINT = 51,
+ VFMT_8_SNORM = 52,
+ VFMT_8_8_SNORM = 53,
+ VFMT_8_8_8_SNORM = 54,
+ VFMT_8_8_8_8_SNORM = 55,
+ VFMT_10_10_10_2_UINT = 56,
+ VFMT_10_10_10_2_UNORM = 57,
+ VFMT_10_10_10_2_SINT = 58,
+ VFMT_10_10_10_2_SNORM = 59,
+ VFMT_2_10_10_10_UINT = 60,
+ VFMT_2_10_10_10_UNORM = 61,
+ VFMT_2_10_10_10_SINT = 62,
+ VFMT_2_10_10_10_SNORM = 63,
+ VFMT_NONE = 255,
+};
+
+enum a3xx_tex_fmt {
+ TFMT_5_6_5_UNORM = 4,
+ TFMT_5_5_5_1_UNORM = 5,
+ TFMT_4_4_4_4_UNORM = 7,
+ TFMT_Z16_UNORM = 9,
+ TFMT_X8Z24_UNORM = 10,
+ TFMT_Z32_FLOAT = 11,
+ TFMT_UV_64X32 = 16,
+ TFMT_VU_64X32 = 17,
+ TFMT_Y_64X32 = 18,
+ TFMT_NV12_64X32 = 19,
+ TFMT_UV_LINEAR = 20,
+ TFMT_VU_LINEAR = 21,
+ TFMT_Y_LINEAR = 22,
+ TFMT_NV12_LINEAR = 23,
+ TFMT_I420_Y = 24,
+ TFMT_I420_U = 26,
+ TFMT_I420_V = 27,
+ TFMT_ATC_RGB = 32,
+ TFMT_ATC_RGBA_EXPLICIT = 33,
+ TFMT_ETC1 = 34,
+ TFMT_ATC_RGBA_INTERPOLATED = 35,
+ TFMT_DXT1 = 36,
+ TFMT_DXT3 = 37,
+ TFMT_DXT5 = 38,
+ TFMT_2_10_10_10_UNORM = 40,
+ TFMT_10_10_10_2_UNORM = 41,
+ TFMT_9_9_9_E5_FLOAT = 42,
+ TFMT_11_11_10_FLOAT = 43,
+ TFMT_A8_UNORM = 44,
+ TFMT_L8_UNORM = 45,
+ TFMT_L8_A8_UNORM = 47,
+ TFMT_8_UNORM = 48,
+ TFMT_8_8_UNORM = 49,
+ TFMT_8_8_8_UNORM = 50,
+ TFMT_8_8_8_8_UNORM = 51,
+ TFMT_8_SNORM = 52,
+ TFMT_8_8_SNORM = 53,
+ TFMT_8_8_8_SNORM = 54,
+ TFMT_8_8_8_8_SNORM = 55,
+ TFMT_8_UINT = 56,
+ TFMT_8_8_UINT = 57,
+ TFMT_8_8_8_UINT = 58,
+ TFMT_8_8_8_8_UINT = 59,
+ TFMT_8_SINT = 60,
+ TFMT_8_8_SINT = 61,
+ TFMT_8_8_8_SINT = 62,
+ TFMT_8_8_8_8_SINT = 63,
+ TFMT_16_FLOAT = 64,
+ TFMT_16_16_FLOAT = 65,
+ TFMT_16_16_16_16_FLOAT = 67,
+ TFMT_16_UINT = 68,
+ TFMT_16_16_UINT = 69,
+ TFMT_16_16_16_16_UINT = 71,
+ TFMT_16_SINT = 72,
+ TFMT_16_16_SINT = 73,
+ TFMT_16_16_16_16_SINT = 75,
+ TFMT_16_UNORM = 76,
+ TFMT_16_16_UNORM = 77,
+ TFMT_16_16_16_16_UNORM = 79,
+ TFMT_16_SNORM = 80,
+ TFMT_16_16_SNORM = 81,
+ TFMT_16_16_16_16_SNORM = 83,
+ TFMT_32_FLOAT = 84,
+ TFMT_32_32_FLOAT = 85,
+ TFMT_32_32_32_32_FLOAT = 87,
+ TFMT_32_UINT = 88,
+ TFMT_32_32_UINT = 89,
+ TFMT_32_32_32_32_UINT = 91,
+ TFMT_32_SINT = 92,
+ TFMT_32_32_SINT = 93,
+ TFMT_32_32_32_32_SINT = 95,
+ TFMT_2_10_10_10_UINT = 96,
+ TFMT_10_10_10_2_UINT = 97,
+ TFMT_ETC2_RG11_SNORM = 112,
+ TFMT_ETC2_RG11_UNORM = 113,
+ TFMT_ETC2_R11_SNORM = 114,
+ TFMT_ETC2_R11_UNORM = 115,
+ TFMT_ETC2_RGBA8 = 116,
+ TFMT_ETC2_RGB8A1 = 117,
+ TFMT_ETC2_RGB8 = 118,
+ TFMT_NONE = 255,
+};
+
+enum a3xx_color_fmt {
+ RB_R5G6B5_UNORM = 0,
+ RB_R5G5B5A1_UNORM = 1,
+ RB_R4G4B4A4_UNORM = 3,
+ RB_R8G8B8_UNORM = 4,
+ RB_R8G8B8A8_UNORM = 8,
+ RB_R8G8B8A8_SNORM = 9,
+ RB_R8G8B8A8_UINT = 10,
+ RB_R8G8B8A8_SINT = 11,
+ RB_R8G8_UNORM = 12,
+ RB_R8G8_SNORM = 13,
+ RB_R8G8_UINT = 14,
+ RB_R8G8_SINT = 15,
+ RB_R10G10B10A2_UNORM = 16,
+ RB_A2R10G10B10_UNORM = 17,
+ RB_R10G10B10A2_UINT = 18,
+ RB_A2R10G10B10_UINT = 19,
+ RB_A8_UNORM = 20,
+ RB_R8_UNORM = 21,
+ RB_R16_FLOAT = 24,
+ RB_R16G16_FLOAT = 25,
+ RB_R16G16B16A16_FLOAT = 27,
+ RB_R11G11B10_FLOAT = 28,
+ RB_R16_SNORM = 32,
+ RB_R16G16_SNORM = 33,
+ RB_R16G16B16A16_SNORM = 35,
+ RB_R16_UNORM = 36,
+ RB_R16G16_UNORM = 37,
+ RB_R16G16B16A16_UNORM = 39,
+ RB_R16_SINT = 40,
+ RB_R16G16_SINT = 41,
+ RB_R16G16B16A16_SINT = 43,
+ RB_R16_UINT = 44,
+ RB_R16G16_UINT = 45,
+ RB_R16G16B16A16_UINT = 47,
+ RB_R32_FLOAT = 48,
+ RB_R32G32_FLOAT = 49,
+ RB_R32G32B32A32_FLOAT = 51,
+ RB_R32_SINT = 52,
+ RB_R32G32_SINT = 53,
+ RB_R32G32B32A32_SINT = 55,
+ RB_R32_UINT = 56,
+ RB_R32G32_UINT = 57,
+ RB_R32G32B32A32_UINT = 59,
+ RB_NONE = 255,
+};
+
+enum a3xx_cp_perfcounter_select {
+ CP_ALWAYS_COUNT = 0,
+ CP_AHB_PFPTRANS_WAIT = 3,
+ CP_AHB_NRTTRANS_WAIT = 6,
+ CP_CSF_NRT_READ_WAIT = 8,
+ CP_CSF_I1_FIFO_FULL = 9,
+ CP_CSF_I2_FIFO_FULL = 10,
+ CP_CSF_ST_FIFO_FULL = 11,
+ CP_RESERVED_12 = 12,
+ CP_CSF_RING_ROQ_FULL = 13,
+ CP_CSF_I1_ROQ_FULL = 14,
+ CP_CSF_I2_ROQ_FULL = 15,
+ CP_CSF_ST_ROQ_FULL = 16,
+ CP_RESERVED_17 = 17,
+ CP_MIU_TAG_MEM_FULL = 18,
+ CP_MIU_NRT_WRITE_STALLED = 22,
+ CP_MIU_NRT_READ_STALLED = 23,
+ CP_ME_REGS_RB_DONE_FIFO_FULL = 26,
+ CP_ME_REGS_VS_EVENT_FIFO_FULL = 27,
+ CP_ME_REGS_PS_EVENT_FIFO_FULL = 28,
+ CP_ME_REGS_CF_EVENT_FIFO_FULL = 29,
+ CP_ME_MICRO_RB_STARVED = 30,
+ CP_AHB_RBBM_DWORD_SENT = 40,
+ CP_ME_BUSY_CLOCKS = 41,
+ CP_ME_WAIT_CONTEXT_AVAIL = 42,
+ CP_PFP_TYPE0_PACKET = 43,
+ CP_PFP_TYPE3_PACKET = 44,
+ CP_CSF_RB_WPTR_NEQ_RPTR = 45,
+ CP_CSF_I1_SIZE_NEQ_ZERO = 46,
+ CP_CSF_I2_SIZE_NEQ_ZERO = 47,
+ CP_CSF_RBI1I2_FETCHING = 48,
+};
+
+enum a3xx_gras_tse_perfcounter_select {
+ GRAS_TSEPERF_INPUT_PRIM = 0,
+ GRAS_TSEPERF_INPUT_NULL_PRIM = 1,
+ GRAS_TSEPERF_TRIVAL_REJ_PRIM = 2,
+ GRAS_TSEPERF_CLIPPED_PRIM = 3,
+ GRAS_TSEPERF_NEW_PRIM = 4,
+ GRAS_TSEPERF_ZERO_AREA_PRIM = 5,
+ GRAS_TSEPERF_FACENESS_CULLED_PRIM = 6,
+ GRAS_TSEPERF_ZERO_PIXEL_PRIM = 7,
+ GRAS_TSEPERF_OUTPUT_NULL_PRIM = 8,
+ GRAS_TSEPERF_OUTPUT_VISIBLE_PRIM = 9,
+ GRAS_TSEPERF_PRE_CLIP_PRIM = 10,
+ GRAS_TSEPERF_POST_CLIP_PRIM = 11,
+ GRAS_TSEPERF_WORKING_CYCLES = 12,
+ GRAS_TSEPERF_PC_STARVE = 13,
+ GRAS_TSERASPERF_STALL = 14,
+};
+
+enum a3xx_gras_ras_perfcounter_select {
+ GRAS_RASPERF_16X16_TILES = 0,
+ GRAS_RASPERF_8X8_TILES = 1,
+ GRAS_RASPERF_4X4_TILES = 2,
+ GRAS_RASPERF_WORKING_CYCLES = 3,
+ GRAS_RASPERF_STALL_CYCLES_BY_RB = 4,
+ GRAS_RASPERF_STALL_CYCLES_BY_VSC = 5,
+ GRAS_RASPERF_STARVE_CYCLES_BY_TSE = 6,
+};
+
+enum a3xx_hlsq_perfcounter_select {
+ HLSQ_PERF_SP_VS_CONSTANT = 0,
+ HLSQ_PERF_SP_VS_INSTRUCTIONS = 1,
+ HLSQ_PERF_SP_FS_CONSTANT = 2,
+ HLSQ_PERF_SP_FS_INSTRUCTIONS = 3,
+ HLSQ_PERF_TP_STATE = 4,
+ HLSQ_PERF_QUADS = 5,
+ HLSQ_PERF_PIXELS = 6,
+ HLSQ_PERF_VERTICES = 7,
+ HLSQ_PERF_FS8_THREADS = 8,
+ HLSQ_PERF_FS16_THREADS = 9,
+ HLSQ_PERF_FS32_THREADS = 10,
+ HLSQ_PERF_VS8_THREADS = 11,
+ HLSQ_PERF_VS16_THREADS = 12,
+ HLSQ_PERF_SP_VS_DATA_BYTES = 13,
+ HLSQ_PERF_SP_FS_DATA_BYTES = 14,
+ HLSQ_PERF_ACTIVE_CYCLES = 15,
+ HLSQ_PERF_STALL_CYCLES_SP_STATE = 16,
+ HLSQ_PERF_STALL_CYCLES_SP_VS = 17,
+ HLSQ_PERF_STALL_CYCLES_SP_FS = 18,
+ HLSQ_PERF_STALL_CYCLES_UCHE = 19,
+ HLSQ_PERF_RBBM_LOAD_CYCLES = 20,
+ HLSQ_PERF_DI_TO_VS_START_SP0 = 21,
+ HLSQ_PERF_DI_TO_FS_START_SP0 = 22,
+ HLSQ_PERF_VS_START_TO_DONE_SP0 = 23,
+ HLSQ_PERF_FS_START_TO_DONE_SP0 = 24,
+ HLSQ_PERF_SP_STATE_COPY_CYCLES_VS = 25,
+ HLSQ_PERF_SP_STATE_COPY_CYCLES_FS = 26,
+ HLSQ_PERF_UCHE_LATENCY_CYCLES = 27,
+ HLSQ_PERF_UCHE_LATENCY_COUNT = 28,
+};
+
+enum a3xx_pc_perfcounter_select {
+ PC_PCPERF_VISIBILITY_STREAMS = 0,
+ PC_PCPERF_TOTAL_INSTANCES = 1,
+ PC_PCPERF_PRIMITIVES_PC_VPC = 2,
+ PC_PCPERF_PRIMITIVES_KILLED_BY_VS = 3,
+ PC_PCPERF_PRIMITIVES_VISIBLE_BY_VS = 4,
+ PC_PCPERF_DRAWCALLS_KILLED_BY_VS = 5,
+ PC_PCPERF_DRAWCALLS_VISIBLE_BY_VS = 6,
+ PC_PCPERF_VERTICES_TO_VFD = 7,
+ PC_PCPERF_REUSED_VERTICES = 8,
+ PC_PCPERF_CYCLES_STALLED_BY_VFD = 9,
+ PC_PCPERF_CYCLES_STALLED_BY_TSE = 10,
+ PC_PCPERF_CYCLES_STALLED_BY_VBIF = 11,
+ PC_PCPERF_CYCLES_IS_WORKING = 12,
+};
+
+enum a3xx_rb_perfcounter_select {
+ RB_RBPERF_ACTIVE_CYCLES_ANY = 0,
+ RB_RBPERF_ACTIVE_CYCLES_ALL = 1,
+ RB_RBPERF_STARVE_CYCLES_BY_SP = 2,
+ RB_RBPERF_STARVE_CYCLES_BY_RAS = 3,
+ RB_RBPERF_STARVE_CYCLES_BY_MARB = 4,
+ RB_RBPERF_STALL_CYCLES_BY_MARB = 5,
+ RB_RBPERF_STALL_CYCLES_BY_HLSQ = 6,
+ RB_RBPERF_RB_MARB_DATA = 7,
+ RB_RBPERF_SP_RB_QUAD = 8,
+ RB_RBPERF_RAS_EARLY_Z_QUADS = 9,
+ RB_RBPERF_GMEM_CH0_READ = 10,
+ RB_RBPERF_GMEM_CH1_READ = 11,
+ RB_RBPERF_GMEM_CH0_WRITE = 12,
+ RB_RBPERF_GMEM_CH1_WRITE = 13,
+ RB_RBPERF_CP_CONTEXT_DONE = 14,
+ RB_RBPERF_CP_CACHE_FLUSH = 15,
+ RB_RBPERF_CP_ZPASS_DONE = 16,
+};
+
+enum a3xx_rbbm_perfcounter_select {
+ RBBM_ALAWYS_ON = 0,
+ RBBM_VBIF_BUSY = 1,
+ RBBM_TSE_BUSY = 2,
+ RBBM_RAS_BUSY = 3,
+ RBBM_PC_DCALL_BUSY = 4,
+ RBBM_PC_VSD_BUSY = 5,
+ RBBM_VFD_BUSY = 6,
+ RBBM_VPC_BUSY = 7,
+ RBBM_UCHE_BUSY = 8,
+ RBBM_VSC_BUSY = 9,
+ RBBM_HLSQ_BUSY = 10,
+ RBBM_ANY_RB_BUSY = 11,
+ RBBM_ANY_TEX_BUSY = 12,
+ RBBM_ANY_USP_BUSY = 13,
+ RBBM_ANY_MARB_BUSY = 14,
+ RBBM_ANY_ARB_BUSY = 15,
+ RBBM_AHB_STATUS_BUSY = 16,
+ RBBM_AHB_STATUS_STALLED = 17,
+ RBBM_AHB_STATUS_TXFR = 18,
+ RBBM_AHB_STATUS_TXFR_SPLIT = 19,
+ RBBM_AHB_STATUS_TXFR_ERROR = 20,
+ RBBM_AHB_STATUS_LONG_STALL = 21,
+ RBBM_RBBM_STATUS_MASKED = 22,
+};
+
+enum a3xx_sp_perfcounter_select {
+ SP_LM_LOAD_INSTRUCTIONS = 0,
+ SP_LM_STORE_INSTRUCTIONS = 1,
+ SP_LM_ATOMICS = 2,
+ SP_UCHE_LOAD_INSTRUCTIONS = 3,
+ SP_UCHE_STORE_INSTRUCTIONS = 4,
+ SP_UCHE_ATOMICS = 5,
+ SP_VS_TEX_INSTRUCTIONS = 6,
+ SP_VS_CFLOW_INSTRUCTIONS = 7,
+ SP_VS_EFU_INSTRUCTIONS = 8,
+ SP_VS_FULL_ALU_INSTRUCTIONS = 9,
+ SP_VS_HALF_ALU_INSTRUCTIONS = 10,
+ SP_FS_TEX_INSTRUCTIONS = 11,
+ SP_FS_CFLOW_INSTRUCTIONS = 12,
+ SP_FS_EFU_INSTRUCTIONS = 13,
+ SP_FS_FULL_ALU_INSTRUCTIONS = 14,
+ SP_FS_HALF_ALU_INSTRUCTIONS = 15,
+ SP_FS_BARY_INSTRUCTIONS = 16,
+ SP_VS_INSTRUCTIONS = 17,
+ SP_FS_INSTRUCTIONS = 18,
+ SP_ADDR_LOCK_COUNT = 19,
+ SP_UCHE_READ_TRANS = 20,
+ SP_UCHE_WRITE_TRANS = 21,
+ SP_EXPORT_VPC_TRANS = 22,
+ SP_EXPORT_RB_TRANS = 23,
+ SP_PIXELS_KILLED = 24,
+ SP_ICL1_REQUESTS = 25,
+ SP_ICL1_MISSES = 26,
+ SP_ICL0_REQUESTS = 27,
+ SP_ICL0_MISSES = 28,
+ SP_ALU_ACTIVE_CYCLES = 29,
+ SP_EFU_ACTIVE_CYCLES = 30,
+ SP_STALL_CYCLES_BY_VPC = 31,
+ SP_STALL_CYCLES_BY_TP = 32,
+ SP_STALL_CYCLES_BY_UCHE = 33,
+ SP_STALL_CYCLES_BY_RB = 34,
+ SP_ACTIVE_CYCLES_ANY = 35,
+ SP_ACTIVE_CYCLES_ALL = 36,
+};
+
+enum a3xx_tp_perfcounter_select {
+ TPL1_TPPERF_L1_REQUESTS = 0,
+ TPL1_TPPERF_TP0_L1_REQUESTS = 1,
+ TPL1_TPPERF_TP0_L1_MISSES = 2,
+ TPL1_TPPERF_TP1_L1_REQUESTS = 3,
+ TPL1_TPPERF_TP1_L1_MISSES = 4,
+ TPL1_TPPERF_TP2_L1_REQUESTS = 5,
+ TPL1_TPPERF_TP2_L1_MISSES = 6,
+ TPL1_TPPERF_TP3_L1_REQUESTS = 7,
+ TPL1_TPPERF_TP3_L1_MISSES = 8,
+ TPL1_TPPERF_OUTPUT_TEXELS_POINT = 9,
+ TPL1_TPPERF_OUTPUT_TEXELS_BILINEAR = 10,
+ TPL1_TPPERF_OUTPUT_TEXELS_MIP = 11,
+ TPL1_TPPERF_OUTPUT_TEXELS_ANISO = 12,
+ TPL1_TPPERF_BILINEAR_OPS = 13,
+ TPL1_TPPERF_QUADSQUADS_OFFSET = 14,
+ TPL1_TPPERF_QUADQUADS_SHADOW = 15,
+ TPL1_TPPERF_QUADS_ARRAY = 16,
+ TPL1_TPPERF_QUADS_PROJECTION = 17,
+ TPL1_TPPERF_QUADS_GRADIENT = 18,
+ TPL1_TPPERF_QUADS_1D2D = 19,
+ TPL1_TPPERF_QUADS_3DCUBE = 20,
+ TPL1_TPPERF_ZERO_LOD = 21,
+ TPL1_TPPERF_OUTPUT_TEXELS = 22,
+ TPL1_TPPERF_ACTIVE_CYCLES_ANY = 23,
+ TPL1_TPPERF_ACTIVE_CYCLES_ALL = 24,
+ TPL1_TPPERF_STALL_CYCLES_BY_ARB = 25,
+ TPL1_TPPERF_LATENCY = 26,
+ TPL1_TPPERF_LATENCY_TRANS = 27,
+};
+
+enum a3xx_vfd_perfcounter_select {
+ VFD_PERF_UCHE_BYTE_FETCHED = 0,
+ VFD_PERF_UCHE_TRANS = 1,
+ VFD_PERF_VPC_BYPASS_COMPONENTS = 2,
+ VFD_PERF_FETCH_INSTRUCTIONS = 3,
+ VFD_PERF_DECODE_INSTRUCTIONS = 4,
+ VFD_PERF_ACTIVE_CYCLES = 5,
+ VFD_PERF_STALL_CYCLES_UCHE = 6,
+ VFD_PERF_STALL_CYCLES_HLSQ = 7,
+ VFD_PERF_STALL_CYCLES_VPC_BYPASS = 8,
+ VFD_PERF_STALL_CYCLES_VPC_ALLOC = 9,
+};
+
+enum a3xx_vpc_perfcounter_select {
+ VPC_PERF_SP_LM_PRIMITIVES = 0,
+ VPC_PERF_COMPONENTS_FROM_SP = 1,
+ VPC_PERF_SP_LM_COMPONENTS = 2,
+ VPC_PERF_ACTIVE_CYCLES = 3,
+ VPC_PERF_STALL_CYCLES_LM = 4,
+ VPC_PERF_STALL_CYCLES_RAS = 5,
+};
+
+enum a3xx_uche_perfcounter_select {
+ UCHE_UCHEPERF_VBIF_READ_BEATS_TP = 0,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_VFD = 1,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_HLSQ = 2,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_MARB = 3,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_SP = 4,
+ UCHE_UCHEPERF_READ_REQUESTS_TP = 8,
+ UCHE_UCHEPERF_READ_REQUESTS_VFD = 9,
+ UCHE_UCHEPERF_READ_REQUESTS_HLSQ = 10,
+ UCHE_UCHEPERF_READ_REQUESTS_MARB = 11,
+ UCHE_UCHEPERF_READ_REQUESTS_SP = 12,
+ UCHE_UCHEPERF_WRITE_REQUESTS_MARB = 13,
+ UCHE_UCHEPERF_WRITE_REQUESTS_SP = 14,
+ UCHE_UCHEPERF_TAG_CHECK_FAILS = 15,
+ UCHE_UCHEPERF_EVICTS = 16,
+ UCHE_UCHEPERF_FLUSHES = 17,
+ UCHE_UCHEPERF_VBIF_LATENCY_CYCLES = 18,
+ UCHE_UCHEPERF_VBIF_LATENCY_SAMPLES = 19,
+ UCHE_UCHEPERF_ACTIVE_CYCLES = 20,
+};
+
+enum a3xx_intp_mode {
+ SMOOTH = 0,
+ FLAT = 1,
+ ZERO = 2,
+ ONE = 3,
+};
+
+enum a3xx_repl_mode {
+ S = 1,
+ T = 2,
+ ONE_T = 3,
+};
+
+enum a3xx_tex_filter {
+ A3XX_TEX_NEAREST = 0,
+ A3XX_TEX_LINEAR = 1,
+ A3XX_TEX_ANISO = 2,
+};
+
+enum a3xx_tex_clamp {
+ A3XX_TEX_REPEAT = 0,
+ A3XX_TEX_CLAMP_TO_EDGE = 1,
+ A3XX_TEX_MIRROR_REPEAT = 2,
+ A3XX_TEX_CLAMP_TO_BORDER = 3,
+ A3XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a3xx_tex_aniso {
+ A3XX_TEX_ANISO_1 = 0,
+ A3XX_TEX_ANISO_2 = 1,
+ A3XX_TEX_ANISO_4 = 2,
+ A3XX_TEX_ANISO_8 = 3,
+ A3XX_TEX_ANISO_16 = 4,
+};
+
+enum a3xx_tex_swiz {
+ A3XX_TEX_X = 0,
+ A3XX_TEX_Y = 1,
+ A3XX_TEX_Z = 2,
+ A3XX_TEX_W = 3,
+ A3XX_TEX_ZERO = 4,
+ A3XX_TEX_ONE = 5,
+};
+
+enum a3xx_tex_type {
+ A3XX_TEX_1D = 0,
+ A3XX_TEX_2D = 1,
+ A3XX_TEX_CUBE = 2,
+ A3XX_TEX_3D = 3,
+};
+
+enum a3xx_tex_msaa {
+ A3XX_TPL1_MSAA1X = 0,
+ A3XX_TPL1_MSAA2X = 1,
+ A3XX_TPL1_MSAA4X = 2,
+ A3XX_TPL1_MSAA8X = 3,
+};
+
+#define A3XX_INT0_RBBM_GPU_IDLE 0x00000001
+#define A3XX_INT0_RBBM_AHB_ERROR 0x00000002
+#define A3XX_INT0_RBBM_REG_TIMEOUT 0x00000004
+#define A3XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A3XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A3XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00000020
+#define A3XX_INT0_VFD_ERROR 0x00000040
+#define A3XX_INT0_CP_SW_INT 0x00000080
+#define A3XX_INT0_CP_T0_PACKET_IN_IB 0x00000100
+#define A3XX_INT0_CP_OPCODE_ERROR 0x00000200
+#define A3XX_INT0_CP_RESERVED_BIT_ERROR 0x00000400
+#define A3XX_INT0_CP_HW_FAULT 0x00000800
+#define A3XX_INT0_CP_DMA 0x00001000
+#define A3XX_INT0_CP_IB2_INT 0x00002000
+#define A3XX_INT0_CP_IB1_INT 0x00004000
+#define A3XX_INT0_CP_RB_INT 0x00008000
+#define A3XX_INT0_CP_REG_PROTECT_FAULT 0x00010000
+#define A3XX_INT0_CP_RB_DONE_TS 0x00020000
+#define A3XX_INT0_CP_VS_DONE_TS 0x00040000
+#define A3XX_INT0_CP_PS_DONE_TS 0x00080000
+#define A3XX_INT0_CACHE_FLUSH_TS 0x00100000
+#define A3XX_INT0_CP_AHB_ERROR_HALT 0x00200000
+#define A3XX_INT0_MISC_HANG_DETECT 0x01000000
+#define A3XX_INT0_UCHE_OOB_ACCESS 0x02000000
+#define REG_A3XX_RBBM_HW_VERSION 0x00000000
+
+#define REG_A3XX_RBBM_HW_RELEASE 0x00000001
+
+#define REG_A3XX_RBBM_HW_CONFIGURATION 0x00000002
+
+#define REG_A3XX_RBBM_CLOCK_CTL 0x00000010
+
+#define REG_A3XX_RBBM_SP_HYST_CNT 0x00000012
+
+#define REG_A3XX_RBBM_SW_RESET_CMD 0x00000018
+
+#define REG_A3XX_RBBM_AHB_CTL0 0x00000020
+
+#define REG_A3XX_RBBM_AHB_CTL1 0x00000021
+
+#define REG_A3XX_RBBM_AHB_CMD 0x00000022
+
+#define REG_A3XX_RBBM_AHB_ERROR_STATUS 0x00000027
+
+#define REG_A3XX_RBBM_GPR0_CTL 0x0000002e
+
+#define REG_A3XX_RBBM_STATUS 0x00000030
+#define A3XX_RBBM_STATUS_HI_BUSY 0x00000001
+#define A3XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
+#define A3XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
+#define A3XX_RBBM_STATUS_CP_NRT_BUSY 0x00004000
+#define A3XX_RBBM_STATUS_VBIF_BUSY 0x00008000
+#define A3XX_RBBM_STATUS_TSE_BUSY 0x00010000
+#define A3XX_RBBM_STATUS_RAS_BUSY 0x00020000
+#define A3XX_RBBM_STATUS_RB_BUSY 0x00040000
+#define A3XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
+#define A3XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
+#define A3XX_RBBM_STATUS_VFD_BUSY 0x00200000
+#define A3XX_RBBM_STATUS_VPC_BUSY 0x00400000
+#define A3XX_RBBM_STATUS_UCHE_BUSY 0x00800000
+#define A3XX_RBBM_STATUS_SP_BUSY 0x01000000
+#define A3XX_RBBM_STATUS_TPL1_BUSY 0x02000000
+#define A3XX_RBBM_STATUS_MARB_BUSY 0x04000000
+#define A3XX_RBBM_STATUS_VSC_BUSY 0x08000000
+#define A3XX_RBBM_STATUS_ARB_BUSY 0x10000000
+#define A3XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
+#define A3XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000
+#define A3XX_RBBM_STATUS_GPU_BUSY 0x80000000
+
+#define REG_A3XX_RBBM_NQWAIT_UNTIL 0x00000040
+
+#define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x00000033
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x00000050
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL0 0x00000051
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL1 0x00000054
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL2 0x00000057
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a
+
+#define REG_A3XX_RBBM_INT_SET_CMD 0x00000060
+
+#define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061
+
+#define REG_A3XX_RBBM_INT_0_MASK 0x00000063
+
+#define REG_A3XX_RBBM_INT_0_STATUS 0x00000064
+
+#define REG_A3XX_RBBM_PERFCTR_CTL 0x00000080
+#define A3XX_RBBM_PERFCTR_CTL_ENABLE 0x00000001
+
+#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD0 0x00000081
+
+#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD1 0x00000082
+
+#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000084
+
+#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000085
+
+#define REG_A3XX_RBBM_PERFCOUNTER0_SELECT 0x00000086
+
+#define REG_A3XX_RBBM_PERFCOUNTER1_SELECT 0x00000087
+
+#define REG_A3XX_RBBM_GPU_BUSY_MASKED 0x00000088
+
+#define REG_A3XX_RBBM_PERFCTR_CP_0_LO 0x00000090
+
+#define REG_A3XX_RBBM_PERFCTR_CP_0_HI 0x00000091
+
+#define REG_A3XX_RBBM_PERFCTR_RBBM_0_LO 0x00000092
+
+#define REG_A3XX_RBBM_PERFCTR_RBBM_0_HI 0x00000093
+
+#define REG_A3XX_RBBM_PERFCTR_RBBM_1_LO 0x00000094
+
+#define REG_A3XX_RBBM_PERFCTR_RBBM_1_HI 0x00000095
+
+#define REG_A3XX_RBBM_PERFCTR_PC_0_LO 0x00000096
+
+#define REG_A3XX_RBBM_PERFCTR_PC_0_HI 0x00000097
+
+#define REG_A3XX_RBBM_PERFCTR_PC_1_LO 0x00000098
+
+#define REG_A3XX_RBBM_PERFCTR_PC_1_HI 0x00000099
+
+#define REG_A3XX_RBBM_PERFCTR_PC_2_LO 0x0000009a
+
+#define REG_A3XX_RBBM_PERFCTR_PC_2_HI 0x0000009b
+
+#define REG_A3XX_RBBM_PERFCTR_PC_3_LO 0x0000009c
+
+#define REG_A3XX_RBBM_PERFCTR_PC_3_HI 0x0000009d
+
+#define REG_A3XX_RBBM_PERFCTR_VFD_0_LO 0x0000009e
+
+#define REG_A3XX_RBBM_PERFCTR_VFD_0_HI 0x0000009f
+
+#define REG_A3XX_RBBM_PERFCTR_VFD_1_LO 0x000000a0
+
+#define REG_A3XX_RBBM_PERFCTR_VFD_1_HI 0x000000a1
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000a2
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000a3
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000a4
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000a5
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000a6
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000a7
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000a8
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000a9
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000aa
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000ab
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000ac
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000ad
+
+#define REG_A3XX_RBBM_PERFCTR_VPC_0_LO 0x000000ae
+
+#define REG_A3XX_RBBM_PERFCTR_VPC_0_HI 0x000000af
+
+#define REG_A3XX_RBBM_PERFCTR_VPC_1_LO 0x000000b0
+
+#define REG_A3XX_RBBM_PERFCTR_VPC_1_HI 0x000000b1
+
+#define REG_A3XX_RBBM_PERFCTR_TSE_0_LO 0x000000b2
+
+#define REG_A3XX_RBBM_PERFCTR_TSE_0_HI 0x000000b3
+
+#define REG_A3XX_RBBM_PERFCTR_TSE_1_LO 0x000000b4
+
+#define REG_A3XX_RBBM_PERFCTR_TSE_1_HI 0x000000b5
+
+#define REG_A3XX_RBBM_PERFCTR_RAS_0_LO 0x000000b6
+
+#define REG_A3XX_RBBM_PERFCTR_RAS_0_HI 0x000000b7
+
+#define REG_A3XX_RBBM_PERFCTR_RAS_1_LO 0x000000b8
+
+#define REG_A3XX_RBBM_PERFCTR_RAS_1_HI 0x000000b9
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_0_LO 0x000000ba
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_0_HI 0x000000bb
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_1_LO 0x000000bc
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_1_HI 0x000000bd
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_2_LO 0x000000be
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_2_HI 0x000000bf
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_3_LO 0x000000c0
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_3_HI 0x000000c1
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_4_LO 0x000000c2
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_4_HI 0x000000c3
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_5_LO 0x000000c4
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_5_HI 0x000000c5
+
+#define REG_A3XX_RBBM_PERFCTR_TP_0_LO 0x000000c6
+
+#define REG_A3XX_RBBM_PERFCTR_TP_0_HI 0x000000c7
+
+#define REG_A3XX_RBBM_PERFCTR_TP_1_LO 0x000000c8
+
+#define REG_A3XX_RBBM_PERFCTR_TP_1_HI 0x000000c9
+
+#define REG_A3XX_RBBM_PERFCTR_TP_2_LO 0x000000ca
+
+#define REG_A3XX_RBBM_PERFCTR_TP_2_HI 0x000000cb
+
+#define REG_A3XX_RBBM_PERFCTR_TP_3_LO 0x000000cc
+
+#define REG_A3XX_RBBM_PERFCTR_TP_3_HI 0x000000cd
+
+#define REG_A3XX_RBBM_PERFCTR_TP_4_LO 0x000000ce
+
+#define REG_A3XX_RBBM_PERFCTR_TP_4_HI 0x000000cf
+
+#define REG_A3XX_RBBM_PERFCTR_TP_5_LO 0x000000d0
+
+#define REG_A3XX_RBBM_PERFCTR_TP_5_HI 0x000000d1
+
+#define REG_A3XX_RBBM_PERFCTR_SP_0_LO 0x000000d2
+
+#define REG_A3XX_RBBM_PERFCTR_SP_0_HI 0x000000d3
+
+#define REG_A3XX_RBBM_PERFCTR_SP_1_LO 0x000000d4
+
+#define REG_A3XX_RBBM_PERFCTR_SP_1_HI 0x000000d5
+
+#define REG_A3XX_RBBM_PERFCTR_SP_2_LO 0x000000d6
+
+#define REG_A3XX_RBBM_PERFCTR_SP_2_HI 0x000000d7
+
+#define REG_A3XX_RBBM_PERFCTR_SP_3_LO 0x000000d8
+
+#define REG_A3XX_RBBM_PERFCTR_SP_3_HI 0x000000d9
+
+#define REG_A3XX_RBBM_PERFCTR_SP_4_LO 0x000000da
+
+#define REG_A3XX_RBBM_PERFCTR_SP_4_HI 0x000000db
+
+#define REG_A3XX_RBBM_PERFCTR_SP_5_LO 0x000000dc
+
+#define REG_A3XX_RBBM_PERFCTR_SP_5_HI 0x000000dd
+
+#define REG_A3XX_RBBM_PERFCTR_SP_6_LO 0x000000de
+
+#define REG_A3XX_RBBM_PERFCTR_SP_6_HI 0x000000df
+
+#define REG_A3XX_RBBM_PERFCTR_SP_7_LO 0x000000e0
+
+#define REG_A3XX_RBBM_PERFCTR_SP_7_HI 0x000000e1
+
+#define REG_A3XX_RBBM_PERFCTR_RB_0_LO 0x000000e2
+
+#define REG_A3XX_RBBM_PERFCTR_RB_0_HI 0x000000e3
+
+#define REG_A3XX_RBBM_PERFCTR_RB_1_LO 0x000000e4
+
+#define REG_A3XX_RBBM_PERFCTR_RB_1_HI 0x000000e5
+
+#define REG_A3XX_RBBM_PERFCTR_PWR_0_LO 0x000000ea
+
+#define REG_A3XX_RBBM_PERFCTR_PWR_0_HI 0x000000eb
+
+#define REG_A3XX_RBBM_PERFCTR_PWR_1_LO 0x000000ec
+
+#define REG_A3XX_RBBM_PERFCTR_PWR_1_HI 0x000000ed
+
+#define REG_A3XX_RBBM_RBBM_CTL 0x00000100
+
+#define REG_A3XX_RBBM_DEBUG_BUS_CTL 0x00000111
+
+#define REG_A3XX_RBBM_DEBUG_BUS_DATA_STATUS 0x00000112
+
+#define REG_A3XX_CP_PFP_UCODE_ADDR 0x000001c9
+
+#define REG_A3XX_CP_PFP_UCODE_DATA 0x000001ca
+
+#define REG_A3XX_CP_ROQ_ADDR 0x000001cc
+
+#define REG_A3XX_CP_ROQ_DATA 0x000001cd
+
+#define REG_A3XX_CP_MERCIU_ADDR 0x000001d1
+
+#define REG_A3XX_CP_MERCIU_DATA 0x000001d2
+
+#define REG_A3XX_CP_MERCIU_DATA2 0x000001d3
+
+#define REG_A3XX_CP_MEQ_ADDR 0x000001da
+
+#define REG_A3XX_CP_MEQ_DATA 0x000001db
+
+#define REG_A3XX_CP_WFI_PEND_CTR 0x000001f5
+
+#define REG_A3XX_RBBM_PM_OVERRIDE2 0x0000039d
+
+#define REG_A3XX_CP_PERFCOUNTER_SELECT 0x00000445
+
+#define REG_A3XX_CP_HW_FAULT 0x0000045c
+
+#define REG_A3XX_CP_PROTECT_CTRL 0x0000045e
+
+#define REG_A3XX_CP_PROTECT_STATUS 0x0000045f
+
+static inline uint32_t REG_A3XX_CP_PROTECT(uint32_t i0) { return 0x00000460 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460 + 0x1*i0; }
+
+#define REG_A3XX_CP_AHB_FAULT 0x0000054d
+
+#define REG_A3XX_SQ_GPR_MANAGEMENT 0x00000d00
+
+#define REG_A3XX_SQ_INST_STORE_MANAGMENT 0x00000d02
+
+#define REG_A3XX_TP0_CHICKEN 0x00000e1e
+
+#define REG_A3XX_SP_GLOBAL_MEM_SIZE 0x00000e22
+
+#define REG_A3XX_SP_GLOBAL_MEM_ADDR 0x00000e23
+
+#define REG_A3XX_GRAS_CL_CLIP_CNTL 0x00002040
+#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 0x00001000
+#define A3XX_GRAS_CL_CLIP_CNTL_IJ_NON_PERSP_CENTER 0x00002000
+#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTROID 0x00004000
+#define A3XX_GRAS_CL_CLIP_CNTL_IJ_NON_PERSP_CENTROID 0x00008000
+#define A3XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
+#define A3XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
+#define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000
+#define A3XX_GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 0x00100000
+#define A3XX_GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 0x00200000
+#define A3XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000
+#define A3XX_GRAS_CL_CLIP_CNTL_ZCOORD 0x00800000
+#define A3XX_GRAS_CL_CLIP_CNTL_WCOORD 0x01000000
+#define A3XX_GRAS_CL_CLIP_CNTL_ZCLIP_DISABLE 0x02000000
+#define A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__MASK 0x1c000000
+#define A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__SHIFT 26
+static inline uint32_t A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__SHIFT) & A3XX_GRAS_CL_CLIP_CNTL_NUM_USER_CLIP_PLANES__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_GB_CLIP_ADJ 0x00002044
+#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK;
+}
+#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_XOFFSET 0x00002048
+#define A3XX_GRAS_CL_VPORT_XOFFSET__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_XOFFSET(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_XOFFSET__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_XSCALE 0x00002049
+#define A3XX_GRAS_CL_VPORT_XSCALE__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_XSCALE__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_XSCALE(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_XSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_XSCALE__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_YOFFSET 0x0000204a
+#define A3XX_GRAS_CL_VPORT_YOFFSET__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_YOFFSET(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_YOFFSET__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_YSCALE 0x0000204b
+#define A3XX_GRAS_CL_VPORT_YSCALE__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_YSCALE__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_YSCALE(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_YSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_YSCALE__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_ZOFFSET 0x0000204c
+#define A3XX_GRAS_CL_VPORT_ZOFFSET__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_ZOFFSET(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_ZOFFSET__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_ZSCALE 0x0000204d
+#define A3XX_GRAS_CL_VPORT_ZSCALE__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_ZSCALE__MASK;
+}
+
+#define REG_A3XX_GRAS_SU_POINT_MINMAX 0x00002068
+#define A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069
+#define A3XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A3XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((int32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_SIZE__SHIFT) & A3XX_GRAS_SU_POINT_SIZE__MASK;
+}
+
+#define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c
+#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK 0x00ffffff
+#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT 0
+static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
+{
+ return ((((int32_t)(val * 1048576.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
+}
+
+#define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000206d
+#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((((int32_t)(val * 64.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070
+#define A3XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
+#define A3XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
+#define A3XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004
+#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8
+#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
+{
+ return ((((int32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
+}
+#define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
+
+#define REG_A3XX_GRAS_SC_CONTROL 0x00002072
+#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x000000f0
+#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 4
+static inline uint32_t A3XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
+{
+ return ((val) << A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK;
+}
+#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000f00
+#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 8
+static inline uint32_t A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK;
+}
+#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000
+#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12
+static inline uint32_t A3XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK;
+}
+
+#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_TL 0x00002074
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
+}
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_BR 0x00002075
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
+}
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_TL 0x00002079
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000207a
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A3XX_RB_MODE_CONTROL 0x000020c0
+#define A3XX_RB_MODE_CONTROL_GMEM_BYPASS 0x00000080
+#define A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK 0x00000700
+#define A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT 8
+static inline uint32_t A3XX_RB_MODE_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
+{
+ return ((val) << A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT) & A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK;
+}
+#define A3XX_RB_MODE_CONTROL_MRT__MASK 0x00003000
+#define A3XX_RB_MODE_CONTROL_MRT__SHIFT 12
+static inline uint32_t A3XX_RB_MODE_CONTROL_MRT(uint32_t val)
+{
+ return ((val) << A3XX_RB_MODE_CONTROL_MRT__SHIFT) & A3XX_RB_MODE_CONTROL_MRT__MASK;
+}
+#define A3XX_RB_MODE_CONTROL_MARB_CACHE_SPLIT_MODE 0x00008000
+#define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000
+
+#define REG_A3XX_RB_RENDER_CONTROL 0x000020c1
+#define A3XX_RB_RENDER_CONTROL_DUAL_COLOR_IN_ENABLE 0x00000001
+#define A3XX_RB_RENDER_CONTROL_YUV_IN_ENABLE 0x00000002
+#define A3XX_RB_RENDER_CONTROL_COV_VALUE_INPUT_ENABLE 0x00000004
+#define A3XX_RB_RENDER_CONTROL_FACENESS 0x00000008
+#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK 0x00000ff0
+#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4
+static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT) & A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK;
+}
+#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000
+#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000
+#define A3XX_RB_RENDER_CONTROL_COORD_MASK__MASK 0x0003c000
+#define A3XX_RB_RENDER_CONTROL_COORD_MASK__SHIFT 14
+static inline uint32_t A3XX_RB_RENDER_CONTROL_COORD_MASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_RENDER_CONTROL_COORD_MASK__SHIFT) & A3XX_RB_RENDER_CONTROL_COORD_MASK__MASK;
+}
+#define A3XX_RB_RENDER_CONTROL_I_CLAMP_ENABLE 0x00080000
+#define A3XX_RB_RENDER_CONTROL_COV_VALUE_OUTPUT_ENABLE 0x00100000
+#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST 0x00400000
+#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000
+#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24
+static inline uint32_t A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+#define A3XX_RB_RENDER_CONTROL_ALPHA_TO_COVERAGE 0x40000000
+#define A3XX_RB_RENDER_CONTROL_ALPHA_TO_ONE 0x80000000
+
+#define REG_A3XX_RB_MSAA_CONTROL 0x000020c2
+#define A3XX_RB_MSAA_CONTROL_DISABLE 0x00000400
+#define A3XX_RB_MSAA_CONTROL_SAMPLES__MASK 0x0000f000
+#define A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT 12
+static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLES__MASK;
+}
+#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK 0xffff0000
+#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK;
+}
+
+#define REG_A3XX_RB_ALPHA_REF 0x000020c3
+#define A3XX_RB_ALPHA_REF_UINT__MASK 0x0000ff00
+#define A3XX_RB_ALPHA_REF_UINT__SHIFT 8
+static inline uint32_t A3XX_RB_ALPHA_REF_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_ALPHA_REF_UINT__SHIFT) & A3XX_RB_ALPHA_REF_UINT__MASK;
+}
+#define A3XX_RB_ALPHA_REF_FLOAT__MASK 0xffff0000
+#define A3XX_RB_ALPHA_REF_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_ALPHA_REF_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A3XX_RB_ALPHA_REF_FLOAT__SHIFT) & A3XX_RB_ALPHA_REF_FLOAT__MASK;
+}
+
+static inline uint32_t REG_A3XX_RB_MRT(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
+#define A3XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
+#define A3XX_RB_MRT_CONTROL_BLEND 0x00000010
+#define A3XX_RB_MRT_CONTROL_BLEND2 0x00000020
+#define A3XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00
+#define A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8
+static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+ return ((val) << A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A3XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
+#define A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK 0x00003000
+#define A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT 12
+static inline uint32_t A3XX_RB_MRT_CONTROL_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT) & A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK;
+}
+#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000
+#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24
+static inline uint32_t A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A3XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020c5 + 0x4*i0; }
+#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000003f
+#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a3xx_color_fmt val)
+{
+ return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x000000c0
+#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 6
+static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a3xx_tile_mode val)
+{
+ return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+#define A3XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00004000
+#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xfffe0000
+#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 17
+static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
+}
+
+static inline uint32_t REG_A3XX_RB_MRT_BUF_BASE(uint32_t i0) { return 0x000020c6 + 0x4*i0; }
+#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK 0xfffffff0
+#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT 4
+static inline uint32_t A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT) & A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK;
+}
+
+static inline uint32_t REG_A3XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020c7 + 0x4*i0; }
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_CLAMP_ENABLE 0x20000000
+
+#define REG_A3XX_RB_BLEND_RED 0x000020e4
+#define A3XX_RB_BLEND_RED_UINT__MASK 0x000000ff
+#define A3XX_RB_BLEND_RED_UINT__SHIFT 0
+static inline uint32_t A3XX_RB_BLEND_RED_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_BLEND_RED_UINT__SHIFT) & A3XX_RB_BLEND_RED_UINT__MASK;
+}
+#define A3XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
+#define A3XX_RB_BLEND_RED_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_BLEND_RED_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_RED_FLOAT__SHIFT) & A3XX_RB_BLEND_RED_FLOAT__MASK;
+}
+
+#define REG_A3XX_RB_BLEND_GREEN 0x000020e5
+#define A3XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
+#define A3XX_RB_BLEND_GREEN_UINT__SHIFT 0
+static inline uint32_t A3XX_RB_BLEND_GREEN_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_BLEND_GREEN_UINT__SHIFT) & A3XX_RB_BLEND_GREEN_UINT__MASK;
+}
+#define A3XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
+#define A3XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_BLEND_GREEN_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A3XX_RB_BLEND_GREEN_FLOAT__MASK;
+}
+
+#define REG_A3XX_RB_BLEND_BLUE 0x000020e6
+#define A3XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
+#define A3XX_RB_BLEND_BLUE_UINT__SHIFT 0
+static inline uint32_t A3XX_RB_BLEND_BLUE_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_BLEND_BLUE_UINT__SHIFT) & A3XX_RB_BLEND_BLUE_UINT__MASK;
+}
+#define A3XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
+#define A3XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_BLEND_BLUE_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A3XX_RB_BLEND_BLUE_FLOAT__MASK;
+}
+
+#define REG_A3XX_RB_BLEND_ALPHA 0x000020e7
+#define A3XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
+#define A3XX_RB_BLEND_ALPHA_UINT__SHIFT 0
+static inline uint32_t A3XX_RB_BLEND_ALPHA_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_BLEND_ALPHA_UINT__SHIFT) & A3XX_RB_BLEND_ALPHA_UINT__MASK;
+}
+#define A3XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
+#define A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_BLEND_ALPHA_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK;
+}
+
+#define REG_A3XX_RB_CLEAR_COLOR_DW0 0x000020e8
+
+#define REG_A3XX_RB_CLEAR_COLOR_DW1 0x000020e9
+
+#define REG_A3XX_RB_CLEAR_COLOR_DW2 0x000020ea
+
+#define REG_A3XX_RB_CLEAR_COLOR_DW3 0x000020eb
+
+#define REG_A3XX_RB_COPY_CONTROL 0x000020ec
+#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003
+#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT 0
+static inline uint32_t A3XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val)
+{
+ return ((val) << A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK;
+}
+#define A3XX_RB_COPY_CONTROL_DEPTHCLEAR 0x00000008
+#define A3XX_RB_COPY_CONTROL_MODE__MASK 0x00000070
+#define A3XX_RB_COPY_CONTROL_MODE__SHIFT 4
+static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
+{
+ return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
+}
+#define A3XX_RB_COPY_CONTROL_MSAA_SRGB_DOWNSAMPLE 0x00000080
+#define A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00
+#define A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8
+static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
+{
+ return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
+}
+#define A3XX_RB_COPY_CONTROL_DEPTH32_RESOLVE 0x00001000
+#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
+#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
+static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
+{
+ return ((val >> 14) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
+}
+
+#define REG_A3XX_RB_COPY_DEST_BASE 0x000020ed
+#define A3XX_RB_COPY_DEST_BASE_BASE__MASK 0xfffffff0
+#define A3XX_RB_COPY_DEST_BASE_BASE__SHIFT 4
+static inline uint32_t A3XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A3XX_RB_COPY_DEST_BASE_BASE__MASK;
+}
+
+#define REG_A3XX_RB_COPY_DEST_PITCH 0x000020ee
+#define A3XX_RB_COPY_DEST_PITCH_PITCH__MASK 0xffffffff
+#define A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0
+static inline uint32_t A3XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A3XX_RB_COPY_DEST_PITCH_PITCH__MASK;
+}
+
+#define REG_A3XX_RB_COPY_DEST_INFO 0x000020ef
+#define A3XX_RB_COPY_DEST_INFO_TILE__MASK 0x00000003
+#define A3XX_RB_COPY_DEST_INFO_TILE__SHIFT 0
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_TILE(enum a3xx_tile_mode val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A3XX_RB_COPY_DEST_INFO_TILE__MASK;
+}
+#define A3XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000fc
+#define A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 2
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_FORMAT(enum a3xx_color_fmt val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A3XX_RB_COPY_DEST_INFO_FORMAT__MASK;
+}
+#define A3XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
+#define A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A3XX_RB_COPY_DEST_INFO_SWAP__MASK;
+}
+#define A3XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
+#define A3XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A3XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
+}
+#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000
+#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK;
+}
+#define A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK 0x001c0000
+#define A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT 18
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK;
+}
+
+#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100
+#define A3XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z 0x00000001
+#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x00000002
+#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
+#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00000008
+#define A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070
+#define A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4
+static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
+}
+#define A3XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080
+#define A3XX_RB_DEPTH_CONTROL_Z_READ_ENABLE 0x80000000
+
+#define REG_A3XX_RB_DEPTH_CLEAR 0x00002101
+
+#define REG_A3XX_RB_DEPTH_INFO 0x00002102
+#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000003
+#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
+{
+ return ((val) << A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
+}
+#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff800
+#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 11
+static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
+{
+ return ((val >> 12) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+}
+
+#define REG_A3XX_RB_DEPTH_PITCH 0x00002103
+#define A3XX_RB_DEPTH_PITCH__MASK 0xffffffff
+#define A3XX_RB_DEPTH_PITCH__SHIFT 0
+static inline uint32_t A3XX_RB_DEPTH_PITCH(uint32_t val)
+{
+ return ((val >> 3) << A3XX_RB_DEPTH_PITCH__SHIFT) & A3XX_RB_DEPTH_PITCH__MASK;
+}
+
+#define REG_A3XX_RB_STENCIL_CONTROL 0x00002104
+#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A3XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A3XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
+#define REG_A3XX_RB_STENCIL_CLEAR 0x00002105
+
+#define REG_A3XX_RB_STENCIL_INFO 0x00002106
+#define A3XX_RB_STENCIL_INFO_STENCIL_BASE__MASK 0xfffff800
+#define A3XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 11
+static inline uint32_t A3XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
+{
+ return ((val >> 12) << A3XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A3XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
+}
+
+#define REG_A3XX_RB_STENCIL_PITCH 0x00002107
+#define A3XX_RB_STENCIL_PITCH__MASK 0xffffffff
+#define A3XX_RB_STENCIL_PITCH__SHIFT 0
+static inline uint32_t A3XX_RB_STENCIL_PITCH(uint32_t val)
+{
+ return ((val >> 3) << A3XX_RB_STENCIL_PITCH__SHIFT) & A3XX_RB_STENCIL_PITCH__MASK;
+}
+
+#define REG_A3XX_RB_STENCILREFMASK 0x00002108
+#define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A3XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A3XX_RB_STENCILREFMASK_BF 0x00002109
+#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
+#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
+static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
+}
+#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
+#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
+static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
+}
+#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
+#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A3XX_RB_LRZ_VSC_CONTROL 0x0000210c
+#define A3XX_RB_LRZ_VSC_CONTROL_BINNING_ENABLE 0x00000002
+
+#define REG_A3XX_RB_WINDOW_OFFSET 0x0000210e
+#define A3XX_RB_WINDOW_OFFSET_X__MASK 0x0000ffff
+#define A3XX_RB_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A3XX_RB_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A3XX_RB_WINDOW_OFFSET_X__SHIFT) & A3XX_RB_WINDOW_OFFSET_X__MASK;
+}
+#define A3XX_RB_WINDOW_OFFSET_Y__MASK 0xffff0000
+#define A3XX_RB_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A3XX_RB_WINDOW_OFFSET_Y__SHIFT) & A3XX_RB_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A3XX_RB_SAMPLE_COUNT_CONTROL 0x00002110
+#define A3XX_RB_SAMPLE_COUNT_CONTROL_RESET 0x00000001
+#define A3XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
+
+#define REG_A3XX_RB_SAMPLE_COUNT_ADDR 0x00002111
+
+#define REG_A3XX_RB_Z_CLAMP_MIN 0x00002114
+
+#define REG_A3XX_RB_Z_CLAMP_MAX 0x00002115
+
+#define REG_A3XX_VGT_BIN_BASE 0x000021e1
+
+#define REG_A3XX_VGT_BIN_SIZE 0x000021e2
+
+#define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4
+#define A3XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000
+#define A3XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16
+static inline uint32_t A3XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val)
+{
+ return ((val) << A3XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A3XX_PC_VSTREAM_CONTROL_SIZE__MASK;
+}
+#define A3XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000
+#define A3XX_PC_VSTREAM_CONTROL_N__SHIFT 22
+static inline uint32_t A3XX_PC_VSTREAM_CONTROL_N(uint32_t val)
+{
+ return ((val) << A3XX_PC_VSTREAM_CONTROL_N__SHIFT) & A3XX_PC_VSTREAM_CONTROL_N__MASK;
+}
+
+#define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea
+
+#define REG_A3XX_PC_PRIM_VTX_CNTL 0x000021ec
+#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK 0x0000001f
+#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK 0x000000e0
+#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT 5
+static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK;
+}
+#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK 0x00000700
+#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT 8
+static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK;
+}
+#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_ENABLE 0x00001000
+#define A3XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART 0x00100000
+#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
+#define A3XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
+
+#define REG_A3XX_PC_RESTART_INDEX 0x000021ed
+
+#define REG_A3XX_HLSQ_CONTROL_0_REG 0x00002200
+#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000030
+#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
+}
+#define A3XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
+#define A3XX_HLSQ_CONTROL_0_REG_COMPUTEMODE 0x00000100
+#define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
+#define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
+#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK 0x00fff000
+#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT 12
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK;
+}
+#define A3XX_HLSQ_CONTROL_0_REG_FSONLYTEX 0x02000000
+#define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
+#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000
+#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK;
+}
+#define A3XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
+#define A3XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
+#define A3XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
+#define A3XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000
+
+#define REG_A3XX_HLSQ_CONTROL_1_REG 0x00002201
+#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x000000c0
+#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
+}
+#define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK 0x00ff0000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK 0xff000000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT 24
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK;
+}
+
+#define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202
+#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK 0x000003fc
+#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT 2
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK 0x03fc0000
+#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT 18
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
+#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK;
+}
+
+#define REG_A3XX_HLSQ_CONTROL_3_REG 0x00002203
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__MASK 0x000000ff
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTERREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__MASK 0x0000ff00
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__SHIFT 8
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTERREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__MASK 0x00ff0000
+#define A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJPERSPCENTROIDREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__MASK 0xff000000
+#define A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__SHIFT 24
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_IJNONPERSPCENTROIDREGID__MASK;
+}
+
+#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x000003ff
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x001ff000
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
+static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK;
+}
+#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A3XX_HLSQ_FS_CONTROL_REG 0x00002205
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x000003ff
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x001ff000
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
+static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK;
+}
+#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG 0x00002206
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x000001ff
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK;
+}
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0x01ff0000
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK;
+}
+
+#define REG_A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x00002207
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x000001ff
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK;
+}
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0x01ff0000
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK;
+}
+
+#define REG_A3XX_HLSQ_CL_NDRANGE_0_REG 0x0000220a
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__MASK 0x00000003
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__MASK;
+}
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__MASK 0x00000ffc
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__SHIFT 2
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__MASK;
+}
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__MASK 0x003ff000
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__SHIFT 12
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__MASK;
+}
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__MASK 0xffc00000
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__SHIFT 22
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__MASK;
+}
+
+static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK(uint32_t i0) { return 0x0000220b + 0x2*i0; }
+
+static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_SIZE(uint32_t i0) { return 0x0000220b + 0x2*i0; }
+
+static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_OFFSET(uint32_t i0) { return 0x0000220c + 0x2*i0; }
+
+#define REG_A3XX_HLSQ_CL_CONTROL_0_REG 0x00002211
+
+#define REG_A3XX_HLSQ_CL_CONTROL_1_REG 0x00002212
+
+#define REG_A3XX_HLSQ_CL_KERNEL_CONST_REG 0x00002214
+
+static inline uint32_t REG_A3XX_HLSQ_CL_KERNEL_GROUP(uint32_t i0) { return 0x00002215 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_HLSQ_CL_KERNEL_GROUP_RATIO(uint32_t i0) { return 0x00002215 + 0x1*i0; }
+
+#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG 0x00002216
+
+#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x00002217
+
+#define REG_A3XX_HLSQ_CL_WG_OFFSET_REG 0x0000221a
+
+#define REG_A3XX_VFD_CONTROL_0 0x00002240
+#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x0003ffff
+#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0
+static inline uint32_t A3XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT) & A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK;
+}
+#define A3XX_VFD_CONTROL_0_PACKETSIZE__MASK 0x003c0000
+#define A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT 18
+static inline uint32_t A3XX_VFD_CONTROL_0_PACKETSIZE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT) & A3XX_VFD_CONTROL_0_PACKETSIZE__MASK;
+}
+#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x07c00000
+#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 22
+static inline uint32_t A3XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
+}
+#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK 0xf8000000
+#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT 27
+static inline uint32_t A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK;
+}
+
+#define REG_A3XX_VFD_CONTROL_1 0x00002241
+#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000000f
+#define A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
+static inline uint32_t A3XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
+}
+#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK 0x000000f0
+#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT 4
+static inline uint32_t A3XX_VFD_CONTROL_1_MAXTHRESHOLD(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK;
+}
+#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK 0x00000f00
+#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT 8
+static inline uint32_t A3XX_VFD_CONTROL_1_MINTHRESHOLD(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK;
+}
+#define A3XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
+#define A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
+static inline uint32_t A3XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A3XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+#define A3XX_VFD_CONTROL_1_REGID4INST__MASK 0xff000000
+#define A3XX_VFD_CONTROL_1_REGID4INST__SHIFT 24
+static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A3XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+
+#define REG_A3XX_VFD_INDEX_MIN 0x00002242
+
+#define REG_A3XX_VFD_INDEX_MAX 0x00002243
+
+#define REG_A3XX_VFD_INSTANCEID_OFFSET 0x00002244
+
+#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
+
+static inline uint32_t REG_A3XX_VFD_FETCH(uint32_t i0) { return 0x00002246 + 0x2*i0; }
+
+static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; }
+#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f
+#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT 0
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK;
+}
+#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0000ff80
+#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK;
+}
+#define A3XX_VFD_FETCH_INSTR_0_INSTANCED 0x00010000
+#define A3XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00020000
+#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK 0x00fc0000
+#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT 18
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_INDEXCODE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK;
+}
+#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK 0xff000000
+#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT 24
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK;
+}
+
+static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x00002247 + 0x2*i0; }
+
+static inline uint32_t REG_A3XX_VFD_DECODE(uint32_t i0) { return 0x00002266 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x00002266 + 0x1*i0; }
+#define A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f
+#define A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A3XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_CONSTFILL 0x00000010
+#define A3XX_VFD_DECODE_INSTR_FORMAT__MASK 0x00000fc0
+#define A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT 6
+static inline uint32_t A3XX_VFD_DECODE_INSTR_FORMAT(enum a3xx_vtx_fmt val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A3XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_REGID__MASK 0x000ff000
+#define A3XX_VFD_DECODE_INSTR_REGID__SHIFT 12
+static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_INT 0x00100000
+#define A3XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000
+#define A3XX_VFD_DECODE_INSTR_SWAP__SHIFT 22
+static inline uint32_t A3XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A3XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
+#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
+static inline uint32_t A3XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_LASTCOMPVALID 0x20000000
+#define A3XX_VFD_DECODE_INSTR_SWITCHNEXT 0x40000000
+
+#define REG_A3XX_VFD_VS_THREADING_THRESHOLD 0x0000227e
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK 0x0000000f
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT 0
+static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD(uint32_t val)
+{
+ return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK;
+}
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK 0x0000ff00
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT 8
+static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT(uint32_t val)
+{
+ return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK;
+}
+
+#define REG_A3XX_VPC_ATTR 0x00002280
+#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x000001ff
+#define A3XX_VPC_ATTR_TOTALATTR__SHIFT 0
+static inline uint32_t A3XX_VPC_ATTR_TOTALATTR(uint32_t val)
+{
+ return ((val) << A3XX_VPC_ATTR_TOTALATTR__SHIFT) & A3XX_VPC_ATTR_TOTALATTR__MASK;
+}
+#define A3XX_VPC_ATTR_PSIZE 0x00000200
+#define A3XX_VPC_ATTR_THRDASSIGN__MASK 0x0ffff000
+#define A3XX_VPC_ATTR_THRDASSIGN__SHIFT 12
+static inline uint32_t A3XX_VPC_ATTR_THRDASSIGN(uint32_t val)
+{
+ return ((val) << A3XX_VPC_ATTR_THRDASSIGN__SHIFT) & A3XX_VPC_ATTR_THRDASSIGN__MASK;
+}
+#define A3XX_VPC_ATTR_LMSIZE__MASK 0xf0000000
+#define A3XX_VPC_ATTR_LMSIZE__SHIFT 28
+static inline uint32_t A3XX_VPC_ATTR_LMSIZE(uint32_t val)
+{
+ return ((val) << A3XX_VPC_ATTR_LMSIZE__SHIFT) & A3XX_VPC_ATTR_LMSIZE__MASK;
+}
+
+#define REG_A3XX_VPC_PACK 0x00002281
+#define A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK 0x0000ff00
+#define A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT 8
+static inline uint32_t A3XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t val)
+{
+ return ((val) << A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT) & A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK;
+}
+#define A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK 0x00ff0000
+#define A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT 16
+static inline uint32_t A3XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
+{
+ return ((val) << A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
+}
+
+static inline uint32_t REG_A3XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002282 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002282 + 0x1*i0; }
+#define A3XX_VPC_VARYING_INTERP_MODE_C0__MASK 0x00000003
+#define A3XX_VPC_VARYING_INTERP_MODE_C0__SHIFT 0
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C0(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C0__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C0__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C1__MASK 0x0000000c
+#define A3XX_VPC_VARYING_INTERP_MODE_C1__SHIFT 2
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C1(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C1__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C1__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C2__MASK 0x00000030
+#define A3XX_VPC_VARYING_INTERP_MODE_C2__SHIFT 4
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C2(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C2__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C2__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C3__MASK 0x000000c0
+#define A3XX_VPC_VARYING_INTERP_MODE_C3__SHIFT 6
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C3(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C3__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C3__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C4__MASK 0x00000300
+#define A3XX_VPC_VARYING_INTERP_MODE_C4__SHIFT 8
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C4(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C4__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C4__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C5__MASK 0x00000c00
+#define A3XX_VPC_VARYING_INTERP_MODE_C5__SHIFT 10
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C5(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C5__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C5__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C6__MASK 0x00003000
+#define A3XX_VPC_VARYING_INTERP_MODE_C6__SHIFT 12
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C6(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C6__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C6__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C7__MASK 0x0000c000
+#define A3XX_VPC_VARYING_INTERP_MODE_C7__SHIFT 14
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C7(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C7__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C7__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C8__MASK 0x00030000
+#define A3XX_VPC_VARYING_INTERP_MODE_C8__SHIFT 16
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C8(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C8__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C8__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C9__MASK 0x000c0000
+#define A3XX_VPC_VARYING_INTERP_MODE_C9__SHIFT 18
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C9(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C9__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C9__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CA__MASK 0x00300000
+#define A3XX_VPC_VARYING_INTERP_MODE_CA__SHIFT 20
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CA(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CA__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CA__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CB__MASK 0x00c00000
+#define A3XX_VPC_VARYING_INTERP_MODE_CB__SHIFT 22
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CB(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CB__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CB__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CC__MASK 0x03000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CC__SHIFT 24
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CC(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CC__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CC__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CD__MASK 0x0c000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CD__SHIFT 26
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CD(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CD__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CD__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CE__MASK 0x30000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CE__SHIFT 28
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CE(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CE__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CE__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CF__MASK 0xc0000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT 30
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CF(enum a3xx_intp_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CF__MASK;
+}
+
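+/* Illustrative usage (not part of the generated output): each VARYING_INTERP_MODE
+ * register packs sixteen 2-bit a3xx_intp_mode fields (C0..CF), so a full register
+ * value is built by OR-ing the per-field helpers together.  A minimal sketch,
+ * assuming the a3xx_intp_mode enumerators defined earlier in this header
+ * (names shown are illustrative) and the driver's gpu_write() helper:
+ *
+ *   uint32_t val = A3XX_VPC_VARYING_INTERP_MODE_C0(FLAT) |
+ *                  A3XX_VPC_VARYING_INTERP_MODE_C1(SMOOTH);
+ *   gpu_write(gpu, REG_A3XX_VPC_VARYING_INTERP_MODE(0), val);
+ */
+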
+static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00002286 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00002286 + 0x1*i0; }
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C0__MASK 0x00000003
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C0__SHIFT 0
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C0(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C0__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C0__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C1__MASK 0x0000000c
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C1__SHIFT 2
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C1(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C1__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C1__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C2__MASK 0x00000030
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C2__SHIFT 4
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C2(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C2__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C2__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C3__MASK 0x000000c0
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C3__SHIFT 6
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C3(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C3__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C3__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C4__MASK 0x00000300
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C4__SHIFT 8
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C4(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C4__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C4__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C5__MASK 0x00000c00
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C5__SHIFT 10
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C5(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C5__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C5__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C6__MASK 0x00003000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C6__SHIFT 12
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C6(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C6__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C6__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C7__MASK 0x0000c000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C7__SHIFT 14
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C7(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C7__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C7__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C8__MASK 0x00030000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C8__SHIFT 16
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C8(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C8__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C8__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C9__MASK 0x000c0000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_C9__SHIFT 18
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C9(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C9__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C9__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CA__MASK 0x00300000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CA__SHIFT 20
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CA(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CA__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CA__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CB__MASK 0x00c00000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CB__SHIFT 22
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CB(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CB__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CB__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CC__MASK 0x03000000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CC__SHIFT 24
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CC(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CC__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CC__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CD__MASK 0x0c000000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CD__SHIFT 26
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CD(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CD__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CD__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CE__MASK 0x30000000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CE__SHIFT 28
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CE(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CE__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CE__MASK;
+}
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CF__MASK 0xc0000000
+#define A3XX_VPC_VARYING_PS_REPL_MODE_CF__SHIFT 30
+static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CF(enum a3xx_repl_mode val)
+{
+ return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CF__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CF__MASK;
+}
+
+#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_0 0x0000228a
+
+#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_1 0x0000228b
+
+#define REG_A3XX_SP_SP_CTRL_REG 0x000022c0
+#define A3XX_SP_SP_CTRL_REG_RESOLVE 0x00010000
+#define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x00040000
+#define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT 18
+static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t val)
+{
+ return ((val) << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK;
+}
+#define A3XX_SP_SP_CTRL_REG_BINNING 0x00080000
+#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK 0x00300000
+#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT 20
+static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val)
+{
+ return ((val) << A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK;
+}
+#define A3XX_SP_SP_CTRL_REG_L0MODE__MASK 0x00c00000
+#define A3XX_SP_SP_CTRL_REG_L0MODE__SHIFT 22
+static inline uint32_t A3XX_SP_SP_CTRL_REG_L0MODE(uint32_t val)
+{
+ return ((val) << A3XX_SP_SP_CTRL_REG_L0MODE__SHIFT) & A3XX_SP_SP_CTRL_REG_L0MODE__MASK;
+}
+
+#define REG_A3XX_SP_VS_CTRL_REG0 0x000022c4
+#define A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002
+#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A3XX_SP_VS_CTRL_REG0_ALUSCHMODE 0x00000008
+#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000
+#define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG0_LENGTH__MASK;
+}
+
+#define REG_A3XX_SP_VS_CTRL_REG1 0x000022c5
+#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff
+#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00
+#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x7f000000
+#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24
+static inline uint32_t A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK;
+}
+
+#define REG_A3XX_SP_VS_PARAM_REG 0x000022c6
+#define A3XX_SP_VS_PARAM_REG_POSREGID__MASK 0x000000ff
+#define A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT 0
+static inline uint32_t A3XX_SP_VS_PARAM_REG_POSREGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_POSREGID__MASK;
+}
+#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK 0x0000ff00
+#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT 8
+static inline uint32_t A3XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
+}
+#define A3XX_SP_VS_PARAM_REG_POS2DMODE 0x00010000
+#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0x01f00000
+#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20
+static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A3XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A3XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A3XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A3XX_SP_VS_OUT_REG_A_HALF 0x00000100
+#define A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00
+#define A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9
+static inline uint32_t A3XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A3XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A3XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A3XX_SP_VS_OUT_REG_B_HALF 0x01000000
+#define A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
+#define A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
+static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
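+/* Illustrative usage (not part of the generated output): each SP_VS_OUT_REG
+ * describes two vertex shader outputs, A in the low half of the dword and B in
+ * the high half, so consecutive outputs are packed in pairs.  A minimal sketch,
+ * where regid_a/regid_b stand in for hypothetical shader register ids:
+ *
+ *   uint32_t val = A3XX_SP_VS_OUT_REG_A_REGID(regid_a) |
+ *                  A3XX_SP_VS_OUT_REG_A_COMPMASK(0xf) |
+ *                  A3XX_SP_VS_OUT_REG_B_REGID(regid_b) |
+ *                  A3XX_SP_VS_OUT_REG_B_COMPMASK(0xf);
+ */
+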
+static inline uint32_t REG_A3XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x0000007f
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x00007f00
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x007f0000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0x7f000000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A3XX_SP_VS_OBJ_OFFSET_REG 0x000022d4
+#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK 0x0000ffff
+#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT 0
+static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK;
+}
+#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5
+
+#define REG_A3XX_SP_VS_PVT_MEM_PARAM_REG 0x000022d6
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK 0x000000ff
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK 0x00ffff00
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT 8
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK 0x0000001f
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT 0
+static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK 0xffffffe0
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5
+static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
+{
+ return ((val >> 5) << A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
+}
+
+#define REG_A3XX_SP_VS_PVT_MEM_SIZE_REG 0x000022d8
+
+#define REG_A3XX_SP_VS_LENGTH_REG 0x000022df
+#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff
+#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_VS_LENGTH_REG_SHADERLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK;
+}
+
+#define REG_A3XX_SP_FS_CTRL_REG0 0x000022e0
+#define A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002
+#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A3XX_SP_FS_CTRL_REG0_ALUSCHMODE 0x00000008
+#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_FSBYPASSENABLE 0x00020000
+#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP 0x00040000
+#define A3XX_SP_FS_CTRL_REG0_OUTORDERED 0x00080000
+#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A3XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
+#define A3XX_SP_FS_CTRL_REG0_COMPUTEMODE 0x00800000
+#define A3XX_SP_FS_CTRL_REG0_LENGTH__MASK 0xff000000
+#define A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT 24
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_LENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG0_LENGTH__MASK;
+}
+
+#define REG_A3XX_SP_FS_CTRL_REG1 0x000022e1
+#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff
+#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00
+#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x00f00000
+#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 20
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x7f000000
+#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT 24
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT) & A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK;
+}
+
+#define REG_A3XX_SP_FS_OBJ_OFFSET_REG 0x000022e2
+#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK 0x0000ffff
+#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT 0
+static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK;
+}
+#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3
+
+#define REG_A3XX_SP_FS_PVT_MEM_PARAM_REG 0x000022e4
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK 0x000000ff
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK 0x00ffff00
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT 8
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK 0x0000001f
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT 0
+static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK 0xffffffe0
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5
+static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
+{
+ return ((val >> 5) << A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
+}
+
+#define REG_A3XX_SP_FS_PVT_MEM_SIZE_REG 0x000022e6
+
+#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_0 0x000022e8
+
+#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x000022e9
+
+#define REG_A3XX_SP_FS_OUTPUT_REG 0x000022ec
+#define A3XX_SP_FS_OUTPUT_REG_MRT__MASK 0x00000003
+#define A3XX_SP_FS_OUTPUT_REG_MRT__SHIFT 0
+static inline uint32_t A3XX_SP_FS_OUTPUT_REG_MRT(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OUTPUT_REG_MRT__SHIFT) & A3XX_SP_FS_OUTPUT_REG_MRT__MASK;
+}
+#define A3XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080
+#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00
+#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8
+static inline uint32_t A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK;
+}
+
+static inline uint32_t REG_A3XX_SP_FS_MRT(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
+#define A3XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff
+#define A3XX_SP_FS_MRT_REG_REGID__SHIFT 0
+static inline uint32_t A3XX_SP_FS_MRT_REG_REGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_MRT_REG_REGID__SHIFT) & A3XX_SP_FS_MRT_REG_REGID__MASK;
+}
+#define A3XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100
+#define A3XX_SP_FS_MRT_REG_SINT 0x00000400
+#define A3XX_SP_FS_MRT_REG_UINT 0x00000800
+
+static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT_REG(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
+#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK 0x0000003f
+#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT 0
+static inline uint32_t A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT(enum a3xx_color_fmt val)
+{
+ return ((val) << A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT) & A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK;
+}
+
+#define REG_A3XX_SP_FS_LENGTH_REG 0x000022ff
+#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff
+#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_FS_LENGTH_REG_SHADERLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK;
+}
+
+#define REG_A3XX_PA_SC_AA_CONFIG 0x00002301
+
+#define REG_A3XX_TPL1_TP_VS_TEX_OFFSET 0x00002340
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
+static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK;
+}
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8
+static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK;
+}
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT 16
+static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK;
+}
+
+#define REG_A3XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR 0x00002341
+
+#define REG_A3XX_TPL1_TP_FS_TEX_OFFSET 0x00002342
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
+static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK;
+}
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8
+static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK;
+}
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT 16
+static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK;
+}
+
+#define REG_A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x00002343
+
+#define REG_A3XX_VBIF_CLKON 0x00003001
+
+#define REG_A3XX_VBIF_FIXED_SORT_EN 0x0000300c
+
+#define REG_A3XX_VBIF_FIXED_SORT_SEL0 0x0000300d
+
+#define REG_A3XX_VBIF_FIXED_SORT_SEL1 0x0000300e
+
+#define REG_A3XX_VBIF_ABIT_SORT 0x0000301c
+
+#define REG_A3XX_VBIF_ABIT_SORT_CONF 0x0000301d
+
+#define REG_A3XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A3XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
+
+#define REG_A3XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
+
+#define REG_A3XX_VBIF_IN_WR_LIM_CONF0 0x00003030
+
+#define REG_A3XX_VBIF_IN_WR_LIM_CONF1 0x00003031
+
+#define REG_A3XX_VBIF_OUT_RD_LIM_CONF0 0x00003034
+
+#define REG_A3XX_VBIF_OUT_WR_LIM_CONF0 0x00003035
+
+#define REG_A3XX_VBIF_DDR_OUT_MAX_BURST 0x00003036
+
+#define REG_A3XX_VBIF_ARB_CTL 0x0000303c
+
+#define REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
+
+#define REG_A3XX_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x00003058
+
+#define REG_A3XX_VBIF_OUT_AXI_AOOO_EN 0x0000305e
+
+#define REG_A3XX_VBIF_OUT_AXI_AOOO 0x0000305f
+
+#define REG_A3XX_VBIF_PERF_CNT_EN 0x00003070
+#define A3XX_VBIF_PERF_CNT_EN_CNT0 0x00000001
+#define A3XX_VBIF_PERF_CNT_EN_CNT1 0x00000002
+#define A3XX_VBIF_PERF_CNT_EN_PWRCNT0 0x00000004
+#define A3XX_VBIF_PERF_CNT_EN_PWRCNT1 0x00000008
+#define A3XX_VBIF_PERF_CNT_EN_PWRCNT2 0x00000010
+
+#define REG_A3XX_VBIF_PERF_CNT_CLR 0x00003071
+#define A3XX_VBIF_PERF_CNT_CLR_CNT0 0x00000001
+#define A3XX_VBIF_PERF_CNT_CLR_CNT1 0x00000002
+#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT0 0x00000004
+#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT1 0x00000008
+#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT2 0x00000010
+
+#define REG_A3XX_VBIF_PERF_CNT_SEL 0x00003072
+
+#define REG_A3XX_VBIF_PERF_CNT0_LO 0x00003073
+
+#define REG_A3XX_VBIF_PERF_CNT0_HI 0x00003074
+
+#define REG_A3XX_VBIF_PERF_CNT1_LO 0x00003075
+
+#define REG_A3XX_VBIF_PERF_CNT1_HI 0x00003076
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT0_LO 0x00003077
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT0_HI 0x00003078
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT1_LO 0x00003079
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT1_HI 0x0000307a
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT2_LO 0x0000307b
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT2_HI 0x0000307c
+
+#define REG_A3XX_VSC_BIN_SIZE 0x00000c01
+#define A3XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
+#define A3XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A3XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A3XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A3XX_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A3XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
+#define A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5
+static inline uint32_t A3XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A3XX_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
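+/* Illustrative usage (not part of the generated output): the WIDTH/HEIGHT
+ * helpers drop the low five bits of the value passed in (val >> 5), i.e. bin
+ * dimensions are given in pixels but programmed in units of 32 pixels.  A
+ * 64x32 pixel bin could be set up from driver code roughly as:
+ *
+ *   gpu_write(gpu, REG_A3XX_VSC_BIN_SIZE,
+ *             A3XX_VSC_BIN_SIZE_WIDTH(64) | A3XX_VSC_BIN_SIZE_HEIGHT(32));
+ */
+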
+#define REG_A3XX_VSC_SIZE_ADDRESS 0x00000c02
+
+static inline uint32_t REG_A3XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+
+static inline uint32_t REG_A3XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+#define A3XX_VSC_PIPE_CONFIG_X__MASK 0x000003ff
+#define A3XX_VSC_PIPE_CONFIG_X__SHIFT 0
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_X(uint32_t val)
+{
+ return ((val) << A3XX_VSC_PIPE_CONFIG_X__SHIFT) & A3XX_VSC_PIPE_CONFIG_X__MASK;
+}
+#define A3XX_VSC_PIPE_CONFIG_Y__MASK 0x000ffc00
+#define A3XX_VSC_PIPE_CONFIG_Y__SHIFT 10
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_Y(uint32_t val)
+{
+ return ((val) << A3XX_VSC_PIPE_CONFIG_Y__SHIFT) & A3XX_VSC_PIPE_CONFIG_Y__MASK;
+}
+#define A3XX_VSC_PIPE_CONFIG_W__MASK 0x00f00000
+#define A3XX_VSC_PIPE_CONFIG_W__SHIFT 20
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_W(uint32_t val)
+{
+ return ((val) << A3XX_VSC_PIPE_CONFIG_W__SHIFT) & A3XX_VSC_PIPE_CONFIG_W__MASK;
+}
+#define A3XX_VSC_PIPE_CONFIG_H__MASK 0x0f000000
+#define A3XX_VSC_PIPE_CONFIG_H__SHIFT 24
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_H(uint32_t val)
+{
+ return ((val) << A3XX_VSC_PIPE_CONFIG_H__SHIFT) & A3XX_VSC_PIPE_CONFIG_H__MASK;
+}
+
+static inline uint32_t REG_A3XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; }
+
+static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
+
+#define REG_A3XX_VSC_BIN_CONTROL 0x00000c3c
+#define A3XX_VSC_BIN_CONTROL_BINNING_ENABLE 0x00000001
+
+#define REG_A3XX_UNKNOWN_0C3D 0x00000c3d
+
+#define REG_A3XX_PC_PERFCOUNTER0_SELECT 0x00000c48
+
+#define REG_A3XX_PC_PERFCOUNTER1_SELECT 0x00000c49
+
+#define REG_A3XX_PC_PERFCOUNTER2_SELECT 0x00000c4a
+
+#define REG_A3XX_PC_PERFCOUNTER3_SELECT 0x00000c4b
+
+#define REG_A3XX_GRAS_TSE_DEBUG_ECO 0x00000c81
+
+#define REG_A3XX_GRAS_PERFCOUNTER0_SELECT 0x00000c88
+
+#define REG_A3XX_GRAS_PERFCOUNTER1_SELECT 0x00000c89
+
+#define REG_A3XX_GRAS_PERFCOUNTER2_SELECT 0x00000c8a
+
+#define REG_A3XX_GRAS_PERFCOUNTER3_SELECT 0x00000c8b
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_X(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Y(uint32_t i0) { return 0x00000ca1 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Z(uint32_t i0) { return 0x00000ca2 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_W(uint32_t i0) { return 0x00000ca3 + 0x4*i0; }
+
+#define REG_A3XX_RB_GMEM_BASE_ADDR 0x00000cc0
+
+#define REG_A3XX_RB_DEBUG_ECO_CONTROLS_ADDR 0x00000cc1
+
+#define REG_A3XX_RB_PERFCOUNTER0_SELECT 0x00000cc6
+
+#define REG_A3XX_RB_PERFCOUNTER1_SELECT 0x00000cc7
+
+#define REG_A3XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK 0x00003fff
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT 0
+static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val)
+{
+ return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK;
+}
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK 0x0fffc000
+#define A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT 14
+static inline uint32_t A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val)
+{
+ return ((val) << A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A3XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK;
+}
+
+#define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT 0x00000e00
+
+#define REG_A3XX_HLSQ_PERFCOUNTER1_SELECT 0x00000e01
+
+#define REG_A3XX_HLSQ_PERFCOUNTER2_SELECT 0x00000e02
+
+#define REG_A3XX_HLSQ_PERFCOUNTER3_SELECT 0x00000e03
+
+#define REG_A3XX_HLSQ_PERFCOUNTER4_SELECT 0x00000e04
+
+#define REG_A3XX_HLSQ_PERFCOUNTER5_SELECT 0x00000e05
+
+#define REG_A3XX_UNKNOWN_0E43 0x00000e43
+
+#define REG_A3XX_VFD_PERFCOUNTER0_SELECT 0x00000e44
+
+#define REG_A3XX_VFD_PERFCOUNTER1_SELECT 0x00000e45
+
+#define REG_A3XX_VPC_VPC_DEBUG_RAM_SEL 0x00000e61
+
+#define REG_A3XX_VPC_VPC_DEBUG_RAM_READ 0x00000e62
+
+#define REG_A3XX_VPC_PERFCOUNTER0_SELECT 0x00000e64
+
+#define REG_A3XX_VPC_PERFCOUNTER1_SELECT 0x00000e65
+
+#define REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG 0x00000e82
+
+#define REG_A3XX_UCHE_PERFCOUNTER0_SELECT 0x00000e84
+
+#define REG_A3XX_UCHE_PERFCOUNTER1_SELECT 0x00000e85
+
+#define REG_A3XX_UCHE_PERFCOUNTER2_SELECT 0x00000e86
+
+#define REG_A3XX_UCHE_PERFCOUNTER3_SELECT 0x00000e87
+
+#define REG_A3XX_UCHE_PERFCOUNTER4_SELECT 0x00000e88
+
+#define REG_A3XX_UCHE_PERFCOUNTER5_SELECT 0x00000e89
+
+#define REG_A3XX_UCHE_CACHE_INVALIDATE0_REG 0x00000ea0
+#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK 0x0fffffff
+#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT 0
+static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR(uint32_t val)
+{
+ return ((val) << A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK;
+}
+
+#define REG_A3XX_UCHE_CACHE_INVALIDATE1_REG 0x00000ea1
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK 0x0fffffff
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT 0
+static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR(uint32_t val)
+{
+ return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK;
+}
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK 0x30000000
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT 28
+static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_opcode val)
+{
+ return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK;
+}
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE 0x80000000
+
+#define REG_A3XX_UNKNOWN_0EA6 0x00000ea6
+
+#define REG_A3XX_SP_PERFCOUNTER0_SELECT 0x00000ec4
+
+#define REG_A3XX_SP_PERFCOUNTER1_SELECT 0x00000ec5
+
+#define REG_A3XX_SP_PERFCOUNTER2_SELECT 0x00000ec6
+
+#define REG_A3XX_SP_PERFCOUNTER3_SELECT 0x00000ec7
+
+#define REG_A3XX_SP_PERFCOUNTER4_SELECT 0x00000ec8
+
+#define REG_A3XX_SP_PERFCOUNTER5_SELECT 0x00000ec9
+
+#define REG_A3XX_SP_PERFCOUNTER6_SELECT 0x00000eca
+
+#define REG_A3XX_SP_PERFCOUNTER7_SELECT 0x00000ecb
+
+#define REG_A3XX_UNKNOWN_0EE0 0x00000ee0
+
+#define REG_A3XX_UNKNOWN_0F03 0x00000f03
+
+#define REG_A3XX_TP_PERFCOUNTER0_SELECT 0x00000f04
+
+#define REG_A3XX_TP_PERFCOUNTER1_SELECT 0x00000f05
+
+#define REG_A3XX_TP_PERFCOUNTER2_SELECT 0x00000f06
+
+#define REG_A3XX_TP_PERFCOUNTER3_SELECT 0x00000f07
+
+#define REG_A3XX_TP_PERFCOUNTER4_SELECT 0x00000f08
+
+#define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09
+
+#define REG_A3XX_VGT_CL_INITIATOR 0x000021f0
+
+#define REG_A3XX_VGT_EVENT_INITIATOR 0x000021f9
+
+#define REG_A3XX_VGT_DRAW_INITIATOR 0x000021fc
+#define A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f
+#define A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK;
+}
+#define A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0
+#define A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK;
+}
+#define A3XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK 0x00000600
+#define A3XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT 9
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT) & A3XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK;
+}
+#define A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000800
+#define A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT 11
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK;
+}
+#define A3XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000
+#define A3XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000
+#define A3XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK 0xff000000
+#define A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT 24
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT) & A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK;
+}
+
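+/* Illustrative usage (not part of the generated output): a draw initiator
+ * dword like this is normally emitted as part of a CP_DRAW_INDX packet in the
+ * command stream rather than written to the register directly.  A sketch,
+ * assuming the pc_di_* enumerators from adreno_pm4.xml.h (e.g. DI_PT_TRILIST,
+ * DI_SRC_SEL_AUTO_INDEX):
+ *
+ *   uint32_t di = A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE(DI_PT_TRILIST) |
+ *                 A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
+ *                 A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(1);
+ */
+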
+#define REG_A3XX_VGT_IMMED_DATA 0x000021fd
+
+#define REG_A3XX_TEX_SAMP_0 0x00000000
+#define A3XX_TEX_SAMP_0_CLAMPENABLE 0x00000001
+#define A3XX_TEX_SAMP_0_MIPFILTER_LINEAR 0x00000002
+#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c
+#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2
+static inline uint32_t A3XX_TEX_SAMP_0_XY_MAG(enum a3xx_tex_filter val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_XY_MAG__SHIFT) & A3XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A3XX_TEX_SAMP_0_XY_MIN__MASK 0x00000030
+#define A3XX_TEX_SAMP_0_XY_MIN__SHIFT 4
+static inline uint32_t A3XX_TEX_SAMP_0_XY_MIN(enum a3xx_tex_filter val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_XY_MIN__SHIFT) & A3XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A3XX_TEX_SAMP_0_WRAP_S__MASK 0x000001c0
+#define A3XX_TEX_SAMP_0_WRAP_S__SHIFT 6
+static inline uint32_t A3XX_TEX_SAMP_0_WRAP_S(enum a3xx_tex_clamp val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_WRAP_S__SHIFT) & A3XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A3XX_TEX_SAMP_0_WRAP_T__MASK 0x00000e00
+#define A3XX_TEX_SAMP_0_WRAP_T__SHIFT 9
+static inline uint32_t A3XX_TEX_SAMP_0_WRAP_T(enum a3xx_tex_clamp val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_WRAP_T__SHIFT) & A3XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A3XX_TEX_SAMP_0_WRAP_R__MASK 0x00007000
+#define A3XX_TEX_SAMP_0_WRAP_R__SHIFT 12
+static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_WRAP_R__SHIFT) & A3XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A3XX_TEX_SAMP_0_ANISO__MASK 0x00038000
+#define A3XX_TEX_SAMP_0_ANISO__SHIFT 15
+static inline uint32_t A3XX_TEX_SAMP_0_ANISO(enum a3xx_tex_aniso val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_ANISO__SHIFT) & A3XX_TEX_SAMP_0_ANISO__MASK;
+}
+#define A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK 0x00700000
+#define A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT 20
+static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT) & A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK;
+}
+#define A3XX_TEX_SAMP_0_CUBEMAPSEAMLESSFILTOFF 0x01000000
+#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
+
+#define REG_A3XX_TEX_SAMP_1 0x00000001
+#define A3XX_TEX_SAMP_1_LOD_BIAS__MASK 0x000007ff
+#define A3XX_TEX_SAMP_1_LOD_BIAS__SHIFT 0
+static inline uint32_t A3XX_TEX_SAMP_1_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_LOD_BIAS__SHIFT) & A3XX_TEX_SAMP_1_LOD_BIAS__MASK;
+}
+#define A3XX_TEX_SAMP_1_MAX_LOD__MASK 0x003ff000
+#define A3XX_TEX_SAMP_1_MAX_LOD__SHIFT 12
+static inline uint32_t A3XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A3XX_TEX_SAMP_1_MIN_LOD__MASK 0xffc00000
+#define A3XX_TEX_SAMP_1_MIN_LOD__SHIFT 22
+static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
+
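+/* Note on encoding (not part of the generated output): the LOD fields are
+ * fixed point with six fractional bits (the helpers scale by 64.0), so for
+ * example A3XX_TEX_SAMP_1_MAX_LOD(5.5f) encodes 5.5 as 352 (0x160) and places
+ * it in bits [21:12] of the dword.
+ */
+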
+#define REG_A3XX_TEX_CONST_0 0x00000000
+#define A3XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003
+#define A3XX_TEX_CONST_0_TILE_MODE__SHIFT 0
+static inline uint32_t A3XX_TEX_CONST_0_TILE_MODE(enum a3xx_tile_mode val)
+{
+ return ((val) << A3XX_TEX_CONST_0_TILE_MODE__SHIFT) & A3XX_TEX_CONST_0_TILE_MODE__MASK;
+}
+#define A3XX_TEX_CONST_0_SRGB 0x00000004
+#define A3XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A3XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A3XX_TEX_CONST_0_SWIZ_X(enum a3xx_tex_swiz val)
+{
+ return ((val) << A3XX_TEX_CONST_0_SWIZ_X__SHIFT) & A3XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A3XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A3XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Y(enum a3xx_tex_swiz val)
+{
+ return ((val) << A3XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A3XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A3XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Z(enum a3xx_tex_swiz val)
+{
+ return ((val) << A3XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A3XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A3XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A3XX_TEX_CONST_0_SWIZ_W(enum a3xx_tex_swiz val)
+{
+ return ((val) << A3XX_TEX_CONST_0_SWIZ_W__SHIFT) & A3XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A3XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
+#define A3XX_TEX_CONST_0_MIPLVLS__SHIFT 16
+static inline uint32_t A3XX_TEX_CONST_0_MIPLVLS(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_0_MIPLVLS__SHIFT) & A3XX_TEX_CONST_0_MIPLVLS__MASK;
+}
+#define A3XX_TEX_CONST_0_MSAATEX__MASK 0x00300000
+#define A3XX_TEX_CONST_0_MSAATEX__SHIFT 20
+static inline uint32_t A3XX_TEX_CONST_0_MSAATEX(enum a3xx_tex_msaa val)
+{
+ return ((val) << A3XX_TEX_CONST_0_MSAATEX__SHIFT) & A3XX_TEX_CONST_0_MSAATEX__MASK;
+}
+#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000
+#define A3XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
+{
+ return ((val) << A3XX_TEX_CONST_0_FMT__SHIFT) & A3XX_TEX_CONST_0_FMT__MASK;
+}
+#define A3XX_TEX_CONST_0_NOCONVERT 0x20000000
+#define A3XX_TEX_CONST_0_TYPE__MASK 0xc0000000
+#define A3XX_TEX_CONST_0_TYPE__SHIFT 30
+static inline uint32_t A3XX_TEX_CONST_0_TYPE(enum a3xx_tex_type val)
+{
+ return ((val) << A3XX_TEX_CONST_0_TYPE__SHIFT) & A3XX_TEX_CONST_0_TYPE__MASK;
+}
+
+#define REG_A3XX_TEX_CONST_1 0x00000001
+#define A3XX_TEX_CONST_1_HEIGHT__MASK 0x00003fff
+#define A3XX_TEX_CONST_1_HEIGHT__SHIFT 0
+static inline uint32_t A3XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_1_HEIGHT__SHIFT) & A3XX_TEX_CONST_1_HEIGHT__MASK;
+}
+#define A3XX_TEX_CONST_1_WIDTH__MASK 0x0fffc000
+#define A3XX_TEX_CONST_1_WIDTH__SHIFT 14
+static inline uint32_t A3XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_1_WIDTH__SHIFT) & A3XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A3XX_TEX_CONST_1_PITCHALIGN__MASK 0xf0000000
+#define A3XX_TEX_CONST_1_PITCHALIGN__SHIFT 28
+static inline uint32_t A3XX_TEX_CONST_1_PITCHALIGN(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_1_PITCHALIGN__SHIFT) & A3XX_TEX_CONST_1_PITCHALIGN__MASK;
+}
+
+#define REG_A3XX_TEX_CONST_2 0x00000002
+#define A3XX_TEX_CONST_2_INDX__MASK 0x000001ff
+#define A3XX_TEX_CONST_2_INDX__SHIFT 0
+static inline uint32_t A3XX_TEX_CONST_2_INDX(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_2_INDX__SHIFT) & A3XX_TEX_CONST_2_INDX__MASK;
+}
+#define A3XX_TEX_CONST_2_PITCH__MASK 0x3ffff000
+#define A3XX_TEX_CONST_2_PITCH__SHIFT 12
+static inline uint32_t A3XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_2_PITCH__SHIFT) & A3XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A3XX_TEX_CONST_2_SWAP__MASK 0xc0000000
+#define A3XX_TEX_CONST_2_SWAP__SHIFT 30
+static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A3XX_TEX_CONST_2_SWAP__SHIFT) & A3XX_TEX_CONST_2_SWAP__MASK;
+}
+
+#define REG_A3XX_TEX_CONST_3 0x00000003
+#define A3XX_TEX_CONST_3_LAYERSZ1__MASK 0x0001ffff
+#define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT 0
+static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val)
+{
+ return ((val >> 12) << A3XX_TEX_CONST_3_LAYERSZ1__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ1__MASK;
+}
+#define A3XX_TEX_CONST_3_DEPTH__MASK 0x0ffe0000
+#define A3XX_TEX_CONST_3_DEPTH__SHIFT 17
+static inline uint32_t A3XX_TEX_CONST_3_DEPTH(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_3_DEPTH__SHIFT) & A3XX_TEX_CONST_3_DEPTH__MASK;
+}
+#define A3XX_TEX_CONST_3_LAYERSZ2__MASK 0xf0000000
+#define A3XX_TEX_CONST_3_LAYERSZ2__SHIFT 28
+static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ2(uint32_t val)
+{
+ return ((val >> 12) << A3XX_TEX_CONST_3_LAYERSZ2__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ2__MASK;
+}
+
+
+#endif /* A3XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
new file mode 100644
index 0000000000..c86b377f6f
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ */
+
+#include "a3xx_gpu.h"
+
+#define A3XX_INT0_MASK \
+ (A3XX_INT0_RBBM_AHB_ERROR | \
+ A3XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
+ A3XX_INT0_CP_T0_PACKET_IN_IB | \
+ A3XX_INT0_CP_OPCODE_ERROR | \
+ A3XX_INT0_CP_RESERVED_BIT_ERROR | \
+ A3XX_INT0_CP_HW_FAULT | \
+ A3XX_INT0_CP_IB1_INT | \
+ A3XX_INT0_CP_IB2_INT | \
+ A3XX_INT0_CP_RB_INT | \
+ A3XX_INT0_CP_REG_PROTECT_FAULT | \
+ A3XX_INT0_CP_AHB_ERROR_HALT | \
+ A3XX_INT0_CACHE_FLUSH_TS | \
+ A3XX_INT0_UCHE_OOB_ACCESS)
+
+extern bool hang_debug;
+
+static void a3xx_dump(struct msm_gpu *gpu);
+static bool a3xx_idle(struct msm_gpu *gpu);
+
+static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+ struct msm_ringbuffer *ring = submit->ring;
+ unsigned int i;
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ /* ignore IB-targets */
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ /* ignore if there has not been a ctx switch: */
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ break;
+ fallthrough;
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ OUT_PKT2(ring);
+ break;
+ }
+ }
+
+ OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
+ OUT_RING(ring, submit->seqno);
+
+ /* Flush HLSQ lazy updates to make sure there is nothing
+ * pending for indirect loads after the timestamp has
+ * passed:
+ */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, HLSQ_FLUSH);
+
+ /* wait for idle before cache flush/interrupt */
+ OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+ OUT_RING(ring, 0x00000000);
+
+ /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+ OUT_RING(ring, CACHE_FLUSH_TS | CP_EVENT_WRITE_0_IRQ);
+ OUT_RING(ring, rbmemptr(ring, fence));
+ OUT_RING(ring, submit->seqno);
+
+#if 0
+ /* Dummy set-constant to trigger context rollover */
+ OUT_PKT3(ring, CP_SET_CONSTANT, 2);
+ OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
+ OUT_RING(ring, 0x00000000);
+#endif
+
+ adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
+}
+
+static bool a3xx_me_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT3(ring, CP_ME_INIT, 17);
+ OUT_RING(ring, 0x000003f7);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000080);
+ OUT_RING(ring, 0x00000100);
+ OUT_RING(ring, 0x00000180);
+ OUT_RING(ring, 0x00006600);
+ OUT_RING(ring, 0x00000150);
+ OUT_RING(ring, 0x0000014e);
+ OUT_RING(ring, 0x00000154);
+ OUT_RING(ring, 0x00000001);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
+ return a3xx_idle(gpu);
+}
+
+static int a3xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
+ uint32_t *ptr, len;
+ int i, ret;
+
+ DBG("%s", gpu->name);
+
+ if (adreno_is_a305(adreno_gpu)) {
+ /* Set up 16 deep read/write request queues: */
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
+ /* Enable WR-REQ: */
+ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
+ /* Set up round robin arbitration between both AXI ports: */
+ gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
+ /* Set up AOOO: */
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
+ } else if (adreno_is_a306(adreno_gpu)) {
+ gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x0000000a);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x0000000a);
+ } else if (adreno_is_a320(adreno_gpu)) {
+ /* Set up 16 deep read/write request queues: */
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
+ /* Enable WR-REQ: */
+ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
+ /* Set up round robin arbitration between both AXI ports: */
+ gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
+ /* Set up AOOO: */
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
+ /* Enable 1K sort: */
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff);
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
+
+ } else if (adreno_is_a330v2(adreno_gpu)) {
+ /*
+ * Most of the VBIF registers on 8974v2 have the correct
+ * values at power on, so we won't modify those if we don't
+ * need to
+ */
+ /* Enable 1k sort: */
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
+ /* Enable WR-REQ: */
+ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
+ gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+ /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
+ gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
+
+ } else if (adreno_is_a330(adreno_gpu)) {
+ /* Set up 16 deep read/write request queues: */
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x18181818);
+ /* Enable WR-REQ: */
+ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
+ /* Set up round robin arbitration between both AXI ports: */
+ gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
+ /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
+ gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
+ /* Set up AOOO: */
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003f);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003f003f);
+ /* Enable 1K sort: */
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001003f);
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
+		/* Disable VBIF clock gating.  This allows AXI to run at a
+		 * higher frequency than the GPU:
+		 */
+ gpu_write(gpu, REG_A3XX_VBIF_CLKON, 0x00000001);
+
+ } else {
+ BUG();
+ }
+
+ /* Make all blocks contribute to the GPU BUSY perf counter: */
+ gpu_write(gpu, REG_A3XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);
+
+	/* Tune the hysteresis counters for SP and CP idle detection: */
+ gpu_write(gpu, REG_A3XX_RBBM_SP_HYST_CNT, 0x10);
+ gpu_write(gpu, REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
+
+ /* Enable the RBBM error reporting bits. This lets us get
+ * useful information on failure:
+ */
+ gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL0, 0x00000001);
+
+ /* Enable AHB error reporting: */
+ gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL1, 0xa6ffffff);
+
+ /* Turn on the power counters: */
+ gpu_write(gpu, REG_A3XX_RBBM_RBBM_CTL, 0x00030000);
+
+ /* Turn on hang detection - this spews a lot of useful information
+ * into the RBBM registers on a hang:
+ */
+ gpu_write(gpu, REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL, 0x00010fff);
+
+ /* Enable 64-byte cacheline size. HW Default is 32-byte (0x000000E0): */
+ gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
+
+ /* Enable Clock gating: */
+ if (adreno_is_a306(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
+ else if (adreno_is_a320(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
+ else if (adreno_is_a330v2(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
+ else if (adreno_is_a330(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbffcffff);
+
+ if (adreno_is_a330v2(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x05515455);
+ else if (adreno_is_a330(adreno_gpu))
+ gpu_write(gpu, REG_A3XX_RBBM_GPR0_CTL, 0x00000000);
+
+ /* Set the OCMEM base address for A330, etc */
+ if (a3xx_gpu->ocmem.hdl) {
+ gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
+ (unsigned int)(a3xx_gpu->ocmem.base >> 14));
+ }
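+	/*
+	 * Illustrative note, hypothetical address: RB_GMEM_BASE_ADDR takes
+	 * the OCMEM base in 16 KiB units (hence the ">> 14" above), so an
+	 * OCMEM base of e.g. 0xfec00000 would be programmed as 0x3fb00.
+	 */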
+
+ /* Turn on performance counters: */
+ gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01);
+
+ /* Enable the perfcntrs that we use.. */
+ for (i = 0; i < gpu->num_perfcntrs; i++) {
+ const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i];
+ gpu_write(gpu, perfcntr->select_reg, perfcntr->select_val);
+ }
+
+ gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, A3XX_INT0_MASK);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ /*
+ * Use the default ringbuffer size and block size but disable the RPTR
+ * shadow
+ */
+ gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ /* Set the ringbuffer address */
+ gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
+ /* setup access protection: */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);
+
+ /* RBBM registers */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(0), 0x63000040);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(1), 0x62000080);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(2), 0x600000cc);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(3), 0x60000108);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(4), 0x64000140);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(5), 0x66000400);
+
+ /* CP registers */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(6), 0x65000700);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(7), 0x610007d8);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(8), 0x620007e0);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(9), 0x61001178);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(10), 0x64001180);
+
+ /* RB registers */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(11), 0x60003300);
+
+ /* VBIF registers */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(12), 0x6b00c000);
+
+ /* NOTE: PM4/micro-engine firmware registers look to be the same
+ * for a2xx and a3xx.. we could possibly push that part down to
+ * adreno_gpu base class. Or push both PM4 and PFP but
+ * parameterize the pfp ucode addr/data registers..
+ */
+
+ /* Load PM4: */
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
+ DBG("loading PM4 ucode version: %x", ptr[1]);
+
+ gpu_write(gpu, REG_AXXX_CP_DEBUG,
+ AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE |
+ AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
+ gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
+
+ /* Load PFP: */
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
+ DBG("loading PFP ucode version: %x", ptr[5]);
+
+ gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);
+
+ /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
+ if (adreno_is_a305(adreno_gpu) || adreno_is_a306(adreno_gpu) ||
+ adreno_is_a320(adreno_gpu)) {
+ gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
+ AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
+ AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
+ AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14));
+ } else if (adreno_is_a330(adreno_gpu)) {
+		/* NOTE: this value (taken from the downstream Android driver)
+		 * includes some bits outside of the known bitfields. But
+		 * A330 has this "MERCIU queue" thing too, which might
+		 * explain a new bitfield or reshuffling:
+		 */
+ gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x003e2008);
+ }
+
+ /* clear ME_HALT to start micro engine */
+ gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
+
+ return a3xx_me_init(gpu) ? 0 : -EINVAL;
+}
+
+static void a3xx_recover(struct msm_gpu *gpu)
+{
+ int i;
+
+ adreno_dump_info(gpu);
+
+ for (i = 0; i < 8; i++) {
+ printk("CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
+ }
+
+ /* dump registers before resetting gpu, if enabled: */
+ if (hang_debug)
+ a3xx_dump(gpu);
+
+ gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 1);
+ gpu_read(gpu, REG_A3XX_RBBM_SW_RESET_CMD);
+ gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 0);
+ adreno_recover(gpu);
+}
+
+static void a3xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ adreno_gpu_cleanup(adreno_gpu);
+
+ adreno_gpu_ocmem_cleanup(&a3xx_gpu->ocmem);
+
+ kfree(a3xx_gpu);
+}
+
+static bool a3xx_idle(struct msm_gpu *gpu)
+{
+ /* wait for ringbuffer to drain: */
+ if (!adreno_idle(gpu, gpu->rb[0]))
+ return false;
+
+ /* then wait for GPU to finish: */
+ if (spin_until(!(gpu_read(gpu, REG_A3XX_RBBM_STATUS) &
+ A3XX_RBBM_STATUS_GPU_BUSY))) {
+ DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ return false;
+ }
+
+ return true;
+}
+
+static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
+{
+ uint32_t status;
+
+ status = gpu_read(gpu, REG_A3XX_RBBM_INT_0_STATUS);
+ DBG("%s: %08x", gpu->name, status);
+
+ // TODO
+
+ gpu_write(gpu, REG_A3XX_RBBM_INT_CLEAR_CMD, status);
+
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
+static const unsigned int a3xx_registers[] = {
+ 0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027,
+ 0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c,
+ 0x0060, 0x006c, 0x0080, 0x0082, 0x0084, 0x0088, 0x0090, 0x00e5,
+ 0x00ea, 0x00ed, 0x0100, 0x0100, 0x0110, 0x0123, 0x01c0, 0x01c1,
+ 0x01c3, 0x01c5, 0x01c7, 0x01c7, 0x01d5, 0x01d9, 0x01dc, 0x01dd,
+ 0x01ea, 0x01ea, 0x01ee, 0x01f1, 0x01f5, 0x01f5, 0x01fc, 0x01ff,
+ 0x0440, 0x0440, 0x0443, 0x0443, 0x0445, 0x0445, 0x044d, 0x044f,
+ 0x0452, 0x0452, 0x0454, 0x046f, 0x047c, 0x047c, 0x047f, 0x047f,
+ 0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e,
+ 0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f,
+ 0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7,
+ 0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5, 0x0e00, 0x0e05,
+ 0x0e0c, 0x0e0c, 0x0e22, 0x0e23, 0x0e41, 0x0e45, 0x0e64, 0x0e65,
+ 0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7,
+ 0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09,
+ 0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069,
+ 0x206c, 0x206d, 0x2070, 0x2070, 0x2072, 0x2072, 0x2074, 0x2075,
+ 0x2079, 0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109,
+ 0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115,
+ 0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0,
+ 0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e,
+ 0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
+ 0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
+ 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2440, 0x2440, 0x2444, 0x2444,
+ 0x2448, 0x244d, 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470,
+ 0x2472, 0x2472, 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3,
+ 0x24e4, 0x24ef, 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e,
+ 0x2510, 0x2511, 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea,
+ 0x25ec, 0x25ed, 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617,
+ 0x261a, 0x261a, 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0,
+ 0x26c4, 0x26ce, 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9,
+ 0x26ec, 0x26ec, 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743,
+ 0x300c, 0x300e, 0x301c, 0x301d, 0x302a, 0x302a, 0x302c, 0x302d,
+ 0x3030, 0x3031, 0x3034, 0x3036, 0x303c, 0x303c, 0x305e, 0x305f,
+ ~0 /* sentinel */
+};
+
+/* would be nice to not have to duplicate the _show() stuff with printk(): */
+static void a3xx_dump(struct msm_gpu *gpu)
+{
+ printk("status: %08x\n",
+ gpu_read(gpu, REG_A3XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+static struct msm_gpu_state *a3xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu_state_get(gpu, state);
+
+ state->rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS);
+
+ return state;
+}
+
+static u64 a3xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
+{
+ u64 busy_cycles;
+
+ busy_cycles = gpu_read64(gpu, REG_A3XX_RBBM_PERFCTR_RBBM_1_LO);
+ *out_sample_rate = clk_get_rate(gpu->core_clk);
+
+ return busy_cycles;
+}
+
+static u32 a3xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR);
+ return ring->memptrs->rptr;
+}
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a3xx_hw_init,
+ .pm_suspend = msm_gpu_pm_suspend,
+ .pm_resume = msm_gpu_pm_resume,
+ .recover = a3xx_recover,
+ .submit = a3xx_submit,
+ .active_ring = adreno_active_ring,
+ .irq = a3xx_irq,
+ .destroy = a3xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
+#endif
+ .gpu_busy = a3xx_gpu_busy,
+ .gpu_state_get = a3xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
+ .create_address_space = adreno_create_address_space,
+ .get_rptr = a3xx_get_rptr,
+ },
+};
+
+static const struct msm_gpu_perfcntr perfcntrs[] = {
+ { REG_A3XX_SP_PERFCOUNTER6_SELECT, REG_A3XX_RBBM_PERFCTR_SP_6_LO,
+ SP_ALU_ACTIVE_CYCLES, "ALUACTIVE" },
+ { REG_A3XX_SP_PERFCOUNTER7_SELECT, REG_A3XX_RBBM_PERFCTR_SP_7_LO,
+ SP_FS_FULL_ALU_INSTRUCTIONS, "ALUFULL" },
+};
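+/*
+ * Illustrative note: each entry pairs a select register with a counter
+ * register, e.g. the first entry programs SP_PERFCOUNTER6_SELECT with
+ * SP_ALU_ACTIVE_CYCLES so that RBBM_PERFCTR_SP_6_LO accumulates
+ * ALU-active cycles, reported under the name "ALUACTIVE".
+ */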
+
+struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
+{
+ struct a3xx_gpu *a3xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct icc_path *ocmem_icc_path;
+ struct icc_path *icc_path;
+ int ret;
+
+ if (!pdev) {
+ DRM_DEV_ERROR(dev->dev, "no a3xx device\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ a3xx_gpu = kzalloc(sizeof(*a3xx_gpu), GFP_KERNEL);
+ if (!a3xx_gpu) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ adreno_gpu = &a3xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ gpu->perfcntrs = perfcntrs;
+ gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
+
+ adreno_gpu->registers = a3xx_registers;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ if (ret)
+ goto fail;
+
+ /* if needed, allocate gmem: */
+ if (adreno_is_a330(adreno_gpu)) {
+ ret = adreno_gpu_ocmem_init(&adreno_gpu->base.pdev->dev,
+ adreno_gpu, &a3xx_gpu->ocmem);
+ if (ret)
+ goto fail;
+ }
+
+ if (!gpu->aspace) {
+ /* TODO we think it is possible to configure the GPU to
+ * restrict access to VRAM carveout. But the required
+ * registers are unknown. For now just bail out and
+ * limp along with just modesetting. If it turns out
+ * to not be possible to restrict access, then we must
+ * implement a cmdstream validator.
+ */
+ DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
+ }
+
+ icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
+ if (IS_ERR(icc_path)) {
+ ret = PTR_ERR(icc_path);
+ goto fail;
+ }
+
+ ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem");
+ if (IS_ERR(ocmem_icc_path)) {
+ ret = PTR_ERR(ocmem_icc_path);
+ /* allow -ENODATA, ocmem icc is optional */
+ if (ret != -ENODATA)
+ goto fail;
+ ocmem_icc_path = NULL;
+ }
+
+ /*
+ * Set the ICC path to maximum speed for now by multiplying the fastest
+ * frequency by the bus width (8). We'll want to scale this later on to
+ * improve battery life.
+ */
+ icc_set_bw(icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
+ icc_set_bw(ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
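+	/*
+	 * Illustrative sketch with a hypothetical fast_rate of 450 MHz:
+	 * Bps_to_icc(450000000) * 8 = 3600000 kBps, i.e. a request for
+	 * roughly 3.6 GB/s of bus bandwidth (fastest clock times the
+	 * 8-byte bus width, per the comment above).
+	 */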
+
+ return gpu;
+
+fail:
+ if (a3xx_gpu)
+ a3xx_destroy(&a3xx_gpu->base.base);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
new file mode 100644
index 0000000000..c555fb13e0
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#ifndef __A3XX_GPU_H__
+#define __A3XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a3xx.xml.h"
+
+struct a3xx_gpu {
+ struct adreno_gpu base;
+
+ /* if OCMEM is used for GMEM: */
+ struct adreno_ocmem ocmem;
+};
+#define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base)
+
+#endif /* __A3XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
new file mode 100644
index 0000000000..ff5f1e98a5
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -0,0 +1,4361 @@
+#ifndef A4XX_XML
+#define A4XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52)
+
+Copyright (C) 2013-2022 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a4xx_color_fmt {
+ RB4_A8_UNORM = 1,
+ RB4_R8_UNORM = 2,
+ RB4_R8_SNORM = 3,
+ RB4_R8_UINT = 4,
+ RB4_R8_SINT = 5,
+ RB4_R4G4B4A4_UNORM = 8,
+ RB4_R5G5B5A1_UNORM = 10,
+ RB4_R5G6B5_UNORM = 14,
+ RB4_R8G8_UNORM = 15,
+ RB4_R8G8_SNORM = 16,
+ RB4_R8G8_UINT = 17,
+ RB4_R8G8_SINT = 18,
+ RB4_R16_UNORM = 19,
+ RB4_R16_SNORM = 20,
+ RB4_R16_FLOAT = 21,
+ RB4_R16_UINT = 22,
+ RB4_R16_SINT = 23,
+ RB4_R8G8B8_UNORM = 25,
+ RB4_R8G8B8A8_UNORM = 26,
+ RB4_R8G8B8A8_SNORM = 28,
+ RB4_R8G8B8A8_UINT = 29,
+ RB4_R8G8B8A8_SINT = 30,
+ RB4_R10G10B10A2_UNORM = 31,
+ RB4_R10G10B10A2_UINT = 34,
+ RB4_R11G11B10_FLOAT = 39,
+ RB4_R16G16_UNORM = 40,
+ RB4_R16G16_SNORM = 41,
+ RB4_R16G16_FLOAT = 42,
+ RB4_R16G16_UINT = 43,
+ RB4_R16G16_SINT = 44,
+ RB4_R32_FLOAT = 45,
+ RB4_R32_UINT = 46,
+ RB4_R32_SINT = 47,
+ RB4_R16G16B16A16_UNORM = 52,
+ RB4_R16G16B16A16_SNORM = 53,
+ RB4_R16G16B16A16_FLOAT = 54,
+ RB4_R16G16B16A16_UINT = 55,
+ RB4_R16G16B16A16_SINT = 56,
+ RB4_R32G32_FLOAT = 57,
+ RB4_R32G32_UINT = 58,
+ RB4_R32G32_SINT = 59,
+ RB4_R32G32B32A32_FLOAT = 60,
+ RB4_R32G32B32A32_UINT = 61,
+ RB4_R32G32B32A32_SINT = 62,
+ RB4_NONE = 255,
+};
+
+enum a4xx_tile_mode {
+ TILE4_LINEAR = 0,
+ TILE4_2 = 2,
+ TILE4_3 = 3,
+};
+
+enum a4xx_vtx_fmt {
+ VFMT4_32_FLOAT = 1,
+ VFMT4_32_32_FLOAT = 2,
+ VFMT4_32_32_32_FLOAT = 3,
+ VFMT4_32_32_32_32_FLOAT = 4,
+ VFMT4_16_FLOAT = 5,
+ VFMT4_16_16_FLOAT = 6,
+ VFMT4_16_16_16_FLOAT = 7,
+ VFMT4_16_16_16_16_FLOAT = 8,
+ VFMT4_32_FIXED = 9,
+ VFMT4_32_32_FIXED = 10,
+ VFMT4_32_32_32_FIXED = 11,
+ VFMT4_32_32_32_32_FIXED = 12,
+ VFMT4_11_11_10_FLOAT = 13,
+ VFMT4_16_SINT = 16,
+ VFMT4_16_16_SINT = 17,
+ VFMT4_16_16_16_SINT = 18,
+ VFMT4_16_16_16_16_SINT = 19,
+ VFMT4_16_UINT = 20,
+ VFMT4_16_16_UINT = 21,
+ VFMT4_16_16_16_UINT = 22,
+ VFMT4_16_16_16_16_UINT = 23,
+ VFMT4_16_SNORM = 24,
+ VFMT4_16_16_SNORM = 25,
+ VFMT4_16_16_16_SNORM = 26,
+ VFMT4_16_16_16_16_SNORM = 27,
+ VFMT4_16_UNORM = 28,
+ VFMT4_16_16_UNORM = 29,
+ VFMT4_16_16_16_UNORM = 30,
+ VFMT4_16_16_16_16_UNORM = 31,
+ VFMT4_32_UINT = 32,
+ VFMT4_32_32_UINT = 33,
+ VFMT4_32_32_32_UINT = 34,
+ VFMT4_32_32_32_32_UINT = 35,
+ VFMT4_32_SINT = 36,
+ VFMT4_32_32_SINT = 37,
+ VFMT4_32_32_32_SINT = 38,
+ VFMT4_32_32_32_32_SINT = 39,
+ VFMT4_8_UINT = 40,
+ VFMT4_8_8_UINT = 41,
+ VFMT4_8_8_8_UINT = 42,
+ VFMT4_8_8_8_8_UINT = 43,
+ VFMT4_8_UNORM = 44,
+ VFMT4_8_8_UNORM = 45,
+ VFMT4_8_8_8_UNORM = 46,
+ VFMT4_8_8_8_8_UNORM = 47,
+ VFMT4_8_SINT = 48,
+ VFMT4_8_8_SINT = 49,
+ VFMT4_8_8_8_SINT = 50,
+ VFMT4_8_8_8_8_SINT = 51,
+ VFMT4_8_SNORM = 52,
+ VFMT4_8_8_SNORM = 53,
+ VFMT4_8_8_8_SNORM = 54,
+ VFMT4_8_8_8_8_SNORM = 55,
+ VFMT4_10_10_10_2_UINT = 56,
+ VFMT4_10_10_10_2_UNORM = 57,
+ VFMT4_10_10_10_2_SINT = 58,
+ VFMT4_10_10_10_2_SNORM = 59,
+ VFMT4_2_10_10_10_UINT = 60,
+ VFMT4_2_10_10_10_UNORM = 61,
+ VFMT4_2_10_10_10_SINT = 62,
+ VFMT4_2_10_10_10_SNORM = 63,
+ VFMT4_NONE = 255,
+};
+
+enum a4xx_tex_fmt {
+ TFMT4_A8_UNORM = 3,
+ TFMT4_8_UNORM = 4,
+ TFMT4_8_SNORM = 5,
+ TFMT4_8_UINT = 6,
+ TFMT4_8_SINT = 7,
+ TFMT4_4_4_4_4_UNORM = 8,
+ TFMT4_5_5_5_1_UNORM = 9,
+ TFMT4_5_6_5_UNORM = 11,
+ TFMT4_L8_A8_UNORM = 13,
+ TFMT4_8_8_UNORM = 14,
+ TFMT4_8_8_SNORM = 15,
+ TFMT4_8_8_UINT = 16,
+ TFMT4_8_8_SINT = 17,
+ TFMT4_16_UNORM = 18,
+ TFMT4_16_SNORM = 19,
+ TFMT4_16_FLOAT = 20,
+ TFMT4_16_UINT = 21,
+ TFMT4_16_SINT = 22,
+ TFMT4_8_8_8_8_UNORM = 28,
+ TFMT4_8_8_8_8_SNORM = 29,
+ TFMT4_8_8_8_8_UINT = 30,
+ TFMT4_8_8_8_8_SINT = 31,
+ TFMT4_9_9_9_E5_FLOAT = 32,
+ TFMT4_10_10_10_2_UNORM = 33,
+ TFMT4_10_10_10_2_UINT = 34,
+ TFMT4_11_11_10_FLOAT = 37,
+ TFMT4_16_16_UNORM = 38,
+ TFMT4_16_16_SNORM = 39,
+ TFMT4_16_16_FLOAT = 40,
+ TFMT4_16_16_UINT = 41,
+ TFMT4_16_16_SINT = 42,
+ TFMT4_32_FLOAT = 43,
+ TFMT4_32_UINT = 44,
+ TFMT4_32_SINT = 45,
+ TFMT4_16_16_16_16_UNORM = 51,
+ TFMT4_16_16_16_16_SNORM = 52,
+ TFMT4_16_16_16_16_FLOAT = 53,
+ TFMT4_16_16_16_16_UINT = 54,
+ TFMT4_16_16_16_16_SINT = 55,
+ TFMT4_32_32_FLOAT = 56,
+ TFMT4_32_32_UINT = 57,
+ TFMT4_32_32_SINT = 58,
+ TFMT4_32_32_32_FLOAT = 59,
+ TFMT4_32_32_32_UINT = 60,
+ TFMT4_32_32_32_SINT = 61,
+ TFMT4_32_32_32_32_FLOAT = 63,
+ TFMT4_32_32_32_32_UINT = 64,
+ TFMT4_32_32_32_32_SINT = 65,
+ TFMT4_X8Z24_UNORM = 71,
+ TFMT4_DXT1 = 86,
+ TFMT4_DXT3 = 87,
+ TFMT4_DXT5 = 88,
+ TFMT4_RGTC1_UNORM = 90,
+ TFMT4_RGTC1_SNORM = 91,
+ TFMT4_RGTC2_UNORM = 94,
+ TFMT4_RGTC2_SNORM = 95,
+ TFMT4_BPTC_UFLOAT = 97,
+ TFMT4_BPTC_FLOAT = 98,
+ TFMT4_BPTC = 99,
+ TFMT4_ATC_RGB = 100,
+ TFMT4_ATC_RGBA_EXPLICIT = 101,
+ TFMT4_ATC_RGBA_INTERPOLATED = 102,
+ TFMT4_ETC2_RG11_UNORM = 103,
+ TFMT4_ETC2_RG11_SNORM = 104,
+ TFMT4_ETC2_R11_UNORM = 105,
+ TFMT4_ETC2_R11_SNORM = 106,
+ TFMT4_ETC1 = 107,
+ TFMT4_ETC2_RGB8 = 108,
+ TFMT4_ETC2_RGBA8 = 109,
+ TFMT4_ETC2_RGB8A1 = 110,
+ TFMT4_ASTC_4x4 = 111,
+ TFMT4_ASTC_5x4 = 112,
+ TFMT4_ASTC_5x5 = 113,
+ TFMT4_ASTC_6x5 = 114,
+ TFMT4_ASTC_6x6 = 115,
+ TFMT4_ASTC_8x5 = 116,
+ TFMT4_ASTC_8x6 = 117,
+ TFMT4_ASTC_8x8 = 118,
+ TFMT4_ASTC_10x5 = 119,
+ TFMT4_ASTC_10x6 = 120,
+ TFMT4_ASTC_10x8 = 121,
+ TFMT4_ASTC_10x10 = 122,
+ TFMT4_ASTC_12x10 = 123,
+ TFMT4_ASTC_12x12 = 124,
+ TFMT4_NONE = 255,
+};
+
+enum a4xx_depth_format {
+ DEPTH4_NONE = 0,
+ DEPTH4_16 = 1,
+ DEPTH4_24_8 = 2,
+ DEPTH4_32 = 3,
+};
+
+enum a4xx_ccu_perfcounter_select {
+ CCU_BUSY_CYCLES = 0,
+ CCU_RB_DEPTH_RETURN_STALL = 2,
+ CCU_RB_COLOR_RETURN_STALL = 3,
+ CCU_DEPTH_BLOCKS = 6,
+ CCU_COLOR_BLOCKS = 7,
+ CCU_DEPTH_BLOCK_HIT = 8,
+ CCU_COLOR_BLOCK_HIT = 9,
+ CCU_DEPTH_FLAG1_COUNT = 10,
+ CCU_DEPTH_FLAG2_COUNT = 11,
+ CCU_DEPTH_FLAG3_COUNT = 12,
+ CCU_DEPTH_FLAG4_COUNT = 13,
+ CCU_COLOR_FLAG1_COUNT = 14,
+ CCU_COLOR_FLAG2_COUNT = 15,
+ CCU_COLOR_FLAG3_COUNT = 16,
+ CCU_COLOR_FLAG4_COUNT = 17,
+ CCU_PARTIAL_BLOCK_READ = 18,
+};
+
+enum a4xx_cp_perfcounter_select {
+ CP_ALWAYS_COUNT = 0,
+ CP_BUSY = 1,
+ CP_PFP_IDLE = 2,
+ CP_PFP_BUSY_WORKING = 3,
+ CP_PFP_STALL_CYCLES_ANY = 4,
+ CP_PFP_STARVE_CYCLES_ANY = 5,
+ CP_PFP_STARVED_PER_LOAD_ADDR = 6,
+ CP_PFP_STALLED_PER_STORE_ADDR = 7,
+ CP_PFP_PC_PROFILE = 8,
+ CP_PFP_MATCH_PM4_PKT_PROFILE = 9,
+ CP_PFP_COND_INDIRECT_DISCARDED = 10,
+ CP_LONG_RESUMPTIONS = 11,
+ CP_RESUME_CYCLES = 12,
+ CP_RESUME_TO_BOUNDARY_CYCLES = 13,
+ CP_LONG_PREEMPTIONS = 14,
+ CP_PREEMPT_CYCLES = 15,
+ CP_PREEMPT_TO_BOUNDARY_CYCLES = 16,
+ CP_ME_FIFO_EMPTY_PFP_IDLE = 17,
+ CP_ME_FIFO_EMPTY_PFP_BUSY = 18,
+ CP_ME_FIFO_NOT_EMPTY_NOT_FULL = 19,
+ CP_ME_FIFO_FULL_ME_BUSY = 20,
+ CP_ME_FIFO_FULL_ME_NON_WORKING = 21,
+ CP_ME_WAITING_FOR_PACKETS = 22,
+ CP_ME_BUSY_WORKING = 23,
+ CP_ME_STARVE_CYCLES_ANY = 24,
+ CP_ME_STARVE_CYCLES_PER_PROFILE = 25,
+ CP_ME_STALL_CYCLES_PER_PROFILE = 26,
+ CP_ME_PC_PROFILE = 27,
+ CP_RCIU_FIFO_EMPTY = 28,
+ CP_RCIU_FIFO_NOT_EMPTY_NOT_FULL = 29,
+ CP_RCIU_FIFO_FULL = 30,
+ CP_RCIU_FIFO_FULL_NO_CONTEXT = 31,
+ CP_RCIU_FIFO_FULL_AHB_MASTER = 32,
+ CP_RCIU_FIFO_FULL_OTHER = 33,
+ CP_AHB_IDLE = 34,
+ CP_AHB_STALL_ON_GRANT_NO_SPLIT = 35,
+ CP_AHB_STALL_ON_GRANT_SPLIT = 36,
+ CP_AHB_STALL_ON_GRANT_SPLIT_PROFILE = 37,
+ CP_AHB_BUSY_WORKING = 38,
+ CP_AHB_BUSY_STALL_ON_HRDY = 39,
+ CP_AHB_BUSY_STALL_ON_HRDY_PROFILE = 40,
+};
+
+enum a4xx_gras_ras_perfcounter_select {
+ RAS_SUPER_TILES = 0,
+ RAS_8X8_TILES = 1,
+ RAS_4X4_TILES = 2,
+ RAS_BUSY_CYCLES = 3,
+ RAS_STALL_CYCLES_BY_RB = 4,
+ RAS_STALL_CYCLES_BY_VSC = 5,
+ RAS_STARVE_CYCLES_BY_TSE = 6,
+ RAS_SUPERTILE_CYCLES = 7,
+ RAS_TILE_CYCLES = 8,
+ RAS_FULLY_COVERED_SUPER_TILES = 9,
+ RAS_FULLY_COVERED_8X8_TILES = 10,
+ RAS_4X4_PRIM = 11,
+ RAS_8X4_4X8_PRIM = 12,
+ RAS_8X8_PRIM = 13,
+};
+
+enum a4xx_gras_tse_perfcounter_select {
+ TSE_INPUT_PRIM = 0,
+ TSE_INPUT_NULL_PRIM = 1,
+ TSE_TRIVAL_REJ_PRIM = 2,
+ TSE_CLIPPED_PRIM = 3,
+ TSE_NEW_PRIM = 4,
+ TSE_ZERO_AREA_PRIM = 5,
+ TSE_FACENESS_CULLED_PRIM = 6,
+ TSE_ZERO_PIXEL_PRIM = 7,
+ TSE_OUTPUT_NULL_PRIM = 8,
+ TSE_OUTPUT_VISIBLE_PRIM = 9,
+ TSE_PRE_CLIP_PRIM = 10,
+ TSE_POST_CLIP_PRIM = 11,
+ TSE_BUSY_CYCLES = 12,
+ TSE_PC_STARVE = 13,
+ TSE_RAS_STALL = 14,
+ TSE_STALL_BARYPLANE_FIFO_FULL = 15,
+ TSE_STALL_ZPLANE_FIFO_FULL = 16,
+};
+
+enum a4xx_hlsq_perfcounter_select {
+ HLSQ_SP_VS_STAGE_CONSTANT = 0,
+ HLSQ_SP_VS_STAGE_INSTRUCTIONS = 1,
+ HLSQ_SP_FS_STAGE_CONSTANT = 2,
+ HLSQ_SP_FS_STAGE_INSTRUCTIONS = 3,
+ HLSQ_TP_STATE = 4,
+ HLSQ_QUADS = 5,
+ HLSQ_PIXELS = 6,
+ HLSQ_VERTICES = 7,
+ HLSQ_SP_VS_STAGE_DATA_BYTES = 13,
+ HLSQ_SP_FS_STAGE_DATA_BYTES = 14,
+ HLSQ_BUSY_CYCLES = 15,
+ HLSQ_STALL_CYCLES_SP_STATE = 16,
+ HLSQ_STALL_CYCLES_SP_VS_STAGE = 17,
+ HLSQ_STALL_CYCLES_SP_FS_STAGE = 18,
+ HLSQ_STALL_CYCLES_UCHE = 19,
+ HLSQ_RBBM_LOAD_CYCLES = 20,
+ HLSQ_DI_TO_VS_START_SP = 21,
+ HLSQ_DI_TO_FS_START_SP = 22,
+ HLSQ_VS_STAGE_START_TO_DONE_SP = 23,
+ HLSQ_FS_STAGE_START_TO_DONE_SP = 24,
+ HLSQ_SP_STATE_COPY_CYCLES_VS_STAGE = 25,
+ HLSQ_SP_STATE_COPY_CYCLES_FS_STAGE = 26,
+ HLSQ_UCHE_LATENCY_CYCLES = 27,
+ HLSQ_UCHE_LATENCY_COUNT = 28,
+ HLSQ_STARVE_CYCLES_VFD = 29,
+};
+
+enum a4xx_pc_perfcounter_select {
+ PC_VIS_STREAMS_LOADED = 0,
+ PC_VPC_PRIMITIVES = 2,
+ PC_DEAD_PRIM = 3,
+ PC_LIVE_PRIM = 4,
+ PC_DEAD_DRAWCALLS = 5,
+ PC_LIVE_DRAWCALLS = 6,
+ PC_VERTEX_MISSES = 7,
+ PC_STALL_CYCLES_VFD = 9,
+ PC_STALL_CYCLES_TSE = 10,
+ PC_STALL_CYCLES_UCHE = 11,
+ PC_WORKING_CYCLES = 12,
+ PC_IA_VERTICES = 13,
+ PC_GS_PRIMITIVES = 14,
+ PC_HS_INVOCATIONS = 15,
+ PC_DS_INVOCATIONS = 16,
+ PC_DS_PRIMITIVES = 17,
+ PC_STARVE_CYCLES_FOR_INDEX = 20,
+ PC_STARVE_CYCLES_FOR_TESS_FACTOR = 21,
+ PC_STARVE_CYCLES_FOR_VIZ_STREAM = 22,
+ PC_STALL_CYCLES_TESS = 23,
+ PC_STARVE_CYCLES_FOR_POSITION = 24,
+ PC_MODE0_DRAWCALL = 25,
+ PC_MODE1_DRAWCALL = 26,
+ PC_MODE2_DRAWCALL = 27,
+ PC_MODE3_DRAWCALL = 28,
+ PC_MODE4_DRAWCALL = 29,
+ PC_PREDICATED_DEAD_DRAWCALL = 30,
+ PC_STALL_CYCLES_BY_TSE_ONLY = 31,
+ PC_STALL_CYCLES_BY_VPC_ONLY = 32,
+ PC_VPC_POS_DATA_TRANSACTION = 33,
+ PC_BUSY_CYCLES = 34,
+ PC_STARVE_CYCLES_DI = 35,
+ PC_STALL_CYCLES_VPC = 36,
+ TESS_WORKING_CYCLES = 37,
+ TESS_NUM_CYCLES_SETUP_WORKING = 38,
+ TESS_NUM_CYCLES_PTGEN_WORKING = 39,
+ TESS_NUM_CYCLES_CONNGEN_WORKING = 40,
+ TESS_BUSY_CYCLES = 41,
+ TESS_STARVE_CYCLES_PC = 42,
+ TESS_STALL_CYCLES_PC = 43,
+};
+
+enum a4xx_pwr_perfcounter_select {
+ PWR_CORE_CLOCK_CYCLES = 0,
+ PWR_BUSY_CLOCK_CYCLES = 1,
+};
+
+enum a4xx_rb_perfcounter_select {
+ RB_BUSY_CYCLES = 0,
+ RB_BUSY_CYCLES_BINNING = 1,
+ RB_BUSY_CYCLES_RENDERING = 2,
+ RB_BUSY_CYCLES_RESOLVE = 3,
+ RB_STARVE_CYCLES_BY_SP = 4,
+ RB_STARVE_CYCLES_BY_RAS = 5,
+ RB_STARVE_CYCLES_BY_MARB = 6,
+ RB_STALL_CYCLES_BY_MARB = 7,
+ RB_STALL_CYCLES_BY_HLSQ = 8,
+ RB_RB_RB_MARB_DATA = 9,
+ RB_SP_RB_QUAD = 10,
+ RB_RAS_RB_Z_QUADS = 11,
+ RB_GMEM_CH0_READ = 12,
+ RB_GMEM_CH1_READ = 13,
+ RB_GMEM_CH0_WRITE = 14,
+ RB_GMEM_CH1_WRITE = 15,
+ RB_CP_CONTEXT_DONE = 16,
+ RB_CP_CACHE_FLUSH = 17,
+ RB_CP_ZPASS_DONE = 18,
+ RB_STALL_FIFO0_FULL = 19,
+ RB_STALL_FIFO1_FULL = 20,
+ RB_STALL_FIFO2_FULL = 21,
+ RB_STALL_FIFO3_FULL = 22,
+ RB_RB_HLSQ_TRANSACTIONS = 23,
+ RB_Z_READ = 24,
+ RB_Z_WRITE = 25,
+ RB_C_READ = 26,
+ RB_C_WRITE = 27,
+ RB_C_READ_LATENCY = 28,
+ RB_Z_READ_LATENCY = 29,
+ RB_STALL_BY_UCHE = 30,
+ RB_MARB_UCHE_TRANSACTIONS = 31,
+ RB_CACHE_STALL_MISS = 32,
+ RB_CACHE_STALL_FIFO_FULL = 33,
+ RB_8BIT_BLENDER_UNITS_ACTIVE = 34,
+ RB_16BIT_BLENDER_UNITS_ACTIVE = 35,
+ RB_SAMPLER_UNITS_ACTIVE = 36,
+ RB_TOTAL_PASS = 38,
+ RB_Z_PASS = 39,
+ RB_Z_FAIL = 40,
+ RB_S_FAIL = 41,
+ RB_POWER0 = 42,
+ RB_POWER1 = 43,
+ RB_POWER2 = 44,
+ RB_POWER3 = 45,
+ RB_POWER4 = 46,
+ RB_POWER5 = 47,
+ RB_POWER6 = 48,
+ RB_POWER7 = 49,
+};
+
+enum a4xx_rbbm_perfcounter_select {
+ RBBM_ALWAYS_ON = 0,
+ RBBM_VBIF_BUSY = 1,
+ RBBM_TSE_BUSY = 2,
+ RBBM_RAS_BUSY = 3,
+ RBBM_PC_DCALL_BUSY = 4,
+ RBBM_PC_VSD_BUSY = 5,
+ RBBM_VFD_BUSY = 6,
+ RBBM_VPC_BUSY = 7,
+ RBBM_UCHE_BUSY = 8,
+ RBBM_VSC_BUSY = 9,
+ RBBM_HLSQ_BUSY = 10,
+ RBBM_ANY_RB_BUSY = 11,
+ RBBM_ANY_TPL1_BUSY = 12,
+ RBBM_ANY_SP_BUSY = 13,
+ RBBM_ANY_MARB_BUSY = 14,
+ RBBM_ANY_ARB_BUSY = 15,
+ RBBM_AHB_STATUS_BUSY = 16,
+ RBBM_AHB_STATUS_STALLED = 17,
+ RBBM_AHB_STATUS_TXFR = 18,
+ RBBM_AHB_STATUS_TXFR_SPLIT = 19,
+ RBBM_AHB_STATUS_TXFR_ERROR = 20,
+ RBBM_AHB_STATUS_LONG_STALL = 21,
+ RBBM_STATUS_MASKED = 22,
+ RBBM_CP_BUSY_GFX_CORE_IDLE = 23,
+ RBBM_TESS_BUSY = 24,
+ RBBM_COM_BUSY = 25,
+ RBBM_DCOM_BUSY = 32,
+ RBBM_ANY_CCU_BUSY = 33,
+ RBBM_DPM_BUSY = 34,
+};
+
+enum a4xx_sp_perfcounter_select {
+ SP_LM_LOAD_INSTRUCTIONS = 0,
+ SP_LM_STORE_INSTRUCTIONS = 1,
+ SP_LM_ATOMICS = 2,
+ SP_GM_LOAD_INSTRUCTIONS = 3,
+ SP_GM_STORE_INSTRUCTIONS = 4,
+ SP_GM_ATOMICS = 5,
+ SP_VS_STAGE_TEX_INSTRUCTIONS = 6,
+ SP_VS_STAGE_CFLOW_INSTRUCTIONS = 7,
+ SP_VS_STAGE_EFU_INSTRUCTIONS = 8,
+ SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 9,
+ SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 10,
+ SP_FS_STAGE_TEX_INSTRUCTIONS = 11,
+ SP_FS_STAGE_CFLOW_INSTRUCTIONS = 12,
+ SP_FS_STAGE_EFU_INSTRUCTIONS = 13,
+ SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 14,
+ SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 15,
+ SP_VS_INSTRUCTIONS = 17,
+ SP_FS_INSTRUCTIONS = 18,
+ SP_ADDR_LOCK_COUNT = 19,
+ SP_UCHE_READ_TRANS = 20,
+ SP_UCHE_WRITE_TRANS = 21,
+ SP_EXPORT_VPC_TRANS = 22,
+ SP_EXPORT_RB_TRANS = 23,
+ SP_PIXELS_KILLED = 24,
+ SP_ICL1_REQUESTS = 25,
+ SP_ICL1_MISSES = 26,
+ SP_ICL0_REQUESTS = 27,
+ SP_ICL0_MISSES = 28,
+ SP_ALU_WORKING_CYCLES = 29,
+ SP_EFU_WORKING_CYCLES = 30,
+ SP_STALL_CYCLES_BY_VPC = 31,
+ SP_STALL_CYCLES_BY_TP = 32,
+ SP_STALL_CYCLES_BY_UCHE = 33,
+ SP_STALL_CYCLES_BY_RB = 34,
+ SP_BUSY_CYCLES = 35,
+ SP_HS_INSTRUCTIONS = 36,
+ SP_DS_INSTRUCTIONS = 37,
+ SP_GS_INSTRUCTIONS = 38,
+ SP_CS_INSTRUCTIONS = 39,
+ SP_SCHEDULER_NON_WORKING = 40,
+ SP_WAVE_CONTEXTS = 41,
+ SP_WAVE_CONTEXT_CYCLES = 42,
+ SP_POWER0 = 43,
+ SP_POWER1 = 44,
+ SP_POWER2 = 45,
+ SP_POWER3 = 46,
+ SP_POWER4 = 47,
+ SP_POWER5 = 48,
+ SP_POWER6 = 49,
+ SP_POWER7 = 50,
+ SP_POWER8 = 51,
+ SP_POWER9 = 52,
+ SP_POWER10 = 53,
+ SP_POWER11 = 54,
+ SP_POWER12 = 55,
+ SP_POWER13 = 56,
+ SP_POWER14 = 57,
+ SP_POWER15 = 58,
+};
+
+enum a4xx_tp_perfcounter_select {
+ TP_L1_REQUESTS = 0,
+ TP_L1_MISSES = 1,
+ TP_QUADS_OFFSET = 8,
+ TP_QUAD_SHADOW = 9,
+ TP_QUADS_ARRAY = 10,
+ TP_QUADS_GRADIENT = 11,
+ TP_QUADS_1D2D = 12,
+ TP_QUADS_3DCUBE = 13,
+ TP_BUSY_CYCLES = 16,
+ TP_STALL_CYCLES_BY_ARB = 17,
+ TP_STATE_CACHE_REQUESTS = 20,
+ TP_STATE_CACHE_MISSES = 21,
+ TP_POWER0 = 22,
+ TP_POWER1 = 23,
+ TP_POWER2 = 24,
+ TP_POWER3 = 25,
+ TP_POWER4 = 26,
+ TP_POWER5 = 27,
+ TP_POWER6 = 28,
+ TP_POWER7 = 29,
+};
+
+enum a4xx_uche_perfcounter_select {
+ UCHE_VBIF_READ_BEATS_TP = 0,
+ UCHE_VBIF_READ_BEATS_VFD = 1,
+ UCHE_VBIF_READ_BEATS_HLSQ = 2,
+ UCHE_VBIF_READ_BEATS_MARB = 3,
+ UCHE_VBIF_READ_BEATS_SP = 4,
+ UCHE_READ_REQUESTS_TP = 5,
+ UCHE_READ_REQUESTS_VFD = 6,
+ UCHE_READ_REQUESTS_HLSQ = 7,
+ UCHE_READ_REQUESTS_MARB = 8,
+ UCHE_READ_REQUESTS_SP = 9,
+ UCHE_WRITE_REQUESTS_MARB = 10,
+ UCHE_WRITE_REQUESTS_SP = 11,
+ UCHE_TAG_CHECK_FAILS = 12,
+ UCHE_EVICTS = 13,
+ UCHE_FLUSHES = 14,
+ UCHE_VBIF_LATENCY_CYCLES = 15,
+ UCHE_VBIF_LATENCY_SAMPLES = 16,
+ UCHE_BUSY_CYCLES = 17,
+ UCHE_VBIF_READ_BEATS_PC = 18,
+ UCHE_READ_REQUESTS_PC = 19,
+ UCHE_WRITE_REQUESTS_VPC = 20,
+ UCHE_STALL_BY_VBIF = 21,
+ UCHE_WRITE_REQUESTS_VSC = 22,
+ UCHE_POWER0 = 23,
+ UCHE_POWER1 = 24,
+ UCHE_POWER2 = 25,
+ UCHE_POWER3 = 26,
+ UCHE_POWER4 = 27,
+ UCHE_POWER5 = 28,
+ UCHE_POWER6 = 29,
+ UCHE_POWER7 = 30,
+};
+
+enum a4xx_vbif_perfcounter_select {
+ AXI_READ_REQUESTS_ID_0 = 0,
+ AXI_READ_REQUESTS_ID_1 = 1,
+ AXI_READ_REQUESTS_ID_2 = 2,
+ AXI_READ_REQUESTS_ID_3 = 3,
+ AXI_READ_REQUESTS_ID_4 = 4,
+ AXI_READ_REQUESTS_ID_5 = 5,
+ AXI_READ_REQUESTS_ID_6 = 6,
+ AXI_READ_REQUESTS_ID_7 = 7,
+ AXI_READ_REQUESTS_ID_8 = 8,
+ AXI_READ_REQUESTS_ID_9 = 9,
+ AXI_READ_REQUESTS_ID_10 = 10,
+ AXI_READ_REQUESTS_ID_11 = 11,
+ AXI_READ_REQUESTS_ID_12 = 12,
+ AXI_READ_REQUESTS_ID_13 = 13,
+ AXI_READ_REQUESTS_ID_14 = 14,
+ AXI_READ_REQUESTS_ID_15 = 15,
+ AXI0_READ_REQUESTS_TOTAL = 16,
+ AXI1_READ_REQUESTS_TOTAL = 17,
+ AXI2_READ_REQUESTS_TOTAL = 18,
+ AXI3_READ_REQUESTS_TOTAL = 19,
+ AXI_READ_REQUESTS_TOTAL = 20,
+ AXI_WRITE_REQUESTS_ID_0 = 21,
+ AXI_WRITE_REQUESTS_ID_1 = 22,
+ AXI_WRITE_REQUESTS_ID_2 = 23,
+ AXI_WRITE_REQUESTS_ID_3 = 24,
+ AXI_WRITE_REQUESTS_ID_4 = 25,
+ AXI_WRITE_REQUESTS_ID_5 = 26,
+ AXI_WRITE_REQUESTS_ID_6 = 27,
+ AXI_WRITE_REQUESTS_ID_7 = 28,
+ AXI_WRITE_REQUESTS_ID_8 = 29,
+ AXI_WRITE_REQUESTS_ID_9 = 30,
+ AXI_WRITE_REQUESTS_ID_10 = 31,
+ AXI_WRITE_REQUESTS_ID_11 = 32,
+ AXI_WRITE_REQUESTS_ID_12 = 33,
+ AXI_WRITE_REQUESTS_ID_13 = 34,
+ AXI_WRITE_REQUESTS_ID_14 = 35,
+ AXI_WRITE_REQUESTS_ID_15 = 36,
+ AXI0_WRITE_REQUESTS_TOTAL = 37,
+ AXI1_WRITE_REQUESTS_TOTAL = 38,
+ AXI2_WRITE_REQUESTS_TOTAL = 39,
+ AXI3_WRITE_REQUESTS_TOTAL = 40,
+ AXI_WRITE_REQUESTS_TOTAL = 41,
+ AXI_TOTAL_REQUESTS = 42,
+ AXI_READ_DATA_BEATS_ID_0 = 43,
+ AXI_READ_DATA_BEATS_ID_1 = 44,
+ AXI_READ_DATA_BEATS_ID_2 = 45,
+ AXI_READ_DATA_BEATS_ID_3 = 46,
+ AXI_READ_DATA_BEATS_ID_4 = 47,
+ AXI_READ_DATA_BEATS_ID_5 = 48,
+ AXI_READ_DATA_BEATS_ID_6 = 49,
+ AXI_READ_DATA_BEATS_ID_7 = 50,
+ AXI_READ_DATA_BEATS_ID_8 = 51,
+ AXI_READ_DATA_BEATS_ID_9 = 52,
+ AXI_READ_DATA_BEATS_ID_10 = 53,
+ AXI_READ_DATA_BEATS_ID_11 = 54,
+ AXI_READ_DATA_BEATS_ID_12 = 55,
+ AXI_READ_DATA_BEATS_ID_13 = 56,
+ AXI_READ_DATA_BEATS_ID_14 = 57,
+ AXI_READ_DATA_BEATS_ID_15 = 58,
+ AXI0_READ_DATA_BEATS_TOTAL = 59,
+ AXI1_READ_DATA_BEATS_TOTAL = 60,
+ AXI2_READ_DATA_BEATS_TOTAL = 61,
+ AXI3_READ_DATA_BEATS_TOTAL = 62,
+ AXI_READ_DATA_BEATS_TOTAL = 63,
+ AXI_WRITE_DATA_BEATS_ID_0 = 64,
+ AXI_WRITE_DATA_BEATS_ID_1 = 65,
+ AXI_WRITE_DATA_BEATS_ID_2 = 66,
+ AXI_WRITE_DATA_BEATS_ID_3 = 67,
+ AXI_WRITE_DATA_BEATS_ID_4 = 68,
+ AXI_WRITE_DATA_BEATS_ID_5 = 69,
+ AXI_WRITE_DATA_BEATS_ID_6 = 70,
+ AXI_WRITE_DATA_BEATS_ID_7 = 71,
+ AXI_WRITE_DATA_BEATS_ID_8 = 72,
+ AXI_WRITE_DATA_BEATS_ID_9 = 73,
+ AXI_WRITE_DATA_BEATS_ID_10 = 74,
+ AXI_WRITE_DATA_BEATS_ID_11 = 75,
+ AXI_WRITE_DATA_BEATS_ID_12 = 76,
+ AXI_WRITE_DATA_BEATS_ID_13 = 77,
+ AXI_WRITE_DATA_BEATS_ID_14 = 78,
+ AXI_WRITE_DATA_BEATS_ID_15 = 79,
+ AXI0_WRITE_DATA_BEATS_TOTAL = 80,
+ AXI1_WRITE_DATA_BEATS_TOTAL = 81,
+ AXI2_WRITE_DATA_BEATS_TOTAL = 82,
+ AXI3_WRITE_DATA_BEATS_TOTAL = 83,
+ AXI_WRITE_DATA_BEATS_TOTAL = 84,
+ AXI_DATA_BEATS_TOTAL = 85,
+ CYCLES_HELD_OFF_ID_0 = 86,
+ CYCLES_HELD_OFF_ID_1 = 87,
+ CYCLES_HELD_OFF_ID_2 = 88,
+ CYCLES_HELD_OFF_ID_3 = 89,
+ CYCLES_HELD_OFF_ID_4 = 90,
+ CYCLES_HELD_OFF_ID_5 = 91,
+ CYCLES_HELD_OFF_ID_6 = 92,
+ CYCLES_HELD_OFF_ID_7 = 93,
+ CYCLES_HELD_OFF_ID_8 = 94,
+ CYCLES_HELD_OFF_ID_9 = 95,
+ CYCLES_HELD_OFF_ID_10 = 96,
+ CYCLES_HELD_OFF_ID_11 = 97,
+ CYCLES_HELD_OFF_ID_12 = 98,
+ CYCLES_HELD_OFF_ID_13 = 99,
+ CYCLES_HELD_OFF_ID_14 = 100,
+ CYCLES_HELD_OFF_ID_15 = 101,
+ AXI_READ_REQUEST_HELD_OFF = 102,
+ AXI_WRITE_REQUEST_HELD_OFF = 103,
+ AXI_REQUEST_HELD_OFF = 104,
+ AXI_WRITE_DATA_HELD_OFF = 105,
+ OCMEM_AXI_READ_REQUEST_HELD_OFF = 106,
+ OCMEM_AXI_WRITE_REQUEST_HELD_OFF = 107,
+ OCMEM_AXI_REQUEST_HELD_OFF = 108,
+ OCMEM_AXI_WRITE_DATA_HELD_OFF = 109,
+ ELAPSED_CYCLES_DDR = 110,
+ ELAPSED_CYCLES_OCMEM = 111,
+};
+
+enum a4xx_vfd_perfcounter_select {
+ VFD_UCHE_BYTE_FETCHED = 0,
+ VFD_UCHE_TRANS = 1,
+ VFD_FETCH_INSTRUCTIONS = 3,
+ VFD_BUSY_CYCLES = 5,
+ VFD_STALL_CYCLES_UCHE = 6,
+ VFD_STALL_CYCLES_HLSQ = 7,
+ VFD_STALL_CYCLES_VPC_BYPASS = 8,
+ VFD_STALL_CYCLES_VPC_ALLOC = 9,
+ VFD_MODE_0_FIBERS = 13,
+ VFD_MODE_1_FIBERS = 14,
+ VFD_MODE_2_FIBERS = 15,
+ VFD_MODE_3_FIBERS = 16,
+ VFD_MODE_4_FIBERS = 17,
+ VFD_BFIFO_STALL = 18,
+ VFD_NUM_VERTICES_TOTAL = 19,
+ VFD_PACKER_FULL = 20,
+ VFD_UCHE_REQUEST_FIFO_FULL = 21,
+ VFD_STARVE_CYCLES_PC = 22,
+ VFD_STARVE_CYCLES_UCHE = 23,
+};
+
+enum a4xx_vpc_perfcounter_select {
+ VPC_SP_LM_COMPONENTS = 2,
+ VPC_SP0_LM_BYTES = 3,
+ VPC_SP1_LM_BYTES = 4,
+ VPC_SP2_LM_BYTES = 5,
+ VPC_SP3_LM_BYTES = 6,
+ VPC_WORKING_CYCLES = 7,
+ VPC_STALL_CYCLES_LM = 8,
+ VPC_STARVE_CYCLES_RAS = 9,
+ VPC_STREAMOUT_CYCLES = 10,
+ VPC_UCHE_TRANSACTIONS = 12,
+ VPC_STALL_CYCLES_UCHE = 13,
+ VPC_BUSY_CYCLES = 14,
+ VPC_STARVE_CYCLES_SP = 15,
+};
+
+enum a4xx_vsc_perfcounter_select {
+ VSC_BUSY_CYCLES = 0,
+ VSC_WORKING_CYCLES = 1,
+ VSC_STALL_CYCLES_UCHE = 2,
+ VSC_STARVE_CYCLES_RAS = 3,
+ VSC_EOT_NUM = 4,
+};
+
+enum a4xx_tex_filter {
+ A4XX_TEX_NEAREST = 0,
+ A4XX_TEX_LINEAR = 1,
+ A4XX_TEX_ANISO = 2,
+};
+
+enum a4xx_tex_clamp {
+ A4XX_TEX_REPEAT = 0,
+ A4XX_TEX_CLAMP_TO_EDGE = 1,
+ A4XX_TEX_MIRROR_REPEAT = 2,
+ A4XX_TEX_CLAMP_TO_BORDER = 3,
+ A4XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a4xx_tex_aniso {
+ A4XX_TEX_ANISO_1 = 0,
+ A4XX_TEX_ANISO_2 = 1,
+ A4XX_TEX_ANISO_4 = 2,
+ A4XX_TEX_ANISO_8 = 3,
+ A4XX_TEX_ANISO_16 = 4,
+};
+
+enum a4xx_tex_swiz {
+ A4XX_TEX_X = 0,
+ A4XX_TEX_Y = 1,
+ A4XX_TEX_Z = 2,
+ A4XX_TEX_W = 3,
+ A4XX_TEX_ZERO = 4,
+ A4XX_TEX_ONE = 5,
+};
+
+enum a4xx_tex_type {
+ A4XX_TEX_1D = 0,
+ A4XX_TEX_2D = 1,
+ A4XX_TEX_CUBE = 2,
+ A4XX_TEX_3D = 3,
+ A4XX_TEX_BUFFER = 4,
+};
+
+#define A4XX_CGC_HLSQ_EARLY_CYC__MASK 0x00700000
+#define A4XX_CGC_HLSQ_EARLY_CYC__SHIFT 20
+static inline uint32_t A4XX_CGC_HLSQ_EARLY_CYC(uint32_t val)
+{
+ return ((val) << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT) & A4XX_CGC_HLSQ_EARLY_CYC__MASK;
+}
+#define A4XX_INT0_RBBM_GPU_IDLE 0x00000001
+#define A4XX_INT0_RBBM_AHB_ERROR 0x00000002
+#define A4XX_INT0_RBBM_REG_TIMEOUT 0x00000004
+#define A4XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A4XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A4XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00000020
+#define A4XX_INT0_VFD_ERROR 0x00000040
+#define A4XX_INT0_CP_SW_INT 0x00000080
+#define A4XX_INT0_CP_T0_PACKET_IN_IB 0x00000100
+#define A4XX_INT0_CP_OPCODE_ERROR 0x00000200
+#define A4XX_INT0_CP_RESERVED_BIT_ERROR 0x00000400
+#define A4XX_INT0_CP_HW_FAULT 0x00000800
+#define A4XX_INT0_CP_DMA 0x00001000
+#define A4XX_INT0_CP_IB2_INT 0x00002000
+#define A4XX_INT0_CP_IB1_INT 0x00004000
+#define A4XX_INT0_CP_RB_INT 0x00008000
+#define A4XX_INT0_CP_REG_PROTECT_FAULT 0x00010000
+#define A4XX_INT0_CP_RB_DONE_TS 0x00020000
+#define A4XX_INT0_CP_VS_DONE_TS 0x00040000
+#define A4XX_INT0_CP_PS_DONE_TS 0x00080000
+#define A4XX_INT0_CACHE_FLUSH_TS 0x00100000
+#define A4XX_INT0_CP_AHB_ERROR_HALT 0x00200000
+#define A4XX_INT0_MISC_HANG_DETECT 0x01000000
+#define A4XX_INT0_UCHE_OOB_ACCESS 0x02000000
+#define REG_A4XX_RB_GMEM_BASE_ADDR 0x00000cc0
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_0 0x00000cc7
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_1 0x00000cc8
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_2 0x00000cc9
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_3 0x00000cca
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_4 0x00000ccb
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_5 0x00000ccc
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_6 0x00000ccd
+
+#define REG_A4XX_RB_PERFCTR_RB_SEL_7 0x00000cce
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_0 0x00000ccf
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_1 0x00000cd0
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_2 0x00000cd1
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_3 0x00000cd2
+
+#define REG_A4XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0
+#define A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK 0x00003fff
+#define A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT 0
+static inline uint32_t A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val)
+{
+ return ((val) << A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK;
+}
+#define A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK 0x3fff0000
+#define A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT 16
+static inline uint32_t A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val)
+{
+ return ((val) << A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK;
+}
+
+#define REG_A4XX_RB_CLEAR_COLOR_DW0 0x000020cc
+
+#define REG_A4XX_RB_CLEAR_COLOR_DW1 0x000020cd
+
+#define REG_A4XX_RB_CLEAR_COLOR_DW2 0x000020ce
+
+#define REG_A4XX_RB_CLEAR_COLOR_DW3 0x000020cf
+
+#define REG_A4XX_RB_MODE_CONTROL 0x000020a0
+#define A4XX_RB_MODE_CONTROL_WIDTH__MASK 0x0000003f
+#define A4XX_RB_MODE_CONTROL_WIDTH__SHIFT 0
+static inline uint32_t A4XX_RB_MODE_CONTROL_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A4XX_RB_MODE_CONTROL_WIDTH__SHIFT) & A4XX_RB_MODE_CONTROL_WIDTH__MASK;
+}
+#define A4XX_RB_MODE_CONTROL_HEIGHT__MASK 0x00003f00
+#define A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT 8
+static inline uint32_t A4XX_RB_MODE_CONTROL_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK;
+}
+#define A4XX_RB_MODE_CONTROL_ENABLE_GMEM 0x00010000
+
+#define REG_A4XX_RB_RENDER_CONTROL 0x000020a1
+#define A4XX_RB_RENDER_CONTROL_BINNING_PASS 0x00000001
+#define A4XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00000020
+
+#define REG_A4XX_RB_MSAA_CONTROL 0x000020a2
+#define A4XX_RB_MSAA_CONTROL_DISABLE 0x00001000
+#define A4XX_RB_MSAA_CONTROL_SAMPLES__MASK 0x0000e000
+#define A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT 13
+static inline uint32_t A4XX_RB_MSAA_CONTROL_SAMPLES(uint32_t val)
+{
+ return ((val) << A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A4XX_RB_MSAA_CONTROL_SAMPLES__MASK;
+}
+
+#define REG_A4XX_RB_RENDER_CONTROL2 0x000020a3
+#define A4XX_RB_RENDER_CONTROL2_COORD_MASK__MASK 0x0000000f
+#define A4XX_RB_RENDER_CONTROL2_COORD_MASK__SHIFT 0
+static inline uint32_t A4XX_RB_RENDER_CONTROL2_COORD_MASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_CONTROL2_COORD_MASK__SHIFT) & A4XX_RB_RENDER_CONTROL2_COORD_MASK__MASK;
+}
+#define A4XX_RB_RENDER_CONTROL2_SAMPLEMASK 0x00000010
+#define A4XX_RB_RENDER_CONTROL2_FACENESS 0x00000020
+#define A4XX_RB_RENDER_CONTROL2_SAMPLEID 0x00000040
+#define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK 0x00000380
+#define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT 7
+static inline uint32_t A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT) & A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK;
+}
+#define A4XX_RB_RENDER_CONTROL2_SAMPLEID_HR 0x00000800
+#define A4XX_RB_RENDER_CONTROL2_IJ_PERSP_PIXEL 0x00001000
+#define A4XX_RB_RENDER_CONTROL2_IJ_PERSP_CENTROID 0x00002000
+#define A4XX_RB_RENDER_CONTROL2_IJ_PERSP_SAMPLE 0x00004000
+#define A4XX_RB_RENDER_CONTROL2_SIZE 0x00008000
+
+static inline uint32_t REG_A4XX_RB_MRT(uint32_t i0) { return 0x000020a4 + 0x5*i0; }
+
+static inline uint32_t REG_A4XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020a4 + 0x5*i0; }
+#define A4XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
+#define A4XX_RB_MRT_CONTROL_BLEND 0x00000010
+#define A4XX_RB_MRT_CONTROL_BLEND2 0x00000020
+#define A4XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000040
+#define A4XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00
+#define A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8
+static inline uint32_t A4XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+ return ((val) << A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A4XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
+#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000
+#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24
+static inline uint32_t A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A4XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020a5 + 0x5*i0; }
+#define A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000003f
+#define A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a4xx_color_fmt val)
+{
+ return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x000000c0
+#define A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 6
+static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a4xx_tile_mode val)
+{
+ return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK 0x00000600
+#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT 9
+static inline uint32_t A4XX_RB_MRT_BUF_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT) & A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK;
+}
+#define A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00001800
+#define A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 11
+static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+#define A4XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00002000
+#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xffffc000
+#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 14
+static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
+{
+ return ((val >> 4) << A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
+}
+
+static inline uint32_t REG_A4XX_RB_MRT_BASE(uint32_t i0) { return 0x000020a6 + 0x5*i0; }
+
+static inline uint32_t REG_A4XX_RB_MRT_CONTROL3(uint32_t i0) { return 0x000020a7 + 0x5*i0; }
+#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK 0x03fffff8
+#define A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT 3
+static inline uint32_t A4XX_RB_MRT_CONTROL3_STRIDE(uint32_t val)
+{
+ return ((val) << A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT) & A4XX_RB_MRT_CONTROL3_STRIDE__MASK;
+}
+
+static inline uint32_t REG_A4XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020a8 + 0x5*i0; }
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_RED 0x000020f0
+#define A4XX_RB_BLEND_RED_UINT__MASK 0x000000ff
+#define A4XX_RB_BLEND_RED_UINT__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_RED_UINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_RED_UINT__SHIFT) & A4XX_RB_BLEND_RED_UINT__MASK;
+}
+#define A4XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_RED_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_RED_SINT__SHIFT) & A4XX_RB_BLEND_RED_SINT__MASK;
+}
+#define A4XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
+#define A4XX_RB_BLEND_RED_FLOAT__SHIFT 16
+static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_RED_FLOAT__SHIFT) & A4XX_RB_BLEND_RED_FLOAT__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_RED_F32 0x000020f1
+#define A4XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_RED_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_RED_F32__SHIFT) & A4XX_RB_BLEND_RED_F32__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_GREEN 0x000020f2
+#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
+#define A4XX_RB_BLEND_GREEN_UINT__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_GREEN_UINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_GREEN_UINT__SHIFT) & A4XX_RB_BLEND_GREEN_UINT__MASK;
+}
+#define A4XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_GREEN_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_GREEN_SINT__SHIFT) & A4XX_RB_BLEND_GREEN_SINT__MASK;
+}
+#define A4XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
+#define A4XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
+static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A4XX_RB_BLEND_GREEN_FLOAT__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_GREEN_F32 0x000020f3
+#define A4XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_GREEN_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_GREEN_F32__SHIFT) & A4XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_BLUE 0x000020f4
+#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
+#define A4XX_RB_BLEND_BLUE_UINT__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_BLUE_UINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_BLUE_UINT__SHIFT) & A4XX_RB_BLEND_BLUE_UINT__MASK;
+}
+#define A4XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_BLUE_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_BLUE_SINT__SHIFT) & A4XX_RB_BLEND_BLUE_SINT__MASK;
+}
+#define A4XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
+#define A4XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
+static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A4XX_RB_BLEND_BLUE_FLOAT__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_BLUE_F32 0x000020f5
+#define A4XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_BLUE_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_BLUE_F32__SHIFT) & A4XX_RB_BLEND_BLUE_F32__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_ALPHA 0x000020f6
+#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
+#define A4XX_RB_BLEND_ALPHA_UINT__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_ALPHA_UINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_ALPHA_UINT__SHIFT) & A4XX_RB_BLEND_ALPHA_UINT__MASK;
+}
+#define A4XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_ALPHA_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_ALPHA_SINT__SHIFT) & A4XX_RB_BLEND_ALPHA_SINT__MASK;
+}
+#define A4XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
+#define A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
+static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A4XX_RB_BLEND_ALPHA_FLOAT__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_ALPHA_F32 0x000020f7
+#define A4XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_ALPHA_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_ALPHA_F32__SHIFT) & A4XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
+#define REG_A4XX_RB_ALPHA_CONTROL 0x000020f8
+#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
+#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
+static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
+{
+ return ((val) << A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
+}
+#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
+#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
+#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
+static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A4XX_RB_FS_OUTPUT 0x000020f9
+#define A4XX_RB_FS_OUTPUT_ENABLE_BLEND__MASK 0x000000ff
+#define A4XX_RB_FS_OUTPUT_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A4XX_RB_FS_OUTPUT_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A4XX_RB_FS_OUTPUT_ENABLE_BLEND__SHIFT) & A4XX_RB_FS_OUTPUT_ENABLE_BLEND__MASK;
+}
+#define A4XX_RB_FS_OUTPUT_INDEPENDENT_BLEND 0x00000100
+#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK 0xffff0000
+#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT) & A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK;
+}
+
+#define REG_A4XX_RB_SAMPLE_COUNT_CONTROL 0x000020fa
+#define A4XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
+#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK 0xfffffffc
+#define A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT 2
+static inline uint32_t A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR(uint32_t val)
+{
+ return ((val >> 2) << A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__SHIFT) & A4XX_RB_SAMPLE_COUNT_CONTROL_ADDR__MASK;
+}
+
+#define REG_A4XX_RB_RENDER_COMPONENTS 0x000020fb
+#define A4XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f
+#define A4XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT0(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT0__MASK;
+}
+#define A4XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0
+#define A4XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT1(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT1__MASK;
+}
+#define A4XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00
+#define A4XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT2(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT2__MASK;
+}
+#define A4XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000
+#define A4XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT3(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT3__MASK;
+}
+#define A4XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000
+#define A4XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT4(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT4__MASK;
+}
+#define A4XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000
+#define A4XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT5(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT5__MASK;
+}
+#define A4XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000
+#define A4XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT6(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT6__MASK;
+}
+#define A4XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000
+#define A4XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT7(uint32_t val)
+{
+ return ((val) << A4XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT7__MASK;
+}
+
+#define REG_A4XX_RB_COPY_CONTROL 0x000020fc
+#define A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003
+#define A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT 0
+static inline uint32_t A4XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val)
+{
+ return ((val) << A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK;
+}
+#define A4XX_RB_COPY_CONTROL_MODE__MASK 0x00000070
+#define A4XX_RB_COPY_CONTROL_MODE__SHIFT 4
+static inline uint32_t A4XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
+{
+ return ((val) << A4XX_RB_COPY_CONTROL_MODE__SHIFT) & A4XX_RB_COPY_CONTROL_MODE__MASK;
+}
+#define A4XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00
+#define A4XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8
+static inline uint32_t A4XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
+{
+ return ((val) << A4XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A4XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
+}
+#define A4XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
+#define A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
+static inline uint32_t A4XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
+{
+ return ((val >> 14) << A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A4XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
+}
+
+#define REG_A4XX_RB_COPY_DEST_BASE 0x000020fd
+#define A4XX_RB_COPY_DEST_BASE_BASE__MASK 0xffffffe0
+#define A4XX_RB_COPY_DEST_BASE_BASE__SHIFT 5
+static inline uint32_t A4XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
+{
+ return ((val >> 5) << A4XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A4XX_RB_COPY_DEST_BASE_BASE__MASK;
+}
+
+#define REG_A4XX_RB_COPY_DEST_PITCH 0x000020fe
+#define A4XX_RB_COPY_DEST_PITCH_PITCH__MASK 0xffffffff
+#define A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0
+static inline uint32_t A4XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A4XX_RB_COPY_DEST_PITCH_PITCH__MASK;
+}
+
+#define REG_A4XX_RB_COPY_DEST_INFO 0x000020ff
+#define A4XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000fc
+#define A4XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 2
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_FORMAT(enum a4xx_color_fmt val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A4XX_RB_COPY_DEST_INFO_FORMAT__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
+#define A4XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A4XX_RB_COPY_DEST_INFO_SWAP__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
+#define A4XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A4XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000
+#define A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_ENDIAN__MASK 0x001c0000
+#define A4XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT 18
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A4XX_RB_COPY_DEST_INFO_ENDIAN__MASK;
+}
+#define A4XX_RB_COPY_DEST_INFO_TILE__MASK 0x03000000
+#define A4XX_RB_COPY_DEST_INFO_TILE__SHIFT 24
+static inline uint32_t A4XX_RB_COPY_DEST_INFO_TILE(enum a4xx_tile_mode val)
+{
+ return ((val) << A4XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A4XX_RB_COPY_DEST_INFO_TILE__MASK;
+}
+
+#define REG_A4XX_RB_FS_OUTPUT_REG 0x00002100
+#define A4XX_RB_FS_OUTPUT_REG_MRT__MASK 0x0000000f
+#define A4XX_RB_FS_OUTPUT_REG_MRT__SHIFT 0
+static inline uint32_t A4XX_RB_FS_OUTPUT_REG_MRT(uint32_t val)
+{
+ return ((val) << A4XX_RB_FS_OUTPUT_REG_MRT__SHIFT) & A4XX_RB_FS_OUTPUT_REG_MRT__MASK;
+}
+#define A4XX_RB_FS_OUTPUT_REG_FRAG_WRITES_Z 0x00000020
+
+#define REG_A4XX_RB_DEPTH_CONTROL 0x00002101
+#define A4XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z 0x00000001
+#define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x00000002
+#define A4XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
+#define A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070
+#define A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4
+static inline uint32_t A4XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
+}
+#define A4XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080
+#define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00010000
+#define A4XX_RB_DEPTH_CONTROL_FORCE_FRAGZ_TO_FS 0x00020000
+#define A4XX_RB_DEPTH_CONTROL_Z_READ_ENABLE 0x80000000
+
+#define REG_A4XX_RB_DEPTH_CLEAR 0x00002102
+
+#define REG_A4XX_RB_DEPTH_INFO 0x00002103
+#define A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000003
+#define A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum a4xx_depth_format val)
+{
+ return ((val) << A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
+}
+#define A4XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff000
+#define A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
+static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
+{
+ return ((val >> 12) << A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A4XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+}
+
+#define REG_A4XX_RB_DEPTH_PITCH 0x00002104
+#define A4XX_RB_DEPTH_PITCH__MASK 0xffffffff
+#define A4XX_RB_DEPTH_PITCH__SHIFT 0
+static inline uint32_t A4XX_RB_DEPTH_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A4XX_RB_DEPTH_PITCH__SHIFT) & A4XX_RB_DEPTH_PITCH__MASK;
+}
+
+#define REG_A4XX_RB_DEPTH_PITCH2 0x00002105
+#define A4XX_RB_DEPTH_PITCH2__MASK 0xffffffff
+#define A4XX_RB_DEPTH_PITCH2__SHIFT 0
+static inline uint32_t A4XX_RB_DEPTH_PITCH2(uint32_t val)
+{
+ return ((val >> 5) << A4XX_RB_DEPTH_PITCH2__SHIFT) & A4XX_RB_DEPTH_PITCH2__MASK;
+}
+
+#define REG_A4XX_RB_STENCIL_CONTROL 0x00002106
+#define A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A4XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A4XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A4XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A4XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A4XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A4XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A4XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A4XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A4XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A4XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A4XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
+#define REG_A4XX_RB_STENCIL_CONTROL2 0x00002107
+#define A4XX_RB_STENCIL_CONTROL2_STENCIL_BUFFER 0x00000001
+
+#define REG_A4XX_RB_STENCIL_INFO 0x00002108
+#define A4XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
+#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK 0xfffff000
+#define A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 12
+static inline uint32_t A4XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
+{
+ return ((val >> 12) << A4XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A4XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
+}
+
+#define REG_A4XX_RB_STENCIL_PITCH 0x00002109
+#define A4XX_RB_STENCIL_PITCH__MASK 0xffffffff
+#define A4XX_RB_STENCIL_PITCH__SHIFT 0
+static inline uint32_t A4XX_RB_STENCIL_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A4XX_RB_STENCIL_PITCH__SHIFT) & A4XX_RB_STENCIL_PITCH__MASK;
+}
+
+#define REG_A4XX_RB_STENCILREFMASK 0x0000210b
+#define A4XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A4XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A4XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A4XX_RB_STENCILREFMASK_BF 0x0000210c
+#define A4XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
+#define A4XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
+static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
+}
+#define A4XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
+#define A4XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
+static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
+}
+#define A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
+#define A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A4XX_RB_BIN_OFFSET 0x0000210d
+#define A4XX_RB_BIN_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_RB_BIN_OFFSET_X__MASK 0x00007fff
+#define A4XX_RB_BIN_OFFSET_X__SHIFT 0
+static inline uint32_t A4XX_RB_BIN_OFFSET_X(uint32_t val)
+{
+ return ((val) << A4XX_RB_BIN_OFFSET_X__SHIFT) & A4XX_RB_BIN_OFFSET_X__MASK;
+}
+#define A4XX_RB_BIN_OFFSET_Y__MASK 0x7fff0000
+#define A4XX_RB_BIN_OFFSET_Y__SHIFT 16
+static inline uint32_t A4XX_RB_BIN_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A4XX_RB_BIN_OFFSET_Y__SHIFT) & A4XX_RB_BIN_OFFSET_Y__MASK;
+}
+
+static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP(uint32_t i0) { return 0x00002120 + 0x2*i0; }
+
+static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP_MIN(uint32_t i0) { return 0x00002120 + 0x2*i0; }
+
+static inline uint32_t REG_A4XX_RB_VPORT_Z_CLAMP_MAX(uint32_t i0) { return 0x00002121 + 0x2*i0; }
+
+#define REG_A4XX_RBBM_HW_VERSION 0x00000000
+
+#define REG_A4XX_RBBM_HW_CONFIGURATION 0x00000002
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_TP(uint32_t i0) { return 0x00000004 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_TP_REG(uint32_t i0) { return 0x00000004 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_TP(uint32_t i0) { return 0x00000008 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_TP_REG(uint32_t i0) { return 0x00000008 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_TP(uint32_t i0) { return 0x0000000c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_TP_REG(uint32_t i0) { return 0x0000000c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP(uint32_t i0) { return 0x00000010 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP_REG(uint32_t i0) { return 0x00000010 + 0x1*i0; }
+
+#define REG_A4XX_RBBM_CLOCK_CTL_UCHE 0x00000014
+
+#define REG_A4XX_RBBM_CLOCK_CTL2_UCHE 0x00000015
+
+#define REG_A4XX_RBBM_CLOCK_CTL3_UCHE 0x00000016
+
+#define REG_A4XX_RBBM_CLOCK_CTL4_UCHE 0x00000017
+
+#define REG_A4XX_RBBM_CLOCK_HYST_UCHE 0x00000018
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_UCHE 0x00000019
+
+#define REG_A4XX_RBBM_CLOCK_MODE_GPC 0x0000001a
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_GPC 0x0000001b
+
+#define REG_A4XX_RBBM_CLOCK_HYST_GPC 0x0000001c
+
+#define REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM 0x0000001d
+
+#define REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x0000001e
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x0000001f
+
+#define REG_A4XX_RBBM_CLOCK_CTL 0x00000020
+
+#define REG_A4XX_RBBM_SP_HYST_CNT 0x00000021
+
+#define REG_A4XX_RBBM_SW_RESET_CMD 0x00000022
+
+#define REG_A4XX_RBBM_AHB_CTL0 0x00000023
+
+#define REG_A4XX_RBBM_AHB_CTL1 0x00000024
+
+#define REG_A4XX_RBBM_AHB_CMD 0x00000025
+
+#define REG_A4XX_RBBM_RB_SUB_BLOCK_SEL_CTL 0x00000026
+
+#define REG_A4XX_RBBM_RAM_ACC_63_32 0x00000028
+
+#define REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x0000002b
+
+#define REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL 0x0000002f
+
+#define REG_A4XX_RBBM_INTERFACE_HANG_MASK_CTL4 0x00000034
+
+#define REG_A4XX_RBBM_INT_CLEAR_CMD 0x00000036
+
+#define REG_A4XX_RBBM_INT_0_MASK 0x00000037
+
+#define REG_A4XX_RBBM_RBBM_CTL 0x0000003e
+
+#define REG_A4XX_RBBM_AHB_DEBUG_CTL 0x0000003f
+
+#define REG_A4XX_RBBM_VBIF_DEBUG_CTL 0x00000041
+
+#define REG_A4XX_RBBM_CLOCK_CTL2 0x00000042
+
+#define REG_A4XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
+
+#define REG_A4XX_RBBM_RESET_CYCLES 0x00000047
+
+#define REG_A4XX_RBBM_EXT_TRACE_BUS_CTL 0x00000049
+
+#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_A 0x0000004a
+
+#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_B 0x0000004b
+
+#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_C 0x0000004c
+
+#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_D 0x0000004d
+
+#define REG_A4XX_RBBM_POWER_CNTL_IP 0x00000098
+#define A4XX_RBBM_POWER_CNTL_IP_SW_COLLAPSE 0x00000001
+#define A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON 0x00100000
+
+#define REG_A4XX_RBBM_PERFCTR_CP_0_LO 0x0000009c
+
+#define REG_A4XX_RBBM_PERFCTR_CP_0_HI 0x0000009d
+
+#define REG_A4XX_RBBM_PERFCTR_CP_1_LO 0x0000009e
+
+#define REG_A4XX_RBBM_PERFCTR_CP_1_HI 0x0000009f
+
+#define REG_A4XX_RBBM_PERFCTR_CP_2_LO 0x000000a0
+
+#define REG_A4XX_RBBM_PERFCTR_CP_2_HI 0x000000a1
+
+#define REG_A4XX_RBBM_PERFCTR_CP_3_LO 0x000000a2
+
+#define REG_A4XX_RBBM_PERFCTR_CP_3_HI 0x000000a3
+
+#define REG_A4XX_RBBM_PERFCTR_CP_4_LO 0x000000a4
+
+#define REG_A4XX_RBBM_PERFCTR_CP_4_HI 0x000000a5
+
+#define REG_A4XX_RBBM_PERFCTR_CP_5_LO 0x000000a6
+
+#define REG_A4XX_RBBM_PERFCTR_CP_5_HI 0x000000a7
+
+#define REG_A4XX_RBBM_PERFCTR_CP_6_LO 0x000000a8
+
+#define REG_A4XX_RBBM_PERFCTR_CP_6_HI 0x000000a9
+
+#define REG_A4XX_RBBM_PERFCTR_CP_7_LO 0x000000aa
+
+#define REG_A4XX_RBBM_PERFCTR_CP_7_HI 0x000000ab
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_0_LO 0x000000ac
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_0_HI 0x000000ad
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_1_LO 0x000000ae
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_1_HI 0x000000af
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_2_LO 0x000000b0
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_2_HI 0x000000b1
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_3_LO 0x000000b2
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_3_HI 0x000000b3
+
+#define REG_A4XX_RBBM_PERFCTR_PC_0_LO 0x000000b4
+
+#define REG_A4XX_RBBM_PERFCTR_PC_0_HI 0x000000b5
+
+#define REG_A4XX_RBBM_PERFCTR_PC_1_LO 0x000000b6
+
+#define REG_A4XX_RBBM_PERFCTR_PC_1_HI 0x000000b7
+
+#define REG_A4XX_RBBM_PERFCTR_PC_2_LO 0x000000b8
+
+#define REG_A4XX_RBBM_PERFCTR_PC_2_HI 0x000000b9
+
+#define REG_A4XX_RBBM_PERFCTR_PC_3_LO 0x000000ba
+
+#define REG_A4XX_RBBM_PERFCTR_PC_3_HI 0x000000bb
+
+#define REG_A4XX_RBBM_PERFCTR_PC_4_LO 0x000000bc
+
+#define REG_A4XX_RBBM_PERFCTR_PC_4_HI 0x000000bd
+
+#define REG_A4XX_RBBM_PERFCTR_PC_5_LO 0x000000be
+
+#define REG_A4XX_RBBM_PERFCTR_PC_5_HI 0x000000bf
+
+#define REG_A4XX_RBBM_PERFCTR_PC_6_LO 0x000000c0
+
+#define REG_A4XX_RBBM_PERFCTR_PC_6_HI 0x000000c1
+
+#define REG_A4XX_RBBM_PERFCTR_PC_7_LO 0x000000c2
+
+#define REG_A4XX_RBBM_PERFCTR_PC_7_HI 0x000000c3
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_0_LO 0x000000c4
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_0_HI 0x000000c5
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_1_LO 0x000000c6
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_1_HI 0x000000c7
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_2_LO 0x000000c8
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_2_HI 0x000000c9
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_3_LO 0x000000ca
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_3_HI 0x000000cb
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_4_LO 0x000000cc
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_4_HI 0x000000cd
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_5_LO 0x000000ce
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_5_HI 0x000000cf
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_6_LO 0x000000d0
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_6_HI 0x000000d1
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_7_LO 0x000000d2
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_7_HI 0x000000d3
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000d4
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000d5
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000d6
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000d7
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000d8
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000d9
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000da
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000db
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000dc
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000dd
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000de
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000df
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_LO 0x000000e0
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_HI 0x000000e1
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_LO 0x000000e2
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_HI 0x000000e3
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_0_LO 0x000000e4
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_0_HI 0x000000e5
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_1_LO 0x000000e6
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_1_HI 0x000000e7
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_2_LO 0x000000e8
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_2_HI 0x000000e9
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_3_LO 0x000000ea
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_3_HI 0x000000eb
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_0_LO 0x000000ec
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_0_HI 0x000000ed
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_1_LO 0x000000ee
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_1_HI 0x000000ef
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_2_LO 0x000000f0
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_2_HI 0x000000f1
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_3_LO 0x000000f2
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_3_HI 0x000000f3
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_0_LO 0x000000f4
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_0_HI 0x000000f5
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_1_LO 0x000000f6
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_1_HI 0x000000f7
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_2_LO 0x000000f8
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_2_HI 0x000000f9
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_3_LO 0x000000fa
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_3_HI 0x000000fb
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_0_LO 0x000000fc
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_0_HI 0x000000fd
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_1_LO 0x000000fe
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_1_HI 0x000000ff
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_2_LO 0x00000100
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_2_HI 0x00000101
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_3_LO 0x00000102
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_3_HI 0x00000103
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_0_LO 0x00000104
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_0_HI 0x00000105
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_1_LO 0x00000106
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_1_HI 0x00000107
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_2_LO 0x00000108
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_2_HI 0x00000109
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_3_LO 0x0000010a
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_3_HI 0x0000010b
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_4_LO 0x0000010c
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_4_HI 0x0000010d
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_5_LO 0x0000010e
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_5_HI 0x0000010f
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_6_LO 0x00000110
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_6_HI 0x00000111
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_7_LO 0x00000112
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_7_HI 0x00000113
+
+#define REG_A4XX_RBBM_PERFCTR_TP_0_LO 0x00000114
+
+#define REG_A4XX_RBBM_PERFCTR_TP_0_HI 0x00000115
+
+#define REG_A4XX_RBBM_PERFCTR_TP_1_LO 0x00000116
+
+#define REG_A4XX_RBBM_PERFCTR_TP_1_HI 0x00000117
+
+#define REG_A4XX_RBBM_PERFCTR_TP_2_LO 0x00000118
+
+#define REG_A4XX_RBBM_PERFCTR_TP_2_HI 0x00000119
+
+#define REG_A4XX_RBBM_PERFCTR_TP_3_LO 0x0000011a
+
+#define REG_A4XX_RBBM_PERFCTR_TP_3_HI 0x0000011b
+
+#define REG_A4XX_RBBM_PERFCTR_TP_4_LO 0x0000011c
+
+#define REG_A4XX_RBBM_PERFCTR_TP_4_HI 0x0000011d
+
+#define REG_A4XX_RBBM_PERFCTR_TP_5_LO 0x0000011e
+
+#define REG_A4XX_RBBM_PERFCTR_TP_5_HI 0x0000011f
+
+#define REG_A4XX_RBBM_PERFCTR_TP_6_LO 0x00000120
+
+#define REG_A4XX_RBBM_PERFCTR_TP_6_HI 0x00000121
+
+#define REG_A4XX_RBBM_PERFCTR_TP_7_LO 0x00000122
+
+#define REG_A4XX_RBBM_PERFCTR_TP_7_HI 0x00000123
+
+#define REG_A4XX_RBBM_PERFCTR_SP_0_LO 0x00000124
+
+#define REG_A4XX_RBBM_PERFCTR_SP_0_HI 0x00000125
+
+#define REG_A4XX_RBBM_PERFCTR_SP_1_LO 0x00000126
+
+#define REG_A4XX_RBBM_PERFCTR_SP_1_HI 0x00000127
+
+#define REG_A4XX_RBBM_PERFCTR_SP_2_LO 0x00000128
+
+#define REG_A4XX_RBBM_PERFCTR_SP_2_HI 0x00000129
+
+#define REG_A4XX_RBBM_PERFCTR_SP_3_LO 0x0000012a
+
+#define REG_A4XX_RBBM_PERFCTR_SP_3_HI 0x0000012b
+
+#define REG_A4XX_RBBM_PERFCTR_SP_4_LO 0x0000012c
+
+#define REG_A4XX_RBBM_PERFCTR_SP_4_HI 0x0000012d
+
+#define REG_A4XX_RBBM_PERFCTR_SP_5_LO 0x0000012e
+
+#define REG_A4XX_RBBM_PERFCTR_SP_5_HI 0x0000012f
+
+#define REG_A4XX_RBBM_PERFCTR_SP_6_LO 0x00000130
+
+#define REG_A4XX_RBBM_PERFCTR_SP_6_HI 0x00000131
+
+#define REG_A4XX_RBBM_PERFCTR_SP_7_LO 0x00000132
+
+#define REG_A4XX_RBBM_PERFCTR_SP_7_HI 0x00000133
+
+#define REG_A4XX_RBBM_PERFCTR_SP_8_LO 0x00000134
+
+#define REG_A4XX_RBBM_PERFCTR_SP_8_HI 0x00000135
+
+#define REG_A4XX_RBBM_PERFCTR_SP_9_LO 0x00000136
+
+#define REG_A4XX_RBBM_PERFCTR_SP_9_HI 0x00000137
+
+#define REG_A4XX_RBBM_PERFCTR_SP_10_LO 0x00000138
+
+#define REG_A4XX_RBBM_PERFCTR_SP_10_HI 0x00000139
+
+#define REG_A4XX_RBBM_PERFCTR_SP_11_LO 0x0000013a
+
+#define REG_A4XX_RBBM_PERFCTR_SP_11_HI 0x0000013b
+
+#define REG_A4XX_RBBM_PERFCTR_RB_0_LO 0x0000013c
+
+#define REG_A4XX_RBBM_PERFCTR_RB_0_HI 0x0000013d
+
+#define REG_A4XX_RBBM_PERFCTR_RB_1_LO 0x0000013e
+
+#define REG_A4XX_RBBM_PERFCTR_RB_1_HI 0x0000013f
+
+#define REG_A4XX_RBBM_PERFCTR_RB_2_LO 0x00000140
+
+#define REG_A4XX_RBBM_PERFCTR_RB_2_HI 0x00000141
+
+#define REG_A4XX_RBBM_PERFCTR_RB_3_LO 0x00000142
+
+#define REG_A4XX_RBBM_PERFCTR_RB_3_HI 0x00000143
+
+#define REG_A4XX_RBBM_PERFCTR_RB_4_LO 0x00000144
+
+#define REG_A4XX_RBBM_PERFCTR_RB_4_HI 0x00000145
+
+#define REG_A4XX_RBBM_PERFCTR_RB_5_LO 0x00000146
+
+#define REG_A4XX_RBBM_PERFCTR_RB_5_HI 0x00000147
+
+#define REG_A4XX_RBBM_PERFCTR_RB_6_LO 0x00000148
+
+#define REG_A4XX_RBBM_PERFCTR_RB_6_HI 0x00000149
+
+#define REG_A4XX_RBBM_PERFCTR_RB_7_LO 0x0000014a
+
+#define REG_A4XX_RBBM_PERFCTR_RB_7_HI 0x0000014b
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_0_LO 0x0000014c
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_0_HI 0x0000014d
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_1_LO 0x0000014e
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_1_HI 0x0000014f
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_0_LO 0x00000166
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_0_HI 0x00000167
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO 0x00000168
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_1_HI 0x00000169
+
+#define REG_A4XX_RBBM_ALWAYSON_COUNTER_LO 0x0000016e
+
+#define REG_A4XX_RBBM_ALWAYSON_COUNTER_HI 0x0000016f
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP(uint32_t i0) { return 0x00000068 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP_REG(uint32_t i0) { return 0x00000068 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_SP(uint32_t i0) { return 0x0000006c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_SP_REG(uint32_t i0) { return 0x0000006c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_SP(uint32_t i0) { return 0x00000070 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_SP_REG(uint32_t i0) { return 0x00000070 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_SP(uint32_t i0) { return 0x00000074 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_SP_REG(uint32_t i0) { return 0x00000074 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_RB(uint32_t i0) { return 0x00000078 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_RB_REG(uint32_t i0) { return 0x00000078 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_RB(uint32_t i0) { return 0x0000007c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_RB_REG(uint32_t i0) { return 0x0000007c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(uint32_t i0) { return 0x00000082 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU_REG(uint32_t i0) { return 0x00000082 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(uint32_t i0) { return 0x00000086 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU_REG(uint32_t i0) { return 0x00000086 + 0x1*i0; }
+
+#define REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM 0x00000080
+
+#define REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM 0x00000081
+
+#define REG_A4XX_RBBM_CLOCK_CTL_HLSQ 0x0000008a
+
+#define REG_A4XX_RBBM_CLOCK_HYST_HLSQ 0x0000008b
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_HLSQ 0x0000008c
+
+#define REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM 0x0000008d
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(uint32_t i0) { return 0x0000008e + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) { return 0x0000008e + 0x1*i0; }
+
+#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0 0x00000099
+
+#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1 0x0000009a
+
+#define REG_A4XX_RBBM_PERFCTR_CTL 0x00000170
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD0 0x00000171
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD1 0x00000172
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD2 0x00000173
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000174
+
+#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000175
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_0 0x00000176
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_1 0x00000177
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_2 0x00000178
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_3 0x00000179
+
+#define REG_A4XX_RBBM_GPU_BUSY_MASKED 0x0000017a
+
+#define REG_A4XX_RBBM_INT_0_STATUS 0x0000017d
+
+#define REG_A4XX_RBBM_CLOCK_STATUS 0x00000182
+
+#define REG_A4XX_RBBM_AHB_STATUS 0x00000189
+
+#define REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS 0x0000018c
+
+#define REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS 0x0000018d
+
+#define REG_A4XX_RBBM_AHB_ERROR_STATUS 0x0000018f
+
+#define REG_A4XX_RBBM_STATUS 0x00000191
+#define A4XX_RBBM_STATUS_HI_BUSY 0x00000001
+#define A4XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
+#define A4XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
+#define A4XX_RBBM_STATUS_CP_NRT_BUSY 0x00004000
+#define A4XX_RBBM_STATUS_VBIF_BUSY 0x00008000
+#define A4XX_RBBM_STATUS_TSE_BUSY 0x00010000
+#define A4XX_RBBM_STATUS_RAS_BUSY 0x00020000
+#define A4XX_RBBM_STATUS_RB_BUSY 0x00040000
+#define A4XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
+#define A4XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
+#define A4XX_RBBM_STATUS_VFD_BUSY 0x00200000
+#define A4XX_RBBM_STATUS_VPC_BUSY 0x00400000
+#define A4XX_RBBM_STATUS_UCHE_BUSY 0x00800000
+#define A4XX_RBBM_STATUS_SP_BUSY 0x01000000
+#define A4XX_RBBM_STATUS_TPL1_BUSY 0x02000000
+#define A4XX_RBBM_STATUS_MARB_BUSY 0x04000000
+#define A4XX_RBBM_STATUS_VSC_BUSY 0x08000000
+#define A4XX_RBBM_STATUS_ARB_BUSY 0x10000000
+#define A4XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
+#define A4XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000
+#define A4XX_RBBM_STATUS_GPU_BUSY 0x80000000
+
+#define REG_A4XX_RBBM_INTERFACE_RRDY_STATUS5 0x0000019f
+
+#define REG_A4XX_RBBM_POWER_STATUS 0x000001b0
+#define A4XX_RBBM_POWER_STATUS_SP_TP_PWR_ON 0x00100000
+
+#define REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2 0x000001b8
+
+#define REG_A4XX_CP_SCRATCH_UMASK 0x00000228
+
+#define REG_A4XX_CP_SCRATCH_ADDR 0x00000229
+
+#define REG_A4XX_CP_RB_BASE 0x00000200
+
+#define REG_A4XX_CP_RB_CNTL 0x00000201
+
+#define REG_A4XX_CP_RB_WPTR 0x00000205
+
+#define REG_A4XX_CP_RB_RPTR_ADDR 0x00000203
+
+#define REG_A4XX_CP_RB_RPTR 0x00000204
+
+#define REG_A4XX_CP_IB1_BASE 0x00000206
+
+#define REG_A4XX_CP_IB1_BUFSZ 0x00000207
+
+#define REG_A4XX_CP_IB2_BASE 0x00000208
+
+#define REG_A4XX_CP_IB2_BUFSZ 0x00000209
+
+#define REG_A4XX_CP_ME_NRT_ADDR 0x0000020c
+
+#define REG_A4XX_CP_ME_NRT_DATA 0x0000020d
+
+#define REG_A4XX_CP_ME_RB_DONE_DATA 0x00000217
+
+#define REG_A4XX_CP_QUEUE_THRESH2 0x00000219
+
+#define REG_A4XX_CP_MERCIU_SIZE 0x0000021b
+
+#define REG_A4XX_CP_ROQ_ADDR 0x0000021c
+
+#define REG_A4XX_CP_ROQ_DATA 0x0000021d
+
+#define REG_A4XX_CP_MEQ_ADDR 0x0000021e
+
+#define REG_A4XX_CP_MEQ_DATA 0x0000021f
+
+#define REG_A4XX_CP_MERCIU_ADDR 0x00000220
+
+#define REG_A4XX_CP_MERCIU_DATA 0x00000221
+
+#define REG_A4XX_CP_MERCIU_DATA2 0x00000222
+
+#define REG_A4XX_CP_PFP_UCODE_ADDR 0x00000223
+
+#define REG_A4XX_CP_PFP_UCODE_DATA 0x00000224
+
+#define REG_A4XX_CP_ME_RAM_WADDR 0x00000225
+
+#define REG_A4XX_CP_ME_RAM_RADDR 0x00000226
+
+#define REG_A4XX_CP_ME_RAM_DATA 0x00000227
+
+#define REG_A4XX_CP_PREEMPT 0x0000022a
+
+#define REG_A4XX_CP_CNTL 0x0000022c
+
+#define REG_A4XX_CP_ME_CNTL 0x0000022d
+
+#define REG_A4XX_CP_DEBUG 0x0000022e
+
+#define REG_A4XX_CP_DEBUG_ECO_CONTROL 0x00000231
+
+#define REG_A4XX_CP_DRAW_STATE_ADDR 0x00000232
+
+static inline uint32_t REG_A4XX_CP_PROTECT(uint32_t i0) { return 0x00000240 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 + 0x1*i0; }
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A4XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A4XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A4XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000
+#define A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24
+static inline uint32_t A4XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A4XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A4XX_CP_PROTECT_REG_TRAP_WRITE__MASK 0x20000000
+#define A4XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT 29
+static inline uint32_t A4XX_CP_PROTECT_REG_TRAP_WRITE(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT) & A4XX_CP_PROTECT_REG_TRAP_WRITE__MASK;
+}
+#define A4XX_CP_PROTECT_REG_TRAP_READ__MASK 0x40000000
+#define A4XX_CP_PROTECT_REG_TRAP_READ__SHIFT 30
+static inline uint32_t A4XX_CP_PROTECT_REG_TRAP_READ(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_TRAP_READ__SHIFT) & A4XX_CP_PROTECT_REG_TRAP_READ__MASK;
+}
+
+#define REG_A4XX_CP_PROTECT_CTRL 0x00000250
+
+#define REG_A4XX_CP_ST_BASE 0x000004c0
+
+#define REG_A4XX_CP_STQ_AVAIL 0x000004ce
+
+#define REG_A4XX_CP_MERCIU_STAT 0x000004d0
+
+#define REG_A4XX_CP_WFI_PEND_CTR 0x000004d2
+
+#define REG_A4XX_CP_HW_FAULT 0x000004d8
+
+#define REG_A4XX_CP_PROTECT_STATUS 0x000004da
+
+#define REG_A4XX_CP_EVENTS_IN_FLIGHT 0x000004dd
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_0 0x00000500
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_1 0x00000501
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_2 0x00000502
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_3 0x00000503
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_4 0x00000504
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_5 0x00000505
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_6 0x00000506
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_7 0x00000507
+
+#define REG_A4XX_CP_PERFCOMBINER_SELECT 0x0000050b
+
+static inline uint32_t REG_A4XX_CP_SCRATCH(uint32_t i0) { return 0x00000578 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000578 + 0x1*i0; }
+
+#define REG_A4XX_SP_VS_STATUS 0x00000ec0
+
+#define REG_A4XX_SP_MODE_CONTROL 0x00000ec3
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_0 0x00000ec4
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_1 0x00000ec5
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_2 0x00000ec6
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_3 0x00000ec7
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_4 0x00000ec8
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_5 0x00000ec9
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_6 0x00000eca
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_7 0x00000ecb
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_8 0x00000ecc
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_9 0x00000ecd
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_10 0x00000ece
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_11 0x00000ecf
+
+#define REG_A4XX_SP_SP_CTRL_REG 0x000022c0
+#define A4XX_SP_SP_CTRL_REG_BINNING_PASS 0x00080000
+
+#define REG_A4XX_SP_INSTR_CACHE_CTRL 0x000022c1
+#define A4XX_SP_INSTR_CACHE_CTRL_VS_BUFFER 0x00000080
+#define A4XX_SP_INSTR_CACHE_CTRL_FS_BUFFER 0x00000100
+#define A4XX_SP_INSTR_CACHE_CTRL_INSTR_BUFFER 0x00000400
+
+#define REG_A4XX_SP_VS_CTRL_REG0 0x000022c4
+#define A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A4XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_VARYING 0x00000002
+#define A4XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
+#define A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A4XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A4XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A4XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000
+
+#define REG_A4XX_SP_VS_CTRL_REG1 0x000022c5
+#define A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x7f000000
+#define A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24
+static inline uint32_t A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK;
+}
+
+#define REG_A4XX_SP_VS_PARAM_REG 0x000022c6
+#define A4XX_SP_VS_PARAM_REG_POSREGID__MASK 0x000000ff
+#define A4XX_SP_VS_PARAM_REG_POSREGID__SHIFT 0
+static inline uint32_t A4XX_SP_VS_PARAM_REG_POSREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_VS_PARAM_REG_POSREGID__MASK;
+}
+#define A4XX_SP_VS_PARAM_REG_PSIZEREGID__MASK 0x0000ff00
+#define A4XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT 8
+static inline uint32_t A4XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A4XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
+}
+#define A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0xfff00000
+#define A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20
+static inline uint32_t A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+#define A4XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff
+#define A4XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A4XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A4XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00
+#define A4XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9
+static inline uint32_t A4XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A4XX_SP_VS_OUT_REG_B_REGID__MASK 0x01ff0000
+#define A4XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A4XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A4XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
+#define A4XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
+static inline uint32_t A4XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d8 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d8 + 0x1*i0; }
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A4XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A4XX_SP_VS_OBJ_OFFSET_REG 0x000022e0
+#define A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_VS_OBJ_START 0x000022e1
+
+#define REG_A4XX_SP_VS_PVT_MEM_PARAM 0x000022e2
+
+#define REG_A4XX_SP_VS_PVT_MEM_ADDR 0x000022e3
+
+#define REG_A4XX_SP_VS_LENGTH_REG 0x000022e5
+
+#define REG_A4XX_SP_FS_CTRL_REG0 0x000022e8
+#define A4XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A4XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_VARYING 0x00000002
+#define A4XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
+#define A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A4XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A4XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A4XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
+
+#define REG_A4XX_SP_FS_CTRL_REG1 0x000022e9
+#define A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A4XX_SP_FS_CTRL_REG1_FACENESS 0x00080000
+#define A4XX_SP_FS_CTRL_REG1_VARYING 0x00100000
+#define A4XX_SP_FS_CTRL_REG1_FRAGCOORD 0x00200000
+
+#define REG_A4XX_SP_FS_OBJ_OFFSET_REG 0x000022ea
+#define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_FS_OBJ_START 0x000022eb
+
+#define REG_A4XX_SP_FS_PVT_MEM_PARAM 0x000022ec
+
+#define REG_A4XX_SP_FS_PVT_MEM_ADDR 0x000022ed
+
+#define REG_A4XX_SP_FS_LENGTH_REG 0x000022ef
+
+#define REG_A4XX_SP_FS_OUTPUT_REG 0x000022f0
+#define A4XX_SP_FS_OUTPUT_REG_MRT__MASK 0x0000000f
+#define A4XX_SP_FS_OUTPUT_REG_MRT__SHIFT 0
+static inline uint32_t A4XX_SP_FS_OUTPUT_REG_MRT(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_OUTPUT_REG_MRT__SHIFT) & A4XX_SP_FS_OUTPUT_REG_MRT__MASK;
+}
+#define A4XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080
+#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00
+#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8
+static inline uint32_t A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK;
+}
+#define A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__MASK 0xff000000
+#define A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__SHIFT 24
+static inline uint32_t A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__SHIFT) & A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_FS_MRT(uint32_t i0) { return 0x000022f1 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f1 + 0x1*i0; }
+#define A4XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff
+#define A4XX_SP_FS_MRT_REG_REGID__SHIFT 0
+static inline uint32_t A4XX_SP_FS_MRT_REG_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_FS_MRT_REG_REGID__SHIFT) & A4XX_SP_FS_MRT_REG_REGID__MASK;
+}
+#define A4XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100
+#define A4XX_SP_FS_MRT_REG_COLOR_SINT 0x00000400
+#define A4XX_SP_FS_MRT_REG_COLOR_UINT 0x00000800
+#define A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK 0x0003f000
+#define A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT 12
+static inline uint32_t A4XX_SP_FS_MRT_REG_MRTFORMAT(enum a4xx_color_fmt val)
+{
+ return ((val) << A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT) & A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK;
+}
+#define A4XX_SP_FS_MRT_REG_COLOR_SRGB 0x00040000
+
+#define REG_A4XX_SP_CS_CTRL_REG0 0x00002300
+#define A4XX_SP_CS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A4XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A4XX_SP_CS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A4XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_CS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A4XX_SP_CS_CTRL_REG0_VARYING 0x00000002
+#define A4XX_SP_CS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
+#define A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
+static inline uint32_t A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
+{
+ return ((val) << A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_CS_CTRL_REG0_INOUTREGOVERLAP__MASK;
+}
+#define A4XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A4XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A4XX_SP_CS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A4XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_CS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A4XX_SP_CS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A4XX_SP_CS_CTRL_REG0_PIXLODENABLE 0x00400000
+
+#define REG_A4XX_SP_CS_OBJ_OFFSET_REG 0x00002301
+
+#define REG_A4XX_SP_CS_OBJ_START 0x00002302
+
+#define REG_A4XX_SP_CS_PVT_MEM_PARAM 0x00002303
+
+#define REG_A4XX_SP_CS_PVT_MEM_ADDR 0x00002304
+
+#define REG_A4XX_SP_CS_PVT_MEM_SIZE 0x00002305
+
+#define REG_A4XX_SP_CS_LENGTH_REG 0x00002306
+
+#define REG_A4XX_SP_HS_OBJ_OFFSET_REG 0x0000230d
+#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_HS_OBJ_START 0x0000230e
+
+#define REG_A4XX_SP_HS_PVT_MEM_PARAM 0x0000230f
+
+#define REG_A4XX_SP_HS_PVT_MEM_ADDR 0x00002310
+
+#define REG_A4XX_SP_HS_LENGTH_REG 0x00002312
+
+#define REG_A4XX_SP_DS_PARAM_REG 0x0000231a
+#define A4XX_SP_DS_PARAM_REG_POSREGID__MASK 0x000000ff
+#define A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT 0
+static inline uint32_t A4XX_SP_DS_PARAM_REG_POSREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_DS_PARAM_REG_POSREGID__MASK;
+}
+#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK 0xfff00000
+#define A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT 20
+static inline uint32_t A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_DS_PARAM_REG_TOTALGSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_DS_OUT(uint32_t i0) { return 0x0000231b + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_DS_OUT_REG(uint32_t i0) { return 0x0000231b + 0x1*i0; }
+#define A4XX_SP_DS_OUT_REG_A_REGID__MASK 0x000001ff
+#define A4XX_SP_DS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A4XX_SP_DS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_A_REGID__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK 0x00001e00
+#define A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT 9
+static inline uint32_t A4XX_SP_DS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_B_REGID__MASK 0x01ff0000
+#define A4XX_SP_DS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A4XX_SP_DS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_DS_OUT_REG_B_REGID__MASK;
+}
+#define A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK 0x1e000000
+#define A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT 25
+static inline uint32_t A4XX_SP_DS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_DS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_DS_VPC_DST(uint32_t i0) { return 0x0000232c + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_DS_VPC_DST_REG(uint32_t i0) { return 0x0000232c + 0x1*i0; }
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A4XX_SP_DS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A4XX_SP_DS_OBJ_OFFSET_REG 0x00002334
+#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_DS_OBJ_START 0x00002335
+
+#define REG_A4XX_SP_DS_PVT_MEM_PARAM 0x00002336
+
+#define REG_A4XX_SP_DS_PVT_MEM_ADDR 0x00002337
+
+#define REG_A4XX_SP_DS_LENGTH_REG 0x00002339
+
+#define REG_A4XX_SP_GS_PARAM_REG 0x00002341
+#define A4XX_SP_GS_PARAM_REG_POSREGID__MASK 0x000000ff
+#define A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT 0
+static inline uint32_t A4XX_SP_GS_PARAM_REG_POSREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_POSREGID__MASK;
+}
+#define A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK 0x0000ff00
+#define A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT 8
+static inline uint32_t A4XX_SP_GS_PARAM_REG_PRIMREGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_PARAM_REG_PRIMREGID__SHIFT) & A4XX_SP_GS_PARAM_REG_PRIMREGID__MASK;
+}
+#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK 0xfff00000
+#define A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT 20
+static inline uint32_t A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__SHIFT) & A4XX_SP_GS_PARAM_REG_TOTALGSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_GS_OUT(uint32_t i0) { return 0x00002342 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_GS_OUT_REG(uint32_t i0) { return 0x00002342 + 0x1*i0; }
+#define A4XX_SP_GS_OUT_REG_A_REGID__MASK 0x000001ff
+#define A4XX_SP_GS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A4XX_SP_GS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_A_REGID__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK 0x00001e00
+#define A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT 9
+static inline uint32_t A4XX_SP_GS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_B_REGID__MASK 0x01ff0000
+#define A4XX_SP_GS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A4XX_SP_GS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_GS_OUT_REG_B_REGID__MASK;
+}
+#define A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK 0x1e000000
+#define A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT 25
+static inline uint32_t A4XX_SP_GS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_GS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A4XX_SP_GS_VPC_DST(uint32_t i0) { return 0x00002353 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_SP_GS_VPC_DST_REG(uint32_t i0) { return 0x00002353 + 0x1*i0; }
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A4XX_SP_GS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A4XX_SP_GS_OBJ_OFFSET_REG 0x0000235b
+#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A4XX_SP_GS_OBJ_START 0x0000235c
+
+#define REG_A4XX_SP_GS_PVT_MEM_PARAM 0x0000235d
+
+#define REG_A4XX_SP_GS_PVT_MEM_ADDR 0x0000235e
+
+#define REG_A4XX_SP_GS_LENGTH_REG 0x00002360
+
+#define REG_A4XX_VPC_DEBUG_RAM_SEL 0x00000e60
+
+#define REG_A4XX_VPC_DEBUG_RAM_READ 0x00000e61
+
+#define REG_A4XX_VPC_DEBUG_ECO_CONTROL 0x00000e64
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_0 0x00000e65
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_1 0x00000e66
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_2 0x00000e67
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_3 0x00000e68
+
+#define REG_A4XX_VPC_ATTR 0x00002140
+#define A4XX_VPC_ATTR_TOTALATTR__MASK 0x000001ff
+#define A4XX_VPC_ATTR_TOTALATTR__SHIFT 0
+static inline uint32_t A4XX_VPC_ATTR_TOTALATTR(uint32_t val)
+{
+ return ((val) << A4XX_VPC_ATTR_TOTALATTR__SHIFT) & A4XX_VPC_ATTR_TOTALATTR__MASK;
+}
+#define A4XX_VPC_ATTR_PSIZE 0x00000200
+#define A4XX_VPC_ATTR_THRDASSIGN__MASK 0x00003000
+#define A4XX_VPC_ATTR_THRDASSIGN__SHIFT 12
+static inline uint32_t A4XX_VPC_ATTR_THRDASSIGN(uint32_t val)
+{
+ return ((val) << A4XX_VPC_ATTR_THRDASSIGN__SHIFT) & A4XX_VPC_ATTR_THRDASSIGN__MASK;
+}
+#define A4XX_VPC_ATTR_ENABLE 0x02000000
+
+#define REG_A4XX_VPC_PACK 0x00002141
+#define A4XX_VPC_PACK_NUMBYPASSVAR__MASK 0x000000ff
+#define A4XX_VPC_PACK_NUMBYPASSVAR__SHIFT 0
+static inline uint32_t A4XX_VPC_PACK_NUMBYPASSVAR(uint32_t val)
+{
+ return ((val) << A4XX_VPC_PACK_NUMBYPASSVAR__SHIFT) & A4XX_VPC_PACK_NUMBYPASSVAR__MASK;
+}
+#define A4XX_VPC_PACK_NUMFPNONPOSVAR__MASK 0x0000ff00
+#define A4XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT 8
+static inline uint32_t A4XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t val)
+{
+ return ((val) << A4XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT) & A4XX_VPC_PACK_NUMFPNONPOSVAR__MASK;
+}
+#define A4XX_VPC_PACK_NUMNONPOSVSVAR__MASK 0x00ff0000
+#define A4XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT 16
+static inline uint32_t A4XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
+{
+ return ((val) << A4XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A4XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
+}
+
+static inline uint32_t REG_A4XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002142 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002142 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000214a + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000214a + 0x1*i0; }
+
+#define REG_A4XX_VPC_SO_FLUSH_WADDR_3 0x0000216e
+
+#define REG_A4XX_VSC_BIN_SIZE 0x00000c00
+#define A4XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
+#define A4XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A4XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A4XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A4XX_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A4XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
+#define A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5
+static inline uint32_t A4XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A4XX_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A4XX_VSC_SIZE_ADDRESS 0x00000c01
+
+#define REG_A4XX_VSC_SIZE_ADDRESS2 0x00000c02
+
+#define REG_A4XX_VSC_DEBUG_ECO_CONTROL 0x00000c03
+
+static inline uint32_t REG_A4XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c08 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000c08 + 0x1*i0; }
+#define A4XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
+#define A4XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
+{
+ return ((val) << A4XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_X__MASK;
+}
+#define A4XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00
+#define A4XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val)
+{
+ return ((val) << A4XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_Y__MASK;
+}
+#define A4XX_VSC_PIPE_CONFIG_REG_W__MASK 0x00f00000
+#define A4XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_W(uint32_t val)
+{
+ return ((val) << A4XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_W__MASK;
+}
+#define A4XX_VSC_PIPE_CONFIG_REG_H__MASK 0x0f000000
+#define A4XX_VSC_PIPE_CONFIG_REG_H__SHIFT 24
+static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
+{
+ return ((val) << A4XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_H__MASK;
+}
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_ADDRESS_REG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c18 + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0x00000c18 + 0x1*i0; }
+
+#define REG_A4XX_VSC_PIPE_PARTIAL_POSN_1 0x00000c41
+
+#define REG_A4XX_VSC_PERFCTR_VSC_SEL_0 0x00000c50
+
+#define REG_A4XX_VSC_PERFCTR_VSC_SEL_1 0x00000c51
+
+#define REG_A4XX_VFD_DEBUG_CONTROL 0x00000e40
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_0 0x00000e43
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_1 0x00000e44
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_2 0x00000e45
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_3 0x00000e46
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_4 0x00000e47
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_5 0x00000e48
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_6 0x00000e49
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_7 0x00000e4a
+
+#define REG_A4XX_VGT_CL_INITIATOR 0x000021d0
+
+#define REG_A4XX_VGT_EVENT_INITIATOR 0x000021d9
+
+#define REG_A4XX_VFD_CONTROL_0 0x00002200
+#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x000000ff
+#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0
+static inline uint32_t A4XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT) & A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK;
+}
+#define A4XX_VFD_CONTROL_0_BYPASSATTROVS__MASK 0x0001fe00
+#define A4XX_VFD_CONTROL_0_BYPASSATTROVS__SHIFT 9
+static inline uint32_t A4XX_VFD_CONTROL_0_BYPASSATTROVS(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_0_BYPASSATTROVS__SHIFT) & A4XX_VFD_CONTROL_0_BYPASSATTROVS__MASK;
+}
+#define A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x03f00000
+#define A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 20
+static inline uint32_t A4XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
+}
+#define A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK 0xfc000000
+#define A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT 26
+static inline uint32_t A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT) & A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK;
+}
+
+#define REG_A4XX_VFD_CONTROL_1 0x00002201
+#define A4XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000ffff
+#define A4XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
+static inline uint32_t A4XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A4XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
+}
+#define A4XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
+#define A4XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
+static inline uint32_t A4XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A4XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+#define A4XX_VFD_CONTROL_1_REGID4INST__MASK 0xff000000
+#define A4XX_VFD_CONTROL_1_REGID4INST__SHIFT 24
+static inline uint32_t A4XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A4XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+
+#define REG_A4XX_VFD_CONTROL_2 0x00002202
+
+#define REG_A4XX_VFD_CONTROL_3 0x00002203
+#define A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK 0x0000ff00
+#define A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT 8
+static inline uint32_t A4XX_VFD_CONTROL_3_REGID_VTXCNT(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_3_REGID_VTXCNT__SHIFT) & A4XX_VFD_CONTROL_3_REGID_VTXCNT__MASK;
+}
+#define A4XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000
+#define A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16
+static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSX__MASK;
+}
+#define A4XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000
+#define A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24
+static inline uint32_t A4XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
+{
+ return ((val) << A4XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A4XX_VFD_CONTROL_3_REGID_TESSY__MASK;
+}
+
+#define REG_A4XX_VFD_CONTROL_4 0x00002204
+
+#define REG_A4XX_VFD_INDEX_OFFSET 0x00002208
+
+static inline uint32_t REG_A4XX_VFD_FETCH(uint32_t i0) { return 0x0000220a + 0x4*i0; }
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x0000220a + 0x4*i0; }
+#define A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f
+#define A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT 0
+static inline uint32_t A4XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK;
+}
+#define A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0001ff80
+#define A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7
+static inline uint32_t A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK;
+}
+#define A4XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00080000
+#define A4XX_VFD_FETCH_INSTR_0_INSTANCED 0x00100000
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x0000220b + 0x4*i0; }
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_2(uint32_t i0) { return 0x0000220c + 0x4*i0; }
+#define A4XX_VFD_FETCH_INSTR_2_SIZE__MASK 0xffffffff
+#define A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT 0
+static inline uint32_t A4XX_VFD_FETCH_INSTR_2_SIZE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT) & A4XX_VFD_FETCH_INSTR_2_SIZE__MASK;
+}
+
+static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_3(uint32_t i0) { return 0x0000220d + 0x4*i0; }
+#define A4XX_VFD_FETCH_INSTR_3_STEPRATE__MASK 0x000001ff
+#define A4XX_VFD_FETCH_INSTR_3_STEPRATE__SHIFT 0
+static inline uint32_t A4XX_VFD_FETCH_INSTR_3_STEPRATE(uint32_t val)
+{
+ return ((val) << A4XX_VFD_FETCH_INSTR_3_STEPRATE__SHIFT) & A4XX_VFD_FETCH_INSTR_3_STEPRATE__MASK;
+}
+
+static inline uint32_t REG_A4XX_VFD_DECODE(uint32_t i0) { return 0x0000228a + 0x1*i0; }
+
+static inline uint32_t REG_A4XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000228a + 0x1*i0; }
+#define A4XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f
+#define A4XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A4XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A4XX_VFD_DECODE_INSTR_WRITEMASK__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_CONSTFILL 0x00000010
+#define A4XX_VFD_DECODE_INSTR_FORMAT__MASK 0x00000fc0
+#define A4XX_VFD_DECODE_INSTR_FORMAT__SHIFT 6
+static inline uint32_t A4XX_VFD_DECODE_INSTR_FORMAT(enum a4xx_vtx_fmt val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A4XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_REGID__MASK 0x000ff000
+#define A4XX_VFD_DECODE_INSTR_REGID__SHIFT 12
+static inline uint32_t A4XX_VFD_DECODE_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_REGID__SHIFT) & A4XX_VFD_DECODE_INSTR_REGID__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_INT 0x00100000
+#define A4XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000
+#define A4XX_VFD_DECODE_INSTR_SWAP__SHIFT 22
+static inline uint32_t A4XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A4XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
+#define A4XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
+static inline uint32_t A4XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
+{
+ return ((val) << A4XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A4XX_VFD_DECODE_INSTR_SHIFTCNT__MASK;
+}
+#define A4XX_VFD_DECODE_INSTR_LASTCOMPVALID 0x20000000
+#define A4XX_VFD_DECODE_INSTR_SWITCHNEXT 0x40000000
+
+#define REG_A4XX_TPL1_DEBUG_ECO_CONTROL 0x00000f00
+
+#define REG_A4XX_TPL1_TP_MODE_CONTROL 0x00000f03
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_0 0x00000f04
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_1 0x00000f05
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_2 0x00000f06
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_3 0x00000f07
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_4 0x00000f08
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_5 0x00000f09
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_6 0x00000f0a
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_7 0x00000f0b
+
+#define REG_A4XX_TPL1_TP_TEX_OFFSET 0x00002380
+
+#define REG_A4XX_TPL1_TP_TEX_COUNT 0x00002381
+#define A4XX_TPL1_TP_TEX_COUNT_VS__MASK 0x000000ff
+#define A4XX_TPL1_TP_TEX_COUNT_VS__SHIFT 0
+static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_VS(uint32_t val)
+{
+ return ((val) << A4XX_TPL1_TP_TEX_COUNT_VS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_VS__MASK;
+}
+#define A4XX_TPL1_TP_TEX_COUNT_HS__MASK 0x0000ff00
+#define A4XX_TPL1_TP_TEX_COUNT_HS__SHIFT 8
+static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_HS(uint32_t val)
+{
+ return ((val) << A4XX_TPL1_TP_TEX_COUNT_HS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_HS__MASK;
+}
+#define A4XX_TPL1_TP_TEX_COUNT_DS__MASK 0x00ff0000
+#define A4XX_TPL1_TP_TEX_COUNT_DS__SHIFT 16
+static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_DS(uint32_t val)
+{
+ return ((val) << A4XX_TPL1_TP_TEX_COUNT_DS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_DS__MASK;
+}
+#define A4XX_TPL1_TP_TEX_COUNT_GS__MASK 0xff000000
+#define A4XX_TPL1_TP_TEX_COUNT_GS__SHIFT 24
+static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_GS(uint32_t val)
+{
+ return ((val) << A4XX_TPL1_TP_TEX_COUNT_GS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_GS__MASK;
+}
+
+#define REG_A4XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR 0x00002384
+
+#define REG_A4XX_TPL1_TP_HS_BORDER_COLOR_BASE_ADDR 0x00002387
+
+#define REG_A4XX_TPL1_TP_DS_BORDER_COLOR_BASE_ADDR 0x0000238a
+
+#define REG_A4XX_TPL1_TP_GS_BORDER_COLOR_BASE_ADDR 0x0000238d
+
+#define REG_A4XX_TPL1_TP_FS_TEX_COUNT 0x000023a0
+#define A4XX_TPL1_TP_FS_TEX_COUNT_FS__MASK 0x000000ff
+#define A4XX_TPL1_TP_FS_TEX_COUNT_FS__SHIFT 0
+static inline uint32_t A4XX_TPL1_TP_FS_TEX_COUNT_FS(uint32_t val)
+{
+ return ((val) << A4XX_TPL1_TP_FS_TEX_COUNT_FS__SHIFT) & A4XX_TPL1_TP_FS_TEX_COUNT_FS__MASK;
+}
+#define A4XX_TPL1_TP_FS_TEX_COUNT_CS__MASK 0x0000ff00
+#define A4XX_TPL1_TP_FS_TEX_COUNT_CS__SHIFT 8
+static inline uint32_t A4XX_TPL1_TP_FS_TEX_COUNT_CS(uint32_t val)
+{
+ return ((val) << A4XX_TPL1_TP_FS_TEX_COUNT_CS__SHIFT) & A4XX_TPL1_TP_FS_TEX_COUNT_CS__MASK;
+}
+
+#define REG_A4XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x000023a1
+
+#define REG_A4XX_TPL1_TP_CS_BORDER_COLOR_BASE_ADDR 0x000023a4
+
+#define REG_A4XX_TPL1_TP_CS_SAMPLER_BASE_ADDR 0x000023a5
+
+#define REG_A4XX_TPL1_TP_CS_TEXMEMOBJ_BASE_ADDR 0x000023a6
+
+#define REG_A4XX_GRAS_TSE_STATUS 0x00000c80
+
+#define REG_A4XX_GRAS_DEBUG_ECO_CONTROL 0x00000c81
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c88
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c89
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c8a
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c8b
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c8c
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c8d
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c8e
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c8f
+
+#define REG_A4XX_GRAS_CL_CLIP_CNTL 0x00002000
+#define A4XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00008000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZNEAR_CLIP_DISABLE 0x00010000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000
+
+#define REG_A4XX_GRAS_CNTL 0x00002003
+#define A4XX_GRAS_CNTL_IJ_PERSP 0x00000001
+#define A4XX_GRAS_CNTL_IJ_LINEAR 0x00000002
+
+#define REG_A4XX_GRAS_CL_GB_CLIP_ADJ 0x00002004
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK;
+}
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A4XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_XOFFSET_0 0x00002008
+#define A4XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_XSCALE_0 0x00002009
+#define A4XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_YOFFSET_0 0x0000200a
+#define A4XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_YSCALE_0 0x0000200b
+#define A4XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_ZOFFSET_0 0x0000200c
+#define A4XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+
+#define REG_A4XX_GRAS_CL_VPORT_ZSCALE_0 0x0000200d
+#define A4XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
+#define A4XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
+static inline uint32_t A4XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_POINT_MINMAX 0x00002070
+#define A4XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A4XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A4XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A4XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A4XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A4XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A4XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_POINT_SIZE 0x00002071
+#define A4XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A4XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((int32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_SIZE__SHIFT) & A4XX_GRAS_SU_POINT_SIZE__MASK;
+}
+
+#define REG_A4XX_GRAS_ALPHA_CONTROL 0x00002073
+#define A4XX_GRAS_ALPHA_CONTROL_ALPHA_TEST_ENABLE 0x00000004
+#define A4XX_GRAS_ALPHA_CONTROL_FORCE_FRAGZ_TO_FS 0x00000008
+
+#define REG_A4XX_GRAS_SU_POLY_OFFSET_SCALE 0x00002074
+#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_POLY_OFFSET_OFFSET 0x00002075
+#define A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_POLY_OFFSET_CLAMP 0x00002076
+#define A4XX_GRAS_SU_POLY_OFFSET_CLAMP__MASK 0xffffffff
+#define A4XX_GRAS_SU_POLY_OFFSET_CLAMP__SHIFT 0
+static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_CLAMP(float val)
+{
+ return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_CLAMP__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_CLAMP__MASK;
+}
+
+#define REG_A4XX_GRAS_DEPTH_CONTROL 0x00002077
+#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK 0x00000003
+#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT 0
+static inline uint32_t A4XX_GRAS_DEPTH_CONTROL_FORMAT(enum a4xx_depth_format val)
+{
+ return ((val) << A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT) & A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK;
+}
+
+#define REG_A4XX_GRAS_SU_MODE_CONTROL 0x00002078
+#define A4XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
+#define A4XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
+#define A4XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004
+#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8
+#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
+{
+ return ((((int32_t)(val * 4.0))) << A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
+}
+#define A4XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
+#define A4XX_GRAS_SU_MODE_CONTROL_MSAA_ENABLE 0x00002000
+#define A4XX_GRAS_SU_MODE_CONTROL_RENDERING_PASS 0x00100000
+
+#define REG_A4XX_GRAS_SC_CONTROL 0x0000207b
+#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x0000000c
+#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 2
+static inline uint32_t A4XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
+{
+ return ((val) << A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK;
+}
+#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000380
+#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 7
+static inline uint32_t A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK;
+}
+#define A4XX_GRAS_SC_CONTROL_MSAA_DISABLE 0x00000800
+#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000
+#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12
+static inline uint32_t A4XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_TL 0x0000207c
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
+}
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_BR 0x0000207d
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
+}
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000209c
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_WINDOW_SCISSOR_TL 0x0000209d
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_BR 0x0000209e
+#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_BR_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_BR_X__MASK;
+}
+#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_BR_Y__MASK;
+}
+
+#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_TL 0x0000209f
+#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__MASK 0x00007fff
+#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__SHIFT 0
+static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_X(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_TL_X__MASK;
+}
+#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__MASK 0x7fff0000
+#define A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__SHIFT 16
+static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y(uint32_t val)
+{
+ return ((val) << A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__SHIFT) & A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y__MASK;
+}
+
+#define REG_A4XX_UCHE_CACHE_MODE_CONTROL 0x00000e80
+
+#define REG_A4XX_UCHE_TRAP_BASE_LO 0x00000e83
+
+#define REG_A4XX_UCHE_TRAP_BASE_HI 0x00000e84
+
+#define REG_A4XX_UCHE_CACHE_STATUS 0x00000e88
+
+#define REG_A4XX_UCHE_INVALIDATE0 0x00000e8a
+
+#define REG_A4XX_UCHE_INVALIDATE1 0x00000e8b
+
+#define REG_A4XX_UCHE_CACHE_WAYS_VFD 0x00000e8c
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000e8e
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000e8f
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000e90
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000e91
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000e92
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000e93
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000e94
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000e95
+
+#define REG_A4XX_HLSQ_TIMEOUT_THRESHOLD 0x00000e00
+
+#define REG_A4XX_HLSQ_DEBUG_ECO_CONTROL 0x00000e04
+
+#define REG_A4XX_HLSQ_MODE_CONTROL 0x00000e05
+
+#define REG_A4XX_HLSQ_PERF_PIPE_MASK 0x00000e0e
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e06
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e07
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e08
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e09
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e0a
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e0b
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e0c
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e0d
+
+#define REG_A4XX_HLSQ_CONTROL_0_REG 0x000023c0
+#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
+#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
+static inline uint32_t A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
+#define A4XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
+#define A4XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
+#define A4XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
+#define A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000
+#define A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27
+static inline uint32_t A4XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT) & A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
+#define A4XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
+#define A4XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
+#define A4XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000
+
+#define REG_A4XX_HLSQ_CONTROL_1_REG 0x000023c1
+#define A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x00000040
+#define A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
+static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
+#define A4XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
+#define A4XX_HLSQ_CONTROL_1_REG_COORDREGID__MASK 0x00ff0000
+#define A4XX_HLSQ_CONTROL_1_REG_COORDREGID__SHIFT 16
+static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_COORDREGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_1_REG_COORDREGID__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_COORDREGID__MASK;
+}
+#define A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__MASK 0xff000000
+#define A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__SHIFT 24
+static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__MASK;
+}
+
+#define REG_A4XX_HLSQ_CONTROL_2_REG 0x000023c2
+#define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
+#define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
+static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK;
+}
+#define A4XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000003fc
+#define A4XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 2
+static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
+#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__MASK 0x0003fc00
+#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__SHIFT 10
+static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__MASK;
+}
+#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__MASK 0x03fc0000
+#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__SHIFT 18
+static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__MASK;
+}
+
+#define REG_A4XX_HLSQ_CONTROL_3_REG 0x000023c3
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK;
+}
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK;
+}
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK;
+}
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000
+#define A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24
+static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK;
+}
+
+#define REG_A4XX_HLSQ_CONTROL_4_REG 0x000023c4
+#define A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff
+#define A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A4XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK;
+}
+#define A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK 0x0000ff00
+#define A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT 8
+static inline uint32_t A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A4XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK;
+}
+
+#define REG_A4XX_HLSQ_VS_CONTROL_REG 0x000023c5
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_VS_CONTROL_REG_SSBO_ENABLE 0x00008000
+#define A4XX_HLSQ_VS_CONTROL_REG_ENABLED 0x00010000
+#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_FS_CONTROL_REG 0x000023c6
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_FS_CONTROL_REG_SSBO_ENABLE 0x00008000
+#define A4XX_HLSQ_FS_CONTROL_REG_ENABLED 0x00010000
+#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_HS_CONTROL_REG 0x000023c7
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_HS_CONTROL_REG_SSBO_ENABLE 0x00008000
+#define A4XX_HLSQ_HS_CONTROL_REG_ENABLED 0x00010000
+#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_DS_CONTROL_REG 0x000023c8
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_DS_CONTROL_REG_SSBO_ENABLE 0x00008000
+#define A4XX_HLSQ_DS_CONTROL_REG_ENABLED 0x00010000
+#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_GS_CONTROL_REG 0x000023c9
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_GS_CONTROL_REG_SSBO_ENABLE 0x00008000
+#define A4XX_HLSQ_GS_CONTROL_REG_ENABLED 0x00010000
+#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_CS_CONTROL_REG 0x000023ca
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_CS_CONTROL_REG_SSBO_ENABLE 0x00008000
+#define A4XX_HLSQ_CS_CONTROL_REG_ENABLED 0x00010000
+#define A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_0 0x000023cd
+#define A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__MASK 0x00000003
+#define A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__MASK;
+}
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__SHIFT 2
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__MASK;
+}
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__SHIFT 12
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__MASK;
+}
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__MASK;
+}
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_1 0x000023ce
+#define A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__MASK 0xffffffff
+#define A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_1_SIZE_X(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__SHIFT) & A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__MASK;
+}
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_2 0x000023cf
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_3 0x000023d0
+#define A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__MASK 0xffffffff
+#define A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__SHIFT) & A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__MASK;
+}
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_4 0x000023d1
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_5 0x000023d2
+#define A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__MASK 0xffffffff
+#define A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__SHIFT) & A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__MASK;
+}
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_6 0x000023d3
+
+#define REG_A4XX_HLSQ_CL_CONTROL_0 0x000023d4
+#define A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__MASK 0x00000fff
+#define A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__MASK;
+}
+#define A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID__MASK 0x00fff000
+#define A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID__SHIFT 12
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID__MASK;
+}
+#define A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__MASK 0xff000000
+#define A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__SHIFT 24
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__MASK;
+}
+
+#define REG_A4XX_HLSQ_CL_CONTROL_1 0x000023d5
+#define A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID__MASK 0x00000fff
+#define A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID__MASK;
+}
+#define A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID__MASK 0x00fff000
+#define A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID__SHIFT 12
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID__MASK;
+}
+
+#define REG_A4XX_HLSQ_CL_KERNEL_CONST 0x000023d6
+#define A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID__MASK 0x00000fff
+#define A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID__SHIFT) & A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID__MASK;
+}
+#define A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID__MASK 0x00fff000
+#define A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID__SHIFT 12
+static inline uint32_t A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID__SHIFT) & A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID__MASK;
+}
+
+#define REG_A4XX_HLSQ_CL_KERNEL_GROUP_X 0x000023d7
+
+#define REG_A4XX_HLSQ_CL_KERNEL_GROUP_Y 0x000023d8
+
+#define REG_A4XX_HLSQ_CL_KERNEL_GROUP_Z 0x000023d9
+
+#define REG_A4XX_HLSQ_CL_WG_OFFSET 0x000023da
+#define A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID__MASK 0x00000fff
+#define A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID__SHIFT) & A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID__MASK;
+}
+
+#define REG_A4XX_HLSQ_UPDATE_CONTROL 0x000023db
+
+#define REG_A4XX_PC_BINNING_COMMAND 0x00000d00
+#define A4XX_PC_BINNING_COMMAND_BINNING_ENABLE 0x00000001
+
+#define REG_A4XX_PC_TESSFACTOR_ADDR 0x00000d08
+
+#define REG_A4XX_PC_DRAWCALL_SETUP_OVERRIDE 0x00000d0c
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_0 0x00000d10
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_1 0x00000d11
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_2 0x00000d12
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_3 0x00000d13
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_4 0x00000d14
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_5 0x00000d15
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_6 0x00000d16
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_7 0x00000d17
+
+#define REG_A4XX_PC_BIN_BASE 0x000021c0
+
+#define REG_A4XX_PC_VSTREAM_CONTROL 0x000021c2
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val)
+{
+ return ((val) << A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A4XX_PC_VSTREAM_CONTROL_SIZE__MASK;
+}
+#define A4XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000
+#define A4XX_PC_VSTREAM_CONTROL_N__SHIFT 22
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_N(uint32_t val)
+{
+ return ((val) << A4XX_PC_VSTREAM_CONTROL_N__SHIFT) & A4XX_PC_VSTREAM_CONTROL_N__MASK;
+}
+
+#define REG_A4XX_PC_PRIM_VTX_CNTL 0x000021c4
+#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK 0x0000000f
+#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT 0
+static inline uint32_t A4XX_PC_PRIM_VTX_CNTL_VAROUT(uint32_t val)
+{
+ return ((val) << A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT) & A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK;
+}
+#define A4XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART 0x00100000
+#define A4XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
+#define A4XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
+
+#define REG_A4XX_PC_PRIM_VTX_CNTL2 0x000021c5
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK 0x00000007
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT 0
+static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK;
+}
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK 0x00000038
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT 3
+static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK;
+}
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_ENABLE 0x00000040
+
+#define REG_A4XX_PC_RESTART_INDEX 0x000021c6
+
+#define REG_A4XX_PC_GS_PARAM 0x000021e5
+#define A4XX_PC_GS_PARAM_MAX_VERTICES__MASK 0x000003ff
+#define A4XX_PC_GS_PARAM_MAX_VERTICES__SHIFT 0
+static inline uint32_t A4XX_PC_GS_PARAM_MAX_VERTICES(uint32_t val)
+{
+ return ((val) << A4XX_PC_GS_PARAM_MAX_VERTICES__SHIFT) & A4XX_PC_GS_PARAM_MAX_VERTICES__MASK;
+}
+#define A4XX_PC_GS_PARAM_INVOCATIONS__MASK 0x0000f800
+#define A4XX_PC_GS_PARAM_INVOCATIONS__SHIFT 11
+static inline uint32_t A4XX_PC_GS_PARAM_INVOCATIONS(uint32_t val)
+{
+ return ((val) << A4XX_PC_GS_PARAM_INVOCATIONS__SHIFT) & A4XX_PC_GS_PARAM_INVOCATIONS__MASK;
+}
+#define A4XX_PC_GS_PARAM_PRIMTYPE__MASK 0x01800000
+#define A4XX_PC_GS_PARAM_PRIMTYPE__SHIFT 23
+static inline uint32_t A4XX_PC_GS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A4XX_PC_GS_PARAM_PRIMTYPE__SHIFT) & A4XX_PC_GS_PARAM_PRIMTYPE__MASK;
+}
+#define A4XX_PC_GS_PARAM_LAYER 0x80000000
+
+#define REG_A4XX_PC_HS_PARAM 0x000021e7
+#define A4XX_PC_HS_PARAM_VERTICES_OUT__MASK 0x0000003f
+#define A4XX_PC_HS_PARAM_VERTICES_OUT__SHIFT 0
+static inline uint32_t A4XX_PC_HS_PARAM_VERTICES_OUT(uint32_t val)
+{
+ return ((val) << A4XX_PC_HS_PARAM_VERTICES_OUT__SHIFT) & A4XX_PC_HS_PARAM_VERTICES_OUT__MASK;
+}
+#define A4XX_PC_HS_PARAM_SPACING__MASK 0x00600000
+#define A4XX_PC_HS_PARAM_SPACING__SHIFT 21
+static inline uint32_t A4XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val)
+{
+ return ((val) << A4XX_PC_HS_PARAM_SPACING__SHIFT) & A4XX_PC_HS_PARAM_SPACING__MASK;
+}
+#define A4XX_PC_HS_PARAM_CW 0x00800000
+#define A4XX_PC_HS_PARAM_CONNECTED 0x01000000
+
+#define REG_A4XX_VBIF_VERSION 0x00003000
+
+#define REG_A4XX_VBIF_CLKON 0x00003001
+#define A4XX_VBIF_CLKON_FORCE_ON_TESTBUS 0x00000001
+
+#define REG_A4XX_VBIF_ABIT_SORT 0x0000301c
+
+#define REG_A4XX_VBIF_ABIT_SORT_CONF 0x0000301d
+
+#define REG_A4XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A4XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
+
+#define REG_A4XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
+
+#define REG_A4XX_VBIF_IN_WR_LIM_CONF0 0x00003030
+
+#define REG_A4XX_VBIF_IN_WR_LIM_CONF1 0x00003031
+
+#define REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
+
+#define REG_A4XX_VBIF_PERF_CNT_EN0 0x000030c0
+
+#define REG_A4XX_VBIF_PERF_CNT_EN1 0x000030c1
+
+#define REG_A4XX_VBIF_PERF_CNT_EN2 0x000030c2
+
+#define REG_A4XX_VBIF_PERF_CNT_EN3 0x000030c3
+
+#define REG_A4XX_VBIF_PERF_CNT_SEL0 0x000030d0
+
+#define REG_A4XX_VBIF_PERF_CNT_SEL1 0x000030d1
+
+#define REG_A4XX_VBIF_PERF_CNT_SEL2 0x000030d2
+
+#define REG_A4XX_VBIF_PERF_CNT_SEL3 0x000030d3
+
+#define REG_A4XX_VBIF_PERF_CNT_LOW0 0x000030d8
+
+#define REG_A4XX_VBIF_PERF_CNT_LOW1 0x000030d9
+
+#define REG_A4XX_VBIF_PERF_CNT_LOW2 0x000030da
+
+#define REG_A4XX_VBIF_PERF_CNT_LOW3 0x000030db
+
+#define REG_A4XX_VBIF_PERF_CNT_HIGH0 0x000030e0
+
+#define REG_A4XX_VBIF_PERF_CNT_HIGH1 0x000030e1
+
+#define REG_A4XX_VBIF_PERF_CNT_HIGH2 0x000030e2
+
+#define REG_A4XX_VBIF_PERF_CNT_HIGH3 0x000030e3
+
+#define REG_A4XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
+
+#define REG_A4XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
+
+#define REG_A4XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
+
+#define REG_A4XX_UNKNOWN_0CC5 0x00000cc5
+
+#define REG_A4XX_UNKNOWN_0CC6 0x00000cc6
+
+#define REG_A4XX_UNKNOWN_0D01 0x00000d01
+
+#define REG_A4XX_UNKNOWN_0E42 0x00000e42
+
+#define REG_A4XX_UNKNOWN_0EC2 0x00000ec2
+
+#define REG_A4XX_UNKNOWN_2001 0x00002001
+
+#define REG_A4XX_UNKNOWN_209B 0x0000209b
+
+#define REG_A4XX_UNKNOWN_20EF 0x000020ef
+
+#define REG_A4XX_UNKNOWN_2152 0x00002152
+
+#define REG_A4XX_UNKNOWN_2153 0x00002153
+
+#define REG_A4XX_UNKNOWN_2154 0x00002154
+
+#define REG_A4XX_UNKNOWN_2155 0x00002155
+
+#define REG_A4XX_UNKNOWN_2156 0x00002156
+
+#define REG_A4XX_UNKNOWN_2157 0x00002157
+
+#define REG_A4XX_UNKNOWN_21C3 0x000021c3
+
+#define REG_A4XX_UNKNOWN_21E6 0x000021e6
+
+#define REG_A4XX_UNKNOWN_2209 0x00002209
+
+#define REG_A4XX_UNKNOWN_22D7 0x000022d7
+
+#define REG_A4XX_UNKNOWN_2352 0x00002352
+
+#define REG_A4XX_TEX_SAMP_0 0x00000000
+#define A4XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
+#define A4XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
+#define A4XX_TEX_SAMP_0_XY_MAG__SHIFT 1
+static inline uint32_t A4XX_TEX_SAMP_0_XY_MAG(enum a4xx_tex_filter val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_XY_MAG__SHIFT) & A4XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A4XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
+#define A4XX_TEX_SAMP_0_XY_MIN__SHIFT 3
+static inline uint32_t A4XX_TEX_SAMP_0_XY_MIN(enum a4xx_tex_filter val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_XY_MIN__SHIFT) & A4XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A4XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
+#define A4XX_TEX_SAMP_0_WRAP_S__SHIFT 5
+static inline uint32_t A4XX_TEX_SAMP_0_WRAP_S(enum a4xx_tex_clamp val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_WRAP_S__SHIFT) & A4XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A4XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
+#define A4XX_TEX_SAMP_0_WRAP_T__SHIFT 8
+static inline uint32_t A4XX_TEX_SAMP_0_WRAP_T(enum a4xx_tex_clamp val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_WRAP_T__SHIFT) & A4XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A4XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
+#define A4XX_TEX_SAMP_0_WRAP_R__SHIFT 11
+static inline uint32_t A4XX_TEX_SAMP_0_WRAP_R(enum a4xx_tex_clamp val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_WRAP_R__SHIFT) & A4XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A4XX_TEX_SAMP_0_ANISO__MASK 0x0001c000
+#define A4XX_TEX_SAMP_0_ANISO__SHIFT 14
+static inline uint32_t A4XX_TEX_SAMP_0_ANISO(enum a4xx_tex_aniso val)
+{
+ return ((val) << A4XX_TEX_SAMP_0_ANISO__SHIFT) & A4XX_TEX_SAMP_0_ANISO__MASK;
+}
+#define A4XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
+#define A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
+static inline uint32_t A4XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 256.0))) << A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A4XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
+
+#define REG_A4XX_TEX_SAMP_1 0x00000001
+#define A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
+#define A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
+static inline uint32_t A4XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A4XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
+#define A4XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
+#define A4XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
+#define A4XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
+#define A4XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
+static inline uint32_t A4XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A4XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A4XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A4XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
+#define A4XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
+static inline uint32_t A4XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A4XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A4XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_0 0x00000000
+#define A4XX_TEX_CONST_0_TILED 0x00000001
+#define A4XX_TEX_CONST_0_SRGB 0x00000004
+#define A4XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A4XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A4XX_TEX_CONST_0_SWIZ_X(enum a4xx_tex_swiz val)
+{
+ return ((val) << A4XX_TEX_CONST_0_SWIZ_X__SHIFT) & A4XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A4XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A4XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A4XX_TEX_CONST_0_SWIZ_Y(enum a4xx_tex_swiz val)
+{
+ return ((val) << A4XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A4XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A4XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A4XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A4XX_TEX_CONST_0_SWIZ_Z(enum a4xx_tex_swiz val)
+{
+ return ((val) << A4XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A4XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A4XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A4XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A4XX_TEX_CONST_0_SWIZ_W(enum a4xx_tex_swiz val)
+{
+ return ((val) << A4XX_TEX_CONST_0_SWIZ_W__SHIFT) & A4XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A4XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
+#define A4XX_TEX_CONST_0_MIPLVLS__SHIFT 16
+static inline uint32_t A4XX_TEX_CONST_0_MIPLVLS(uint32_t val)
+{
+ return ((val) << A4XX_TEX_CONST_0_MIPLVLS__SHIFT) & A4XX_TEX_CONST_0_MIPLVLS__MASK;
+}
+#define A4XX_TEX_CONST_0_FMT__MASK 0x1fc00000
+#define A4XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A4XX_TEX_CONST_0_FMT(enum a4xx_tex_fmt val)
+{
+ return ((val) << A4XX_TEX_CONST_0_FMT__SHIFT) & A4XX_TEX_CONST_0_FMT__MASK;
+}
+#define A4XX_TEX_CONST_0_TYPE__MASK 0xe0000000
+#define A4XX_TEX_CONST_0_TYPE__SHIFT 29
+static inline uint32_t A4XX_TEX_CONST_0_TYPE(enum a4xx_tex_type val)
+{
+ return ((val) << A4XX_TEX_CONST_0_TYPE__SHIFT) & A4XX_TEX_CONST_0_TYPE__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_1 0x00000001
+#define A4XX_TEX_CONST_1_HEIGHT__MASK 0x00007fff
+#define A4XX_TEX_CONST_1_HEIGHT__SHIFT 0
+static inline uint32_t A4XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A4XX_TEX_CONST_1_HEIGHT__SHIFT) & A4XX_TEX_CONST_1_HEIGHT__MASK;
+}
+#define A4XX_TEX_CONST_1_WIDTH__MASK 0x3fff8000
+#define A4XX_TEX_CONST_1_WIDTH__SHIFT 15
+static inline uint32_t A4XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A4XX_TEX_CONST_1_WIDTH__SHIFT) & A4XX_TEX_CONST_1_WIDTH__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_2 0x00000002
+#define A4XX_TEX_CONST_2_PITCHALIGN__MASK 0x0000000f
+#define A4XX_TEX_CONST_2_PITCHALIGN__SHIFT 0
+static inline uint32_t A4XX_TEX_CONST_2_PITCHALIGN(uint32_t val)
+{
+ return ((val) << A4XX_TEX_CONST_2_PITCHALIGN__SHIFT) & A4XX_TEX_CONST_2_PITCHALIGN__MASK;
+}
+#define A4XX_TEX_CONST_2_BUFFER 0x00000040
+#define A4XX_TEX_CONST_2_PITCH__MASK 0x3ffffe00
+#define A4XX_TEX_CONST_2_PITCH__SHIFT 9
+static inline uint32_t A4XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A4XX_TEX_CONST_2_PITCH__SHIFT) & A4XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A4XX_TEX_CONST_2_SWAP__MASK 0xc0000000
+#define A4XX_TEX_CONST_2_SWAP__SHIFT 30
+static inline uint32_t A4XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A4XX_TEX_CONST_2_SWAP__SHIFT) & A4XX_TEX_CONST_2_SWAP__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_3 0x00000003
+#define A4XX_TEX_CONST_3_LAYERSZ__MASK 0x00003fff
+#define A4XX_TEX_CONST_3_LAYERSZ__SHIFT 0
+static inline uint32_t A4XX_TEX_CONST_3_LAYERSZ(uint32_t val)
+{
+ return ((val >> 12) << A4XX_TEX_CONST_3_LAYERSZ__SHIFT) & A4XX_TEX_CONST_3_LAYERSZ__MASK;
+}
+#define A4XX_TEX_CONST_3_DEPTH__MASK 0x7ffc0000
+#define A4XX_TEX_CONST_3_DEPTH__SHIFT 18
+static inline uint32_t A4XX_TEX_CONST_3_DEPTH(uint32_t val)
+{
+ return ((val) << A4XX_TEX_CONST_3_DEPTH__SHIFT) & A4XX_TEX_CONST_3_DEPTH__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_4 0x00000004
+#define A4XX_TEX_CONST_4_LAYERSZ__MASK 0x0000000f
+#define A4XX_TEX_CONST_4_LAYERSZ__SHIFT 0
+static inline uint32_t A4XX_TEX_CONST_4_LAYERSZ(uint32_t val)
+{
+ return ((val >> 12) << A4XX_TEX_CONST_4_LAYERSZ__SHIFT) & A4XX_TEX_CONST_4_LAYERSZ__MASK;
+}
+#define A4XX_TEX_CONST_4_BASE__MASK 0xffffffe0
+#define A4XX_TEX_CONST_4_BASE__SHIFT 5
+static inline uint32_t A4XX_TEX_CONST_4_BASE(uint32_t val)
+{
+ return ((val >> 5) << A4XX_TEX_CONST_4_BASE__SHIFT) & A4XX_TEX_CONST_4_BASE__MASK;
+}
+
+#define REG_A4XX_TEX_CONST_5 0x00000005
+
+#define REG_A4XX_TEX_CONST_6 0x00000006
+
+#define REG_A4XX_TEX_CONST_7 0x00000007
+
+#define REG_A4XX_SSBO_0_0 0x00000000
+#define A4XX_SSBO_0_0_BASE__MASK 0xffffffe0
+#define A4XX_SSBO_0_0_BASE__SHIFT 5
+static inline uint32_t A4XX_SSBO_0_0_BASE(uint32_t val)
+{
+ return ((val >> 5) << A4XX_SSBO_0_0_BASE__SHIFT) & A4XX_SSBO_0_0_BASE__MASK;
+}
+
+#define REG_A4XX_SSBO_0_1 0x00000001
+#define A4XX_SSBO_0_1_PITCH__MASK 0x003fffff
+#define A4XX_SSBO_0_1_PITCH__SHIFT 0
+static inline uint32_t A4XX_SSBO_0_1_PITCH(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_0_1_PITCH__SHIFT) & A4XX_SSBO_0_1_PITCH__MASK;
+}
+
+#define REG_A4XX_SSBO_0_2 0x00000002
+#define A4XX_SSBO_0_2_ARRAY_PITCH__MASK 0x03fff000
+#define A4XX_SSBO_0_2_ARRAY_PITCH__SHIFT 12
+static inline uint32_t A4XX_SSBO_0_2_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A4XX_SSBO_0_2_ARRAY_PITCH__SHIFT) & A4XX_SSBO_0_2_ARRAY_PITCH__MASK;
+}
+
+#define REG_A4XX_SSBO_0_3 0x00000003
+#define A4XX_SSBO_0_3_CPP__MASK 0x0000003f
+#define A4XX_SSBO_0_3_CPP__SHIFT 0
+static inline uint32_t A4XX_SSBO_0_3_CPP(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_0_3_CPP__SHIFT) & A4XX_SSBO_0_3_CPP__MASK;
+}
+
+#define REG_A4XX_SSBO_1_0 0x00000000
+#define A4XX_SSBO_1_0_CPP__MASK 0x0000001f
+#define A4XX_SSBO_1_0_CPP__SHIFT 0
+static inline uint32_t A4XX_SSBO_1_0_CPP(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_0_CPP__SHIFT) & A4XX_SSBO_1_0_CPP__MASK;
+}
+#define A4XX_SSBO_1_0_FMT__MASK 0x0000ff00
+#define A4XX_SSBO_1_0_FMT__SHIFT 8
+static inline uint32_t A4XX_SSBO_1_0_FMT(enum a4xx_color_fmt val)
+{
+ return ((val) << A4XX_SSBO_1_0_FMT__SHIFT) & A4XX_SSBO_1_0_FMT__MASK;
+}
+#define A4XX_SSBO_1_0_WIDTH__MASK 0xffff0000
+#define A4XX_SSBO_1_0_WIDTH__SHIFT 16
+static inline uint32_t A4XX_SSBO_1_0_WIDTH(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_0_WIDTH__SHIFT) & A4XX_SSBO_1_0_WIDTH__MASK;
+}
+
+#define REG_A4XX_SSBO_1_1 0x00000001
+#define A4XX_SSBO_1_1_HEIGHT__MASK 0x0000ffff
+#define A4XX_SSBO_1_1_HEIGHT__SHIFT 0
+static inline uint32_t A4XX_SSBO_1_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_1_HEIGHT__SHIFT) & A4XX_SSBO_1_1_HEIGHT__MASK;
+}
+#define A4XX_SSBO_1_1_DEPTH__MASK 0xffff0000
+#define A4XX_SSBO_1_1_DEPTH__SHIFT 16
+static inline uint32_t A4XX_SSBO_1_1_DEPTH(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_1_DEPTH__SHIFT) & A4XX_SSBO_1_1_DEPTH__MASK;
+}
+
+
+#endif /* A4XX_XML */
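Editorial note on the generated helpers above: every register field gets a __MASK/__SHIFT pair plus a small inline packer that shifts and masks the caller's value, so a complete register dword is simply the OR of the per-field packers. As a minimal sketch of that usage (not part of the patch; the function name is made up and the enum values are whatever the caller picks), a sampler-state word could be composed from the A4XX_TEX_SAMP_0_* helpers shown above like so:

    #include "a4xx.xml.h"

    /* Hypothetical illustration: compose the TEX_SAMP_0 dword from its fields. */
    static inline uint32_t pack_tex_samp_0(enum a4xx_tex_filter filter,
                                           enum a4xx_tex_clamp wrap,
                                           float lod_bias)
    {
            return A4XX_TEX_SAMP_0_XY_MAG(filter) |  /* magnification filter */
                   A4XX_TEX_SAMP_0_XY_MIN(filter) |  /* minification filter */
                   A4XX_TEX_SAMP_0_WRAP_S(wrap) |
                   A4XX_TEX_SAMP_0_WRAP_T(wrap) |
                   A4XX_TEX_SAMP_0_WRAP_R(wrap) |
                   /* stored as signed fixed point with 8 fractional bits */
                   A4XX_TEX_SAMP_0_LOD_BIAS(lod_bias);
    }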
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
new file mode 100644
index 0000000000..8b4cdf95f4
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -0,0 +1,741 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ */
+#include "a4xx_gpu.h"
+
+#define A4XX_INT0_MASK \
+ (A4XX_INT0_RBBM_AHB_ERROR | \
+ A4XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
+ A4XX_INT0_CP_T0_PACKET_IN_IB | \
+ A4XX_INT0_CP_OPCODE_ERROR | \
+ A4XX_INT0_CP_RESERVED_BIT_ERROR | \
+ A4XX_INT0_CP_HW_FAULT | \
+ A4XX_INT0_CP_IB1_INT | \
+ A4XX_INT0_CP_IB2_INT | \
+ A4XX_INT0_CP_RB_INT | \
+ A4XX_INT0_CP_REG_PROTECT_FAULT | \
+ A4XX_INT0_CP_AHB_ERROR_HALT | \
+ A4XX_INT0_CACHE_FLUSH_TS | \
+ A4XX_INT0_UCHE_OOB_ACCESS)
+
+extern bool hang_debug;
+static void a4xx_dump(struct msm_gpu *gpu);
+static bool a4xx_idle(struct msm_gpu *gpu);
+
+static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+ struct msm_ringbuffer *ring = submit->ring;
+ unsigned int i;
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ /* ignore IB-targets */
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ /* ignore if there has not been a ctx switch: */
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ break;
+ fallthrough;
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFE, 2);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ OUT_PKT2(ring);
+ break;
+ }
+ }
+
+ OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
+ OUT_RING(ring, submit->seqno);
+
+ /* Flush HLSQ lazy updates to make sure there is nothing
+ * pending for indirect loads after the timestamp has
+ * passed:
+ */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, HLSQ_FLUSH);
+
+ /* wait for idle before cache flush/interrupt */
+ OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+ OUT_RING(ring, 0x00000000);
+
+ /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+ OUT_RING(ring, CACHE_FLUSH_TS | CP_EVENT_WRITE_0_IRQ);
+ OUT_RING(ring, rbmemptr(ring, fence));
+ OUT_RING(ring, submit->seqno);
+
+ adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);
+}
+
+/*
+ * a4xx_enable_hwcg() - Program the clock control registers
+ * @gpu: The msm GPU pointer
+ */
+static void a4xx_enable_hwcg(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ unsigned int i;
+
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TP(i), 0x0E739CE7);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TP(i), 0x00111111);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_SP(i), 0x22222222);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_SP(i), 0x00222222);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_SP(i), 0x00000104);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_SP(i), 0x00000081);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_UCHE, 0x22222222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_UCHE, 0x02222222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL3_UCHE, 0x00000000);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL4_UCHE, 0x00000000);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_UCHE, 0x00004444);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_UCHE, 0x00001112);
+ for (i = 0; i < 4; i++)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_RB(i), 0x22222222);
+
+ /* Disable L1 clocking in A420 due to CCU issues with it */
+ for (i = 0; i < 4; i++) {
+ if (adreno_is_a420(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
+ 0x00002020);
+ } else {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
+ 0x00022020);
+ }
+ }
+
+ /* No CCU for A405 */
+ if (!adreno_is_a405(adreno_gpu)) {
+ for (i = 0; i < 4; i++) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i),
+ 0x00000922);
+ }
+
+ for (i = 0; i < 4; i++) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i),
+ 0x00000000);
+ }
+
+ for (i = 0; i < 4; i++) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i),
+ 0x00000001);
+ }
+ }
+
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_GPC, 0x04100104);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_GPC, 0x00022222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM, 0x00000022);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM, 0x0000010F);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM, 0x00000022);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM, 0x00222222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00004104);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_HLSQ, 0x00000000);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00220000);
+ /*
+ * Early A430s have a timing issue with SP/TP power collapse;
+ * disabling HW clock gating prevents it.
+ */
+ if (adreno_is_a430(adreno_gpu) && adreno_patchid(adreno_gpu) < 2)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0);
+ else
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2, 0);
+}
+
+static bool a4xx_me_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT3(ring, CP_ME_INIT, 17);
+ OUT_RING(ring, 0x000003f7);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000080);
+ OUT_RING(ring, 0x00000100);
+ OUT_RING(ring, 0x00000180);
+ OUT_RING(ring, 0x00006600);
+ OUT_RING(ring, 0x00000150);
+ OUT_RING(ring, 0x0000014e);
+ OUT_RING(ring, 0x00000154);
+ OUT_RING(ring, 0x00000001);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);
+ return a4xx_idle(gpu);
+}
+
+static int a4xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
+ uint32_t *ptr, len;
+ int i, ret;
+
+ if (adreno_is_a405(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+ } else if (adreno_is_a420(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
+ gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
+ gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
+ gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+ } else if (adreno_is_a430(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
+ gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+ } else {
+ BUG();
+ }
+
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ gpu_write(gpu, REG_A4XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);
+
+ /* Tune the hysteresis counters for SP and CP idle detection */
+ gpu_write(gpu, REG_A4XX_RBBM_SP_HYST_CNT, 0x10);
+ gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
+
+ if (adreno_is_a430(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2, 0x30);
+ }
+
+ /* Enable the RBBM error reporting bits */
+ gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL0, 0x00000001);
+
+ /* Enable AHB error reporting */
+ gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL1, 0xa6ffffff);
+
+ /* Enable power counters */
+ gpu_write(gpu, REG_A4XX_RBBM_RBBM_CTL, 0x00000030);
+
+ /*
+ * Turn on hang detection - this spews a lot of useful information
+ * into the RBBM registers on a hang:
+ */
+ gpu_write(gpu, REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL,
+ (1 << 30) | 0xFFFF);
+
+ gpu_write(gpu, REG_A4XX_RB_GMEM_BASE_ADDR,
+ (unsigned int)(a4xx_gpu->ocmem.base >> 14));
+
+ /* Turn on performance counters: */
+ gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);
+
+ /* Use the first CP counter for timestamp queries; userspace may set
+ * this as well, but it selects the same counter/countable:
+ */
+ gpu_write(gpu, REG_A4XX_CP_PERFCTR_CP_SEL_0, CP_ALWAYS_COUNT);
+
+ if (adreno_is_a430(adreno_gpu))
+ gpu_write(gpu, REG_A4XX_UCHE_CACHE_WAYS_VFD, 0x07);
+
+ /* Disable L2 bypass to avoid UCHE out of bounds errors */
+ gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
+ gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);
+
+ gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
+ (adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));
+
+ /* On A430 enable SP regfile sleep for power savings */
+ /* TODO: downstream does this for !A420, so maybe it applies to A405 too? */
+ if (!adreno_is_a420(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0,
+ 0x00000441);
+ gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1,
+ 0x00000441);
+ }
+
+ a4xx_enable_hwcg(gpu);
+
+ /*
+ * For A420 set RBBM_CLOCK_DELAY_HLSQ.CGC_HLSQ_TP_EARLY_CYC >= 2
+ * due to timing issue with HLSQ_TP_CLK_EN
+ */
+ if (adreno_is_a420(adreno_gpu)) {
+ unsigned int val;
+ val = gpu_read(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ);
+ val &= ~A4XX_CGC_HLSQ_EARLY_CYC__MASK;
+ val |= 2 << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT;
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, val);
+ }
+
+ /* setup access protection: */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT_CTRL, 0x00000007);
+
+ /* RBBM registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(0), 0x62000010);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(1), 0x63000020);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(2), 0x64000040);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(3), 0x65000080);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(4), 0x66000100);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(5), 0x64000200);
+
+ /* CP registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(6), 0x67000800);
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(7), 0x64001600);
+
+ /* RB registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(8), 0x60003300);
+
+ /* HLSQ registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(9), 0x60003800);
+
+ /* VPC registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(10), 0x61003980);
+
+ /* SMMU registers */
+ gpu_write(gpu, REG_A4XX_CP_PROTECT(11), 0x6e010000);
+
+ gpu_write(gpu, REG_A4XX_RBBM_INT_0_MASK, A4XX_INT0_MASK);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ /*
+ * Use the default ringbuffer size and block size but disable the RPTR
+ * shadow
+ */
+ gpu_write(gpu, REG_A4XX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ /* Set the ringbuffer address */
+ gpu_write(gpu, REG_A4XX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
+ /* Load PM4: */
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
+ DBG("loading PM4 ucode version: %u", ptr[0]);
+ gpu_write(gpu, REG_A4XX_CP_ME_RAM_WADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_A4XX_CP_ME_RAM_DATA, ptr[i]);
+
+ /* Load PFP: */
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
+ DBG("loading PFP ucode version: %u", ptr[0]);
+
+ gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_ADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_DATA, ptr[i]);
+
+ /* clear ME_HALT to start micro engine */
+ gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);
+
+ return a4xx_me_init(gpu) ? 0 : -EINVAL;
+}
+
+static void a4xx_recover(struct msm_gpu *gpu)
+{
+ int i;
+
+ adreno_dump_info(gpu);
+
+ for (i = 0; i < 8; i++) {
+ printk("CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
+ }
+
+ /* dump registers before resetting gpu, if enabled: */
+ if (hang_debug)
+ a4xx_dump(gpu);
+
+ gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 1);
+ gpu_read(gpu, REG_A4XX_RBBM_SW_RESET_CMD);
+ gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 0);
+ adreno_recover(gpu);
+}
+
+static void a4xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ adreno_gpu_cleanup(adreno_gpu);
+
+ adreno_gpu_ocmem_cleanup(&a4xx_gpu->ocmem);
+
+ kfree(a4xx_gpu);
+}
+
+static bool a4xx_idle(struct msm_gpu *gpu)
+{
+ /* wait for ringbuffer to drain: */
+ if (!adreno_idle(gpu, gpu->rb[0]))
+ return false;
+
+ /* then wait for GPU to finish: */
+ if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
+ A4XX_RBBM_STATUS_GPU_BUSY))) {
+ DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ return false;
+ }
+
+ return true;
+}
+
+static irqreturn_t a4xx_irq(struct msm_gpu *gpu)
+{
+ uint32_t status;
+
+ status = gpu_read(gpu, REG_A4XX_RBBM_INT_0_STATUS);
+ DBG("%s: Int status %08x", gpu->name, status);
+
+ if (status & A4XX_INT0_CP_REG_PROTECT_FAULT) {
+ uint32_t reg = gpu_read(gpu, REG_A4XX_CP_PROTECT_STATUS);
+ printk("CP | Protected mode error | %s | addr=%x\n",
+ reg & (1 << 24) ? "WRITE" : "READ",
+ (reg & 0xFFFFF) >> 2);
+ }
+
+ gpu_write(gpu, REG_A4XX_RBBM_INT_CLEAR_CMD, status);
+
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
+static const unsigned int a4xx_registers[] = {
+ /* RBBM */
+ 0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026,
+ 0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066,
+ 0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF,
+ /* CP */
+ 0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B,
+ 0x0578, 0x058F,
+ /* VSC */
+ 0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51,
+ /* GRAS */
+ 0x0C80, 0x0C81, 0x0C88, 0x0C8F,
+ /* RB */
+ 0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2,
+ /* PC */
+ 0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
+ /* VFD */
+ 0x0E40, 0x0E4A,
+ /* VPC */
+ 0x0E60, 0x0E61, 0x0E63, 0x0E68,
+ /* UCHE */
+ 0x0E80, 0x0E84, 0x0E88, 0x0E95,
+ /* VMIDMT */
+ 0x1000, 0x1000, 0x1002, 0x1002, 0x1004, 0x1004, 0x1008, 0x100A,
+ 0x100C, 0x100D, 0x100F, 0x1010, 0x1012, 0x1016, 0x1024, 0x1024,
+ 0x1027, 0x1027, 0x1100, 0x1100, 0x1102, 0x1102, 0x1104, 0x1104,
+ 0x1110, 0x1110, 0x1112, 0x1116, 0x1124, 0x1124, 0x1300, 0x1300,
+ 0x1380, 0x1380,
+ /* GRAS CTX 0 */
+ 0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E,
+ /* PC CTX 0 */
+ 0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7,
+ /* VFD CTX 0 */
+ 0x2200, 0x2204, 0x2208, 0x22A9,
+ /* GRAS CTX 1 */
+ 0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E,
+ /* PC CTX 1 */
+ 0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7,
+ /* VFD CTX 1 */
+ 0x2600, 0x2604, 0x2608, 0x26A9,
+ /* XPU */
+ 0x2C00, 0x2C01, 0x2C10, 0x2C10, 0x2C12, 0x2C16, 0x2C1D, 0x2C20,
+ 0x2C28, 0x2C28, 0x2C30, 0x2C30, 0x2C32, 0x2C36, 0x2C40, 0x2C40,
+ 0x2C50, 0x2C50, 0x2C52, 0x2C56, 0x2C80, 0x2C80, 0x2C94, 0x2C95,
+ /* VBIF */
+ 0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x301D, 0x3020, 0x3022,
+ 0x3024, 0x3026, 0x3028, 0x302A, 0x302C, 0x302D, 0x3030, 0x3031,
+ 0x3034, 0x3036, 0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040,
+ 0x3049, 0x3049, 0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068,
+ 0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
+ 0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
+ 0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
+ 0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
+ 0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x330C, 0x330C,
+ 0x3310, 0x3310, 0x3400, 0x3401, 0x3410, 0x3410, 0x3412, 0x3416,
+ 0x341D, 0x3420, 0x3428, 0x3428, 0x3430, 0x3430, 0x3432, 0x3436,
+ 0x3440, 0x3440, 0x3450, 0x3450, 0x3452, 0x3456, 0x3480, 0x3480,
+ 0x3494, 0x3495, 0x4000, 0x4000, 0x4002, 0x4002, 0x4004, 0x4004,
+ 0x4008, 0x400A, 0x400C, 0x400D, 0x400F, 0x4012, 0x4014, 0x4016,
+ 0x401D, 0x401D, 0x4020, 0x4027, 0x4060, 0x4062, 0x4200, 0x4200,
+ 0x4300, 0x4300, 0x4400, 0x4400, 0x4500, 0x4500, 0x4800, 0x4802,
+ 0x480F, 0x480F, 0x4811, 0x4811, 0x4813, 0x4813, 0x4815, 0x4816,
+ 0x482B, 0x482B, 0x4857, 0x4857, 0x4883, 0x4883, 0x48AF, 0x48AF,
+ 0x48C5, 0x48C5, 0x48E5, 0x48E5, 0x4905, 0x4905, 0x4925, 0x4925,
+ 0x4945, 0x4945, 0x4950, 0x4950, 0x495B, 0x495B, 0x4980, 0x498E,
+ 0x4B00, 0x4B00, 0x4C00, 0x4C00, 0x4D00, 0x4D00, 0x4E00, 0x4E00,
+ 0x4E80, 0x4E80, 0x4F00, 0x4F00, 0x4F08, 0x4F08, 0x4F10, 0x4F10,
+ 0x4F18, 0x4F18, 0x4F20, 0x4F20, 0x4F30, 0x4F30, 0x4F60, 0x4F60,
+ 0x4F80, 0x4F81, 0x4F88, 0x4F89, 0x4FEE, 0x4FEE, 0x4FF3, 0x4FF3,
+ 0x6000, 0x6001, 0x6008, 0x600F, 0x6014, 0x6016, 0x6018, 0x601B,
+ 0x61FD, 0x61FD, 0x623C, 0x623C, 0x6380, 0x6380, 0x63A0, 0x63A0,
+ 0x63C0, 0x63C1, 0x63C8, 0x63C9, 0x63D0, 0x63D4, 0x63D6, 0x63D6,
+ 0x63EE, 0x63EE, 0x6400, 0x6401, 0x6408, 0x640F, 0x6414, 0x6416,
+ 0x6418, 0x641B, 0x65FD, 0x65FD, 0x663C, 0x663C, 0x6780, 0x6780,
+ 0x67A0, 0x67A0, 0x67C0, 0x67C1, 0x67C8, 0x67C9, 0x67D0, 0x67D4,
+ 0x67D6, 0x67D6, 0x67EE, 0x67EE, 0x6800, 0x6801, 0x6808, 0x680F,
+ 0x6814, 0x6816, 0x6818, 0x681B, 0x69FD, 0x69FD, 0x6A3C, 0x6A3C,
+ 0x6B80, 0x6B80, 0x6BA0, 0x6BA0, 0x6BC0, 0x6BC1, 0x6BC8, 0x6BC9,
+ 0x6BD0, 0x6BD4, 0x6BD6, 0x6BD6, 0x6BEE, 0x6BEE,
+ ~0 /* sentinel */
+};
+
+static const unsigned int a405_registers[] = {
+ /* RBBM */
+ 0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026,
+ 0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066,
+ 0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF,
+ /* CP */
+ 0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B,
+ 0x0578, 0x058F,
+ /* VSC */
+ 0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51,
+ /* GRAS */
+ 0x0C80, 0x0C81, 0x0C88, 0x0C8F,
+ /* RB */
+ 0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2,
+ /* PC */
+ 0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
+ /* VFD */
+ 0x0E40, 0x0E4A,
+ /* VPC */
+ 0x0E60, 0x0E61, 0x0E63, 0x0E68,
+ /* UCHE */
+ 0x0E80, 0x0E84, 0x0E88, 0x0E95,
+ /* GRAS CTX 0 */
+ 0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E,
+ /* PC CTX 0 */
+ 0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7,
+ /* VFD CTX 0 */
+ 0x2200, 0x2204, 0x2208, 0x22A9,
+ /* GRAS CTX 1 */
+ 0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E,
+ /* PC CTX 1 */
+ 0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7,
+ /* VFD CTX 1 */
+ 0x2600, 0x2604, 0x2608, 0x26A9,
+ /* VBIF version 0x20050000*/
+ 0x3000, 0x3007, 0x302C, 0x302C, 0x3030, 0x3030, 0x3034, 0x3036,
+ 0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040, 0x3049, 0x3049,
+ 0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068, 0x306C, 0x306D,
+ 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094, 0x3098, 0x3098,
+ 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8, 0x30D0, 0x30D0,
+ 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100, 0x3108, 0x3108,
+ 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120, 0x3124, 0x3125,
+ 0x3129, 0x3129, 0x340C, 0x340C, 0x3410, 0x3410,
+ ~0 /* sentinel */
+};
+
+static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu_state_get(gpu, state);
+
+ state->rbbm_status = gpu_read(gpu, REG_A4XX_RBBM_STATUS);
+
+ return state;
+}
+
+static void a4xx_dump(struct msm_gpu *gpu)
+{
+ printk("status: %08x\n",
+ gpu_read(gpu, REG_A4XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+static int a4xx_pm_resume(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
+ ret = msm_gpu_pm_resume(gpu);
+ if (ret)
+ return ret;
+
+ if (adreno_is_a430(adreno_gpu)) {
+ unsigned int reg;
+ /* Set the default register values; set SW_COLLAPSE to 0 */
+ gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778000);
+ do {
+ udelay(5);
+ reg = gpu_read(gpu, REG_A4XX_RBBM_POWER_STATUS);
+ } while (!(reg & A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON));
+ }
+ return 0;
+}
+
+static int a4xx_pm_suspend(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
+ ret = msm_gpu_pm_suspend(gpu);
+ if (ret)
+ return ret;
+
+ if (adreno_is_a430(adreno_gpu)) {
+ /* Set the default register values; set SW_COLLAPSE to 1 */
+ gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778001);
+ }
+ return 0;
+}
+
+static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ *value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO);
+
+ return 0;
+}
+
+static u64 a4xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
+{
+ u64 busy_cycles;
+
+ busy_cycles = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_RBBM_1_LO);
+ *out_sample_rate = clk_get_rate(gpu->core_clk);
+
+ return busy_cycles;
+}
+
+static u32 a4xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ ring->memptrs->rptr = gpu_read(gpu, REG_A4XX_CP_RB_RPTR);
+ return ring->memptrs->rptr;
+}
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a4xx_hw_init,
+ .pm_suspend = a4xx_pm_suspend,
+ .pm_resume = a4xx_pm_resume,
+ .recover = a4xx_recover,
+ .submit = a4xx_submit,
+ .active_ring = adreno_active_ring,
+ .irq = a4xx_irq,
+ .destroy = a4xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
+#endif
+ .gpu_busy = a4xx_gpu_busy,
+ .gpu_state_get = a4xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
+ .create_address_space = adreno_create_address_space,
+ .get_rptr = a4xx_get_rptr,
+ },
+ .get_timestamp = a4xx_get_timestamp,
+};
+
+struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
+{
+ struct a4xx_gpu *a4xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct icc_path *ocmem_icc_path;
+ struct icc_path *icc_path;
+ int ret;
+
+ if (!pdev) {
+ DRM_DEV_ERROR(dev->dev, "no a4xx device\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ a4xx_gpu = kzalloc(sizeof(*a4xx_gpu), GFP_KERNEL);
+ if (!a4xx_gpu) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ adreno_gpu = &a4xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ gpu->perfcntrs = NULL;
+ gpu->num_perfcntrs = 0;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ if (ret)
+ goto fail;
+
+ adreno_gpu->registers = adreno_is_a405(adreno_gpu) ? a405_registers :
+ a4xx_registers;
+
+ /* if needed, allocate gmem: */
+ ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu,
+ &a4xx_gpu->ocmem);
+ if (ret)
+ goto fail;
+
+ if (!gpu->aspace) {
+ /* TODO we think it is possible to configure the GPU to
+ * restrict access to VRAM carveout. But the required
+ * registers are unknown. For now just bail out and
+ * limp along with just modesetting. If it turns out
+ * to not be possible to restrict access, then we must
+ * implement a cmdstream validator.
+ */
+ DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
+ }
+
+ icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
+ if (IS_ERR(icc_path)) {
+ ret = PTR_ERR(icc_path);
+ goto fail;
+ }
+
+ ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem");
+ if (IS_ERR(ocmem_icc_path)) {
+ ret = PTR_ERR(ocmem_icc_path);
+ /* allow -ENODATA, ocmem icc is optional */
+ if (ret != -ENODATA)
+ goto fail;
+ ocmem_icc_path = NULL;
+ }
+
+ /*
+ * Set the ICC path to maximum speed for now by multiplying the fastest
+ * frequency by the bus width (8). We'll want to scale this later on to
+ * improve battery life.
+ */
+ icc_set_bw(icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
+ icc_set_bw(ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
+
+ return gpu;
+
+fail:
+ if (a4xx_gpu)
+ a4xx_destroy(&a4xx_gpu->base.base);
+
+ return ERR_PTR(ret);
+}
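Editorial note on the interconnect vote near the end of a4xx_gpu_init() above: as the in-code comment says, the peak bandwidth is the fastest core clock multiplied by the 8-byte bus width. A quick worked example of what that evaluates to (a sketch, not part of the patch; the 600 MHz rate is an arbitrary assumption and the helper name is made up):

    #include <linux/types.h>
    #include <linux/interconnect.h>

    /*
     * Mirrors the icc_set_bw() argument used in a4xx_gpu_init().  For an
     * assumed fast_rate of 600 MHz:
     *   600,000,000 Hz * 8 bytes/cycle = 4.8 GB/s
     *   Bps_to_icc(600000000) * 8      = 4,800,000 kBps
     */
    static u32 a4xx_peak_icc_bw(unsigned long fast_rate)
    {
            return Bps_to_icc(fast_rate) * 8;
    }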
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
new file mode 100644
index 0000000000..a01448cba2
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ */
+#ifndef __A4XX_GPU_H__
+#define __A4XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a4xx.xml.h"
+
+struct a4xx_gpu {
+ struct adreno_gpu base;
+
+ /* if OCMEM is used for GMEM: */
+ struct adreno_ocmem ocmem;
+};
+#define to_a4xx_gpu(x) container_of(x, struct a4xx_gpu, base)
+
+#endif /* __A4XX_GPU_H__ */
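Editorial note: struct a4xx_gpu above is the outer layer of a two-level embedding; it wraps adreno_gpu (as "base"), which in turn embeds the generic msm_gpu that the core hands to every callback. A brief sketch of the resulting downcast chain, as used by a4xx_pm_resume() and a4xx_destroy() in the .c file above (not part of the patch; the helper name is hypothetical):

    #include "a4xx_gpu.h"

    /* Recover the a4xx wrapper from the generic msm_gpu pointer handed to
     * the callbacks, via two container_of()-based casts.
     */
    static inline struct a4xx_gpu *msm_gpu_to_a4xx(struct msm_gpu *gpu)
    {
            struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); /* msm_gpu -> adreno_gpu */

            return to_a4xx_gpu(adreno_gpu);                     /* adreno_gpu -> a4xx_gpu */
    }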
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
new file mode 100644
index 0000000000..03b7ee592b
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -0,0 +1,5498 @@
+#ifndef A5XX_XML
+#define A5XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52)
+
+Copyright (C) 2013-2023 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a5xx_color_fmt {
+ RB5_A8_UNORM = 2,
+ RB5_R8_UNORM = 3,
+ RB5_R8_SNORM = 4,
+ RB5_R8_UINT = 5,
+ RB5_R8_SINT = 6,
+ RB5_R4G4B4A4_UNORM = 8,
+ RB5_R5G5B5A1_UNORM = 10,
+ RB5_R5G6B5_UNORM = 14,
+ RB5_R8G8_UNORM = 15,
+ RB5_R8G8_SNORM = 16,
+ RB5_R8G8_UINT = 17,
+ RB5_R8G8_SINT = 18,
+ RB5_R16_UNORM = 21,
+ RB5_R16_SNORM = 22,
+ RB5_R16_FLOAT = 23,
+ RB5_R16_UINT = 24,
+ RB5_R16_SINT = 25,
+ RB5_R8G8B8A8_UNORM = 48,
+ RB5_R8G8B8_UNORM = 49,
+ RB5_R8G8B8A8_SNORM = 50,
+ RB5_R8G8B8A8_UINT = 51,
+ RB5_R8G8B8A8_SINT = 52,
+ RB5_R10G10B10A2_UNORM = 55,
+ RB5_R10G10B10A2_UINT = 58,
+ RB5_R11G11B10_FLOAT = 66,
+ RB5_R16G16_UNORM = 67,
+ RB5_R16G16_SNORM = 68,
+ RB5_R16G16_FLOAT = 69,
+ RB5_R16G16_UINT = 70,
+ RB5_R16G16_SINT = 71,
+ RB5_R32_FLOAT = 74,
+ RB5_R32_UINT = 75,
+ RB5_R32_SINT = 76,
+ RB5_R16G16B16A16_UNORM = 96,
+ RB5_R16G16B16A16_SNORM = 97,
+ RB5_R16G16B16A16_FLOAT = 98,
+ RB5_R16G16B16A16_UINT = 99,
+ RB5_R16G16B16A16_SINT = 100,
+ RB5_R32G32_FLOAT = 103,
+ RB5_R32G32_UINT = 104,
+ RB5_R32G32_SINT = 105,
+ RB5_R32G32B32A32_FLOAT = 130,
+ RB5_R32G32B32A32_UINT = 131,
+ RB5_R32G32B32A32_SINT = 132,
+ RB5_NONE = 255,
+};
+
+enum a5xx_tile_mode {
+ TILE5_LINEAR = 0,
+ TILE5_2 = 2,
+ TILE5_3 = 3,
+};
+
+enum a5xx_vtx_fmt {
+ VFMT5_8_UNORM = 3,
+ VFMT5_8_SNORM = 4,
+ VFMT5_8_UINT = 5,
+ VFMT5_8_SINT = 6,
+ VFMT5_8_8_UNORM = 15,
+ VFMT5_8_8_SNORM = 16,
+ VFMT5_8_8_UINT = 17,
+ VFMT5_8_8_SINT = 18,
+ VFMT5_16_UNORM = 21,
+ VFMT5_16_SNORM = 22,
+ VFMT5_16_FLOAT = 23,
+ VFMT5_16_UINT = 24,
+ VFMT5_16_SINT = 25,
+ VFMT5_8_8_8_UNORM = 33,
+ VFMT5_8_8_8_SNORM = 34,
+ VFMT5_8_8_8_UINT = 35,
+ VFMT5_8_8_8_SINT = 36,
+ VFMT5_8_8_8_8_UNORM = 48,
+ VFMT5_8_8_8_8_SNORM = 50,
+ VFMT5_8_8_8_8_UINT = 51,
+ VFMT5_8_8_8_8_SINT = 52,
+ VFMT5_10_10_10_2_UNORM = 54,
+ VFMT5_10_10_10_2_SNORM = 57,
+ VFMT5_10_10_10_2_UINT = 58,
+ VFMT5_10_10_10_2_SINT = 59,
+ VFMT5_11_11_10_FLOAT = 66,
+ VFMT5_16_16_UNORM = 67,
+ VFMT5_16_16_SNORM = 68,
+ VFMT5_16_16_FLOAT = 69,
+ VFMT5_16_16_UINT = 70,
+ VFMT5_16_16_SINT = 71,
+ VFMT5_32_UNORM = 72,
+ VFMT5_32_SNORM = 73,
+ VFMT5_32_FLOAT = 74,
+ VFMT5_32_UINT = 75,
+ VFMT5_32_SINT = 76,
+ VFMT5_32_FIXED = 77,
+ VFMT5_16_16_16_UNORM = 88,
+ VFMT5_16_16_16_SNORM = 89,
+ VFMT5_16_16_16_FLOAT = 90,
+ VFMT5_16_16_16_UINT = 91,
+ VFMT5_16_16_16_SINT = 92,
+ VFMT5_16_16_16_16_UNORM = 96,
+ VFMT5_16_16_16_16_SNORM = 97,
+ VFMT5_16_16_16_16_FLOAT = 98,
+ VFMT5_16_16_16_16_UINT = 99,
+ VFMT5_16_16_16_16_SINT = 100,
+ VFMT5_32_32_UNORM = 101,
+ VFMT5_32_32_SNORM = 102,
+ VFMT5_32_32_FLOAT = 103,
+ VFMT5_32_32_UINT = 104,
+ VFMT5_32_32_SINT = 105,
+ VFMT5_32_32_FIXED = 106,
+ VFMT5_32_32_32_UNORM = 112,
+ VFMT5_32_32_32_SNORM = 113,
+ VFMT5_32_32_32_UINT = 114,
+ VFMT5_32_32_32_SINT = 115,
+ VFMT5_32_32_32_FLOAT = 116,
+ VFMT5_32_32_32_FIXED = 117,
+ VFMT5_32_32_32_32_UNORM = 128,
+ VFMT5_32_32_32_32_SNORM = 129,
+ VFMT5_32_32_32_32_FLOAT = 130,
+ VFMT5_32_32_32_32_UINT = 131,
+ VFMT5_32_32_32_32_SINT = 132,
+ VFMT5_32_32_32_32_FIXED = 133,
+ VFMT5_NONE = 255,
+};
+
+enum a5xx_tex_fmt {
+ TFMT5_A8_UNORM = 2,
+ TFMT5_8_UNORM = 3,
+ TFMT5_8_SNORM = 4,
+ TFMT5_8_UINT = 5,
+ TFMT5_8_SINT = 6,
+ TFMT5_4_4_4_4_UNORM = 8,
+ TFMT5_5_5_5_1_UNORM = 10,
+ TFMT5_5_6_5_UNORM = 14,
+ TFMT5_8_8_UNORM = 15,
+ TFMT5_8_8_SNORM = 16,
+ TFMT5_8_8_UINT = 17,
+ TFMT5_8_8_SINT = 18,
+ TFMT5_L8_A8_UNORM = 19,
+ TFMT5_16_UNORM = 21,
+ TFMT5_16_SNORM = 22,
+ TFMT5_16_FLOAT = 23,
+ TFMT5_16_UINT = 24,
+ TFMT5_16_SINT = 25,
+ TFMT5_8_8_8_8_UNORM = 48,
+ TFMT5_8_8_8_UNORM = 49,
+ TFMT5_8_8_8_8_SNORM = 50,
+ TFMT5_8_8_8_8_UINT = 51,
+ TFMT5_8_8_8_8_SINT = 52,
+ TFMT5_9_9_9_E5_FLOAT = 53,
+ TFMT5_10_10_10_2_UNORM = 54,
+ TFMT5_10_10_10_2_UINT = 58,
+ TFMT5_11_11_10_FLOAT = 66,
+ TFMT5_16_16_UNORM = 67,
+ TFMT5_16_16_SNORM = 68,
+ TFMT5_16_16_FLOAT = 69,
+ TFMT5_16_16_UINT = 70,
+ TFMT5_16_16_SINT = 71,
+ TFMT5_32_FLOAT = 74,
+ TFMT5_32_UINT = 75,
+ TFMT5_32_SINT = 76,
+ TFMT5_16_16_16_16_UNORM = 96,
+ TFMT5_16_16_16_16_SNORM = 97,
+ TFMT5_16_16_16_16_FLOAT = 98,
+ TFMT5_16_16_16_16_UINT = 99,
+ TFMT5_16_16_16_16_SINT = 100,
+ TFMT5_32_32_FLOAT = 103,
+ TFMT5_32_32_UINT = 104,
+ TFMT5_32_32_SINT = 105,
+ TFMT5_32_32_32_UINT = 114,
+ TFMT5_32_32_32_SINT = 115,
+ TFMT5_32_32_32_FLOAT = 116,
+ TFMT5_32_32_32_32_FLOAT = 130,
+ TFMT5_32_32_32_32_UINT = 131,
+ TFMT5_32_32_32_32_SINT = 132,
+ TFMT5_X8Z24_UNORM = 160,
+ TFMT5_ETC2_RG11_UNORM = 171,
+ TFMT5_ETC2_RG11_SNORM = 172,
+ TFMT5_ETC2_R11_UNORM = 173,
+ TFMT5_ETC2_R11_SNORM = 174,
+ TFMT5_ETC1 = 175,
+ TFMT5_ETC2_RGB8 = 176,
+ TFMT5_ETC2_RGBA8 = 177,
+ TFMT5_ETC2_RGB8A1 = 178,
+ TFMT5_DXT1 = 179,
+ TFMT5_DXT3 = 180,
+ TFMT5_DXT5 = 181,
+ TFMT5_RGTC1_UNORM = 183,
+ TFMT5_RGTC1_SNORM = 184,
+ TFMT5_RGTC2_UNORM = 187,
+ TFMT5_RGTC2_SNORM = 188,
+ TFMT5_BPTC_UFLOAT = 190,
+ TFMT5_BPTC_FLOAT = 191,
+ TFMT5_BPTC = 192,
+ TFMT5_ASTC_4x4 = 193,
+ TFMT5_ASTC_5x4 = 194,
+ TFMT5_ASTC_5x5 = 195,
+ TFMT5_ASTC_6x5 = 196,
+ TFMT5_ASTC_6x6 = 197,
+ TFMT5_ASTC_8x5 = 198,
+ TFMT5_ASTC_8x6 = 199,
+ TFMT5_ASTC_8x8 = 200,
+ TFMT5_ASTC_10x5 = 201,
+ TFMT5_ASTC_10x6 = 202,
+ TFMT5_ASTC_10x8 = 203,
+ TFMT5_ASTC_10x10 = 204,
+ TFMT5_ASTC_12x10 = 205,
+ TFMT5_ASTC_12x12 = 206,
+ TFMT5_NONE = 255,
+};
+
+enum a5xx_depth_format {
+ DEPTH5_NONE = 0,
+ DEPTH5_16 = 1,
+ DEPTH5_24_8 = 2,
+ DEPTH5_32 = 4,
+};
+
+enum a5xx_blit_buf {
+ BLIT_MRT0 = 0,
+ BLIT_MRT1 = 1,
+ BLIT_MRT2 = 2,
+ BLIT_MRT3 = 3,
+ BLIT_MRT4 = 4,
+ BLIT_MRT5 = 5,
+ BLIT_MRT6 = 6,
+ BLIT_MRT7 = 7,
+ BLIT_ZS = 8,
+ BLIT_S = 9,
+};
+
+enum a5xx_cp_perfcounter_select {
+ PERF_CP_ALWAYS_COUNT = 0,
+ PERF_CP_BUSY_GFX_CORE_IDLE = 1,
+ PERF_CP_BUSY_CYCLES = 2,
+ PERF_CP_PFP_IDLE = 3,
+ PERF_CP_PFP_BUSY_WORKING = 4,
+ PERF_CP_PFP_STALL_CYCLES_ANY = 5,
+ PERF_CP_PFP_STARVE_CYCLES_ANY = 6,
+ PERF_CP_PFP_ICACHE_MISS = 7,
+ PERF_CP_PFP_ICACHE_HIT = 8,
+ PERF_CP_PFP_MATCH_PM4_PKT_PROFILE = 9,
+ PERF_CP_ME_BUSY_WORKING = 10,
+ PERF_CP_ME_IDLE = 11,
+ PERF_CP_ME_STARVE_CYCLES_ANY = 12,
+ PERF_CP_ME_FIFO_EMPTY_PFP_IDLE = 13,
+ PERF_CP_ME_FIFO_EMPTY_PFP_BUSY = 14,
+ PERF_CP_ME_FIFO_FULL_ME_BUSY = 15,
+ PERF_CP_ME_FIFO_FULL_ME_NON_WORKING = 16,
+ PERF_CP_ME_STALL_CYCLES_ANY = 17,
+ PERF_CP_ME_ICACHE_MISS = 18,
+ PERF_CP_ME_ICACHE_HIT = 19,
+ PERF_CP_NUM_PREEMPTIONS = 20,
+ PERF_CP_PREEMPTION_REACTION_DELAY = 21,
+ PERF_CP_PREEMPTION_SWITCH_OUT_TIME = 22,
+ PERF_CP_PREEMPTION_SWITCH_IN_TIME = 23,
+ PERF_CP_DEAD_DRAWS_IN_BIN_RENDER = 24,
+ PERF_CP_PREDICATED_DRAWS_KILLED = 25,
+ PERF_CP_MODE_SWITCH = 26,
+ PERF_CP_ZPASS_DONE = 27,
+ PERF_CP_CONTEXT_DONE = 28,
+ PERF_CP_CACHE_FLUSH = 29,
+ PERF_CP_LONG_PREEMPTIONS = 30,
+};
+
+enum a5xx_rbbm_perfcounter_select {
+ PERF_RBBM_ALWAYS_COUNT = 0,
+ PERF_RBBM_ALWAYS_ON = 1,
+ PERF_RBBM_TSE_BUSY = 2,
+ PERF_RBBM_RAS_BUSY = 3,
+ PERF_RBBM_PC_DCALL_BUSY = 4,
+ PERF_RBBM_PC_VSD_BUSY = 5,
+ PERF_RBBM_STATUS_MASKED = 6,
+ PERF_RBBM_COM_BUSY = 7,
+ PERF_RBBM_DCOM_BUSY = 8,
+ PERF_RBBM_VBIF_BUSY = 9,
+ PERF_RBBM_VSC_BUSY = 10,
+ PERF_RBBM_TESS_BUSY = 11,
+ PERF_RBBM_UCHE_BUSY = 12,
+ PERF_RBBM_HLSQ_BUSY = 13,
+};
+
+enum a5xx_pc_perfcounter_select {
+ PERF_PC_BUSY_CYCLES = 0,
+ PERF_PC_WORKING_CYCLES = 1,
+ PERF_PC_STALL_CYCLES_VFD = 2,
+ PERF_PC_STALL_CYCLES_TSE = 3,
+ PERF_PC_STALL_CYCLES_VPC = 4,
+ PERF_PC_STALL_CYCLES_UCHE = 5,
+ PERF_PC_STALL_CYCLES_TESS = 6,
+ PERF_PC_STALL_CYCLES_TSE_ONLY = 7,
+ PERF_PC_STALL_CYCLES_VPC_ONLY = 8,
+ PERF_PC_PASS1_TF_STALL_CYCLES = 9,
+ PERF_PC_STARVE_CYCLES_FOR_INDEX = 10,
+ PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR = 11,
+ PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM = 12,
+ PERF_PC_STARVE_CYCLES_FOR_POSITION = 13,
+ PERF_PC_STARVE_CYCLES_DI = 14,
+ PERF_PC_VIS_STREAMS_LOADED = 15,
+ PERF_PC_INSTANCES = 16,
+ PERF_PC_VPC_PRIMITIVES = 17,
+ PERF_PC_DEAD_PRIM = 18,
+ PERF_PC_LIVE_PRIM = 19,
+ PERF_PC_VERTEX_HITS = 20,
+ PERF_PC_IA_VERTICES = 21,
+ PERF_PC_IA_PRIMITIVES = 22,
+ PERF_PC_GS_PRIMITIVES = 23,
+ PERF_PC_HS_INVOCATIONS = 24,
+ PERF_PC_DS_INVOCATIONS = 25,
+ PERF_PC_VS_INVOCATIONS = 26,
+ PERF_PC_GS_INVOCATIONS = 27,
+ PERF_PC_DS_PRIMITIVES = 28,
+ PERF_PC_VPC_POS_DATA_TRANSACTION = 29,
+ PERF_PC_3D_DRAWCALLS = 30,
+ PERF_PC_2D_DRAWCALLS = 31,
+ PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS = 32,
+ PERF_TESS_BUSY_CYCLES = 33,
+ PERF_TESS_WORKING_CYCLES = 34,
+ PERF_TESS_STALL_CYCLES_PC = 35,
+ PERF_TESS_STARVE_CYCLES_PC = 36,
+};
+
+enum a5xx_vfd_perfcounter_select {
+ PERF_VFD_BUSY_CYCLES = 0,
+ PERF_VFD_STALL_CYCLES_UCHE = 1,
+ PERF_VFD_STALL_CYCLES_VPC_ALLOC = 2,
+ PERF_VFD_STALL_CYCLES_MISS_VB = 3,
+ PERF_VFD_STALL_CYCLES_MISS_Q = 4,
+ PERF_VFD_STALL_CYCLES_SP_INFO = 5,
+ PERF_VFD_STALL_CYCLES_SP_ATTR = 6,
+ PERF_VFD_STALL_CYCLES_VFDP_VB = 7,
+ PERF_VFD_STALL_CYCLES_VFDP_Q = 8,
+ PERF_VFD_DECODER_PACKER_STALL = 9,
+ PERF_VFD_STARVE_CYCLES_UCHE = 10,
+ PERF_VFD_RBUFFER_FULL = 11,
+ PERF_VFD_ATTR_INFO_FIFO_FULL = 12,
+ PERF_VFD_DECODED_ATTRIBUTE_BYTES = 13,
+ PERF_VFD_NUM_ATTRIBUTES = 14,
+ PERF_VFD_INSTRUCTIONS = 15,
+ PERF_VFD_UPPER_SHADER_FIBERS = 16,
+ PERF_VFD_LOWER_SHADER_FIBERS = 17,
+ PERF_VFD_MODE_0_FIBERS = 18,
+ PERF_VFD_MODE_1_FIBERS = 19,
+ PERF_VFD_MODE_2_FIBERS = 20,
+ PERF_VFD_MODE_3_FIBERS = 21,
+ PERF_VFD_MODE_4_FIBERS = 22,
+ PERF_VFD_TOTAL_VERTICES = 23,
+ PERF_VFD_NUM_ATTR_MISS = 24,
+ PERF_VFD_1_BURST_REQ = 25,
+ PERF_VFDP_STALL_CYCLES_VFD = 26,
+ PERF_VFDP_STALL_CYCLES_VFD_INDEX = 27,
+ PERF_VFDP_STALL_CYCLES_VFD_PROG = 28,
+ PERF_VFDP_STARVE_CYCLES_PC = 29,
+ PERF_VFDP_VS_STAGE_32_WAVES = 30,
+};
+
+enum a5xx_hlsq_perfcounter_select {
+ PERF_HLSQ_BUSY_CYCLES = 0,
+ PERF_HLSQ_STALL_CYCLES_UCHE = 1,
+ PERF_HLSQ_STALL_CYCLES_SP_STATE = 2,
+ PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE = 3,
+ PERF_HLSQ_UCHE_LATENCY_CYCLES = 4,
+ PERF_HLSQ_UCHE_LATENCY_COUNT = 5,
+ PERF_HLSQ_FS_STAGE_32_WAVES = 6,
+ PERF_HLSQ_FS_STAGE_64_WAVES = 7,
+ PERF_HLSQ_QUADS = 8,
+ PERF_HLSQ_SP_STATE_COPY_TRANS_FS_STAGE = 9,
+ PERF_HLSQ_SP_STATE_COPY_TRANS_VS_STAGE = 10,
+ PERF_HLSQ_TP_STATE_COPY_TRANS_FS_STAGE = 11,
+ PERF_HLSQ_TP_STATE_COPY_TRANS_VS_STAGE = 12,
+ PERF_HLSQ_CS_INVOCATIONS = 13,
+ PERF_HLSQ_COMPUTE_DRAWCALLS = 14,
+};
+
+enum a5xx_vpc_perfcounter_select {
+ PERF_VPC_BUSY_CYCLES = 0,
+ PERF_VPC_WORKING_CYCLES = 1,
+ PERF_VPC_STALL_CYCLES_UCHE = 2,
+ PERF_VPC_STALL_CYCLES_VFD_WACK = 3,
+ PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC = 4,
+ PERF_VPC_STALL_CYCLES_PC = 5,
+ PERF_VPC_STALL_CYCLES_SP_LM = 6,
+ PERF_VPC_POS_EXPORT_STALL_CYCLES = 7,
+ PERF_VPC_STARVE_CYCLES_SP = 8,
+ PERF_VPC_STARVE_CYCLES_LRZ = 9,
+ PERF_VPC_PC_PRIMITIVES = 10,
+ PERF_VPC_SP_COMPONENTS = 11,
+ PERF_VPC_SP_LM_PRIMITIVES = 12,
+ PERF_VPC_SP_LM_COMPONENTS = 13,
+ PERF_VPC_SP_LM_DWORDS = 14,
+ PERF_VPC_STREAMOUT_COMPONENTS = 15,
+ PERF_VPC_GRANT_PHASES = 16,
+};
+
+enum a5xx_tse_perfcounter_select {
+ PERF_TSE_BUSY_CYCLES = 0,
+ PERF_TSE_CLIPPING_CYCLES = 1,
+ PERF_TSE_STALL_CYCLES_RAS = 2,
+ PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE = 3,
+ PERF_TSE_STALL_CYCLES_LRZ_ZPLANE = 4,
+ PERF_TSE_STARVE_CYCLES_PC = 5,
+ PERF_TSE_INPUT_PRIM = 6,
+ PERF_TSE_INPUT_NULL_PRIM = 7,
+ PERF_TSE_TRIVAL_REJ_PRIM = 8,
+ PERF_TSE_CLIPPED_PRIM = 9,
+ PERF_TSE_ZERO_AREA_PRIM = 10,
+ PERF_TSE_FACENESS_CULLED_PRIM = 11,
+ PERF_TSE_ZERO_PIXEL_PRIM = 12,
+ PERF_TSE_OUTPUT_NULL_PRIM = 13,
+ PERF_TSE_OUTPUT_VISIBLE_PRIM = 14,
+ PERF_TSE_CINVOCATION = 15,
+ PERF_TSE_CPRIMITIVES = 16,
+ PERF_TSE_2D_INPUT_PRIM = 17,
+ PERF_TSE_2D_ALIVE_CLCLES = 18,
+};
+
+enum a5xx_ras_perfcounter_select {
+ PERF_RAS_BUSY_CYCLES = 0,
+ PERF_RAS_SUPERTILE_ACTIVE_CYCLES = 1,
+ PERF_RAS_STALL_CYCLES_LRZ = 2,
+ PERF_RAS_STARVE_CYCLES_TSE = 3,
+ PERF_RAS_SUPER_TILES = 4,
+ PERF_RAS_8X4_TILES = 5,
+ PERF_RAS_MASKGEN_ACTIVE = 6,
+ PERF_RAS_FULLY_COVERED_SUPER_TILES = 7,
+ PERF_RAS_FULLY_COVERED_8X4_TILES = 8,
+ PERF_RAS_PRIM_KILLED_INVISILBE = 9,
+};
+
+enum a5xx_lrz_perfcounter_select {
+ PERF_LRZ_BUSY_CYCLES = 0,
+ PERF_LRZ_STARVE_CYCLES_RAS = 1,
+ PERF_LRZ_STALL_CYCLES_RB = 2,
+ PERF_LRZ_STALL_CYCLES_VSC = 3,
+ PERF_LRZ_STALL_CYCLES_VPC = 4,
+ PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH = 5,
+ PERF_LRZ_STALL_CYCLES_UCHE = 6,
+ PERF_LRZ_LRZ_READ = 7,
+ PERF_LRZ_LRZ_WRITE = 8,
+ PERF_LRZ_READ_LATENCY = 9,
+ PERF_LRZ_MERGE_CACHE_UPDATING = 10,
+ PERF_LRZ_PRIM_KILLED_BY_MASKGEN = 11,
+ PERF_LRZ_PRIM_KILLED_BY_LRZ = 12,
+ PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ = 13,
+ PERF_LRZ_FULL_8X8_TILES = 14,
+ PERF_LRZ_PARTIAL_8X8_TILES = 15,
+ PERF_LRZ_TILE_KILLED = 16,
+ PERF_LRZ_TOTAL_PIXEL = 17,
+ PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ = 18,
+};
+
+enum a5xx_uche_perfcounter_select {
+ PERF_UCHE_BUSY_CYCLES = 0,
+ PERF_UCHE_STALL_CYCLES_VBIF = 1,
+ PERF_UCHE_VBIF_LATENCY_CYCLES = 2,
+ PERF_UCHE_VBIF_LATENCY_SAMPLES = 3,
+ PERF_UCHE_VBIF_READ_BEATS_TP = 4,
+ PERF_UCHE_VBIF_READ_BEATS_VFD = 5,
+ PERF_UCHE_VBIF_READ_BEATS_HLSQ = 6,
+ PERF_UCHE_VBIF_READ_BEATS_LRZ = 7,
+ PERF_UCHE_VBIF_READ_BEATS_SP = 8,
+ PERF_UCHE_READ_REQUESTS_TP = 9,
+ PERF_UCHE_READ_REQUESTS_VFD = 10,
+ PERF_UCHE_READ_REQUESTS_HLSQ = 11,
+ PERF_UCHE_READ_REQUESTS_LRZ = 12,
+ PERF_UCHE_READ_REQUESTS_SP = 13,
+ PERF_UCHE_WRITE_REQUESTS_LRZ = 14,
+ PERF_UCHE_WRITE_REQUESTS_SP = 15,
+ PERF_UCHE_WRITE_REQUESTS_VPC = 16,
+ PERF_UCHE_WRITE_REQUESTS_VSC = 17,
+ PERF_UCHE_EVICTS = 18,
+ PERF_UCHE_BANK_REQ0 = 19,
+ PERF_UCHE_BANK_REQ1 = 20,
+ PERF_UCHE_BANK_REQ2 = 21,
+ PERF_UCHE_BANK_REQ3 = 22,
+ PERF_UCHE_BANK_REQ4 = 23,
+ PERF_UCHE_BANK_REQ5 = 24,
+ PERF_UCHE_BANK_REQ6 = 25,
+ PERF_UCHE_BANK_REQ7 = 26,
+ PERF_UCHE_VBIF_READ_BEATS_CH0 = 27,
+ PERF_UCHE_VBIF_READ_BEATS_CH1 = 28,
+ PERF_UCHE_GMEM_READ_BEATS = 29,
+ PERF_UCHE_FLAG_COUNT = 30,
+};
+
+enum a5xx_tp_perfcounter_select {
+ PERF_TP_BUSY_CYCLES = 0,
+ PERF_TP_STALL_CYCLES_UCHE = 1,
+ PERF_TP_LATENCY_CYCLES = 2,
+ PERF_TP_LATENCY_TRANS = 3,
+ PERF_TP_FLAG_CACHE_REQUEST_SAMPLES = 4,
+ PERF_TP_FLAG_CACHE_REQUEST_LATENCY = 5,
+ PERF_TP_L1_CACHELINE_REQUESTS = 6,
+ PERF_TP_L1_CACHELINE_MISSES = 7,
+ PERF_TP_SP_TP_TRANS = 8,
+ PERF_TP_TP_SP_TRANS = 9,
+ PERF_TP_OUTPUT_PIXELS = 10,
+ PERF_TP_FILTER_WORKLOAD_16BIT = 11,
+ PERF_TP_FILTER_WORKLOAD_32BIT = 12,
+ PERF_TP_QUADS_RECEIVED = 13,
+ PERF_TP_QUADS_OFFSET = 14,
+ PERF_TP_QUADS_SHADOW = 15,
+ PERF_TP_QUADS_ARRAY = 16,
+ PERF_TP_QUADS_GRADIENT = 17,
+ PERF_TP_QUADS_1D = 18,
+ PERF_TP_QUADS_2D = 19,
+ PERF_TP_QUADS_BUFFER = 20,
+ PERF_TP_QUADS_3D = 21,
+ PERF_TP_QUADS_CUBE = 22,
+ PERF_TP_STATE_CACHE_REQUESTS = 23,
+ PERF_TP_STATE_CACHE_MISSES = 24,
+ PERF_TP_DIVERGENT_QUADS_RECEIVED = 25,
+ PERF_TP_BINDLESS_STATE_CACHE_REQUESTS = 26,
+ PERF_TP_BINDLESS_STATE_CACHE_MISSES = 27,
+ PERF_TP_PRT_NON_RESIDENT_EVENTS = 28,
+ PERF_TP_OUTPUT_PIXELS_POINT = 29,
+ PERF_TP_OUTPUT_PIXELS_BILINEAR = 30,
+ PERF_TP_OUTPUT_PIXELS_MIP = 31,
+ PERF_TP_OUTPUT_PIXELS_ANISO = 32,
+ PERF_TP_OUTPUT_PIXELS_ZERO_LOD = 33,
+ PERF_TP_FLAG_CACHE_REQUESTS = 34,
+ PERF_TP_FLAG_CACHE_MISSES = 35,
+ PERF_TP_L1_5_L2_REQUESTS = 36,
+ PERF_TP_2D_OUTPUT_PIXELS = 37,
+ PERF_TP_2D_OUTPUT_PIXELS_POINT = 38,
+ PERF_TP_2D_OUTPUT_PIXELS_BILINEAR = 39,
+ PERF_TP_2D_FILTER_WORKLOAD_16BIT = 40,
+ PERF_TP_2D_FILTER_WORKLOAD_32BIT = 41,
+};
+
+enum a5xx_sp_perfcounter_select {
+ PERF_SP_BUSY_CYCLES = 0,
+ PERF_SP_ALU_WORKING_CYCLES = 1,
+ PERF_SP_EFU_WORKING_CYCLES = 2,
+ PERF_SP_STALL_CYCLES_VPC = 3,
+ PERF_SP_STALL_CYCLES_TP = 4,
+ PERF_SP_STALL_CYCLES_UCHE = 5,
+ PERF_SP_STALL_CYCLES_RB = 6,
+ PERF_SP_SCHEDULER_NON_WORKING = 7,
+ PERF_SP_WAVE_CONTEXTS = 8,
+ PERF_SP_WAVE_CONTEXT_CYCLES = 9,
+ PERF_SP_FS_STAGE_WAVE_CYCLES = 10,
+ PERF_SP_FS_STAGE_WAVE_SAMPLES = 11,
+ PERF_SP_VS_STAGE_WAVE_CYCLES = 12,
+ PERF_SP_VS_STAGE_WAVE_SAMPLES = 13,
+ PERF_SP_FS_STAGE_DURATION_CYCLES = 14,
+ PERF_SP_VS_STAGE_DURATION_CYCLES = 15,
+ PERF_SP_WAVE_CTRL_CYCLES = 16,
+ PERF_SP_WAVE_LOAD_CYCLES = 17,
+ PERF_SP_WAVE_EMIT_CYCLES = 18,
+ PERF_SP_WAVE_NOP_CYCLES = 19,
+ PERF_SP_WAVE_WAIT_CYCLES = 20,
+ PERF_SP_WAVE_FETCH_CYCLES = 21,
+ PERF_SP_WAVE_IDLE_CYCLES = 22,
+ PERF_SP_WAVE_END_CYCLES = 23,
+ PERF_SP_WAVE_LONG_SYNC_CYCLES = 24,
+ PERF_SP_WAVE_SHORT_SYNC_CYCLES = 25,
+ PERF_SP_WAVE_JOIN_CYCLES = 26,
+ PERF_SP_LM_LOAD_INSTRUCTIONS = 27,
+ PERF_SP_LM_STORE_INSTRUCTIONS = 28,
+ PERF_SP_LM_ATOMICS = 29,
+ PERF_SP_GM_LOAD_INSTRUCTIONS = 30,
+ PERF_SP_GM_STORE_INSTRUCTIONS = 31,
+ PERF_SP_GM_ATOMICS = 32,
+ PERF_SP_VS_STAGE_TEX_INSTRUCTIONS = 33,
+ PERF_SP_VS_STAGE_CFLOW_INSTRUCTIONS = 34,
+ PERF_SP_VS_STAGE_EFU_INSTRUCTIONS = 35,
+ PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 36,
+ PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 37,
+ PERF_SP_FS_STAGE_TEX_INSTRUCTIONS = 38,
+ PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS = 39,
+ PERF_SP_FS_STAGE_EFU_INSTRUCTIONS = 40,
+ PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 41,
+ PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 42,
+ PERF_SP_FS_STAGE_BARY_INSTRUCTIONS = 43,
+ PERF_SP_VS_INSTRUCTIONS = 44,
+ PERF_SP_FS_INSTRUCTIONS = 45,
+ PERF_SP_ADDR_LOCK_COUNT = 46,
+ PERF_SP_UCHE_READ_TRANS = 47,
+ PERF_SP_UCHE_WRITE_TRANS = 48,
+ PERF_SP_EXPORT_VPC_TRANS = 49,
+ PERF_SP_EXPORT_RB_TRANS = 50,
+ PERF_SP_PIXELS_KILLED = 51,
+ PERF_SP_ICL1_REQUESTS = 52,
+ PERF_SP_ICL1_MISSES = 53,
+ PERF_SP_ICL0_REQUESTS = 54,
+ PERF_SP_ICL0_MISSES = 55,
+ PERF_SP_HS_INSTRUCTIONS = 56,
+ PERF_SP_DS_INSTRUCTIONS = 57,
+ PERF_SP_GS_INSTRUCTIONS = 58,
+ PERF_SP_CS_INSTRUCTIONS = 59,
+ PERF_SP_GPR_READ = 60,
+ PERF_SP_GPR_WRITE = 61,
+ PERF_SP_LM_CH0_REQUESTS = 62,
+ PERF_SP_LM_CH1_REQUESTS = 63,
+ PERF_SP_LM_BANK_CONFLICTS = 64,
+};
+
+enum a5xx_rb_perfcounter_select {
+ PERF_RB_BUSY_CYCLES = 0,
+ PERF_RB_STALL_CYCLES_CCU = 1,
+ PERF_RB_STALL_CYCLES_HLSQ = 2,
+ PERF_RB_STALL_CYCLES_FIFO0_FULL = 3,
+ PERF_RB_STALL_CYCLES_FIFO1_FULL = 4,
+ PERF_RB_STALL_CYCLES_FIFO2_FULL = 5,
+ PERF_RB_STARVE_CYCLES_SP = 6,
+ PERF_RB_STARVE_CYCLES_LRZ_TILE = 7,
+ PERF_RB_STARVE_CYCLES_CCU = 8,
+ PERF_RB_STARVE_CYCLES_Z_PLANE = 9,
+ PERF_RB_STARVE_CYCLES_BARY_PLANE = 10,
+ PERF_RB_Z_WORKLOAD = 11,
+ PERF_RB_HLSQ_ACTIVE = 12,
+ PERF_RB_Z_READ = 13,
+ PERF_RB_Z_WRITE = 14,
+ PERF_RB_C_READ = 15,
+ PERF_RB_C_WRITE = 16,
+ PERF_RB_TOTAL_PASS = 17,
+ PERF_RB_Z_PASS = 18,
+ PERF_RB_Z_FAIL = 19,
+ PERF_RB_S_FAIL = 20,
+ PERF_RB_BLENDED_FXP_COMPONENTS = 21,
+ PERF_RB_BLENDED_FP16_COMPONENTS = 22,
+ RB_RESERVED = 23,
+ PERF_RB_2D_ALIVE_CYCLES = 24,
+ PERF_RB_2D_STALL_CYCLES_A2D = 25,
+ PERF_RB_2D_STARVE_CYCLES_SRC = 26,
+ PERF_RB_2D_STARVE_CYCLES_SP = 27,
+ PERF_RB_2D_STARVE_CYCLES_DST = 28,
+ PERF_RB_2D_VALID_PIXELS = 29,
+};
+
+enum a5xx_rb_samples_perfcounter_select {
+ TOTAL_SAMPLES = 0,
+ ZPASS_SAMPLES = 1,
+ ZFAIL_SAMPLES = 2,
+ SFAIL_SAMPLES = 3,
+};
+
+enum a5xx_vsc_perfcounter_select {
+ PERF_VSC_BUSY_CYCLES = 0,
+ PERF_VSC_WORKING_CYCLES = 1,
+ PERF_VSC_STALL_CYCLES_UCHE = 2,
+ PERF_VSC_EOT_NUM = 3,
+};
+
+enum a5xx_ccu_perfcounter_select {
+ PERF_CCU_BUSY_CYCLES = 0,
+ PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN = 1,
+ PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN = 2,
+ PERF_CCU_STARVE_CYCLES_FLAG_RETURN = 3,
+ PERF_CCU_DEPTH_BLOCKS = 4,
+ PERF_CCU_COLOR_BLOCKS = 5,
+ PERF_CCU_DEPTH_BLOCK_HIT = 6,
+ PERF_CCU_COLOR_BLOCK_HIT = 7,
+ PERF_CCU_PARTIAL_BLOCK_READ = 8,
+ PERF_CCU_GMEM_READ = 9,
+ PERF_CCU_GMEM_WRITE = 10,
+ PERF_CCU_DEPTH_READ_FLAG0_COUNT = 11,
+ PERF_CCU_DEPTH_READ_FLAG1_COUNT = 12,
+ PERF_CCU_DEPTH_READ_FLAG2_COUNT = 13,
+ PERF_CCU_DEPTH_READ_FLAG3_COUNT = 14,
+ PERF_CCU_DEPTH_READ_FLAG4_COUNT = 15,
+ PERF_CCU_COLOR_READ_FLAG0_COUNT = 16,
+ PERF_CCU_COLOR_READ_FLAG1_COUNT = 17,
+ PERF_CCU_COLOR_READ_FLAG2_COUNT = 18,
+ PERF_CCU_COLOR_READ_FLAG3_COUNT = 19,
+ PERF_CCU_COLOR_READ_FLAG4_COUNT = 20,
+ PERF_CCU_2D_BUSY_CYCLES = 21,
+ PERF_CCU_2D_RD_REQ = 22,
+ PERF_CCU_2D_WR_REQ = 23,
+ PERF_CCU_2D_REORDER_STARVE_CYCLES = 24,
+ PERF_CCU_2D_PIXELS = 25,
+};
+
+enum a5xx_cmp_perfcounter_select {
+ PERF_CMPDECMP_STALL_CYCLES_VBIF = 0,
+ PERF_CMPDECMP_VBIF_LATENCY_CYCLES = 1,
+ PERF_CMPDECMP_VBIF_LATENCY_SAMPLES = 2,
+ PERF_CMPDECMP_VBIF_READ_DATA_CCU = 3,
+ PERF_CMPDECMP_VBIF_WRITE_DATA_CCU = 4,
+ PERF_CMPDECMP_VBIF_READ_REQUEST = 5,
+ PERF_CMPDECMP_VBIF_WRITE_REQUEST = 6,
+ PERF_CMPDECMP_VBIF_READ_DATA = 7,
+ PERF_CMPDECMP_VBIF_WRITE_DATA = 8,
+ PERF_CMPDECMP_FLAG_FETCH_CYCLES = 9,
+ PERF_CMPDECMP_FLAG_FETCH_SAMPLES = 10,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT = 11,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT = 12,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT = 13,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT = 14,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT = 15,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT = 16,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT = 17,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT = 18,
+ PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ = 19,
+ PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR = 20,
+ PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN = 21,
+ PERF_CMPDECMP_2D_RD_DATA = 22,
+ PERF_CMPDECMP_2D_WR_DATA = 23,
+};
+
+enum a5xx_vbif_perfcounter_select {
+ AXI_READ_REQUESTS_ID_0 = 0,
+ AXI_READ_REQUESTS_ID_1 = 1,
+ AXI_READ_REQUESTS_ID_2 = 2,
+ AXI_READ_REQUESTS_ID_3 = 3,
+ AXI_READ_REQUESTS_ID_4 = 4,
+ AXI_READ_REQUESTS_ID_5 = 5,
+ AXI_READ_REQUESTS_ID_6 = 6,
+ AXI_READ_REQUESTS_ID_7 = 7,
+ AXI_READ_REQUESTS_ID_8 = 8,
+ AXI_READ_REQUESTS_ID_9 = 9,
+ AXI_READ_REQUESTS_ID_10 = 10,
+ AXI_READ_REQUESTS_ID_11 = 11,
+ AXI_READ_REQUESTS_ID_12 = 12,
+ AXI_READ_REQUESTS_ID_13 = 13,
+ AXI_READ_REQUESTS_ID_14 = 14,
+ AXI_READ_REQUESTS_ID_15 = 15,
+ AXI0_READ_REQUESTS_TOTAL = 16,
+ AXI1_READ_REQUESTS_TOTAL = 17,
+ AXI2_READ_REQUESTS_TOTAL = 18,
+ AXI3_READ_REQUESTS_TOTAL = 19,
+ AXI_READ_REQUESTS_TOTAL = 20,
+ AXI_WRITE_REQUESTS_ID_0 = 21,
+ AXI_WRITE_REQUESTS_ID_1 = 22,
+ AXI_WRITE_REQUESTS_ID_2 = 23,
+ AXI_WRITE_REQUESTS_ID_3 = 24,
+ AXI_WRITE_REQUESTS_ID_4 = 25,
+ AXI_WRITE_REQUESTS_ID_5 = 26,
+ AXI_WRITE_REQUESTS_ID_6 = 27,
+ AXI_WRITE_REQUESTS_ID_7 = 28,
+ AXI_WRITE_REQUESTS_ID_8 = 29,
+ AXI_WRITE_REQUESTS_ID_9 = 30,
+ AXI_WRITE_REQUESTS_ID_10 = 31,
+ AXI_WRITE_REQUESTS_ID_11 = 32,
+ AXI_WRITE_REQUESTS_ID_12 = 33,
+ AXI_WRITE_REQUESTS_ID_13 = 34,
+ AXI_WRITE_REQUESTS_ID_14 = 35,
+ AXI_WRITE_REQUESTS_ID_15 = 36,
+ AXI0_WRITE_REQUESTS_TOTAL = 37,
+ AXI1_WRITE_REQUESTS_TOTAL = 38,
+ AXI2_WRITE_REQUESTS_TOTAL = 39,
+ AXI3_WRITE_REQUESTS_TOTAL = 40,
+ AXI_WRITE_REQUESTS_TOTAL = 41,
+ AXI_TOTAL_REQUESTS = 42,
+ AXI_READ_DATA_BEATS_ID_0 = 43,
+ AXI_READ_DATA_BEATS_ID_1 = 44,
+ AXI_READ_DATA_BEATS_ID_2 = 45,
+ AXI_READ_DATA_BEATS_ID_3 = 46,
+ AXI_READ_DATA_BEATS_ID_4 = 47,
+ AXI_READ_DATA_BEATS_ID_5 = 48,
+ AXI_READ_DATA_BEATS_ID_6 = 49,
+ AXI_READ_DATA_BEATS_ID_7 = 50,
+ AXI_READ_DATA_BEATS_ID_8 = 51,
+ AXI_READ_DATA_BEATS_ID_9 = 52,
+ AXI_READ_DATA_BEATS_ID_10 = 53,
+ AXI_READ_DATA_BEATS_ID_11 = 54,
+ AXI_READ_DATA_BEATS_ID_12 = 55,
+ AXI_READ_DATA_BEATS_ID_13 = 56,
+ AXI_READ_DATA_BEATS_ID_14 = 57,
+ AXI_READ_DATA_BEATS_ID_15 = 58,
+ AXI0_READ_DATA_BEATS_TOTAL = 59,
+ AXI1_READ_DATA_BEATS_TOTAL = 60,
+ AXI2_READ_DATA_BEATS_TOTAL = 61,
+ AXI3_READ_DATA_BEATS_TOTAL = 62,
+ AXI_READ_DATA_BEATS_TOTAL = 63,
+ AXI_WRITE_DATA_BEATS_ID_0 = 64,
+ AXI_WRITE_DATA_BEATS_ID_1 = 65,
+ AXI_WRITE_DATA_BEATS_ID_2 = 66,
+ AXI_WRITE_DATA_BEATS_ID_3 = 67,
+ AXI_WRITE_DATA_BEATS_ID_4 = 68,
+ AXI_WRITE_DATA_BEATS_ID_5 = 69,
+ AXI_WRITE_DATA_BEATS_ID_6 = 70,
+ AXI_WRITE_DATA_BEATS_ID_7 = 71,
+ AXI_WRITE_DATA_BEATS_ID_8 = 72,
+ AXI_WRITE_DATA_BEATS_ID_9 = 73,
+ AXI_WRITE_DATA_BEATS_ID_10 = 74,
+ AXI_WRITE_DATA_BEATS_ID_11 = 75,
+ AXI_WRITE_DATA_BEATS_ID_12 = 76,
+ AXI_WRITE_DATA_BEATS_ID_13 = 77,
+ AXI_WRITE_DATA_BEATS_ID_14 = 78,
+ AXI_WRITE_DATA_BEATS_ID_15 = 79,
+ AXI0_WRITE_DATA_BEATS_TOTAL = 80,
+ AXI1_WRITE_DATA_BEATS_TOTAL = 81,
+ AXI2_WRITE_DATA_BEATS_TOTAL = 82,
+ AXI3_WRITE_DATA_BEATS_TOTAL = 83,
+ AXI_WRITE_DATA_BEATS_TOTAL = 84,
+ AXI_DATA_BEATS_TOTAL = 85,
+};
+
+enum a5xx_tex_filter {
+ A5XX_TEX_NEAREST = 0,
+ A5XX_TEX_LINEAR = 1,
+ A5XX_TEX_ANISO = 2,
+};
+
+enum a5xx_tex_clamp {
+ A5XX_TEX_REPEAT = 0,
+ A5XX_TEX_CLAMP_TO_EDGE = 1,
+ A5XX_TEX_MIRROR_REPEAT = 2,
+ A5XX_TEX_CLAMP_TO_BORDER = 3,
+ A5XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a5xx_tex_aniso {
+ A5XX_TEX_ANISO_1 = 0,
+ A5XX_TEX_ANISO_2 = 1,
+ A5XX_TEX_ANISO_4 = 2,
+ A5XX_TEX_ANISO_8 = 3,
+ A5XX_TEX_ANISO_16 = 4,
+};
+
+enum a5xx_tex_swiz {
+ A5XX_TEX_X = 0,
+ A5XX_TEX_Y = 1,
+ A5XX_TEX_Z = 2,
+ A5XX_TEX_W = 3,
+ A5XX_TEX_ZERO = 4,
+ A5XX_TEX_ONE = 5,
+};
+
+enum a5xx_tex_type {
+ A5XX_TEX_1D = 0,
+ A5XX_TEX_2D = 1,
+ A5XX_TEX_CUBE = 2,
+ A5XX_TEX_3D = 3,
+ A5XX_TEX_BUFFER = 4,
+};
+
+#define A5XX_INT0_RBBM_GPU_IDLE 0x00000001
+#define A5XX_INT0_RBBM_AHB_ERROR 0x00000002
+#define A5XX_INT0_RBBM_TRANSFER_TIMEOUT 0x00000004
+#define A5XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A5XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A5XX_INT0_RBBM_ETS_MS_TIMEOUT 0x00000020
+#define A5XX_INT0_RBBM_ATB_ASYNC_OVERFLOW 0x00000040
+#define A5XX_INT0_RBBM_GPC_ERROR 0x00000080
+#define A5XX_INT0_CP_SW 0x00000100
+#define A5XX_INT0_CP_HW_ERROR 0x00000200
+#define A5XX_INT0_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A5XX_INT0_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A5XX_INT0_CP_CCU_RESOLVE_TS 0x00001000
+#define A5XX_INT0_CP_IB2 0x00002000
+#define A5XX_INT0_CP_IB1 0x00004000
+#define A5XX_INT0_CP_RB 0x00008000
+#define A5XX_INT0_CP_UNUSED_1 0x00010000
+#define A5XX_INT0_CP_RB_DONE_TS 0x00020000
+#define A5XX_INT0_CP_WT_DONE_TS 0x00040000
+#define A5XX_INT0_UNKNOWN_1 0x00080000
+#define A5XX_INT0_CP_CACHE_FLUSH_TS 0x00100000
+#define A5XX_INT0_UNUSED_2 0x00200000
+#define A5XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A5XX_INT0_MISC_HANG_DETECT 0x00800000
+#define A5XX_INT0_UCHE_OOB_ACCESS 0x01000000
+#define A5XX_INT0_UCHE_TRAP_INTR 0x02000000
+#define A5XX_INT0_DEBBUS_INTR_0 0x04000000
+#define A5XX_INT0_DEBBUS_INTR_1 0x08000000
+#define A5XX_INT0_GPMU_VOLTAGE_DROOP 0x10000000
+#define A5XX_INT0_GPMU_FIRMWARE 0x20000000
+#define A5XX_INT0_ISDB_CPU_IRQ 0x40000000
+#define A5XX_INT0_ISDB_UNDER_DEBUG 0x80000000
+#define A5XX_CP_INT_CP_OPCODE_ERROR 0x00000001
+#define A5XX_CP_INT_CP_RESERVED_BIT_ERROR 0x00000002
+#define A5XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004
+#define A5XX_CP_INT_CP_DMA_ERROR 0x00000008
+#define A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010
+#define A5XX_CP_INT_CP_AHB_ERROR 0x00000020
+#define REG_A5XX_CP_RB_BASE 0x00000800
+
+#define REG_A5XX_CP_RB_BASE_HI 0x00000801
+
+#define REG_A5XX_CP_RB_CNTL 0x00000802
+
+#define REG_A5XX_CP_RB_RPTR_ADDR 0x00000804
+
+#define REG_A5XX_CP_RB_RPTR_ADDR_HI 0x00000805
+
+#define REG_A5XX_CP_RB_RPTR 0x00000806
+
+#define REG_A5XX_CP_RB_WPTR 0x00000807
+
+#define REG_A5XX_CP_PFP_STAT_ADDR 0x00000808
+
+#define REG_A5XX_CP_PFP_STAT_DATA 0x00000809
+
+#define REG_A5XX_CP_DRAW_STATE_ADDR 0x0000080b
+
+#define REG_A5XX_CP_DRAW_STATE_DATA 0x0000080c
+
+#define REG_A5XX_CP_ME_NRT_ADDR_LO 0x0000080d
+
+#define REG_A5XX_CP_ME_NRT_ADDR_HI 0x0000080e
+
+#define REG_A5XX_CP_ME_NRT_DATA 0x00000810
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_LO 0x00000817
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_HI 0x00000818
+
+#define REG_A5XX_CP_CRASH_DUMP_CNTL 0x00000819
+
+#define REG_A5XX_CP_ME_STAT_ADDR 0x0000081a
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_1 0x0000081f
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_2 0x00000820
+
+#define REG_A5XX_CP_ROQ_DBG_ADDR 0x00000821
+
+#define REG_A5XX_CP_ROQ_DBG_DATA 0x00000822
+
+#define REG_A5XX_CP_MEQ_DBG_ADDR 0x00000823
+
+#define REG_A5XX_CP_MEQ_DBG_DATA 0x00000824
+
+#define REG_A5XX_CP_MEQ_THRESHOLDS 0x00000825
+
+#define REG_A5XX_CP_MERCIU_SIZE 0x00000826
+
+#define REG_A5XX_CP_MERCIU_DBG_ADDR 0x00000827
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_1 0x00000828
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_2 0x00000829
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_ADDR 0x0000082a
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_DATA 0x0000082b
+
+#define REG_A5XX_CP_ME_UCODE_DBG_ADDR 0x0000082f
+
+#define REG_A5XX_CP_ME_UCODE_DBG_DATA 0x00000830
+
+#define REG_A5XX_CP_CNTL 0x00000831
+
+#define REG_A5XX_CP_PFP_ME_CNTL 0x00000832
+
+#define REG_A5XX_CP_CHICKEN_DBG 0x00000833
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_LO 0x00000835
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_HI 0x00000836
+
+#define REG_A5XX_CP_ME_INSTR_BASE_LO 0x00000838
+
+#define REG_A5XX_CP_ME_INSTR_BASE_HI 0x00000839
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_CNTL 0x0000083b
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO 0x0000083c
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI 0x0000083d
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO 0x0000083e
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_HI 0x0000083f
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO 0x00000840
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI 0x00000841
+
+#define REG_A5XX_CP_ADDR_MODE_CNTL 0x00000860
+
+#define REG_A5XX_CP_ME_STAT_DATA 0x00000b14
+
+#define REG_A5XX_CP_WFI_PEND_CTR 0x00000b15
+
+#define REG_A5XX_CP_INTERRUPT_STATUS 0x00000b18
+
+#define REG_A5XX_CP_HW_FAULT 0x00000b1a
+
+#define REG_A5XX_CP_PROTECT_STATUS 0x00000b1c
+
+#define REG_A5XX_CP_IB1_BASE 0x00000b1f
+
+#define REG_A5XX_CP_IB1_BASE_HI 0x00000b20
+
+#define REG_A5XX_CP_IB1_BUFSZ 0x00000b21
+
+#define REG_A5XX_CP_IB2_BASE 0x00000b22
+
+#define REG_A5XX_CP_IB2_BASE_HI 0x00000b23
+
+#define REG_A5XX_CP_IB2_BUFSZ 0x00000b24
+
+static inline uint32_t REG_A5XX_CP_SCRATCH(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_PROTECT(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A5XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A5XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A5XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000
+#define A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24
+static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A5XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A5XX_CP_PROTECT_REG_TRAP_WRITE__MASK 0x20000000
+#define A5XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT 29
+static inline uint32_t A5XX_CP_PROTECT_REG_TRAP_WRITE(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_TRAP_WRITE__SHIFT) & A5XX_CP_PROTECT_REG_TRAP_WRITE__MASK;
+}
+#define A5XX_CP_PROTECT_REG_TRAP_READ__MASK 0x40000000
+#define A5XX_CP_PROTECT_REG_TRAP_READ__SHIFT 30
+static inline uint32_t A5XX_CP_PROTECT_REG_TRAP_READ(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_TRAP_READ__SHIFT) & A5XX_CP_PROTECT_REG_TRAP_READ__MASK;
+}
+
+#define REG_A5XX_CP_PROTECT_CNTL 0x000008a0
+
+#define REG_A5XX_CP_AHB_FAULT 0x00000b1b
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_0 0x00000bb0
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_1 0x00000bb1
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_2 0x00000bb2
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_3 0x00000bb3
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_4 0x00000bb4
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_5 0x00000bb5
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_6 0x00000bb6
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_7 0x00000bb7
+
+#define REG_A5XX_VSC_ADDR_MODE_CNTL 0x00000bc1
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_0 0x00000bba
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_1 0x00000bbb
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_2 0x00000bbc
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_3 0x00000bbd
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_A 0x00000004
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_B 0x00000005
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_C 0x00000006
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_D 0x00000007
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLT 0x00000008
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLM 0x00000009
+
+#define REG_A5XX_RBBM_CFG_DEBBUS_CTLTM_ENABLE_SHIFT 0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPL 0x0000000a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPE 0x0000000b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_0 0x0000000c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_1 0x0000000d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_2 0x0000000e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_3 0x0000000f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_0 0x00000010
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_1 0x00000011
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_2 0x00000012
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_3 0x00000013
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_0 0x00000014
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_1 0x00000015
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_0 0x00000016
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_1 0x00000017
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_2 0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_3 0x00000019
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_0 0x0000001a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_1 0x0000001b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_2 0x0000001c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_3 0x0000001d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_NIBBLEE 0x0000001e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC0 0x0000001f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC1 0x00000020
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADREG 0x00000021
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IDX 0x00000022
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CLRC 0x00000023
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADIVT 0x00000024
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000002f
+
+#define REG_A5XX_RBBM_INT_CLEAR_CMD 0x00000037
+
+#define REG_A5XX_RBBM_INT_0_MASK 0x00000038
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001
+#define A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR 0x00000002
+#define A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT 0x00000004
+#define A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT 0x00000020
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW 0x00000040
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080
+#define A5XX_RBBM_INT_0_MASK_CP_SW 0x00000100
+#define A5XX_RBBM_INT_0_MASK_CP_HW_ERROR 0x00000200
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS 0x00001000
+#define A5XX_RBBM_INT_0_MASK_CP_IB2 0x00002000
+#define A5XX_RBBM_INT_0_MASK_CP_IB1 0x00004000
+#define A5XX_RBBM_INT_0_MASK_CP_RB 0x00008000
+#define A5XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000
+#define A5XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000
+#define A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT 0x00800000
+#define A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000
+#define A5XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP 0x10000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_FIRMWARE 0x20000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000
+
+#define REG_A5XX_RBBM_AHB_DBG_CNTL 0x0000003f
+
+#define REG_A5XX_RBBM_EXT_VBIF_DBG_CNTL 0x00000041
+
+#define REG_A5XX_RBBM_SW_RESET_CMD 0x00000043
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046
+
+#define REG_A5XX_RBBM_DBG_LO_HI_GPIO 0x00000048
+
+#define REG_A5XX_RBBM_EXT_TRACE_BUS_CNTL 0x00000049
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP0 0x0000004a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP1 0x0000004b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP2 0x0000004c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP3 0x0000004d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP0 0x0000004e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP1 0x0000004f
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP2 0x00000050
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP3 0x00000051
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP0 0x00000052
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP1 0x00000053
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP2 0x00000054
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP3 0x00000055
+
+#define REG_A5XX_RBBM_READ_AHB_THROUGH_DBG 0x00000059
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_UCHE 0x0000005a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_UCHE 0x0000005b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_UCHE 0x0000005c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL4_UCHE 0x0000005d
+
+#define REG_A5XX_RBBM_CLOCK_HYST_UCHE 0x0000005e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_UCHE 0x0000005f
+
+#define REG_A5XX_RBBM_CLOCK_MODE_GPC 0x00000060
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPC 0x00000061
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPC 0x00000062
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00000063
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x00000064
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00000065
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_HLSQ 0x00000066
+
+#define REG_A5XX_RBBM_CLOCK_CNTL 0x00000067
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP0 0x00000068
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP1 0x00000069
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP2 0x0000006a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP3 0x0000006b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP0 0x0000006c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP1 0x0000006d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP2 0x0000006e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP3 0x0000006f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP0 0x00000070
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP1 0x00000071
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP2 0x00000072
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP3 0x00000073
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP0 0x00000074
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP1 0x00000075
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP2 0x00000076
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP3 0x00000077
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB0 0x00000078
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB1 0x00000079
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB2 0x0000007a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB3 0x0000007b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB0 0x0000007c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB1 0x0000007d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB2 0x0000007e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB3 0x0000007f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RAC 0x00000080
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RAC 0x00000081
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU0 0x00000082
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU1 0x00000083
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU2 0x00000084
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU3 0x00000085
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0 0x00000086
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1 0x00000087
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2 0x00000088
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3 0x00000089
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RAC 0x0000008a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RAC 0x0000008b
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0 0x0000008c
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1 0x0000008d
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2 0x0000008e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3 0x0000008f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_VFD 0x00000090
+
+#define REG_A5XX_RBBM_CLOCK_MODE_VFD 0x00000091
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_VFD 0x00000092
+
+#define REG_A5XX_RBBM_AHB_CNTL0 0x00000093
+
+#define REG_A5XX_RBBM_AHB_CNTL1 0x00000094
+
+#define REG_A5XX_RBBM_AHB_CNTL2 0x00000095
+
+#define REG_A5XX_RBBM_AHB_CMD 0x00000096
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11 0x0000009c
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12 0x0000009d
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13 0x0000009e
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14 0x0000009f
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15 0x000000a0
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16 0x000000a1
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17 0x000000a2
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18 0x000000a3
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP0 0x000000a4
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP1 0x000000a5
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP2 0x000000a6
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP3 0x000000a7
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP0 0x000000a8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP1 0x000000a9
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP2 0x000000aa
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP3 0x000000ab
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP0 0x000000ac
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP1 0x000000ad
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP2 0x000000ae
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP3 0x000000af
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP0 0x000000b0
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP1 0x000000b1
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP2 0x000000b2
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP3 0x000000b3
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP0 0x000000b4
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP1 0x000000b5
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP2 0x000000b6
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP3 0x000000b7
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP0 0x000000b8
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP1 0x000000b9
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP2 0x000000ba
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP3 0x000000bb
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_GPMU 0x000000c8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPMU 0x000000c9
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPMU 0x000000ca
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_LO 0x000003a0
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_HI 0x000003a1
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_LO 0x000003a2
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_HI 0x000003a3
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_LO 0x000003a4
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_HI 0x000003a5
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_LO 0x000003a6
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_HI 0x000003a7
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_LO 0x000003a8
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_HI 0x000003a9
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_LO 0x000003aa
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_HI 0x000003ab
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_LO 0x000003ac
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_HI 0x000003ad
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_LO 0x000003ae
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_HI 0x000003af
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_LO 0x000003b0
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_HI 0x000003b1
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_LO 0x000003b2
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_HI 0x000003b3
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_LO 0x000003b4
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_HI 0x000003b5
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_LO 0x000003b6
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_HI 0x000003b7
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_LO 0x000003b8
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_HI 0x000003b9
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_LO 0x000003ba
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_HI 0x000003bb
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_LO 0x000003bc
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_HI 0x000003bd
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_LO 0x000003be
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_HI 0x000003bf
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_LO 0x000003c0
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_HI 0x000003c1
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_LO 0x000003c2
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_HI 0x000003c3
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_LO 0x000003c4
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_HI 0x000003c5
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_LO 0x000003c6
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_HI 0x000003c7
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_LO 0x000003c8
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_HI 0x000003c9
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_LO 0x000003ca
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_HI 0x000003cb
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_LO 0x000003cc
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_HI 0x000003cd
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_LO 0x000003ce
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_HI 0x000003cf
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_LO 0x000003d0
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_HI 0x000003d1
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_LO 0x000003d2
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_HI 0x000003d3
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_LO 0x000003d4
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_HI 0x000003d5
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_LO 0x000003d6
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_HI 0x000003d7
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_LO 0x000003d8
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_HI 0x000003d9
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_LO 0x000003da
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_HI 0x000003db
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_LO 0x000003dc
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_HI 0x000003dd
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_LO 0x000003de
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_HI 0x000003df
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_LO 0x000003e0
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_HI 0x000003e1
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_LO 0x000003e2
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_HI 0x000003e3
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_LO 0x000003e4
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_HI 0x000003e5
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_LO 0x000003e6
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_HI 0x000003e7
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_LO 0x000003e8
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_HI 0x000003e9
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_LO 0x000003ea
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_HI 0x000003eb
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_LO 0x000003ec
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_HI 0x000003ed
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_LO 0x000003ee
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_HI 0x000003ef
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_LO 0x000003f0
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_HI 0x000003f1
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_LO 0x000003f2
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_HI 0x000003f3
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_LO 0x000003f4
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_HI 0x000003f5
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_LO 0x000003f6
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_HI 0x000003f7
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_LO 0x000003f8
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_HI 0x000003f9
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_LO 0x000003fa
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_HI 0x000003fb
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_LO 0x000003fc
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_HI 0x000003fd
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_LO 0x000003fe
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_HI 0x000003ff
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_LO 0x00000400
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_HI 0x00000401
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_LO 0x00000402
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_HI 0x00000403
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_LO 0x00000404
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_HI 0x00000405
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_LO 0x00000406
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_HI 0x00000407
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_LO 0x00000408
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_HI 0x00000409
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_LO 0x0000040a
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_HI 0x0000040b
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_LO 0x0000040c
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_HI 0x0000040d
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_LO 0x0000040e
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_HI 0x0000040f
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_LO 0x00000410
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_HI 0x00000411
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_LO 0x00000412
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_HI 0x00000413
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_LO 0x00000414
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_HI 0x00000415
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_LO 0x00000416
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_HI 0x00000417
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_LO 0x00000418
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_HI 0x00000419
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_LO 0x0000041a
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_HI 0x0000041b
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_LO 0x0000041c
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_HI 0x0000041d
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_LO 0x0000041e
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_HI 0x0000041f
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_LO 0x00000420
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_HI 0x00000421
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_LO 0x00000422
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_HI 0x00000423
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_LO 0x00000424
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_HI 0x00000425
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_LO 0x00000426
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_HI 0x00000427
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_LO 0x00000428
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_HI 0x00000429
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_LO 0x0000042a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_HI 0x0000042b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_LO 0x0000042c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_HI 0x0000042d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_LO 0x0000042e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_HI 0x0000042f
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_LO 0x00000430
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_HI 0x00000431
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_LO 0x00000432
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_HI 0x00000433
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_LO 0x00000434
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_HI 0x00000435
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_LO 0x00000436
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_HI 0x00000437
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_LO 0x00000438
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_HI 0x00000439
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_LO 0x0000043a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_HI 0x0000043b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_LO 0x0000043c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_HI 0x0000043d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_LO 0x0000043e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_HI 0x0000043f
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_LO 0x00000440
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_HI 0x00000441
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_LO 0x00000442
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_HI 0x00000443
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_LO 0x00000444
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_HI 0x00000445
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_LO 0x00000446
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_HI 0x00000447
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_LO 0x00000448
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_HI 0x00000449
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_LO 0x0000044a
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_HI 0x0000044b
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_LO 0x0000044c
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_HI 0x0000044d
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_LO 0x0000044e
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_HI 0x0000044f
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_LO 0x00000450
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_HI 0x00000451
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_LO 0x00000452
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_HI 0x00000453
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_LO 0x00000454
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_HI 0x00000455
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_LO 0x00000456
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_HI 0x00000457
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_LO 0x00000458
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_HI 0x00000459
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_LO 0x0000045a
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_HI 0x0000045b
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_LO 0x0000045c
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_HI 0x0000045d
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_LO 0x0000045e
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_HI 0x0000045f
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_LO 0x00000460
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_HI 0x00000461
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_LO 0x00000462
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_HI 0x00000463
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 0x0000046b
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 0x0000046c
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 0x0000046d
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000046e
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_LO 0x000004d2
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_HI 0x000004d3
+
+#define REG_A5XX_RBBM_STATUS 0x000004f5
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__MASK 0x80000000
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__SHIFT 31
+static inline uint32_t A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__SHIFT) & A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB__MASK;
+}
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__MASK 0x40000000
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__SHIFT 30
+static inline uint32_t A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__SHIFT) & A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP__MASK;
+}
+#define A5XX_RBBM_STATUS_HLSQ_BUSY__MASK 0x20000000
+#define A5XX_RBBM_STATUS_HLSQ_BUSY__SHIFT 29
+static inline uint32_t A5XX_RBBM_STATUS_HLSQ_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_HLSQ_BUSY__SHIFT) & A5XX_RBBM_STATUS_HLSQ_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VSC_BUSY__MASK 0x10000000
+#define A5XX_RBBM_STATUS_VSC_BUSY__SHIFT 28
+static inline uint32_t A5XX_RBBM_STATUS_VSC_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_VSC_BUSY__SHIFT) & A5XX_RBBM_STATUS_VSC_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_TPL1_BUSY__MASK 0x08000000
+#define A5XX_RBBM_STATUS_TPL1_BUSY__SHIFT 27
+static inline uint32_t A5XX_RBBM_STATUS_TPL1_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_TPL1_BUSY__SHIFT) & A5XX_RBBM_STATUS_TPL1_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_SP_BUSY__MASK 0x04000000
+#define A5XX_RBBM_STATUS_SP_BUSY__SHIFT 26
+static inline uint32_t A5XX_RBBM_STATUS_SP_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_SP_BUSY__SHIFT) & A5XX_RBBM_STATUS_SP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_UCHE_BUSY__MASK 0x02000000
+#define A5XX_RBBM_STATUS_UCHE_BUSY__SHIFT 25
+static inline uint32_t A5XX_RBBM_STATUS_UCHE_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_UCHE_BUSY__SHIFT) & A5XX_RBBM_STATUS_UCHE_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VPC_BUSY__MASK 0x01000000
+#define A5XX_RBBM_STATUS_VPC_BUSY__SHIFT 24
+static inline uint32_t A5XX_RBBM_STATUS_VPC_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_VPC_BUSY__SHIFT) & A5XX_RBBM_STATUS_VPC_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VFDP_BUSY__MASK 0x00800000
+#define A5XX_RBBM_STATUS_VFDP_BUSY__SHIFT 23
+static inline uint32_t A5XX_RBBM_STATUS_VFDP_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_VFDP_BUSY__SHIFT) & A5XX_RBBM_STATUS_VFDP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VFD_BUSY__MASK 0x00400000
+#define A5XX_RBBM_STATUS_VFD_BUSY__SHIFT 22
+static inline uint32_t A5XX_RBBM_STATUS_VFD_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_VFD_BUSY__SHIFT) & A5XX_RBBM_STATUS_VFD_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_TESS_BUSY__MASK 0x00200000
+#define A5XX_RBBM_STATUS_TESS_BUSY__SHIFT 21
+static inline uint32_t A5XX_RBBM_STATUS_TESS_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_TESS_BUSY__SHIFT) & A5XX_RBBM_STATUS_TESS_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_PC_VSD_BUSY__MASK 0x00100000
+#define A5XX_RBBM_STATUS_PC_VSD_BUSY__SHIFT 20
+static inline uint32_t A5XX_RBBM_STATUS_PC_VSD_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_PC_VSD_BUSY__SHIFT) & A5XX_RBBM_STATUS_PC_VSD_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_PC_DCALL_BUSY__MASK 0x00080000
+#define A5XX_RBBM_STATUS_PC_DCALL_BUSY__SHIFT 19
+static inline uint32_t A5XX_RBBM_STATUS_PC_DCALL_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_PC_DCALL_BUSY__SHIFT) & A5XX_RBBM_STATUS_PC_DCALL_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__MASK 0x00040000
+#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__SHIFT 18
+static inline uint32_t A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__SHIFT) & A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_DCOM_BUSY__MASK 0x00020000
+#define A5XX_RBBM_STATUS_DCOM_BUSY__SHIFT 17
+static inline uint32_t A5XX_RBBM_STATUS_DCOM_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_DCOM_BUSY__SHIFT) & A5XX_RBBM_STATUS_DCOM_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_COM_BUSY__MASK 0x00010000
+#define A5XX_RBBM_STATUS_COM_BUSY__SHIFT 16
+static inline uint32_t A5XX_RBBM_STATUS_COM_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_COM_BUSY__SHIFT) & A5XX_RBBM_STATUS_COM_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_LRZ_BUZY__MASK 0x00008000
+#define A5XX_RBBM_STATUS_LRZ_BUZY__SHIFT 15
+static inline uint32_t A5XX_RBBM_STATUS_LRZ_BUZY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_LRZ_BUZY__SHIFT) & A5XX_RBBM_STATUS_LRZ_BUZY__MASK;
+}
+#define A5XX_RBBM_STATUS_A2D_DSP_BUSY__MASK 0x00004000
+#define A5XX_RBBM_STATUS_A2D_DSP_BUSY__SHIFT 14
+static inline uint32_t A5XX_RBBM_STATUS_A2D_DSP_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_A2D_DSP_BUSY__SHIFT) & A5XX_RBBM_STATUS_A2D_DSP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CCUFCHE_BUSY__MASK 0x00002000
+#define A5XX_RBBM_STATUS_CCUFCHE_BUSY__SHIFT 13
+static inline uint32_t A5XX_RBBM_STATUS_CCUFCHE_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_CCUFCHE_BUSY__SHIFT) & A5XX_RBBM_STATUS_CCUFCHE_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_RB_BUSY__MASK 0x00001000
+#define A5XX_RBBM_STATUS_RB_BUSY__SHIFT 12
+static inline uint32_t A5XX_RBBM_STATUS_RB_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_RB_BUSY__SHIFT) & A5XX_RBBM_STATUS_RB_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_RAS_BUSY__MASK 0x00000800
+#define A5XX_RBBM_STATUS_RAS_BUSY__SHIFT 11
+static inline uint32_t A5XX_RBBM_STATUS_RAS_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_RAS_BUSY__SHIFT) & A5XX_RBBM_STATUS_RAS_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_TSE_BUSY__MASK 0x00000400
+#define A5XX_RBBM_STATUS_TSE_BUSY__SHIFT 10
+static inline uint32_t A5XX_RBBM_STATUS_TSE_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_TSE_BUSY__SHIFT) & A5XX_RBBM_STATUS_TSE_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_VBIF_BUSY__MASK 0x00000200
+#define A5XX_RBBM_STATUS_VBIF_BUSY__SHIFT 9
+static inline uint32_t A5XX_RBBM_STATUS_VBIF_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_VBIF_BUSY__SHIFT) & A5XX_RBBM_STATUS_VBIF_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__MASK 0x00000100
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__SHIFT 8
+static inline uint32_t A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__SHIFT) & A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__MASK 0x00000080
+#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__SHIFT 7
+static inline uint32_t A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__SHIFT) & A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_BUSY__MASK 0x00000040
+#define A5XX_RBBM_STATUS_CP_BUSY__SHIFT 6
+static inline uint32_t A5XX_RBBM_STATUS_CP_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_CP_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__MASK 0x00000020
+#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__SHIFT 5
+static inline uint32_t A5XX_RBBM_STATUS_GPMU_MASTER_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__SHIFT) & A5XX_RBBM_STATUS_GPMU_MASTER_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_CRASH_BUSY__MASK 0x00000010
+#define A5XX_RBBM_STATUS_CP_CRASH_BUSY__SHIFT 4
+static inline uint32_t A5XX_RBBM_STATUS_CP_CRASH_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_CP_CRASH_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_CRASH_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_ETS_BUSY__MASK 0x00000008
+#define A5XX_RBBM_STATUS_CP_ETS_BUSY__SHIFT 3
+static inline uint32_t A5XX_RBBM_STATUS_CP_ETS_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_CP_ETS_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_ETS_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_PFP_BUSY__MASK 0x00000004
+#define A5XX_RBBM_STATUS_CP_PFP_BUSY__SHIFT 2
+static inline uint32_t A5XX_RBBM_STATUS_CP_PFP_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_CP_PFP_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_PFP_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_CP_ME_BUSY__MASK 0x00000002
+#define A5XX_RBBM_STATUS_CP_ME_BUSY__SHIFT 1
+static inline uint32_t A5XX_RBBM_STATUS_CP_ME_BUSY(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_STATUS_CP_ME_BUSY__SHIFT) & A5XX_RBBM_STATUS_CP_ME_BUSY__MASK;
+}
+#define A5XX_RBBM_STATUS_HI_BUSY 0x00000001
+
+#define REG_A5XX_RBBM_STATUS3 0x00000530
+#define A5XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT 0x01000000
+
+#define REG_A5XX_RBBM_INT_0_STATUS 0x000004e1
+
+#define REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS 0x000004f0
+
+#define REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS 0x000004f1
+
+#define REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS 0x000004f3
+
+#define REG_A5XX_RBBM_AHB_ERROR_STATUS 0x000004f4
+
+#define REG_A5XX_RBBM_PERFCTR_CNTL 0x00000464
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD0 0x00000465
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD1 0x00000466
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD2 0x00000467
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD3 0x00000468
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000469
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x0000046a
+
+#define REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000046f
+
+#define REG_A5XX_RBBM_AHB_ERROR 0x000004ed
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_EVENT_LOGIC 0x00000504
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OVER 0x00000505
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT0 0x00000506
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT1 0x00000507
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT2 0x00000508
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT3 0x00000509
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT4 0x0000050a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT5 0x0000050b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_ADDR 0x0000050c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF0 0x0000050d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF1 0x0000050e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF2 0x0000050f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF3 0x00000510
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF4 0x00000511
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR0 0x00000512
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR1 0x00000513
+
+#define REG_A5XX_RBBM_ISDB_CNT 0x00000533
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CONFIG 0x0000f000
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CNTL 0x0000f400
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO 0x0000f800
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI 0x0000f801
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0x0000f802
+
+#define REG_A5XX_RBBM_SECVID_TSB_CNTL 0x0000f803
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_LO 0x0000f804
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_HI 0x0000f805
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_LO 0x0000f806
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_HI 0x0000f807
+
+#define REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810
+
+#define REG_A5XX_VSC_BIN_SIZE 0x00000bc2
+#define A5XX_VSC_BIN_SIZE_WIDTH__MASK 0x000000ff
+#define A5XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A5XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A5XX_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A5XX_VSC_BIN_SIZE_HEIGHT__MASK 0x0001fe00
+#define A5XX_VSC_BIN_SIZE_HEIGHT__SHIFT 9
+static inline uint32_t A5XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A5XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A5XX_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A5XX_VSC_SIZE_ADDRESS_LO 0x00000bc3
+
+#define REG_A5XX_VSC_SIZE_ADDRESS_HI 0x00000bc4
+
+#define REG_A5XX_UNKNOWN_0BC5 0x00000bc5
+
+#define REG_A5XX_UNKNOWN_0BC6 0x00000bc6
+
+static inline uint32_t REG_A5XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000bd0 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000bd0 + 0x1*i0; }
+#define A5XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
+#define A5XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0
+static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
+{
+ return ((val) << A5XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_X__MASK;
+}
+#define A5XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00
+#define A5XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10
+static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val)
+{
+ return ((val) << A5XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_Y__MASK;
+}
+#define A5XX_VSC_PIPE_CONFIG_REG_W__MASK 0x00f00000
+#define A5XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20
+static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_W(uint32_t val)
+{
+ return ((val) << A5XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_W__MASK;
+}
+#define A5XX_VSC_PIPE_CONFIG_REG_H__MASK 0x0f000000
+#define A5XX_VSC_PIPE_CONFIG_REG_H__SHIFT 24
+static inline uint32_t A5XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
+{
+ return ((val) << A5XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A5XX_VSC_PIPE_CONFIG_REG_H__MASK;
+}
+
+static inline uint32_t REG_A5XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000be0 + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VSC_PIPE_DATA_ADDRESS_LO(uint32_t i0) { return 0x00000be0 + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VSC_PIPE_DATA_ADDRESS_HI(uint32_t i0) { return 0x00000be1 + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c00 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0x00000c00 + 0x1*i0; }
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_0 0x00000c60
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_1 0x00000c61
+
+#define REG_A5XX_VSC_RESOLVE_CNTL 0x00000cdd
+#define A5XX_VSC_RESOLVE_CNTL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_VSC_RESOLVE_CNTL_X__MASK 0x00007fff
+#define A5XX_VSC_RESOLVE_CNTL_X__SHIFT 0
+static inline uint32_t A5XX_VSC_RESOLVE_CNTL_X(uint32_t val)
+{
+ return ((val) << A5XX_VSC_RESOLVE_CNTL_X__SHIFT) & A5XX_VSC_RESOLVE_CNTL_X__MASK;
+}
+#define A5XX_VSC_RESOLVE_CNTL_Y__MASK 0x7fff0000
+#define A5XX_VSC_RESOLVE_CNTL_Y__SHIFT 16
+static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
+{
+ return ((val) << A5XX_VSC_RESOLVE_CNTL_Y__SHIFT) & A5XX_VSC_RESOLVE_CNTL_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_ADDR_MODE_CNTL 0x00000c81
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c90
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c91
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c92
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c93
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c94
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c95
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c96
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c97
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_0 0x00000c98
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_1 0x00000c99
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_2 0x00000c9a
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_3 0x00000c9b
+
+#define REG_A5XX_RB_DBG_ECO_CNTL 0x00000cc4
+
+#define REG_A5XX_RB_ADDR_MODE_CNTL 0x00000cc5
+
+#define REG_A5XX_RB_MODE_CNTL 0x00000cc6
+
+#define REG_A5XX_RB_CCU_CNTL 0x00000cc7
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_0 0x00000cd0
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_1 0x00000cd1
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_2 0x00000cd2
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_3 0x00000cd3
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_4 0x00000cd4
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_5 0x00000cd5
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_6 0x00000cd6
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_7 0x00000cd7
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_0 0x00000cd8
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_1 0x00000cd9
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_2 0x00000cda
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_3 0x00000cdb
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_0 0x00000ce0
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_1 0x00000ce1
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_2 0x00000ce2
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_3 0x00000ce3
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_0 0x00000ce4
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_1 0x00000ce5
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_0 0x00000cec
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_1 0x00000ced
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_2 0x00000cee
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_3 0x00000cef
+
+#define REG_A5XX_PC_DBG_ECO_CNTL 0x00000d00
+#define A5XX_PC_DBG_ECO_CNTL_TWOPASSUSEWFI 0x00000100
+
+#define REG_A5XX_PC_ADDR_MODE_CNTL 0x00000d01
+
+#define REG_A5XX_PC_MODE_CNTL 0x00000d02
+
+#define REG_A5XX_PC_INDEX_BUF_LO 0x00000d04
+
+#define REG_A5XX_PC_INDEX_BUF_HI 0x00000d05
+
+#define REG_A5XX_PC_START_INDEX 0x00000d06
+
+#define REG_A5XX_PC_MAX_INDEX 0x00000d07
+
+#define REG_A5XX_PC_TESSFACTOR_ADDR_LO 0x00000d08
+
+#define REG_A5XX_PC_TESSFACTOR_ADDR_HI 0x00000d09
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_0 0x00000d10
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_1 0x00000d11
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_2 0x00000d12
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_3 0x00000d13
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_4 0x00000d14
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_5 0x00000d15
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_6 0x00000d16
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_7 0x00000d17
+
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_0 0x00000e00
+
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_1 0x00000e01
+
+#define REG_A5XX_HLSQ_DBG_ECO_CNTL 0x00000e04
+
+#define REG_A5XX_HLSQ_ADDR_MODE_CNTL 0x00000e05
+
+#define REG_A5XX_HLSQ_MODE_CNTL 0x00000e06
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e10
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e11
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e12
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e13
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e14
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e15
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e16
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e17
+
+#define REG_A5XX_HLSQ_SPTP_RDSEL 0x00000f08
+
+#define REG_A5XX_HLSQ_DBG_READ_SEL 0x0000bc00
+
+#define REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000a000
+
+#define REG_A5XX_VFD_ADDR_MODE_CNTL 0x00000e41
+
+#define REG_A5XX_VFD_MODE_CNTL 0x00000e42
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_0 0x00000e50
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_1 0x00000e51
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_2 0x00000e52
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_3 0x00000e53
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_4 0x00000e54
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_5 0x00000e55
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_6 0x00000e56
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_7 0x00000e57
+
+#define REG_A5XX_VPC_DBG_ECO_CNTL 0x00000e60
+#define A5XX_VPC_DBG_ECO_CNTL_ALLFLATOPTDIS 0x00000400
+
+#define REG_A5XX_VPC_ADDR_MODE_CNTL 0x00000e61
+
+#define REG_A5XX_VPC_MODE_CNTL 0x00000e62
+#define A5XX_VPC_MODE_CNTL_BINNING_PASS 0x00000001
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_0 0x00000e64
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_1 0x00000e65
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_2 0x00000e66
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_3 0x00000e67
+
+#define REG_A5XX_UCHE_ADDR_MODE_CNTL 0x00000e80
+
+#define REG_A5XX_UCHE_MODE_CNTL 0x00000e81
+
+#define REG_A5XX_UCHE_SVM_CNTL 0x00000e82
+
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_LO 0x00000e87
+
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_HI 0x00000e88
+
+#define REG_A5XX_UCHE_TRAP_BASE_LO 0x00000e89
+
+#define REG_A5XX_UCHE_TRAP_BASE_HI 0x00000e8a
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_LO 0x00000e8b
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_HI 0x00000e8c
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_LO 0x00000e8d
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_HI 0x00000e8e
+
+#define REG_A5XX_UCHE_DBG_ECO_CNTL_2 0x00000e8f
+
+#define REG_A5XX_UCHE_DBG_ECO_CNTL 0x00000e90
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_LO 0x00000e91
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_HI 0x00000e92
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_LO 0x00000e93
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_HI 0x00000e94
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE 0x00000e95
+
+#define REG_A5XX_UCHE_CACHE_WAYS 0x00000e96
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000ea0
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000ea1
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000ea2
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000ea3
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000ea4
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000ea5
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000ea6
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000ea7
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_0 0x00000ea8
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_1 0x00000ea9
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_2 0x00000eaa
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_3 0x00000eab
+
+#define REG_A5XX_UCHE_TRAP_LOG_LO 0x00000eb1
+
+#define REG_A5XX_UCHE_TRAP_LOG_HI 0x00000eb2
+
+#define REG_A5XX_SP_DBG_ECO_CNTL 0x00000ec0
+
+#define REG_A5XX_SP_ADDR_MODE_CNTL 0x00000ec1
+
+#define REG_A5XX_SP_MODE_CNTL 0x00000ec2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_0 0x00000ed0
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_1 0x00000ed1
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_2 0x00000ed2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_3 0x00000ed3
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_4 0x00000ed4
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_5 0x00000ed5
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_6 0x00000ed6
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_7 0x00000ed7
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_8 0x00000ed8
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_9 0x00000ed9
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_10 0x00000eda
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_11 0x00000edb
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_0 0x00000edc
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_1 0x00000edd
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_2 0x00000ede
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_3 0x00000edf
+
+#define REG_A5XX_TPL1_ADDR_MODE_CNTL 0x00000f01
+
+#define REG_A5XX_TPL1_MODE_CNTL 0x00000f02
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_0 0x00000f10
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_1 0x00000f11
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_2 0x00000f12
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_3 0x00000f13
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_4 0x00000f14
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_5 0x00000f15
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_6 0x00000f16
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_7 0x00000f17
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_0 0x00000f18
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_1 0x00000f19
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_2 0x00000f1a
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_3 0x00000f1b
+
+#define REG_A5XX_VBIF_VERSION 0x00003000
+
+#define REG_A5XX_VBIF_CLKON 0x00003001
+
+#define REG_A5XX_VBIF_ABIT_SORT 0x00003028
+
+#define REG_A5XX_VBIF_ABIT_SORT_CONF 0x00003029
+
+#define REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
+
+#define REG_A5XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL0 0x00003080
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL1 0x00003081
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT_CTRL 0x00003084
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL0 0x00003085
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL1 0x00003086
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL0 0x00003087
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL1 0x00003088
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT 0x0000308c
+
+#define REG_A5XX_VBIF_PERF_CNT_EN0 0x000030c0
+
+#define REG_A5XX_VBIF_PERF_CNT_EN1 0x000030c1
+
+#define REG_A5XX_VBIF_PERF_CNT_EN2 0x000030c2
+
+#define REG_A5XX_VBIF_PERF_CNT_EN3 0x000030c3
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR0 0x000030c8
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR1 0x000030c9
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR2 0x000030ca
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR3 0x000030cb
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL0 0x000030d0
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL1 0x000030d1
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL2 0x000030d2
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL3 0x000030d3
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW0 0x000030d8
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW1 0x000030d9
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW2 0x000030da
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW3 0x000030db
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH0 0x000030e0
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH1 0x000030e1
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH2 0x000030e2
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH3 0x000030e3
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW1 0x00003111
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW2 0x00003112
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH0 0x00003118
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
+
+#define REG_A5XX_GPMU_INST_RAM_BASE 0x00008800
+
+#define REG_A5XX_GPMU_DATA_RAM_BASE 0x00009800
+
+#define REG_A5XX_GPMU_SP_POWER_CNTL 0x0000a881
+
+#define REG_A5XX_GPMU_RBCCU_CLOCK_CNTL 0x0000a886
+
+#define REG_A5XX_GPMU_RBCCU_POWER_CNTL 0x0000a887
+
+#define REG_A5XX_GPMU_SP_PWR_CLK_STATUS 0x0000a88b
+#define A5XX_GPMU_SP_PWR_CLK_STATUS_PWR_ON 0x00100000
+
+#define REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS 0x0000a88d
+#define A5XX_GPMU_RBCCU_PWR_CLK_STATUS_PWR_ON 0x00100000
+
+#define REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY 0x0000a891
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL 0x0000a892
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST 0x0000a893
+
+#define REG_A5XX_GPMU_PWR_COL_BINNING_CTRL 0x0000a894
+
+#define REG_A5XX_GPMU_WFI_CONFIG 0x0000a8c1
+
+#define REG_A5XX_GPMU_RBBM_INTR_INFO 0x0000a8d6
+
+#define REG_A5XX_GPMU_CM3_SYSRESET 0x0000a8d8
+
+#define REG_A5XX_GPMU_GENERAL_0 0x0000a8e0
+
+#define REG_A5XX_GPMU_GENERAL_1 0x0000a8e1
+
+#define REG_A5XX_SP_POWER_COUNTER_0_LO 0x0000a840
+
+#define REG_A5XX_SP_POWER_COUNTER_0_HI 0x0000a841
+
+#define REG_A5XX_SP_POWER_COUNTER_1_LO 0x0000a842
+
+#define REG_A5XX_SP_POWER_COUNTER_1_HI 0x0000a843
+
+#define REG_A5XX_SP_POWER_COUNTER_2_LO 0x0000a844
+
+#define REG_A5XX_SP_POWER_COUNTER_2_HI 0x0000a845
+
+#define REG_A5XX_SP_POWER_COUNTER_3_LO 0x0000a846
+
+#define REG_A5XX_SP_POWER_COUNTER_3_HI 0x0000a847
+
+#define REG_A5XX_TP_POWER_COUNTER_0_LO 0x0000a848
+
+#define REG_A5XX_TP_POWER_COUNTER_0_HI 0x0000a849
+
+#define REG_A5XX_TP_POWER_COUNTER_1_LO 0x0000a84a
+
+#define REG_A5XX_TP_POWER_COUNTER_1_HI 0x0000a84b
+
+#define REG_A5XX_TP_POWER_COUNTER_2_LO 0x0000a84c
+
+#define REG_A5XX_TP_POWER_COUNTER_2_HI 0x0000a84d
+
+#define REG_A5XX_TP_POWER_COUNTER_3_LO 0x0000a84e
+
+#define REG_A5XX_TP_POWER_COUNTER_3_HI 0x0000a84f
+
+#define REG_A5XX_RB_POWER_COUNTER_0_LO 0x0000a850
+
+#define REG_A5XX_RB_POWER_COUNTER_0_HI 0x0000a851
+
+#define REG_A5XX_RB_POWER_COUNTER_1_LO 0x0000a852
+
+#define REG_A5XX_RB_POWER_COUNTER_1_HI 0x0000a853
+
+#define REG_A5XX_RB_POWER_COUNTER_2_LO 0x0000a854
+
+#define REG_A5XX_RB_POWER_COUNTER_2_HI 0x0000a855
+
+#define REG_A5XX_RB_POWER_COUNTER_3_LO 0x0000a856
+
+#define REG_A5XX_RB_POWER_COUNTER_3_HI 0x0000a857
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_LO 0x0000a858
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_HI 0x0000a859
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_LO 0x0000a85a
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_HI 0x0000a85b
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_LO 0x0000a85c
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_HI 0x0000a85d
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_LO 0x0000a85e
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_HI 0x0000a85f
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_LO 0x0000a860
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_HI 0x0000a861
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_LO 0x0000a862
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_HI 0x0000a863
+
+#define REG_A5XX_CP_POWER_COUNTER_0_LO 0x0000a864
+
+#define REG_A5XX_CP_POWER_COUNTER_0_HI 0x0000a865
+
+#define REG_A5XX_CP_POWER_COUNTER_1_LO 0x0000a866
+
+#define REG_A5XX_CP_POWER_COUNTER_1_HI 0x0000a867
+
+#define REG_A5XX_CP_POWER_COUNTER_2_LO 0x0000a868
+
+#define REG_A5XX_CP_POWER_COUNTER_2_HI 0x0000a869
+
+#define REG_A5XX_CP_POWER_COUNTER_3_LO 0x0000a86a
+
+#define REG_A5XX_CP_POWER_COUNTER_3_HI 0x0000a86b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_LO 0x0000a86c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_HI 0x0000a86d
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_LO 0x0000a86e
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_HI 0x0000a86f
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_LO 0x0000a870
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_HI 0x0000a871
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_LO 0x0000a872
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_HI 0x0000a873
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_LO 0x0000a874
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_HI 0x0000a875
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_LO 0x0000a876
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_HI 0x0000a877
+
+#define REG_A5XX_GPMU_POWER_COUNTER_ENABLE 0x0000a878
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_LO 0x0000a879
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_HI 0x0000a87a
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_RESET 0x0000a87b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_0 0x0000a87c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_1 0x0000a87d
+
+#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL 0x0000a8a3
+
+#define REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL 0x0000a8a8
+
+#define REG_A5XX_GPMU_TEMP_SENSOR_ID 0x0000ac00
+
+#define REG_A5XX_GPMU_TEMP_SENSOR_CONFIG 0x0000ac01
+
+#define REG_A5XX_GPMU_TEMP_VAL 0x0000ac02
+
+#define REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD 0x0000ac03
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_STATUS 0x0000ac05
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK 0x0000ac06
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_0_1 0x0000ac40
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_2_3 0x0000ac41
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_0_1 0x0000ac42
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_2_3 0x0000ac43
+
+#define REG_A5XX_GPMU_BASE_LEAKAGE 0x0000ac46
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE 0x0000ac60
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_STATUS 0x0000ac61
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK 0x0000ac62
+
+#define REG_A5XX_GPMU_GPMU_PWR_THRESHOLD 0x0000ac80
+
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL 0x0000acc4
+
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS 0x0000acc5
+
+#define REG_A5XX_GDPM_CONFIG1 0x0000b80c
+
+#define REG_A5XX_GDPM_CONFIG2 0x0000b80d
+
+#define REG_A5XX_GDPM_INT_EN 0x0000b80f
+
+#define REG_A5XX_GDPM_INT_MASK 0x0000b811
+
+#define REG_A5XX_GPMU_BEC_ENABLE 0x0000b9a0
+
+#define REG_A5XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000c41a
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x0000c41d
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x0000c41f
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x0000c421
+
+#define REG_A5XX_GPU_CS_ENABLE_REG 0x0000c520
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x0000c557
+
+#define REG_A5XX_GRAS_CL_CNTL 0x0000e000
+#define A5XX_GRAS_CL_CNTL_ZERO_GB_SCALE_Z 0x00000040
+
+#define REG_A5XX_GRAS_VS_CL_CNTL 0x0000e001
+#define A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A5XX_GRAS_VS_CL_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT) & A5XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK;
+}
+#define A5XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK 0x0000ff00
+#define A5XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT 8
+static inline uint32_t A5XX_GRAS_VS_CL_CNTL_CULL_MASK(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT) & A5XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E004 0x0000e004
+
+#define REG_A5XX_GRAS_CNTL 0x0000e005
+#define A5XX_GRAS_CNTL_IJ_PERSP_PIXEL 0x00000001
+#define A5XX_GRAS_CNTL_IJ_PERSP_CENTROID 0x00000002
+#define A5XX_GRAS_CNTL_IJ_PERSP_SAMPLE 0x00000004
+#define A5XX_GRAS_CNTL_IJ_LINEAR_PIXEL 0x00000008
+#define A5XX_GRAS_CNTL_IJ_LINEAR_CENTROID 0x00000010
+#define A5XX_GRAS_CNTL_IJ_LINEAR_SAMPLE 0x00000020
+#define A5XX_GRAS_CNTL_COORD_MASK__MASK 0x000003c0
+#define A5XX_GRAS_CNTL_COORD_MASK__SHIFT 6
+static inline uint32_t A5XX_GRAS_CNTL_COORD_MASK(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_CNTL_COORD_MASK__SHIFT) & A5XX_GRAS_CNTL_COORD_MASK__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x0000e006
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
+}
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_XOFFSET_0 0x0000e010
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_XSCALE_0 0x0000e011
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YOFFSET_0 0x0000e012
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YSCALE_0 0x0000e013
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZOFFSET_0 0x0000e014
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZSCALE_0 0x0000e015
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_CNTL 0x0000e090
+#define A5XX_GRAS_SU_CNTL_CULL_FRONT 0x00000001
+#define A5XX_GRAS_SU_CNTL_CULL_BACK 0x00000002
+#define A5XX_GRAS_SU_CNTL_FRONT_CW 0x00000004
+#define A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK 0x000007f8
+#define A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A5XX_GRAS_SU_CNTL_LINEHALFWIDTH(float val)
+{
+ return ((((int32_t)(val * 4.0))) << A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT) & A5XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
+}
+#define A5XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800
+#define A5XX_GRAS_SU_CNTL_LINE_MODE__MASK 0x00002000
+#define A5XX_GRAS_SU_CNTL_LINE_MODE__SHIFT 13
+static inline uint32_t A5XX_GRAS_SU_CNTL_LINE_MODE(enum a5xx_line_mode val)
+{
+ return ((val) << A5XX_GRAS_SU_CNTL_LINE_MODE__SHIFT) & A5XX_GRAS_SU_CNTL_LINE_MODE__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POINT_MINMAX 0x0000e091
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POINT_SIZE 0x0000e092
+#define A5XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A5XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((int32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_SIZE__SHIFT) & A5XX_GRAS_SU_POINT_SIZE__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_LAYERED 0x0000e093
+
+#define REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL 0x0000e094
+#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
+#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_UNK1 0x00000002
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000e095
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000e096
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP 0x0000e097
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_DEPTH_BUFFER_INFO 0x0000e098
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+ return ((val) << A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_CONSERVATIVE_RAS_CNTL 0x0000e099
+
+#define REG_A5XX_GRAS_SC_CNTL 0x0000e0a0
+#define A5XX_GRAS_SC_CNTL_BINNING_PASS 0x00000001
+#define A5XX_GRAS_SC_CNTL_SAMPLES_PASSED 0x00008000
+
+#define REG_A5XX_GRAS_SC_BIN_CNTL 0x0000e0a1
+
+#define REG_A5XX_GRAS_SC_RAS_MSAA_CNTL 0x0000e0a2
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_DEST_MSAA_CNTL 0x0000e0a3
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_CNTL 0x0000e0a4
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0 0x0000e0aa
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0 0x0000e0ab
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0 0x0000e0ca
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0 0x0000e0cb
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL 0x0000e0ea
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000e0eb
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_LRZ_CNTL 0x0000e100
+#define A5XX_GRAS_LRZ_CNTL_ENABLE 0x00000001
+#define A5XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002
+#define A5XX_GRAS_LRZ_CNTL_GREATER 0x00000004
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_LO 0x0000e101
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_HI 0x0000e102
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_PITCH 0x0000e103
+#define A5XX_GRAS_LRZ_BUFFER_PITCH__MASK 0xffffffff
+#define A5XX_GRAS_LRZ_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A5XX_GRAS_LRZ_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_GRAS_LRZ_BUFFER_PITCH__SHIFT) & A5XX_GRAS_LRZ_BUFFER_PITCH__MASK;
+}
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO 0x0000e104
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x0000e105
+
+#define REG_A5XX_RB_CNTL 0x0000e140
+#define A5XX_RB_CNTL_WIDTH__MASK 0x000000ff
+#define A5XX_RB_CNTL_WIDTH__SHIFT 0
+static inline uint32_t A5XX_RB_CNTL_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_CNTL_WIDTH__SHIFT) & A5XX_RB_CNTL_WIDTH__MASK;
+}
+#define A5XX_RB_CNTL_HEIGHT__MASK 0x0001fe00
+#define A5XX_RB_CNTL_HEIGHT__SHIFT 9
+static inline uint32_t A5XX_RB_CNTL_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_CNTL_HEIGHT__SHIFT) & A5XX_RB_CNTL_HEIGHT__MASK;
+}
+#define A5XX_RB_CNTL_BYPASS 0x00020000
+
+#define REG_A5XX_RB_RENDER_CNTL 0x0000e141
+#define A5XX_RB_RENDER_CNTL_BINNING_PASS 0x00000001
+#define A5XX_RB_RENDER_CNTL_SAMPLES_PASSED 0x00000040
+#define A5XX_RB_RENDER_CNTL_DISABLE_COLOR_PIPE 0x00000080
+#define A5XX_RB_RENDER_CNTL_FLAG_DEPTH 0x00004000
+#define A5XX_RB_RENDER_CNTL_FLAG_DEPTH2 0x00008000
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS__MASK 0x00ff0000
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT 16
+static inline uint32_t A5XX_RB_RENDER_CNTL_FLAG_MRTS(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT) & A5XX_RB_RENDER_CNTL_FLAG_MRTS__MASK;
+}
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS2__MASK 0xff000000
+#define A5XX_RB_RENDER_CNTL_FLAG_MRTS2__SHIFT 24
+static inline uint32_t A5XX_RB_RENDER_CNTL_FLAG_MRTS2(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_CNTL_FLAG_MRTS2__SHIFT) & A5XX_RB_RENDER_CNTL_FLAG_MRTS2__MASK;
+}
+
+#define REG_A5XX_RB_RAS_MSAA_CNTL 0x0000e142
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_RB_DEST_MSAA_CNTL 0x0000e143
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_RB_RENDER_CONTROL0 0x0000e144
+#define A5XX_RB_RENDER_CONTROL0_IJ_PERSP_PIXEL 0x00000001
+#define A5XX_RB_RENDER_CONTROL0_IJ_PERSP_CENTROID 0x00000002
+#define A5XX_RB_RENDER_CONTROL0_IJ_PERSP_SAMPLE 0x00000004
+#define A5XX_RB_RENDER_CONTROL0_IJ_LINEAR_PIXEL 0x00000008
+#define A5XX_RB_RENDER_CONTROL0_IJ_LINEAR_CENTROID 0x00000010
+#define A5XX_RB_RENDER_CONTROL0_IJ_LINEAR_SAMPLE 0x00000020
+#define A5XX_RB_RENDER_CONTROL0_COORD_MASK__MASK 0x000003c0
+#define A5XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT 6
+static inline uint32_t A5XX_RB_RENDER_CONTROL0_COORD_MASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT) & A5XX_RB_RENDER_CONTROL0_COORD_MASK__MASK;
+}
+
+#define REG_A5XX_RB_RENDER_CONTROL1 0x0000e145
+#define A5XX_RB_RENDER_CONTROL1_SAMPLEMASK 0x00000001
+#define A5XX_RB_RENDER_CONTROL1_FACENESS 0x00000002
+#define A5XX_RB_RENDER_CONTROL1_SAMPLEID 0x00000004
+
+#define REG_A5XX_RB_FS_OUTPUT_CNTL 0x0000e146
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT 0
+static inline uint32_t A5XX_RB_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+ return ((val) << A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_RB_FS_OUTPUT_CNTL_FRAG_WRITES_Z 0x00000020
+
+#define REG_A5XX_RB_RENDER_COMPONENTS 0x0000e147
+#define A5XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f
+#define A5XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT0(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT0__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0
+#define A5XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT1(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT1__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00
+#define A5XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT2(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT2__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000
+#define A5XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT3(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT3__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000
+#define A5XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT4(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT4__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000
+#define A5XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT5(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT5__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000
+#define A5XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT6(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT6__MASK;
+}
+#define A5XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000
+#define A5XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A5XX_RB_RENDER_COMPONENTS_RT7(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A5XX_RB_RENDER_COMPONENTS_RT7__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_CONTROL(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+#define A5XX_RB_MRT_CONTROL_BLEND 0x00000001
+#define A5XX_RB_MRT_CONTROL_BLEND2 0x00000002
+#define A5XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000004
+#define A5XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000078
+#define A5XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 3
+static inline uint32_t A5XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+ return ((val) << A5XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A5XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
+static inline uint32_t A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x0000e151 + 0x7*i0; }
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x0000e152 + 0x7*i0; }
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK 0x00001800
+#define A5XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT 11
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00008000
+
+static inline uint32_t REG_A5XX_RB_MRT_PITCH(uint32_t i0) { return 0x0000e153 + 0x7*i0; }
+#define A5XX_RB_MRT_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_PITCH__SHIFT) & A5XX_RB_MRT_PITCH__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x0000e154 + 0x7*i0; }
+#define A5XX_RB_MRT_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_ARRAY_PITCH__SHIFT) & A5XX_RB_MRT_ARRAY_PITCH__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x0000e155 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_BASE_HI(uint32_t i0) { return 0x0000e156 + 0x7*i0; }
+
+#define REG_A5XX_RB_BLEND_RED 0x0000e1a0
+#define A5XX_RB_BLEND_RED_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_RED_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_RED_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_RED_UINT__SHIFT) & A5XX_RB_BLEND_RED_UINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_RED_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_RED_SINT__SHIFT) & A5XX_RB_BLEND_RED_SINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_RED_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_RED_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_RED_FLOAT__SHIFT) & A5XX_RB_BLEND_RED_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_RED_F32 0x0000e1a1
+#define A5XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_RED_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_RED_F32__SHIFT) & A5XX_RB_BLEND_RED_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_GREEN 0x0000e1a2
+#define A5XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_GREEN_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_GREEN_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_GREEN_UINT__SHIFT) & A5XX_RB_BLEND_GREEN_UINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_GREEN_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_GREEN_SINT__SHIFT) & A5XX_RB_BLEND_GREEN_SINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_GREEN_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A5XX_RB_BLEND_GREEN_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_GREEN_F32 0x0000e1a3
+#define A5XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_GREEN_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_GREEN_F32__SHIFT) & A5XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE 0x0000e1a4
+#define A5XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_BLUE_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_BLUE_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_BLUE_UINT__SHIFT) & A5XX_RB_BLEND_BLUE_UINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_BLUE_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_BLUE_SINT__SHIFT) & A5XX_RB_BLEND_BLUE_SINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_BLUE_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A5XX_RB_BLEND_BLUE_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE_F32 0x0000e1a5
+#define A5XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_BLUE_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_BLUE_F32__SHIFT) & A5XX_RB_BLEND_BLUE_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA 0x0000e1a6
+#define A5XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_ALPHA_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_ALPHA_UINT__SHIFT) & A5XX_RB_BLEND_ALPHA_UINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_ALPHA_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_ALPHA_SINT__SHIFT) & A5XX_RB_BLEND_ALPHA_SINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_ALPHA_FLOAT(float val)
+{
+ return ((_mesa_float_to_half(val)) << A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A5XX_RB_BLEND_ALPHA_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA_F32 0x0000e1a7
+#define A5XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_ALPHA_F32__SHIFT) & A5XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
+#define REG_A5XX_RB_ALPHA_CONTROL 0x0000e1a8
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
+{
+ return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
+}
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_CNTL 0x0000e1a9
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A5XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A5XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_PLANE_CNTL 0x0000e1b0
+#define A5XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
+#define A5XX_RB_DEPTH_PLANE_CNTL_UNK1 0x00000002
+
+#define REG_A5XX_RB_DEPTH_CNTL 0x0000e1b1
+#define A5XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000001
+#define A5XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__MASK 0x0000001c
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT 2
+static inline uint32_t A5XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A5XX_RB_DEPTH_CNTL_ZFUNC__MASK;
+}
+#define A5XX_RB_DEPTH_CNTL_Z_READ_ENABLE 0x00000040
+
+#define REG_A5XX_RB_DEPTH_BUFFER_INFO 0x0000e1b2
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+ return ((val) << A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_LO 0x0000e1b3
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_HI 0x0000e1b4
+
+#define REG_A5XX_RB_DEPTH_BUFFER_PITCH 0x0000e1b5
+#define A5XX_RB_DEPTH_BUFFER_PITCH__MASK 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x0000e1b6
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_CONTROL 0x0000e1c0
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A5XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_INFO 0x0000e1c1
+#define A5XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
+
+#define REG_A5XX_RB_STENCIL_BASE_LO 0x0000e1c2
+
+#define REG_A5XX_RB_STENCIL_BASE_HI 0x0000e1c3
+
+#define REG_A5XX_RB_STENCIL_PITCH 0x0000e1c4
+#define A5XX_RB_STENCIL_PITCH__MASK 0xffffffff
+#define A5XX_RB_STENCIL_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_STENCIL_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_STENCIL_PITCH__SHIFT) & A5XX_RB_STENCIL_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_ARRAY_PITCH 0x0000e1c5
+#define A5XX_RB_STENCIL_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_STENCIL_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_STENCIL_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_STENCIL_ARRAY_PITCH__SHIFT) & A5XX_RB_STENCIL_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCILREFMASK 0x0000e1c6
+#define A5XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A5XX_RB_STENCILREFMASK_BF 0x0000e1c7
+#define A5XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
+#define A5XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
+static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
+#define A5XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
+static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
+#define A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A5XX_RB_WINDOW_OFFSET 0x0000e1d0
+#define A5XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A5XX_RB_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_WINDOW_OFFSET_X__SHIFT) & A5XX_RB_WINDOW_OFFSET_X__MASK;
+}
+#define A5XX_RB_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A5XX_RB_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_WINDOW_OFFSET_Y__SHIFT) & A5XX_RB_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A5XX_RB_SAMPLE_COUNT_CONTROL 0x0000e1d1
+#define A5XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
+
+#define REG_A5XX_RB_BLIT_CNTL 0x0000e210
+#define A5XX_RB_BLIT_CNTL_BUF__MASK 0x0000000f
+#define A5XX_RB_BLIT_CNTL_BUF__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_CNTL_BUF(enum a5xx_blit_buf val)
+{
+ return ((val) << A5XX_RB_BLIT_CNTL_BUF__SHIFT) & A5XX_RB_BLIT_CNTL_BUF__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_1 0x0000e211
+#define A5XX_RB_RESOLVE_CNTL_1_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_RESOLVE_CNTL_1_X__MASK 0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_1_X__SHIFT 0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_1_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_1_Y__MASK 0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT 16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_Y__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_2 0x0000e212
+#define A5XX_RB_RESOLVE_CNTL_2_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_RESOLVE_CNTL_2_X__MASK 0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_2_X__SHIFT 0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_2_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_2_Y__MASK 0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT 16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_Y__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_3 0x0000e213
+#define A5XX_RB_RESOLVE_CNTL_3_TILED 0x00000001
+
+#define REG_A5XX_RB_BLIT_DST_LO 0x0000e214
+
+#define REG_A5XX_RB_BLIT_DST_HI 0x0000e215
+
+#define REG_A5XX_RB_BLIT_DST_PITCH 0x0000e216
+#define A5XX_RB_BLIT_DST_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_DST_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_DST_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_BLIT_DST_PITCH__SHIFT) & A5XX_RB_BLIT_DST_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_BLIT_DST_ARRAY_PITCH 0x0000e217
+#define A5XX_RB_BLIT_DST_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_DST_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW0 0x0000e218
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW1 0x0000e219
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW2 0x0000e21a
+
+#define REG_A5XX_RB_CLEAR_COLOR_DW3 0x0000e21b
+
+#define REG_A5XX_RB_CLEAR_CNTL 0x0000e21c
+#define A5XX_RB_CLEAR_CNTL_FAST_CLEAR 0x00000002
+#define A5XX_RB_CLEAR_CNTL_MSAA_RESOLVE 0x00000004
+#define A5XX_RB_CLEAR_CNTL_MASK__MASK 0x000000f0
+#define A5XX_RB_CLEAR_CNTL_MASK__SHIFT 4
+static inline uint32_t A5XX_RB_CLEAR_CNTL_MASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_CLEAR_CNTL_MASK__SHIFT) & A5XX_RB_CLEAR_CNTL_MASK__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_LO 0x0000e240
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_HI 0x0000e241
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x0000e242
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER(uint32_t i0) { return 0x0000e243 + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ADDR_LO(uint32_t i0) { return 0x0000e243 + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ADDR_HI(uint32_t i0) { return 0x0000e244 + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t i0) { return 0x0000e245 + 0x4*i0; }
+#define A5XX_RB_MRT_FLAG_BUFFER_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_FLAG_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_FLAG_BUFFER_PITCH__SHIFT) & A5XX_RB_MRT_FLAG_BUFFER_PITCH__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(uint32_t i0) { return 0x0000e246 + 0x4*i0; }
+#define A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_MRT_FLAG_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_LO 0x0000e263
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_HI 0x0000e264
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_PITCH 0x0000e265
+#define A5XX_RB_BLIT_FLAG_DST_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_FLAG_DST_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_FLAG_DST_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_BLIT_FLAG_DST_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH 0x0000e266
+#define A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__SHIFT) & A5XX_RB_BLIT_FLAG_DST_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_SAMPLE_COUNT_ADDR_LO 0x0000e267
+
+#define REG_A5XX_RB_SAMPLE_COUNT_ADDR_HI 0x0000e268
+
+#define REG_A5XX_VPC_CNTL_0 0x0000e280
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK 0x0000007f
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_VPC_CNTL_0_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT) & A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK;
+}
+#define A5XX_VPC_CNTL_0_VARYING 0x00000800
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+#define REG_A5XX_UNKNOWN_E292 0x0000e292
+
+#define REG_A5XX_UNKNOWN_E293 0x0000e293
+
+static inline uint32_t REG_A5XX_VPC_VAR(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+#define REG_A5XX_VPC_GS_SIV_CNTL 0x0000e298
+
+#define REG_A5XX_VPC_CLIP_CNTL 0x0000e29a
+#define A5XX_VPC_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A5XX_VPC_CLIP_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A5XX_VPC_CLIP_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A5XX_VPC_CLIP_CNTL_CLIP_MASK__SHIFT) & A5XX_VPC_CLIP_CNTL_CLIP_MASK__MASK;
+}
+#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00
+#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8
+static inline uint32_t A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val)
+{
+ return ((val) << A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A5XX_VPC_CLIP_CNTL_CLIP_DIST_03_LOC__MASK;
+}
+#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000
+#define A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16
+static inline uint32_t A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val)
+{
+ return ((val) << A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A5XX_VPC_CLIP_CNTL_CLIP_DIST_47_LOC__MASK;
+}
+
+#define REG_A5XX_VPC_PACK 0x0000e29d
+#define A5XX_VPC_PACK_NUMNONPOSVAR__MASK 0x000000ff
+#define A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT 0
+static inline uint32_t A5XX_VPC_PACK_NUMNONPOSVAR(uint32_t val)
+{
+ return ((val) << A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A5XX_VPC_PACK_NUMNONPOSVAR__MASK;
+}
+#define A5XX_VPC_PACK_PSIZELOC__MASK 0x0000ff00
+#define A5XX_VPC_PACK_PSIZELOC__SHIFT 8
+static inline uint32_t A5XX_VPC_PACK_PSIZELOC(uint32_t val)
+{
+ return ((val) << A5XX_VPC_PACK_PSIZELOC__SHIFT) & A5XX_VPC_PACK_PSIZELOC__MASK;
+}
+
+#define REG_A5XX_VPC_FS_PRIMITIVEID_CNTL 0x0000e2a0
+
+#define REG_A5XX_VPC_SO_BUF_CNTL 0x0000e2a1
+#define A5XX_VPC_SO_BUF_CNTL_BUF0 0x00000001
+#define A5XX_VPC_SO_BUF_CNTL_BUF1 0x00000008
+#define A5XX_VPC_SO_BUF_CNTL_BUF2 0x00000040
+#define A5XX_VPC_SO_BUF_CNTL_BUF3 0x00000200
+#define A5XX_VPC_SO_BUF_CNTL_ENABLE 0x00008000
+
+#define REG_A5XX_VPC_SO_OVERRIDE 0x0000e2a2
+#define A5XX_VPC_SO_OVERRIDE_SO_DISABLE 0x00000001
+
+#define REG_A5XX_VPC_SO_CNTL 0x0000e2a3
+#define A5XX_VPC_SO_CNTL_ENABLE 0x00010000
+
+#define REG_A5XX_VPC_SO_PROG 0x0000e2a4
+#define A5XX_VPC_SO_PROG_A_BUF__MASK 0x00000003
+#define A5XX_VPC_SO_PROG_A_BUF__SHIFT 0
+static inline uint32_t A5XX_VPC_SO_PROG_A_BUF(uint32_t val)
+{
+ return ((val) << A5XX_VPC_SO_PROG_A_BUF__SHIFT) & A5XX_VPC_SO_PROG_A_BUF__MASK;
+}
+#define A5XX_VPC_SO_PROG_A_OFF__MASK 0x000007fc
+#define A5XX_VPC_SO_PROG_A_OFF__SHIFT 2
+static inline uint32_t A5XX_VPC_SO_PROG_A_OFF(uint32_t val)
+{
+ return ((val >> 2) << A5XX_VPC_SO_PROG_A_OFF__SHIFT) & A5XX_VPC_SO_PROG_A_OFF__MASK;
+}
+#define A5XX_VPC_SO_PROG_A_EN 0x00000800
+#define A5XX_VPC_SO_PROG_B_BUF__MASK 0x00003000
+#define A5XX_VPC_SO_PROG_B_BUF__SHIFT 12
+static inline uint32_t A5XX_VPC_SO_PROG_B_BUF(uint32_t val)
+{
+ return ((val) << A5XX_VPC_SO_PROG_B_BUF__SHIFT) & A5XX_VPC_SO_PROG_B_BUF__MASK;
+}
+#define A5XX_VPC_SO_PROG_B_OFF__MASK 0x007fc000
+#define A5XX_VPC_SO_PROG_B_OFF__SHIFT 14
+static inline uint32_t A5XX_VPC_SO_PROG_B_OFF(uint32_t val)
+{
+ return ((val >> 2) << A5XX_VPC_SO_PROG_B_OFF__SHIFT) & A5XX_VPC_SO_PROG_B_OFF__MASK;
+}
+#define A5XX_VPC_SO_PROG_B_EN 0x00800000
+
+static inline uint32_t REG_A5XX_VPC_SO(uint32_t i0) { return 0x0000e2a7 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_VPC_SO_BUFFER_BASE_LO(uint32_t i0) { return 0x0000e2a7 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_VPC_SO_BUFFER_BASE_HI(uint32_t i0) { return 0x0000e2a8 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_VPC_SO_BUFFER_SIZE(uint32_t i0) { return 0x0000e2a9 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_VPC_SO_NCOMP(uint32_t i0) { return 0x0000e2aa + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_VPC_SO_BUFFER_OFFSET(uint32_t i0) { return 0x0000e2ab + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_VPC_SO_FLUSH_BASE_LO(uint32_t i0) { return 0x0000e2ac + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_VPC_SO_FLUSH_BASE_HI(uint32_t i0) { return 0x0000e2ad + 0x7*i0; }
+
+#define REG_A5XX_PC_PRIMITIVE_CNTL 0x0000e384
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK 0x0000007f
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define A5XX_PC_PRIMITIVE_CNTL_PRIMITIVE_RESTART 0x00000100
+#define A5XX_PC_PRIMITIVE_CNTL_COUNT_PRIMITIVES 0x00000200
+#define A5XX_PC_PRIMITIVE_CNTL_PROVOKING_VTX_LAST 0x00000400
+
+#define REG_A5XX_PC_PRIM_VTX_CNTL 0x0000e385
+#define A5XX_PC_PRIM_VTX_CNTL_PSIZE 0x00000800
+
+#define REG_A5XX_PC_RASTER_CNTL 0x0000e388
+#define A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__MASK 0x00000007
+#define A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__SHIFT 0
+static inline uint32_t A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__SHIFT) & A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__MASK;
+}
+#define A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__MASK 0x00000038
+#define A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__SHIFT 3
+static inline uint32_t A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__MASK;
+}
+#define A5XX_PC_RASTER_CNTL_POLYMODE_ENABLE 0x00000040
+
+#define REG_A5XX_PC_CLIP_CNTL 0x0000e389
+#define A5XX_PC_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A5XX_PC_CLIP_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A5XX_PC_CLIP_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A5XX_PC_CLIP_CNTL_CLIP_MASK__SHIFT) & A5XX_PC_CLIP_CNTL_CLIP_MASK__MASK;
+}
+
+#define REG_A5XX_PC_RESTART_INDEX 0x0000e38c
+
+#define REG_A5XX_PC_GS_LAYERED 0x0000e38d
+
+#define REG_A5XX_PC_GS_PARAM 0x0000e38e
+#define A5XX_PC_GS_PARAM_MAX_VERTICES__MASK 0x000003ff
+#define A5XX_PC_GS_PARAM_MAX_VERTICES__SHIFT 0
+static inline uint32_t A5XX_PC_GS_PARAM_MAX_VERTICES(uint32_t val)
+{
+ return ((val) << A5XX_PC_GS_PARAM_MAX_VERTICES__SHIFT) & A5XX_PC_GS_PARAM_MAX_VERTICES__MASK;
+}
+#define A5XX_PC_GS_PARAM_INVOCATIONS__MASK 0x0000f800
+#define A5XX_PC_GS_PARAM_INVOCATIONS__SHIFT 11
+static inline uint32_t A5XX_PC_GS_PARAM_INVOCATIONS(uint32_t val)
+{
+ return ((val) << A5XX_PC_GS_PARAM_INVOCATIONS__SHIFT) & A5XX_PC_GS_PARAM_INVOCATIONS__MASK;
+}
+#define A5XX_PC_GS_PARAM_PRIMTYPE__MASK 0x01800000
+#define A5XX_PC_GS_PARAM_PRIMTYPE__SHIFT 23
+static inline uint32_t A5XX_PC_GS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A5XX_PC_GS_PARAM_PRIMTYPE__SHIFT) & A5XX_PC_GS_PARAM_PRIMTYPE__MASK;
+}
+
+#define REG_A5XX_PC_HS_PARAM 0x0000e38f
+#define A5XX_PC_HS_PARAM_VERTICES_OUT__MASK 0x0000003f
+#define A5XX_PC_HS_PARAM_VERTICES_OUT__SHIFT 0
+static inline uint32_t A5XX_PC_HS_PARAM_VERTICES_OUT(uint32_t val)
+{
+ return ((val) << A5XX_PC_HS_PARAM_VERTICES_OUT__SHIFT) & A5XX_PC_HS_PARAM_VERTICES_OUT__MASK;
+}
+#define A5XX_PC_HS_PARAM_SPACING__MASK 0x00600000
+#define A5XX_PC_HS_PARAM_SPACING__SHIFT 21
+static inline uint32_t A5XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val)
+{
+ return ((val) << A5XX_PC_HS_PARAM_SPACING__SHIFT) & A5XX_PC_HS_PARAM_SPACING__MASK;
+}
+#define A5XX_PC_HS_PARAM_CW 0x00800000
+#define A5XX_PC_HS_PARAM_CONNECTED 0x01000000
+
+#define REG_A5XX_PC_POWER_CNTL 0x0000e3b0
+
+#define REG_A5XX_VFD_CONTROL_0 0x0000e400
+#define A5XX_VFD_CONTROL_0_VTXCNT__MASK 0x0000003f
+#define A5XX_VFD_CONTROL_0_VTXCNT__SHIFT 0
+static inline uint32_t A5XX_VFD_CONTROL_0_VTXCNT(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_0_VTXCNT__SHIFT) & A5XX_VFD_CONTROL_0_VTXCNT__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_1 0x0000e401
+#define A5XX_VFD_CONTROL_1_REGID4VTX__MASK 0x000000ff
+#define A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT 0
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A5XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+#define A5XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00
+#define A5XX_VFD_CONTROL_1_REGID4INST__SHIFT 8
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A5XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+#define A5XX_VFD_CONTROL_1_REGID4PRIMID__MASK 0x00ff0000
+#define A5XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT 16
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4PRIMID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT) & A5XX_VFD_CONTROL_1_REGID4PRIMID__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_2 0x0000e402
+#define A5XX_VFD_CONTROL_2_REGID_PATCHID__MASK 0x000000ff
+#define A5XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT 0
+static inline uint32_t A5XX_VFD_CONTROL_2_REGID_PATCHID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT) & A5XX_VFD_CONTROL_2_REGID_PATCHID__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_3 0x0000e403
+#define A5XX_VFD_CONTROL_3_REGID_PATCHID__MASK 0x0000ff00
+#define A5XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT 8
+static inline uint32_t A5XX_VFD_CONTROL_3_REGID_PATCHID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT) & A5XX_VFD_CONTROL_3_REGID_PATCHID__MASK;
+}
+#define A5XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000
+#define A5XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16
+static inline uint32_t A5XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A5XX_VFD_CONTROL_3_REGID_TESSX__MASK;
+}
+#define A5XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000
+#define A5XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24
+static inline uint32_t A5XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A5XX_VFD_CONTROL_3_REGID_TESSY__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_4 0x0000e404
+
+#define REG_A5XX_VFD_CONTROL_5 0x0000e405
+
+#define REG_A5XX_VFD_INDEX_OFFSET 0x0000e408
+
+#define REG_A5XX_VFD_INSTANCE_START_OFFSET 0x0000e409
+
+static inline uint32_t REG_A5XX_VFD_FETCH(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_HI(uint32_t i0) { return 0x0000e40b + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000e40c + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000e40d + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DECODE(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+#define A5XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f
+#define A5XX_VFD_DECODE_INSTR_IDX__SHIFT 0
+static inline uint32_t A5XX_VFD_DECODE_INSTR_IDX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_IDX__SHIFT) & A5XX_VFD_DECODE_INSTR_IDX__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_INSTANCED 0x00020000
+#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK 0x0ff00000
+#define A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
+static inline uint32_t A5XX_VFD_DECODE_INSTR_FORMAT(enum a5xx_vtx_fmt val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A5XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_SWAP__MASK 0x30000000
+#define A5XX_VFD_DECODE_INSTR_SWAP__SHIFT 28
+static inline uint32_t A5XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A5XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_UNK30 0x40000000
+#define A5XX_VFD_DECODE_INSTR_FLOAT 0x80000000
+
+static inline uint32_t REG_A5XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000e48b + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK;
+}
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK 0x00000ff0
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT 4
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK;
+}
+
+#define REG_A5XX_VFD_POWER_CNTL 0x0000e4f0
+
+#define REG_A5XX_SP_SP_CNTL 0x0000e580
+
+#define REG_A5XX_SP_VS_CONFIG 0x0000e584
+#define A5XX_SP_VS_CONFIG_ENABLED 0x00000001
+#define A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_VS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_VS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_VS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_FS_CONFIG 0x0000e585
+#define A5XX_SP_FS_CONFIG_ENABLED 0x00000001
+#define A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_FS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_FS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_FS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_HS_CONFIG 0x0000e586
+#define A5XX_SP_HS_CONFIG_ENABLED 0x00000001
+#define A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_HS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_HS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_HS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_DS_CONFIG 0x0000e587
+#define A5XX_SP_DS_CONFIG_ENABLED 0x00000001
+#define A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_DS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_DS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_DS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_GS_CONFIG 0x0000e588
+#define A5XX_SP_GS_CONFIG_ENABLED 0x00000001
+#define A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_GS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_GS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_GS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_CS_CONFIG 0x0000e589
+#define A5XX_SP_CS_CONFIG_ENABLED 0x00000001
+#define A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_CS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_CS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_VS_CONFIG_MAX_CONST 0x0000e58a
+
+#define REG_A5XX_SP_FS_CONFIG_MAX_CONST 0x0000e58b
+
+#define REG_A5XX_SP_VS_CTRL_REG0 0x0000e590
+#define A5XX_SP_VS_CTRL_REG0_BUFFER 0x00000004
+#define A5XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A5XX_SP_PRIMITIVE_CNTL 0x0000e592
+#define A5XX_SP_PRIMITIVE_CNTL_VSOUT__MASK 0x0000001f
+#define A5XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT 0
+static inline uint32_t A5XX_SP_PRIMITIVE_CNTL_VSOUT(uint32_t val)
+{
+ return ((val) << A5XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT) & A5XX_SP_PRIMITIVE_CNTL_VSOUT__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_VS_OUT(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+#define A5XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A5XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A5XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A5XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A5XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A5XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A5XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_VS_VPC_DST(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E5AB 0x0000e5ab
+
+#define REG_A5XX_SP_VS_OBJ_START_LO 0x0000e5ac
+
+#define REG_A5XX_SP_VS_OBJ_START_HI 0x0000e5ad
+
+#define REG_A5XX_SP_FS_CTRL_REG0 0x0000e5c0
+#define A5XX_SP_FS_CTRL_REG0_BUFFER 0x00000004
+#define A5XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E5C2 0x0000e5c2
+
+#define REG_A5XX_SP_FS_OBJ_START_LO 0x0000e5c3
+
+#define REG_A5XX_SP_FS_OBJ_START_HI 0x0000e5c4
+
+#define REG_A5XX_SP_BLEND_CNTL 0x0000e5c9
+#define A5XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A5XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A5XX_SP_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A5XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A5XX_SP_BLEND_CNTL_UNK8 0x00000100
+#define A5XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
+
+#define REG_A5XX_SP_FS_OUTPUT_CNTL 0x0000e5ca
+#define A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
+#define A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT 0
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK 0x00001fe0
+#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT 5
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK 0x001fe000
+#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT 13
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+#define A5XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff
+#define A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0
+static inline uint32_t A5XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_REG_REGID__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100
+
+static inline uint32_t REG_A5XX_SP_FS_MRT(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
+}
+#define A5XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100
+#define A5XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200
+#define A5XX_SP_FS_MRT_REG_COLOR_SRGB 0x00000400
+
+#define REG_A5XX_UNKNOWN_E5DB 0x0000e5db
+
+#define REG_A5XX_SP_CS_CTRL_REG0 0x0000e5f0
+#define A5XX_SP_CS_CTRL_REG0_BUFFER 0x00000004
+#define A5XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_CS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_CS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_CS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_CS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E5F2 0x0000e5f2
+
+#define REG_A5XX_SP_CS_OBJ_START_LO 0x0000e5f3
+
+#define REG_A5XX_SP_CS_OBJ_START_HI 0x0000e5f4
+
+#define REG_A5XX_SP_HS_CTRL_REG0 0x0000e600
+#define A5XX_SP_HS_CTRL_REG0_BUFFER 0x00000004
+#define A5XX_SP_HS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_HS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_HS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_HS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E602 0x0000e602
+
+#define REG_A5XX_SP_HS_OBJ_START_LO 0x0000e603
+
+#define REG_A5XX_SP_HS_OBJ_START_HI 0x0000e604
+
+#define REG_A5XX_SP_DS_CTRL_REG0 0x0000e610
+#define A5XX_SP_DS_CTRL_REG0_BUFFER 0x00000004
+#define A5XX_SP_DS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_DS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_DS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_DS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E62B 0x0000e62b
+
+#define REG_A5XX_SP_DS_OBJ_START_LO 0x0000e62c
+
+#define REG_A5XX_SP_DS_OBJ_START_HI 0x0000e62d
+
+#define REG_A5XX_SP_GS_CTRL_REG0 0x0000e640
+#define A5XX_SP_GS_CTRL_REG0_BUFFER 0x00000004
+#define A5XX_SP_GS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_GS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_GS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_GS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E65B 0x0000e65b
+
+#define REG_A5XX_SP_GS_OBJ_START_LO 0x0000e65c
+
+#define REG_A5XX_SP_GS_OBJ_START_HI 0x0000e65d
+
+#define REG_A5XX_TPL1_TP_RAS_MSAA_CNTL 0x0000e704
+#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_TPL1_TP_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_TPL1_TP_DEST_MSAA_CNTL 0x0000e705
+#define A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_TPL1_TP_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_TPL1_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_TPL1_TP_BORDER_COLOR_BASE_ADDR_LO 0x0000e706
+
+#define REG_A5XX_TPL1_TP_BORDER_COLOR_BASE_ADDR_HI 0x0000e707
+
+#define REG_A5XX_TPL1_VS_TEX_COUNT 0x0000e700
+
+#define REG_A5XX_TPL1_HS_TEX_COUNT 0x0000e701
+
+#define REG_A5XX_TPL1_DS_TEX_COUNT 0x0000e702
+
+#define REG_A5XX_TPL1_GS_TEX_COUNT 0x0000e703
+
+#define REG_A5XX_TPL1_VS_TEX_SAMP_LO 0x0000e722
+
+#define REG_A5XX_TPL1_VS_TEX_SAMP_HI 0x0000e723
+
+#define REG_A5XX_TPL1_HS_TEX_SAMP_LO 0x0000e724
+
+#define REG_A5XX_TPL1_HS_TEX_SAMP_HI 0x0000e725
+
+#define REG_A5XX_TPL1_DS_TEX_SAMP_LO 0x0000e726
+
+#define REG_A5XX_TPL1_DS_TEX_SAMP_HI 0x0000e727
+
+#define REG_A5XX_TPL1_GS_TEX_SAMP_LO 0x0000e728
+
+#define REG_A5XX_TPL1_GS_TEX_SAMP_HI 0x0000e729
+
+#define REG_A5XX_TPL1_VS_TEX_CONST_LO 0x0000e72a
+
+#define REG_A5XX_TPL1_VS_TEX_CONST_HI 0x0000e72b
+
+#define REG_A5XX_TPL1_HS_TEX_CONST_LO 0x0000e72c
+
+#define REG_A5XX_TPL1_HS_TEX_CONST_HI 0x0000e72d
+
+#define REG_A5XX_TPL1_DS_TEX_CONST_LO 0x0000e72e
+
+#define REG_A5XX_TPL1_DS_TEX_CONST_HI 0x0000e72f
+
+#define REG_A5XX_TPL1_GS_TEX_CONST_LO 0x0000e730
+
+#define REG_A5XX_TPL1_GS_TEX_CONST_HI 0x0000e731
+
+#define REG_A5XX_TPL1_FS_TEX_COUNT 0x0000e750
+
+#define REG_A5XX_TPL1_CS_TEX_COUNT 0x0000e751
+
+#define REG_A5XX_TPL1_FS_TEX_SAMP_LO 0x0000e75a
+
+#define REG_A5XX_TPL1_FS_TEX_SAMP_HI 0x0000e75b
+
+#define REG_A5XX_TPL1_CS_TEX_SAMP_LO 0x0000e75c
+
+#define REG_A5XX_TPL1_CS_TEX_SAMP_HI 0x0000e75d
+
+#define REG_A5XX_TPL1_FS_TEX_CONST_LO 0x0000e75e
+
+#define REG_A5XX_TPL1_FS_TEX_CONST_HI 0x0000e75f
+
+#define REG_A5XX_TPL1_CS_TEX_CONST_LO 0x0000e760
+
+#define REG_A5XX_TPL1_CS_TEX_CONST_HI 0x0000e761
+
+#define REG_A5XX_TPL1_TP_FS_ROTATION_CNTL 0x0000e764
+
+#define REG_A5XX_HLSQ_CONTROL_0_REG 0x0000e784
+#define A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000001
+#define A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
+}
+#define A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__MASK 0x00000004
+#define A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__SHIFT 2
+static inline uint32_t A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__SHIFT) & A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_1_REG 0x0000e785
+#define A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK 0x0000003f
+#define A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__SHIFT) & A5XX_HLSQ_CONTROL_1_REG_PRIMALLOCTHRESHOLD__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_2_REG 0x0000e786
+#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff
+#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK;
+}
+#define A5XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK 0xff000000
+#define A5XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT 24
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_CENTERRHW(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_3_REG 0x0000e787
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK;
+}
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK;
+}
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000
+#define A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_4_REG 0x0000e788
+#define A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff
+#define A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK;
+}
+#define A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK 0x0000ff00
+#define A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT 8
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK;
+}
+#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000
+#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000
+#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_UPDATE_CNTL 0x0000e78a
+
+#define REG_A5XX_HLSQ_VS_CONFIG 0x0000e78b
+#define A5XX_HLSQ_VS_CONFIG_ENABLED 0x00000001
+#define A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_VS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_VS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_FS_CONFIG 0x0000e78c
+#define A5XX_HLSQ_FS_CONFIG_ENABLED 0x00000001
+#define A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_FS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_FS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_HS_CONFIG 0x0000e78d
+#define A5XX_HLSQ_HS_CONFIG_ENABLED 0x00000001
+#define A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_HS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_HS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_DS_CONFIG 0x0000e78e
+#define A5XX_HLSQ_DS_CONFIG_ENABLED 0x00000001
+#define A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_DS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_DS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_GS_CONFIG 0x0000e78f
+#define A5XX_HLSQ_GS_CONFIG_ENABLED 0x00000001
+#define A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_GS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_GS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_CONFIG 0x0000e790
+#define A5XX_HLSQ_CS_CONFIG_ENABLED 0x00000001
+#define A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_VS_CNTL 0x0000e791
+#define A5XX_HLSQ_VS_CNTL_SSBO_ENABLE 0x00000001
+#define A5XX_HLSQ_VS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_VS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_VS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_VS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_FS_CNTL 0x0000e792
+#define A5XX_HLSQ_FS_CNTL_SSBO_ENABLE 0x00000001
+#define A5XX_HLSQ_FS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_FS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_FS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_FS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_HS_CNTL 0x0000e793
+#define A5XX_HLSQ_HS_CNTL_SSBO_ENABLE 0x00000001
+#define A5XX_HLSQ_HS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_HS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_HS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_HS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_DS_CNTL 0x0000e794
+#define A5XX_HLSQ_DS_CNTL_SSBO_ENABLE 0x00000001
+#define A5XX_HLSQ_DS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_DS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_DS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_DS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_GS_CNTL 0x0000e795
+#define A5XX_HLSQ_GS_CNTL_SSBO_ENABLE 0x00000001
+#define A5XX_HLSQ_GS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_GS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_GS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_GS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_CNTL 0x0000e796
+#define A5XX_HLSQ_CS_CNTL_SSBO_ENABLE 0x00000001
+#define A5XX_HLSQ_CS_CNTL_INSTRLEN__MASK 0xfffffffe
+#define A5XX_HLSQ_CS_CNTL_INSTRLEN__SHIFT 1
+static inline uint32_t A5XX_HLSQ_CS_CNTL_INSTRLEN(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_CNTL_INSTRLEN__SHIFT) & A5XX_HLSQ_CS_CNTL_INSTRLEN__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_X 0x0000e7b9
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000e7ba
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000e7bb
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_0 0x0000e7b0
+#define A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK 0x00000003
+#define A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK;
+}
+#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc
+#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT 2
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK;
+}
+#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000
+#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT 12
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK;
+}
+#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000
+#define A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT) & A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_1 0x0000e7b1
+#define A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_2 0x0000e7b2
+#define A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_3 0x0000e7b3
+#define A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_4 0x0000e7b4
+#define A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_5 0x0000e7b5
+#define A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_6 0x0000e7b6
+#define A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_CNTL_0 0x0000e7b7
+#define A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff
+#define A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT) & A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK;
+}
+#define A5XX_HLSQ_CS_CNTL_0_UNK0__MASK 0x0000ff00
+#define A5XX_HLSQ_CS_CNTL_0_UNK0__SHIFT 8
+static inline uint32_t A5XX_HLSQ_CS_CNTL_0_UNK0(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_CNTL_0_UNK0__SHIFT) & A5XX_HLSQ_CS_CNTL_0_UNK0__MASK;
+}
+#define A5XX_HLSQ_CS_CNTL_0_UNK1__MASK 0x00ff0000
+#define A5XX_HLSQ_CS_CNTL_0_UNK1__SHIFT 16
+static inline uint32_t A5XX_HLSQ_CS_CNTL_0_UNK1(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_CNTL_0_UNK1__SHIFT) & A5XX_HLSQ_CS_CNTL_0_UNK1__MASK;
+}
+#define A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK 0xff000000
+#define A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT 24
+static inline uint32_t A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT) & A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_CNTL_1 0x0000e7b8
+
+#define REG_A5XX_UNKNOWN_E7C0 0x0000e7c0
+
+#define REG_A5XX_HLSQ_VS_CONSTLEN 0x0000e7c3
+
+#define REG_A5XX_HLSQ_VS_INSTRLEN 0x0000e7c4
+
+#define REG_A5XX_UNKNOWN_E7C5 0x0000e7c5
+
+#define REG_A5XX_HLSQ_HS_CONSTLEN 0x0000e7c8
+
+#define REG_A5XX_HLSQ_HS_INSTRLEN 0x0000e7c9
+
+#define REG_A5XX_UNKNOWN_E7CA 0x0000e7ca
+
+#define REG_A5XX_HLSQ_DS_CONSTLEN 0x0000e7cd
+
+#define REG_A5XX_HLSQ_DS_INSTRLEN 0x0000e7ce
+
+#define REG_A5XX_UNKNOWN_E7CF 0x0000e7cf
+
+#define REG_A5XX_HLSQ_GS_CONSTLEN 0x0000e7d2
+
+#define REG_A5XX_HLSQ_GS_INSTRLEN 0x0000e7d3
+
+#define REG_A5XX_UNKNOWN_E7D4 0x0000e7d4
+
+#define REG_A5XX_HLSQ_FS_CONSTLEN 0x0000e7d7
+
+#define REG_A5XX_HLSQ_FS_INSTRLEN 0x0000e7d8
+
+#define REG_A5XX_UNKNOWN_E7D9 0x0000e7d9
+
+#define REG_A5XX_HLSQ_CS_CONSTLEN 0x0000e7dc
+
+#define REG_A5XX_HLSQ_CS_INSTRLEN 0x0000e7dd
+
+#define REG_A5XX_RB_2D_BLIT_CNTL 0x00002100
+
+#define REG_A5XX_RB_2D_SRC_SOLID_DW0 0x00002101
+
+#define REG_A5XX_RB_2D_SRC_SOLID_DW1 0x00002102
+
+#define REG_A5XX_RB_2D_SRC_SOLID_DW2 0x00002103
+
+#define REG_A5XX_RB_2D_SRC_SOLID_DW3 0x00002104
+
+#define REG_A5XX_RB_2D_SRC_INFO 0x00002107
+#define A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_2D_SRC_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_2D_SRC_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_RB_2D_SRC_INFO_TILE_MODE__SHIFT) & A5XX_RB_2D_SRC_INFO_TILE_MODE__MASK;
+}
+#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK;
+}
+#define A5XX_RB_2D_SRC_INFO_FLAGS 0x00001000
+#define A5XX_RB_2D_SRC_INFO_SRGB 0x00002000
+
+#define REG_A5XX_RB_2D_SRC_LO 0x00002108
+
+#define REG_A5XX_RB_2D_SRC_HI 0x00002109
+
+#define REG_A5XX_RB_2D_SRC_SIZE 0x0000210a
+#define A5XX_RB_2D_SRC_SIZE_PITCH__MASK 0x0000ffff
+#define A5XX_RB_2D_SRC_SIZE_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_2D_SRC_SIZE_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_2D_SRC_SIZE_PITCH__SHIFT) & A5XX_RB_2D_SRC_SIZE_PITCH__MASK;
+}
+#define A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__MASK 0xffff0000
+#define A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__SHIFT 16
+static inline uint32_t A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__SHIFT) & A5XX_RB_2D_SRC_SIZE_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_2D_DST_INFO 0x00002110
+#define A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_2D_DST_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_2D_DST_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_2D_DST_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_RB_2D_DST_INFO_TILE_MODE__SHIFT) & A5XX_RB_2D_DST_INFO_TILE_MODE__MASK;
+}
+#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK;
+}
+#define A5XX_RB_2D_DST_INFO_FLAGS 0x00001000
+#define A5XX_RB_2D_DST_INFO_SRGB 0x00002000
+
+#define REG_A5XX_RB_2D_DST_LO 0x00002111
+
+#define REG_A5XX_RB_2D_DST_HI 0x00002112
+
+#define REG_A5XX_RB_2D_DST_SIZE 0x00002113
+#define A5XX_RB_2D_DST_SIZE_PITCH__MASK 0x0000ffff
+#define A5XX_RB_2D_DST_SIZE_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_2D_DST_SIZE_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_2D_DST_SIZE_PITCH__SHIFT) & A5XX_RB_2D_DST_SIZE_PITCH__MASK;
+}
+#define A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__MASK 0xffff0000
+#define A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__SHIFT 16
+static inline uint32_t A5XX_RB_2D_DST_SIZE_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__SHIFT) & A5XX_RB_2D_DST_SIZE_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_2D_SRC_FLAGS_LO 0x00002140
+
+#define REG_A5XX_RB_2D_SRC_FLAGS_HI 0x00002141
+
+#define REG_A5XX_RB_2D_SRC_FLAGS_PITCH 0x00002142
+#define A5XX_RB_2D_SRC_FLAGS_PITCH__MASK 0xffffffff
+#define A5XX_RB_2D_SRC_FLAGS_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_2D_SRC_FLAGS_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_2D_SRC_FLAGS_PITCH__SHIFT) & A5XX_RB_2D_SRC_FLAGS_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_2D_DST_FLAGS_LO 0x00002143
+
+#define REG_A5XX_RB_2D_DST_FLAGS_HI 0x00002144
+
+#define REG_A5XX_RB_2D_DST_FLAGS_PITCH 0x00002145
+#define A5XX_RB_2D_DST_FLAGS_PITCH__MASK 0xffffffff
+#define A5XX_RB_2D_DST_FLAGS_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_2D_DST_FLAGS_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_2D_DST_FLAGS_PITCH__SHIFT) & A5XX_RB_2D_DST_FLAGS_PITCH__MASK;
+}
+
+#define REG_A5XX_GRAS_2D_BLIT_CNTL 0x00002180
+
+#define REG_A5XX_GRAS_2D_SRC_INFO 0x00002181
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_GRAS_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_GRAS_2D_SRC_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_GRAS_2D_SRC_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_GRAS_2D_SRC_INFO_TILE_MODE__SHIFT) & A5XX_GRAS_2D_SRC_INFO_TILE_MODE__MASK;
+}
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK;
+}
+#define A5XX_GRAS_2D_SRC_INFO_FLAGS 0x00001000
+#define A5XX_GRAS_2D_SRC_INFO_SRGB 0x00002000
+
+#define REG_A5XX_GRAS_2D_DST_INFO 0x00002182
+#define A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_GRAS_2D_DST_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_GRAS_2D_DST_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_GRAS_2D_DST_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_GRAS_2D_DST_INFO_TILE_MODE__SHIFT) & A5XX_GRAS_2D_DST_INFO_TILE_MODE__MASK;
+}
+#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK;
+}
+#define A5XX_GRAS_2D_DST_INFO_FLAGS 0x00001000
+#define A5XX_GRAS_2D_DST_INFO_SRGB 0x00002000
+
+#define REG_A5XX_UNKNOWN_2184 0x00002184
+
+#define REG_A5XX_TEX_SAMP_0 0x00000000
+#define A5XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
+#define A5XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
+#define A5XX_TEX_SAMP_0_XY_MAG__SHIFT 1
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MAG(enum a5xx_tex_filter val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_XY_MAG__SHIFT) & A5XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A5XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
+#define A5XX_TEX_SAMP_0_XY_MIN__SHIFT 3
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MIN(enum a5xx_tex_filter val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_XY_MIN__SHIFT) & A5XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
+#define A5XX_TEX_SAMP_0_WRAP_S__SHIFT 5
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_S(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_S__SHIFT) & A5XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
+#define A5XX_TEX_SAMP_0_WRAP_T__SHIFT 8
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_T(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_T__SHIFT) & A5XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
+#define A5XX_TEX_SAMP_0_WRAP_R__SHIFT 11
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_R(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_R__SHIFT) & A5XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A5XX_TEX_SAMP_0_ANISO__MASK 0x0001c000
+#define A5XX_TEX_SAMP_0_ANISO__SHIFT 14
+static inline uint32_t A5XX_TEX_SAMP_0_ANISO(enum a5xx_tex_aniso val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_ANISO__SHIFT) & A5XX_TEX_SAMP_0_ANISO__MASK;
+}
+#define A5XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
+#define A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
+static inline uint32_t A5XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 256.0))) << A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A5XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
+
+#define REG_A5XX_TEX_SAMP_1 0x00000001
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
+static inline uint32_t A5XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A5XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
+#define A5XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
+#define A5XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
+#define A5XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
+#define A5XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
+static inline uint32_t A5XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A5XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A5XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
+#define A5XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
+static inline uint32_t A5XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A5XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
+
+#define REG_A5XX_TEX_SAMP_2 0x00000002
+#define A5XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK 0xffffff80
+#define A5XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT 7
+static inline uint32_t A5XX_TEX_SAMP_2_BCOLOR_OFFSET(uint32_t val)
+{
+ return ((val) << A5XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT) & A5XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK;
+}
+
+#define REG_A5XX_TEX_SAMP_3 0x00000003
+
+#define REG_A5XX_TEX_CONST_0 0x00000000
+#define A5XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003
+#define A5XX_TEX_CONST_0_TILE_MODE__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_0_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_TEX_CONST_0_TILE_MODE__SHIFT) & A5XX_TEX_CONST_0_TILE_MODE__MASK;
+}
+#define A5XX_TEX_CONST_0_SRGB 0x00000004
+#define A5XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A5XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_X(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_X__SHIFT) & A5XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A5XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Y(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A5XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Z(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A5XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_W(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_W__SHIFT) & A5XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A5XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
+#define A5XX_TEX_CONST_0_MIPLVLS__SHIFT 16
+static inline uint32_t A5XX_TEX_CONST_0_MIPLVLS(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_0_MIPLVLS__SHIFT) & A5XX_TEX_CONST_0_MIPLVLS__MASK;
+}
+#define A5XX_TEX_CONST_0_SAMPLES__MASK 0x00300000
+#define A5XX_TEX_CONST_0_SAMPLES__SHIFT 20
+static inline uint32_t A5XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SAMPLES__SHIFT) & A5XX_TEX_CONST_0_SAMPLES__MASK;
+}
+#define A5XX_TEX_CONST_0_FMT__MASK 0x3fc00000
+#define A5XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A5XX_TEX_CONST_0_FMT(enum a5xx_tex_fmt val)
+{
+ return ((val) << A5XX_TEX_CONST_0_FMT__SHIFT) & A5XX_TEX_CONST_0_FMT__MASK;
+}
+#define A5XX_TEX_CONST_0_SWAP__MASK 0xc0000000
+#define A5XX_TEX_CONST_0_SWAP__SHIFT 30
+static inline uint32_t A5XX_TEX_CONST_0_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWAP__SHIFT) & A5XX_TEX_CONST_0_SWAP__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_1 0x00000001
+#define A5XX_TEX_CONST_1_WIDTH__MASK 0x00007fff
+#define A5XX_TEX_CONST_1_WIDTH__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_1_WIDTH__SHIFT) & A5XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A5XX_TEX_CONST_1_HEIGHT__MASK 0x3fff8000
+#define A5XX_TEX_CONST_1_HEIGHT__SHIFT 15
+static inline uint32_t A5XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_1_HEIGHT__SHIFT) & A5XX_TEX_CONST_1_HEIGHT__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_2 0x00000002
+#define A5XX_TEX_CONST_2_BUFFER 0x00000010
+#define A5XX_TEX_CONST_2_PITCHALIGN__MASK 0x0000000f
+#define A5XX_TEX_CONST_2_PITCHALIGN__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_2_PITCHALIGN(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_2_PITCHALIGN__SHIFT) & A5XX_TEX_CONST_2_PITCHALIGN__MASK;
+}
+#define A5XX_TEX_CONST_2_PITCH__MASK 0x1fffff80
+#define A5XX_TEX_CONST_2_PITCH__SHIFT 7
+static inline uint32_t A5XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_2_PITCH__SHIFT) & A5XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A5XX_TEX_CONST_2_TYPE__MASK 0xe0000000
+#define A5XX_TEX_CONST_2_TYPE__SHIFT 29
+static inline uint32_t A5XX_TEX_CONST_2_TYPE(enum a5xx_tex_type val)
+{
+ return ((val) << A5XX_TEX_CONST_2_TYPE__SHIFT) & A5XX_TEX_CONST_2_TYPE__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_3 0x00000003
+#define A5XX_TEX_CONST_3_ARRAY_PITCH__MASK 0x00003fff
+#define A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A5XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A5XX_TEX_CONST_3_ARRAY_PITCH__MASK;
+}
+#define A5XX_TEX_CONST_3_MIN_LAYERSZ__MASK 0x07800000
+#define A5XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT 23
+static inline uint32_t A5XX_TEX_CONST_3_MIN_LAYERSZ(uint32_t val)
+{
+ return ((val >> 12) << A5XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT) & A5XX_TEX_CONST_3_MIN_LAYERSZ__MASK;
+}
+#define A5XX_TEX_CONST_3_TILE_ALL 0x08000000
+#define A5XX_TEX_CONST_3_FLAG 0x10000000
+
+#define REG_A5XX_TEX_CONST_4 0x00000004
+#define A5XX_TEX_CONST_4_BASE_LO__MASK 0xffffffe0
+#define A5XX_TEX_CONST_4_BASE_LO__SHIFT 5
+static inline uint32_t A5XX_TEX_CONST_4_BASE_LO(uint32_t val)
+{
+ return ((val >> 5) << A5XX_TEX_CONST_4_BASE_LO__SHIFT) & A5XX_TEX_CONST_4_BASE_LO__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_5 0x00000005
+#define A5XX_TEX_CONST_5_BASE_HI__MASK 0x0001ffff
+#define A5XX_TEX_CONST_5_BASE_HI__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_5_BASE_HI(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_5_BASE_HI__SHIFT) & A5XX_TEX_CONST_5_BASE_HI__MASK;
+}
+#define A5XX_TEX_CONST_5_DEPTH__MASK 0x3ffe0000
+#define A5XX_TEX_CONST_5_DEPTH__SHIFT 17
+static inline uint32_t A5XX_TEX_CONST_5_DEPTH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_5_DEPTH__SHIFT) & A5XX_TEX_CONST_5_DEPTH__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_6 0x00000006
+
+#define REG_A5XX_TEX_CONST_7 0x00000007
+
+#define REG_A5XX_TEX_CONST_8 0x00000008
+
+#define REG_A5XX_TEX_CONST_9 0x00000009
+
+#define REG_A5XX_TEX_CONST_10 0x0000000a
+
+#define REG_A5XX_TEX_CONST_11 0x0000000b
+
+#define REG_A5XX_SSBO_0_0 0x00000000
+#define A5XX_SSBO_0_0_BASE_LO__MASK 0xffffffe0
+#define A5XX_SSBO_0_0_BASE_LO__SHIFT 5
+static inline uint32_t A5XX_SSBO_0_0_BASE_LO(uint32_t val)
+{
+ return ((val >> 5) << A5XX_SSBO_0_0_BASE_LO__SHIFT) & A5XX_SSBO_0_0_BASE_LO__MASK;
+}
+
+#define REG_A5XX_SSBO_0_1 0x00000001
+#define A5XX_SSBO_0_1_PITCH__MASK 0x003fffff
+#define A5XX_SSBO_0_1_PITCH__SHIFT 0
+static inline uint32_t A5XX_SSBO_0_1_PITCH(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_0_1_PITCH__SHIFT) & A5XX_SSBO_0_1_PITCH__MASK;
+}
+
+#define REG_A5XX_SSBO_0_2 0x00000002
+#define A5XX_SSBO_0_2_ARRAY_PITCH__MASK 0x03fff000
+#define A5XX_SSBO_0_2_ARRAY_PITCH__SHIFT 12
+static inline uint32_t A5XX_SSBO_0_2_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A5XX_SSBO_0_2_ARRAY_PITCH__SHIFT) & A5XX_SSBO_0_2_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_SSBO_0_3 0x00000003
+#define A5XX_SSBO_0_3_CPP__MASK 0x0000003f
+#define A5XX_SSBO_0_3_CPP__SHIFT 0
+static inline uint32_t A5XX_SSBO_0_3_CPP(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_0_3_CPP__SHIFT) & A5XX_SSBO_0_3_CPP__MASK;
+}
+
+#define REG_A5XX_SSBO_1_0 0x00000000
+#define A5XX_SSBO_1_0_FMT__MASK 0x0000ff00
+#define A5XX_SSBO_1_0_FMT__SHIFT 8
+static inline uint32_t A5XX_SSBO_1_0_FMT(enum a5xx_tex_fmt val)
+{
+ return ((val) << A5XX_SSBO_1_0_FMT__SHIFT) & A5XX_SSBO_1_0_FMT__MASK;
+}
+#define A5XX_SSBO_1_0_WIDTH__MASK 0xffff0000
+#define A5XX_SSBO_1_0_WIDTH__SHIFT 16
+static inline uint32_t A5XX_SSBO_1_0_WIDTH(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_1_0_WIDTH__SHIFT) & A5XX_SSBO_1_0_WIDTH__MASK;
+}
+
+#define REG_A5XX_SSBO_1_1 0x00000001
+#define A5XX_SSBO_1_1_HEIGHT__MASK 0x0000ffff
+#define A5XX_SSBO_1_1_HEIGHT__SHIFT 0
+static inline uint32_t A5XX_SSBO_1_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_1_1_HEIGHT__SHIFT) & A5XX_SSBO_1_1_HEIGHT__MASK;
+}
+#define A5XX_SSBO_1_1_DEPTH__MASK 0xffff0000
+#define A5XX_SSBO_1_1_DEPTH__SHIFT 16
+static inline uint32_t A5XX_SSBO_1_1_DEPTH(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_1_1_DEPTH__SHIFT) & A5XX_SSBO_1_1_DEPTH__MASK;
+}
+
+#define REG_A5XX_SSBO_2_0 0x00000000
+#define A5XX_SSBO_2_0_BASE_LO__MASK 0xffffffff
+#define A5XX_SSBO_2_0_BASE_LO__SHIFT 0
+static inline uint32_t A5XX_SSBO_2_0_BASE_LO(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_2_0_BASE_LO__SHIFT) & A5XX_SSBO_2_0_BASE_LO__MASK;
+}
+
+#define REG_A5XX_SSBO_2_1 0x00000001
+#define A5XX_SSBO_2_1_BASE_HI__MASK 0xffffffff
+#define A5XX_SSBO_2_1_BASE_HI__SHIFT 0
+static inline uint32_t A5XX_SSBO_2_1_BASE_HI(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_2_1_BASE_HI__SHIFT) & A5XX_SSBO_2_1_BASE_HI__MASK;
+}
+
+#define REG_A5XX_UBO_0 0x00000000
+#define A5XX_UBO_0_BASE_LO__MASK 0xffffffff
+#define A5XX_UBO_0_BASE_LO__SHIFT 0
+static inline uint32_t A5XX_UBO_0_BASE_LO(uint32_t val)
+{
+ return ((val) << A5XX_UBO_0_BASE_LO__SHIFT) & A5XX_UBO_0_BASE_LO__MASK;
+}
+
+#define REG_A5XX_UBO_1 0x00000001
+#define A5XX_UBO_1_BASE_HI__MASK 0x0001ffff
+#define A5XX_UBO_1_BASE_HI__SHIFT 0
+static inline uint32_t A5XX_UBO_1_BASE_HI(uint32_t val)
+{
+ return ((val) << A5XX_UBO_1_BASE_HI__SHIFT) & A5XX_UBO_1_BASE_HI__MASK;
+}
+
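+/*
+ * Usage sketch (illustrative, not part of the generated register
+ * definitions): some of the *_BASE_LO helpers above (e.g.
+ * A5XX_TEX_CONST_4_BASE_LO, A5XX_SSBO_0_0_BASE_LO) pre-shift their argument
+ * because the low address bits are implied by alignment (bits [31:5] are
+ * kept, so the base must be 32-byte aligned). A 64-bit iova would typically
+ * be split across the two dwords along these lines:
+ *
+ *   desc[4] = A5XX_TEX_CONST_4_BASE_LO(lower_32_bits(iova));
+ *   desc[5] = A5XX_TEX_CONST_5_BASE_HI(upper_32_bits(iova)) |
+ *             A5XX_TEX_CONST_5_DEPTH(depth);
+ */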
+
+#endif /* A5XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
new file mode 100644
index 0000000000..169b8fe688
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/debugfs.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
+#include <drm/drm_print.h>
+
+#include "a5xx_gpu.h"
+
+static void pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
+{
+ int i;
+
+ drm_printf(p, "PFP state:\n");
+
+ for (i = 0; i < 36; i++) {
+ gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, i);
+ drm_printf(p, " %02x: %08x\n", i,
+ gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA));
+ }
+}
+
+static void me_print(struct msm_gpu *gpu, struct drm_printer *p)
+{
+ int i;
+
+ drm_printf(p, "ME state:\n");
+
+ for (i = 0; i < 29; i++) {
+ gpu_write(gpu, REG_A5XX_CP_ME_STAT_ADDR, i);
+ drm_printf(p, " %02x: %08x\n", i,
+ gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA));
+ }
+}
+
+static void meq_print(struct msm_gpu *gpu, struct drm_printer *p)
+{
+ int i;
+
+ drm_printf(p, "MEQ state:\n");
+ gpu_write(gpu, REG_A5XX_CP_MEQ_DBG_ADDR, 0);
+
+ for (i = 0; i < 64; i++) {
+ drm_printf(p, " %02x: %08x\n", i,
+ gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));
+ }
+}
+
+static void roq_print(struct msm_gpu *gpu, struct drm_printer *p)
+{
+ int i;
+
+ drm_printf(p, "ROQ state:\n");
+ gpu_write(gpu, REG_A5XX_CP_ROQ_DBG_ADDR, 0);
+
+ for (i = 0; i < 512 / 4; i++) {
+ uint32_t val[4];
+ int j;
+ for (j = 0; j < 4; j++)
+ val[j] = gpu_read(gpu, REG_A5XX_CP_ROQ_DBG_DATA);
+ drm_printf(p, " %02x: %08x %08x %08x %08x\n", i,
+ val[0], val[1], val[2], val[3]);
+ }
+}
+
+static int show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_printer p = drm_seq_file_printer(m);
+ void (*show)(struct msm_gpu *gpu, struct drm_printer *p) =
+ node->info_ent->data;
+
+ show(priv->gpu, &p);
+ return 0;
+}
+
+#define ENT(n) { .name = #n, .show = show, .data = n ##_print }
+static struct drm_info_list a5xx_debugfs_list[] = {
+ ENT(pfp),
+ ENT(me),
+ ENT(meq),
+ ENT(roq),
+};
+
+/* for debugfs files that can be written to, we can't use drm helper: */
+static int
+reset_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EINVAL;
+
+ /* TODO do we care about trying to make sure the GPU is idle?
+ * Since this is just a debug feature limited to CAP_SYS_ADMIN,
+ * maybe it is fine to let the user keep both pieces if they
+ * try to reset an active GPU.
+ */
+
+ mutex_lock(&gpu->lock);
+
+ release_firmware(adreno_gpu->fw[ADRENO_FW_PM4]);
+ adreno_gpu->fw[ADRENO_FW_PM4] = NULL;
+
+ release_firmware(adreno_gpu->fw[ADRENO_FW_PFP]);
+ adreno_gpu->fw[ADRENO_FW_PFP] = NULL;
+
+ if (a5xx_gpu->pm4_bo) {
+ msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ drm_gem_object_put(a5xx_gpu->pm4_bo);
+ a5xx_gpu->pm4_bo = NULL;
+ }
+
+ if (a5xx_gpu->pfp_bo) {
+ msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ drm_gem_object_put(a5xx_gpu->pfp_bo);
+ a5xx_gpu->pfp_bo = NULL;
+ }
+
+ gpu->needs_hw_init = true;
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ gpu->funcs->recover(gpu);
+
+ pm_runtime_put_sync(&gpu->pdev->dev);
+ mutex_unlock(&gpu->lock);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n");
+
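+/*
+ * Usage sketch (paths are illustrative and depend on the DRM minor number):
+ *
+ *   cat /sys/kernel/debug/dri/0/pfp        - dump PFP state via pfp_print()
+ *   echo 1 > /sys/kernel/debug/dri/0/reset - drop the cached firmware and
+ *                                            recover the GPU (CAP_SYS_ADMIN)
+ */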
+
+void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
+{
+ struct drm_device *dev;
+
+ if (!minor)
+ return;
+
+ dev = minor->dev;
+
+ drm_debugfs_create_files(a5xx_debugfs_list,
+ ARRAY_SIZE(a5xx_debugfs_list),
+ minor->debugfs_root, minor);
+
+ debugfs_create_file_unsafe("reset", S_IWUGO, minor->debugfs_root, dev,
+ &reset_fops);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
new file mode 100644
index 0000000000..e5916c1067
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -0,0 +1,1789 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/cpumask.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/pm_opp.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/slab.h>
+#include "msm_gem.h"
+#include "msm_mmu.h"
+#include "a5xx_gpu.h"
+
+extern bool hang_debug;
+static void a5xx_dump(struct msm_gpu *gpu);
+
+#define GPU_PAS_ID 13
+
+static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ if (a5xx_gpu->has_whereami) {
+ OUT_PKT7(ring, CP_WHERE_AM_I, 2);
+ OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
+ OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
+ }
+}
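+
+/*
+ * Sketch of the intent (shadowptr() is defined in a5xx_gpu.h; the shadow
+ * buffer is allocated in a5xx_ucode_load() below): CP_WHERE_AM_I asks the CP
+ * to write its current read pointer to the per-ring slot at
+ * shadowptr(a5xx_gpu, ring), so the driver can consult
+ * a5xx_gpu->shadow[ring->id] instead of reading the RPTR register directly.
+ */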
+
+void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+ bool sync)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ uint32_t wptr;
+ unsigned long flags;
+
+ /*
+ * Most flush operations need to issue a WHERE_AM_I opcode to sync up
+ * the rptr shadow
+ */
+ if (sync)
+ update_shadow_rptr(gpu, ring);
+
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
+ /* Make sure to wrap wptr if we need to */
+ wptr = get_wptr(ring);
+
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
+
+ /* Make sure everything is posted before making a decision */
+ mb();
+
+ /* Update HW if this is the current ring and we are not in preempt */
+ if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu))
+ gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
+}
+
+static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+ struct msm_ringbuffer *ring = submit->ring;
+ struct drm_gem_object *obj;
+ uint32_t *ptr, dwords;
+ unsigned int i;
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ break;
+ fallthrough;
+ case MSM_SUBMIT_CMD_BUF:
+ /* copy commands into RB: */
+ obj = submit->bos[submit->cmd[i].idx].obj;
+ dwords = submit->cmd[i].size;
+
+ ptr = msm_gem_get_vaddr(obj);
+
+ /* _get_vaddr() shouldn't fail at this point,
+ * since we've already mapped it once in
+ * submit_reloc()
+ */
+ if (WARN_ON(IS_ERR_OR_NULL(ptr)))
+ return;
+
+ for (i = 0; i < dwords; i++) {
+ /* normally the OUT_PKTn() would wait
+ * for space for the packet. But since
+ * we just OUT_RING() the whole thing,
+ * need to call adreno_wait_ring()
+ * ourself:
+ */
+ adreno_wait_ring(ring, 1);
+ OUT_RING(ring, ptr[i]);
+ }
+
+ msm_gem_put_vaddr(obj);
+
+ break;
+ }
+ }
+
+ a5xx_flush(gpu, ring, true);
+ a5xx_preempt_trigger(gpu);
+
+ /* we might not necessarily have a cmd from userspace to
+ * trigger an event to know that submit has completed, so
+ * do this manually:
+ */
+ a5xx_idle(gpu, ring);
+ ring->memptrs->fence = submit->seqno;
+ msm_gpu_retire(gpu);
+}
+
+static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = submit->ring;
+ unsigned int i, ibs = 0;
+
+ if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
+ gpu->cur_ctx_seqno = 0;
+ a5xx_submit_in_rb(gpu, submit);
+ return;
+ }
+
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+ OUT_RING(ring, 0x02);
+
+ /* Turn off protected mode to write to special registers */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Set the save preemption record for the ring/command */
+ OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ /* Enable local preemption for finegrain preemption */
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+ OUT_RING(ring, 0x1);
+
+ /* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x02);
+
+ /* Submit the commands */
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ break;
+ fallthrough;
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ ibs++;
+ break;
+ }
+
+ /*
+ * Periodically update shadow-wptr if needed, so that we
+ * can see partial progress of submits with large # of
+ * cmds.. otherwise we could needlessly stall waiting for
+ * ringbuffer state, simply due to looking at a shadow
+ * rptr value that has not been updated
+ */
+ if ((ibs % 32) == 0)
+ update_shadow_rptr(gpu, ring);
+ }
+
+ /*
+ * Write the render mode to NULL (0) to indicate to the CP that the IBs
+ * are done rendering - otherwise a lucky preemption would start
+ * replaying from the last checkpoint
+ */
+ OUT_PKT7(ring, CP_SET_RENDER_MODE, 5);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+
+ /* Turn off IB level preemptions */
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x01);
+
+ /* Write the fence to the scratch register */
+ OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
+ OUT_RING(ring, submit->seqno);
+
+ /*
+ * Execute a CACHE_FLUSH_TS event. This will ensure that the
+ * timestamp is written to the memory and then triggers the interrupt
+ */
+ OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+ OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
+ CP_EVENT_WRITE_0_IRQ);
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, submit->seqno);
+
+ /* Yield the floor on command completion */
+ OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+ /*
+ * If dword[2:1] are non-zero, they specify an address for the CP to
+ * write the value of dword[3] to on preemption complete. Write 0 to
+ * skip the write
+ */
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x00);
+ /* Data value - not used if the address above is 0 */
+ OUT_RING(ring, 0x01);
+ /* Set bit 0 to trigger an interrupt on preempt complete */
+ OUT_RING(ring, 0x01);
+
+ /* A WHERE_AM_I packet is not needed after a YIELD */
+ a5xx_flush(gpu, ring, false);
+
+ /* Check to see if we need to start preemption */
+ a5xx_preempt_trigger(gpu);
+}
+
+static const struct adreno_five_hwcg_regs {
+ u32 offset;
+ u32 value;
+} a5xx_hwcg[] = {
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
+ {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
+}, a50x_hwcg[] = {
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00FFFFF4},
+ {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+}, a512_hwcg[] = {
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
+ {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+};
+
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const struct adreno_five_hwcg_regs *regs;
+ unsigned int i, sz;
+
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu)) {
+ regs = a50x_hwcg;
+ sz = ARRAY_SIZE(a50x_hwcg);
+ } else if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu)) {
+ regs = a512_hwcg;
+ sz = ARRAY_SIZE(a512_hwcg);
+ } else {
+ regs = a5xx_hwcg;
+ sz = ARRAY_SIZE(a5xx_hwcg);
+ }
+
+ for (i = 0; i < sz; i++)
+ gpu_write(gpu, regs[i].offset,
+ state ? regs[i].value : 0);
+
+ if (adreno_is_a540(adreno_gpu)) {
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU, state ? 0x00000770 : 0);
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_HYST_GPMU, state ? 0x00000004 : 0);
+ }
+
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
+ gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
+}
+
+static int a5xx_me_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT7(ring, CP_ME_INIT, 8);
+
+ OUT_RING(ring, 0x0000002F);
+
+ /* Enable multiple hardware contexts */
+ OUT_RING(ring, 0x00000003);
+
+ /* Enable error detection */
+ OUT_RING(ring, 0x20000000);
+
+ /* Don't enable header dump */
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ /* Specify workarounds for various microcode issues */
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a530(adreno_gpu)) {
+ /* Workaround for token end syncs
+ * Force a WFI after every direct-render 3D mode draw and every
+ * 2D mode 3 draw
+ */
+ OUT_RING(ring, 0x0000000B);
+ } else if (adreno_is_a510(adreno_gpu)) {
+ /* Workaround for token and syncs */
+ OUT_RING(ring, 0x00000001);
+ } else {
+ /* No workarounds enabled */
+ OUT_RING(ring, 0x00000000);
+ }
+
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ a5xx_flush(gpu, ring, true);
+ return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+static int a5xx_preempt_start(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ if (gpu->nr_rings == 1)
+ return 0;
+
+ /* Turn off protected mode to write to special registers */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Set the save preemption record for the ring/command */
+ OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+ OUT_RING(ring, 0x00);
+
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+ OUT_RING(ring, 0x01);
+
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x01);
+
+ /* Yield the floor on command completion */
+ OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x01);
+ OUT_RING(ring, 0x01);
+
+ /* The WHERE_AM_I packet is not needed after a YIELD is issued */
+ a5xx_flush(gpu, ring, false);
+
+ return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+static void a5xx_ucode_check_version(struct a5xx_gpu *a5xx_gpu,
+ struct drm_gem_object *obj)
+{
+ u32 *buf = msm_gem_get_vaddr(obj);
+
+ if (IS_ERR(buf))
+ return;
+
+ /*
+ * If the lowest nibble is 0xa that is an indication that this microcode
+ * has been patched. The actual version is in dword [3] but we only care
+ * about the patchlevel which is the lowest nibble of dword [3]
+ */
+ if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
+ a5xx_gpu->has_whereami = true;
+
+ msm_gem_put_vaddr(obj);
+}
+
+static int a5xx_ucode_load(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int ret;
+
+ if (!a5xx_gpu->pm4_bo) {
+ a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu,
+ adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova);
+
+ if (IS_ERR(a5xx_gpu->pm4_bo)) {
+ ret = PTR_ERR(a5xx_gpu->pm4_bo);
+ a5xx_gpu->pm4_bo = NULL;
+ DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PM4: %d\n",
+ ret);
+ return ret;
+ }
+
+ msm_gem_object_set_name(a5xx_gpu->pm4_bo, "pm4fw");
+ }
+
+ if (!a5xx_gpu->pfp_bo) {
+ a5xx_gpu->pfp_bo = adreno_fw_create_bo(gpu,
+ adreno_gpu->fw[ADRENO_FW_PFP], &a5xx_gpu->pfp_iova);
+
+ if (IS_ERR(a5xx_gpu->pfp_bo)) {
+ ret = PTR_ERR(a5xx_gpu->pfp_bo);
+ a5xx_gpu->pfp_bo = NULL;
+ DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PFP: %d\n",
+ ret);
+ return ret;
+ }
+
+ msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw");
+ a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo);
+ }
+
+ if (a5xx_gpu->has_whereami) {
+ if (!a5xx_gpu->shadow_bo) {
+ a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
+ sizeof(u32) * gpu->nr_rings,
+ MSM_BO_WC | MSM_BO_MAP_PRIV,
+ gpu->aspace, &a5xx_gpu->shadow_bo,
+ &a5xx_gpu->shadow_iova);
+
+ if (IS_ERR(a5xx_gpu->shadow))
+ return PTR_ERR(a5xx_gpu->shadow);
+
+ msm_gem_object_set_name(a5xx_gpu->shadow_bo, "shadow");
+ }
+ } else if (gpu->nr_rings > 1) {
+ /* Disable preemption if WHERE_AM_I isn't available */
+ a5xx_preempt_fini(gpu);
+ gpu->nr_rings = 1;
+ }
+
+ return 0;
+}
+
+#define SCM_GPU_ZAP_SHADER_RESUME 0
+
+static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
+ /*
+ * Adreno 506 has the CPZ retention feature and doesn't require
+ * the zap shader to be resumed
+ */
+ if (adreno_is_a506(adreno_gpu))
+ return 0;
+
+ ret = qcom_scm_set_remote_state(SCM_GPU_ZAP_SHADER_RESUME, GPU_PAS_ID);
+ if (ret)
+ DRM_ERROR("%s: zap-shader resume failed: %d\n",
+ gpu->name, ret);
+
+ return ret;
+}
+
+static int a5xx_zap_shader_init(struct msm_gpu *gpu)
+{
+ static bool loaded;
+ int ret;
+
+ /*
+ * If the zap shader is already loaded into memory we just need to kick
+ * the remote processor to reinitialize it
+ */
+ if (loaded)
+ return a5xx_zap_shader_resume(gpu);
+
+ ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
+
+ loaded = !ret;
+ return ret;
+}
+
+#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
+ A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
+ A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
+ A5XX_RBBM_INT_0_MASK_CP_SW | \
+ A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+ A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+ A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
+
+static int a5xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ u32 regbit;
+ int ret;
+
+ gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+
+ if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
+ adreno_is_a540(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
+
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
+
+ /* Enable RBBM error reporting bits */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);
+
+ if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
+ /*
+ * Mask out the activity signals from RB1-3 to avoid false
+ * positives
+ */
+
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
+ 0xF0000000);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
+ 0xFFFFFFFF);
+ }
+
+ /* Enable fault detection */
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
+ (1 << 30) | 0xFFFF);
+
+ /* Turn on performance counters */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);
+
+ /* Select CP0 to always count cycles */
+ gpu_write(gpu, REG_A5XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
+
+ /* Select RBBM0 to countable 6 to get the busy status for devfreq */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
+
+ /* Increase VFD cache access so LRZ and other data gets evicted less */
+ gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
+
+ /* Disable L2 bypass in the UCHE */
+ gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
+ gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
+ gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
+ gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);
+
+ /* Set the GMEM VA range (gpu->gmem bytes starting at 0x00100000) */
+ gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
+ gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00000000);
+ gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
+ 0x00100000 + adreno_gpu->info->gmem - 1);
+ gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
+
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
+ adreno_is_a510(adreno_gpu)) {
+ gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20);
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
+ else
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A);
+ } else {
+ gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
+ if (adreno_is_a530(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
+ else
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
+ }
+
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
+ (0x100 << 11 | 0x100 << 22));
+ else if (adreno_is_a509(adreno_gpu) || adreno_is_a510(adreno_gpu) ||
+ adreno_is_a512(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
+ (0x200 << 11 | 0x200 << 22));
+ else
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
+ (0x400 << 11 | 0x300 << 22));
+
+ if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
+ gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
+
+ /*
+ * Disable the RB sampler datapath DP2 clock gating optimization
+ * for 1-SP GPUs, as it is enabled by default.
+ */
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
+ adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu))
+ gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, 0, (1 << 9));
+
+ /* Disable UCHE global filter as SP can invalidate/flush independently */
+ gpu_write(gpu, REG_A5XX_UCHE_MODE_CNTL, BIT(29));
+
+ /* Enable USE_RETENTION_FLOPS */
+ gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
+
+ /* Enable ME/PFP split notification */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
+
+ /*
+ * In A5x, the CCU can send the context_done event of a particular
+ * context to the UCHE, which ultimately reaches the CP, even while a
+ * valid transaction of that context is still inside the CCU. This can
+ * let the CP program config registers, which makes the "valid
+ * transaction" inside the CCU be interpreted differently and can cause
+ * a GPU fault. This bug is fixed in the latest A510 revision. To enable
+ * the fix, bit[11] of RB_DBG_ECO_CNTL needs to be set to 0 (the default
+ * is 1, i.e. disabled). On older A510 revisions this bit is unused.
+ */
+ if (adreno_is_a510(adreno_gpu))
+ gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, (1 << 11), 0);
+
+ /* Enable HWCG */
+ a5xx_set_hwcg(gpu, true);
+
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
+
+ /* Set the highest bank bit */
+ if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu))
+ regbit = 2;
+ else
+ regbit = 1;
+
+ gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, regbit << 7);
+ gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, regbit << 1);
+
+ if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
+ adreno_is_a540(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, regbit);
+
+ /* Disable All flat shading optimization (ALLFLATOPTDIS) */
+ gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, (1 << 10));
+
+ /* Protect registers from the CP */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
+
+ /* RBBM */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));
+
+ /* Content protect */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
+ ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+ 16));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
+ ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));
+
+ /* CP */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));
+
+ /* RB */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));
+
+ /* VPC */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 16));
+
+ /* UCHE */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
+
+ /* SMMU */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
+ ADRENO_PROTECT_RW(0x10000, 0x8000));
+
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
+ /*
+ * Disable the trusted memory range - we don't actually support secure
+ * memory rendering at this point in time and we don't want to block off
+ * part of the virtual memory space.
+ */
+ gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 0x00000000);
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+
+ /* Put the GPU into 64 bit by default */
+ gpu_write(gpu, REG_A5XX_CP_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_VSC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_GRAS_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_RB_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_PC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_HLSQ_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_VFD_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_VPC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_UCHE_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_SP_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_TPL1_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
+
+ /*
+ * A VPC corner case with local memory load kill leads to corrupt
+ * internal state. The normal disable does not work for all a5x chips,
+ * so use the following setting to disable it.
+ */
+ if (adreno_gpu->info->quirks & ADRENO_QUIRK_LMLOADKILL_DISABLE) {
+ gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, BIT(23));
+ gpu_rmw(gpu, REG_A5XX_HLSQ_DBG_ECO_CNTL, BIT(18), 0);
+ }
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ if (adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))
+ a5xx_gpmu_ucode_init(gpu);
+
+ gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, a5xx_gpu->pm4_iova);
+ gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, a5xx_gpu->pfp_iova);
+
+ /* Set the ringbuffer address */
+ gpu_write64(gpu, REG_A5XX_CP_RB_BASE, gpu->rb[0]->iova);
+
+ /*
+ * If the microcode supports the WHERE_AM_I opcode then we can use that
+ * in lieu of the RPTR shadow and enable preemption. Otherwise, we
+ * can't safely use the RPTR shadow or preemption. In either case, the
+ * RPTR shadow should be disabled in hardware.
+ */
+ gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ /* Configure the RPTR shadow if needed: */
+ if (a5xx_gpu->shadow_bo) {
+ gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
+ shadowptr(a5xx_gpu, gpu->rb[0]));
+ }
+
+ a5xx_preempt_hw_init(gpu);
+
+ /* Enable the RBBM interrupts we want to handle */
+ gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
+
+ /* Clear ME_HALT to start the micro engine */
+ gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
+ ret = a5xx_me_init(gpu);
+ if (ret)
+ return ret;
+
+ ret = a5xx_power_init(gpu);
+ if (ret)
+ return ret;
+
+ /*
+ * Send a pipeline event stat to get misbehaving counters to start
+ * ticking correctly
+ */
+ if (adreno_is_a530(adreno_gpu)) {
+ OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
+ OUT_RING(gpu->rb[0], CP_EVENT_WRITE_0_EVENT(STAT_EVENT));
+
+ a5xx_flush(gpu, gpu->rb[0], true);
+ if (!a5xx_idle(gpu, gpu->rb[0]))
+ return -EINVAL;
+ }
+
+ /*
+ * If the chip we are using supports loading a zap shader, try to load
+ * it into the secure world. If successful we can use the CP to switch
+ * out of secure mode. If not, we have no recourse but to try to switch
+ * ourselves out manually. If we
+ * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
+ * be blocked and a permissions violation will soon follow.
+ */
+ ret = a5xx_zap_shader_init(gpu);
+ if (!ret) {
+ OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
+ OUT_RING(gpu->rb[0], 0x00000000);
+
+ a5xx_flush(gpu, gpu->rb[0], true);
+ if (!a5xx_idle(gpu, gpu->rb[0]))
+ return -EINVAL;
+ } else if (ret == -ENODEV) {
+ /*
+ * This device does not use zap shader (but print a warning
+ * just in case someone got their dt wrong.. hopefully they
+ * have a debug UART to realize the error of their ways...
+ * if you mess this up you are about to crash horribly)
+ */
+ dev_warn_once(gpu->dev->dev,
+ "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+ } else {
+ return ret;
+ }
+
+ /* Last step - yield the ringbuffer */
+ a5xx_preempt_start(gpu);
+
+ return 0;
+}
+
+static void a5xx_recover(struct msm_gpu *gpu)
+{
+ int i;
+
+ adreno_dump_info(gpu);
+
+ for (i = 0; i < 8; i++) {
+ printk("CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(i)));
+ }
+
+ if (hang_debug)
+ a5xx_dump(gpu);
+
+ gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
+ gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
+ gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);
+ adreno_recover(gpu);
+}
+
+static void a5xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ a5xx_preempt_fini(gpu);
+
+ if (a5xx_gpu->pm4_bo) {
+ msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ drm_gem_object_put(a5xx_gpu->pm4_bo);
+ }
+
+ if (a5xx_gpu->pfp_bo) {
+ msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ drm_gem_object_put(a5xx_gpu->pfp_bo);
+ }
+
+ if (a5xx_gpu->gpmu_bo) {
+ msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+ drm_gem_object_put(a5xx_gpu->gpmu_bo);
+ }
+
+ if (a5xx_gpu->shadow_bo) {
+ msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace);
+ drm_gem_object_put(a5xx_gpu->shadow_bo);
+ }
+
+ adreno_gpu_cleanup(adreno_gpu);
+ kfree(a5xx_gpu);
+}
+
+static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
+{
+ if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY)
+ return false;
+
+ /*
+ * Nearly every abnormality ends up pausing the GPU and triggering a
+ * fault so we can safely just watch for this one interrupt to fire
+ */
+ return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) &
+ A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
+}
+
+bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ if (ring != a5xx_gpu->cur_ring) {
+ WARN(1, "Tried to idle a non-current ringbuffer\n");
+ return false;
+ }
+
+ /* wait for CP to drain ringbuffer: */
+ if (!adreno_idle(gpu, ring))
+ return false;
+
+ if (spin_until(_a5xx_check_idle(gpu))) {
+ DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
+ gpu->name, __builtin_return_address(0),
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS),
+ gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A5XX_CP_RB_WPTR));
+ return false;
+ }
+
+ return true;
+}
+
+static int a5xx_fault_handler(void *arg, unsigned long iova, int flags, void *data)
+{
+ struct msm_gpu *gpu = arg;
+ struct adreno_smmu_fault_info *info = data;
+ char block[12] = "unknown";
+ u32 scratch[] = {
+ gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)),
+ gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)),
+ gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)),
+ gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7)),
+ };
+
+ if (info)
+ snprintf(block, sizeof(block), "%x", info->fsynr1);
+
+ return adreno_fault_handler(gpu, iova, flags, info, block, scratch);
+}
+
+static void a5xx_cp_err_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
+
+ if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
+ u32 val;
+
+ gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);
+
+ /*
+ * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
+ * read it twice
+ */
+
+ gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+ val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+
+ dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n",
+ val);
+ }
+
+ if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR)
+ dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n",
+ gpu_read(gpu, REG_A5XX_CP_HW_FAULT));
+
+ if (status & A5XX_CP_INT_CP_DMA_ERROR)
+ dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n");
+
+ if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
+ u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS);
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
+ val & (1 << 24) ? "WRITE" : "READ",
+ (val & 0xFFFFF) >> 2, val);
+ }
+
+ if (status & A5XX_CP_INT_CP_AHB_ERROR) {
+ u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT);
+ const char *access[16] = { "reserved", "reserved",
+ "timestamp lo", "timestamp hi", "pfp read", "pfp write",
+ "", "", "me read", "me write", "", "", "crashdump read",
+ "crashdump write" };
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n",
+ status & 0xFFFFF, access[(status >> 24) & 0xF],
+ (status & (1 << 31)), status);
+ }
+}
+
+static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
+{
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
+ u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n",
+ val & (1 << 28) ? "WRITE" : "READ",
+ (val & 0xFFFFF) >> 2, (val >> 20) & 0x3,
+ (val >> 24) & 0xF);
+
+ /* Clear the error */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));
+
+ /* Clear the interrupt */
+ gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
+ A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
+ }
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n");
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n");
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n");
+}
+
+static void a5xx_uche_err_irq(struct msm_gpu *gpu)
+{
+ uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI);
+
+ addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO);
+
+ dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n",
+ addr);
+}
+
+static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
+{
+ dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
+}
+
+static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+
+ /*
+ * If stalled on SMMU fault, we could trip the GPU's hang detection,
+ * but the fault handler will trigger the devcore dump, and we want
+ * to otherwise resume normally rather than killing the submit, so
+ * just bail.
+ */
+ if (gpu_read(gpu, REG_A5XX_RBBM_STATUS3) & BIT(24))
+ return;
+
+ DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+ ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
+ gpu_read64(gpu, REG_A5XX_CP_IB1_BASE),
+ gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
+ gpu_read64(gpu, REG_A5XX_CP_IB2_BASE),
+ gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
+
+ /* Turn off the hangcheck timer to keep it from bothering us */
+ del_timer(&gpu->hangcheck_timer);
+
+ kthread_queue_work(gpu->worker, &gpu->recover_work);
+}
+
+#define RBBM_ERROR_MASK \
+ (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
+ A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
+
+static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
+
+ /*
+ * Clear all the interrupts except RBBM_AHB_ERROR - if we clear it
+ * before the source is cleared the interrupt will storm.
+ */
+ gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
+ status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
+
+ if (priv->disable_err_irq) {
+ status &= A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS |
+ A5XX_RBBM_INT_0_MASK_CP_SW;
+ }
+
+ /* Pass status to a5xx_rbbm_err_irq because we've already cleared it */
+ if (status & RBBM_ERROR_MASK)
+ a5xx_rbbm_err_irq(gpu, status);
+
+ if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
+ a5xx_cp_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT)
+ a5xx_fault_detect_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
+ a5xx_uche_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
+ a5xx_gpmu_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) {
+ a5xx_preempt_trigger(gpu);
+ msm_gpu_retire(gpu);
+ }
+
+ if (status & A5XX_RBBM_INT_0_MASK_CP_SW)
+ a5xx_preempt_irq(gpu);
+
+ return IRQ_HANDLED;
+}
+
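+/*
+ * Ranges of registers to dump: the table is laid out as pairs of (start, end)
+ * offsets, apparently inclusive, terminated by ~0; the generic adreno code
+ * walks it when capturing GPU state.
+ */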
+static const u32 a5xx_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
+ 0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
+ 0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
+ 0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
+ 0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
+ 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
+ 0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
+ 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
+ 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
+ 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
+ 0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
+ 0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
+ 0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
+ 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
+ 0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
+ 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
+ 0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
+ 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
+ 0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
+ 0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
+ 0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
+ 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
+ 0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
+ 0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
+ 0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
+ 0xEAA5, 0xEAC2, 0xA800, 0xA800, 0xA820, 0xA828, 0xA840, 0xA87D,
+ 0XA880, 0xA88D, 0xA890, 0xA8A3, 0xA8D0, 0xA8D8, 0xA8E0, 0xA8F5,
+ 0xAC60, 0xAC60, ~0,
+};
+
+static void a5xx_dump(struct msm_gpu *gpu)
+{
+ DRM_DEV_INFO(gpu->dev->dev, "status: %08x\n",
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+static int a5xx_pm_resume(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
+ /* Turn on the core power */
+ ret = msm_gpu_pm_resume(gpu);
+ if (ret)
+ return ret;
+
+ /* Adreno 506, 508, 509, 510 and 512 need manual RBBM suspend/resume control */
+ if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) {
+ /* Halt the sp_input_clk at HM level */
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055);
+ a5xx_set_hwcg(gpu, true);
+ /* Turn on sp_input_clk at HM level */
+ gpu_rmw(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xff, 0);
+ return 0;
+ }
+
+ /* Turn on the RBCCU domain first to limit the chances of voltage droop */
+ gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
+
+ /* Wait 3 usecs before polling */
+ udelay(3);
+
+ ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
+ (1 << 20), (1 << 20));
+ if (ret) {
+ DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
+ gpu->name,
+ gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
+ return ret;
+ }
+
+ /* Turn on the SP domain */
+ gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
+ ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
+ (1 << 20), (1 << 20));
+ if (ret)
+ DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
+ gpu->name);
+
+ return ret;
+}
+
+static int a5xx_pm_suspend(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ u32 mask = 0xf;
+ int i, ret;
+
+ /* A506, A508, A510 have 3 XIN ports in VBIF */
+ if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
+ adreno_is_a510(adreno_gpu))
+ mask = 0x7;
+
+ /* Clear the VBIF pipe before shutting down */
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, mask);
+ spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) &
+ mask) == mask);
+
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ /*
+ * Reset the VBIF before power collapse to avoid issues with FIFO
+ * entries on Adreno A510 and A530 (the others will tend to lock up)
+ */
+ if (adreno_is_a510(adreno_gpu) || adreno_is_a530(adreno_gpu)) {
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
+ }
+
+ ret = msm_gpu_pm_suspend(gpu);
+ if (ret)
+ return ret;
+
+ if (a5xx_gpu->has_whereami)
+ for (i = 0; i < gpu->nr_rings; i++)
+ a5xx_gpu->shadow[i] = 0;
+
+ return 0;
+}
+
+static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO);
+
+ return 0;
+}
+
+struct a5xx_crashdumper {
+ void *ptr;
+ struct drm_gem_object *bo;
+ u64 iova;
+};
+
+struct a5xx_gpu_state {
+ struct msm_gpu_state base;
+ u32 *hlsqregs;
+};
+
+static int a5xx_crashdumper_init(struct msm_gpu *gpu,
+ struct a5xx_crashdumper *dumper)
+{
+ dumper->ptr = msm_gem_kernel_new(gpu->dev,
+ SZ_1M, MSM_BO_WC, gpu->aspace,
+ &dumper->bo, &dumper->iova);
+
+ if (!IS_ERR(dumper->ptr))
+ msm_gem_object_set_name(dumper->bo, "crashdump");
+
+ return PTR_ERR_OR_ZERO(dumper->ptr);
+}
+
+static int a5xx_crashdumper_run(struct msm_gpu *gpu,
+ struct a5xx_crashdumper *dumper)
+{
+ u32 val;
+
+ if (IS_ERR_OR_NULL(dumper->ptr))
+ return -EINVAL;
+
+ gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO, dumper->iova);
+
+ gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);
+
+ return gpu_poll_timeout(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, val,
+ val & 0x04, 100, 10000);
+}
+
+/*
+ * This is a list of the registers that need to be read through the HLSQ
+ * aperture, via the crashdumper. These are not nominally accessible from
+ * the CPU on a secure platform.
+ */
+static const struct {
+ u32 type;
+ u32 regoffset;
+ u32 count;
+} a5xx_hlsq_aperture_regs[] = {
+ { 0x35, 0xe00, 0x32 }, /* HLSQ non-context */
+ { 0x31, 0x2080, 0x1 }, /* HLSQ 2D context 0 */
+ { 0x33, 0x2480, 0x1 }, /* HLSQ 2D context 1 */
+ { 0x32, 0xe780, 0x62 }, /* HLSQ 3D context 0 */
+ { 0x34, 0xef80, 0x62 }, /* HLSQ 3D context 1 */
+ { 0x3f, 0x0ec0, 0x40 }, /* SP non-context */
+ { 0x3d, 0x2040, 0x1 }, /* SP 2D context 0 */
+ { 0x3b, 0x2440, 0x1 }, /* SP 2D context 1 */
+ { 0x3e, 0xe580, 0x170 }, /* SP 3D context 0 */
+ { 0x3c, 0xed80, 0x170 }, /* SP 3D context 1 */
+ { 0x3a, 0x0f00, 0x1c }, /* TP non-context */
+ { 0x38, 0x2000, 0xa }, /* TP 2D context 0 */
+ { 0x36, 0x2400, 0xa }, /* TP 2D context 1 */
+ { 0x39, 0xe700, 0x80 }, /* TP 3D context 0 */
+ { 0x37, 0xef00, 0x80 }, /* TP 3D context 1 */
+};
+
+static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
+ struct a5xx_gpu_state *a5xx_state)
+{
+ struct a5xx_crashdumper dumper = { 0 };
+ u32 offset, count = 0;
+ u64 *ptr;
+ int i;
+
+ if (a5xx_crashdumper_init(gpu, &dumper))
+ return;
+
+ /* The script will be written at offset 0 */
+ ptr = dumper.ptr;
+
+ /* Start writing the data at offset 256k */
+ offset = dumper.iova + (256 * SZ_1K);
+
+ /* Count how many additional registers to get from the HLSQ aperture */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++)
+ count += a5xx_hlsq_aperture_regs[i].count;
+
+ a5xx_state->hlsqregs = kcalloc(count, sizeof(u32), GFP_KERNEL);
+ if (!a5xx_state->hlsqregs)
+ return;
+
+ /* Build the crashdump script */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
+ u32 type = a5xx_hlsq_aperture_regs[i].type;
+ u32 c = a5xx_hlsq_aperture_regs[i].count;
+
+ /* Write the register to select the desired bank */
+ *ptr++ = ((u64) type << 8);
+ *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_READ_SEL) << 44) |
+ (1 << 21) | 1;
+
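+		/*
+		 * Then have the crashdumper copy 'c' dwords from the AHB read
+		 * aperture into the dump buffer at 'offset'
+		 */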
+ *ptr++ = offset;
+ *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE) << 44)
+ | c;
+
+ offset += c * sizeof(u32);
+ }
+
+ /* Write two zeros to close off the script */
+ *ptr++ = 0;
+ *ptr++ = 0;
+
+ if (a5xx_crashdumper_run(gpu, &dumper)) {
+ kfree(a5xx_state->hlsqregs);
+ msm_gem_kernel_put(dumper.bo, gpu->aspace);
+ return;
+ }
+
+ /* Copy the data from the crashdumper to the state */
+ memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
+ count * sizeof(u32));
+
+ msm_gem_kernel_put(dumper.bo, gpu->aspace);
+}
+
+static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct a5xx_gpu_state *a5xx_state = kzalloc(sizeof(*a5xx_state),
+ GFP_KERNEL);
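+	/* Bit 24 of RBBM_STATUS3: GPU is stalled on an IOMMU fault */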
+ bool stalled = !!(gpu_read(gpu, REG_A5XX_RBBM_STATUS3) & BIT(24));
+
+ if (!a5xx_state)
+ return ERR_PTR(-ENOMEM);
+
+ /* Temporarily disable hardware clock gating before reading the hw */
+ a5xx_set_hwcg(gpu, false);
+
+ /* First get the generic state from the adreno core */
+ adreno_gpu_state_get(gpu, &(a5xx_state->base));
+
+ a5xx_state->base.rbbm_status = gpu_read(gpu, REG_A5XX_RBBM_STATUS);
+
+ /*
+ * Get the HLSQ regs with the help of the crashdumper, but only if
+ * we are not stalled in an iommu fault (in which case the crashdumper
+ * would not have access to memory)
+ */
+ if (!stalled)
+ a5xx_gpu_state_get_hlsq_regs(gpu, a5xx_state);
+
+ a5xx_set_hwcg(gpu, true);
+
+ return &a5xx_state->base;
+}
+
+static void a5xx_gpu_state_destroy(struct kref *kref)
+{
+ struct msm_gpu_state *state = container_of(kref,
+ struct msm_gpu_state, ref);
+ struct a5xx_gpu_state *a5xx_state = container_of(state,
+ struct a5xx_gpu_state, base);
+
+ kfree(a5xx_state->hlsqregs);
+
+ adreno_gpu_state_destroy(state);
+ kfree(a5xx_state);
+}
+
+static int a5xx_gpu_state_put(struct msm_gpu_state *state)
+{
+ if (IS_ERR_OR_NULL(state))
+ return 1;
+
+ return kref_put(&state->ref, a5xx_gpu_state_destroy);
+}
+
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+static void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ int i, j;
+ u32 pos = 0;
+ struct a5xx_gpu_state *a5xx_state = container_of(state,
+ struct a5xx_gpu_state, base);
+
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ adreno_show(gpu, state, p);
+
+ /* Dump the additional a5xx HLSQ registers */
+ if (!a5xx_state->hlsqregs)
+ return;
+
+ drm_printf(p, "registers-hlsq:\n");
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
+ u32 o = a5xx_hlsq_aperture_regs[i].regoffset;
+ u32 c = a5xx_hlsq_aperture_regs[i].count;
+
+ for (j = 0; j < c; j++, pos++, o++) {
+ /*
+ * To keep the crashdump simple we pull the entire range
+ * for each register type but not all of the registers
+ * in the range are valid. Fortunately invalid registers
+ * stick out like a sore thumb with a value of
+ * 0xdeadbeef
+ */
+ if (a5xx_state->hlsqregs[pos] == 0xdeadbeef)
+ continue;
+
+ drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
+ o << 2, a5xx_state->hlsqregs[pos]);
+ }
+ }
+}
+#endif
+
+static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ return a5xx_gpu->cur_ring;
+}
+
+static u64 a5xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
+{
+ u64 busy_cycles;
+
+ busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO);
+ *out_sample_rate = clk_get_rate(gpu->core_clk);
+
+ return busy_cycles;
+}
+
+static uint32_t a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ if (a5xx_gpu->has_whereami)
+ return a5xx_gpu->shadow[ring->id];
+
+ return ring->memptrs->rptr = gpu_read(gpu, REG_A5XX_CP_RB_RPTR);
+}
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a5xx_hw_init,
+ .ucode_load = a5xx_ucode_load,
+ .pm_suspend = a5xx_pm_suspend,
+ .pm_resume = a5xx_pm_resume,
+ .recover = a5xx_recover,
+ .submit = a5xx_submit,
+ .active_ring = a5xx_active_ring,
+ .irq = a5xx_irq,
+ .destroy = a5xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = a5xx_show,
+#endif
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = a5xx_debugfs_init,
+#endif
+ .gpu_busy = a5xx_gpu_busy,
+ .gpu_state_get = a5xx_gpu_state_get,
+ .gpu_state_put = a5xx_gpu_state_put,
+ .create_address_space = adreno_create_address_space,
+ .get_rptr = a5xx_get_rptr,
+ },
+ .get_timestamp = a5xx_get_timestamp,
+};
+
+static void check_speed_bin(struct device *dev)
+{
+ struct nvmem_cell *cell;
+ u32 val;
+
+ /*
+	 * If the OPP table specifies an opp-supported-hw property then we have
+	 * to set something with dev_pm_opp_set_supported_hw() or the table
+	 * doesn't get populated. Pick an arbitrary value that ensures the
+	 * default frequencies are selected but doesn't conflict with any
+	 * actual bins.
+ */
+ val = 0x80;
+
+ cell = nvmem_cell_get(dev, "speed_bin");
+
+ if (!IS_ERR(cell)) {
+ void *buf = nvmem_cell_read(cell, NULL);
+
+ if (!IS_ERR(buf)) {
+ u8 bin = *((u8 *) buf);
+
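+			/* Each fused bin selects one supported-hw bit (bin 2 -> BIT(2)) */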
+ val = (1 << bin);
+ kfree(buf);
+ }
+
+ nvmem_cell_put(cell);
+ }
+
+ devm_pm_opp_set_supported_hw(dev, &val, 1);
+}
+
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct adreno_platform_config *config = pdev->dev.platform_data;
+ struct a5xx_gpu *a5xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ unsigned int nr_rings;
+ int ret;
+
+ if (!pdev) {
+ DRM_DEV_ERROR(dev->dev, "No A5XX device is defined\n");
+ return ERR_PTR(-ENXIO);
+ }
+
+ a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
+ if (!a5xx_gpu)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu = &a5xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ adreno_gpu->registers = a5xx_registers;
+
+ a5xx_gpu->lm_leakage = 0x4E001A;
+
+ check_speed_bin(&pdev->dev);
+
+ nr_rings = 4;
+
+ if (config->info->revn == 510)
+ nr_rings = 1;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, nr_rings);
+ if (ret) {
+ a5xx_destroy(&(a5xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ if (gpu->aspace)
+ msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);
+
+ /* Set up the preemption specific bits and pieces for each ringbuffer */
+ a5xx_preempt_init(gpu);
+
+ return gpu;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
new file mode 100644
index 0000000000..c7187bcc5e
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ */
+#ifndef __A5XX_GPU_H__
+#define __A5XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* Bringing over the hack from the previous targets */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a5xx.xml.h"
+
+struct a5xx_gpu {
+ struct adreno_gpu base;
+
+ struct drm_gem_object *pm4_bo;
+ uint64_t pm4_iova;
+
+ struct drm_gem_object *pfp_bo;
+ uint64_t pfp_iova;
+
+ struct drm_gem_object *gpmu_bo;
+ uint64_t gpmu_iova;
+ uint32_t gpmu_dwords;
+
+ uint32_t lm_leakage;
+
+ struct msm_ringbuffer *cur_ring;
+ struct msm_ringbuffer *next_ring;
+
+ struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
+ struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS];
+ struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
+ uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
+
+ atomic_t preempt_state;
+ struct timer_list preempt_timer;
+
+ struct drm_gem_object *shadow_bo;
+ uint64_t shadow_iova;
+ uint32_t *shadow;
+
+ /* True if the microcode supports the WHERE_AM_I opcode */
+ bool has_whereami;
+};
+
+#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
+
+#ifdef CONFIG_DEBUG_FS
+void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
+#endif
+
+/*
+ * In order to do lockless preemption we use a simple state machine to progress
+ * through the process.
+ *
+ * PREEMPT_NONE - No preemption in progress. Next state: START.
+ * PREEMPT_START - The trigger is evaluating if preemption is possible. Next
+ * states: TRIGGERED, NONE
+ * PREEMPT_ABORT - An intermediate state before moving back to NONE. Next
+ * state: NONE.
+ * PREEMPT_TRIGGERED - A preemption has been executed on the hardware. Next
+ * states: FAULTED, PENDING
+ * PREEMPT_FAULTED - A preemption timed out (never completed). This will trigger
+ * recovery. Next state: N/A
+ * PREEMPT_PENDING - The preemption-complete interrupt fired - the callback is
+ * checking the success of the operation. Next states: FAULTED, NONE.
+ */
+
+enum preempt_state {
+ PREEMPT_NONE = 0,
+ PREEMPT_START,
+ PREEMPT_ABORT,
+ PREEMPT_TRIGGERED,
+ PREEMPT_FAULTED,
+ PREEMPT_PENDING,
+};
+
+/*
+ * struct a5xx_preempt_record is a shared buffer between the microcode and the
+ * CPU to store the state for preemption. The record itself is much larger
+ * (64k) but most of that is used by the CP for storage.
+ *
+ * There is a preemption record assigned per ringbuffer. When the CPU triggers a
+ * preemption, it fills out the record with the useful information (wptr, ring
+ * base, etc) and the microcode uses that information to set up the CP following
+ * the preemption. When a ring is switched out, the CP will save the ringbuffer
+ * state back to the record. In this way, once the records are properly set up
+ * the CPU can quickly switch back and forth between ringbuffers by only
+ * updating a few registers (often only the wptr).
+ *
+ * These are the CPU-aware registers in the record:
+ * @magic: Must always be 0x27C4BAFC
+ * @info: Type of the record - written 0 by the CPU, updated by the CP
+ * @data: Data field from SET_RENDER_MODE or a checkpoint. Written and used by
+ * the CP
+ * @cntl: Value of RB_CNTL written by CPU, save/restored by CP
+ * @rptr: Value of RB_RPTR written by CPU, save/restored by CP
+ * @wptr: Value of RB_WPTR written by CPU, save/restored by CP
+ * @rptr_addr: Value of RB_RPTR_ADDR written by CPU, save/restored by CP
+ * @rbase: Value of RB_BASE written by CPU, save/restored by CP
+ * @counter: GPU address of the storage area for the performance counters
+ */
+struct a5xx_preempt_record {
+ uint32_t magic;
+ uint32_t info;
+ uint32_t data;
+ uint32_t cntl;
+ uint32_t rptr;
+ uint32_t wptr;
+ uint64_t rptr_addr;
+ uint64_t rbase;
+ uint64_t counter;
+};
+
+/* Magic identifier for the preemption record */
+#define A5XX_PREEMPT_RECORD_MAGIC 0x27C4BAFCUL
+
+/*
+ * Even though the structure above is only a few bytes, we need a full 64k to
+ * store the entire preemption record from the CP
+ */
+#define A5XX_PREEMPT_RECORD_SIZE (64 * 1024)
+
+/*
+ * The preemption counter block is a storage area for the value of the
+ * preemption counters that are saved immediately before context switch. We
+ * append it on to the end of the allocation for the preemption record.
+ */
+#define A5XX_PREEMPT_COUNTER_SIZE (16 * 4)
+
+
+int a5xx_power_init(struct msm_gpu *gpu);
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
+
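+/*
+ * Poll 'reg' once per microsecond until (reg & mask) == value, returning
+ * -ETIMEDOUT if 'usecs' expires first
+ */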
+static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
+ uint32_t reg, uint32_t mask, uint32_t value)
+{
+ while (usecs--) {
+ udelay(1);
+ if ((gpu_read(gpu, reg) & mask) == value)
+ return 0;
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
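+/* GPU address of the rptr shadow slot for the given ring (one u32 per ring) */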
+#define shadowptr(a5xx_gpu, ring) ((a5xx_gpu)->shadow_iova + \
+ ((ring)->id * sizeof(uint32_t)))
+
+bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
+
+void a5xx_preempt_init(struct msm_gpu *gpu);
+void a5xx_preempt_hw_init(struct msm_gpu *gpu);
+void a5xx_preempt_trigger(struct msm_gpu *gpu);
+void a5xx_preempt_irq(struct msm_gpu *gpu);
+void a5xx_preempt_fini(struct msm_gpu *gpu);
+
+void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, bool sync);
+
+/* Return true if we are in a preempt state */
+static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
+{
+ int preempt_state = atomic_read(&a5xx_gpu->preempt_state);
+
+ return !(preempt_state == PREEMPT_NONE ||
+ preempt_state == PREEMPT_ABORT);
+}
+
+#endif /* __A5XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
new file mode 100644
index 0000000000..7705f80104
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/pm_opp.h>
+#include "a5xx_gpu.h"
+
+/*
+ * The GPMU data block is a block of shared registers that can be used to
+ * communicate back and forth. These "registers" are defined by convention
+ * with the GPMU firmware and are not bound to any specific hardware design.
+ */
+
+#define AGC_INIT_BASE REG_A5XX_GPMU_DATA_RAM_BASE
+#define AGC_INIT_MSG_MAGIC (AGC_INIT_BASE + 5)
+#define AGC_MSG_BASE (AGC_INIT_BASE + 7)
+
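+/*
+ * Layout of an AGC message in the GPMU data RAM: a state word, a command
+ * word, the payload size in bytes and then the payload itself
+ */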
+#define AGC_MSG_STATE (AGC_MSG_BASE + 0)
+#define AGC_MSG_COMMAND (AGC_MSG_BASE + 1)
+#define AGC_MSG_PAYLOAD_SIZE (AGC_MSG_BASE + 3)
+#define AGC_MSG_PAYLOAD(_o) ((AGC_MSG_BASE + 5) + (_o))
+
+#define AGC_POWER_CONFIG_PRODUCTION_ID 1
+#define AGC_INIT_MSG_VALUE 0xBABEFACE
+
+/* AGC_LM_CONFIG (A540+) */
+#define AGC_LM_CONFIG (136/4)
+#define AGC_LM_CONFIG_GPU_VERSION_SHIFT 17
+#define AGC_LM_CONFIG_ENABLE_GPMU_ADAPTIVE 1
+#define AGC_LM_CONFIG_THROTTLE_DISABLE (2 << 8)
+#define AGC_LM_CONFIG_ISENSE_ENABLE (1 << 4)
+#define AGC_LM_CONFIG_ENABLE_ERROR (3 << 4)
+#define AGC_LM_CONFIG_LLM_ENABLED (1 << 16)
+#define AGC_LM_CONFIG_BCL_DISABLED (1 << 24)
+
+#define AGC_LEVEL_CONFIG (140/4)
+
+static struct {
+ uint32_t reg;
+ uint32_t value;
+} a5xx_sequence_regs[] = {
+ { 0xB9A1, 0x00010303 },
+ { 0xB9A2, 0x13000000 },
+ { 0xB9A3, 0x00460020 },
+ { 0xB9A4, 0x10000000 },
+ { 0xB9A5, 0x040A1707 },
+ { 0xB9A6, 0x00010000 },
+ { 0xB9A7, 0x0E000904 },
+ { 0xB9A8, 0x10000000 },
+ { 0xB9A9, 0x01165000 },
+ { 0xB9AA, 0x000E0002 },
+ { 0xB9AB, 0x03884141 },
+ { 0xB9AC, 0x10000840 },
+ { 0xB9AD, 0x572A5000 },
+ { 0xB9AE, 0x00000003 },
+ { 0xB9AF, 0x00000000 },
+ { 0xB9B0, 0x10000000 },
+ { 0xB828, 0x6C204010 },
+ { 0xB829, 0x6C204011 },
+ { 0xB82A, 0x6C204012 },
+ { 0xB82B, 0x6C204013 },
+ { 0xB82C, 0x6C204014 },
+ { 0xB90F, 0x00000004 },
+ { 0xB910, 0x00000002 },
+ { 0xB911, 0x00000002 },
+ { 0xB912, 0x00000002 },
+ { 0xB913, 0x00000002 },
+ { 0xB92F, 0x00000004 },
+ { 0xB930, 0x00000005 },
+ { 0xB931, 0x00000005 },
+ { 0xB932, 0x00000005 },
+ { 0xB933, 0x00000005 },
+ { 0xB96F, 0x00000001 },
+ { 0xB970, 0x00000003 },
+ { 0xB94F, 0x00000004 },
+ { 0xB950, 0x0000000B },
+ { 0xB951, 0x0000000B },
+ { 0xB952, 0x0000000B },
+ { 0xB953, 0x0000000B },
+ { 0xB907, 0x00000019 },
+ { 0xB927, 0x00000019 },
+ { 0xB947, 0x00000019 },
+ { 0xB967, 0x00000019 },
+ { 0xB987, 0x00000019 },
+ { 0xB906, 0x00220001 },
+ { 0xB926, 0x00220001 },
+ { 0xB946, 0x00220001 },
+ { 0xB966, 0x00220001 },
+ { 0xB986, 0x00300000 },
+ { 0xAC40, 0x0340FF41 },
+ { 0xAC41, 0x03BEFED0 },
+ { 0xAC42, 0x00331FED },
+ { 0xAC43, 0x021FFDD3 },
+ { 0xAC44, 0x5555AAAA },
+ { 0xAC45, 0x5555AAAA },
+ { 0xB9BA, 0x00000008 },
+};
+
+/*
+ * Get the actual voltage value for the operating point at the specified
+ * frequency
+ */
+static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct dev_pm_opp *opp;
+ u32 ret = 0;
+
+ opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true);
+
+ if (!IS_ERR(opp)) {
+ ret = dev_pm_opp_get_voltage(opp) / 1000;
+ dev_pm_opp_put(opp);
+ }
+
+ return ret;
+}
+
+/* Setup thermal limit management */
+static void a530_lm_setup(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ unsigned int i;
+
+ /* Write the block of sequence registers */
+ for (i = 0; i < ARRAY_SIZE(a5xx_sequence_regs); i++)
+ gpu_write(gpu, a5xx_sequence_regs[i].reg,
+ a5xx_sequence_regs[i].value);
+
+ /* Hard code the A530 GPU thermal sensor ID for the GPMU */
+ gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, 0x60007);
+ gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
+ gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);
+
+	/* Until we get clock scaling, 0 is always the active power level */
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);
+
+ gpu_write(gpu, REG_A5XX_GPMU_BASE_LEAKAGE, a5xx_gpu->lm_leakage);
+
+ /* The threshold is fixed at 6000 for A530 */
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000);
+
+ gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+ gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x00201FF1);
+
+ /* Write the voltage table */
+ gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+ gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x201FF1);
+
+ gpu_write(gpu, AGC_MSG_STATE, 1);
+ gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
+
+ /* Write the max power - hard coded to 5448 for A530 */
+ gpu_write(gpu, AGC_MSG_PAYLOAD(0), 5448);
+ gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
+
+ /*
+ * For now just write the one voltage level - we will do more when we
+ * can do scaling
+ */
+ gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));
+ gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);
+
+ gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, 4 * sizeof(uint32_t));
+ gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
+}
+
+#define PAYLOAD_SIZE(_size) ((_size) * sizeof(u32))
+#define LM_DCVS_LIMIT 1
+#define LEVEL_CONFIG ~(0x303)
+
+static void a540_lm_setup(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ u32 config;
+
+ /* The battery current limiter isn't enabled for A540 */
+ config = AGC_LM_CONFIG_BCL_DISABLED;
+ config |= adreno_patchid(adreno_gpu) << AGC_LM_CONFIG_GPU_VERSION_SHIFT;
+
+ /* For now disable GPMU side throttling */
+ config |= AGC_LM_CONFIG_THROTTLE_DISABLE;
+
+	/* Until we get clock scaling, 0 is always the active power level */
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);
+
+ /* Fixed at 6000 for now */
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000);
+
+ gpu_write(gpu, AGC_MSG_STATE, 0x80000001);
+ gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
+
+ gpu_write(gpu, AGC_MSG_PAYLOAD(0), 5448);
+ gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
+
+ gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));
+ gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);
+
+ gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LM_CONFIG), config);
+ gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LEVEL_CONFIG), LEVEL_CONFIG);
+ gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE,
+ PAYLOAD_SIZE(AGC_LEVEL_CONFIG + 1));
+
+ gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
+}
+
+/* Enable SP/TP power collapse */
+static void a5xx_pc_init(struct msm_gpu *gpu)
+{
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x7F);
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_BINNING_CTRL, 0);
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0xA0080);
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x600040);
+}
+
+/* Enable the GPMU microcontroller */
+static int a5xx_gpmu_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ if (!a5xx_gpu->gpmu_dwords)
+ return 0;
+
+ /* Turn off protected mode for this operation */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Kick off the IB to load the GPMU microcode */
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova));
+ OUT_RING(ring, a5xx_gpu->gpmu_dwords);
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ a5xx_flush(gpu, ring, true);
+
+ if (!a5xx_idle(gpu, ring)) {
+ DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",
+ gpu->name);
+ return -EINVAL;
+ }
+
+ if (adreno_is_a530(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);
+
+ /* Kick off the GPMU */
+ gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);
+
+ /*
+	 * Wait for the GPMU to respond. It isn't fatal if it doesn't; we just
+ * won't have advanced power collapse.
+ */
+ if (spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF,
+ 0xBABEFACE))
+ DRM_ERROR("%s: GPMU firmware initialization timed out\n",
+ gpu->name);
+
+ if (!adreno_is_a530(adreno_gpu)) {
+ u32 val = gpu_read(gpu, REG_A5XX_GPMU_GENERAL_1);
+
+ if (val)
+ DRM_ERROR("%s: GPMU firmware initialization failed: %d\n",
+ gpu->name, val);
+ }
+
+ return 0;
+}
+
+/* Enable limits management */
+static void a5xx_lm_enable(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ /* This init sequence only applies to A530 */
+ if (!adreno_is_a530(adreno_gpu))
+ return;
+
+ gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
+ gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
+ gpu_write(gpu, REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK, 0x50000);
+ gpu_write(gpu, REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL, 0x30000);
+
+ gpu_write(gpu, REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL, 0x011);
+}
+
+int a5xx_power_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
+ /* Not all A5xx chips have a GPMU */
+ if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu)))
+ return 0;
+
+ /* Set up the limits management */
+ if (adreno_is_a530(adreno_gpu))
+ a530_lm_setup(gpu);
+ else if (adreno_is_a540(adreno_gpu))
+ a540_lm_setup(gpu);
+
+	/* Set up SP/TP power collapse */
+ a5xx_pc_init(gpu);
+
+ /* Start the GPMU */
+ ret = a5xx_gpmu_init(gpu);
+ if (ret)
+ return ret;
+
+ /* Start the limits management */
+ a5xx_lm_enable(gpu);
+
+ return 0;
+}
+
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct drm_device *drm = gpu->dev;
+ uint32_t dwords = 0, offset = 0, bosize;
+ unsigned int *data, *ptr, *cmds;
+ unsigned int cmds_size;
+
+ if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu)))
+ return;
+
+ if (a5xx_gpu->gpmu_bo)
+ return;
+
+ data = (unsigned int *) adreno_gpu->fw[ADRENO_FW_GPMU]->data;
+
+ /*
+ * The first dword is the size of the remaining data in dwords. Use it
+ * as a checksum of sorts and make sure it matches the actual size of
+ * the firmware that we read
+ */
+
+ if (adreno_gpu->fw[ADRENO_FW_GPMU]->size < 8 ||
+ (data[0] < 2) || (data[0] >=
+ (adreno_gpu->fw[ADRENO_FW_GPMU]->size >> 2)))
+ return;
+
+ /* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
+ if (data[1] != 2)
+ return;
+
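+	/* The command stream starts data[2] + 3 dwords into the image */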
+ cmds = data + data[2] + 3;
+ cmds_size = data[0] - data[2] - 2;
+
+ /*
+	 * A single type4 opcode can only have so many values attached, so
+	 * add enough opcodes to load all of the commands
+ */
+ bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
+
+ ptr = msm_gem_kernel_new(drm, bosize,
+ MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace,
+ &a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
+ if (IS_ERR(ptr))
+ return;
+
+ msm_gem_object_set_name(a5xx_gpu->gpmu_bo, "gpmufw");
+
+ while (cmds_size > 0) {
+ int i;
+ uint32_t _size = cmds_size > TYPE4_MAX_PAYLOAD ?
+ TYPE4_MAX_PAYLOAD : cmds_size;
+
+ ptr[dwords++] = PKT4(REG_A5XX_GPMU_INST_RAM_BASE + offset,
+ _size);
+
+ for (i = 0; i < _size; i++)
+ ptr[dwords++] = *cmds++;
+
+ offset += _size;
+ cmds_size -= _size;
+ }
+
+ msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
+ a5xx_gpu->gpmu_dwords = dwords;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
new file mode 100644
index 0000000000..f58dd564d1
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ */
+
+#include "msm_gem.h"
+#include "a5xx_gpu.h"
+
+/*
+ * Try to transition the preemption state from old to new. Return
+ * true on success or false if the original state wasn't 'old'
+ */
+static inline bool try_preempt_state(struct a5xx_gpu *a5xx_gpu,
+ enum preempt_state old, enum preempt_state new)
+{
+ enum preempt_state cur = atomic_cmpxchg(&a5xx_gpu->preempt_state,
+ old, new);
+
+ return (cur == old);
+}
+
+/*
+ * Force the preemption state to the specified state. This is used in cases
+ * where the current state is known and won't change
+ */
+static inline void set_preempt_state(struct a5xx_gpu *gpu,
+ enum preempt_state new)
+{
+ /*
+ * preempt_state may be read by other cores trying to trigger a
+ * preemption or in the interrupt handler so barriers are needed
+ * before...
+ */
+ smp_mb__before_atomic();
+ atomic_set(&gpu->preempt_state, new);
+	/* ... and after */
+ smp_mb__after_atomic();
+}
+
+/* Write the most recent wptr for the given ring into the hardware */
+static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ unsigned long flags;
+ uint32_t wptr;
+
+ if (!ring)
+ return;
+
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+ wptr = get_wptr(ring);
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
+
+ gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
+}
+
+/* Return the highest priority ringbuffer with something in it */
+static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
+{
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ bool empty;
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+ empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
+
+ if (!empty)
+ return ring;
+ }
+
+ return NULL;
+}
+
+static void a5xx_preempt_timer(struct timer_list *t)
+{
+ struct a5xx_gpu *a5xx_gpu = from_timer(a5xx_gpu, t, preempt_timer);
+ struct msm_gpu *gpu = &a5xx_gpu->base.base;
+ struct drm_device *dev = gpu->dev;
+
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
+ return;
+
+ DRM_DEV_ERROR(dev->dev, "%s: preemption timed out\n", gpu->name);
+ kthread_queue_work(gpu->worker, &gpu->recover_work);
+}
+
+/* Try to trigger a preemption switch */
+void a5xx_preempt_trigger(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ unsigned long flags;
+ struct msm_ringbuffer *ring;
+
+ if (gpu->nr_rings == 1)
+ return;
+
+ /*
+ * Try to start preemption by moving from NONE to START. If
+ * unsuccessful, a preemption is already in flight
+ */
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
+ return;
+
+ /* Get the next ring to preempt to */
+ ring = get_next_ring(gpu);
+
+ /*
+	 * If no ring is populated or the highest priority ring is the current
+	 * one, do nothing except update the wptr to the latest and greatest
+ */
+ if (!ring || (a5xx_gpu->cur_ring == ring)) {
+ /*
+		 * It's possible that while a preemption request is in progress
+ * from an irq context, a user context trying to submit might
+ * fail to update the write pointer, because it determines
+ * that the preempt state is not PREEMPT_NONE.
+ *
+ * Close the race by introducing an intermediate
+ * state PREEMPT_ABORT to let the submit path
+ * know that the ringbuffer is not going to change
+		 * and that it can safely update the write pointer.
+ */
+
+ set_preempt_state(a5xx_gpu, PREEMPT_ABORT);
+ update_wptr(gpu, a5xx_gpu->cur_ring);
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+ return;
+ }
+
+ /* Make sure the wptr doesn't update while we're in motion */
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+ a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
+
+ /* Set the address of the incoming preemption record */
+ gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
+ a5xx_gpu->preempt_iova[ring->id]);
+
+ a5xx_gpu->next_ring = ring;
+
+ /* Start a timer to catch a stuck preemption */
+ mod_timer(&a5xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000));
+
+ /* Set the preemption state to triggered */
+ set_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED);
+
+ /* Make sure everything is written before hitting the button */
+ wmb();
+
+ /* And actually start the preemption */
+ gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
+}
+
+void a5xx_preempt_irq(struct msm_gpu *gpu)
+{
+ uint32_t status;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct drm_device *dev = gpu->dev;
+
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING))
+ return;
+
+ /* Delete the preemption watchdog timer */
+ del_timer(&a5xx_gpu->preempt_timer);
+
+ /*
+ * The hardware should be setting CP_CONTEXT_SWITCH_CNTL to zero before
+	 * firing the interrupt, but there is a non-zero chance of a hardware
+ * condition or a software race that could set it again before we have a
+ * chance to finish. If that happens, log and go for recovery
+ */
+ status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
+ if (unlikely(status)) {
+ set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
+ DRM_DEV_ERROR(dev->dev, "%s: Preemption failed to complete\n",
+ gpu->name);
+ kthread_queue_work(gpu->worker, &gpu->recover_work);
+ return;
+ }
+
+ a5xx_gpu->cur_ring = a5xx_gpu->next_ring;
+ a5xx_gpu->next_ring = NULL;
+
+ update_wptr(gpu, a5xx_gpu->cur_ring);
+
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+}
+
+void a5xx_preempt_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int i;
+
+ /* Always come up on rb 0 */
+ a5xx_gpu->cur_ring = gpu->rb[0];
+
+ /* No preemption if we only have one ring */
+ if (gpu->nr_rings == 1)
+ return;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ a5xx_gpu->preempt[i]->wptr = 0;
+ a5xx_gpu->preempt[i]->rptr = 0;
+ a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
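+		/* Point each record's RB_RPTR_ADDR at that ring's shadow slot */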
+ a5xx_gpu->preempt[i]->rptr_addr = shadowptr(a5xx_gpu, gpu->rb[i]);
+ }
+
+ /* Write a 0 to signal that we aren't switching pagetables */
+ gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO, 0);
+
+ /* Reset the preemption state */
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+}
+
+static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
+ struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = &a5xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct a5xx_preempt_record *ptr;
+ void *counters;
+ struct drm_gem_object *bo = NULL, *counters_bo = NULL;
+ u64 iova = 0, counters_iova = 0;
+
+ ptr = msm_gem_kernel_new(gpu->dev,
+ A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
+ MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
+
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+
+ /* The buffer to store counters needs to be unprivileged */
+ counters = msm_gem_kernel_new(gpu->dev,
+ A5XX_PREEMPT_COUNTER_SIZE,
+ MSM_BO_WC, gpu->aspace, &counters_bo, &counters_iova);
+ if (IS_ERR(counters)) {
+ msm_gem_kernel_put(bo, gpu->aspace);
+ return PTR_ERR(counters);
+ }
+
+ msm_gem_object_set_name(bo, "preempt");
+ msm_gem_object_set_name(counters_bo, "preempt_counters");
+
+ a5xx_gpu->preempt_bo[ring->id] = bo;
+ a5xx_gpu->preempt_counters_bo[ring->id] = counters_bo;
+ a5xx_gpu->preempt_iova[ring->id] = iova;
+ a5xx_gpu->preempt[ring->id] = ptr;
+
+ /* Set up the defaults on the preemption record */
+
+ ptr->magic = A5XX_PREEMPT_RECORD_MAGIC;
+ ptr->info = 0;
+ ptr->data = 0;
+ ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE;
+
+ ptr->counter = counters_iova;
+
+ return 0;
+}
+
+void a5xx_preempt_fini(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace);
+ msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i], gpu->aspace);
+ }
+}
+
+void a5xx_preempt_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int i;
+
+ /* No preemption if we only have one ring */
+ if (gpu->nr_rings <= 1)
+ return;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ if (preempt_init_ring(a5xx_gpu, gpu->rb[i])) {
+ /*
+ * On any failure our adventure is over. Clean up and
+ * set nr_rings to 1 to force preemption off
+ */
+ a5xx_preempt_fini(gpu);
+ gpu->nr_rings = 1;
+
+ return;
+ }
+ }
+
+ timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
new file mode 100644
index 0000000000..1c051535fd
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -0,0 +1,8256 @@
+#ifndef A6XX_XML
+#define A6XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52)
+
+Copyright (C) 2013-2023 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a6xx_tile_mode {
+ TILE6_LINEAR = 0,
+ TILE6_2 = 2,
+ TILE6_3 = 3,
+};
+
+enum a6xx_format {
+ FMT6_A8_UNORM = 2,
+ FMT6_8_UNORM = 3,
+ FMT6_8_SNORM = 4,
+ FMT6_8_UINT = 5,
+ FMT6_8_SINT = 6,
+ FMT6_4_4_4_4_UNORM = 8,
+ FMT6_5_5_5_1_UNORM = 10,
+ FMT6_1_5_5_5_UNORM = 12,
+ FMT6_5_6_5_UNORM = 14,
+ FMT6_8_8_UNORM = 15,
+ FMT6_8_8_SNORM = 16,
+ FMT6_8_8_UINT = 17,
+ FMT6_8_8_SINT = 18,
+ FMT6_L8_A8_UNORM = 19,
+ FMT6_16_UNORM = 21,
+ FMT6_16_SNORM = 22,
+ FMT6_16_FLOAT = 23,
+ FMT6_16_UINT = 24,
+ FMT6_16_SINT = 25,
+ FMT6_8_8_8_UNORM = 33,
+ FMT6_8_8_8_SNORM = 34,
+ FMT6_8_8_8_UINT = 35,
+ FMT6_8_8_8_SINT = 36,
+ FMT6_8_8_8_8_UNORM = 48,
+ FMT6_8_8_8_X8_UNORM = 49,
+ FMT6_8_8_8_8_SNORM = 50,
+ FMT6_8_8_8_8_UINT = 51,
+ FMT6_8_8_8_8_SINT = 52,
+ FMT6_9_9_9_E5_FLOAT = 53,
+ FMT6_10_10_10_2_UNORM = 54,
+ FMT6_10_10_10_2_UNORM_DEST = 55,
+ FMT6_10_10_10_2_SNORM = 57,
+ FMT6_10_10_10_2_UINT = 58,
+ FMT6_10_10_10_2_SINT = 59,
+ FMT6_11_11_10_FLOAT = 66,
+ FMT6_16_16_UNORM = 67,
+ FMT6_16_16_SNORM = 68,
+ FMT6_16_16_FLOAT = 69,
+ FMT6_16_16_UINT = 70,
+ FMT6_16_16_SINT = 71,
+ FMT6_32_UNORM = 72,
+ FMT6_32_SNORM = 73,
+ FMT6_32_FLOAT = 74,
+ FMT6_32_UINT = 75,
+ FMT6_32_SINT = 76,
+ FMT6_32_FIXED = 77,
+ FMT6_16_16_16_UNORM = 88,
+ FMT6_16_16_16_SNORM = 89,
+ FMT6_16_16_16_FLOAT = 90,
+ FMT6_16_16_16_UINT = 91,
+ FMT6_16_16_16_SINT = 92,
+ FMT6_16_16_16_16_UNORM = 96,
+ FMT6_16_16_16_16_SNORM = 97,
+ FMT6_16_16_16_16_FLOAT = 98,
+ FMT6_16_16_16_16_UINT = 99,
+ FMT6_16_16_16_16_SINT = 100,
+ FMT6_32_32_UNORM = 101,
+ FMT6_32_32_SNORM = 102,
+ FMT6_32_32_FLOAT = 103,
+ FMT6_32_32_UINT = 104,
+ FMT6_32_32_SINT = 105,
+ FMT6_32_32_FIXED = 106,
+ FMT6_32_32_32_UNORM = 112,
+ FMT6_32_32_32_SNORM = 113,
+ FMT6_32_32_32_UINT = 114,
+ FMT6_32_32_32_SINT = 115,
+ FMT6_32_32_32_FLOAT = 116,
+ FMT6_32_32_32_FIXED = 117,
+ FMT6_32_32_32_32_UNORM = 128,
+ FMT6_32_32_32_32_SNORM = 129,
+ FMT6_32_32_32_32_FLOAT = 130,
+ FMT6_32_32_32_32_UINT = 131,
+ FMT6_32_32_32_32_SINT = 132,
+ FMT6_32_32_32_32_FIXED = 133,
+ FMT6_G8R8B8R8_422_UNORM = 140,
+ FMT6_R8G8R8B8_422_UNORM = 141,
+ FMT6_R8_G8B8_2PLANE_420_UNORM = 142,
+ FMT6_NV21 = 143,
+ FMT6_R8_G8_B8_3PLANE_420_UNORM = 144,
+ FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8 = 145,
+ FMT6_NV12_Y = 148,
+ FMT6_NV12_UV = 149,
+ FMT6_NV12_VU = 150,
+ FMT6_NV12_4R = 151,
+ FMT6_NV12_4R_Y = 152,
+ FMT6_NV12_4R_UV = 153,
+ FMT6_P010 = 154,
+ FMT6_P010_Y = 155,
+ FMT6_P010_UV = 156,
+ FMT6_TP10 = 157,
+ FMT6_TP10_Y = 158,
+ FMT6_TP10_UV = 159,
+ FMT6_Z24_UNORM_S8_UINT = 160,
+ FMT6_ETC2_RG11_UNORM = 171,
+ FMT6_ETC2_RG11_SNORM = 172,
+ FMT6_ETC2_R11_UNORM = 173,
+ FMT6_ETC2_R11_SNORM = 174,
+ FMT6_ETC1 = 175,
+ FMT6_ETC2_RGB8 = 176,
+ FMT6_ETC2_RGBA8 = 177,
+ FMT6_ETC2_RGB8A1 = 178,
+ FMT6_DXT1 = 179,
+ FMT6_DXT3 = 180,
+ FMT6_DXT5 = 181,
+ FMT6_RGTC1_UNORM = 183,
+ FMT6_RGTC1_SNORM = 184,
+ FMT6_RGTC2_UNORM = 187,
+ FMT6_RGTC2_SNORM = 188,
+ FMT6_BPTC_UFLOAT = 190,
+ FMT6_BPTC_FLOAT = 191,
+ FMT6_BPTC = 192,
+ FMT6_ASTC_4x4 = 193,
+ FMT6_ASTC_5x4 = 194,
+ FMT6_ASTC_5x5 = 195,
+ FMT6_ASTC_6x5 = 196,
+ FMT6_ASTC_6x6 = 197,
+ FMT6_ASTC_8x5 = 198,
+ FMT6_ASTC_8x6 = 199,
+ FMT6_ASTC_8x8 = 200,
+ FMT6_ASTC_10x5 = 201,
+ FMT6_ASTC_10x6 = 202,
+ FMT6_ASTC_10x8 = 203,
+ FMT6_ASTC_10x10 = 204,
+ FMT6_ASTC_12x10 = 205,
+ FMT6_ASTC_12x12 = 206,
+ FMT6_Z24_UINT_S8_UINT = 234,
+ FMT6_NONE = 255,
+};
+
+enum a6xx_polygon_mode {
+ POLYMODE6_POINTS = 1,
+ POLYMODE6_LINES = 2,
+ POLYMODE6_TRIANGLES = 3,
+};
+
+enum a6xx_depth_format {
+ DEPTH6_NONE = 0,
+ DEPTH6_16 = 1,
+ DEPTH6_24_8 = 2,
+ DEPTH6_32 = 4,
+};
+
+enum a6xx_shader_id {
+ A6XX_TP0_TMO_DATA = 9,
+ A6XX_TP0_SMO_DATA = 10,
+ A6XX_TP0_MIPMAP_BASE_DATA = 11,
+ A6XX_TP1_TMO_DATA = 25,
+ A6XX_TP1_SMO_DATA = 26,
+ A6XX_TP1_MIPMAP_BASE_DATA = 27,
+ A6XX_SP_INST_DATA = 41,
+ A6XX_SP_LB_0_DATA = 42,
+ A6XX_SP_LB_1_DATA = 43,
+ A6XX_SP_LB_2_DATA = 44,
+ A6XX_SP_LB_3_DATA = 45,
+ A6XX_SP_LB_4_DATA = 46,
+ A6XX_SP_LB_5_DATA = 47,
+ A6XX_SP_CB_BINDLESS_DATA = 48,
+ A6XX_SP_CB_LEGACY_DATA = 49,
+ A6XX_SP_UAV_DATA = 50,
+ A6XX_SP_INST_TAG = 51,
+ A6XX_SP_CB_BINDLESS_TAG = 52,
+ A6XX_SP_TMO_UMO_TAG = 53,
+ A6XX_SP_SMO_TAG = 54,
+ A6XX_SP_STATE_DATA = 55,
+ A6XX_HLSQ_CHUNK_CVS_RAM = 73,
+ A6XX_HLSQ_CHUNK_CPS_RAM = 74,
+ A6XX_HLSQ_CHUNK_CVS_RAM_TAG = 75,
+ A6XX_HLSQ_CHUNK_CPS_RAM_TAG = 76,
+ A6XX_HLSQ_ICB_CVS_CB_BASE_TAG = 77,
+ A6XX_HLSQ_ICB_CPS_CB_BASE_TAG = 78,
+ A6XX_HLSQ_CVS_MISC_RAM = 80,
+ A6XX_HLSQ_CPS_MISC_RAM = 81,
+ A6XX_HLSQ_INST_RAM = 82,
+ A6XX_HLSQ_GFX_CVS_CONST_RAM = 83,
+ A6XX_HLSQ_GFX_CPS_CONST_RAM = 84,
+ A6XX_HLSQ_CVS_MISC_RAM_TAG = 85,
+ A6XX_HLSQ_CPS_MISC_RAM_TAG = 86,
+ A6XX_HLSQ_INST_RAM_TAG = 87,
+ A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 88,
+ A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 89,
+ A6XX_HLSQ_PWR_REST_RAM = 90,
+ A6XX_HLSQ_PWR_REST_TAG = 91,
+ A6XX_HLSQ_DATAPATH_META = 96,
+ A6XX_HLSQ_FRONTEND_META = 97,
+ A6XX_HLSQ_INDIRECT_META = 98,
+ A6XX_HLSQ_BACKEND_META = 99,
+ A6XX_SP_LB_6_DATA = 112,
+ A6XX_SP_LB_7_DATA = 113,
+ A6XX_HLSQ_INST_RAM_1 = 115,
+};
+
+enum a6xx_debugbus_id {
+ A6XX_DBGBUS_CP = 1,
+ A6XX_DBGBUS_RBBM = 2,
+ A6XX_DBGBUS_VBIF = 3,
+ A6XX_DBGBUS_HLSQ = 4,
+ A6XX_DBGBUS_UCHE = 5,
+ A6XX_DBGBUS_DPM = 6,
+ A6XX_DBGBUS_TESS = 7,
+ A6XX_DBGBUS_PC = 8,
+ A6XX_DBGBUS_VFDP = 9,
+ A6XX_DBGBUS_VPC = 10,
+ A6XX_DBGBUS_TSE = 11,
+ A6XX_DBGBUS_RAS = 12,
+ A6XX_DBGBUS_VSC = 13,
+ A6XX_DBGBUS_COM = 14,
+ A6XX_DBGBUS_LRZ = 16,
+ A6XX_DBGBUS_A2D = 17,
+ A6XX_DBGBUS_CCUFCHE = 18,
+ A6XX_DBGBUS_GMU_CX = 19,
+ A6XX_DBGBUS_RBP = 20,
+ A6XX_DBGBUS_DCS = 21,
+ A6XX_DBGBUS_DBGC = 22,
+ A6XX_DBGBUS_CX = 23,
+ A6XX_DBGBUS_GMU_GX = 24,
+ A6XX_DBGBUS_TPFCHE = 25,
+ A6XX_DBGBUS_GBIF_GX = 26,
+ A6XX_DBGBUS_GPC = 29,
+ A6XX_DBGBUS_LARC = 30,
+ A6XX_DBGBUS_HLSQ_SPTP = 31,
+ A6XX_DBGBUS_RB_0 = 32,
+ A6XX_DBGBUS_RB_1 = 33,
+ A6XX_DBGBUS_RB_2 = 34,
+ A6XX_DBGBUS_UCHE_WRAPPER = 36,
+ A6XX_DBGBUS_CCU_0 = 40,
+ A6XX_DBGBUS_CCU_1 = 41,
+ A6XX_DBGBUS_CCU_2 = 42,
+ A6XX_DBGBUS_VFD_0 = 56,
+ A6XX_DBGBUS_VFD_1 = 57,
+ A6XX_DBGBUS_VFD_2 = 58,
+ A6XX_DBGBUS_VFD_3 = 59,
+ A6XX_DBGBUS_VFD_4 = 60,
+ A6XX_DBGBUS_VFD_5 = 61,
+ A6XX_DBGBUS_SP_0 = 64,
+ A6XX_DBGBUS_SP_1 = 65,
+ A6XX_DBGBUS_SP_2 = 66,
+ A6XX_DBGBUS_TPL1_0 = 72,
+ A6XX_DBGBUS_TPL1_1 = 73,
+ A6XX_DBGBUS_TPL1_2 = 74,
+ A6XX_DBGBUS_TPL1_3 = 75,
+ A6XX_DBGBUS_TPL1_4 = 76,
+ A6XX_DBGBUS_TPL1_5 = 77,
+ A6XX_DBGBUS_SPTP_0 = 88,
+ A6XX_DBGBUS_SPTP_1 = 89,
+ A6XX_DBGBUS_SPTP_2 = 90,
+ A6XX_DBGBUS_SPTP_3 = 91,
+ A6XX_DBGBUS_SPTP_4 = 92,
+ A6XX_DBGBUS_SPTP_5 = 93,
+};
+
+enum a6xx_cp_perfcounter_select {
+ PERF_CP_ALWAYS_COUNT = 0,
+ PERF_CP_BUSY_GFX_CORE_IDLE = 1,
+ PERF_CP_BUSY_CYCLES = 2,
+ PERF_CP_NUM_PREEMPTIONS = 3,
+ PERF_CP_PREEMPTION_REACTION_DELAY = 4,
+ PERF_CP_PREEMPTION_SWITCH_OUT_TIME = 5,
+ PERF_CP_PREEMPTION_SWITCH_IN_TIME = 6,
+ PERF_CP_DEAD_DRAWS_IN_BIN_RENDER = 7,
+ PERF_CP_PREDICATED_DRAWS_KILLED = 8,
+ PERF_CP_MODE_SWITCH = 9,
+ PERF_CP_ZPASS_DONE = 10,
+ PERF_CP_CONTEXT_DONE = 11,
+ PERF_CP_CACHE_FLUSH = 12,
+ PERF_CP_LONG_PREEMPTIONS = 13,
+ PERF_CP_SQE_I_CACHE_STARVE = 14,
+ PERF_CP_SQE_IDLE = 15,
+ PERF_CP_SQE_PM4_STARVE_RB_IB = 16,
+ PERF_CP_SQE_PM4_STARVE_SDS = 17,
+ PERF_CP_SQE_MRB_STARVE = 18,
+ PERF_CP_SQE_RRB_STARVE = 19,
+ PERF_CP_SQE_VSD_STARVE = 20,
+ PERF_CP_VSD_DECODE_STARVE = 21,
+ PERF_CP_SQE_PIPE_OUT_STALL = 22,
+ PERF_CP_SQE_SYNC_STALL = 23,
+ PERF_CP_SQE_PM4_WFI_STALL = 24,
+ PERF_CP_SQE_SYS_WFI_STALL = 25,
+ PERF_CP_SQE_T4_EXEC = 26,
+ PERF_CP_SQE_LOAD_STATE_EXEC = 27,
+ PERF_CP_SQE_SAVE_SDS_STATE = 28,
+ PERF_CP_SQE_DRAW_EXEC = 29,
+ PERF_CP_SQE_CTXT_REG_BUNCH_EXEC = 30,
+ PERF_CP_SQE_EXEC_PROFILED = 31,
+ PERF_CP_MEMORY_POOL_EMPTY = 32,
+ PERF_CP_MEMORY_POOL_SYNC_STALL = 33,
+ PERF_CP_MEMORY_POOL_ABOVE_THRESH = 34,
+ PERF_CP_AHB_WR_STALL_PRE_DRAWS = 35,
+ PERF_CP_AHB_STALL_SQE_GMU = 36,
+ PERF_CP_AHB_STALL_SQE_WR_OTHER = 37,
+ PERF_CP_AHB_STALL_SQE_RD_OTHER = 38,
+ PERF_CP_CLUSTER0_EMPTY = 39,
+ PERF_CP_CLUSTER1_EMPTY = 40,
+ PERF_CP_CLUSTER2_EMPTY = 41,
+ PERF_CP_CLUSTER3_EMPTY = 42,
+ PERF_CP_CLUSTER4_EMPTY = 43,
+ PERF_CP_CLUSTER5_EMPTY = 44,
+ PERF_CP_PM4_DATA = 45,
+ PERF_CP_PM4_HEADERS = 46,
+ PERF_CP_VBIF_READ_BEATS = 47,
+ PERF_CP_VBIF_WRITE_BEATS = 48,
+ PERF_CP_SQE_INSTR_COUNTER = 49,
+};
+
+enum a6xx_rbbm_perfcounter_select {
+ PERF_RBBM_ALWAYS_COUNT = 0,
+ PERF_RBBM_ALWAYS_ON = 1,
+ PERF_RBBM_TSE_BUSY = 2,
+ PERF_RBBM_RAS_BUSY = 3,
+ PERF_RBBM_PC_DCALL_BUSY = 4,
+ PERF_RBBM_PC_VSD_BUSY = 5,
+ PERF_RBBM_STATUS_MASKED = 6,
+ PERF_RBBM_COM_BUSY = 7,
+ PERF_RBBM_DCOM_BUSY = 8,
+ PERF_RBBM_VBIF_BUSY = 9,
+ PERF_RBBM_VSC_BUSY = 10,
+ PERF_RBBM_TESS_BUSY = 11,
+ PERF_RBBM_UCHE_BUSY = 12,
+ PERF_RBBM_HLSQ_BUSY = 13,
+};
+
+enum a6xx_pc_perfcounter_select {
+ PERF_PC_BUSY_CYCLES = 0,
+ PERF_PC_WORKING_CYCLES = 1,
+ PERF_PC_STALL_CYCLES_VFD = 2,
+ PERF_PC_STALL_CYCLES_TSE = 3,
+ PERF_PC_STALL_CYCLES_VPC = 4,
+ PERF_PC_STALL_CYCLES_UCHE = 5,
+ PERF_PC_STALL_CYCLES_TESS = 6,
+ PERF_PC_STALL_CYCLES_TSE_ONLY = 7,
+ PERF_PC_STALL_CYCLES_VPC_ONLY = 8,
+ PERF_PC_PASS1_TF_STALL_CYCLES = 9,
+ PERF_PC_STARVE_CYCLES_FOR_INDEX = 10,
+ PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR = 11,
+ PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM = 12,
+ PERF_PC_STARVE_CYCLES_FOR_POSITION = 13,
+ PERF_PC_STARVE_CYCLES_DI = 14,
+ PERF_PC_VIS_STREAMS_LOADED = 15,
+ PERF_PC_INSTANCES = 16,
+ PERF_PC_VPC_PRIMITIVES = 17,
+ PERF_PC_DEAD_PRIM = 18,
+ PERF_PC_LIVE_PRIM = 19,
+ PERF_PC_VERTEX_HITS = 20,
+ PERF_PC_IA_VERTICES = 21,
+ PERF_PC_IA_PRIMITIVES = 22,
+ PERF_PC_GS_PRIMITIVES = 23,
+ PERF_PC_HS_INVOCATIONS = 24,
+ PERF_PC_DS_INVOCATIONS = 25,
+ PERF_PC_VS_INVOCATIONS = 26,
+ PERF_PC_GS_INVOCATIONS = 27,
+ PERF_PC_DS_PRIMITIVES = 28,
+ PERF_PC_VPC_POS_DATA_TRANSACTION = 29,
+ PERF_PC_3D_DRAWCALLS = 30,
+ PERF_PC_2D_DRAWCALLS = 31,
+ PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS = 32,
+ PERF_TESS_BUSY_CYCLES = 33,
+ PERF_TESS_WORKING_CYCLES = 34,
+ PERF_TESS_STALL_CYCLES_PC = 35,
+ PERF_TESS_STARVE_CYCLES_PC = 36,
+ PERF_PC_TSE_TRANSACTION = 37,
+ PERF_PC_TSE_VERTEX = 38,
+ PERF_PC_TESS_PC_UV_TRANS = 39,
+ PERF_PC_TESS_PC_UV_PATCHES = 40,
+ PERF_PC_TESS_FACTOR_TRANS = 41,
+};
+
+enum a6xx_vfd_perfcounter_select {
+ PERF_VFD_BUSY_CYCLES = 0,
+ PERF_VFD_STALL_CYCLES_UCHE = 1,
+ PERF_VFD_STALL_CYCLES_VPC_ALLOC = 2,
+ PERF_VFD_STALL_CYCLES_SP_INFO = 3,
+ PERF_VFD_STALL_CYCLES_SP_ATTR = 4,
+ PERF_VFD_STARVE_CYCLES_UCHE = 5,
+ PERF_VFD_RBUFFER_FULL = 6,
+ PERF_VFD_ATTR_INFO_FIFO_FULL = 7,
+ PERF_VFD_DECODED_ATTRIBUTE_BYTES = 8,
+ PERF_VFD_NUM_ATTRIBUTES = 9,
+ PERF_VFD_UPPER_SHADER_FIBERS = 10,
+ PERF_VFD_LOWER_SHADER_FIBERS = 11,
+ PERF_VFD_MODE_0_FIBERS = 12,
+ PERF_VFD_MODE_1_FIBERS = 13,
+ PERF_VFD_MODE_2_FIBERS = 14,
+ PERF_VFD_MODE_3_FIBERS = 15,
+ PERF_VFD_MODE_4_FIBERS = 16,
+ PERF_VFD_TOTAL_VERTICES = 17,
+ PERF_VFDP_STALL_CYCLES_VFD = 18,
+ PERF_VFDP_STALL_CYCLES_VFD_INDEX = 19,
+ PERF_VFDP_STALL_CYCLES_VFD_PROG = 20,
+ PERF_VFDP_STARVE_CYCLES_PC = 21,
+ PERF_VFDP_VS_STAGE_WAVES = 22,
+};
+
+enum a6xx_hlsq_perfcounter_select {
+ PERF_HLSQ_BUSY_CYCLES = 0,
+ PERF_HLSQ_STALL_CYCLES_UCHE = 1,
+ PERF_HLSQ_STALL_CYCLES_SP_STATE = 2,
+ PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE = 3,
+ PERF_HLSQ_UCHE_LATENCY_CYCLES = 4,
+ PERF_HLSQ_UCHE_LATENCY_COUNT = 5,
+ PERF_HLSQ_FS_STAGE_1X_WAVES = 6,
+ PERF_HLSQ_FS_STAGE_2X_WAVES = 7,
+ PERF_HLSQ_QUADS = 8,
+ PERF_HLSQ_CS_INVOCATIONS = 9,
+ PERF_HLSQ_COMPUTE_DRAWCALLS = 10,
+ PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING = 11,
+ PERF_HLSQ_DUAL_FS_PROG_ACTIVE = 12,
+ PERF_HLSQ_DUAL_VS_PROG_ACTIVE = 13,
+ PERF_HLSQ_FS_BATCH_COUNT_ZERO = 14,
+ PERF_HLSQ_VS_BATCH_COUNT_ZERO = 15,
+ PERF_HLSQ_WAVE_PENDING_NO_QUAD = 16,
+ PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE = 17,
+ PERF_HLSQ_STALL_CYCLES_VPC = 18,
+ PERF_HLSQ_PIXELS = 19,
+ PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC = 20,
+};
+
+enum a6xx_vpc_perfcounter_select {
+ PERF_VPC_BUSY_CYCLES = 0,
+ PERF_VPC_WORKING_CYCLES = 1,
+ PERF_VPC_STALL_CYCLES_UCHE = 2,
+ PERF_VPC_STALL_CYCLES_VFD_WACK = 3,
+ PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC = 4,
+ PERF_VPC_STALL_CYCLES_PC = 5,
+ PERF_VPC_STALL_CYCLES_SP_LM = 6,
+ PERF_VPC_STARVE_CYCLES_SP = 7,
+ PERF_VPC_STARVE_CYCLES_LRZ = 8,
+ PERF_VPC_PC_PRIMITIVES = 9,
+ PERF_VPC_SP_COMPONENTS = 10,
+ PERF_VPC_STALL_CYCLES_VPCRAM_POS = 11,
+ PERF_VPC_LRZ_ASSIGN_PRIMITIVES = 12,
+ PERF_VPC_RB_VISIBLE_PRIMITIVES = 13,
+ PERF_VPC_LM_TRANSACTION = 14,
+ PERF_VPC_STREAMOUT_TRANSACTION = 15,
+ PERF_VPC_VS_BUSY_CYCLES = 16,
+ PERF_VPC_PS_BUSY_CYCLES = 17,
+ PERF_VPC_VS_WORKING_CYCLES = 18,
+ PERF_VPC_PS_WORKING_CYCLES = 19,
+ PERF_VPC_STARVE_CYCLES_RB = 20,
+ PERF_VPC_NUM_VPCRAM_READ_POS = 21,
+ PERF_VPC_WIT_FULL_CYCLES = 22,
+ PERF_VPC_VPCRAM_FULL_CYCLES = 23,
+ PERF_VPC_LM_FULL_WAIT_FOR_INTP_END = 24,
+ PERF_VPC_NUM_VPCRAM_WRITE = 25,
+ PERF_VPC_NUM_VPCRAM_READ_SO = 26,
+ PERF_VPC_NUM_ATTR_REQ_LM = 27,
+};
+
+enum a6xx_tse_perfcounter_select {
+ PERF_TSE_BUSY_CYCLES = 0,
+ PERF_TSE_CLIPPING_CYCLES = 1,
+ PERF_TSE_STALL_CYCLES_RAS = 2,
+ PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE = 3,
+ PERF_TSE_STALL_CYCLES_LRZ_ZPLANE = 4,
+ PERF_TSE_STARVE_CYCLES_PC = 5,
+ PERF_TSE_INPUT_PRIM = 6,
+ PERF_TSE_INPUT_NULL_PRIM = 7,
+ PERF_TSE_TRIVAL_REJ_PRIM = 8,
+ PERF_TSE_CLIPPED_PRIM = 9,
+ PERF_TSE_ZERO_AREA_PRIM = 10,
+ PERF_TSE_FACENESS_CULLED_PRIM = 11,
+ PERF_TSE_ZERO_PIXEL_PRIM = 12,
+ PERF_TSE_OUTPUT_NULL_PRIM = 13,
+ PERF_TSE_OUTPUT_VISIBLE_PRIM = 14,
+ PERF_TSE_CINVOCATION = 15,
+ PERF_TSE_CPRIMITIVES = 16,
+ PERF_TSE_2D_INPUT_PRIM = 17,
+ PERF_TSE_2D_ALIVE_CYCLES = 18,
+ PERF_TSE_CLIP_PLANES = 19,
+};
+
+enum a6xx_ras_perfcounter_select {
+ PERF_RAS_BUSY_CYCLES = 0,
+ PERF_RAS_SUPERTILE_ACTIVE_CYCLES = 1,
+ PERF_RAS_STALL_CYCLES_LRZ = 2,
+ PERF_RAS_STARVE_CYCLES_TSE = 3,
+ PERF_RAS_SUPER_TILES = 4,
+ PERF_RAS_8X4_TILES = 5,
+ PERF_RAS_MASKGEN_ACTIVE = 6,
+ PERF_RAS_FULLY_COVERED_SUPER_TILES = 7,
+ PERF_RAS_FULLY_COVERED_8X4_TILES = 8,
+ PERF_RAS_PRIM_KILLED_INVISILBE = 9,
+ PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES = 10,
+ PERF_RAS_LRZ_INTF_WORKING_CYCLES = 11,
+ PERF_RAS_BLOCKS = 12,
+};
+
+enum a6xx_uche_perfcounter_select {
+ PERF_UCHE_BUSY_CYCLES = 0,
+ PERF_UCHE_STALL_CYCLES_ARBITER = 1,
+ PERF_UCHE_VBIF_LATENCY_CYCLES = 2,
+ PERF_UCHE_VBIF_LATENCY_SAMPLES = 3,
+ PERF_UCHE_VBIF_READ_BEATS_TP = 4,
+ PERF_UCHE_VBIF_READ_BEATS_VFD = 5,
+ PERF_UCHE_VBIF_READ_BEATS_HLSQ = 6,
+ PERF_UCHE_VBIF_READ_BEATS_LRZ = 7,
+ PERF_UCHE_VBIF_READ_BEATS_SP = 8,
+ PERF_UCHE_READ_REQUESTS_TP = 9,
+ PERF_UCHE_READ_REQUESTS_VFD = 10,
+ PERF_UCHE_READ_REQUESTS_HLSQ = 11,
+ PERF_UCHE_READ_REQUESTS_LRZ = 12,
+ PERF_UCHE_READ_REQUESTS_SP = 13,
+ PERF_UCHE_WRITE_REQUESTS_LRZ = 14,
+ PERF_UCHE_WRITE_REQUESTS_SP = 15,
+ PERF_UCHE_WRITE_REQUESTS_VPC = 16,
+ PERF_UCHE_WRITE_REQUESTS_VSC = 17,
+ PERF_UCHE_EVICTS = 18,
+ PERF_UCHE_BANK_REQ0 = 19,
+ PERF_UCHE_BANK_REQ1 = 20,
+ PERF_UCHE_BANK_REQ2 = 21,
+ PERF_UCHE_BANK_REQ3 = 22,
+ PERF_UCHE_BANK_REQ4 = 23,
+ PERF_UCHE_BANK_REQ5 = 24,
+ PERF_UCHE_BANK_REQ6 = 25,
+ PERF_UCHE_BANK_REQ7 = 26,
+ PERF_UCHE_VBIF_READ_BEATS_CH0 = 27,
+ PERF_UCHE_VBIF_READ_BEATS_CH1 = 28,
+ PERF_UCHE_GMEM_READ_BEATS = 29,
+ PERF_UCHE_TPH_REF_FULL = 30,
+ PERF_UCHE_TPH_VICTIM_FULL = 31,
+ PERF_UCHE_TPH_EXT_FULL = 32,
+ PERF_UCHE_VBIF_STALL_WRITE_DATA = 33,
+ PERF_UCHE_DCMP_LATENCY_SAMPLES = 34,
+ PERF_UCHE_DCMP_LATENCY_CYCLES = 35,
+ PERF_UCHE_VBIF_READ_BEATS_PC = 36,
+ PERF_UCHE_READ_REQUESTS_PC = 37,
+ PERF_UCHE_RAM_READ_REQ = 38,
+ PERF_UCHE_RAM_WRITE_REQ = 39,
+};
+
+enum a6xx_tp_perfcounter_select {
+ PERF_TP_BUSY_CYCLES = 0,
+ PERF_TP_STALL_CYCLES_UCHE = 1,
+ PERF_TP_LATENCY_CYCLES = 2,
+ PERF_TP_LATENCY_TRANS = 3,
+ PERF_TP_FLAG_CACHE_REQUEST_SAMPLES = 4,
+ PERF_TP_FLAG_CACHE_REQUEST_LATENCY = 5,
+ PERF_TP_L1_CACHELINE_REQUESTS = 6,
+ PERF_TP_L1_CACHELINE_MISSES = 7,
+ PERF_TP_SP_TP_TRANS = 8,
+ PERF_TP_TP_SP_TRANS = 9,
+ PERF_TP_OUTPUT_PIXELS = 10,
+ PERF_TP_FILTER_WORKLOAD_16BIT = 11,
+ PERF_TP_FILTER_WORKLOAD_32BIT = 12,
+ PERF_TP_QUADS_RECEIVED = 13,
+ PERF_TP_QUADS_OFFSET = 14,
+ PERF_TP_QUADS_SHADOW = 15,
+ PERF_TP_QUADS_ARRAY = 16,
+ PERF_TP_QUADS_GRADIENT = 17,
+ PERF_TP_QUADS_1D = 18,
+ PERF_TP_QUADS_2D = 19,
+ PERF_TP_QUADS_BUFFER = 20,
+ PERF_TP_QUADS_3D = 21,
+ PERF_TP_QUADS_CUBE = 22,
+ PERF_TP_DIVERGENT_QUADS_RECEIVED = 23,
+ PERF_TP_PRT_NON_RESIDENT_EVENTS = 24,
+ PERF_TP_OUTPUT_PIXELS_POINT = 25,
+ PERF_TP_OUTPUT_PIXELS_BILINEAR = 26,
+ PERF_TP_OUTPUT_PIXELS_MIP = 27,
+ PERF_TP_OUTPUT_PIXELS_ANISO = 28,
+ PERF_TP_OUTPUT_PIXELS_ZERO_LOD = 29,
+ PERF_TP_FLAG_CACHE_REQUESTS = 30,
+ PERF_TP_FLAG_CACHE_MISSES = 31,
+ PERF_TP_L1_5_L2_REQUESTS = 32,
+ PERF_TP_2D_OUTPUT_PIXELS = 33,
+ PERF_TP_2D_OUTPUT_PIXELS_POINT = 34,
+ PERF_TP_2D_OUTPUT_PIXELS_BILINEAR = 35,
+ PERF_TP_2D_FILTER_WORKLOAD_16BIT = 36,
+ PERF_TP_2D_FILTER_WORKLOAD_32BIT = 37,
+ PERF_TP_TPA2TPC_TRANS = 38,
+ PERF_TP_L1_MISSES_ASTC_1TILE = 39,
+ PERF_TP_L1_MISSES_ASTC_2TILE = 40,
+ PERF_TP_L1_MISSES_ASTC_4TILE = 41,
+ PERF_TP_L1_5_L2_COMPRESS_REQS = 42,
+ PERF_TP_L1_5_L2_COMPRESS_MISS = 43,
+ PERF_TP_L1_BANK_CONFLICT = 44,
+ PERF_TP_L1_5_MISS_LATENCY_CYCLES = 45,
+ PERF_TP_L1_5_MISS_LATENCY_TRANS = 46,
+ PERF_TP_QUADS_CONSTANT_MULTIPLIED = 47,
+ PERF_TP_FRONTEND_WORKING_CYCLES = 48,
+ PERF_TP_L1_TAG_WORKING_CYCLES = 49,
+ PERF_TP_L1_DATA_WRITE_WORKING_CYCLES = 50,
+ PERF_TP_PRE_L1_DECOM_WORKING_CYCLES = 51,
+ PERF_TP_BACKEND_WORKING_CYCLES = 52,
+ PERF_TP_FLAG_CACHE_WORKING_CYCLES = 53,
+ PERF_TP_L1_5_CACHE_WORKING_CYCLES = 54,
+ PERF_TP_STARVE_CYCLES_SP = 55,
+ PERF_TP_STARVE_CYCLES_UCHE = 56,
+};
+
+enum a6xx_sp_perfcounter_select {
+ PERF_SP_BUSY_CYCLES = 0,
+ PERF_SP_ALU_WORKING_CYCLES = 1,
+ PERF_SP_EFU_WORKING_CYCLES = 2,
+ PERF_SP_STALL_CYCLES_VPC = 3,
+ PERF_SP_STALL_CYCLES_TP = 4,
+ PERF_SP_STALL_CYCLES_UCHE = 5,
+ PERF_SP_STALL_CYCLES_RB = 6,
+ PERF_SP_NON_EXECUTION_CYCLES = 7,
+ PERF_SP_WAVE_CONTEXTS = 8,
+ PERF_SP_WAVE_CONTEXT_CYCLES = 9,
+ PERF_SP_FS_STAGE_WAVE_CYCLES = 10,
+ PERF_SP_FS_STAGE_WAVE_SAMPLES = 11,
+ PERF_SP_VS_STAGE_WAVE_CYCLES = 12,
+ PERF_SP_VS_STAGE_WAVE_SAMPLES = 13,
+ PERF_SP_FS_STAGE_DURATION_CYCLES = 14,
+ PERF_SP_VS_STAGE_DURATION_CYCLES = 15,
+ PERF_SP_WAVE_CTRL_CYCLES = 16,
+ PERF_SP_WAVE_LOAD_CYCLES = 17,
+ PERF_SP_WAVE_EMIT_CYCLES = 18,
+ PERF_SP_WAVE_NOP_CYCLES = 19,
+ PERF_SP_WAVE_WAIT_CYCLES = 20,
+ PERF_SP_WAVE_FETCH_CYCLES = 21,
+ PERF_SP_WAVE_IDLE_CYCLES = 22,
+ PERF_SP_WAVE_END_CYCLES = 23,
+ PERF_SP_WAVE_LONG_SYNC_CYCLES = 24,
+ PERF_SP_WAVE_SHORT_SYNC_CYCLES = 25,
+ PERF_SP_WAVE_JOIN_CYCLES = 26,
+ PERF_SP_LM_LOAD_INSTRUCTIONS = 27,
+ PERF_SP_LM_STORE_INSTRUCTIONS = 28,
+ PERF_SP_LM_ATOMICS = 29,
+ PERF_SP_GM_LOAD_INSTRUCTIONS = 30,
+ PERF_SP_GM_STORE_INSTRUCTIONS = 31,
+ PERF_SP_GM_ATOMICS = 32,
+ PERF_SP_VS_STAGE_TEX_INSTRUCTIONS = 33,
+ PERF_SP_VS_STAGE_EFU_INSTRUCTIONS = 34,
+ PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 35,
+ PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 36,
+ PERF_SP_FS_STAGE_TEX_INSTRUCTIONS = 37,
+ PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS = 38,
+ PERF_SP_FS_STAGE_EFU_INSTRUCTIONS = 39,
+ PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 40,
+ PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 41,
+ PERF_SP_FS_STAGE_BARY_INSTRUCTIONS = 42,
+ PERF_SP_VS_INSTRUCTIONS = 43,
+ PERF_SP_FS_INSTRUCTIONS = 44,
+ PERF_SP_ADDR_LOCK_COUNT = 45,
+ PERF_SP_UCHE_READ_TRANS = 46,
+ PERF_SP_UCHE_WRITE_TRANS = 47,
+ PERF_SP_EXPORT_VPC_TRANS = 48,
+ PERF_SP_EXPORT_RB_TRANS = 49,
+ PERF_SP_PIXELS_KILLED = 50,
+ PERF_SP_ICL1_REQUESTS = 51,
+ PERF_SP_ICL1_MISSES = 52,
+ PERF_SP_HS_INSTRUCTIONS = 53,
+ PERF_SP_DS_INSTRUCTIONS = 54,
+ PERF_SP_GS_INSTRUCTIONS = 55,
+ PERF_SP_CS_INSTRUCTIONS = 56,
+ PERF_SP_GPR_READ = 57,
+ PERF_SP_GPR_WRITE = 58,
+ PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS = 59,
+ PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS = 60,
+ PERF_SP_LM_BANK_CONFLICTS = 61,
+ PERF_SP_TEX_CONTROL_WORKING_CYCLES = 62,
+ PERF_SP_LOAD_CONTROL_WORKING_CYCLES = 63,
+ PERF_SP_FLOW_CONTROL_WORKING_CYCLES = 64,
+ PERF_SP_LM_WORKING_CYCLES = 65,
+ PERF_SP_DISPATCHER_WORKING_CYCLES = 66,
+ PERF_SP_SEQUENCER_WORKING_CYCLES = 67,
+ PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP = 68,
+ PERF_SP_STARVE_CYCLES_HLSQ = 69,
+ PERF_SP_NON_EXECUTION_LS_CYCLES = 70,
+ PERF_SP_WORKING_EU = 71,
+ PERF_SP_ANY_EU_WORKING = 72,
+ PERF_SP_WORKING_EU_FS_STAGE = 73,
+ PERF_SP_ANY_EU_WORKING_FS_STAGE = 74,
+ PERF_SP_WORKING_EU_VS_STAGE = 75,
+ PERF_SP_ANY_EU_WORKING_VS_STAGE = 76,
+ PERF_SP_WORKING_EU_CS_STAGE = 77,
+ PERF_SP_ANY_EU_WORKING_CS_STAGE = 78,
+ PERF_SP_GPR_READ_PREFETCH = 79,
+ PERF_SP_GPR_READ_CONFLICT = 80,
+ PERF_SP_GPR_WRITE_CONFLICT = 81,
+ PERF_SP_GM_LOAD_LATENCY_CYCLES = 82,
+ PERF_SP_GM_LOAD_LATENCY_SAMPLES = 83,
+ PERF_SP_EXECUTABLE_WAVES = 84,
+};
+
+enum a6xx_rb_perfcounter_select {
+ PERF_RB_BUSY_CYCLES = 0,
+ PERF_RB_STALL_CYCLES_HLSQ = 1,
+ PERF_RB_STALL_CYCLES_FIFO0_FULL = 2,
+ PERF_RB_STALL_CYCLES_FIFO1_FULL = 3,
+ PERF_RB_STALL_CYCLES_FIFO2_FULL = 4,
+ PERF_RB_STARVE_CYCLES_SP = 5,
+ PERF_RB_STARVE_CYCLES_LRZ_TILE = 6,
+ PERF_RB_STARVE_CYCLES_CCU = 7,
+ PERF_RB_STARVE_CYCLES_Z_PLANE = 8,
+ PERF_RB_STARVE_CYCLES_BARY_PLANE = 9,
+ PERF_RB_Z_WORKLOAD = 10,
+ PERF_RB_HLSQ_ACTIVE = 11,
+ PERF_RB_Z_READ = 12,
+ PERF_RB_Z_WRITE = 13,
+ PERF_RB_C_READ = 14,
+ PERF_RB_C_WRITE = 15,
+ PERF_RB_TOTAL_PASS = 16,
+ PERF_RB_Z_PASS = 17,
+ PERF_RB_Z_FAIL = 18,
+ PERF_RB_S_FAIL = 19,
+ PERF_RB_BLENDED_FXP_COMPONENTS = 20,
+ PERF_RB_BLENDED_FP16_COMPONENTS = 21,
+ PERF_RB_PS_INVOCATIONS = 22,
+ PERF_RB_2D_ALIVE_CYCLES = 23,
+ PERF_RB_2D_STALL_CYCLES_A2D = 24,
+ PERF_RB_2D_STARVE_CYCLES_SRC = 25,
+ PERF_RB_2D_STARVE_CYCLES_SP = 26,
+ PERF_RB_2D_STARVE_CYCLES_DST = 27,
+ PERF_RB_2D_VALID_PIXELS = 28,
+ PERF_RB_3D_PIXELS = 29,
+ PERF_RB_BLENDER_WORKING_CYCLES = 30,
+ PERF_RB_ZPROC_WORKING_CYCLES = 31,
+ PERF_RB_CPROC_WORKING_CYCLES = 32,
+ PERF_RB_SAMPLER_WORKING_CYCLES = 33,
+ PERF_RB_STALL_CYCLES_CCU_COLOR_READ = 34,
+ PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE = 35,
+ PERF_RB_STALL_CYCLES_CCU_DEPTH_READ = 36,
+ PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE = 37,
+ PERF_RB_STALL_CYCLES_VPC = 38,
+ PERF_RB_2D_INPUT_TRANS = 39,
+ PERF_RB_2D_OUTPUT_RB_DST_TRANS = 40,
+ PERF_RB_2D_OUTPUT_RB_SRC_TRANS = 41,
+ PERF_RB_BLENDED_FP32_COMPONENTS = 42,
+ PERF_RB_COLOR_PIX_TILES = 43,
+ PERF_RB_STALL_CYCLES_CCU = 44,
+ PERF_RB_EARLY_Z_ARB3_GRANT = 45,
+ PERF_RB_LATE_Z_ARB3_GRANT = 46,
+ PERF_RB_EARLY_Z_SKIP_GRANT = 47,
+};
+
+enum a6xx_vsc_perfcounter_select {
+ PERF_VSC_BUSY_CYCLES = 0,
+ PERF_VSC_WORKING_CYCLES = 1,
+ PERF_VSC_STALL_CYCLES_UCHE = 2,
+ PERF_VSC_EOT_NUM = 3,
+ PERF_VSC_INPUT_TILES = 4,
+};
+
+enum a6xx_ccu_perfcounter_select {
+ PERF_CCU_BUSY_CYCLES = 0,
+ PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN = 1,
+ PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN = 2,
+ PERF_CCU_STARVE_CYCLES_FLAG_RETURN = 3,
+ PERF_CCU_DEPTH_BLOCKS = 4,
+ PERF_CCU_COLOR_BLOCKS = 5,
+ PERF_CCU_DEPTH_BLOCK_HIT = 6,
+ PERF_CCU_COLOR_BLOCK_HIT = 7,
+ PERF_CCU_PARTIAL_BLOCK_READ = 8,
+ PERF_CCU_GMEM_READ = 9,
+ PERF_CCU_GMEM_WRITE = 10,
+ PERF_CCU_DEPTH_READ_FLAG0_COUNT = 11,
+ PERF_CCU_DEPTH_READ_FLAG1_COUNT = 12,
+ PERF_CCU_DEPTH_READ_FLAG2_COUNT = 13,
+ PERF_CCU_DEPTH_READ_FLAG3_COUNT = 14,
+ PERF_CCU_DEPTH_READ_FLAG4_COUNT = 15,
+ PERF_CCU_DEPTH_READ_FLAG5_COUNT = 16,
+ PERF_CCU_DEPTH_READ_FLAG6_COUNT = 17,
+ PERF_CCU_DEPTH_READ_FLAG8_COUNT = 18,
+ PERF_CCU_COLOR_READ_FLAG0_COUNT = 19,
+ PERF_CCU_COLOR_READ_FLAG1_COUNT = 20,
+ PERF_CCU_COLOR_READ_FLAG2_COUNT = 21,
+ PERF_CCU_COLOR_READ_FLAG3_COUNT = 22,
+ PERF_CCU_COLOR_READ_FLAG4_COUNT = 23,
+ PERF_CCU_COLOR_READ_FLAG5_COUNT = 24,
+ PERF_CCU_COLOR_READ_FLAG6_COUNT = 25,
+ PERF_CCU_COLOR_READ_FLAG8_COUNT = 26,
+ PERF_CCU_2D_RD_REQ = 27,
+ PERF_CCU_2D_WR_REQ = 28,
+};
+
+enum a6xx_lrz_perfcounter_select {
+ PERF_LRZ_BUSY_CYCLES = 0,
+ PERF_LRZ_STARVE_CYCLES_RAS = 1,
+ PERF_LRZ_STALL_CYCLES_RB = 2,
+ PERF_LRZ_STALL_CYCLES_VSC = 3,
+ PERF_LRZ_STALL_CYCLES_VPC = 4,
+ PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH = 5,
+ PERF_LRZ_STALL_CYCLES_UCHE = 6,
+ PERF_LRZ_LRZ_READ = 7,
+ PERF_LRZ_LRZ_WRITE = 8,
+ PERF_LRZ_READ_LATENCY = 9,
+ PERF_LRZ_MERGE_CACHE_UPDATING = 10,
+ PERF_LRZ_PRIM_KILLED_BY_MASKGEN = 11,
+ PERF_LRZ_PRIM_KILLED_BY_LRZ = 12,
+ PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ = 13,
+ PERF_LRZ_FULL_8X8_TILES = 14,
+ PERF_LRZ_PARTIAL_8X8_TILES = 15,
+ PERF_LRZ_TILE_KILLED = 16,
+ PERF_LRZ_TOTAL_PIXEL = 17,
+ PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ = 18,
+ PERF_LRZ_FULLY_COVERED_TILES = 19,
+ PERF_LRZ_PARTIAL_COVERED_TILES = 20,
+ PERF_LRZ_FEEDBACK_ACCEPT = 21,
+ PERF_LRZ_FEEDBACK_DISCARD = 22,
+ PERF_LRZ_FEEDBACK_STALL = 23,
+ PERF_LRZ_STALL_CYCLES_RB_ZPLANE = 24,
+ PERF_LRZ_STALL_CYCLES_RB_BPLANE = 25,
+ PERF_LRZ_STALL_CYCLES_VC = 26,
+ PERF_LRZ_RAS_MASK_TRANS = 27,
+};
+
+enum a6xx_cmp_perfcounter_select {
+ PERF_CMPDECMP_STALL_CYCLES_ARB = 0,
+ PERF_CMPDECMP_VBIF_LATENCY_CYCLES = 1,
+ PERF_CMPDECMP_VBIF_LATENCY_SAMPLES = 2,
+ PERF_CMPDECMP_VBIF_READ_DATA_CCU = 3,
+ PERF_CMPDECMP_VBIF_WRITE_DATA_CCU = 4,
+ PERF_CMPDECMP_VBIF_READ_REQUEST = 5,
+ PERF_CMPDECMP_VBIF_WRITE_REQUEST = 6,
+ PERF_CMPDECMP_VBIF_READ_DATA = 7,
+ PERF_CMPDECMP_VBIF_WRITE_DATA = 8,
+ PERF_CMPDECMP_FLAG_FETCH_CYCLES = 9,
+ PERF_CMPDECMP_FLAG_FETCH_SAMPLES = 10,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT = 11,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT = 12,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT = 13,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT = 14,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT = 15,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT = 16,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT = 17,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT = 18,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT = 19,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT = 20,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT = 21,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT = 22,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT = 23,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT = 24,
+ PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ = 25,
+ PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR = 26,
+ PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN = 27,
+ PERF_CMPDECMP_2D_RD_DATA = 28,
+ PERF_CMPDECMP_2D_WR_DATA = 29,
+ PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0 = 30,
+ PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1 = 31,
+ PERF_CMPDECMP_2D_OUTPUT_TRANS = 32,
+ PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE = 33,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT = 34,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT = 35,
+ PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT = 36,
+ PERF_CMPDECMP_2D_BUSY_CYCLES = 37,
+ PERF_CMPDECMP_2D_REORDER_STARVE_CYCLES = 38,
+ PERF_CMPDECMP_2D_PIXELS = 39,
+};
+
+enum a6xx_2d_ifmt {
+ R2D_UNORM8 = 16,
+ R2D_INT32 = 7,
+ R2D_INT16 = 6,
+ R2D_INT8 = 5,
+ R2D_FLOAT32 = 4,
+ R2D_FLOAT16 = 3,
+ R2D_UNORM8_SRGB = 1,
+ R2D_RAW = 0,
+};
+
+enum a6xx_ztest_mode {
+ A6XX_EARLY_Z = 0,
+ A6XX_LATE_Z = 1,
+ A6XX_EARLY_LRZ_LATE_Z = 2,
+ A6XX_INVALID_ZTEST = 3,
+};
+
+enum a6xx_sequenced_thread_dist {
+ DIST_SCREEN_COORD = 0,
+ DIST_ALL_TO_RB0 = 1,
+};
+
+enum a6xx_single_prim_mode {
+ NO_FLUSH = 0,
+ FLUSH_PER_OVERLAP_AND_OVERWRITE = 1,
+ FLUSH_PER_OVERLAP = 3,
+};
+
+enum a6xx_raster_mode {
+ TYPE_TILED = 0,
+ TYPE_WRITER = 1,
+};
+
+enum a6xx_raster_direction {
+ LR_TB = 0,
+ RL_TB = 1,
+ LR_BT = 2,
+ RB_BT = 3,
+};
+
+enum a6xx_render_mode {
+ RENDERING_PASS = 0,
+ BINNING_PASS = 1,
+};
+
+enum a6xx_buffers_location {
+ BUFFERS_IN_GMEM = 0,
+ BUFFERS_IN_SYSMEM = 3,
+};
+
+enum a6xx_lrz_dir_status {
+ LRZ_DIR_LE = 1,
+ LRZ_DIR_GE = 2,
+ LRZ_DIR_INVALID = 3,
+};
+
+enum a6xx_fragcoord_sample_mode {
+ FRAGCOORD_CENTER = 0,
+ FRAGCOORD_SAMPLE = 3,
+};
+
+enum a6xx_rotation {
+ ROTATE_0 = 0,
+ ROTATE_90 = 1,
+ ROTATE_180 = 2,
+ ROTATE_270 = 3,
+ ROTATE_HFLIP = 4,
+ ROTATE_VFLIP = 5,
+};
+
+enum a6xx_tess_spacing {
+ TESS_EQUAL = 0,
+ TESS_FRACTIONAL_ODD = 2,
+ TESS_FRACTIONAL_EVEN = 3,
+};
+
+enum a6xx_tess_output {
+ TESS_POINTS = 0,
+ TESS_LINES = 1,
+ TESS_CW_TRIS = 2,
+ TESS_CCW_TRIS = 3,
+};
+
+enum a6xx_threadsize {
+ THREAD64 = 0,
+ THREAD128 = 1,
+};
+
+enum a6xx_bindless_descriptor_size {
+ BINDLESS_DESCRIPTOR_16B = 1,
+ BINDLESS_DESCRIPTOR_64B = 3,
+};
+
+enum a6xx_isam_mode {
+ ISAMMODE_GL = 2,
+};
+
+enum a6xx_tex_filter {
+ A6XX_TEX_NEAREST = 0,
+ A6XX_TEX_LINEAR = 1,
+ A6XX_TEX_ANISO = 2,
+ A6XX_TEX_CUBIC = 3,
+};
+
+enum a6xx_tex_clamp {
+ A6XX_TEX_REPEAT = 0,
+ A6XX_TEX_CLAMP_TO_EDGE = 1,
+ A6XX_TEX_MIRROR_REPEAT = 2,
+ A6XX_TEX_CLAMP_TO_BORDER = 3,
+ A6XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a6xx_tex_aniso {
+ A6XX_TEX_ANISO_1 = 0,
+ A6XX_TEX_ANISO_2 = 1,
+ A6XX_TEX_ANISO_4 = 2,
+ A6XX_TEX_ANISO_8 = 3,
+ A6XX_TEX_ANISO_16 = 4,
+};
+
+enum a6xx_reduction_mode {
+ A6XX_REDUCTION_MODE_AVERAGE = 0,
+ A6XX_REDUCTION_MODE_MIN = 1,
+ A6XX_REDUCTION_MODE_MAX = 2,
+};
+
+enum a6xx_tex_swiz {
+ A6XX_TEX_X = 0,
+ A6XX_TEX_Y = 1,
+ A6XX_TEX_Z = 2,
+ A6XX_TEX_W = 3,
+ A6XX_TEX_ZERO = 4,
+ A6XX_TEX_ONE = 5,
+};
+
+enum a6xx_tex_type {
+ A6XX_TEX_1D = 0,
+ A6XX_TEX_2D = 1,
+ A6XX_TEX_CUBE = 2,
+ A6XX_TEX_3D = 3,
+ A6XX_TEX_BUFFER = 4,
+};
+
+#define A6XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001
+#define A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR 0x00000002
+#define A6XX_RBBM_INT_0_MASK_CP_IPC_INTR_0 0x00000010
+#define A6XX_RBBM_INT_0_MASK_CP_IPC_INTR_1 0x00000020
+#define A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW 0x00000040
+#define A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080
+#define A6XX_RBBM_INT_0_MASK_CP_SW 0x00000100
+#define A6XX_RBBM_INT_0_MASK_CP_HW_ERROR 0x00000200
+#define A6XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A6XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A6XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS 0x00001000
+#define A6XX_RBBM_INT_0_MASK_CP_IB2 0x00002000
+#define A6XX_RBBM_INT_0_MASK_CP_IB1 0x00004000
+#define A6XX_RBBM_INT_0_MASK_CP_RB 0x00008000
+#define A6XX_RBBM_INT_0_MASK_PM4CPINTERRUPT 0x00008000
+#define A6XX_RBBM_INT_0_MASK_PM4CPINTERRUPTLPAC 0x00010000
+#define A6XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000
+#define A6XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000
+#define A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000
+#define A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS_LPAC 0x00200000
+#define A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT 0x00800000
+#define A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000
+#define A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000
+#define A6XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000
+#define A6XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000
+#define A6XX_RBBM_INT_0_MASK_TSBWRITEERROR 0x10000000
+#define A6XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000
+#define A6XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000
+#define A6XX_CP_INT_CP_OPCODE_ERROR 0x00000001
+#define A6XX_CP_INT_CP_UCODE_ERROR 0x00000002
+#define A6XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004
+#define A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010
+#define A6XX_CP_INT_CP_AHB_ERROR 0x00000020
+#define A6XX_CP_INT_CP_VSD_PARITY_ERROR 0x00000040
+#define A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR 0x00000080
+#define A6XX_CP_INT_CP_OPCODE_ERROR_LPAC 0x00000100
+#define A6XX_CP_INT_CP_UCODE_ERROR_LPAC 0x00000200
+#define A6XX_CP_INT_CP_HW_FAULT_ERROR_LPAC 0x00000400
+#define A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR_LPAC 0x00000800
+#define A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR_LPAC 0x00001000
+#define A6XX_CP_INT_CP_OPCODE_ERROR_BV 0x00002000
+#define A6XX_CP_INT_CP_UCODE_ERROR_BV 0x00004000
+#define A6XX_CP_INT_CP_HW_FAULT_ERROR_BV 0x00008000
+#define A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR_BV 0x00010000
+#define A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR_BV 0x00020000
+#define REG_A6XX_CP_RB_BASE 0x00000800
+
+#define REG_A6XX_CP_RB_CNTL 0x00000802
+
+#define REG_A6XX_CP_RB_RPTR_ADDR 0x00000804
+
+#define REG_A6XX_CP_RB_RPTR 0x00000806
+
+#define REG_A6XX_CP_RB_WPTR 0x00000807
+
+#define REG_A6XX_CP_SQE_CNTL 0x00000808
+
+#define REG_A6XX_CP_CP2GMU_STATUS 0x00000812
+#define A6XX_CP_CP2GMU_STATUS_IFPC 0x00000001
+
+#define REG_A6XX_CP_HW_FAULT 0x00000821
+
+#define REG_A6XX_CP_INTERRUPT_STATUS 0x00000823
+
+#define REG_A6XX_CP_PROTECT_STATUS 0x00000824
+
+#define REG_A6XX_CP_STATUS_1 0x00000825
+
+#define REG_A6XX_CP_SQE_INSTR_BASE 0x00000830
+
+#define REG_A6XX_CP_MISC_CNTL 0x00000840
+
+#define REG_A6XX_CP_APRIV_CNTL 0x00000844
+
+#define REG_A6XX_CP_PREEMPT_THRESHOLD 0x000008c0
+
+#define REG_A6XX_CP_ROQ_THRESHOLDS_1 0x000008c1
+#define A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__MASK 0x000000ff
+#define A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__SHIFT 0
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_MRB_START(uint32_t val)
+{
+ return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_MRB_START__MASK;
+}
+#define A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__MASK 0x0000ff00
+#define A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__SHIFT 8
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_VSD_START(uint32_t val)
+{
+ return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_VSD_START__MASK;
+}
+#define A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__MASK 0x00ff0000
+#define A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__SHIFT 16
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_IB1_START(uint32_t val)
+{
+ return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_IB1_START__MASK;
+}
+#define A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__MASK 0xff000000
+#define A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__SHIFT 24
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_1_IB2_START(uint32_t val)
+{
+ return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_1_IB2_START__MASK;
+}
+
+#define REG_A6XX_CP_ROQ_THRESHOLDS_2 0x000008c2
+#define A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__MASK 0x000001ff
+#define A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__SHIFT 0
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_2_SDS_START(uint32_t val)
+{
+ return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_2_SDS_START__MASK;
+}
+#define A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__MASK 0xffff0000
+#define A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__SHIFT 16
+static inline uint32_t A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE(uint32_t val)
+{
+ return ((val >> 2) << A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__SHIFT) & A6XX_CP_ROQ_THRESHOLDS_2_ROQ_SIZE__MASK;
+}
+
+#define REG_A6XX_CP_MEM_POOL_SIZE 0x000008c3
+
+#define REG_A6XX_CP_CHICKEN_DBG 0x00000841
+
+#define REG_A6XX_CP_ADDR_MODE_CNTL 0x00000842
+
+#define REG_A6XX_CP_DBG_ECO_CNTL 0x00000843
+
+#define REG_A6XX_CP_PROTECT_CNTL 0x0000084f
+#define A6XX_CP_PROTECT_CNTL_LAST_SPAN_INF_RANGE 0x00000008
+#define A6XX_CP_PROTECT_CNTL_ACCESS_FAULT_ON_VIOL_EN 0x00000002
+#define A6XX_CP_PROTECT_CNTL_ACCESS_PROT_EN 0x00000001
+
+static inline uint32_t REG_A6XX_CP_SCRATCH(uint32_t i0) { return 0x00000883 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000883 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_CP_PROTECT(uint32_t i0) { return 0x00000850 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000850 + 0x1*i0; }
+#define A6XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0003ffff
+#define A6XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A6XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A6XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A6XX_CP_PROTECT_REG_MASK_LEN__MASK 0x7ffc0000
+#define A6XX_CP_PROTECT_REG_MASK_LEN__SHIFT 18
+static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A6XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A6XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A6XX_CP_PROTECT_REG_READ 0x80000000
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_CNTL 0x000008a0
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO 0x000008a1
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR 0x000008a3
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR 0x000008a5
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR 0x000008a7
+
+#define REG_A7XX_CP_CONTEXT_SWITCH_LEVEL_STATUS 0x000008ab
+
+static inline uint32_t REG_A6XX_CP_PERFCTR_CP_SEL(uint32_t i0) { return 0x000008d0 + 0x1*i0; }
+
+static inline uint32_t REG_A7XX_CP_BV_PERFCTR_CP_SEL(uint32_t i0) { return 0x000008e0 + 0x1*i0; }
+
+#define REG_A6XX_CP_CRASH_SCRIPT_BASE 0x00000900
+
+#define REG_A6XX_CP_CRASH_DUMP_CNTL 0x00000902
+
+#define REG_A6XX_CP_CRASH_DUMP_STATUS 0x00000903
+
+#define REG_A6XX_CP_SQE_STAT_ADDR 0x00000908
+
+#define REG_A6XX_CP_SQE_STAT_DATA 0x00000909
+
+#define REG_A6XX_CP_DRAW_STATE_ADDR 0x0000090a
+
+#define REG_A6XX_CP_DRAW_STATE_DATA 0x0000090b
+
+#define REG_A6XX_CP_ROQ_DBG_ADDR 0x0000090c
+
+#define REG_A6XX_CP_ROQ_DBG_DATA 0x0000090d
+
+#define REG_A6XX_CP_MEM_POOL_DBG_ADDR 0x0000090e
+
+#define REG_A6XX_CP_MEM_POOL_DBG_DATA 0x0000090f
+
+#define REG_A6XX_CP_SQE_UCODE_DBG_ADDR 0x00000910
+
+#define REG_A6XX_CP_SQE_UCODE_DBG_DATA 0x00000911
+
+#define REG_A6XX_CP_IB1_BASE 0x00000928
+
+#define REG_A6XX_CP_IB1_REM_SIZE 0x0000092a
+
+#define REG_A6XX_CP_IB2_BASE 0x0000092b
+
+#define REG_A6XX_CP_IB2_REM_SIZE 0x0000092d
+
+#define REG_A6XX_CP_SDS_BASE 0x0000092e
+
+#define REG_A6XX_CP_SDS_REM_SIZE 0x00000930
+
+#define REG_A6XX_CP_MRB_BASE 0x00000931
+
+#define REG_A6XX_CP_MRB_REM_SIZE 0x00000933
+
+#define REG_A6XX_CP_VSD_BASE 0x00000934
+
+#define REG_A6XX_CP_ROQ_RB_STAT 0x00000939
+#define A6XX_CP_ROQ_RB_STAT_RPTR__MASK 0x000003ff
+#define A6XX_CP_ROQ_RB_STAT_RPTR__SHIFT 0
+static inline uint32_t A6XX_CP_ROQ_RB_STAT_RPTR(uint32_t val)
+{
+ return ((val) << A6XX_CP_ROQ_RB_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_RB_STAT_RPTR__MASK;
+}
+#define A6XX_CP_ROQ_RB_STAT_WPTR__MASK 0x03ff0000
+#define A6XX_CP_ROQ_RB_STAT_WPTR__SHIFT 16
+static inline uint32_t A6XX_CP_ROQ_RB_STAT_WPTR(uint32_t val)
+{
+ return ((val) << A6XX_CP_ROQ_RB_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_RB_STAT_WPTR__MASK;
+}
+
+#define REG_A6XX_CP_ROQ_IB1_STAT 0x0000093a
+#define A6XX_CP_ROQ_IB1_STAT_RPTR__MASK 0x000003ff
+#define A6XX_CP_ROQ_IB1_STAT_RPTR__SHIFT 0
+static inline uint32_t A6XX_CP_ROQ_IB1_STAT_RPTR(uint32_t val)
+{
+ return ((val) << A6XX_CP_ROQ_IB1_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_IB1_STAT_RPTR__MASK;
+}
+#define A6XX_CP_ROQ_IB1_STAT_WPTR__MASK 0x03ff0000
+#define A6XX_CP_ROQ_IB1_STAT_WPTR__SHIFT 16
+static inline uint32_t A6XX_CP_ROQ_IB1_STAT_WPTR(uint32_t val)
+{
+ return ((val) << A6XX_CP_ROQ_IB1_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_IB1_STAT_WPTR__MASK;
+}
+
+#define REG_A6XX_CP_ROQ_IB2_STAT 0x0000093b
+#define A6XX_CP_ROQ_IB2_STAT_RPTR__MASK 0x000003ff
+#define A6XX_CP_ROQ_IB2_STAT_RPTR__SHIFT 0
+static inline uint32_t A6XX_CP_ROQ_IB2_STAT_RPTR(uint32_t val)
+{
+ return ((val) << A6XX_CP_ROQ_IB2_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_IB2_STAT_RPTR__MASK;
+}
+#define A6XX_CP_ROQ_IB2_STAT_WPTR__MASK 0x03ff0000
+#define A6XX_CP_ROQ_IB2_STAT_WPTR__SHIFT 16
+static inline uint32_t A6XX_CP_ROQ_IB2_STAT_WPTR(uint32_t val)
+{
+ return ((val) << A6XX_CP_ROQ_IB2_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_IB2_STAT_WPTR__MASK;
+}
+
+#define REG_A6XX_CP_ROQ_SDS_STAT 0x0000093c
+#define A6XX_CP_ROQ_SDS_STAT_RPTR__MASK 0x000003ff
+#define A6XX_CP_ROQ_SDS_STAT_RPTR__SHIFT 0
+static inline uint32_t A6XX_CP_ROQ_SDS_STAT_RPTR(uint32_t val)
+{
+ return ((val) << A6XX_CP_ROQ_SDS_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_SDS_STAT_RPTR__MASK;
+}
+#define A6XX_CP_ROQ_SDS_STAT_WPTR__MASK 0x03ff0000
+#define A6XX_CP_ROQ_SDS_STAT_WPTR__SHIFT 16
+static inline uint32_t A6XX_CP_ROQ_SDS_STAT_WPTR(uint32_t val)
+{
+ return ((val) << A6XX_CP_ROQ_SDS_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_SDS_STAT_WPTR__MASK;
+}
+
+#define REG_A6XX_CP_ROQ_MRB_STAT 0x0000093d
+#define A6XX_CP_ROQ_MRB_STAT_RPTR__MASK 0x000003ff
+#define A6XX_CP_ROQ_MRB_STAT_RPTR__SHIFT 0
+static inline uint32_t A6XX_CP_ROQ_MRB_STAT_RPTR(uint32_t val)
+{
+ return ((val) << A6XX_CP_ROQ_MRB_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_MRB_STAT_RPTR__MASK;
+}
+#define A6XX_CP_ROQ_MRB_STAT_WPTR__MASK 0x03ff0000
+#define A6XX_CP_ROQ_MRB_STAT_WPTR__SHIFT 16
+static inline uint32_t A6XX_CP_ROQ_MRB_STAT_WPTR(uint32_t val)
+{
+ return ((val) << A6XX_CP_ROQ_MRB_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_MRB_STAT_WPTR__MASK;
+}
+
+#define REG_A6XX_CP_ROQ_VSD_STAT 0x0000093e
+#define A6XX_CP_ROQ_VSD_STAT_RPTR__MASK 0x000003ff
+#define A6XX_CP_ROQ_VSD_STAT_RPTR__SHIFT 0
+static inline uint32_t A6XX_CP_ROQ_VSD_STAT_RPTR(uint32_t val)
+{
+ return ((val) << A6XX_CP_ROQ_VSD_STAT_RPTR__SHIFT) & A6XX_CP_ROQ_VSD_STAT_RPTR__MASK;
+}
+#define A6XX_CP_ROQ_VSD_STAT_WPTR__MASK 0x03ff0000
+#define A6XX_CP_ROQ_VSD_STAT_WPTR__SHIFT 16
+static inline uint32_t A6XX_CP_ROQ_VSD_STAT_WPTR(uint32_t val)
+{
+ return ((val) << A6XX_CP_ROQ_VSD_STAT_WPTR__SHIFT) & A6XX_CP_ROQ_VSD_STAT_WPTR__MASK;
+}
+
+#define REG_A6XX_CP_IB1_DWORDS 0x00000943
+
+#define REG_A6XX_CP_IB2_DWORDS 0x00000944
+
+#define REG_A6XX_CP_SDS_DWORDS 0x00000945
+
+#define REG_A6XX_CP_MRB_DWORDS 0x00000946
+
+#define REG_A6XX_CP_VSD_DWORDS 0x00000947
+
+#define REG_A6XX_CP_ROQ_AVAIL_RB 0x00000948
+#define A6XX_CP_ROQ_AVAIL_RB_REM__MASK 0xffff0000
+#define A6XX_CP_ROQ_AVAIL_RB_REM__SHIFT 16
+static inline uint32_t A6XX_CP_ROQ_AVAIL_RB_REM(uint32_t val)
+{
+ return ((val) << A6XX_CP_ROQ_AVAIL_RB_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_RB_REM__MASK;
+}
+
+#define REG_A6XX_CP_ROQ_AVAIL_IB1 0x00000949
+#define A6XX_CP_ROQ_AVAIL_IB1_REM__MASK 0xffff0000
+#define A6XX_CP_ROQ_AVAIL_IB1_REM__SHIFT 16
+static inline uint32_t A6XX_CP_ROQ_AVAIL_IB1_REM(uint32_t val)
+{
+ return ((val) << A6XX_CP_ROQ_AVAIL_IB1_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_IB1_REM__MASK;
+}
+
+#define REG_A6XX_CP_ROQ_AVAIL_IB2 0x0000094a
+#define A6XX_CP_ROQ_AVAIL_IB2_REM__MASK 0xffff0000
+#define A6XX_CP_ROQ_AVAIL_IB2_REM__SHIFT 16
+static inline uint32_t A6XX_CP_ROQ_AVAIL_IB2_REM(uint32_t val)
+{
+ return ((val) << A6XX_CP_ROQ_AVAIL_IB2_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_IB2_REM__MASK;
+}
+
+#define REG_A6XX_CP_ROQ_AVAIL_SDS 0x0000094b
+#define A6XX_CP_ROQ_AVAIL_SDS_REM__MASK 0xffff0000
+#define A6XX_CP_ROQ_AVAIL_SDS_REM__SHIFT 16
+static inline uint32_t A6XX_CP_ROQ_AVAIL_SDS_REM(uint32_t val)
+{
+ return ((val) << A6XX_CP_ROQ_AVAIL_SDS_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_SDS_REM__MASK;
+}
+
+#define REG_A6XX_CP_ROQ_AVAIL_MRB 0x0000094c
+#define A6XX_CP_ROQ_AVAIL_MRB_REM__MASK 0xffff0000
+#define A6XX_CP_ROQ_AVAIL_MRB_REM__SHIFT 16
+static inline uint32_t A6XX_CP_ROQ_AVAIL_MRB_REM(uint32_t val)
+{
+ return ((val) << A6XX_CP_ROQ_AVAIL_MRB_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_MRB_REM__MASK;
+}
+
+#define REG_A6XX_CP_ROQ_AVAIL_VSD 0x0000094d
+#define A6XX_CP_ROQ_AVAIL_VSD_REM__MASK 0xffff0000
+#define A6XX_CP_ROQ_AVAIL_VSD_REM__SHIFT 16
+static inline uint32_t A6XX_CP_ROQ_AVAIL_VSD_REM(uint32_t val)
+{
+ return ((val) << A6XX_CP_ROQ_AVAIL_VSD_REM__SHIFT) & A6XX_CP_ROQ_AVAIL_VSD_REM__MASK;
+}
+
+#define REG_A6XX_CP_ALWAYS_ON_COUNTER 0x00000980
+
+#define REG_A6XX_CP_AHB_CNTL 0x0000098d
+
+#define REG_A6XX_CP_APERTURE_CNTL_HOST 0x00000a00
+
+#define REG_A6XX_CP_APERTURE_CNTL_CD 0x00000a03
+
+#define REG_A7XX_CP_BV_PROTECT_STATUS 0x00000a61
+
+#define REG_A7XX_CP_BV_HW_FAULT 0x00000a64
+
+#define REG_A7XX_CP_BV_DRAW_STATE_ADDR 0x00000a81
+
+#define REG_A7XX_CP_BV_DRAW_STATE_DATA 0x00000a82
+
+#define REG_A7XX_CP_BV_ROQ_DBG_ADDR 0x00000a83
+
+#define REG_A7XX_CP_BV_ROQ_DBG_DATA 0x00000a84
+
+#define REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR 0x00000a85
+
+#define REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA 0x00000a86
+
+#define REG_A7XX_CP_BV_SQE_STAT_ADDR 0x00000a87
+
+#define REG_A7XX_CP_BV_SQE_STAT_DATA 0x00000a88
+
+#define REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR 0x00000a96
+
+#define REG_A7XX_CP_BV_MEM_POOL_DBG_DATA 0x00000a97
+
+#define REG_A7XX_CP_BV_RB_RPTR_ADDR 0x00000a98
+
+#define REG_A7XX_CP_RESOURCE_TBL_DBG_ADDR 0x00000a9a
+
+#define REG_A7XX_CP_RESOURCE_TBL_DBG_DATA 0x00000a9b
+
+#define REG_A7XX_CP_BV_APRIV_CNTL 0x00000ad0
+
+#define REG_A7XX_CP_BV_CHICKEN_DBG 0x00000ada
+
+#define REG_A7XX_CP_LPAC_DRAW_STATE_ADDR 0x00000b0a
+
+#define REG_A7XX_CP_LPAC_DRAW_STATE_DATA 0x00000b0b
+
+#define REG_A7XX_CP_LPAC_ROQ_DBG_ADDR 0x00000b0c
+
+#define REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR 0x00000b27
+
+#define REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA 0x00000b28
+
+#define REG_A7XX_CP_SQE_AC_STAT_ADDR 0x00000b29
+
+#define REG_A7XX_CP_SQE_AC_STAT_DATA 0x00000b2a
+
+#define REG_A7XX_CP_LPAC_APRIV_CNTL 0x00000b31
+
+#define REG_A6XX_CP_LPAC_PROG_FIFO_SIZE 0x00000b34
+
+#define REG_A7XX_CP_LPAC_ROQ_DBG_DATA 0x00000b35
+
+#define REG_A7XX_CP_LPAC_FIFO_DBG_DATA 0x00000b36
+
+#define REG_A7XX_CP_LPAC_FIFO_DBG_ADDR 0x00000b40
+
+#define REG_A6XX_CP_LPAC_SQE_INSTR_BASE 0x00000b82
+
+#define REG_A6XX_VSC_ADDR_MODE_CNTL 0x00000c01
+
+#define REG_A6XX_RBBM_GPR0_CNTL 0x00000018
+
+#define REG_A6XX_RBBM_INT_0_STATUS 0x00000201
+
+#define REG_A6XX_RBBM_STATUS 0x00000210
+#define A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB 0x00800000
+#define A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP 0x00400000
+#define A6XX_RBBM_STATUS_HLSQ_BUSY 0x00200000
+#define A6XX_RBBM_STATUS_VSC_BUSY 0x00100000
+#define A6XX_RBBM_STATUS_TPL1_BUSY 0x00080000
+#define A6XX_RBBM_STATUS_SP_BUSY 0x00040000
+#define A6XX_RBBM_STATUS_UCHE_BUSY 0x00020000
+#define A6XX_RBBM_STATUS_VPC_BUSY 0x00010000
+#define A6XX_RBBM_STATUS_VFD_BUSY 0x00008000
+#define A6XX_RBBM_STATUS_TESS_BUSY 0x00004000
+#define A6XX_RBBM_STATUS_PC_VSD_BUSY 0x00002000
+#define A6XX_RBBM_STATUS_PC_DCALL_BUSY 0x00001000
+#define A6XX_RBBM_STATUS_COM_DCOM_BUSY 0x00000800
+#define A6XX_RBBM_STATUS_LRZ_BUSY 0x00000400
+#define A6XX_RBBM_STATUS_A2D_BUSY 0x00000200
+#define A6XX_RBBM_STATUS_CCU_BUSY 0x00000100
+#define A6XX_RBBM_STATUS_RB_BUSY 0x00000080
+#define A6XX_RBBM_STATUS_RAS_BUSY 0x00000040
+#define A6XX_RBBM_STATUS_TSE_BUSY 0x00000020
+#define A6XX_RBBM_STATUS_VBIF_BUSY 0x00000010
+#define A6XX_RBBM_STATUS_GFX_DBGC_BUSY 0x00000008
+#define A6XX_RBBM_STATUS_CP_BUSY 0x00000004
+#define A6XX_RBBM_STATUS_CP_AHB_BUSY_CP_MASTER 0x00000002
+#define A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER 0x00000001
+
+#define REG_A6XX_RBBM_STATUS1 0x00000211
+
+#define REG_A6XX_RBBM_STATUS2 0x00000212
+
+#define REG_A6XX_RBBM_STATUS3 0x00000213
+#define A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT 0x01000000
+
+#define REG_A6XX_RBBM_VBIF_GX_RESET_STATUS 0x00000215
+
+#define REG_A7XX_RBBM_CLOCK_MODE_CP 0x00000260
+
+#define REG_A7XX_RBBM_CLOCK_MODE_BV_LRZ 0x00000284
+
+#define REG_A7XX_RBBM_CLOCK_MODE_BV_GRAS 0x00000285
+
+#define REG_A7XX_RBBM_CLOCK_MODE2_GRAS 0x00000286
+
+#define REG_A7XX_RBBM_CLOCK_MODE_BV_VFD 0x00000287
+
+#define REG_A7XX_RBBM_CLOCK_MODE_BV_GPC 0x00000288
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_CP(uint32_t i0) { return 0x00000400 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM(uint32_t i0) { return 0x0000041c + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_PC(uint32_t i0) { return 0x00000424 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_VFD(uint32_t i0) { return 0x00000434 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_HLSQ(uint32_t i0) { return 0x00000444 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_VPC(uint32_t i0) { return 0x00000450 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_CCU(uint32_t i0) { return 0x0000045c + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_TSE(uint32_t i0) { return 0x00000466 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_RAS(uint32_t i0) { return 0x0000046e + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_UCHE(uint32_t i0) { return 0x00000476 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_TP(uint32_t i0) { return 0x0000048e + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_SP(uint32_t i0) { return 0x000004a6 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_RB(uint32_t i0) { return 0x000004d6 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_VSC(uint32_t i0) { return 0x000004e6 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_LRZ(uint32_t i0) { return 0x000004ea + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_CMP(uint32_t i0) { return 0x000004f2 + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_CP(uint32_t i0) { return 0x00000300 + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_RBBM(uint32_t i0) { return 0x0000031c + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_PC(uint32_t i0) { return 0x00000324 + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_VFD(uint32_t i0) { return 0x00000334 + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_HLSQ(uint32_t i0) { return 0x00000344 + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_VPC(uint32_t i0) { return 0x00000350 + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_CCU(uint32_t i0) { return 0x0000035c + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_TSE(uint32_t i0) { return 0x00000366 + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_RAS(uint32_t i0) { return 0x0000036e + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_UCHE(uint32_t i0) { return 0x00000376 + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_TP(uint32_t i0) { return 0x0000038e + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_SP(uint32_t i0) { return 0x000003a6 + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_RB(uint32_t i0) { return 0x000003d6 + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_VSC(uint32_t i0) { return 0x000003e6 + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_LRZ(uint32_t i0) { return 0x000003ea + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_CMP(uint32_t i0) { return 0x000003f2 + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_UFC(uint32_t i0) { return 0x000003fa + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR2_HLSQ(uint32_t i0) { return 0x00000410 + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR2_CP(uint32_t i0) { return 0x0000041c + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR2_SP(uint32_t i0) { return 0x0000042a + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR2_TP(uint32_t i0) { return 0x00000442 + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR2_UFC(uint32_t i0) { return 0x0000044e + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_BV_PC(uint32_t i0) { return 0x00000460 + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_BV_VFD(uint32_t i0) { return 0x00000470 + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_BV_VPC(uint32_t i0) { return 0x00000480 + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_BV_TSE(uint32_t i0) { return 0x0000048c + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_BV_RAS(uint32_t i0) { return 0x00000494 + 0x2*i0; }
+
+static inline uint32_t REG_A7XX_RBBM_PERFCTR_BV_LRZ(uint32_t i0) { return 0x0000049c + 0x2*i0; }
+
+#define REG_A6XX_RBBM_PERFCTR_CNTL 0x00000500
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD0 0x00000501
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD1 0x00000502
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD2 0x00000503
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD3 0x00000504
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000505
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000506
+
+static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00000507 + 0x1*i0; }
+
+#define REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000050b
+
+#define REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD 0x0000050e
+
+#define REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS 0x0000050f
+
+#define REG_A6XX_RBBM_ISDB_CNT 0x00000533
+
+#define REG_A7XX_RBBM_NC_MODE_CNTL 0x00000534
+
+#define REG_A7XX_RBBM_SNAPSHOT_STATUS 0x00000535
+
+#define REG_A6XX_RBBM_PRIMCTR_0_LO 0x00000540
+
+#define REG_A6XX_RBBM_PRIMCTR_0_HI 0x00000541
+
+#define REG_A6XX_RBBM_PRIMCTR_1_LO 0x00000542
+
+#define REG_A6XX_RBBM_PRIMCTR_1_HI 0x00000543
+
+#define REG_A6XX_RBBM_PRIMCTR_2_LO 0x00000544
+
+#define REG_A6XX_RBBM_PRIMCTR_2_HI 0x00000545
+
+#define REG_A6XX_RBBM_PRIMCTR_3_LO 0x00000546
+
+#define REG_A6XX_RBBM_PRIMCTR_3_HI 0x00000547
+
+#define REG_A6XX_RBBM_PRIMCTR_4_LO 0x00000548
+
+#define REG_A6XX_RBBM_PRIMCTR_4_HI 0x00000549
+
+#define REG_A6XX_RBBM_PRIMCTR_5_LO 0x0000054a
+
+#define REG_A6XX_RBBM_PRIMCTR_5_HI 0x0000054b
+
+#define REG_A6XX_RBBM_PRIMCTR_6_LO 0x0000054c
+
+#define REG_A6XX_RBBM_PRIMCTR_6_HI 0x0000054d
+
+#define REG_A6XX_RBBM_PRIMCTR_7_LO 0x0000054e
+
+#define REG_A6XX_RBBM_PRIMCTR_7_HI 0x0000054f
+
+#define REG_A6XX_RBBM_PRIMCTR_8_LO 0x00000550
+
+#define REG_A6XX_RBBM_PRIMCTR_8_HI 0x00000551
+
+#define REG_A6XX_RBBM_PRIMCTR_9_LO 0x00000552
+
+#define REG_A6XX_RBBM_PRIMCTR_9_HI 0x00000553
+
+#define REG_A6XX_RBBM_PRIMCTR_10_LO 0x00000554
+
+#define REG_A6XX_RBBM_PRIMCTR_10_HI 0x00000555
+
+#define REG_A6XX_RBBM_SECVID_TRUST_CNTL 0x0000f400
+
+#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE 0x0000f800
+
+#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0x0000f802
+
+#define REG_A6XX_RBBM_SECVID_TSB_CNTL 0x0000f803
+
+#define REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810
+
+#define REG_A7XX_RBBM_SECVID_TSB_STATUS 0x0000fc00
+
+#define REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL 0x00000010
+
+#define REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL 0x00000011
+
+#define REG_A6XX_RBBM_GBIF_HALT 0x00000016
+
+#define REG_A6XX_RBBM_GBIF_HALT_ACK 0x00000017
+
+#define REG_A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD 0x0000001c
+#define A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD_WAIT_GPU_IDLE 0x00000001
+
+#define REG_A7XX_RBBM_GBIF_HALT 0x00000016
+
+#define REG_A7XX_RBBM_GBIF_HALT_ACK 0x00000017
+
+#define REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000001f
+
+#define REG_A6XX_RBBM_INT_CLEAR_CMD 0x00000037
+
+#define REG_A6XX_RBBM_INT_0_MASK 0x00000038
+
+#define REG_A7XX_RBBM_INT_2_MASK 0x0000003a
+
+#define REG_A6XX_RBBM_SP_HYST_CNT 0x00000042
+
+#define REG_A6XX_RBBM_SW_RESET_CMD 0x00000043
+
+#define REG_A6XX_RBBM_RAC_THRESHOLD_CNT 0x00000044
+
+#define REG_A6XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
+
+#define REG_A6XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046
+
+#define REG_A6XX_RBBM_CLOCK_CNTL 0x000000ae
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP0 0x000000b0
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP1 0x000000b1
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP2 0x000000b2
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP3 0x000000b3
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP0 0x000000b4
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP1 0x000000b5
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP2 0x000000b6
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP3 0x000000b7
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP0 0x000000b8
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP1 0x000000b9
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP2 0x000000ba
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP3 0x000000bb
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP0 0x000000bc
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP1 0x000000bd
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP2 0x000000be
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP3 0x000000bf
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP0 0x000000c0
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP1 0x000000c1
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP2 0x000000c2
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP3 0x000000c3
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP0 0x000000c4
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP1 0x000000c5
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP2 0x000000c6
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP3 0x000000c7
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP0 0x000000c8
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP1 0x000000c9
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP2 0x000000ca
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP3 0x000000cb
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP0 0x000000cc
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP1 0x000000cd
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP2 0x000000ce
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP3 0x000000cf
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP0 0x000000d0
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP1 0x000000d1
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP2 0x000000d2
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP3 0x000000d3
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP0 0x000000d4
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP1 0x000000d5
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP2 0x000000d6
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP3 0x000000d7
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP0 0x000000d8
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP1 0x000000d9
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP2 0x000000da
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP3 0x000000db
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP0 0x000000dc
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP1 0x000000dd
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP2 0x000000de
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP3 0x000000df
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP0 0x000000e0
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP1 0x000000e1
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP2 0x000000e2
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP3 0x000000e3
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP0 0x000000e4
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP1 0x000000e5
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP2 0x000000e6
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP3 0x000000e7
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP0 0x000000e8
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP1 0x000000e9
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP2 0x000000ea
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP3 0x000000eb
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP0 0x000000ec
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP1 0x000000ed
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP2 0x000000ee
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP3 0x000000ef
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB0 0x000000f0
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB1 0x000000f1
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB2 0x000000f2
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB3 0x000000f3
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB0 0x000000f4
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB1 0x000000f5
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB2 0x000000f6
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB3 0x000000f7
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU0 0x000000f8
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU1 0x000000f9
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU2 0x000000fa
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU3 0x000000fb
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0 0x00000100
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1 0x00000101
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2 0x00000102
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3 0x00000103
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RAC 0x00000104
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RAC 0x00000105
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_RAC 0x00000106
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RAC 0x00000107
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00000108
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00000109
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x0000010a
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_UCHE 0x0000010b
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_UCHE 0x0000010c
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_UCHE 0x0000010d
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_UCHE 0x0000010e
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_UCHE 0x0000010f
+
+#define REG_A6XX_RBBM_CLOCK_HYST_UCHE 0x00000110
+
+#define REG_A6XX_RBBM_CLOCK_MODE_VFD 0x00000111
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_VFD 0x00000112
+
+#define REG_A6XX_RBBM_CLOCK_HYST_VFD 0x00000113
+
+#define REG_A6XX_RBBM_CLOCK_MODE_GPC 0x00000114
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_GPC 0x00000115
+
+#define REG_A6XX_RBBM_CLOCK_HYST_GPC 0x00000116
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2 0x00000117
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX 0x00000118
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX 0x00000119
+
+#define REG_A6XX_RBBM_CLOCK_HYST_GMU_GX 0x0000011a
+
+#define REG_A6XX_RBBM_CLOCK_MODE_HLSQ 0x0000011b
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_HLSQ 0x0000011c
+
+#define REG_A6XX_RBBM_CLOCK_HYST_HLSQ 0x0000011d
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE 0x00000120
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE 0x00000121
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE 0x00000122
+
+#define REG_A6XX_RBBM_LPAC_GBIF_CLIENT_QOS_CNTL 0x000005ff
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_A 0x00000600
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_B 0x00000601
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_C 0x00000602
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_D 0x00000603
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK 0x000000ff
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT) & A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK 0x0000ff00
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT 8
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT) & A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_CNTLT 0x00000604
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_CNTLM 0x00000605
+#define A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
+#define A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0 0x00000608
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1 0x00000609
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2 0x0000060a
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3 0x0000060b
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0 0x0000060c
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1 0x0000060d
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2 0x0000060e
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3 0x0000060f
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0 0x00000610
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1 0x00000611
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0000062f
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000630
+
+static inline uint32_t REG_A6XX_VSC_PERFCTR_VSC_SEL(uint32_t i0) { return 0x00000cd8 + 0x1*i0; }
+
+#define REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000c800
+
+#define REG_A6XX_HLSQ_DBG_READ_SEL 0x0000d000
+
+#define REG_A6XX_UCHE_ADDR_MODE_CNTL 0x00000e00
+
+#define REG_A6XX_UCHE_MODE_CNTL 0x00000e01
+
+#define REG_A6XX_UCHE_WRITE_RANGE_MAX 0x00000e05
+
+#define REG_A6XX_UCHE_WRITE_THRU_BASE 0x00000e07
+
+#define REG_A6XX_UCHE_TRAP_BASE 0x00000e09
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MIN 0x00000e0b
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MAX 0x00000e0d
+
+#define REG_A6XX_UCHE_CACHE_WAYS 0x00000e17
+
+#define REG_A6XX_UCHE_FILTER_CNTL 0x00000e18
+
+#define REG_A6XX_UCHE_CLIENT_PF 0x00000e19
+#define A6XX_UCHE_CLIENT_PF_PERFSEL__MASK 0x000000ff
+#define A6XX_UCHE_CLIENT_PF_PERFSEL__SHIFT 0
+static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val)
+{
+ return ((val) << A6XX_UCHE_CLIENT_PF_PERFSEL__SHIFT) & A6XX_UCHE_CLIENT_PF_PERFSEL__MASK;
+}
+
+static inline uint32_t REG_A6XX_UCHE_PERFCTR_UCHE_SEL(uint32_t i0) { return 0x00000e1c + 0x1*i0; }
+
+#define REG_A6XX_UCHE_GBIF_GX_CONFIG 0x00000e3a
+
+#define REG_A6XX_UCHE_CMDQ_CONFIG 0x00000e3c
+
+#define REG_A6XX_VBIF_VERSION 0x00003000
+
+#define REG_A6XX_VBIF_CLKON 0x00003001
+#define A6XX_VBIF_CLKON_FORCE_ON_TESTBUS 0x00000002
+
+#define REG_A6XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A6XX_VBIF_XIN_HALT_CTRL0 0x00003080
+
+#define REG_A6XX_VBIF_XIN_HALT_CTRL1 0x00003081
+
+#define REG_A6XX_VBIF_TEST_BUS_OUT_CTRL 0x00003084
+
+#define REG_A6XX_VBIF_TEST_BUS1_CTRL0 0x00003085
+
+#define REG_A6XX_VBIF_TEST_BUS1_CTRL1 0x00003086
+#define A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__MASK 0x0000000f
+#define A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__SHIFT 0
+static inline uint32_t A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL(uint32_t val)
+{
+ return ((val) << A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__SHIFT) & A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__MASK;
+}
+
+#define REG_A6XX_VBIF_TEST_BUS2_CTRL0 0x00003087
+
+#define REG_A6XX_VBIF_TEST_BUS2_CTRL1 0x00003088
+#define A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__MASK 0x000001ff
+#define A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__SHIFT 0
+static inline uint32_t A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL(uint32_t val)
+{
+ return ((val) << A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__SHIFT) & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__MASK;
+}
+
+#define REG_A6XX_VBIF_TEST_BUS_OUT 0x0000308c
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL0 0x000030d0
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL1 0x000030d1
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL2 0x000030d2
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL3 0x000030d3
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW0 0x000030d8
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW1 0x000030d9
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW2 0x000030da
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW3 0x000030db
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH0 0x000030e0
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH1 0x000030e1
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH2 0x000030e2
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH3 0x000030e3
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW1 0x00003111
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW2 0x00003112
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH0 0x00003118
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
+
+#define REG_A6XX_GBIF_SCACHE_CNTL0 0x00003c01
+
+#define REG_A6XX_GBIF_SCACHE_CNTL1 0x00003c02
+
+#define REG_A6XX_GBIF_QSB_SIDE0 0x00003c03
+
+#define REG_A6XX_GBIF_QSB_SIDE1 0x00003c04
+
+#define REG_A6XX_GBIF_QSB_SIDE2 0x00003c05
+
+#define REG_A6XX_GBIF_QSB_SIDE3 0x00003c06
+
+#define REG_A6XX_GBIF_HALT 0x00003c45
+
+#define REG_A6XX_GBIF_HALT_ACK 0x00003c46
+
+#define REG_A6XX_GBIF_PERF_PWR_CNT_EN 0x00003cc0
+
+#define REG_A6XX_GBIF_PERF_PWR_CNT_CLR 0x00003cc1
+
+#define REG_A6XX_GBIF_PERF_CNT_SEL 0x00003cc2
+
+#define REG_A6XX_GBIF_PERF_PWR_CNT_SEL 0x00003cc3
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW0 0x00003cc4
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW1 0x00003cc5
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW2 0x00003cc6
+
+#define REG_A6XX_GBIF_PERF_CNT_LOW3 0x00003cc7
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH0 0x00003cc8
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH1 0x00003cc9
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH2 0x00003cca
+
+#define REG_A6XX_GBIF_PERF_CNT_HIGH3 0x00003ccb
+
+#define REG_A6XX_GBIF_PWR_CNT_LOW0 0x00003ccc
+
+#define REG_A6XX_GBIF_PWR_CNT_LOW1 0x00003ccd
+
+#define REG_A6XX_GBIF_PWR_CNT_LOW2 0x00003cce
+
+#define REG_A6XX_GBIF_PWR_CNT_HIGH0 0x00003ccf
+
+#define REG_A6XX_GBIF_PWR_CNT_HIGH1 0x00003cd0
+
+#define REG_A6XX_GBIF_PWR_CNT_HIGH2 0x00003cd1
+
+#define REG_A6XX_VSC_DBG_ECO_CNTL 0x00000c00
+
+#define REG_A6XX_VSC_BIN_SIZE 0x00000c02
+#define A6XX_VSC_BIN_SIZE_WIDTH__MASK 0x000000ff
+#define A6XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A6XX_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A6XX_VSC_BIN_SIZE_HEIGHT__MASK 0x0001ff00
+#define A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT 8
+static inline uint32_t A6XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 4) << A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A6XX_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A6XX_VSC_DRAW_STRM_SIZE_ADDRESS 0x00000c03
+
+#define REG_A6XX_VSC_BIN_COUNT 0x00000c06
+#define A6XX_VSC_BIN_COUNT_NX__MASK 0x000007fe
+#define A6XX_VSC_BIN_COUNT_NX__SHIFT 1
+static inline uint32_t A6XX_VSC_BIN_COUNT_NX(uint32_t val)
+{
+ return ((val) << A6XX_VSC_BIN_COUNT_NX__SHIFT) & A6XX_VSC_BIN_COUNT_NX__MASK;
+}
+#define A6XX_VSC_BIN_COUNT_NY__MASK 0x001ff800
+#define A6XX_VSC_BIN_COUNT_NY__SHIFT 11
+static inline uint32_t A6XX_VSC_BIN_COUNT_NY(uint32_t val)
+{
+ return ((val) << A6XX_VSC_BIN_COUNT_NY__SHIFT) & A6XX_VSC_BIN_COUNT_NY__MASK;
+}
+
+static inline uint32_t REG_A6XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+#define A6XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
+#define A6XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_X__MASK;
+}
+#define A6XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00
+#define A6XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_Y__MASK;
+}
+#define A6XX_VSC_PIPE_CONFIG_REG_W__MASK 0x03f00000
+#define A6XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_W(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_W__MASK;
+}
+#define A6XX_VSC_PIPE_CONFIG_REG_H__MASK 0xfc000000
+#define A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT 26
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_H__MASK;
+}
+
+#define REG_A6XX_VSC_PRIM_STRM_ADDRESS 0x00000c30
+
+#define REG_A6XX_VSC_PRIM_STRM_PITCH 0x00000c32
+
+#define REG_A6XX_VSC_PRIM_STRM_LIMIT 0x00000c33
+
+#define REG_A6XX_VSC_DRAW_STRM_ADDRESS 0x00000c34
+
+#define REG_A6XX_VSC_DRAW_STRM_PITCH 0x00000c36
+
+#define REG_A6XX_VSC_DRAW_STRM_LIMIT 0x00000c37
+
+static inline uint32_t REG_A6XX_VSC_STATE(uint32_t i0) { return 0x00000c38 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_STATE_REG(uint32_t i0) { return 0x00000c38 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_PRIM_STRM_SIZE(uint32_t i0) { return 0x00000c58 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_PRIM_STRM_SIZE_REG(uint32_t i0) { return 0x00000c58 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_DRAW_STRM_SIZE(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_DRAW_STRM_SIZE_REG(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
+
+#define REG_A6XX_UCHE_UNKNOWN_0E12 0x00000e12
+
+#define REG_A6XX_GRAS_CL_CNTL 0x00008000
+#define A6XX_GRAS_CL_CNTL_CLIP_DISABLE 0x00000001
+#define A6XX_GRAS_CL_CNTL_ZNEAR_CLIP_DISABLE 0x00000002
+#define A6XX_GRAS_CL_CNTL_ZFAR_CLIP_DISABLE 0x00000004
+#define A6XX_GRAS_CL_CNTL_Z_CLAMP_ENABLE 0x00000020
+#define A6XX_GRAS_CL_CNTL_ZERO_GB_SCALE_Z 0x00000040
+#define A6XX_GRAS_CL_CNTL_VP_CLIP_CODE_IGNORE 0x00000080
+#define A6XX_GRAS_CL_CNTL_VP_XFORM_DISABLE 0x00000100
+#define A6XX_GRAS_CL_CNTL_PERSP_DIVISION_DISABLE 0x00000200
+
+#define REG_A6XX_GRAS_VS_CL_CNTL 0x00008001
+#define A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_GRAS_VS_CL_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__SHIFT) & A6XX_GRAS_VS_CL_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK 0x0000ff00
+#define A6XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT 8
+static inline uint32_t A6XX_GRAS_VS_CL_CNTL_CULL_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_VS_CL_CNTL_CULL_MASK__SHIFT) & A6XX_GRAS_VS_CL_CNTL_CULL_MASK__MASK;
+}
+
+#define REG_A6XX_GRAS_DS_CL_CNTL 0x00008002
+#define A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_GRAS_DS_CL_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__SHIFT) & A6XX_GRAS_DS_CL_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_GRAS_DS_CL_CNTL_CULL_MASK__MASK 0x0000ff00
+#define A6XX_GRAS_DS_CL_CNTL_CULL_MASK__SHIFT 8
+static inline uint32_t A6XX_GRAS_DS_CL_CNTL_CULL_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_DS_CL_CNTL_CULL_MASK__SHIFT) & A6XX_GRAS_DS_CL_CNTL_CULL_MASK__MASK;
+}
+
+#define REG_A6XX_GRAS_GS_CL_CNTL 0x00008003
+#define A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_GRAS_GS_CL_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__SHIFT) & A6XX_GRAS_GS_CL_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_GRAS_GS_CL_CNTL_CULL_MASK__MASK 0x0000ff00
+#define A6XX_GRAS_GS_CL_CNTL_CULL_MASK__SHIFT 8
+static inline uint32_t A6XX_GRAS_GS_CL_CNTL_CULL_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_GS_CL_CNTL_CULL_MASK__SHIFT) & A6XX_GRAS_GS_CL_CNTL_CULL_MASK__MASK;
+}
+
+#define REG_A6XX_GRAS_MAX_LAYER_INDEX 0x00008004
+
+#define REG_A6XX_GRAS_CNTL 0x00008005
+#define A6XX_GRAS_CNTL_IJ_PERSP_PIXEL 0x00000001
+#define A6XX_GRAS_CNTL_IJ_PERSP_CENTROID 0x00000002
+#define A6XX_GRAS_CNTL_IJ_PERSP_SAMPLE 0x00000004
+#define A6XX_GRAS_CNTL_IJ_LINEAR_PIXEL 0x00000008
+#define A6XX_GRAS_CNTL_IJ_LINEAR_CENTROID 0x00000010
+#define A6XX_GRAS_CNTL_IJ_LINEAR_SAMPLE 0x00000020
+#define A6XX_GRAS_CNTL_COORD_MASK__MASK 0x000003c0
+#define A6XX_GRAS_CNTL_COORD_MASK__SHIFT 6
+static inline uint32_t A6XX_GRAS_CNTL_COORD_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_CNTL_COORD_MASK__SHIFT) & A6XX_GRAS_CNTL_COORD_MASK__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x00008006
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000001ff
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
+}
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x0007fc00
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT(uint32_t i0) { return 0x00008010 + 0x6*i0; }
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_XOFFSET(uint32_t i0) { return 0x00008010 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_XOFFSET__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_XOFFSET__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_XOFFSET(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_XOFFSET__SHIFT) & A6XX_GRAS_CL_VPORT_XOFFSET__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_XSCALE(uint32_t i0) { return 0x00008011 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_XSCALE__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_XSCALE__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_XSCALE(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_XSCALE__SHIFT) & A6XX_GRAS_CL_VPORT_XSCALE__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_YOFFSET(uint32_t i0) { return 0x00008012 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_YOFFSET__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_YOFFSET__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_YOFFSET(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_YOFFSET__SHIFT) & A6XX_GRAS_CL_VPORT_YOFFSET__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_YSCALE(uint32_t i0) { return 0x00008013 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_YSCALE__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_YSCALE__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_YSCALE(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_YSCALE__SHIFT) & A6XX_GRAS_CL_VPORT_YSCALE__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_ZOFFSET(uint32_t i0) { return 0x00008014 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_ZOFFSET__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_ZOFFSET__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_ZOFFSET(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZOFFSET__SHIFT) & A6XX_GRAS_CL_VPORT_ZOFFSET__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_VPORT_ZSCALE(uint32_t i0) { return 0x00008015 + 0x6*i0; }
+#define A6XX_GRAS_CL_VPORT_ZSCALE__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_ZSCALE__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_ZSCALE(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZSCALE__SHIFT) & A6XX_GRAS_CL_VPORT_ZSCALE__MASK;
+}
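+
+/* Usage sketch (illustrative only, not generated from the XML): each viewport
+ * occupies a 6-dword group of float32 scale/offset pairs, indexed through the
+ * REG_A6XX_GRAS_CL_VPORT_*(i0) helpers above.  Assuming a hypothetical render
+ * area of W x H pixels and a [0, 1] depth range, viewport 0 might be encoded
+ * as:
+ *
+ *   uint32_t xoff = A6XX_GRAS_CL_VPORT_XOFFSET(W * 0.5f);
+ *   uint32_t xscl = A6XX_GRAS_CL_VPORT_XSCALE(W * 0.5f);
+ *   uint32_t yoff = A6XX_GRAS_CL_VPORT_YOFFSET(H * 0.5f);
+ *   uint32_t yscl = A6XX_GRAS_CL_VPORT_YSCALE(H * 0.5f);
+ *   uint32_t zoff = A6XX_GRAS_CL_VPORT_ZOFFSET(0.0f);
+ *   uint32_t zscl = A6XX_GRAS_CL_VPORT_ZSCALE(1.0f);
+ *
+ * written to REG_A6XX_GRAS_CL_VPORT_XOFFSET(0) through
+ * REG_A6XX_GRAS_CL_VPORT_ZSCALE(0).
+ */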
+
+static inline uint32_t REG_A6XX_GRAS_CL_Z_CLAMP(uint32_t i0) { return 0x00008070 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_GRAS_CL_Z_CLAMP_MIN(uint32_t i0) { return 0x00008070 + 0x2*i0; }
+#define A6XX_GRAS_CL_Z_CLAMP_MIN__MASK 0xffffffff
+#define A6XX_GRAS_CL_Z_CLAMP_MIN__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_Z_CLAMP_MIN(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_Z_CLAMP_MIN__SHIFT) & A6XX_GRAS_CL_Z_CLAMP_MIN__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_CL_Z_CLAMP_MAX(uint32_t i0) { return 0x00008071 + 0x2*i0; }
+#define A6XX_GRAS_CL_Z_CLAMP_MAX__MASK 0xffffffff
+#define A6XX_GRAS_CL_Z_CLAMP_MAX__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_Z_CLAMP_MAX(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_Z_CLAMP_MAX__SHIFT) & A6XX_GRAS_CL_Z_CLAMP_MAX__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_CNTL 0x00008090
+#define A6XX_GRAS_SU_CNTL_CULL_FRONT 0x00000001
+#define A6XX_GRAS_SU_CNTL_CULL_BACK 0x00000002
+#define A6XX_GRAS_SU_CNTL_FRONT_CW 0x00000004
+#define A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK 0x000007f8
+#define A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(float val)
+{
+ return ((((int32_t)(val * 4.0))) << A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT) & A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
+}
+#define A6XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800
+#define A6XX_GRAS_SU_CNTL_UNK12__MASK 0x00001000
+#define A6XX_GRAS_SU_CNTL_UNK12__SHIFT 12
+static inline uint32_t A6XX_GRAS_SU_CNTL_UNK12(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SU_CNTL_UNK12__SHIFT) & A6XX_GRAS_SU_CNTL_UNK12__MASK;
+}
+#define A6XX_GRAS_SU_CNTL_LINE_MODE__MASK 0x00002000
+#define A6XX_GRAS_SU_CNTL_LINE_MODE__SHIFT 13
+static inline uint32_t A6XX_GRAS_SU_CNTL_LINE_MODE(enum a5xx_line_mode val)
+{
+ return ((val) << A6XX_GRAS_SU_CNTL_LINE_MODE__SHIFT) & A6XX_GRAS_SU_CNTL_LINE_MODE__MASK;
+}
+#define A6XX_GRAS_SU_CNTL_UNK15__MASK 0x00018000
+#define A6XX_GRAS_SU_CNTL_UNK15__SHIFT 15
+static inline uint32_t A6XX_GRAS_SU_CNTL_UNK15(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SU_CNTL_UNK15__SHIFT) & A6XX_GRAS_SU_CNTL_UNK15__MASK;
+}
+#define A6XX_GRAS_SU_CNTL_UNK17 0x00020000
+#define A6XX_GRAS_SU_CNTL_MULTIVIEW_ENABLE 0x00040000
+#define A6XX_GRAS_SU_CNTL_UNK19__MASK 0x00780000
+#define A6XX_GRAS_SU_CNTL_UNK19__SHIFT 19
+static inline uint32_t A6XX_GRAS_SU_CNTL_UNK19(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SU_CNTL_UNK19__SHIFT) & A6XX_GRAS_SU_CNTL_UNK19__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POINT_MINMAX 0x00008091
+#define A6XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A6XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A6XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A6XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A6XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A6XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A6XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POINT_SIZE 0x00008092
+#define A6XX_GRAS_SU_POINT_SIZE__MASK 0x0000ffff
+#define A6XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_SIZE__SHIFT) & A6XX_GRAS_SU_POINT_SIZE__MASK;
+}
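+
+/* Usage sketch (illustrative only, not generated from the XML): the
+ * POINT_SIZE and POINT_MINMAX helpers multiply by 16.0 before truncating,
+ * i.e. the fields appear to be unsigned 12.4 fixed point.  For example:
+ *
+ *   A6XX_GRAS_SU_POINT_SIZE(1.0f)        == 0x00000010   // 1.0  -> 16
+ *   A6XX_GRAS_SU_POINT_MINMAX_MIN(0.5f)  == 0x00000008   // 0.5  -> 8
+ *   A6XX_GRAS_SU_POINT_MINMAX_MAX(64.0f) == 0x04000000   // 64.0 -> 1024 << 16
+ */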
+
+#define REG_A6XX_GRAS_SU_DEPTH_PLANE_CNTL 0x00008094
+#define A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__MASK 0x00000003
+#define A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE(enum a6xx_ztest_mode val)
+{
+ return ((val) << A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__SHIFT) & A6XX_GRAS_SU_DEPTH_PLANE_CNTL_Z_MODE__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE 0x00008095
+#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POLY_OFFSET_OFFSET 0x00008096
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP 0x00008097
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK 0xffffffff
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO 0x00008098
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_format val)
+{
+ return ((val) << A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__MASK 0x00000008
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__SHIFT 3
+static inline uint32_t A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__SHIFT) & A6XX_GRAS_SU_DEPTH_BUFFER_INFO_UNK3__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL 0x00008099
+#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_CONSERVATIVERASEN 0x00000001
+#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT__MASK 0x00000006
+#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT__SHIFT 1
+static inline uint32_t A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT__SHIFT) & A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_SHIFTAMOUNT__MASK;
+}
+#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_INNERCONSERVATIVERASEN 0x00000008
+#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4__MASK 0x00000030
+#define A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4__SHIFT 4
+static inline uint32_t A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4__SHIFT) & A6XX_GRAS_SU_CONSERVATIVE_RAS_CNTL_UNK4__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_PATH_RENDERING_CNTL 0x0000809a
+#define A6XX_GRAS_SU_PATH_RENDERING_CNTL_UNK0 0x00000001
+#define A6XX_GRAS_SU_PATH_RENDERING_CNTL_LINELENGTHEN 0x00000002
+
+#define REG_A6XX_GRAS_VS_LAYER_CNTL 0x0000809b
+#define A6XX_GRAS_VS_LAYER_CNTL_WRITES_LAYER 0x00000001
+#define A6XX_GRAS_VS_LAYER_CNTL_WRITES_VIEW 0x00000002
+
+#define REG_A6XX_GRAS_GS_LAYER_CNTL 0x0000809c
+#define A6XX_GRAS_GS_LAYER_CNTL_WRITES_LAYER 0x00000001
+#define A6XX_GRAS_GS_LAYER_CNTL_WRITES_VIEW 0x00000002
+
+#define REG_A6XX_GRAS_DS_LAYER_CNTL 0x0000809d
+#define A6XX_GRAS_DS_LAYER_CNTL_WRITES_LAYER 0x00000001
+#define A6XX_GRAS_DS_LAYER_CNTL_WRITES_VIEW 0x00000002
+
+#define REG_A6XX_GRAS_SC_CNTL 0x000080a0
+#define A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE__MASK 0x00000007
+#define A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE__SHIFT) & A6XX_GRAS_SC_CNTL_CCUSINGLECACHELINESIZE__MASK;
+}
+#define A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE__MASK 0x00000018
+#define A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE__SHIFT 3
+static inline uint32_t A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE(enum a6xx_single_prim_mode val)
+{
+ return ((val) << A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE__SHIFT) & A6XX_GRAS_SC_CNTL_SINGLE_PRIM_MODE__MASK;
+}
+#define A6XX_GRAS_SC_CNTL_RASTER_MODE__MASK 0x00000020
+#define A6XX_GRAS_SC_CNTL_RASTER_MODE__SHIFT 5
+static inline uint32_t A6XX_GRAS_SC_CNTL_RASTER_MODE(enum a6xx_raster_mode val)
+{
+ return ((val) << A6XX_GRAS_SC_CNTL_RASTER_MODE__SHIFT) & A6XX_GRAS_SC_CNTL_RASTER_MODE__MASK;
+}
+#define A6XX_GRAS_SC_CNTL_RASTER_DIRECTION__MASK 0x000000c0
+#define A6XX_GRAS_SC_CNTL_RASTER_DIRECTION__SHIFT 6
+static inline uint32_t A6XX_GRAS_SC_CNTL_RASTER_DIRECTION(enum a6xx_raster_direction val)
+{
+ return ((val) << A6XX_GRAS_SC_CNTL_RASTER_DIRECTION__SHIFT) & A6XX_GRAS_SC_CNTL_RASTER_DIRECTION__MASK;
+}
+#define A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__MASK 0x00000100
+#define A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__SHIFT 8
+static inline uint32_t A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION(enum a6xx_sequenced_thread_dist val)
+{
+ return ((val) << A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__SHIFT) & A6XX_GRAS_SC_CNTL_SEQUENCED_THREAD_DISTRIBUTION__MASK;
+}
+#define A6XX_GRAS_SC_CNTL_UNK9 0x00000200
+#define A6XX_GRAS_SC_CNTL_ROTATION__MASK 0x00000c00
+#define A6XX_GRAS_SC_CNTL_ROTATION__SHIFT 10
+static inline uint32_t A6XX_GRAS_SC_CNTL_ROTATION(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_CNTL_ROTATION__SHIFT) & A6XX_GRAS_SC_CNTL_ROTATION__MASK;
+}
+#define A6XX_GRAS_SC_CNTL_EARLYVIZOUTEN 0x00001000
+
+#define REG_A6XX_GRAS_BIN_CONTROL 0x000080a1
+#define A6XX_GRAS_BIN_CONTROL_BINW__MASK 0x0000003f
+#define A6XX_GRAS_BIN_CONTROL_BINW__SHIFT 0
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINW(uint32_t val)
+{
+ return ((val >> 5) << A6XX_GRAS_BIN_CONTROL_BINW__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINW__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_BINH__MASK 0x00007f00
+#define A6XX_GRAS_BIN_CONTROL_BINH__SHIFT 8
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_GRAS_BIN_CONTROL_BINH__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINH__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_RENDER_MODE__MASK 0x001c0000
+#define A6XX_GRAS_BIN_CONTROL_RENDER_MODE__SHIFT 18
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_RENDER_MODE(enum a6xx_render_mode val)
+{
+ return ((val) << A6XX_GRAS_BIN_CONTROL_RENDER_MODE__SHIFT) & A6XX_GRAS_BIN_CONTROL_RENDER_MODE__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_FORCE_LRZ_WRITE_DIS 0x00200000
+#define A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION__MASK 0x00c00000
+#define A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION__SHIFT 22
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION(enum a6xx_buffers_location val)
+{
+ return ((val) << A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION__SHIFT) & A6XX_GRAS_BIN_CONTROL_BUFFERS_LOCATION__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK 0x07000000
+#define A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT 24
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT) & A6XX_GRAS_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK;
+}
+#define A6XX_GRAS_BIN_CONTROL_UNK27__MASK 0x08000000
+#define A6XX_GRAS_BIN_CONTROL_UNK27__SHIFT 27
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_UNK27(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_BIN_CONTROL_UNK27__SHIFT) & A6XX_GRAS_BIN_CONTROL_UNK27__MASK;
+}
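+
+/* Usage sketch (illustrative only, not generated from the XML): the
+ * BINW/BINH helpers pre-shift the pixel dimensions by 5 and 4 respectively,
+ * so bin widths are programmed in units of 32 pixels and bin heights in
+ * units of 16 pixels.  A hypothetical 96x128 bin would be encoded as:
+ *
+ *   uint32_t bin_cntl = A6XX_GRAS_BIN_CONTROL_BINW(96) |   // 96 >> 5 = 3
+ *                       A6XX_GRAS_BIN_CONTROL_BINH(128);   // 128 >> 4 = 8, at bit 8
+ *
+ * giving 0x00000803, with RENDER_MODE/BUFFERS_LOCATION OR'd in from the
+ * corresponding enum helpers as needed.
+ */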
+
+#define REG_A6XX_GRAS_RAS_MSAA_CNTL 0x000080a2
+#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_GRAS_RAS_MSAA_CNTL_UNK2__MASK 0x00000004
+#define A6XX_GRAS_RAS_MSAA_CNTL_UNK2__SHIFT 2
+static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_UNK2(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_UNK2__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_UNK2__MASK;
+}
+#define A6XX_GRAS_RAS_MSAA_CNTL_UNK3__MASK 0x00000008
+#define A6XX_GRAS_RAS_MSAA_CNTL_UNK3__SHIFT 3
+static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_UNK3(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_UNK3__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_UNK3__MASK;
+}
+
+#define REG_A6XX_GRAS_DEST_MSAA_CNTL 0x000080a3
+#define A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A6XX_GRAS_SAMPLE_CONFIG 0x000080a4
+#define A6XX_GRAS_SAMPLE_CONFIG_UNK0 0x00000001
+#define A6XX_GRAS_SAMPLE_CONFIG_LOCATION_ENABLE 0x00000002
+
+#define REG_A6XX_GRAS_SAMPLE_LOCATION_0 0x000080a5
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK 0x0000000f
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK 0x000000f0
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT 4
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK 0x00000f00
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT 8
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK 0x0000f000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT 12
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK 0x000f0000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT 16
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK 0x00f00000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT 20
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK 0x0f000000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT 24
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK 0xf0000000
+#define A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT 28
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SAMPLE_LOCATION_1 0x000080a6
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK 0x0000000f
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK 0x000000f0
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT 4
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK 0x00000f00
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT 8
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK 0x0000f000
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT 12
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK 0x000f0000
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT 16
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK 0x00f00000
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT 20
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK 0x0f000000
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT 24
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK;
+}
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK 0xf0000000
+#define A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT 28
+static inline uint32_t A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_GRAS_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_UNKNOWN_80AF 0x000080af
+
+static inline uint32_t REG_A6XX_GRAS_SC_SCREEN_SCISSOR(uint32_t i0) { return 0x000080b0 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_GRAS_SC_SCREEN_SCISSOR_TL(uint32_t i0) { return 0x000080b0 + 0x2*i0; }
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x0000ffff
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
+}
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0xffff0000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_SC_SCREEN_SCISSOR_BR(uint32_t i0) { return 0x000080b1 + 0x2*i0; }
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x0000ffff
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
+}
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0xffff0000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR(uint32_t i0) { return 0x000080d0 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL(uint32_t i0) { return 0x000080d0 + 0x2*i0; }
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__MASK 0x0000ffff
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_X__MASK;
+}
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__MASK 0xffff0000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_Y__MASK;
+}
+
+static inline uint32_t REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR(uint32_t i0) { return 0x000080d1 + 0x2*i0; }
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__MASK 0x0000ffff
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_X__MASK;
+}
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__MASK 0xffff0000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL 0x000080f0
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00003fff
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x3fff0000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_WINDOW_SCISSOR_BR 0x000080f1
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00003fff
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x3fff0000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
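+
+/* Usage sketch (illustrative only, not generated from the XML): the window
+ * scissor is a pair of packed 14-bit x/y coordinates.  Assuming the
+ * bottom-right coordinate is inclusive (how these scissor registers are
+ * conventionally programmed), a full 1920x1080 window would be:
+ *
+ *   uint32_t tl = A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(0) |
+ *                 A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(0);
+ *   uint32_t br = A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(1920 - 1) |
+ *                 A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(1080 - 1);
+ */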
+
+#define REG_A6XX_GRAS_LRZ_CNTL 0x00008100
+#define A6XX_GRAS_LRZ_CNTL_ENABLE 0x00000001
+#define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002
+#define A6XX_GRAS_LRZ_CNTL_GREATER 0x00000004
+#define A6XX_GRAS_LRZ_CNTL_FC_ENABLE 0x00000008
+#define A6XX_GRAS_LRZ_CNTL_Z_TEST_ENABLE 0x00000010
+#define A6XX_GRAS_LRZ_CNTL_Z_BOUNDS_ENABLE 0x00000020
+#define A6XX_GRAS_LRZ_CNTL_DIR__MASK 0x000000c0
+#define A6XX_GRAS_LRZ_CNTL_DIR__SHIFT 6
+static inline uint32_t A6XX_GRAS_LRZ_CNTL_DIR(enum a6xx_lrz_dir_status val)
+{
+ return ((val) << A6XX_GRAS_LRZ_CNTL_DIR__SHIFT) & A6XX_GRAS_LRZ_CNTL_DIR__MASK;
+}
+#define A6XX_GRAS_LRZ_CNTL_DIR_WRITE 0x00000100
+#define A6XX_GRAS_LRZ_CNTL_DISABLE_ON_WRONG_DIR 0x00000200
+
+#define REG_A6XX_GRAS_LRZ_PS_INPUT_CNTL 0x00008101
+#define A6XX_GRAS_LRZ_PS_INPUT_CNTL_SAMPLEID 0x00000001
+#define A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__MASK 0x00000006
+#define A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__SHIFT 1
+static inline uint32_t A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE(enum a6xx_fragcoord_sample_mode val)
+{
+ return ((val) << A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__SHIFT) & A6XX_GRAS_LRZ_PS_INPUT_CNTL_FRAGCOORDSAMPLEMODE__MASK;
+}
+
+#define REG_A6XX_GRAS_LRZ_MRT_BUF_INFO_0 0x00008102
+#define A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT__SHIFT) & A6XX_GRAS_LRZ_MRT_BUF_INFO_0_COLOR_FORMAT__MASK;
+}
+
+#define REG_A6XX_GRAS_LRZ_BUFFER_BASE 0x00008103
+#define A6XX_GRAS_LRZ_BUFFER_BASE__MASK 0xffffffff
+#define A6XX_GRAS_LRZ_BUFFER_BASE__SHIFT 0
+static inline uint32_t A6XX_GRAS_LRZ_BUFFER_BASE(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_LRZ_BUFFER_BASE__SHIFT) & A6XX_GRAS_LRZ_BUFFER_BASE__MASK;
+}
+
+#define REG_A6XX_GRAS_LRZ_BUFFER_PITCH 0x00008105
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK 0x000000ff
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT 0
+static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK;
+}
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK 0x1ffffc00
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT 10
+static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE 0x00008106
+#define A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__MASK 0xffffffff
+#define A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__SHIFT 0
+static inline uint32_t A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__SHIFT) & A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE__MASK;
+}
+
+#define REG_A6XX_GRAS_SAMPLE_CNTL 0x00008109
+#define A6XX_GRAS_SAMPLE_CNTL_PER_SAMP_MODE 0x00000001
+
+#define REG_A6XX_GRAS_LRZ_DEPTH_VIEW 0x0000810a
+#define A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_LAYER__MASK 0x000007ff
+#define A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_LAYER__SHIFT 0
+static inline uint32_t A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_LAYER(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_LAYER__SHIFT) & A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_LAYER__MASK;
+}
+#define A6XX_GRAS_LRZ_DEPTH_VIEW_LAYER_COUNT__MASK 0x07ff0000
+#define A6XX_GRAS_LRZ_DEPTH_VIEW_LAYER_COUNT__SHIFT 16
+static inline uint32_t A6XX_GRAS_LRZ_DEPTH_VIEW_LAYER_COUNT(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_LRZ_DEPTH_VIEW_LAYER_COUNT__SHIFT) & A6XX_GRAS_LRZ_DEPTH_VIEW_LAYER_COUNT__MASK;
+}
+#define A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL__MASK 0xf0000000
+#define A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL__SHIFT 28
+static inline uint32_t A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL__SHIFT) & A6XX_GRAS_LRZ_DEPTH_VIEW_BASE_MIP_LEVEL__MASK;
+}
+
+#define REG_A6XX_GRAS_UNKNOWN_8110 0x00008110
+
+#define REG_A6XX_GRAS_2D_BLIT_CNTL 0x00008400
+#define A6XX_GRAS_2D_BLIT_CNTL_ROTATE__MASK 0x00000007
+#define A6XX_GRAS_2D_BLIT_CNTL_ROTATE__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_ROTATE(enum a6xx_rotation val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_ROTATE__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_ROTATE__MASK;
+}
+#define A6XX_GRAS_2D_BLIT_CNTL_OVERWRITEEN 0x00000008
+#define A6XX_GRAS_2D_BLIT_CNTL_UNK4__MASK 0x00000070
+#define A6XX_GRAS_2D_BLIT_CNTL_UNK4__SHIFT 4
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_UNK4(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_UNK4__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_UNK4__MASK;
+}
+#define A6XX_GRAS_2D_BLIT_CNTL_SOLID_COLOR 0x00000080
+#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK 0x0000ff00
+#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
+}
+#define A6XX_GRAS_2D_BLIT_CNTL_SCISSOR 0x00010000
+#define A6XX_GRAS_2D_BLIT_CNTL_UNK17__MASK 0x00060000
+#define A6XX_GRAS_2D_BLIT_CNTL_UNK17__SHIFT 17
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_UNK17(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_UNK17__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_UNK17__MASK;
+}
+#define A6XX_GRAS_2D_BLIT_CNTL_D24S8 0x00080000
+#define A6XX_GRAS_2D_BLIT_CNTL_MASK__MASK 0x00f00000
+#define A6XX_GRAS_2D_BLIT_CNTL_MASK__SHIFT 20
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_MASK__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_MASK__MASK;
+}
+#define A6XX_GRAS_2D_BLIT_CNTL_IFMT__MASK 0x1f000000
+#define A6XX_GRAS_2D_BLIT_CNTL_IFMT__SHIFT 24
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_IFMT(enum a6xx_2d_ifmt val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_IFMT__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_IFMT__MASK;
+}
+#define A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE__MASK 0x20000000
+#define A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE__SHIFT 29
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE(enum a6xx_raster_mode val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_RASTER_MODE__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_SRC_TL_X 0x00008401
+
+#define REG_A6XX_GRAS_2D_SRC_BR_X 0x00008402
+
+#define REG_A6XX_GRAS_2D_SRC_TL_Y 0x00008403
+
+#define REG_A6XX_GRAS_2D_SRC_BR_Y 0x00008404
+
+#define REG_A6XX_GRAS_2D_DST_TL 0x00008405
+#define A6XX_GRAS_2D_DST_TL_X__MASK 0x00003fff
+#define A6XX_GRAS_2D_DST_TL_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_DST_TL_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_TL_X__SHIFT) & A6XX_GRAS_2D_DST_TL_X__MASK;
+}
+#define A6XX_GRAS_2D_DST_TL_Y__MASK 0x3fff0000
+#define A6XX_GRAS_2D_DST_TL_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_2D_DST_TL_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_TL_Y__SHIFT) & A6XX_GRAS_2D_DST_TL_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_DST_BR 0x00008406
+#define A6XX_GRAS_2D_DST_BR_X__MASK 0x00003fff
+#define A6XX_GRAS_2D_DST_BR_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_DST_BR_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_BR_X__SHIFT) & A6XX_GRAS_2D_DST_BR_X__MASK;
+}
+#define A6XX_GRAS_2D_DST_BR_Y__MASK 0x3fff0000
+#define A6XX_GRAS_2D_DST_BR_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_2D_DST_BR_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_BR_Y__SHIFT) & A6XX_GRAS_2D_DST_BR_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_UNKNOWN_8407 0x00008407
+
+#define REG_A6XX_GRAS_2D_UNKNOWN_8408 0x00008408
+
+#define REG_A6XX_GRAS_2D_UNKNOWN_8409 0x00008409
+
+#define REG_A6XX_GRAS_2D_RESOLVE_CNTL_1 0x0000840a
+#define A6XX_GRAS_2D_RESOLVE_CNTL_1_X__MASK 0x00003fff
+#define A6XX_GRAS_2D_RESOLVE_CNTL_1_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_1_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_1_X__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_1_X__MASK;
+}
+#define A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__MASK 0x3fff0000
+#define A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_1_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_1_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_RESOLVE_CNTL_2 0x0000840b
+#define A6XX_GRAS_2D_RESOLVE_CNTL_2_X__MASK 0x00003fff
+#define A6XX_GRAS_2D_RESOLVE_CNTL_2_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_2_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_2_X__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_2_X__MASK;
+}
+#define A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__MASK 0x3fff0000
+#define A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_2D_RESOLVE_CNTL_2_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__SHIFT) & A6XX_GRAS_2D_RESOLVE_CNTL_2_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_DBG_ECO_CNTL 0x00008600
+#define A6XX_GRAS_DBG_ECO_CNTL_UNK7 0x00000080
+#define A6XX_GRAS_DBG_ECO_CNTL_LRZCACHELOCKDIS 0x00000800
+
+#define REG_A6XX_GRAS_ADDR_MODE_CNTL 0x00008601
+
+#define REG_A7XX_GRAS_NC_MODE_CNTL 0x00008602
+
+static inline uint32_t REG_A6XX_GRAS_PERFCTR_TSE_SEL(uint32_t i0) { return 0x00008610 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_GRAS_PERFCTR_RAS_SEL(uint32_t i0) { return 0x00008614 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_GRAS_PERFCTR_LRZ_SEL(uint32_t i0) { return 0x00008618 + 0x1*i0; }
+
+#define REG_A6XX_RB_BIN_CONTROL 0x00008800
+#define A6XX_RB_BIN_CONTROL_BINW__MASK 0x0000003f
+#define A6XX_RB_BIN_CONTROL_BINW__SHIFT 0
+static inline uint32_t A6XX_RB_BIN_CONTROL_BINW(uint32_t val)
+{
+ return ((val >> 5) << A6XX_RB_BIN_CONTROL_BINW__SHIFT) & A6XX_RB_BIN_CONTROL_BINW__MASK;
+}
+#define A6XX_RB_BIN_CONTROL_BINH__MASK 0x00007f00
+#define A6XX_RB_BIN_CONTROL_BINH__SHIFT 8
+static inline uint32_t A6XX_RB_BIN_CONTROL_BINH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_RB_BIN_CONTROL_BINH__SHIFT) & A6XX_RB_BIN_CONTROL_BINH__MASK;
+}
+#define A6XX_RB_BIN_CONTROL_RENDER_MODE__MASK 0x001c0000
+#define A6XX_RB_BIN_CONTROL_RENDER_MODE__SHIFT 18
+static inline uint32_t A6XX_RB_BIN_CONTROL_RENDER_MODE(enum a6xx_render_mode val)
+{
+ return ((val) << A6XX_RB_BIN_CONTROL_RENDER_MODE__SHIFT) & A6XX_RB_BIN_CONTROL_RENDER_MODE__MASK;
+}
+#define A6XX_RB_BIN_CONTROL_FORCE_LRZ_WRITE_DIS 0x00200000
+#define A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION__MASK 0x00c00000
+#define A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION__SHIFT 22
+static inline uint32_t A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION(enum a6xx_buffers_location val)
+{
+ return ((val) << A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION__SHIFT) & A6XX_RB_BIN_CONTROL_BUFFERS_LOCATION__MASK;
+}
+#define A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK 0x07000000
+#define A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT 24
+static inline uint32_t A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__SHIFT) & A6XX_RB_BIN_CONTROL_LRZ_FEEDBACK_ZMODE_MASK__MASK;
+}
+
+#define REG_A6XX_RB_RENDER_CNTL 0x00008801
+#define A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__MASK 0x00000038
+#define A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__SHIFT 3
+static inline uint32_t A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__SHIFT) & A6XX_RB_RENDER_CNTL_CCUSINGLECACHELINESIZE__MASK;
+}
+#define A6XX_RB_RENDER_CNTL_EARLYVIZOUTEN 0x00000040
+#define A6XX_RB_RENDER_CNTL_BINNING 0x00000080
+#define A6XX_RB_RENDER_CNTL_UNK8__MASK 0x00000700
+#define A6XX_RB_RENDER_CNTL_UNK8__SHIFT 8
+static inline uint32_t A6XX_RB_RENDER_CNTL_UNK8(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_CNTL_UNK8__SHIFT) & A6XX_RB_RENDER_CNTL_UNK8__MASK;
+}
+#define A6XX_RB_RENDER_CNTL_RASTER_MODE__MASK 0x00000100
+#define A6XX_RB_RENDER_CNTL_RASTER_MODE__SHIFT 8
+static inline uint32_t A6XX_RB_RENDER_CNTL_RASTER_MODE(enum a6xx_raster_mode val)
+{
+ return ((val) << A6XX_RB_RENDER_CNTL_RASTER_MODE__SHIFT) & A6XX_RB_RENDER_CNTL_RASTER_MODE__MASK;
+}
+#define A6XX_RB_RENDER_CNTL_RASTER_DIRECTION__MASK 0x00000600
+#define A6XX_RB_RENDER_CNTL_RASTER_DIRECTION__SHIFT 9
+static inline uint32_t A6XX_RB_RENDER_CNTL_RASTER_DIRECTION(enum a6xx_raster_direction val)
+{
+ return ((val) << A6XX_RB_RENDER_CNTL_RASTER_DIRECTION__SHIFT) & A6XX_RB_RENDER_CNTL_RASTER_DIRECTION__MASK;
+}
+#define A6XX_RB_RENDER_CNTL_CONSERVATIVERASEN 0x00000800
+#define A6XX_RB_RENDER_CNTL_INNERCONSERVATIVERASEN 0x00001000
+#define A6XX_RB_RENDER_CNTL_FLAG_DEPTH 0x00004000
+#define A6XX_RB_RENDER_CNTL_FLAG_MRTS__MASK 0x00ff0000
+#define A6XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT 16
+static inline uint32_t A6XX_RB_RENDER_CNTL_FLAG_MRTS(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT) & A6XX_RB_RENDER_CNTL_FLAG_MRTS__MASK;
+}
+
+#define REG_A6XX_RB_RAS_MSAA_CNTL 0x00008802
+#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_RB_RAS_MSAA_CNTL_UNK2__MASK 0x00000004
+#define A6XX_RB_RAS_MSAA_CNTL_UNK2__SHIFT 2
+static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_UNK2(uint32_t val)
+{
+ return ((val) << A6XX_RB_RAS_MSAA_CNTL_UNK2__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_UNK2__MASK;
+}
+#define A6XX_RB_RAS_MSAA_CNTL_UNK3__MASK 0x00000008
+#define A6XX_RB_RAS_MSAA_CNTL_UNK3__SHIFT 3
+static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_UNK3(uint32_t val)
+{
+ return ((val) << A6XX_RB_RAS_MSAA_CNTL_UNK3__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_UNK3__MASK;
+}
+
+#define REG_A6XX_RB_DEST_MSAA_CNTL 0x00008803
+#define A6XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A6XX_RB_SAMPLE_CONFIG 0x00008804
+#define A6XX_RB_SAMPLE_CONFIG_UNK0 0x00000001
+#define A6XX_RB_SAMPLE_CONFIG_LOCATION_ENABLE 0x00000002
+
+#define REG_A6XX_RB_SAMPLE_LOCATION_0 0x00008805
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK 0x0000000f
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT 0
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK 0x000000f0
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT 4
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK 0x00000f00
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT 8
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK 0x0000f000
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT 12
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK 0x000f0000
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT 16
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK 0x00f00000
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT 20
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK 0x0f000000
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT 24
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK 0xf0000000
+#define A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT 28
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK;
+}
+
+#define REG_A6XX_RB_SAMPLE_LOCATION_1 0x00008806
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK 0x0000000f
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT 0
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK 0x000000f0
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT 4
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK 0x00000f00
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT 8
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK 0x0000f000
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT 12
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK 0x000f0000
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT 16
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK 0x00f00000
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT 20
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK 0x0f000000
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT 24
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK;
+}
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK 0xf0000000
+#define A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT 28
+static inline uint32_t A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_RB_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
+}
+
+#define REG_A6XX_RB_RENDER_CONTROL0 0x00008809
+#define A6XX_RB_RENDER_CONTROL0_IJ_PERSP_PIXEL 0x00000001
+#define A6XX_RB_RENDER_CONTROL0_IJ_PERSP_CENTROID 0x00000002
+#define A6XX_RB_RENDER_CONTROL0_IJ_PERSP_SAMPLE 0x00000004
+#define A6XX_RB_RENDER_CONTROL0_IJ_LINEAR_PIXEL 0x00000008
+#define A6XX_RB_RENDER_CONTROL0_IJ_LINEAR_CENTROID 0x00000010
+#define A6XX_RB_RENDER_CONTROL0_IJ_LINEAR_SAMPLE 0x00000020
+#define A6XX_RB_RENDER_CONTROL0_COORD_MASK__MASK 0x000003c0
+#define A6XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT 6
+static inline uint32_t A6XX_RB_RENDER_CONTROL0_COORD_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_CONTROL0_COORD_MASK__SHIFT) & A6XX_RB_RENDER_CONTROL0_COORD_MASK__MASK;
+}
+#define A6XX_RB_RENDER_CONTROL0_UNK10 0x00000400
+
+#define REG_A6XX_RB_RENDER_CONTROL1 0x0000880a
+#define A6XX_RB_RENDER_CONTROL1_SAMPLEMASK 0x00000001
+#define A6XX_RB_RENDER_CONTROL1_POSTDEPTHCOVERAGE 0x00000002
+#define A6XX_RB_RENDER_CONTROL1_FACENESS 0x00000004
+#define A6XX_RB_RENDER_CONTROL1_SAMPLEID 0x00000008
+#define A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__MASK 0x00000030
+#define A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__SHIFT 4
+static inline uint32_t A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE(enum a6xx_fragcoord_sample_mode val)
+{
+ return ((val) << A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__SHIFT) & A6XX_RB_RENDER_CONTROL1_FRAGCOORDSAMPLEMODE__MASK;
+}
+#define A6XX_RB_RENDER_CONTROL1_CENTERRHW 0x00000040
+#define A6XX_RB_RENDER_CONTROL1_LINELENGTHEN 0x00000080
+#define A6XX_RB_RENDER_CONTROL1_FOVEATION 0x00000100
+
+#define REG_A6XX_RB_FS_OUTPUT_CNTL0 0x0000880b
+#define A6XX_RB_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE 0x00000001
+#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z 0x00000002
+#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_SAMPMASK 0x00000004
+#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_STENCILREF 0x00000008
+
+#define REG_A6XX_RB_FS_OUTPUT_CNTL1 0x0000880c
+#define A6XX_RB_FS_OUTPUT_CNTL1_MRT__MASK 0x0000000f
+#define A6XX_RB_FS_OUTPUT_CNTL1_MRT__SHIFT 0
+static inline uint32_t A6XX_RB_FS_OUTPUT_CNTL1_MRT(uint32_t val)
+{
+ return ((val) << A6XX_RB_FS_OUTPUT_CNTL1_MRT__SHIFT) & A6XX_RB_FS_OUTPUT_CNTL1_MRT__MASK;
+}
+
+#define REG_A6XX_RB_RENDER_COMPONENTS 0x0000880d
+#define A6XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f
+#define A6XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT0(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT0__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0
+#define A6XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT1(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT1__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00
+#define A6XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT2(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT2__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000
+#define A6XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT3(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT3__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000
+#define A6XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT4(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT4__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000
+#define A6XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT5(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT5__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000
+#define A6XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT6(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT6__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000
+#define A6XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT7(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT7__MASK;
+}
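+
+/* Usage sketch (illustrative only, not generated from the XML): each RTn
+ * field appears to hold a 4-bit per-component mask for that render target,
+ * so enabling full RGBA output on the first two MRTs would be:
+ *
+ *   uint32_t components = A6XX_RB_RENDER_COMPONENTS_RT0(0xf) |
+ *                         A6XX_RB_RENDER_COMPONENTS_RT1(0xf);   // == 0x000000ff
+ */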
+
+#define REG_A6XX_RB_DITHER_CNTL 0x0000880e
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__MASK 0x00000003
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__SHIFT 0
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__MASK 0x0000000c
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__SHIFT 2
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__MASK 0x00000030
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__SHIFT 4
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__MASK 0x000000c0
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__SHIFT 6
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__MASK 0x00000300
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__SHIFT 8
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__MASK 0x00000c00
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__SHIFT 10
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__MASK 0x00001000
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__SHIFT 12
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__MASK 0x0000c000
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__SHIFT 14
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__MASK;
+}
+
+#define REG_A6XX_RB_SRGB_CNTL 0x0000880f
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT0 0x00000001
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT1 0x00000002
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT2 0x00000004
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT3 0x00000008
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT4 0x00000010
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT5 0x00000020
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT6 0x00000040
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT7 0x00000080
+
+#define REG_A6XX_RB_SAMPLE_CNTL 0x00008810
+#define A6XX_RB_SAMPLE_CNTL_PER_SAMP_MODE 0x00000001
+
+#define REG_A6XX_RB_UNKNOWN_8811 0x00008811
+
+#define REG_A6XX_RB_UNKNOWN_8818 0x00008818
+
+#define REG_A6XX_RB_UNKNOWN_8819 0x00008819
+
+#define REG_A6XX_RB_UNKNOWN_881A 0x0000881a
+
+#define REG_A6XX_RB_UNKNOWN_881B 0x0000881b
+
+#define REG_A6XX_RB_UNKNOWN_881C 0x0000881c
+
+#define REG_A6XX_RB_UNKNOWN_881D 0x0000881d
+
+#define REG_A6XX_RB_UNKNOWN_881E 0x0000881e
+
+static inline uint32_t REG_A6XX_RB_MRT(uint32_t i0) { return 0x00008820 + 0x8*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_CONTROL(uint32_t i0) { return 0x00008820 + 0x8*i0; }
+#define A6XX_RB_MRT_CONTROL_BLEND 0x00000001
+#define A6XX_RB_MRT_CONTROL_BLEND2 0x00000002
+#define A6XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000004
+#define A6XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000078
+#define A6XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 3
+static inline uint32_t A6XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+ return ((val) << A6XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A6XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
+#define A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
+#define A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
+static inline uint32_t A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x00008821 + 0x8*i0; }
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x00008822 + 0x8*i0; }
+#define A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300
+#define A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A6XX_RB_MRT_BUF_INFO_UNK10__MASK 0x00000400
+#define A6XX_RB_MRT_BUF_INFO_UNK10__SHIFT 10
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_UNK10(uint32_t val)
+{
+ return ((val) << A6XX_RB_MRT_BUF_INFO_UNK10__SHIFT) & A6XX_RB_MRT_BUF_INFO_UNK10__MASK;
+}
+#define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
+#define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_PITCH(uint32_t i0) { return 0x00008823 + 0x8*i0; }
+#define A6XX_RB_MRT_PITCH__MASK 0x0000ffff
+#define A6XX_RB_MRT_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_MRT_PITCH__SHIFT) & A6XX_RB_MRT_PITCH__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x00008824 + 0x8*i0; }
+#define A6XX_RB_MRT_ARRAY_PITCH__MASK 0x1fffffff
+#define A6XX_RB_MRT_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_MRT_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_ARRAY_PITCH__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_BASE(uint32_t i0) { return 0x00008825 + 0x8*i0; }
+#define A6XX_RB_MRT_BASE__MASK 0xffffffff
+#define A6XX_RB_MRT_BASE__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_BASE(uint32_t val)
+{
+ return ((val) << A6XX_RB_MRT_BASE__SHIFT) & A6XX_RB_MRT_BASE__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_BASE_GMEM(uint32_t i0) { return 0x00008827 + 0x8*i0; }
+#define A6XX_RB_MRT_BASE_GMEM__MASK 0xfffff000
+#define A6XX_RB_MRT_BASE_GMEM__SHIFT 12
+static inline uint32_t A6XX_RB_MRT_BASE_GMEM(uint32_t val)
+{
+ return ((val >> 12) << A6XX_RB_MRT_BASE_GMEM__SHIFT) & A6XX_RB_MRT_BASE_GMEM__MASK;
+}
+
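+/* Editorial note, not part of the generated output: the helpers above follow
+ * the usual rules-ng-ng pattern.  REG_A6XX_RB_MRT(i0)-style functions return
+ * the register offset of the i0'th array element (stride 0x8 here), and each
+ * A6XX_<REG>_<FIELD>(val) builder shifts 'val' into its bitfield and masks it,
+ * so a complete register value is the OR of the individual builders.  Pitch
+ * style builders first shift the incoming byte value right by 6 because the
+ * hardware stores those fields in 64-byte units.  A purely hypothetical
+ * sketch of how the values are assembled:
+ *
+ *   uint32_t ctl   = A6XX_RB_MRT_CONTROL_ROP_CODE(ROP_COPY) |
+ *                    A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE(0xf);
+ *   uint32_t pitch = A6XX_RB_MRT_PITCH(256);   // 256-byte pitch -> field value 4
+ */
+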
+#define REG_A6XX_RB_BLEND_RED_F32 0x00008860
+#define A6XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_RED_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_RED_F32__SHIFT) & A6XX_RB_BLEND_RED_F32__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_GREEN_F32 0x00008861
+#define A6XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_GREEN_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_GREEN_F32__SHIFT) & A6XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_BLUE_F32 0x00008862
+#define A6XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_BLUE_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_BLUE_F32__SHIFT) & A6XX_RB_BLEND_BLUE_F32__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_ALPHA_F32 0x00008863
+#define A6XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_ALPHA_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_ALPHA_F32__SHIFT) & A6XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
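+/* Editorial note, not part of the generated output: the *_F32 builders above
+ * use fui(), a small helper defined elsewhere in the msm driver that
+ * reinterprets a float as its raw IEEE-754 bit pattern; e.g.
+ * A6XX_RB_BLEND_RED_F32(1.0f) evaluates to 0x3f800000, which is written into
+ * the register unchanged.
+ */
+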
+#define REG_A6XX_RB_ALPHA_CONTROL 0x00008864
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
+static inline uint32_t A6XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
+{
+ return ((val) << A6XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A6XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
+}
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
+static inline uint32_t A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_CNTL 0x00008865
+#define A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A6XX_RB_BLEND_CNTL_DUAL_COLOR_IN_ENABLE 0x00000200
+#define A6XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
+#define A6XX_RB_BLEND_CNTL_ALPHA_TO_ONE 0x00000800
+#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
+#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_PLANE_CNTL 0x00008870
+#define A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__MASK 0x00000003
+#define A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE(enum a6xx_ztest_mode val)
+{
+ return ((val) << A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__SHIFT) & A6XX_RB_DEPTH_PLANE_CNTL_Z_MODE__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_CNTL 0x00008871
+#define A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000001
+#define A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002
+#define A6XX_RB_DEPTH_CNTL_ZFUNC__MASK 0x0000001c
+#define A6XX_RB_DEPTH_CNTL_ZFUNC__SHIFT 2
+static inline uint32_t A6XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A6XX_RB_DEPTH_CNTL_ZFUNC__MASK;
+}
+#define A6XX_RB_DEPTH_CNTL_Z_CLAMP_ENABLE 0x00000020
+#define A6XX_RB_DEPTH_CNTL_Z_READ_ENABLE 0x00000040
+#define A6XX_RB_DEPTH_CNTL_Z_BOUNDS_ENABLE 0x00000080
+
+#define REG_A6XX_RB_DEPTH_BUFFER_INFO 0x00008872
+#define A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_format val)
+{
+ return ((val) << A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+#define A6XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK 0x00000018
+#define A6XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT 3
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_INFO_UNK3(uint32_t val)
+{
+ return ((val) << A6XX_RB_DEPTH_BUFFER_INFO_UNK3__SHIFT) & A6XX_RB_DEPTH_BUFFER_INFO_UNK3__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_BUFFER_PITCH 0x00008873
+#define A6XX_RB_DEPTH_BUFFER_PITCH__MASK 0x00003fff
+#define A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x00008874
+#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0x0fffffff
+#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_BUFFER_BASE 0x00008875
+#define A6XX_RB_DEPTH_BUFFER_BASE__MASK 0xffffffff
+#define A6XX_RB_DEPTH_BUFFER_BASE__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_BASE(uint32_t val)
+{
+ return ((val) << A6XX_RB_DEPTH_BUFFER_BASE__SHIFT) & A6XX_RB_DEPTH_BUFFER_BASE__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_BUFFER_BASE_GMEM 0x00008877
+#define A6XX_RB_DEPTH_BUFFER_BASE_GMEM__MASK 0xfffff000
+#define A6XX_RB_DEPTH_BUFFER_BASE_GMEM__SHIFT 12
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_BASE_GMEM(uint32_t val)
+{
+ return ((val >> 12) << A6XX_RB_DEPTH_BUFFER_BASE_GMEM__SHIFT) & A6XX_RB_DEPTH_BUFFER_BASE_GMEM__MASK;
+}
+
+#define REG_A6XX_RB_Z_BOUNDS_MIN 0x00008878
+#define A6XX_RB_Z_BOUNDS_MIN__MASK 0xffffffff
+#define A6XX_RB_Z_BOUNDS_MIN__SHIFT 0
+static inline uint32_t A6XX_RB_Z_BOUNDS_MIN(float val)
+{
+ return ((fui(val)) << A6XX_RB_Z_BOUNDS_MIN__SHIFT) & A6XX_RB_Z_BOUNDS_MIN__MASK;
+}
+
+#define REG_A6XX_RB_Z_BOUNDS_MAX 0x00008879
+#define A6XX_RB_Z_BOUNDS_MAX__MASK 0xffffffff
+#define A6XX_RB_Z_BOUNDS_MAX__SHIFT 0
+static inline uint32_t A6XX_RB_Z_BOUNDS_MAX(float val)
+{
+ return ((fui(val)) << A6XX_RB_Z_BOUNDS_MAX__SHIFT) & A6XX_RB_Z_BOUNDS_MAX__MASK;
+}
+
+#define REG_A6XX_RB_STENCIL_CONTROL 0x00008880
+#define A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A6XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A6XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A6XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A6XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A6XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A6XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A6XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A6XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A6XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A6XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
+#define REG_A6XX_RB_STENCIL_INFO 0x00008881
+#define A6XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
+#define A6XX_RB_STENCIL_INFO_UNK1 0x00000002
+
+#define REG_A6XX_RB_STENCIL_BUFFER_PITCH 0x00008882
+#define A6XX_RB_STENCIL_BUFFER_PITCH__MASK 0x00000fff
+#define A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_STENCIL_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH 0x00008883
+#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK 0x00ffffff
+#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_STENCIL_BUFFER_BASE 0x00008884
+#define A6XX_RB_STENCIL_BUFFER_BASE__MASK 0xffffffff
+#define A6XX_RB_STENCIL_BUFFER_BASE__SHIFT 0
+static inline uint32_t A6XX_RB_STENCIL_BUFFER_BASE(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCIL_BUFFER_BASE__SHIFT) & A6XX_RB_STENCIL_BUFFER_BASE__MASK;
+}
+
+#define REG_A6XX_RB_STENCIL_BUFFER_BASE_GMEM 0x00008886
+#define A6XX_RB_STENCIL_BUFFER_BASE_GMEM__MASK 0xfffff000
+#define A6XX_RB_STENCIL_BUFFER_BASE_GMEM__SHIFT 12
+static inline uint32_t A6XX_RB_STENCIL_BUFFER_BASE_GMEM(uint32_t val)
+{
+ return ((val >> 12) << A6XX_RB_STENCIL_BUFFER_BASE_GMEM__SHIFT) & A6XX_RB_STENCIL_BUFFER_BASE_GMEM__MASK;
+}
+
+#define REG_A6XX_RB_STENCILREF 0x00008887
+#define A6XX_RB_STENCILREF_REF__MASK 0x000000ff
+#define A6XX_RB_STENCILREF_REF__SHIFT 0
+static inline uint32_t A6XX_RB_STENCILREF_REF(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILREF_REF__SHIFT) & A6XX_RB_STENCILREF_REF__MASK;
+}
+#define A6XX_RB_STENCILREF_BFREF__MASK 0x0000ff00
+#define A6XX_RB_STENCILREF_BFREF__SHIFT 8
+static inline uint32_t A6XX_RB_STENCILREF_BFREF(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILREF_BFREF__SHIFT) & A6XX_RB_STENCILREF_BFREF__MASK;
+}
+
+#define REG_A6XX_RB_STENCILMASK 0x00008888
+#define A6XX_RB_STENCILMASK_MASK__MASK 0x000000ff
+#define A6XX_RB_STENCILMASK_MASK__SHIFT 0
+static inline uint32_t A6XX_RB_STENCILMASK_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILMASK_MASK__SHIFT) & A6XX_RB_STENCILMASK_MASK__MASK;
+}
+#define A6XX_RB_STENCILMASK_BFMASK__MASK 0x0000ff00
+#define A6XX_RB_STENCILMASK_BFMASK__SHIFT 8
+static inline uint32_t A6XX_RB_STENCILMASK_BFMASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILMASK_BFMASK__SHIFT) & A6XX_RB_STENCILMASK_BFMASK__MASK;
+}
+
+#define REG_A6XX_RB_STENCILWRMASK 0x00008889
+#define A6XX_RB_STENCILWRMASK_WRMASK__MASK 0x000000ff
+#define A6XX_RB_STENCILWRMASK_WRMASK__SHIFT 0
+static inline uint32_t A6XX_RB_STENCILWRMASK_WRMASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILWRMASK_WRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_WRMASK__MASK;
+}
+#define A6XX_RB_STENCILWRMASK_BFWRMASK__MASK 0x0000ff00
+#define A6XX_RB_STENCILWRMASK_BFWRMASK__SHIFT 8
+static inline uint32_t A6XX_RB_STENCILWRMASK_BFWRMASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILWRMASK_BFWRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_BFWRMASK__MASK;
+}
+
+#define REG_A6XX_RB_WINDOW_OFFSET 0x00008890
+#define A6XX_RB_WINDOW_OFFSET_X__MASK 0x00003fff
+#define A6XX_RB_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_RB_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_RB_WINDOW_OFFSET_X__SHIFT) & A6XX_RB_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_RB_WINDOW_OFFSET_Y__MASK 0x3fff0000
+#define A6XX_RB_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_RB_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_RB_WINDOW_OFFSET_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A6XX_RB_SAMPLE_COUNT_CONTROL 0x00008891
+#define A6XX_RB_SAMPLE_COUNT_CONTROL_DISABLE 0x00000001
+#define A6XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
+
+#define REG_A6XX_RB_LRZ_CNTL 0x00008898
+#define A6XX_RB_LRZ_CNTL_ENABLE 0x00000001
+
+#define REG_A6XX_RB_Z_CLAMP_MIN 0x000088c0
+#define A6XX_RB_Z_CLAMP_MIN__MASK 0xffffffff
+#define A6XX_RB_Z_CLAMP_MIN__SHIFT 0
+static inline uint32_t A6XX_RB_Z_CLAMP_MIN(float val)
+{
+ return ((fui(val)) << A6XX_RB_Z_CLAMP_MIN__SHIFT) & A6XX_RB_Z_CLAMP_MIN__MASK;
+}
+
+#define REG_A6XX_RB_Z_CLAMP_MAX 0x000088c1
+#define A6XX_RB_Z_CLAMP_MAX__MASK 0xffffffff
+#define A6XX_RB_Z_CLAMP_MAX__SHIFT 0
+static inline uint32_t A6XX_RB_Z_CLAMP_MAX(float val)
+{
+ return ((fui(val)) << A6XX_RB_Z_CLAMP_MAX__SHIFT) & A6XX_RB_Z_CLAMP_MAX__MASK;
+}
+
+#define REG_A6XX_RB_UNKNOWN_88D0 0x000088d0
+#define A6XX_RB_UNKNOWN_88D0_UNK0__MASK 0x00001fff
+#define A6XX_RB_UNKNOWN_88D0_UNK0__SHIFT 0
+static inline uint32_t A6XX_RB_UNKNOWN_88D0_UNK0(uint32_t val)
+{
+ return ((val) << A6XX_RB_UNKNOWN_88D0_UNK0__SHIFT) & A6XX_RB_UNKNOWN_88D0_UNK0__MASK;
+}
+#define A6XX_RB_UNKNOWN_88D0_UNK16__MASK 0x07ff0000
+#define A6XX_RB_UNKNOWN_88D0_UNK16__SHIFT 16
+static inline uint32_t A6XX_RB_UNKNOWN_88D0_UNK16(uint32_t val)
+{
+ return ((val) << A6XX_RB_UNKNOWN_88D0_UNK16__SHIFT) & A6XX_RB_UNKNOWN_88D0_UNK16__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_SCISSOR_TL 0x000088d1
+#define A6XX_RB_BLIT_SCISSOR_TL_X__MASK 0x00003fff
+#define A6XX_RB_BLIT_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_TL_X__SHIFT) & A6XX_RB_BLIT_SCISSOR_TL_X__MASK;
+}
+#define A6XX_RB_BLIT_SCISSOR_TL_Y__MASK 0x3fff0000
+#define A6XX_RB_BLIT_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_TL_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_SCISSOR_BR 0x000088d2
+#define A6XX_RB_BLIT_SCISSOR_BR_X__MASK 0x00003fff
+#define A6XX_RB_BLIT_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_BR_X__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_X__MASK;
+}
+#define A6XX_RB_BLIT_SCISSOR_BR_Y__MASK 0x3fff0000
+#define A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A6XX_RB_BIN_CONTROL2 0x000088d3
+#define A6XX_RB_BIN_CONTROL2_BINW__MASK 0x0000003f
+#define A6XX_RB_BIN_CONTROL2_BINW__SHIFT 0
+static inline uint32_t A6XX_RB_BIN_CONTROL2_BINW(uint32_t val)
+{
+ return ((val >> 5) << A6XX_RB_BIN_CONTROL2_BINW__SHIFT) & A6XX_RB_BIN_CONTROL2_BINW__MASK;
+}
+#define A6XX_RB_BIN_CONTROL2_BINH__MASK 0x00007f00
+#define A6XX_RB_BIN_CONTROL2_BINH__SHIFT 8
+static inline uint32_t A6XX_RB_BIN_CONTROL2_BINH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_RB_BIN_CONTROL2_BINH__SHIFT) & A6XX_RB_BIN_CONTROL2_BINH__MASK;
+}
+
+#define REG_A6XX_RB_WINDOW_OFFSET2 0x000088d4
+#define A6XX_RB_WINDOW_OFFSET2_X__MASK 0x00003fff
+#define A6XX_RB_WINDOW_OFFSET2_X__SHIFT 0
+static inline uint32_t A6XX_RB_WINDOW_OFFSET2_X(uint32_t val)
+{
+ return ((val) << A6XX_RB_WINDOW_OFFSET2_X__SHIFT) & A6XX_RB_WINDOW_OFFSET2_X__MASK;
+}
+#define A6XX_RB_WINDOW_OFFSET2_Y__MASK 0x3fff0000
+#define A6XX_RB_WINDOW_OFFSET2_Y__SHIFT 16
+static inline uint32_t A6XX_RB_WINDOW_OFFSET2_Y(uint32_t val)
+{
+ return ((val) << A6XX_RB_WINDOW_OFFSET2_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET2_Y__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_GMEM_MSAA_CNTL 0x000088d5
+#define A6XX_RB_BLIT_GMEM_MSAA_CNTL_SAMPLES__MASK 0x00000018
+#define A6XX_RB_BLIT_GMEM_MSAA_CNTL_SAMPLES__SHIFT 3
+static inline uint32_t A6XX_RB_BLIT_GMEM_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_BLIT_GMEM_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_BLIT_GMEM_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_BASE_GMEM 0x000088d6
+#define A6XX_RB_BLIT_BASE_GMEM__MASK 0xfffff000
+#define A6XX_RB_BLIT_BASE_GMEM__SHIFT 12
+static inline uint32_t A6XX_RB_BLIT_BASE_GMEM(uint32_t val)
+{
+ return ((val >> 12) << A6XX_RB_BLIT_BASE_GMEM__SHIFT) & A6XX_RB_BLIT_BASE_GMEM__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_DST_INFO 0x000088d7
+#define A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK 0x00000003
+#define A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK;
+}
+#define A6XX_RB_BLIT_DST_INFO_FLAGS 0x00000004
+#define A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK 0x00000018
+#define A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT 3
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT) & A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK;
+}
+#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK 0x00000060
+#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT 5
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK;
+}
+#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK 0x00007f80
+#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT 7
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_RB_BLIT_DST_INFO_UNK15 0x00008000
+
+#define REG_A6XX_RB_BLIT_DST 0x000088d8
+#define A6XX_RB_BLIT_DST__MASK 0xffffffff
+#define A6XX_RB_BLIT_DST__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_DST(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_DST__SHIFT) & A6XX_RB_BLIT_DST__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_DST_PITCH 0x000088da
+#define A6XX_RB_BLIT_DST_PITCH__MASK 0x0000ffff
+#define A6XX_RB_BLIT_DST_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_DST_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_BLIT_DST_PITCH__SHIFT) & A6XX_RB_BLIT_DST_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_DST_ARRAY_PITCH 0x000088db
+#define A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK 0x1fffffff
+#define A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_FLAG_DST 0x000088dc
+#define A6XX_RB_BLIT_FLAG_DST__MASK 0xffffffff
+#define A6XX_RB_BLIT_FLAG_DST__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_FLAG_DST(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_FLAG_DST__SHIFT) & A6XX_RB_BLIT_FLAG_DST__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_FLAG_DST_PITCH 0x000088de
+#define A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__MASK 0x000007ff
+#define A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__SHIFT) & A6XX_RB_BLIT_FLAG_DST_PITCH_PITCH__MASK;
+}
+#define A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__MASK 0x0ffff800
+#define A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__SHIFT 11
+static inline uint32_t A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 7) << A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_BLIT_FLAG_DST_PITCH_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0 0x000088df
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW1 0x000088e0
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW2 0x000088e1
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW3 0x000088e2
+
+#define REG_A6XX_RB_BLIT_INFO 0x000088e3
+#define A6XX_RB_BLIT_INFO_UNK0 0x00000001
+#define A6XX_RB_BLIT_INFO_GMEM 0x00000002
+#define A6XX_RB_BLIT_INFO_SAMPLE_0 0x00000004
+#define A6XX_RB_BLIT_INFO_DEPTH 0x00000008
+#define A6XX_RB_BLIT_INFO_CLEAR_MASK__MASK 0x000000f0
+#define A6XX_RB_BLIT_INFO_CLEAR_MASK__SHIFT 4
+static inline uint32_t A6XX_RB_BLIT_INFO_CLEAR_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_INFO_CLEAR_MASK__SHIFT) & A6XX_RB_BLIT_INFO_CLEAR_MASK__MASK;
+}
+#define A6XX_RB_BLIT_INFO_LAST__MASK 0x00000300
+#define A6XX_RB_BLIT_INFO_LAST__SHIFT 8
+static inline uint32_t A6XX_RB_BLIT_INFO_LAST(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_INFO_LAST__SHIFT) & A6XX_RB_BLIT_INFO_LAST__MASK;
+}
+#define A6XX_RB_BLIT_INFO_BUFFER_ID__MASK 0x0000f000
+#define A6XX_RB_BLIT_INFO_BUFFER_ID__SHIFT 12
+static inline uint32_t A6XX_RB_BLIT_INFO_BUFFER_ID(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_INFO_BUFFER_ID__SHIFT) & A6XX_RB_BLIT_INFO_BUFFER_ID__MASK;
+}
+
+#define REG_A6XX_RB_UNKNOWN_88F0 0x000088f0
+
+#define REG_A6XX_RB_UNK_FLAG_BUFFER_BASE 0x000088f1
+#define A6XX_RB_UNK_FLAG_BUFFER_BASE__MASK 0xffffffff
+#define A6XX_RB_UNK_FLAG_BUFFER_BASE__SHIFT 0
+static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_BASE(uint32_t val)
+{
+ return ((val) << A6XX_RB_UNK_FLAG_BUFFER_BASE__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_BASE__MASK;
+}
+
+#define REG_A6XX_RB_UNK_FLAG_BUFFER_PITCH 0x000088f3
+#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__MASK 0x000007ff
+#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_PITCH_PITCH__MASK;
+}
+#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x00fff800
+#define A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
+static inline uint32_t A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 7) << A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_UNK_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_UNKNOWN_88F4 0x000088f4
+
+#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE 0x00008900
+#define A6XX_RB_DEPTH_FLAG_BUFFER_BASE__MASK 0xffffffff
+#define A6XX_RB_DEPTH_FLAG_BUFFER_BASE__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_BASE(uint32_t val)
+{
+ return ((val) << A6XX_RB_DEPTH_FLAG_BUFFER_BASE__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_BASE__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x00008902
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__MASK 0x0000007f
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_PITCH__MASK;
+}
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__MASK 0x00000700
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__SHIFT 8
+static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8(uint32_t val)
+{
+ return ((val) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_UNK8__MASK;
+}
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x0ffff800
+#define A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
+static inline uint32_t A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 7) << A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_DEPTH_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER(uint32_t i0) { return 0x00008903 + 0x3*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR(uint32_t i0) { return 0x00008903 + 0x3*i0; }
+#define A6XX_RB_MRT_FLAG_BUFFER_ADDR__MASK 0xffffffff
+#define A6XX_RB_MRT_FLAG_BUFFER_ADDR__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_RB_MRT_FLAG_BUFFER_ADDR__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_ADDR__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t i0) { return 0x00008905 + 0x3*i0; }
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK 0x000007ff
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK;
+}
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x1ffff800
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
+static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 7) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_SAMPLE_COUNT_ADDR 0x00008927
+#define A6XX_RB_SAMPLE_COUNT_ADDR__MASK 0xffffffff
+#define A6XX_RB_SAMPLE_COUNT_ADDR__SHIFT 0
+static inline uint32_t A6XX_RB_SAMPLE_COUNT_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_RB_SAMPLE_COUNT_ADDR__SHIFT) & A6XX_RB_SAMPLE_COUNT_ADDR__MASK;
+}
+
+#define REG_A6XX_RB_UNKNOWN_8A00 0x00008a00
+
+#define REG_A6XX_RB_UNKNOWN_8A10 0x00008a10
+
+#define REG_A6XX_RB_UNKNOWN_8A20 0x00008a20
+
+#define REG_A6XX_RB_UNKNOWN_8A30 0x00008a30
+
+#define REG_A6XX_RB_2D_BLIT_CNTL 0x00008c00
+#define A6XX_RB_2D_BLIT_CNTL_ROTATE__MASK 0x00000007
+#define A6XX_RB_2D_BLIT_CNTL_ROTATE__SHIFT 0
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_ROTATE(enum a6xx_rotation val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_ROTATE__SHIFT) & A6XX_RB_2D_BLIT_CNTL_ROTATE__MASK;
+}
+#define A6XX_RB_2D_BLIT_CNTL_OVERWRITEEN 0x00000008
+#define A6XX_RB_2D_BLIT_CNTL_UNK4__MASK 0x00000070
+#define A6XX_RB_2D_BLIT_CNTL_UNK4__SHIFT 4
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_UNK4(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_UNK4__SHIFT) & A6XX_RB_2D_BLIT_CNTL_UNK4__MASK;
+}
+#define A6XX_RB_2D_BLIT_CNTL_SOLID_COLOR 0x00000080
+#define A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK 0x0000ff00
+#define A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT 8
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
+}
+#define A6XX_RB_2D_BLIT_CNTL_SCISSOR 0x00010000
+#define A6XX_RB_2D_BLIT_CNTL_UNK17__MASK 0x00060000
+#define A6XX_RB_2D_BLIT_CNTL_UNK17__SHIFT 17
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_UNK17(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_UNK17__SHIFT) & A6XX_RB_2D_BLIT_CNTL_UNK17__MASK;
+}
+#define A6XX_RB_2D_BLIT_CNTL_D24S8 0x00080000
+#define A6XX_RB_2D_BLIT_CNTL_MASK__MASK 0x00f00000
+#define A6XX_RB_2D_BLIT_CNTL_MASK__SHIFT 20
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_MASK__SHIFT) & A6XX_RB_2D_BLIT_CNTL_MASK__MASK;
+}
+#define A6XX_RB_2D_BLIT_CNTL_IFMT__MASK 0x1f000000
+#define A6XX_RB_2D_BLIT_CNTL_IFMT__SHIFT 24
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_IFMT(enum a6xx_2d_ifmt val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_IFMT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_IFMT__MASK;
+}
+#define A6XX_RB_2D_BLIT_CNTL_RASTER_MODE__MASK 0x20000000
+#define A6XX_RB_2D_BLIT_CNTL_RASTER_MODE__SHIFT 29
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_RASTER_MODE(enum a6xx_raster_mode val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_RASTER_MODE__SHIFT) & A6XX_RB_2D_BLIT_CNTL_RASTER_MODE__MASK;
+}
+
+#define REG_A6XX_RB_2D_UNKNOWN_8C01 0x00008c01
+
+#define REG_A6XX_RB_2D_DST_INFO 0x00008c17
+#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_TILE_MODE__MASK 0x00000300
+#define A6XX_RB_2D_DST_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A6XX_RB_2D_DST_INFO_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_2D_DST_INFO_TILE_MODE__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A6XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_2D_DST_INFO_COLOR_SWAP__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_FLAGS 0x00001000
+#define A6XX_RB_2D_DST_INFO_SRGB 0x00002000
+#define A6XX_RB_2D_DST_INFO_SAMPLES__MASK 0x0000c000
+#define A6XX_RB_2D_DST_INFO_SAMPLES__SHIFT 14
+static inline uint32_t A6XX_RB_2D_DST_INFO_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_SAMPLES__SHIFT) & A6XX_RB_2D_DST_INFO_SAMPLES__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_FILTER 0x00010000
+#define A6XX_RB_2D_DST_INFO_UNK17 0x00020000
+#define A6XX_RB_2D_DST_INFO_SAMPLES_AVERAGE 0x00040000
+#define A6XX_RB_2D_DST_INFO_UNK19 0x00080000
+#define A6XX_RB_2D_DST_INFO_UNK20 0x00100000
+#define A6XX_RB_2D_DST_INFO_UNK21 0x00200000
+#define A6XX_RB_2D_DST_INFO_UNK22 0x00400000
+#define A6XX_RB_2D_DST_INFO_UNK23__MASK 0x07800000
+#define A6XX_RB_2D_DST_INFO_UNK23__SHIFT 23
+static inline uint32_t A6XX_RB_2D_DST_INFO_UNK23(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_UNK23__SHIFT) & A6XX_RB_2D_DST_INFO_UNK23__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_UNK28 0x10000000
+
+#define REG_A6XX_RB_2D_DST 0x00008c18
+#define A6XX_RB_2D_DST__MASK 0xffffffff
+#define A6XX_RB_2D_DST__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_DST__SHIFT) & A6XX_RB_2D_DST__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_PITCH 0x00008c1a
+#define A6XX_RB_2D_DST_PITCH__MASK 0x0000ffff
+#define A6XX_RB_2D_DST_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_2D_DST_PITCH__SHIFT) & A6XX_RB_2D_DST_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_PLANE1 0x00008c1b
+#define A6XX_RB_2D_DST_PLANE1__MASK 0xffffffff
+#define A6XX_RB_2D_DST_PLANE1__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_PLANE1(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_DST_PLANE1__SHIFT) & A6XX_RB_2D_DST_PLANE1__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_PLANE_PITCH 0x00008c1d
+#define A6XX_RB_2D_DST_PLANE_PITCH__MASK 0x0000ffff
+#define A6XX_RB_2D_DST_PLANE_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_PLANE_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_2D_DST_PLANE_PITCH__SHIFT) & A6XX_RB_2D_DST_PLANE_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_PLANE2 0x00008c1e
+#define A6XX_RB_2D_DST_PLANE2__MASK 0xffffffff
+#define A6XX_RB_2D_DST_PLANE2__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_PLANE2(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_DST_PLANE2__SHIFT) & A6XX_RB_2D_DST_PLANE2__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_FLAGS 0x00008c20
+#define A6XX_RB_2D_DST_FLAGS__MASK 0xffffffff
+#define A6XX_RB_2D_DST_FLAGS__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_FLAGS(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_DST_FLAGS__SHIFT) & A6XX_RB_2D_DST_FLAGS__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_FLAGS_PITCH 0x00008c22
+#define A6XX_RB_2D_DST_FLAGS_PITCH__MASK 0x000000ff
+#define A6XX_RB_2D_DST_FLAGS_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_FLAGS_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_2D_DST_FLAGS_PITCH__SHIFT) & A6XX_RB_2D_DST_FLAGS_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_FLAGS_PLANE 0x00008c23
+#define A6XX_RB_2D_DST_FLAGS_PLANE__MASK 0xffffffff
+#define A6XX_RB_2D_DST_FLAGS_PLANE__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_FLAGS_PLANE(uint32_t val)
+{
+ return ((val) << A6XX_RB_2D_DST_FLAGS_PLANE__SHIFT) & A6XX_RB_2D_DST_FLAGS_PLANE__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_FLAGS_PLANE_PITCH 0x00008c25
+#define A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__MASK 0x000000ff
+#define A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_FLAGS_PLANE_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__SHIFT) & A6XX_RB_2D_DST_FLAGS_PLANE_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C0 0x00008c2c
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C1 0x00008c2d
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C2 0x00008c2e
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C3 0x00008c2f
+
+#define REG_A6XX_RB_UNKNOWN_8E01 0x00008e01
+
+#define REG_A6XX_RB_DBG_ECO_CNTL 0x00008e04
+
+#define REG_A6XX_RB_ADDR_MODE_CNTL 0x00008e05
+
+#define REG_A6XX_RB_CCU_CNTL 0x00008e07
+#define A6XX_RB_CCU_CNTL_CONCURRENT_RESOLVE 0x00000004
+#define A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI__MASK 0x00000080
+#define A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI__SHIFT 7
+static inline uint32_t A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI(uint32_t val)
+{
+ return ((val) << A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI__SHIFT) & A6XX_RB_CCU_CNTL_DEPTH_OFFSET_HI__MASK;
+}
+#define A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI__MASK 0x00000200
+#define A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI__SHIFT 9
+static inline uint32_t A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI(uint32_t val)
+{
+ return ((val) << A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI__SHIFT) & A6XX_RB_CCU_CNTL_COLOR_OFFSET_HI__MASK;
+}
+#define A6XX_RB_CCU_CNTL_DEPTH_OFFSET__MASK 0x001ff000
+#define A6XX_RB_CCU_CNTL_DEPTH_OFFSET__SHIFT 12
+static inline uint32_t A6XX_RB_CCU_CNTL_DEPTH_OFFSET(uint32_t val)
+{
+ return ((val >> 12) << A6XX_RB_CCU_CNTL_DEPTH_OFFSET__SHIFT) & A6XX_RB_CCU_CNTL_DEPTH_OFFSET__MASK;
+}
+#define A6XX_RB_CCU_CNTL_GMEM 0x00400000
+#define A6XX_RB_CCU_CNTL_COLOR_OFFSET__MASK 0xff800000
+#define A6XX_RB_CCU_CNTL_COLOR_OFFSET__SHIFT 23
+static inline uint32_t A6XX_RB_CCU_CNTL_COLOR_OFFSET(uint32_t val)
+{
+ return ((val >> 12) << A6XX_RB_CCU_CNTL_COLOR_OFFSET__SHIFT) & A6XX_RB_CCU_CNTL_COLOR_OFFSET__MASK;
+}
+
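+/* Editorial note, not part of the generated output: the RB_CCU_CNTL offset
+ * builders above (like the *_BASE_GMEM builders earlier in this file) take a
+ * byte offset into GMEM and drop the low 12 bits, i.e. the hardware fields
+ * hold 4 KiB-granular offsets; callers are expected to pass suitably aligned
+ * values.
+ */
+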
+#define REG_A6XX_RB_NC_MODE_CNTL 0x00008e08
+#define A6XX_RB_NC_MODE_CNTL_MODE 0x00000001
+#define A6XX_RB_NC_MODE_CNTL_LOWER_BIT__MASK 0x00000006
+#define A6XX_RB_NC_MODE_CNTL_LOWER_BIT__SHIFT 1
+static inline uint32_t A6XX_RB_NC_MODE_CNTL_LOWER_BIT(uint32_t val)
+{
+ return ((val) << A6XX_RB_NC_MODE_CNTL_LOWER_BIT__SHIFT) & A6XX_RB_NC_MODE_CNTL_LOWER_BIT__MASK;
+}
+#define A6XX_RB_NC_MODE_CNTL_MIN_ACCESS_LENGTH 0x00000008
+#define A6XX_RB_NC_MODE_CNTL_AMSBC 0x00000010
+#define A6XX_RB_NC_MODE_CNTL_UPPER_BIT__MASK 0x00000400
+#define A6XX_RB_NC_MODE_CNTL_UPPER_BIT__SHIFT 10
+static inline uint32_t A6XX_RB_NC_MODE_CNTL_UPPER_BIT(uint32_t val)
+{
+ return ((val) << A6XX_RB_NC_MODE_CNTL_UPPER_BIT__SHIFT) & A6XX_RB_NC_MODE_CNTL_UPPER_BIT__MASK;
+}
+#define A6XX_RB_NC_MODE_CNTL_RGB565_PREDICATOR 0x00000800
+#define A6XX_RB_NC_MODE_CNTL_UNK12__MASK 0x00003000
+#define A6XX_RB_NC_MODE_CNTL_UNK12__SHIFT 12
+static inline uint32_t A6XX_RB_NC_MODE_CNTL_UNK12(uint32_t val)
+{
+ return ((val) << A6XX_RB_NC_MODE_CNTL_UNK12__SHIFT) & A6XX_RB_NC_MODE_CNTL_UNK12__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_PERFCTR_RB_SEL(uint32_t i0) { return 0x00008e10 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_RB_PERFCTR_CCU_SEL(uint32_t i0) { return 0x00008e18 + 0x1*i0; }
+
+#define REG_A6XX_RB_UNKNOWN_8E28 0x00008e28
+
+static inline uint32_t REG_A6XX_RB_PERFCTR_CMP_SEL(uint32_t i0) { return 0x00008e2c + 0x1*i0; }
+
+static inline uint32_t REG_A7XX_RB_PERFCTR_UFC_SEL(uint32_t i0) { return 0x00008e30 + 0x1*i0; }
+
+#define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST 0x00008e3b
+
+#define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD 0x00008e3d
+
+#define REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE 0x00008e50
+
+#define REG_A6XX_RB_UNKNOWN_8E51 0x00008e51
+#define A6XX_RB_UNKNOWN_8E51__MASK 0xffffffff
+#define A6XX_RB_UNKNOWN_8E51__SHIFT 0
+static inline uint32_t A6XX_RB_UNKNOWN_8E51(uint32_t val)
+{
+ return ((val) << A6XX_RB_UNKNOWN_8E51__SHIFT) & A6XX_RB_UNKNOWN_8E51__MASK;
+}
+
+#define REG_A6XX_VPC_GS_PARAM 0x00009100
+#define A6XX_VPC_GS_PARAM_LINELENGTHLOC__MASK 0x000000ff
+#define A6XX_VPC_GS_PARAM_LINELENGTHLOC__SHIFT 0
+static inline uint32_t A6XX_VPC_GS_PARAM_LINELENGTHLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_PARAM_LINELENGTHLOC__SHIFT) & A6XX_VPC_GS_PARAM_LINELENGTHLOC__MASK;
+}
+
+#define REG_A6XX_VPC_VS_CLIP_CNTL 0x00009101
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8
+static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK;
+}
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000
+#define A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16
+static inline uint32_t A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_VS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK;
+}
+
+#define REG_A6XX_VPC_GS_CLIP_CNTL 0x00009102
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8
+static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK;
+}
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000
+#define A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16
+static inline uint32_t A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_GS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK;
+}
+
+#define REG_A6XX_VPC_DS_CLIP_CNTL 0x00009103
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__MASK 0x000000ff
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__SHIFT 0
+static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_CLIP_MASK__MASK;
+}
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK 0x0000ff00
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT 8
+static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_03_LOC__MASK;
+}
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK 0x00ff0000
+#define A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT 16
+static inline uint32_t A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__SHIFT) & A6XX_VPC_DS_CLIP_CNTL_CLIP_DIST_47_LOC__MASK;
+}
+
+#define REG_A6XX_VPC_VS_LAYER_CNTL 0x00009104
+#define A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__MASK 0x000000ff
+#define A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__SHIFT 0
+static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_LAYERLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_LAYERLOC__MASK;
+}
+#define A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__MASK 0x0000ff00
+#define A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_VS_LAYER_CNTL_VIEWLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_VS_LAYER_CNTL_VIEWLOC__MASK;
+}
+
+#define REG_A6XX_VPC_GS_LAYER_CNTL 0x00009105
+#define A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__MASK 0x000000ff
+#define A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__SHIFT 0
+static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_LAYERLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_LAYERLOC__MASK;
+}
+#define A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__MASK 0x0000ff00
+#define A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_GS_LAYER_CNTL_VIEWLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_GS_LAYER_CNTL_VIEWLOC__MASK;
+}
+
+#define REG_A6XX_VPC_DS_LAYER_CNTL 0x00009106
+#define A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__MASK 0x000000ff
+#define A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__SHIFT 0
+static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_LAYERLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_LAYERLOC__MASK;
+}
+#define A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__MASK 0x0000ff00
+#define A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_DS_LAYER_CNTL_VIEWLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__SHIFT) & A6XX_VPC_DS_LAYER_CNTL_VIEWLOC__MASK;
+}
+
+#define REG_A6XX_VPC_UNKNOWN_9107 0x00009107
+#define A6XX_VPC_UNKNOWN_9107_RASTER_DISCARD 0x00000001
+#define A6XX_VPC_UNKNOWN_9107_UNK2 0x00000004
+
+#define REG_A6XX_VPC_POLYGON_MODE 0x00009108
+#define A6XX_VPC_POLYGON_MODE_MODE__MASK 0x00000003
+#define A6XX_VPC_POLYGON_MODE_MODE__SHIFT 0
+static inline uint32_t A6XX_VPC_POLYGON_MODE_MODE(enum a6xx_polygon_mode val)
+{
+ return ((val) << A6XX_VPC_POLYGON_MODE_MODE__SHIFT) & A6XX_VPC_POLYGON_MODE_MODE__MASK;
+}
+
+static inline uint32_t REG_A6XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00009200 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00009200 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00009208 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00009208 + 0x1*i0; }
+
+#define REG_A6XX_VPC_UNKNOWN_9210 0x00009210
+
+#define REG_A6XX_VPC_UNKNOWN_9211 0x00009211
+
+static inline uint32_t REG_A6XX_VPC_VAR(uint32_t i0) { return 0x00009212 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x00009212 + 0x1*i0; }
+
+#define REG_A6XX_VPC_SO_CNTL 0x00009216
+#define A6XX_VPC_SO_CNTL_ADDR__MASK 0x000000ff
+#define A6XX_VPC_SO_CNTL_ADDR__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_CNTL_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_CNTL_ADDR__SHIFT) & A6XX_VPC_SO_CNTL_ADDR__MASK;
+}
+#define A6XX_VPC_SO_CNTL_RESET 0x00010000
+
+#define REG_A6XX_VPC_SO_PROG 0x00009217
+#define A6XX_VPC_SO_PROG_A_BUF__MASK 0x00000003
+#define A6XX_VPC_SO_PROG_A_BUF__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_PROG_A_BUF(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_PROG_A_BUF__SHIFT) & A6XX_VPC_SO_PROG_A_BUF__MASK;
+}
+#define A6XX_VPC_SO_PROG_A_OFF__MASK 0x000007fc
+#define A6XX_VPC_SO_PROG_A_OFF__SHIFT 2
+static inline uint32_t A6XX_VPC_SO_PROG_A_OFF(uint32_t val)
+{
+ return ((val >> 2) << A6XX_VPC_SO_PROG_A_OFF__SHIFT) & A6XX_VPC_SO_PROG_A_OFF__MASK;
+}
+#define A6XX_VPC_SO_PROG_A_EN 0x00000800
+#define A6XX_VPC_SO_PROG_B_BUF__MASK 0x00003000
+#define A6XX_VPC_SO_PROG_B_BUF__SHIFT 12
+static inline uint32_t A6XX_VPC_SO_PROG_B_BUF(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_PROG_B_BUF__SHIFT) & A6XX_VPC_SO_PROG_B_BUF__MASK;
+}
+#define A6XX_VPC_SO_PROG_B_OFF__MASK 0x007fc000
+#define A6XX_VPC_SO_PROG_B_OFF__SHIFT 14
+static inline uint32_t A6XX_VPC_SO_PROG_B_OFF(uint32_t val)
+{
+ return ((val >> 2) << A6XX_VPC_SO_PROG_B_OFF__SHIFT) & A6XX_VPC_SO_PROG_B_OFF__MASK;
+}
+#define A6XX_VPC_SO_PROG_B_EN 0x00800000
+
+#define REG_A6XX_VPC_SO_STREAM_COUNTS 0x00009218
+#define A6XX_VPC_SO_STREAM_COUNTS__MASK 0xffffffff
+#define A6XX_VPC_SO_STREAM_COUNTS__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_STREAM_COUNTS(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_STREAM_COUNTS__SHIFT) & A6XX_VPC_SO_STREAM_COUNTS__MASK;
+}
+
+static inline uint32_t REG_A6XX_VPC_SO(uint32_t i0) { return 0x0000921a + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_BASE(uint32_t i0) { return 0x0000921a + 0x7*i0; }
+#define A6XX_VPC_SO_BUFFER_BASE__MASK 0xffffffff
+#define A6XX_VPC_SO_BUFFER_BASE__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_BUFFER_BASE(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_BUFFER_BASE__SHIFT) & A6XX_VPC_SO_BUFFER_BASE__MASK;
+}
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_SIZE(uint32_t i0) { return 0x0000921c + 0x7*i0; }
+#define A6XX_VPC_SO_BUFFER_SIZE__MASK 0xfffffffc
+#define A6XX_VPC_SO_BUFFER_SIZE__SHIFT 2
+static inline uint32_t A6XX_VPC_SO_BUFFER_SIZE(uint32_t val)
+{
+ return ((val >> 2) << A6XX_VPC_SO_BUFFER_SIZE__SHIFT) & A6XX_VPC_SO_BUFFER_SIZE__MASK;
+}
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_STRIDE(uint32_t i0) { return 0x0000921d + 0x7*i0; }
+#define A6XX_VPC_SO_BUFFER_STRIDE__MASK 0x000003ff
+#define A6XX_VPC_SO_BUFFER_STRIDE__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_BUFFER_STRIDE(uint32_t val)
+{
+ return ((val >> 2) << A6XX_VPC_SO_BUFFER_STRIDE__SHIFT) & A6XX_VPC_SO_BUFFER_STRIDE__MASK;
+}
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_OFFSET(uint32_t i0) { return 0x0000921e + 0x7*i0; }
+#define A6XX_VPC_SO_BUFFER_OFFSET__MASK 0xfffffffc
+#define A6XX_VPC_SO_BUFFER_OFFSET__SHIFT 2
+static inline uint32_t A6XX_VPC_SO_BUFFER_OFFSET(uint32_t val)
+{
+ return ((val >> 2) << A6XX_VPC_SO_BUFFER_OFFSET__SHIFT) & A6XX_VPC_SO_BUFFER_OFFSET__MASK;
+}
+
+static inline uint32_t REG_A6XX_VPC_SO_FLUSH_BASE(uint32_t i0) { return 0x0000921f + 0x7*i0; }
+#define A6XX_VPC_SO_FLUSH_BASE__MASK 0xffffffff
+#define A6XX_VPC_SO_FLUSH_BASE__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_FLUSH_BASE(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_FLUSH_BASE__SHIFT) & A6XX_VPC_SO_FLUSH_BASE__MASK;
+}
+
+#define REG_A6XX_VPC_POINT_COORD_INVERT 0x00009236
+#define A6XX_VPC_POINT_COORD_INVERT_INVERT 0x00000001
+
+#define REG_A6XX_VPC_UNKNOWN_9300 0x00009300
+
+#define REG_A6XX_VPC_VS_PACK 0x00009301
+#define A6XX_VPC_VS_PACK_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_VPC_VS_PACK_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_VPC_VS_PACK_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_VS_PACK_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_VPC_VS_PACK_POSITIONLOC__MASK 0x0000ff00
+#define A6XX_VPC_VS_PACK_POSITIONLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_VS_PACK_POSITIONLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_PACK_POSITIONLOC__SHIFT) & A6XX_VPC_VS_PACK_POSITIONLOC__MASK;
+}
+#define A6XX_VPC_VS_PACK_PSIZELOC__MASK 0x00ff0000
+#define A6XX_VPC_VS_PACK_PSIZELOC__SHIFT 16
+static inline uint32_t A6XX_VPC_VS_PACK_PSIZELOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_VS_PACK_PSIZELOC__MASK;
+}
+#define A6XX_VPC_VS_PACK_EXTRAPOS__MASK 0x0f000000
+#define A6XX_VPC_VS_PACK_EXTRAPOS__SHIFT 24
+static inline uint32_t A6XX_VPC_VS_PACK_EXTRAPOS(uint32_t val)
+{
+ return ((val) << A6XX_VPC_VS_PACK_EXTRAPOS__SHIFT) & A6XX_VPC_VS_PACK_EXTRAPOS__MASK;
+}
+
+#define REG_A6XX_VPC_GS_PACK 0x00009302
+#define A6XX_VPC_GS_PACK_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_VPC_GS_PACK_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_VPC_GS_PACK_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_GS_PACK_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_VPC_GS_PACK_POSITIONLOC__MASK 0x0000ff00
+#define A6XX_VPC_GS_PACK_POSITIONLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_GS_PACK_POSITIONLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_PACK_POSITIONLOC__SHIFT) & A6XX_VPC_GS_PACK_POSITIONLOC__MASK;
+}
+#define A6XX_VPC_GS_PACK_PSIZELOC__MASK 0x00ff0000
+#define A6XX_VPC_GS_PACK_PSIZELOC__SHIFT 16
+static inline uint32_t A6XX_VPC_GS_PACK_PSIZELOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_GS_PACK_PSIZELOC__MASK;
+}
+#define A6XX_VPC_GS_PACK_EXTRAPOS__MASK 0x0f000000
+#define A6XX_VPC_GS_PACK_EXTRAPOS__SHIFT 24
+static inline uint32_t A6XX_VPC_GS_PACK_EXTRAPOS(uint32_t val)
+{
+ return ((val) << A6XX_VPC_GS_PACK_EXTRAPOS__SHIFT) & A6XX_VPC_GS_PACK_EXTRAPOS__MASK;
+}
+
+#define REG_A6XX_VPC_DS_PACK 0x00009303
+#define A6XX_VPC_DS_PACK_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_VPC_DS_PACK_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_VPC_DS_PACK_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_DS_PACK_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_VPC_DS_PACK_POSITIONLOC__MASK 0x0000ff00
+#define A6XX_VPC_DS_PACK_POSITIONLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_DS_PACK_POSITIONLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_PACK_POSITIONLOC__SHIFT) & A6XX_VPC_DS_PACK_POSITIONLOC__MASK;
+}
+#define A6XX_VPC_DS_PACK_PSIZELOC__MASK 0x00ff0000
+#define A6XX_VPC_DS_PACK_PSIZELOC__SHIFT 16
+static inline uint32_t A6XX_VPC_DS_PACK_PSIZELOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_PACK_PSIZELOC__SHIFT) & A6XX_VPC_DS_PACK_PSIZELOC__MASK;
+}
+#define A6XX_VPC_DS_PACK_EXTRAPOS__MASK 0x0f000000
+#define A6XX_VPC_DS_PACK_EXTRAPOS__SHIFT 24
+static inline uint32_t A6XX_VPC_DS_PACK_EXTRAPOS(uint32_t val)
+{
+ return ((val) << A6XX_VPC_DS_PACK_EXTRAPOS__SHIFT) & A6XX_VPC_DS_PACK_EXTRAPOS__MASK;
+}
+
+#define REG_A6XX_VPC_CNTL_0 0x00009304
+#define A6XX_VPC_CNTL_0_NUMNONPOSVAR__MASK 0x000000ff
+#define A6XX_VPC_CNTL_0_NUMNONPOSVAR__SHIFT 0
+static inline uint32_t A6XX_VPC_CNTL_0_NUMNONPOSVAR(uint32_t val)
+{
+ return ((val) << A6XX_VPC_CNTL_0_NUMNONPOSVAR__SHIFT) & A6XX_VPC_CNTL_0_NUMNONPOSVAR__MASK;
+}
+#define A6XX_VPC_CNTL_0_PRIMIDLOC__MASK 0x0000ff00
+#define A6XX_VPC_CNTL_0_PRIMIDLOC__SHIFT 8
+static inline uint32_t A6XX_VPC_CNTL_0_PRIMIDLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_CNTL_0_PRIMIDLOC__SHIFT) & A6XX_VPC_CNTL_0_PRIMIDLOC__MASK;
+}
+#define A6XX_VPC_CNTL_0_VARYING 0x00010000
+#define A6XX_VPC_CNTL_0_VIEWIDLOC__MASK 0xff000000
+#define A6XX_VPC_CNTL_0_VIEWIDLOC__SHIFT 24
+static inline uint32_t A6XX_VPC_CNTL_0_VIEWIDLOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_CNTL_0_VIEWIDLOC__SHIFT) & A6XX_VPC_CNTL_0_VIEWIDLOC__MASK;
+}
+
+#define REG_A6XX_VPC_SO_STREAM_CNTL 0x00009305
+#define A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__MASK 0x00000007
+#define A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF0_STREAM__MASK;
+}
+#define A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__MASK 0x00000038
+#define A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__SHIFT 3
+static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF1_STREAM__MASK;
+}
+#define A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__MASK 0x000001c0
+#define A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__SHIFT 6
+static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF2_STREAM__MASK;
+}
+#define A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__MASK 0x00000e00
+#define A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__SHIFT 9
+static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_BUF3_STREAM__MASK;
+}
+#define A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__MASK 0x00078000
+#define A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__SHIFT 15
+static inline uint32_t A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__SHIFT) & A6XX_VPC_SO_STREAM_CNTL_STREAM_ENABLE__MASK;
+}
+
+#define REG_A6XX_VPC_SO_DISABLE 0x00009306
+#define A6XX_VPC_SO_DISABLE_DISABLE 0x00000001
+
+#define REG_A6XX_VPC_DBG_ECO_CNTL 0x00009600
+
+#define REG_A6XX_VPC_ADDR_MODE_CNTL 0x00009601
+
+#define REG_A6XX_VPC_UNKNOWN_9602 0x00009602
+
+#define REG_A6XX_VPC_UNKNOWN_9603 0x00009603
+
+static inline uint32_t REG_A6XX_VPC_PERFCTR_VPC_SEL(uint32_t i0) { return 0x00009604 + 0x1*i0; }
+
+static inline uint32_t REG_A7XX_VPC_PERFCTR_VPC_SEL(uint32_t i0) { return 0x0000960b + 0x1*i0; }
+
+#define REG_A6XX_PC_TESS_NUM_VERTEX 0x00009800
+
+#define REG_A6XX_PC_HS_INPUT_SIZE 0x00009801
+#define A6XX_PC_HS_INPUT_SIZE_SIZE__MASK 0x000007ff
+#define A6XX_PC_HS_INPUT_SIZE_SIZE__SHIFT 0
+static inline uint32_t A6XX_PC_HS_INPUT_SIZE_SIZE(uint32_t val)
+{
+ return ((val) << A6XX_PC_HS_INPUT_SIZE_SIZE__SHIFT) & A6XX_PC_HS_INPUT_SIZE_SIZE__MASK;
+}
+#define A6XX_PC_HS_INPUT_SIZE_UNK13__MASK 0x00002000
+#define A6XX_PC_HS_INPUT_SIZE_UNK13__SHIFT 13
+static inline uint32_t A6XX_PC_HS_INPUT_SIZE_UNK13(uint32_t val)
+{
+ return ((val) << A6XX_PC_HS_INPUT_SIZE_UNK13__SHIFT) & A6XX_PC_HS_INPUT_SIZE_UNK13__MASK;
+}
+
+#define REG_A6XX_PC_TESS_CNTL 0x00009802
+#define A6XX_PC_TESS_CNTL_SPACING__MASK 0x00000003
+#define A6XX_PC_TESS_CNTL_SPACING__SHIFT 0
+static inline uint32_t A6XX_PC_TESS_CNTL_SPACING(enum a6xx_tess_spacing val)
+{
+ return ((val) << A6XX_PC_TESS_CNTL_SPACING__SHIFT) & A6XX_PC_TESS_CNTL_SPACING__MASK;
+}
+#define A6XX_PC_TESS_CNTL_OUTPUT__MASK 0x0000000c
+#define A6XX_PC_TESS_CNTL_OUTPUT__SHIFT 2
+static inline uint32_t A6XX_PC_TESS_CNTL_OUTPUT(enum a6xx_tess_output val)
+{
+ return ((val) << A6XX_PC_TESS_CNTL_OUTPUT__SHIFT) & A6XX_PC_TESS_CNTL_OUTPUT__MASK;
+}
+
+#define REG_A6XX_PC_RESTART_INDEX 0x00009803
+
+#define REG_A6XX_PC_MODE_CNTL 0x00009804
+
+#define REG_A6XX_PC_POWER_CNTL 0x00009805
+
+#define REG_A6XX_PC_PRIMID_PASSTHRU 0x00009806
+
+#define REG_A6XX_PC_SO_STREAM_CNTL 0x00009808
+#define A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE__MASK 0x00078000
+#define A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE__SHIFT 15
+static inline uint32_t A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE__SHIFT) & A6XX_PC_SO_STREAM_CNTL_STREAM_ENABLE__MASK;
+}
+
+#define REG_A6XX_PC_DGEN_SU_CONSERVATIVE_RAS_CNTL 0x0000980a
+#define A6XX_PC_DGEN_SU_CONSERVATIVE_RAS_CNTL_CONSERVATIVERASEN 0x00000001
+
+#define REG_A6XX_PC_DRAW_CMD 0x00009840
+#define A6XX_PC_DRAW_CMD_STATE_ID__MASK 0x000000ff
+#define A6XX_PC_DRAW_CMD_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_PC_DRAW_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_PC_DRAW_CMD_STATE_ID__SHIFT) & A6XX_PC_DRAW_CMD_STATE_ID__MASK;
+}
+
+#define REG_A6XX_PC_DISPATCH_CMD 0x00009841
+#define A6XX_PC_DISPATCH_CMD_STATE_ID__MASK 0x000000ff
+#define A6XX_PC_DISPATCH_CMD_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_PC_DISPATCH_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_PC_DISPATCH_CMD_STATE_ID__SHIFT) & A6XX_PC_DISPATCH_CMD_STATE_ID__MASK;
+}
+
+#define REG_A6XX_PC_EVENT_CMD 0x00009842
+#define A6XX_PC_EVENT_CMD_STATE_ID__MASK 0x00ff0000
+#define A6XX_PC_EVENT_CMD_STATE_ID__SHIFT 16
+static inline uint32_t A6XX_PC_EVENT_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_PC_EVENT_CMD_STATE_ID__SHIFT) & A6XX_PC_EVENT_CMD_STATE_ID__MASK;
+}
+#define A6XX_PC_EVENT_CMD_EVENT__MASK 0x0000007f
+#define A6XX_PC_EVENT_CMD_EVENT__SHIFT 0
+static inline uint32_t A6XX_PC_EVENT_CMD_EVENT(enum vgt_event_type val)
+{
+ return ((val) << A6XX_PC_EVENT_CMD_EVENT__SHIFT) & A6XX_PC_EVENT_CMD_EVENT__MASK;
+}
+
+#define REG_A6XX_PC_MARKER 0x00009880
+
+#define REG_A6XX_PC_POLYGON_MODE 0x00009981
+#define A6XX_PC_POLYGON_MODE_MODE__MASK 0x00000003
+#define A6XX_PC_POLYGON_MODE_MODE__SHIFT 0
+static inline uint32_t A6XX_PC_POLYGON_MODE_MODE(enum a6xx_polygon_mode val)
+{
+ return ((val) << A6XX_PC_POLYGON_MODE_MODE__SHIFT) & A6XX_PC_POLYGON_MODE_MODE__MASK;
+}
+
+#define REG_A6XX_PC_RASTER_CNTL 0x00009980
+#define A6XX_PC_RASTER_CNTL_STREAM__MASK 0x00000003
+#define A6XX_PC_RASTER_CNTL_STREAM__SHIFT 0
+static inline uint32_t A6XX_PC_RASTER_CNTL_STREAM(uint32_t val)
+{
+ return ((val) << A6XX_PC_RASTER_CNTL_STREAM__SHIFT) & A6XX_PC_RASTER_CNTL_STREAM__MASK;
+}
+#define A6XX_PC_RASTER_CNTL_DISCARD 0x00000004
+
+#define REG_A6XX_PC_PRIMITIVE_CNTL_0 0x00009b00
+#define A6XX_PC_PRIMITIVE_CNTL_0_PRIMITIVE_RESTART 0x00000001
+#define A6XX_PC_PRIMITIVE_CNTL_0_PROVOKING_VTX_LAST 0x00000002
+#define A6XX_PC_PRIMITIVE_CNTL_0_TESS_UPPER_LEFT_DOMAIN_ORIGIN 0x00000004
+#define A6XX_PC_PRIMITIVE_CNTL_0_UNK3 0x00000008
+
+#define REG_A6XX_PC_VS_OUT_CNTL 0x00009b01
+#define A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_VS_OUT_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_PC_VS_OUT_CNTL_PSIZE 0x00000100
+#define A6XX_PC_VS_OUT_CNTL_LAYER 0x00000200
+#define A6XX_PC_VS_OUT_CNTL_VIEW 0x00000400
+#define A6XX_PC_VS_OUT_CNTL_PRIMITIVE_ID 0x00000800
+#define A6XX_PC_VS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000
+#define A6XX_PC_VS_OUT_CNTL_CLIP_MASK__SHIFT 16
+static inline uint32_t A6XX_PC_VS_OUT_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_PC_VS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_VS_OUT_CNTL_CLIP_MASK__MASK;
+}
+
+#define REG_A6XX_PC_GS_OUT_CNTL 0x00009b02
+#define A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_GS_OUT_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_PC_GS_OUT_CNTL_PSIZE 0x00000100
+#define A6XX_PC_GS_OUT_CNTL_LAYER 0x00000200
+#define A6XX_PC_GS_OUT_CNTL_VIEW 0x00000400
+#define A6XX_PC_GS_OUT_CNTL_PRIMITIVE_ID 0x00000800
+#define A6XX_PC_GS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000
+#define A6XX_PC_GS_OUT_CNTL_CLIP_MASK__SHIFT 16
+static inline uint32_t A6XX_PC_GS_OUT_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_PC_GS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_GS_OUT_CNTL_CLIP_MASK__MASK;
+}
+
+#define REG_A6XX_PC_HS_OUT_CNTL 0x00009b03
+#define A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_HS_OUT_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_PC_HS_OUT_CNTL_PSIZE 0x00000100
+#define A6XX_PC_HS_OUT_CNTL_LAYER 0x00000200
+#define A6XX_PC_HS_OUT_CNTL_VIEW 0x00000400
+#define A6XX_PC_HS_OUT_CNTL_PRIMITIVE_ID 0x00000800
+#define A6XX_PC_HS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000
+#define A6XX_PC_HS_OUT_CNTL_CLIP_MASK__SHIFT 16
+static inline uint32_t A6XX_PC_HS_OUT_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_PC_HS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_HS_OUT_CNTL_CLIP_MASK__MASK;
+}
+
+#define REG_A6XX_PC_DS_OUT_CNTL 0x00009b04
+#define A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__SHIFT) & A6XX_PC_DS_OUT_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_PC_DS_OUT_CNTL_PSIZE 0x00000100
+#define A6XX_PC_DS_OUT_CNTL_LAYER 0x00000200
+#define A6XX_PC_DS_OUT_CNTL_VIEW 0x00000400
+#define A6XX_PC_DS_OUT_CNTL_PRIMITIVE_ID 0x00000800
+#define A6XX_PC_DS_OUT_CNTL_CLIP_MASK__MASK 0x00ff0000
+#define A6XX_PC_DS_OUT_CNTL_CLIP_MASK__SHIFT 16
+static inline uint32_t A6XX_PC_DS_OUT_CNTL_CLIP_MASK(uint32_t val)
+{
+ return ((val) << A6XX_PC_DS_OUT_CNTL_CLIP_MASK__SHIFT) & A6XX_PC_DS_OUT_CNTL_CLIP_MASK__MASK;
+}
+
+#define REG_A6XX_PC_PRIMITIVE_CNTL_5 0x00009b05
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__MASK 0x000000ff
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__SHIFT 0
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT(uint32_t val)
+{
+ return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_GS_VERTICES_OUT__MASK;
+}
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__MASK 0x00007c00
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__SHIFT 10
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS(uint32_t val)
+{
+ return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_GS_INVOCATIONS__MASK;
+}
+#define A6XX_PC_PRIMITIVE_CNTL_5_LINELENGTHEN 0x00008000
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__MASK 0x00030000
+#define A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__SHIFT 16
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT(enum a6xx_tess_output val)
+{
+ return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_GS_OUTPUT__MASK;
+}
+#define A6XX_PC_PRIMITIVE_CNTL_5_UNK18__MASK 0x00040000
+#define A6XX_PC_PRIMITIVE_CNTL_5_UNK18__SHIFT 18
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_5_UNK18(uint32_t val)
+{
+ return ((val) << A6XX_PC_PRIMITIVE_CNTL_5_UNK18__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_5_UNK18__MASK;
+}
+
+#define REG_A6XX_PC_PRIMITIVE_CNTL_6 0x00009b06
+#define A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__MASK 0x000007ff
+#define A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_6_STRIDE_IN_VPC__MASK;
+}
+
+#define REG_A6XX_PC_MULTIVIEW_CNTL 0x00009b07
+#define A6XX_PC_MULTIVIEW_CNTL_ENABLE 0x00000001
+#define A6XX_PC_MULTIVIEW_CNTL_DISABLEMULTIPOS 0x00000002
+#define A6XX_PC_MULTIVIEW_CNTL_VIEWS__MASK 0x0000007c
+#define A6XX_PC_MULTIVIEW_CNTL_VIEWS__SHIFT 2
+static inline uint32_t A6XX_PC_MULTIVIEW_CNTL_VIEWS(uint32_t val)
+{
+ return ((val) << A6XX_PC_MULTIVIEW_CNTL_VIEWS__SHIFT) & A6XX_PC_MULTIVIEW_CNTL_VIEWS__MASK;
+}
+
+#define REG_A6XX_PC_MULTIVIEW_MASK 0x00009b08
+
+#define REG_A6XX_PC_2D_EVENT_CMD 0x00009c00
+#define A6XX_PC_2D_EVENT_CMD_EVENT__MASK 0x0000007f
+#define A6XX_PC_2D_EVENT_CMD_EVENT__SHIFT 0
+static inline uint32_t A6XX_PC_2D_EVENT_CMD_EVENT(enum vgt_event_type val)
+{
+ return ((val) << A6XX_PC_2D_EVENT_CMD_EVENT__SHIFT) & A6XX_PC_2D_EVENT_CMD_EVENT__MASK;
+}
+#define A6XX_PC_2D_EVENT_CMD_STATE_ID__MASK 0x0000ff00
+#define A6XX_PC_2D_EVENT_CMD_STATE_ID__SHIFT 8
+static inline uint32_t A6XX_PC_2D_EVENT_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_PC_2D_EVENT_CMD_STATE_ID__SHIFT) & A6XX_PC_2D_EVENT_CMD_STATE_ID__MASK;
+}
+
+#define REG_A6XX_PC_DBG_ECO_CNTL 0x00009e00
+
+#define REG_A6XX_PC_ADDR_MODE_CNTL 0x00009e01
+
+#define REG_A6XX_PC_DRAW_INDX_BASE 0x00009e04
+
+#define REG_A6XX_PC_DRAW_FIRST_INDX 0x00009e06
+
+#define REG_A6XX_PC_DRAW_MAX_INDICES 0x00009e07
+
+#define REG_A6XX_PC_TESSFACTOR_ADDR 0x00009e08
+#define A6XX_PC_TESSFACTOR_ADDR__MASK 0xffffffff
+#define A6XX_PC_TESSFACTOR_ADDR__SHIFT 0
+static inline uint32_t A6XX_PC_TESSFACTOR_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_PC_TESSFACTOR_ADDR__SHIFT) & A6XX_PC_TESSFACTOR_ADDR__MASK;
+}
+
+#define REG_A6XX_PC_DRAW_INITIATOR 0x00009e0b
+#define A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f
+#define A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0
+static inline uint32_t A6XX_PC_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A6XX_PC_DRAW_INITIATOR_PRIM_TYPE__MASK;
+}
+#define A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0
+#define A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A6XX_PC_DRAW_INITIATOR_SOURCE_SELECT__MASK;
+}
+#define A6XX_PC_DRAW_INITIATOR_VIS_CULL__MASK 0x00000300
+#define A6XX_PC_DRAW_INITIATOR_VIS_CULL__SHIFT 8
+static inline uint32_t A6XX_PC_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A6XX_PC_DRAW_INITIATOR_VIS_CULL__SHIFT) & A6XX_PC_DRAW_INITIATOR_VIS_CULL__MASK;
+}
+#define A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000c00
+#define A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__SHIFT 10
+static inline uint32_t A6XX_PC_DRAW_INITIATOR_INDEX_SIZE(enum a4xx_index_size val)
+{
+ return ((val) << A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A6XX_PC_DRAW_INITIATOR_INDEX_SIZE__MASK;
+}
+#define A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__MASK 0x00003000
+#define A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__SHIFT 12
+static inline uint32_t A6XX_PC_DRAW_INITIATOR_PATCH_TYPE(enum a6xx_patch_type val)
+{
+ return ((val) << A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__SHIFT) & A6XX_PC_DRAW_INITIATOR_PATCH_TYPE__MASK;
+}
+#define A6XX_PC_DRAW_INITIATOR_GS_ENABLE 0x00010000
+#define A6XX_PC_DRAW_INITIATOR_TESS_ENABLE 0x00020000
+
+#define REG_A6XX_PC_DRAW_NUM_INSTANCES 0x00009e0c
+
+#define REG_A6XX_PC_DRAW_NUM_INDICES 0x00009e0d
+
+#define REG_A6XX_PC_VSTREAM_CONTROL 0x00009e11
+#define A6XX_PC_VSTREAM_CONTROL_UNK0__MASK 0x0000ffff
+#define A6XX_PC_VSTREAM_CONTROL_UNK0__SHIFT 0
+static inline uint32_t A6XX_PC_VSTREAM_CONTROL_UNK0(uint32_t val)
+{
+ return ((val) << A6XX_PC_VSTREAM_CONTROL_UNK0__SHIFT) & A6XX_PC_VSTREAM_CONTROL_UNK0__MASK;
+}
+#define A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__MASK 0x003f0000
+#define A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__SHIFT 16
+static inline uint32_t A6XX_PC_VSTREAM_CONTROL_VSC_SIZE(uint32_t val)
+{
+ return ((val) << A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__SHIFT) & A6XX_PC_VSTREAM_CONTROL_VSC_SIZE__MASK;
+}
+#define A6XX_PC_VSTREAM_CONTROL_VSC_N__MASK 0x07c00000
+#define A6XX_PC_VSTREAM_CONTROL_VSC_N__SHIFT 22
+static inline uint32_t A6XX_PC_VSTREAM_CONTROL_VSC_N(uint32_t val)
+{
+ return ((val) << A6XX_PC_VSTREAM_CONTROL_VSC_N__SHIFT) & A6XX_PC_VSTREAM_CONTROL_VSC_N__MASK;
+}
+
+#define REG_A6XX_PC_BIN_PRIM_STRM 0x00009e12
+#define A6XX_PC_BIN_PRIM_STRM__MASK 0xffffffff
+#define A6XX_PC_BIN_PRIM_STRM__SHIFT 0
+static inline uint32_t A6XX_PC_BIN_PRIM_STRM(uint32_t val)
+{
+ return ((val) << A6XX_PC_BIN_PRIM_STRM__SHIFT) & A6XX_PC_BIN_PRIM_STRM__MASK;
+}
+
+#define REG_A6XX_PC_BIN_DRAW_STRM 0x00009e14
+#define A6XX_PC_BIN_DRAW_STRM__MASK 0xffffffff
+#define A6XX_PC_BIN_DRAW_STRM__SHIFT 0
+static inline uint32_t A6XX_PC_BIN_DRAW_STRM(uint32_t val)
+{
+ return ((val) << A6XX_PC_BIN_DRAW_STRM__SHIFT) & A6XX_PC_BIN_DRAW_STRM__MASK;
+}
+
+#define REG_A6XX_PC_VISIBILITY_OVERRIDE 0x00009e1c
+#define A6XX_PC_VISIBILITY_OVERRIDE_OVERRIDE 0x00000001
+
+static inline uint32_t REG_A6XX_PC_PERFCTR_PC_SEL(uint32_t i0) { return 0x00009e34 + 0x1*i0; }
+
+static inline uint32_t REG_A7XX_PC_PERFCTR_PC_SEL(uint32_t i0) { return 0x00009e42 + 0x1*i0; }
+
+#define REG_A6XX_PC_UNKNOWN_9E72 0x00009e72
+
+#define REG_A6XX_VFD_CONTROL_0 0x0000a000
+#define A6XX_VFD_CONTROL_0_FETCH_CNT__MASK 0x0000003f
+#define A6XX_VFD_CONTROL_0_FETCH_CNT__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_0_FETCH_CNT(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_0_FETCH_CNT__SHIFT) & A6XX_VFD_CONTROL_0_FETCH_CNT__MASK;
+}
+#define A6XX_VFD_CONTROL_0_DECODE_CNT__MASK 0x00003f00
+#define A6XX_VFD_CONTROL_0_DECODE_CNT__SHIFT 8
+static inline uint32_t A6XX_VFD_CONTROL_0_DECODE_CNT(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_0_DECODE_CNT__SHIFT) & A6XX_VFD_CONTROL_0_DECODE_CNT__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_1 0x0000a001
+#define A6XX_VFD_CONTROL_1_REGID4VTX__MASK 0x000000ff
+#define A6XX_VFD_CONTROL_1_REGID4VTX__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A6XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+#define A6XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00
+#define A6XX_VFD_CONTROL_1_REGID4INST__SHIFT 8
+static inline uint32_t A6XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A6XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+#define A6XX_VFD_CONTROL_1_REGID4PRIMID__MASK 0x00ff0000
+#define A6XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT 16
+static inline uint32_t A6XX_VFD_CONTROL_1_REGID4PRIMID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT) & A6XX_VFD_CONTROL_1_REGID4PRIMID__MASK;
+}
+#define A6XX_VFD_CONTROL_1_REGID4VIEWID__MASK 0xff000000
+#define A6XX_VFD_CONTROL_1_REGID4VIEWID__SHIFT 24
+static inline uint32_t A6XX_VFD_CONTROL_1_REGID4VIEWID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_1_REGID4VIEWID__SHIFT) & A6XX_VFD_CONTROL_1_REGID4VIEWID__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_2 0x0000a002
+#define A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID__MASK 0x000000ff
+#define A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID__SHIFT) & A6XX_VFD_CONTROL_2_REGID_HSRELPATCHID__MASK;
+}
+#define A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__MASK 0x0000ff00
+#define A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__SHIFT 8
+static inline uint32_t A6XX_VFD_CONTROL_2_REGID_INVOCATIONID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__SHIFT) & A6XX_VFD_CONTROL_2_REGID_INVOCATIONID__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_3 0x0000a003
+#define A6XX_VFD_CONTROL_3_REGID_DSPRIMID__MASK 0x000000ff
+#define A6XX_VFD_CONTROL_3_REGID_DSPRIMID__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_3_REGID_DSPRIMID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_3_REGID_DSPRIMID__SHIFT) & A6XX_VFD_CONTROL_3_REGID_DSPRIMID__MASK;
+}
+#define A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID__MASK 0x0000ff00
+#define A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID__SHIFT 8
+static inline uint32_t A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID__SHIFT) & A6XX_VFD_CONTROL_3_REGID_DSRELPATCHID__MASK;
+}
+#define A6XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000
+#define A6XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16
+static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A6XX_VFD_CONTROL_3_REGID_TESSX__MASK;
+}
+#define A6XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000
+#define A6XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24
+static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A6XX_VFD_CONTROL_3_REGID_TESSY__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_4 0x0000a004
+#define A6XX_VFD_CONTROL_4_UNK0__MASK 0x000000ff
+#define A6XX_VFD_CONTROL_4_UNK0__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_4_UNK0(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_4_UNK0__SHIFT) & A6XX_VFD_CONTROL_4_UNK0__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_5 0x0000a005
+#define A6XX_VFD_CONTROL_5_REGID_GSHEADER__MASK 0x000000ff
+#define A6XX_VFD_CONTROL_5_REGID_GSHEADER__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_5_REGID_GSHEADER(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_5_REGID_GSHEADER__SHIFT) & A6XX_VFD_CONTROL_5_REGID_GSHEADER__MASK;
+}
+#define A6XX_VFD_CONTROL_5_UNK8__MASK 0x0000ff00
+#define A6XX_VFD_CONTROL_5_UNK8__SHIFT 8
+static inline uint32_t A6XX_VFD_CONTROL_5_UNK8(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_5_UNK8__SHIFT) & A6XX_VFD_CONTROL_5_UNK8__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_6 0x0000a006
+#define A6XX_VFD_CONTROL_6_PRIMID_PASSTHRU 0x00000001
+
+#define REG_A6XX_VFD_MODE_CNTL 0x0000a007
+#define A6XX_VFD_MODE_CNTL_RENDER_MODE__MASK 0x00000007
+#define A6XX_VFD_MODE_CNTL_RENDER_MODE__SHIFT 0
+static inline uint32_t A6XX_VFD_MODE_CNTL_RENDER_MODE(enum a6xx_render_mode val)
+{
+ return ((val) << A6XX_VFD_MODE_CNTL_RENDER_MODE__SHIFT) & A6XX_VFD_MODE_CNTL_RENDER_MODE__MASK;
+}
+
+#define REG_A6XX_VFD_MULTIVIEW_CNTL 0x0000a008
+#define A6XX_VFD_MULTIVIEW_CNTL_ENABLE 0x00000001
+#define A6XX_VFD_MULTIVIEW_CNTL_DISABLEMULTIPOS 0x00000002
+#define A6XX_VFD_MULTIVIEW_CNTL_VIEWS__MASK 0x0000007c
+#define A6XX_VFD_MULTIVIEW_CNTL_VIEWS__SHIFT 2
+static inline uint32_t A6XX_VFD_MULTIVIEW_CNTL_VIEWS(uint32_t val)
+{
+ return ((val) << A6XX_VFD_MULTIVIEW_CNTL_VIEWS__SHIFT) & A6XX_VFD_MULTIVIEW_CNTL_VIEWS__MASK;
+}
+
+#define REG_A6XX_VFD_ADD_OFFSET 0x0000a009
+#define A6XX_VFD_ADD_OFFSET_VERTEX 0x00000001
+#define A6XX_VFD_ADD_OFFSET_INSTANCE 0x00000002
+
+#define REG_A6XX_VFD_INDEX_OFFSET 0x0000a00e
+
+#define REG_A6XX_VFD_INSTANCE_START_OFFSET 0x0000a00f
+
+static inline uint32_t REG_A6XX_VFD_FETCH(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_FETCH_BASE(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
+#define A6XX_VFD_FETCH_BASE__MASK 0xffffffff
+#define A6XX_VFD_FETCH_BASE__SHIFT 0
+static inline uint32_t A6XX_VFD_FETCH_BASE(uint32_t val)
+{
+ return ((val) << A6XX_VFD_FETCH_BASE__SHIFT) & A6XX_VFD_FETCH_BASE__MASK;
+}
+
+static inline uint32_t REG_A6XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000a012 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000a013 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_DECODE(uint32_t i0) { return 0x0000a090 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000a090 + 0x2*i0; }
+#define A6XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f
+#define A6XX_VFD_DECODE_INSTR_IDX__SHIFT 0
+static inline uint32_t A6XX_VFD_DECODE_INSTR_IDX(uint32_t val)
+{
+ return ((val) << A6XX_VFD_DECODE_INSTR_IDX__SHIFT) & A6XX_VFD_DECODE_INSTR_IDX__MASK;
+}
+#define A6XX_VFD_DECODE_INSTR_OFFSET__MASK 0x0001ffe0
+#define A6XX_VFD_DECODE_INSTR_OFFSET__SHIFT 5
+static inline uint32_t A6XX_VFD_DECODE_INSTR_OFFSET(uint32_t val)
+{
+ return ((val) << A6XX_VFD_DECODE_INSTR_OFFSET__SHIFT) & A6XX_VFD_DECODE_INSTR_OFFSET__MASK;
+}
+#define A6XX_VFD_DECODE_INSTR_INSTANCED 0x00020000
+#define A6XX_VFD_DECODE_INSTR_FORMAT__MASK 0x0ff00000
+#define A6XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
+static inline uint32_t A6XX_VFD_DECODE_INSTR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A6XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A6XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A6XX_VFD_DECODE_INSTR_SWAP__MASK 0x30000000
+#define A6XX_VFD_DECODE_INSTR_SWAP__SHIFT 28
+static inline uint32_t A6XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A6XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
+#define A6XX_VFD_DECODE_INSTR_UNK30 0x40000000
+#define A6XX_VFD_DECODE_INSTR_FLOAT 0x80000000
+
+static inline uint32_t REG_A6XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000a091 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_VFD_DEST_CNTL(uint32_t i0) { return 0x0000a0d0 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000a0d0 + 0x1*i0; }
+#define A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f
+#define A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK;
+}
+#define A6XX_VFD_DEST_CNTL_INSTR_REGID__MASK 0x00000ff0
+#define A6XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT 4
+static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A6XX_VFD_DEST_CNTL_INSTR_REGID__MASK;
+}
+
+#define REG_A6XX_VFD_POWER_CNTL 0x0000a0f8
+
+#define REG_A6XX_VFD_ADDR_MODE_CNTL 0x0000a601
+
+static inline uint32_t REG_A6XX_VFD_PERFCTR_VFD_SEL(uint32_t i0) { return 0x0000a610 + 0x1*i0; }
+
+static inline uint32_t REG_A7XX_VFD_PERFCTR_VFD_SEL(uint32_t i0) { return 0x0000a610 + 0x1*i0; }
+
+#define REG_A6XX_SP_VS_CTRL_REG0 0x0000a800
+#define A6XX_SP_VS_CTRL_REG0_MERGEDREGS 0x00100000
+#define A6XX_SP_VS_CTRL_REG0_EARLYPREAMBLE 0x00200000
+#define A6XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_UNK13 0x00002000
+#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A6XX_SP_VS_BRANCH_COND 0x0000a801
+
+#define REG_A6XX_SP_VS_PRIMITIVE_CNTL 0x0000a802
+#define A6XX_SP_VS_PRIMITIVE_CNTL_OUT__MASK 0x0000003f
+#define A6XX_SP_VS_PRIMITIVE_CNTL_OUT__SHIFT 0
+static inline uint32_t A6XX_SP_VS_PRIMITIVE_CNTL_OUT(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_PRIMITIVE_CNTL_OUT__SHIFT) & A6XX_SP_VS_PRIMITIVE_CNTL_OUT__MASK;
+}
+#define A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__MASK 0x00003fc0
+#define A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT 6
+static inline uint32_t A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT) & A6XX_SP_VS_PRIMITIVE_CNTL_FLAGS_REGID__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_VS_OUT(uint32_t i0) { return 0x0000a803 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000a803 + 0x1*i0; }
+#define A6XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A6XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A6XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A6XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A6XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A6XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A6XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A6XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A6XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A6XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A6XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_VS_VPC_DST(uint32_t i0) { return 0x0000a813 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000a813 + 0x1*i0; }
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A6XX_SP_VS_OBJ_FIRST_EXEC_OFFSET 0x0000a81b
+
+#define REG_A6XX_SP_VS_OBJ_START 0x0000a81c
+#define A6XX_SP_VS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_VS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_VS_OBJ_START(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OBJ_START__SHIFT) & A6XX_SP_VS_OBJ_START__MASK;
+}
+
+#define REG_A6XX_SP_VS_PVT_MEM_PARAM 0x0000a81e
+#define A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val >> 9) << A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_VS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_VS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_VS_PVT_MEM_ADDR 0x0000a81f
+#define A6XX_SP_VS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_VS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_VS_PVT_MEM_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_VS_PVT_MEM_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_VS_PVT_MEM_SIZE 0x0000a821
+#define A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ return ((val >> 12) << A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_VS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_VS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
+
+#define REG_A6XX_SP_VS_TEX_COUNT 0x0000a822
+
+#define REG_A6XX_SP_VS_CONFIG 0x0000a823
+#define A6XX_SP_VS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_VS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_VS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_VS_CONFIG_BINDLESS_UBO 0x00000008
+#define A6XX_SP_VS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_VS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_VS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_VS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CONFIG_NTEX__SHIFT) & A6XX_SP_VS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_VS_CONFIG_NSAMP__MASK 0x003e0000
+#define A6XX_SP_VS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_VS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CONFIG_NSAMP__SHIFT) & A6XX_SP_VS_CONFIG_NSAMP__MASK;
+}
+#define A6XX_SP_VS_CONFIG_NIBO__MASK 0x1fc00000
+#define A6XX_SP_VS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_VS_CONFIG_NIBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CONFIG_NIBO__SHIFT) & A6XX_SP_VS_CONFIG_NIBO__MASK;
+}
+
+#define REG_A6XX_SP_VS_INSTRLEN 0x0000a824
+
+#define REG_A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET 0x0000a825
+#define A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
+{
+ return ((val >> 11) << A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_VS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_SP_HS_CTRL_REG0 0x0000a830
+#define A6XX_SP_HS_CTRL_REG0_EARLYPREAMBLE 0x00100000
+#define A6XX_SP_HS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_HS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_HS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_UNK13 0x00002000
+#define A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A6XX_SP_HS_WAVE_INPUT_SIZE 0x0000a831
+
+#define REG_A6XX_SP_HS_BRANCH_COND 0x0000a832
+
+#define REG_A6XX_SP_HS_OBJ_FIRST_EXEC_OFFSET 0x0000a833
+
+#define REG_A6XX_SP_HS_OBJ_START 0x0000a834
+#define A6XX_SP_HS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_HS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_HS_OBJ_START(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_OBJ_START__SHIFT) & A6XX_SP_HS_OBJ_START__MASK;
+}
+
+#define REG_A6XX_SP_HS_PVT_MEM_PARAM 0x0000a836
+#define A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val >> 9) << A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_HS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_HS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_HS_PVT_MEM_ADDR 0x0000a837
+#define A6XX_SP_HS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_HS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_HS_PVT_MEM_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_HS_PVT_MEM_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_HS_PVT_MEM_SIZE 0x0000a839
+#define A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ return ((val >> 12) << A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_HS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_HS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
+
+#define REG_A6XX_SP_HS_TEX_COUNT 0x0000a83a
+
+#define REG_A6XX_SP_HS_CONFIG 0x0000a83b
+#define A6XX_SP_HS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_HS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_HS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_HS_CONFIG_BINDLESS_UBO 0x00000008
+#define A6XX_SP_HS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_HS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_HS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_HS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CONFIG_NTEX__SHIFT) & A6XX_SP_HS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_HS_CONFIG_NSAMP__MASK 0x003e0000
+#define A6XX_SP_HS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_HS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CONFIG_NSAMP__SHIFT) & A6XX_SP_HS_CONFIG_NSAMP__MASK;
+}
+#define A6XX_SP_HS_CONFIG_NIBO__MASK 0x1fc00000
+#define A6XX_SP_HS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_HS_CONFIG_NIBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CONFIG_NIBO__SHIFT) & A6XX_SP_HS_CONFIG_NIBO__MASK;
+}
+
+#define REG_A6XX_SP_HS_INSTRLEN 0x0000a83c
+
+#define REG_A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET 0x0000a83d
+#define A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
+{
+ return ((val >> 11) << A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_HS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_SP_DS_CTRL_REG0 0x0000a840
+#define A6XX_SP_DS_CTRL_REG0_EARLYPREAMBLE 0x00100000
+#define A6XX_SP_DS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_DS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_DS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_UNK13 0x00002000
+#define A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A6XX_SP_DS_BRANCH_COND 0x0000a841
+
+#define REG_A6XX_SP_DS_PRIMITIVE_CNTL 0x0000a842
+#define A6XX_SP_DS_PRIMITIVE_CNTL_OUT__MASK 0x0000003f
+#define A6XX_SP_DS_PRIMITIVE_CNTL_OUT__SHIFT 0
+static inline uint32_t A6XX_SP_DS_PRIMITIVE_CNTL_OUT(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_PRIMITIVE_CNTL_OUT__SHIFT) & A6XX_SP_DS_PRIMITIVE_CNTL_OUT__MASK;
+}
+#define A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__MASK 0x00003fc0
+#define A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT 6
+static inline uint32_t A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT) & A6XX_SP_DS_PRIMITIVE_CNTL_FLAGS_REGID__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_DS_OUT(uint32_t i0) { return 0x0000a843 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_DS_OUT_REG(uint32_t i0) { return 0x0000a843 + 0x1*i0; }
+#define A6XX_SP_DS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A6XX_SP_DS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_DS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_DS_OUT_REG_A_REGID__MASK;
+}
+#define A6XX_SP_DS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A6XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A6XX_SP_DS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_DS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A6XX_SP_DS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A6XX_SP_DS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A6XX_SP_DS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_DS_OUT_REG_B_REGID__MASK;
+}
+#define A6XX_SP_DS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A6XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A6XX_SP_DS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_DS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_DS_VPC_DST(uint32_t i0) { return 0x0000a853 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_DS_VPC_DST_REG(uint32_t i0) { return 0x0000a853 + 0x1*i0; }
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A6XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A6XX_SP_DS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_DS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A6XX_SP_DS_OBJ_FIRST_EXEC_OFFSET 0x0000a85b
+
+#define REG_A6XX_SP_DS_OBJ_START 0x0000a85c
+#define A6XX_SP_DS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_DS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_DS_OBJ_START(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_OBJ_START__SHIFT) & A6XX_SP_DS_OBJ_START__MASK;
+}
+
+#define REG_A6XX_SP_DS_PVT_MEM_PARAM 0x0000a85e
+#define A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val >> 9) << A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_DS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_DS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_DS_PVT_MEM_ADDR 0x0000a85f
+#define A6XX_SP_DS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_DS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_DS_PVT_MEM_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_DS_PVT_MEM_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_DS_PVT_MEM_SIZE 0x0000a861
+#define A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ return ((val >> 12) << A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_DS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_DS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
+
+#define REG_A6XX_SP_DS_TEX_COUNT 0x0000a862
+
+#define REG_A6XX_SP_DS_CONFIG 0x0000a863
+#define A6XX_SP_DS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_DS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_DS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_DS_CONFIG_BINDLESS_UBO 0x00000008
+#define A6XX_SP_DS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_DS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_DS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_DS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CONFIG_NTEX__SHIFT) & A6XX_SP_DS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_DS_CONFIG_NSAMP__MASK 0x003e0000
+#define A6XX_SP_DS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_DS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CONFIG_NSAMP__SHIFT) & A6XX_SP_DS_CONFIG_NSAMP__MASK;
+}
+#define A6XX_SP_DS_CONFIG_NIBO__MASK 0x1fc00000
+#define A6XX_SP_DS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_DS_CONFIG_NIBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CONFIG_NIBO__SHIFT) & A6XX_SP_DS_CONFIG_NIBO__MASK;
+}
+
+#define REG_A6XX_SP_DS_INSTRLEN 0x0000a864
+
+#define REG_A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET 0x0000a865
+#define A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
+{
+ return ((val >> 11) << A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_DS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_SP_GS_CTRL_REG0 0x0000a870
+#define A6XX_SP_GS_CTRL_REG0_EARLYPREAMBLE 0x00100000
+#define A6XX_SP_GS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_GS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_GS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_UNK13 0x00002000
+#define A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A6XX_SP_GS_PRIM_SIZE 0x0000a871
+
+#define REG_A6XX_SP_GS_BRANCH_COND 0x0000a872
+
+#define REG_A6XX_SP_GS_PRIMITIVE_CNTL 0x0000a873
+#define A6XX_SP_GS_PRIMITIVE_CNTL_OUT__MASK 0x0000003f
+#define A6XX_SP_GS_PRIMITIVE_CNTL_OUT__SHIFT 0
+static inline uint32_t A6XX_SP_GS_PRIMITIVE_CNTL_OUT(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_PRIMITIVE_CNTL_OUT__SHIFT) & A6XX_SP_GS_PRIMITIVE_CNTL_OUT__MASK;
+}
+#define A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__MASK 0x00003fc0
+#define A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT 6
+static inline uint32_t A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__SHIFT) & A6XX_SP_GS_PRIMITIVE_CNTL_FLAGS_REGID__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_GS_OUT(uint32_t i0) { return 0x0000a874 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_GS_OUT_REG(uint32_t i0) { return 0x0000a874 + 0x1*i0; }
+#define A6XX_SP_GS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A6XX_SP_GS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_GS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_GS_OUT_REG_A_REGID__MASK;
+}
+#define A6XX_SP_GS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A6XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A6XX_SP_GS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_GS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A6XX_SP_GS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A6XX_SP_GS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A6XX_SP_GS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_GS_OUT_REG_B_REGID__MASK;
+}
+#define A6XX_SP_GS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A6XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A6XX_SP_GS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_GS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_GS_VPC_DST(uint32_t i0) { return 0x0000a884 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_GS_VPC_DST_REG(uint32_t i0) { return 0x0000a884 + 0x1*i0; }
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A6XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A6XX_SP_GS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_GS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A6XX_SP_GS_OBJ_FIRST_EXEC_OFFSET 0x0000a88c
+
+#define REG_A6XX_SP_GS_OBJ_START 0x0000a88d
+#define A6XX_SP_GS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_GS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_GS_OBJ_START(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_OBJ_START__SHIFT) & A6XX_SP_GS_OBJ_START__MASK;
+}
+
+#define REG_A6XX_SP_GS_PVT_MEM_PARAM 0x0000a88f
+#define A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val >> 9) << A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_GS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_GS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_GS_PVT_MEM_ADDR 0x0000a890
+#define A6XX_SP_GS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_GS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_GS_PVT_MEM_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_GS_PVT_MEM_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_GS_PVT_MEM_SIZE 0x0000a892
+#define A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ return ((val >> 12) << A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_GS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_GS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
+
+#define REG_A6XX_SP_GS_TEX_COUNT 0x0000a893
+
+#define REG_A6XX_SP_GS_CONFIG 0x0000a894
+#define A6XX_SP_GS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_GS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_GS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_GS_CONFIG_BINDLESS_UBO 0x00000008
+#define A6XX_SP_GS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_GS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_GS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_GS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CONFIG_NTEX__SHIFT) & A6XX_SP_GS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_GS_CONFIG_NSAMP__MASK 0x003e0000
+#define A6XX_SP_GS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_GS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CONFIG_NSAMP__SHIFT) & A6XX_SP_GS_CONFIG_NSAMP__MASK;
+}
+#define A6XX_SP_GS_CONFIG_NIBO__MASK 0x1fc00000
+#define A6XX_SP_GS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_GS_CONFIG_NIBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CONFIG_NIBO__SHIFT) & A6XX_SP_GS_CONFIG_NIBO__MASK;
+}
+
+#define REG_A6XX_SP_GS_INSTRLEN 0x0000a895
+
+#define REG_A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET 0x0000a896
+#define A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
+{
+ return ((val >> 11) << A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_GS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_SP_VS_TEX_SAMP 0x0000a8a0
+#define A6XX_SP_VS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_VS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_VS_TEX_SAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_TEX_SAMP__SHIFT) & A6XX_SP_VS_TEX_SAMP__MASK;
+}
+
+#define REG_A6XX_SP_HS_TEX_SAMP 0x0000a8a2
+#define A6XX_SP_HS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_HS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_HS_TEX_SAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_TEX_SAMP__SHIFT) & A6XX_SP_HS_TEX_SAMP__MASK;
+}
+
+#define REG_A6XX_SP_DS_TEX_SAMP 0x0000a8a4
+#define A6XX_SP_DS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_DS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_DS_TEX_SAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_TEX_SAMP__SHIFT) & A6XX_SP_DS_TEX_SAMP__MASK;
+}
+
+#define REG_A6XX_SP_GS_TEX_SAMP 0x0000a8a6
+#define A6XX_SP_GS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_GS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_GS_TEX_SAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_TEX_SAMP__SHIFT) & A6XX_SP_GS_TEX_SAMP__MASK;
+}
+
+#define REG_A6XX_SP_VS_TEX_CONST 0x0000a8a8
+#define A6XX_SP_VS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_VS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_VS_TEX_CONST(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_TEX_CONST__SHIFT) & A6XX_SP_VS_TEX_CONST__MASK;
+}
+
+#define REG_A6XX_SP_HS_TEX_CONST 0x0000a8aa
+#define A6XX_SP_HS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_HS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_HS_TEX_CONST(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_TEX_CONST__SHIFT) & A6XX_SP_HS_TEX_CONST__MASK;
+}
+
+#define REG_A6XX_SP_DS_TEX_CONST 0x0000a8ac
+#define A6XX_SP_DS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_DS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_DS_TEX_CONST(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_TEX_CONST__SHIFT) & A6XX_SP_DS_TEX_CONST__MASK;
+}
+
+#define REG_A6XX_SP_GS_TEX_CONST 0x0000a8ae
+#define A6XX_SP_GS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_GS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_GS_TEX_CONST(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_TEX_CONST__SHIFT) & A6XX_SP_GS_TEX_CONST__MASK;
+}
+
+#define REG_A6XX_SP_FS_CTRL_REG0 0x0000a980
+#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a6xx_threadsize val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_UNK21 0x00200000
+#define A6XX_SP_FS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_FS_CTRL_REG0_DIFF_FINE 0x00800000
+#define A6XX_SP_FS_CTRL_REG0_UNK24 0x01000000
+#define A6XX_SP_FS_CTRL_REG0_UNK25 0x02000000
+#define A6XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_FS_CTRL_REG0_UNK27 0x08000000
+#define A6XX_SP_FS_CTRL_REG0_EARLYPREAMBLE 0x10000000
+#define A6XX_SP_FS_CTRL_REG0_MERGEDREGS 0x80000000
+#define A6XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_UNK13 0x00002000
+#define A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A6XX_SP_FS_BRANCH_COND 0x0000a981
+
+#define REG_A6XX_SP_FS_OBJ_FIRST_EXEC_OFFSET 0x0000a982
+
+#define REG_A6XX_SP_FS_OBJ_START 0x0000a983
+#define A6XX_SP_FS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_FS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_FS_OBJ_START(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OBJ_START__SHIFT) & A6XX_SP_FS_OBJ_START__MASK;
+}
+
+#define REG_A6XX_SP_FS_PVT_MEM_PARAM 0x0000a985
+#define A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val >> 9) << A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_FS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_FS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_FS_PVT_MEM_ADDR 0x0000a986
+#define A6XX_SP_FS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_FS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PVT_MEM_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_FS_PVT_MEM_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_FS_PVT_MEM_SIZE 0x0000a988
+#define A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ return ((val >> 12) << A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_FS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_FS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
+
+#define REG_A6XX_SP_BLEND_CNTL 0x0000a989
+#define A6XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A6XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A6XX_SP_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A6XX_SP_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_SP_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A6XX_SP_BLEND_CNTL_UNK8 0x00000100
+#define A6XX_SP_BLEND_CNTL_DUAL_COLOR_IN_ENABLE 0x00000200
+#define A6XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
+
+#define REG_A6XX_SP_SRGB_CNTL 0x0000a98a
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT0 0x00000001
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT1 0x00000002
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT2 0x00000004
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT3 0x00000008
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT4 0x00000010
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT5 0x00000020
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT6 0x00000040
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT7 0x00000080
+
+#define REG_A6XX_SP_FS_RENDER_COMPONENTS 0x0000a98b
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT0__MASK 0x0000000f
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT0(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT0__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT0__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT1__MASK 0x000000f0
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT1(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT1__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT1__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT2__MASK 0x00000f00
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT2(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT2__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT2__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT3__MASK 0x0000f000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT3(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT3__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT3__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT4__MASK 0x000f0000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT4(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT4__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT4__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT5__MASK 0x00f00000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT5(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT5__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT5__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT6__MASK 0x0f000000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT6(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT6__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT6__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT7__MASK 0xf0000000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT7(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT7__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT7__MASK;
+}
+
+#define REG_A6XX_SP_FS_OUTPUT_CNTL0 0x0000a98c
+#define A6XX_SP_FS_OUTPUT_CNTL0_DUAL_COLOR_IN_ENABLE 0x00000001
+#define A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__MASK 0x0000ff00
+#define A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__SHIFT 8
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__MASK;
+}
+#define A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__MASK 0x00ff0000
+#define A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__SHIFT 16
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID__MASK;
+}
+#define A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__MASK 0xff000000
+#define A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__SHIFT 24
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_STENCILREF_REGID__MASK;
+}
+
+#define REG_A6XX_SP_FS_OUTPUT_CNTL1 0x0000a98d
+#define A6XX_SP_FS_OUTPUT_CNTL1_MRT__MASK 0x0000000f
+#define A6XX_SP_FS_OUTPUT_CNTL1_MRT__SHIFT 0
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL1_MRT(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_CNTL1_MRT__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL1_MRT__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
+#define A6XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff
+#define A6XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_REG_REGID__MASK;
+}
+#define A6XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100
+
+static inline uint32_t REG_A6XX_SP_FS_MRT(uint32_t i0) { return 0x0000a996 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000a996 + 0x1*i0; }
+#define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A6XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
+}
+#define A6XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100
+#define A6XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200
+#define A6XX_SP_FS_MRT_REG_UNK10 0x00000400
+
+#define REG_A6XX_SP_FS_PREFETCH_CNTL 0x0000a99e
+#define A6XX_SP_FS_PREFETCH_CNTL_COUNT__MASK 0x00000007
+#define A6XX_SP_FS_PREFETCH_CNTL_COUNT__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_COUNT(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CNTL_COUNT__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_COUNT__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CNTL_IJ_WRITE_DISABLE 0x00000008
+#define A6XX_SP_FS_PREFETCH_CNTL_UNK4 0x00000010
+#define A6XX_SP_FS_PREFETCH_CNTL_WRITE_COLOR_TO_OUTPUT 0x00000020
+#define A6XX_SP_FS_PREFETCH_CNTL_UNK6__MASK 0x00007fc0
+#define A6XX_SP_FS_PREFETCH_CNTL_UNK6__SHIFT 6
+static inline uint32_t A6XX_SP_FS_PREFETCH_CNTL_UNK6(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CNTL_UNK6__SHIFT) & A6XX_SP_FS_PREFETCH_CNTL_UNK6__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_FS_PREFETCH(uint32_t i0) { return 0x0000a99f + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_PREFETCH_CMD(uint32_t i0) { return 0x0000a99f + 0x1*i0; }
+#define A6XX_SP_FS_PREFETCH_CMD_SRC__MASK 0x0000007f
+#define A6XX_SP_FS_PREFETCH_CMD_SRC__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_SRC(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CMD_SRC__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_SRC__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__MASK 0x00000780
+#define A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__SHIFT 7
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_SAMP_ID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_SAMP_ID__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_TEX_ID__MASK 0x0000f800
+#define A6XX_SP_FS_PREFETCH_CMD_TEX_ID__SHIFT 11
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_TEX_ID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CMD_TEX_ID__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_TEX_ID__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_DST__MASK 0x003f0000
+#define A6XX_SP_FS_PREFETCH_CMD_DST__SHIFT 16
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_DST(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CMD_DST__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_DST__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_WRMASK__MASK 0x03c00000
+#define A6XX_SP_FS_PREFETCH_CMD_WRMASK__SHIFT 22
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_WRMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CMD_WRMASK__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_WRMASK__MASK;
+}
+#define A6XX_SP_FS_PREFETCH_CMD_HALF 0x04000000
+#define A6XX_SP_FS_PREFETCH_CMD_UNK27 0x08000000
+#define A6XX_SP_FS_PREFETCH_CMD_BINDLESS 0x10000000
+#define A6XX_SP_FS_PREFETCH_CMD_CMD__MASK 0xe0000000
+#define A6XX_SP_FS_PREFETCH_CMD_CMD__SHIFT 29
+static inline uint32_t A6XX_SP_FS_PREFETCH_CMD_CMD(enum a6xx_tex_prefetch_cmd val)
+{
+ return ((val) << A6XX_SP_FS_PREFETCH_CMD_CMD__SHIFT) & A6XX_SP_FS_PREFETCH_CMD_CMD__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_FS_BINDLESS_PREFETCH(uint32_t i0) { return 0x0000a9a3 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_BINDLESS_PREFETCH_CMD(uint32_t i0) { return 0x0000a9a3 + 0x1*i0; }
+#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__MASK 0x0000ffff
+#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__SHIFT 0
+static inline uint32_t A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__SHIFT) & A6XX_SP_FS_BINDLESS_PREFETCH_CMD_SAMP_ID__MASK;
+}
+#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__MASK 0xffff0000
+#define A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__SHIFT 16
+static inline uint32_t A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__SHIFT) & A6XX_SP_FS_BINDLESS_PREFETCH_CMD_TEX_ID__MASK;
+}
+
+#define REG_A6XX_SP_FS_TEX_COUNT 0x0000a9a7
+
+#define REG_A6XX_SP_UNKNOWN_A9A8 0x0000a9a8
+
+#define REG_A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET 0x0000a9a9
+#define A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
+{
+ return ((val >> 11) << A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_FS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_SP_CS_CTRL_REG0 0x0000a9b0
+#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADSIZE(enum a6xx_threadsize val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_UNK21 0x00200000
+#define A6XX_SP_CS_CTRL_REG0_UNK22 0x00400000
+#define A6XX_SP_CS_CTRL_REG0_EARLYPREAMBLE 0x00800000
+#define A6XX_SP_CS_CTRL_REG0_MERGEDREGS 0x80000000
+#define A6XX_SP_CS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A6XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_THREADMODE__SHIFT) & A6XX_SP_CS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_UNK13 0x00002000
+#define A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
+#define REG_A6XX_SP_CS_UNKNOWN_A9B1 0x0000a9b1
+#define A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__MASK 0x0000001f
+#define A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__SHIFT 0
+static inline uint32_t A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__SHIFT) & A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE__MASK;
+}
+#define A6XX_SP_CS_UNKNOWN_A9B1_UNK5 0x00000020
+#define A6XX_SP_CS_UNKNOWN_A9B1_UNK6 0x00000040
+
+#define REG_A6XX_SP_CS_BRANCH_COND 0x0000a9b2
+
+#define REG_A6XX_SP_CS_OBJ_FIRST_EXEC_OFFSET 0x0000a9b3
+
+#define REG_A6XX_SP_CS_OBJ_START 0x0000a9b4
+#define A6XX_SP_CS_OBJ_START__MASK 0xffffffff
+#define A6XX_SP_CS_OBJ_START__SHIFT 0
+static inline uint32_t A6XX_SP_CS_OBJ_START(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_OBJ_START__SHIFT) & A6XX_SP_CS_OBJ_START__MASK;
+}
+
+#define REG_A6XX_SP_CS_PVT_MEM_PARAM 0x0000a9b6
+#define A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK 0x000000ff
+#define A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val >> 9) << A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__SHIFT) & A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM__MASK;
+}
+#define A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__SHIFT) & A6XX_SP_CS_PVT_MEM_PARAM_HWSTACKSIZEPERTHREAD__MASK;
+}
+
+#define REG_A6XX_SP_CS_PVT_MEM_ADDR 0x0000a9b7
+#define A6XX_SP_CS_PVT_MEM_ADDR__MASK 0xffffffff
+#define A6XX_SP_CS_PVT_MEM_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_CS_PVT_MEM_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_PVT_MEM_ADDR__SHIFT) & A6XX_SP_CS_PVT_MEM_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_CS_PVT_MEM_SIZE 0x0000a9b9
+#define A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK 0x0003ffff
+#define A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT 0
+static inline uint32_t A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(uint32_t val)
+{
+ return ((val >> 12) << A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__SHIFT) & A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE__MASK;
+}
+#define A6XX_SP_CS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT 0x80000000
+
+#define REG_A6XX_SP_CS_TEX_COUNT 0x0000a9ba
+
+#define REG_A6XX_SP_CS_CONFIG 0x0000a9bb
+#define A6XX_SP_CS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_CS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_CS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_CS_CONFIG_BINDLESS_UBO 0x00000008
+#define A6XX_SP_CS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_CS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_CS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_CS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CONFIG_NTEX__SHIFT) & A6XX_SP_CS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_CS_CONFIG_NSAMP__MASK 0x003e0000
+#define A6XX_SP_CS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_CS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CONFIG_NSAMP__SHIFT) & A6XX_SP_CS_CONFIG_NSAMP__MASK;
+}
+#define A6XX_SP_CS_CONFIG_NIBO__MASK 0x1fc00000
+#define A6XX_SP_CS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_CS_CONFIG_NIBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CONFIG_NIBO__SHIFT) & A6XX_SP_CS_CONFIG_NIBO__MASK;
+}
+
+#define REG_A6XX_SP_CS_INSTRLEN 0x0000a9bc
+
+#define REG_A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET 0x0000a9bd
+#define A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK 0x0007ffff
+#define A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET(uint32_t val)
+{
+ return ((val >> 11) << A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__SHIFT) & A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_SP_CS_CNTL_0 0x0000a9c2
+#define A6XX_SP_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff
+#define A6XX_SP_CS_CNTL_0_WGIDCONSTID__SHIFT 0
+static inline uint32_t A6XX_SP_CS_CNTL_0_WGIDCONSTID(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CNTL_0_WGIDCONSTID__SHIFT) & A6XX_SP_CS_CNTL_0_WGIDCONSTID__MASK;
+}
+#define A6XX_SP_CS_CNTL_0_WGSIZECONSTID__MASK 0x0000ff00
+#define A6XX_SP_CS_CNTL_0_WGSIZECONSTID__SHIFT 8
+static inline uint32_t A6XX_SP_CS_CNTL_0_WGSIZECONSTID(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CNTL_0_WGSIZECONSTID__SHIFT) & A6XX_SP_CS_CNTL_0_WGSIZECONSTID__MASK;
+}
+#define A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID__MASK 0x00ff0000
+#define A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID__SHIFT 16
+static inline uint32_t A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID__SHIFT) & A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID__MASK;
+}
+#define A6XX_SP_CS_CNTL_0_LOCALIDREGID__MASK 0xff000000
+#define A6XX_SP_CS_CNTL_0_LOCALIDREGID__SHIFT 24
+static inline uint32_t A6XX_SP_CS_CNTL_0_LOCALIDREGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CNTL_0_LOCALIDREGID__SHIFT) & A6XX_SP_CS_CNTL_0_LOCALIDREGID__MASK;
+}
+
+#define REG_A6XX_SP_CS_CNTL_1 0x0000a9c3
+#define A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__MASK 0x000000ff
+#define A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT 0
+static inline uint32_t A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT) & A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID__MASK;
+}
+#define A6XX_SP_CS_CNTL_1_SINGLE_SP_CORE 0x00000100
+#define A6XX_SP_CS_CNTL_1_THREADSIZE__MASK 0x00000200
+#define A6XX_SP_CS_CNTL_1_THREADSIZE__SHIFT 9
+static inline uint32_t A6XX_SP_CS_CNTL_1_THREADSIZE(enum a6xx_threadsize val)
+{
+ return ((val) << A6XX_SP_CS_CNTL_1_THREADSIZE__SHIFT) & A6XX_SP_CS_CNTL_1_THREADSIZE__MASK;
+}
+#define A6XX_SP_CS_CNTL_1_THREADSIZE_SCALAR 0x00000400
+
+#define REG_A6XX_SP_FS_TEX_SAMP 0x0000a9e0
+#define A6XX_SP_FS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_FS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_FS_TEX_SAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_TEX_SAMP__SHIFT) & A6XX_SP_FS_TEX_SAMP__MASK;
+}
+
+#define REG_A6XX_SP_CS_TEX_SAMP 0x0000a9e2
+#define A6XX_SP_CS_TEX_SAMP__MASK 0xffffffff
+#define A6XX_SP_CS_TEX_SAMP__SHIFT 0
+static inline uint32_t A6XX_SP_CS_TEX_SAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_TEX_SAMP__SHIFT) & A6XX_SP_CS_TEX_SAMP__MASK;
+}
+
+#define REG_A6XX_SP_FS_TEX_CONST 0x0000a9e4
+#define A6XX_SP_FS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_FS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_FS_TEX_CONST(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_TEX_CONST__SHIFT) & A6XX_SP_FS_TEX_CONST__MASK;
+}
+
+#define REG_A6XX_SP_CS_TEX_CONST 0x0000a9e6
+#define A6XX_SP_CS_TEX_CONST__MASK 0xffffffff
+#define A6XX_SP_CS_TEX_CONST__SHIFT 0
+static inline uint32_t A6XX_SP_CS_TEX_CONST(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_TEX_CONST__SHIFT) & A6XX_SP_CS_TEX_CONST__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_CS_BINDLESS_BASE(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000a9e8 + 0x2*i0; }
+#define A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003
+#define A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT 0
+static inline uint32_t A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bindless_descriptor_size val)
+{
+ return ((val) << A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK;
+}
+#define A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffc
+#define A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2
+static inline uint32_t A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR(uint32_t val)
+{
+ return ((val >> 2) << A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_SP_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_CS_IBO 0x0000a9f2
+#define A6XX_SP_CS_IBO__MASK 0xffffffff
+#define A6XX_SP_CS_IBO__SHIFT 0
+static inline uint32_t A6XX_SP_CS_IBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_IBO__SHIFT) & A6XX_SP_CS_IBO__MASK;
+}
+
+#define REG_A6XX_SP_CS_IBO_COUNT 0x0000aa00
+
+#define REG_A6XX_SP_MODE_CONTROL 0x0000ab00
+#define A6XX_SP_MODE_CONTROL_CONSTANT_DEMOTION_ENABLE 0x00000001
+#define A6XX_SP_MODE_CONTROL_ISAMMODE__MASK 0x00000006
+#define A6XX_SP_MODE_CONTROL_ISAMMODE__SHIFT 1
+static inline uint32_t A6XX_SP_MODE_CONTROL_ISAMMODE(enum a6xx_isam_mode val)
+{
+ return ((val) << A6XX_SP_MODE_CONTROL_ISAMMODE__SHIFT) & A6XX_SP_MODE_CONTROL_ISAMMODE__MASK;
+}
+#define A6XX_SP_MODE_CONTROL_SHARED_CONSTS_ENABLE 0x00000008
+
+#define REG_A6XX_SP_FS_CONFIG 0x0000ab04
+#define A6XX_SP_FS_CONFIG_BINDLESS_TEX 0x00000001
+#define A6XX_SP_FS_CONFIG_BINDLESS_SAMP 0x00000002
+#define A6XX_SP_FS_CONFIG_BINDLESS_IBO 0x00000004
+#define A6XX_SP_FS_CONFIG_BINDLESS_UBO 0x00000008
+#define A6XX_SP_FS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_FS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_FS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_FS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CONFIG_NTEX__SHIFT) & A6XX_SP_FS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_FS_CONFIG_NSAMP__MASK 0x003e0000
+#define A6XX_SP_FS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CONFIG_NSAMP__SHIFT) & A6XX_SP_FS_CONFIG_NSAMP__MASK;
+}
+#define A6XX_SP_FS_CONFIG_NIBO__MASK 0x1fc00000
+#define A6XX_SP_FS_CONFIG_NIBO__SHIFT 22
+static inline uint32_t A6XX_SP_FS_CONFIG_NIBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CONFIG_NIBO__SHIFT) & A6XX_SP_FS_CONFIG_NIBO__MASK;
+}
+
+#define REG_A6XX_SP_FS_INSTRLEN 0x0000ab05
+
+static inline uint32_t REG_A6XX_SP_BINDLESS_BASE(uint32_t i0) { return 0x0000ab10 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_SP_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000ab10 + 0x2*i0; }
+#define A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003
+#define A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT 0
+static inline uint32_t A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bindless_descriptor_size val)
+{
+ return ((val) << A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A6XX_SP_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK;
+}
+#define A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffc
+#define A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2
+static inline uint32_t A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR(uint32_t val)
+{
+ return ((val >> 2) << A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_SP_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_IBO 0x0000ab1a
+#define A6XX_SP_IBO__MASK 0xffffffff
+#define A6XX_SP_IBO__SHIFT 0
+static inline uint32_t A6XX_SP_IBO(uint32_t val)
+{
+ return ((val) << A6XX_SP_IBO__SHIFT) & A6XX_SP_IBO__MASK;
+}
+
+#define REG_A6XX_SP_IBO_COUNT 0x0000ab20
+
+#define REG_A6XX_SP_2D_DST_FORMAT 0x0000acc0
+#define A6XX_SP_2D_DST_FORMAT_NORM 0x00000001
+#define A6XX_SP_2D_DST_FORMAT_SINT 0x00000002
+#define A6XX_SP_2D_DST_FORMAT_UINT 0x00000004
+#define A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__MASK 0x000007f8
+#define A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__SHIFT 3
+static inline uint32_t A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__SHIFT) & A6XX_SP_2D_DST_FORMAT_COLOR_FORMAT__MASK;
+}
+#define A6XX_SP_2D_DST_FORMAT_SRGB 0x00000800
+#define A6XX_SP_2D_DST_FORMAT_MASK__MASK 0x0000f000
+#define A6XX_SP_2D_DST_FORMAT_MASK__SHIFT 12
+static inline uint32_t A6XX_SP_2D_DST_FORMAT_MASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_2D_DST_FORMAT_MASK__SHIFT) & A6XX_SP_2D_DST_FORMAT_MASK__MASK;
+}
+
+#define REG_A6XX_SP_DBG_ECO_CNTL 0x0000ae00
+
+#define REG_A6XX_SP_ADDR_MODE_CNTL 0x0000ae01
+
+#define REG_A6XX_SP_NC_MODE_CNTL 0x0000ae02
+
+#define REG_A6XX_SP_CHICKEN_BITS 0x0000ae03
+
+#define REG_A6XX_SP_FLOAT_CNTL 0x0000ae04
+#define A6XX_SP_FLOAT_CNTL_F16_NO_INF 0x00000008
+
+#define REG_A6XX_SP_PERFCTR_ENABLE 0x0000ae0f
+#define A6XX_SP_PERFCTR_ENABLE_VS 0x00000001
+#define A6XX_SP_PERFCTR_ENABLE_HS 0x00000002
+#define A6XX_SP_PERFCTR_ENABLE_DS 0x00000004
+#define A6XX_SP_PERFCTR_ENABLE_GS 0x00000008
+#define A6XX_SP_PERFCTR_ENABLE_FS 0x00000010
+#define A6XX_SP_PERFCTR_ENABLE_CS 0x00000020
+
+static inline uint32_t REG_A6XX_SP_PERFCTR_SP_SEL(uint32_t i0) { return 0x0000ae10 + 0x1*i0; }
+
+static inline uint32_t REG_A7XX_SP_PERFCTR_HLSQ_SEL(uint32_t i0) { return 0x0000ae60 + 0x1*i0; }
+
+#define REG_A7XX_SP_READ_SEL 0x0000ae6d
+
+static inline uint32_t REG_A7XX_SP_PERFCTR_SP_SEL(uint32_t i0) { return 0x0000ae80 + 0x1*i0; }
+
+#define REG_A6XX_SP_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE 0x0000be22
+
+#define REG_A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR 0x0000b180
+#define A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR__MASK 0xffffffff
+#define A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR__SHIFT) & A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_UNKNOWN_B182 0x0000b182
+
+#define REG_A6XX_SP_UNKNOWN_B183 0x0000b183
+
+#define REG_A6XX_SP_UNKNOWN_B190 0x0000b190
+
+#define REG_A6XX_SP_UNKNOWN_B191 0x0000b191
+
+#define REG_A6XX_SP_TP_RAS_MSAA_CNTL 0x0000b300
+#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__MASK 0x0000000c
+#define A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__SHIFT 2
+static inline uint32_t A6XX_SP_TP_RAS_MSAA_CNTL_UNK2(uint32_t val)
+{
+ return ((val) << A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__SHIFT) & A6XX_SP_TP_RAS_MSAA_CNTL_UNK2__MASK;
+}
+
+#define REG_A6XX_SP_TP_DEST_MSAA_CNTL 0x0000b301
+#define A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR 0x0000b302
+#define A6XX_SP_TP_BORDER_COLOR_BASE_ADDR__MASK 0xffffffff
+#define A6XX_SP_TP_BORDER_COLOR_BASE_ADDR__SHIFT 0
+static inline uint32_t A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_SP_TP_BORDER_COLOR_BASE_ADDR__SHIFT) & A6XX_SP_TP_BORDER_COLOR_BASE_ADDR__MASK;
+}
+
+#define REG_A6XX_SP_TP_SAMPLE_CONFIG 0x0000b304
+#define A6XX_SP_TP_SAMPLE_CONFIG_UNK0 0x00000001
+#define A6XX_SP_TP_SAMPLE_CONFIG_LOCATION_ENABLE 0x00000002
+
+#define REG_A6XX_SP_TP_SAMPLE_LOCATION_0 0x0000b305
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK 0x0000000f
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT 0
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK 0x000000f0
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT 4
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_0_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK 0x00000f00
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT 8
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK 0x0000f000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT 12
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_1_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK 0x000f0000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT 16
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK 0x00f00000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT 20
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_2_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK 0x0f000000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT 24
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK 0xf0000000
+#define A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT 28
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_0_SAMPLE_3_Y__MASK;
+}
+
+#define REG_A6XX_SP_TP_SAMPLE_LOCATION_1 0x0000b306
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK 0x0000000f
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT 0
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK 0x000000f0
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT 4
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_0_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK 0x00000f00
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT 8
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK 0x0000f000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT 12
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_1_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK 0x000f0000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT 16
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK 0x00f00000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT 20
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_2_Y__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK 0x0f000000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT 24
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_X__MASK;
+}
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK 0xf0000000
+#define A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT 28
+static inline uint32_t A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__SHIFT) & A6XX_SP_TP_SAMPLE_LOCATION_1_SAMPLE_3_Y__MASK;
+}
+
+#define REG_A6XX_SP_TP_WINDOW_OFFSET 0x0000b307
+#define A6XX_SP_TP_WINDOW_OFFSET_X__MASK 0x00003fff
+#define A6XX_SP_TP_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_SP_TP_WINDOW_OFFSET_X__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_SP_TP_WINDOW_OFFSET_Y__MASK 0x3fff0000
+#define A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A6XX_SP_TP_MODE_CNTL 0x0000b309
+#define A6XX_SP_TP_MODE_CNTL_ISAMMODE__MASK 0x00000003
+#define A6XX_SP_TP_MODE_CNTL_ISAMMODE__SHIFT 0
+static inline uint32_t A6XX_SP_TP_MODE_CNTL_ISAMMODE(enum a6xx_isam_mode val)
+{
+ return ((val) << A6XX_SP_TP_MODE_CNTL_ISAMMODE__SHIFT) & A6XX_SP_TP_MODE_CNTL_ISAMMODE__MASK;
+}
+#define A6XX_SP_TP_MODE_CNTL_UNK3__MASK 0x000000fc
+#define A6XX_SP_TP_MODE_CNTL_UNK3__SHIFT 2
+static inline uint32_t A6XX_SP_TP_MODE_CNTL_UNK3(uint32_t val)
+{
+ return ((val) << A6XX_SP_TP_MODE_CNTL_UNK3__SHIFT) & A6XX_SP_TP_MODE_CNTL_UNK3__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC_INFO 0x0000b4c0
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT(enum a6xx_format val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
+#define A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_FLAGS 0x00001000
+#define A6XX_SP_PS_2D_SRC_INFO_SRGB 0x00002000
+#define A6XX_SP_PS_2D_SRC_INFO_SAMPLES__MASK 0x0000c000
+#define A6XX_SP_PS_2D_SRC_INFO_SAMPLES__SHIFT 14
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_SAMPLES__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_SAMPLES__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_FILTER 0x00010000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK17 0x00020000
+#define A6XX_SP_PS_2D_SRC_INFO_SAMPLES_AVERAGE 0x00040000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK19 0x00080000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK20 0x00100000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK21 0x00200000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK22 0x00400000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK23__MASK 0x07800000
+#define A6XX_SP_PS_2D_SRC_INFO_UNK23__SHIFT 23
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_UNK23(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_UNK23__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_UNK23__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_UNK28 0x10000000
+
+#define REG_A6XX_SP_PS_2D_SRC_SIZE 0x0000b4c1
+#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK 0x00007fff
+#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK 0x3fff8000
+#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT 15
+static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC 0x0000b4c2
+#define A6XX_SP_PS_2D_SRC__MASK 0xffffffff
+#define A6XX_SP_PS_2D_SRC__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC__SHIFT) & A6XX_SP_PS_2D_SRC__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC_PITCH 0x0000b4c4
+#define A6XX_SP_PS_2D_SRC_PITCH_UNK0__MASK 0x000001ff
+#define A6XX_SP_PS_2D_SRC_PITCH_UNK0__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_UNK0(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_PITCH_UNK0__SHIFT) & A6XX_SP_PS_2D_SRC_PITCH_UNK0__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK 0x00fffe00
+#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT 9
+static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC_PLANE1 0x0000b4c5
+#define A6XX_SP_PS_2D_SRC_PLANE1__MASK 0xffffffff
+#define A6XX_SP_PS_2D_SRC_PLANE1__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_PLANE1(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_PLANE1__SHIFT) & A6XX_SP_PS_2D_SRC_PLANE1__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC_PLANE_PITCH 0x0000b4c7
+#define A6XX_SP_PS_2D_SRC_PLANE_PITCH__MASK 0x00000fff
+#define A6XX_SP_PS_2D_SRC_PLANE_PITCH__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_PLANE_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_SP_PS_2D_SRC_PLANE_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PLANE_PITCH__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC_PLANE2 0x0000b4c8
+#define A6XX_SP_PS_2D_SRC_PLANE2__MASK 0xffffffff
+#define A6XX_SP_PS_2D_SRC_PLANE2__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_PLANE2(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_PLANE2__SHIFT) & A6XX_SP_PS_2D_SRC_PLANE2__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC_FLAGS 0x0000b4ca
+#define A6XX_SP_PS_2D_SRC_FLAGS__MASK 0xffffffff
+#define A6XX_SP_PS_2D_SRC_FLAGS__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_FLAGS(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_FLAGS__SHIFT) & A6XX_SP_PS_2D_SRC_FLAGS__MASK;
+}
+
+#define REG_A6XX_SP_PS_2D_SRC_FLAGS_PITCH 0x0000b4cc
+#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH__MASK 0x000000ff
+#define A6XX_SP_PS_2D_SRC_FLAGS_PITCH__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_FLAGS_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_SP_PS_2D_SRC_FLAGS_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_FLAGS_PITCH__MASK;
+}
+
+#define REG_A6XX_SP_PS_UNKNOWN_B4CD 0x0000b4cd
+
+#define REG_A6XX_SP_PS_UNKNOWN_B4CE 0x0000b4ce
+
+#define REG_A6XX_SP_PS_UNKNOWN_B4CF 0x0000b4cf
+
+#define REG_A6XX_SP_PS_UNKNOWN_B4D0 0x0000b4d0
+
+#define REG_A6XX_SP_WINDOW_OFFSET 0x0000b4d1
+#define A6XX_SP_WINDOW_OFFSET_X__MASK 0x00003fff
+#define A6XX_SP_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_SP_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_SP_WINDOW_OFFSET_X__SHIFT) & A6XX_SP_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_SP_WINDOW_OFFSET_Y__MASK 0x3fff0000
+#define A6XX_SP_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_SP_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_SP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A6XX_TPL1_DBG_ECO_CNTL 0x0000b600
+
+#define REG_A6XX_TPL1_ADDR_MODE_CNTL 0x0000b601
+
+#define REG_A6XX_TPL1_UNKNOWN_B602 0x0000b602
+
+#define REG_A6XX_TPL1_NC_MODE_CNTL 0x0000b604
+#define A6XX_TPL1_NC_MODE_CNTL_MODE 0x00000001
+#define A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__MASK 0x00000006
+#define A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__SHIFT 1
+static inline uint32_t A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT(uint32_t val)
+{
+ return ((val) << A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__SHIFT) & A6XX_TPL1_NC_MODE_CNTL_LOWER_BIT__MASK;
+}
+#define A6XX_TPL1_NC_MODE_CNTL_MIN_ACCESS_LENGTH 0x00000008
+#define A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__MASK 0x00000010
+#define A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__SHIFT 4
+static inline uint32_t A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT(uint32_t val)
+{
+ return ((val) << A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__SHIFT) & A6XX_TPL1_NC_MODE_CNTL_UPPER_BIT__MASK;
+}
+#define A6XX_TPL1_NC_MODE_CNTL_UNK6__MASK 0x000000c0
+#define A6XX_TPL1_NC_MODE_CNTL_UNK6__SHIFT 6
+static inline uint32_t A6XX_TPL1_NC_MODE_CNTL_UNK6(uint32_t val)
+{
+ return ((val) << A6XX_TPL1_NC_MODE_CNTL_UNK6__SHIFT) & A6XX_TPL1_NC_MODE_CNTL_UNK6__MASK;
+}
+
+#define REG_A6XX_TPL1_UNKNOWN_B605 0x0000b605
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0 0x0000b608
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1 0x0000b609
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2 0x0000b60a
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3 0x0000b60b
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4 0x0000b60c
+
+static inline uint32_t REG_A6XX_TPL1_PERFCTR_TP_SEL(uint32_t i0) { return 0x0000b610 + 0x1*i0; }
+
+#define REG_A6XX_HLSQ_VS_CNTL 0x0000b800
+#define A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_VS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK;
+}
+#define A6XX_HLSQ_VS_CNTL_ENABLED 0x00000100
+
+#define REG_A6XX_HLSQ_HS_CNTL 0x0000b801
+#define A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_HS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK;
+}
+#define A6XX_HLSQ_HS_CNTL_ENABLED 0x00000100
+
+#define REG_A6XX_HLSQ_DS_CNTL 0x0000b802
+#define A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_DS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK;
+}
+#define A6XX_HLSQ_DS_CNTL_ENABLED 0x00000100
+
+#define REG_A6XX_HLSQ_GS_CNTL 0x0000b803
+#define A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_GS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK;
+}
+#define A6XX_HLSQ_GS_CNTL_ENABLED 0x00000100
+
+#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_CMD 0x0000b820
+
+#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR 0x0000b821
+#define A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR__MASK 0xffffffff
+#define A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR__SHIFT 0
+static inline uint32_t A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR__SHIFT) & A6XX_HLSQ_LOAD_STATE_GEOM_EXT_SRC_ADDR__MASK;
+}
+
+#define REG_A6XX_HLSQ_LOAD_STATE_GEOM_DATA 0x0000b823
+
+#define REG_A6XX_HLSQ_FS_CNTL_0 0x0000b980
+#define A6XX_HLSQ_FS_CNTL_0_THREADSIZE__MASK 0x00000001
+#define A6XX_HLSQ_FS_CNTL_0_THREADSIZE__SHIFT 0
+static inline uint32_t A6XX_HLSQ_FS_CNTL_0_THREADSIZE(enum a6xx_threadsize val)
+{
+ return ((val) << A6XX_HLSQ_FS_CNTL_0_THREADSIZE__SHIFT) & A6XX_HLSQ_FS_CNTL_0_THREADSIZE__MASK;
+}
+#define A6XX_HLSQ_FS_CNTL_0_VARYINGS 0x00000002
+#define A6XX_HLSQ_FS_CNTL_0_UNK2__MASK 0x00000ffc
+#define A6XX_HLSQ_FS_CNTL_0_UNK2__SHIFT 2
+static inline uint32_t A6XX_HLSQ_FS_CNTL_0_UNK2(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_FS_CNTL_0_UNK2__SHIFT) & A6XX_HLSQ_FS_CNTL_0_UNK2__MASK;
+}
+
+#define REG_A6XX_HLSQ_UNKNOWN_B981 0x0000b981
+
+#define REG_A6XX_HLSQ_CONTROL_1_REG 0x0000b982
+
+#define REG_A7XX_HLSQ_CONTROL_1_REG 0x0000a9c7
+
+#define REG_A6XX_HLSQ_CONTROL_2_REG 0x0000b983
+#define A6XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff
+#define A6XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16
+static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK;
+}
+#define A6XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK 0xff000000
+#define A6XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT 24
+static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_CENTERRHW(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK;
+}
+
+#define REG_A7XX_HLSQ_CONTROL_2_REG 0x0000a9c8
+#define A7XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff
+#define A7XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0
+static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
+#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00
+#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8
+static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK;
+}
+#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000
+#define A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16
+static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK;
+}
+#define A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK 0xff000000
+#define A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT 24
+static inline uint32_t A7XX_HLSQ_CONTROL_2_REG_CENTERRHW(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__SHIFT) & A7XX_HLSQ_CONTROL_2_REG_CENTERRHW__MASK;
+}
+
+#define REG_A6XX_HLSQ_CONTROL_3_REG 0x0000b984
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK;
+}
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK;
+}
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16
+static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000
+#define A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24
+static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK;
+}
+
+#define REG_A7XX_HLSQ_CONTROL_3_REG 0x0000a9c9
+#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK 0x000000ff
+#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT 0
+static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_PIXEL__MASK;
+}
+#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK 0x0000ff00
+#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT 8
+static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_PIXEL__MASK;
+}
+#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK 0x00ff0000
+#define A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT 16
+static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_PERSP_CENTROID__MASK;
+}
+#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK 0xff000000
+#define A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT 24
+static inline uint32_t A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__SHIFT) & A7XX_HLSQ_CONTROL_3_REG_IJ_LINEAR_CENTROID__MASK;
+}
+
+#define REG_A6XX_HLSQ_CONTROL_4_REG 0x0000b985
+#define A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff
+#define A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK;
+}
+#define A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK 0x0000ff00
+#define A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK;
+}
+#define A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000
+#define A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16
+static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000
+#define A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24
+static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
+}
+
+#define REG_A7XX_HLSQ_CONTROL_4_REG 0x0000a9ca
+#define A7XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK 0x000000ff
+#define A7XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT 0
+static inline uint32_t A7XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__SHIFT) & A7XX_HLSQ_CONTROL_4_REG_IJ_PERSP_SAMPLE__MASK;
+}
+#define A7XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK 0x0000ff00
+#define A7XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT 8
+static inline uint32_t A7XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__SHIFT) & A7XX_HLSQ_CONTROL_4_REG_IJ_LINEAR_SAMPLE__MASK;
+}
+#define A7XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000
+#define A7XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16
+static inline uint32_t A7XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A7XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK;
+}
+#define A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000
+#define A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24
+static inline uint32_t A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A7XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
+}
+
+#define REG_A6XX_HLSQ_CONTROL_5_REG 0x0000b986
+#define A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK 0x000000ff
+#define A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT) & A6XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK 0x0000ff00
+#define A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT) & A6XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK;
+}
+
+#define REG_A7XX_HLSQ_CONTROL_5_REG 0x0000a9cb
+#define A7XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK 0x000000ff
+#define A7XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT 0
+static inline uint32_t A7XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__SHIFT) & A7XX_HLSQ_CONTROL_5_REG_LINELENGTHREGID__MASK;
+}
+#define A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK 0x0000ff00
+#define A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT 8
+static inline uint32_t A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID(uint32_t val)
+{
+ return ((val) << A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__SHIFT) & A7XX_HLSQ_CONTROL_5_REG_FOVEATIONQUALITYREGID__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_CNTL 0x0000b987
+#define A6XX_HLSQ_CS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_CS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_CS_CNTL_CONSTLEN__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_ENABLED 0x00000100
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_0 0x0000b990
+#define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK 0x00000003
+#define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK;
+}
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT 2
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK;
+}
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT 12
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK;
+}
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_1 0x0000b991
+#define A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT) & A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_2 0x0000b992
+#define A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT) & A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_3 0x0000b993
+#define A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT) & A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_4 0x0000b994
+#define A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT) & A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_5 0x0000b995
+#define A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT) & A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_6 0x0000b996
+#define A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT) & A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_CNTL_0 0x0000b997
+#define A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff
+#define A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__MASK 0x0000ff00
+#define A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__MASK 0x00ff0000
+#define A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__SHIFT 16
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK 0xff000000
+#define A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT 24
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_CNTL_1 0x0000b998
+#define A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__MASK 0x000000ff
+#define A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__SHIFT) & A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_1_SINGLE_SP_CORE 0x00000100
+#define A6XX_HLSQ_CS_CNTL_1_THREADSIZE__MASK 0x00000200
+#define A6XX_HLSQ_CS_CNTL_1_THREADSIZE__SHIFT 9
+static inline uint32_t A6XX_HLSQ_CS_CNTL_1_THREADSIZE(enum a6xx_threadsize val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_1_THREADSIZE__SHIFT) & A6XX_HLSQ_CS_CNTL_1_THREADSIZE__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_1_THREADSIZE_SCALAR 0x00000400
+
+#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_X 0x0000b999
+
+#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000b99a
+
+#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000b99b
+
+#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_CMD 0x0000b9a0
+
+#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR 0x0000b9a1
+#define A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR__MASK 0xffffffff
+#define A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR__SHIFT 0
+static inline uint32_t A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR__SHIFT) & A6XX_HLSQ_LOAD_STATE_FRAG_EXT_SRC_ADDR__MASK;
+}
+
+#define REG_A6XX_HLSQ_LOAD_STATE_FRAG_DATA 0x0000b9a3
+
+static inline uint32_t REG_A6XX_HLSQ_CS_BINDLESS_BASE(uint32_t i0) { return 0x0000b9c0 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000b9c0 + 0x2*i0; }
+#define A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003
+#define A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bindless_descriptor_size val)
+{
+ return ((val) << A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK;
+}
+#define A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffc
+#define A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2
+static inline uint32_t A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_HLSQ_CS_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_UNKNOWN_B9D0 0x0000b9d0
+#define A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE__MASK 0x0000001f
+#define A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE__SHIFT) & A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE__MASK;
+}
+#define A6XX_HLSQ_CS_UNKNOWN_B9D0_UNK5 0x00000020
+#define A6XX_HLSQ_CS_UNKNOWN_B9D0_UNK6 0x00000040
+
+#define REG_A6XX_HLSQ_DRAW_CMD 0x0000bb00
+#define A6XX_HLSQ_DRAW_CMD_STATE_ID__MASK 0x000000ff
+#define A6XX_HLSQ_DRAW_CMD_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_DRAW_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_DRAW_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_DRAW_CMD_STATE_ID__MASK;
+}
+
+#define REG_A6XX_HLSQ_DISPATCH_CMD 0x0000bb01
+#define A6XX_HLSQ_DISPATCH_CMD_STATE_ID__MASK 0x000000ff
+#define A6XX_HLSQ_DISPATCH_CMD_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_DISPATCH_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_DISPATCH_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_DISPATCH_CMD_STATE_ID__MASK;
+}
+
+#define REG_A6XX_HLSQ_EVENT_CMD 0x0000bb02
+#define A6XX_HLSQ_EVENT_CMD_STATE_ID__MASK 0x00ff0000
+#define A6XX_HLSQ_EVENT_CMD_STATE_ID__SHIFT 16
+static inline uint32_t A6XX_HLSQ_EVENT_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_EVENT_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_EVENT_CMD_STATE_ID__MASK;
+}
+#define A6XX_HLSQ_EVENT_CMD_EVENT__MASK 0x0000007f
+#define A6XX_HLSQ_EVENT_CMD_EVENT__SHIFT 0
+static inline uint32_t A6XX_HLSQ_EVENT_CMD_EVENT(enum vgt_event_type val)
+{
+ return ((val) << A6XX_HLSQ_EVENT_CMD_EVENT__SHIFT) & A6XX_HLSQ_EVENT_CMD_EVENT__MASK;
+}
+
+#define REG_A6XX_HLSQ_INVALIDATE_CMD 0x0000bb08
+#define A6XX_HLSQ_INVALIDATE_CMD_VS_STATE 0x00000001
+#define A6XX_HLSQ_INVALIDATE_CMD_HS_STATE 0x00000002
+#define A6XX_HLSQ_INVALIDATE_CMD_DS_STATE 0x00000004
+#define A6XX_HLSQ_INVALIDATE_CMD_GS_STATE 0x00000008
+#define A6XX_HLSQ_INVALIDATE_CMD_FS_STATE 0x00000010
+#define A6XX_HLSQ_INVALIDATE_CMD_CS_STATE 0x00000020
+#define A6XX_HLSQ_INVALIDATE_CMD_CS_IBO 0x00000040
+#define A6XX_HLSQ_INVALIDATE_CMD_GFX_IBO 0x00000080
+#define A6XX_HLSQ_INVALIDATE_CMD_CS_SHARED_CONST 0x00080000
+#define A6XX_HLSQ_INVALIDATE_CMD_GFX_SHARED_CONST 0x00000100
+#define A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__MASK 0x00003e00
+#define A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__SHIFT 9
+static inline uint32_t A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__SHIFT) & A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS__MASK;
+}
+#define A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__MASK 0x0007c000
+#define A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__SHIFT 14
+static inline uint32_t A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__SHIFT) & A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS__MASK;
+}
+
+#define REG_A6XX_HLSQ_FS_CNTL 0x0000bb10
+#define A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_FS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK;
+}
+#define A6XX_HLSQ_FS_CNTL_ENABLED 0x00000100
+
+#define REG_A6XX_HLSQ_SHARED_CONSTS 0x0000bb11
+#define A6XX_HLSQ_SHARED_CONSTS_ENABLE 0x00000001
+
+static inline uint32_t REG_A6XX_HLSQ_BINDLESS_BASE(uint32_t i0) { return 0x0000bb20 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR(uint32_t i0) { return 0x0000bb20 + 0x2*i0; }
+#define A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK 0x00000003
+#define A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT 0
+static inline uint32_t A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE(enum a6xx_bindless_descriptor_size val)
+{
+ return ((val) << A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__SHIFT) & A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_DESC_SIZE__MASK;
+}
+#define A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK 0xfffffffc
+#define A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT 2
+static inline uint32_t A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__SHIFT) & A6XX_HLSQ_BINDLESS_BASE_DESCRIPTOR_ADDR__MASK;
+}
+
+#define REG_A6XX_HLSQ_2D_EVENT_CMD 0x0000bd80
+#define A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__MASK 0x0000ff00
+#define A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__SHIFT 8
+static inline uint32_t A6XX_HLSQ_2D_EVENT_CMD_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__SHIFT) & A6XX_HLSQ_2D_EVENT_CMD_STATE_ID__MASK;
+}
+#define A6XX_HLSQ_2D_EVENT_CMD_EVENT__MASK 0x0000007f
+#define A6XX_HLSQ_2D_EVENT_CMD_EVENT__SHIFT 0
+static inline uint32_t A6XX_HLSQ_2D_EVENT_CMD_EVENT(enum vgt_event_type val)
+{
+ return ((val) << A6XX_HLSQ_2D_EVENT_CMD_EVENT__SHIFT) & A6XX_HLSQ_2D_EVENT_CMD_EVENT__MASK;
+}
+
+#define REG_A6XX_HLSQ_UNKNOWN_BE00 0x0000be00
+
+#define REG_A6XX_HLSQ_UNKNOWN_BE01 0x0000be01
+
+#define REG_A6XX_HLSQ_DBG_ECO_CNTL 0x0000be04
+
+#define REG_A6XX_HLSQ_ADDR_MODE_CNTL 0x0000be05
+
+#define REG_A6XX_HLSQ_UNKNOWN_BE08 0x0000be08
+
+static inline uint32_t REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL(uint32_t i0) { return 0x0000be10 + 0x1*i0; }
+
+#define REG_A6XX_HLSQ_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE 0x0000be22
+
+#define REG_A7XX_SP_AHB_READ_APERTURE 0x0000c000
+
+#define REG_A6XX_CP_EVENT_START 0x0000d600
+#define A6XX_CP_EVENT_START_STATE_ID__MASK 0x000000ff
+#define A6XX_CP_EVENT_START_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_CP_EVENT_START_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_CP_EVENT_START_STATE_ID__SHIFT) & A6XX_CP_EVENT_START_STATE_ID__MASK;
+}
+
+#define REG_A6XX_CP_EVENT_END 0x0000d601
+#define A6XX_CP_EVENT_END_STATE_ID__MASK 0x000000ff
+#define A6XX_CP_EVENT_END_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_CP_EVENT_END_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_CP_EVENT_END_STATE_ID__SHIFT) & A6XX_CP_EVENT_END_STATE_ID__MASK;
+}
+
+#define REG_A6XX_CP_2D_EVENT_START 0x0000d700
+#define A6XX_CP_2D_EVENT_START_STATE_ID__MASK 0x000000ff
+#define A6XX_CP_2D_EVENT_START_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_CP_2D_EVENT_START_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_CP_2D_EVENT_START_STATE_ID__SHIFT) & A6XX_CP_2D_EVENT_START_STATE_ID__MASK;
+}
+
+#define REG_A6XX_CP_2D_EVENT_END 0x0000d701
+#define A6XX_CP_2D_EVENT_END_STATE_ID__MASK 0x000000ff
+#define A6XX_CP_2D_EVENT_END_STATE_ID__SHIFT 0
+static inline uint32_t A6XX_CP_2D_EVENT_END_STATE_ID(uint32_t val)
+{
+ return ((val) << A6XX_CP_2D_EVENT_END_STATE_ID__SHIFT) & A6XX_CP_2D_EVENT_END_STATE_ID__MASK;
+}
+
+#define REG_A6XX_TEX_SAMP_0 0x00000000
+#define A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
+#define A6XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
+#define A6XX_TEX_SAMP_0_XY_MAG__SHIFT 1
+static inline uint32_t A6XX_TEX_SAMP_0_XY_MAG(enum a6xx_tex_filter val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_XY_MAG__SHIFT) & A6XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A6XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
+#define A6XX_TEX_SAMP_0_XY_MIN__SHIFT 3
+static inline uint32_t A6XX_TEX_SAMP_0_XY_MIN(enum a6xx_tex_filter val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_XY_MIN__SHIFT) & A6XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A6XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
+#define A6XX_TEX_SAMP_0_WRAP_S__SHIFT 5
+static inline uint32_t A6XX_TEX_SAMP_0_WRAP_S(enum a6xx_tex_clamp val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_WRAP_S__SHIFT) & A6XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A6XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
+#define A6XX_TEX_SAMP_0_WRAP_T__SHIFT 8
+static inline uint32_t A6XX_TEX_SAMP_0_WRAP_T(enum a6xx_tex_clamp val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_WRAP_T__SHIFT) & A6XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A6XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
+#define A6XX_TEX_SAMP_0_WRAP_R__SHIFT 11
+static inline uint32_t A6XX_TEX_SAMP_0_WRAP_R(enum a6xx_tex_clamp val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_WRAP_R__SHIFT) & A6XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A6XX_TEX_SAMP_0_ANISO__MASK 0x0001c000
+#define A6XX_TEX_SAMP_0_ANISO__SHIFT 14
+static inline uint32_t A6XX_TEX_SAMP_0_ANISO(enum a6xx_tex_aniso val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_ANISO__SHIFT) & A6XX_TEX_SAMP_0_ANISO__MASK;
+}
+#define A6XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
+#define A6XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
+static inline uint32_t A6XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 256.0))) << A6XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A6XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
+
+#define REG_A6XX_TEX_SAMP_1 0x00000001
+#define A6XX_TEX_SAMP_1_CLAMPENABLE 0x00000001
+#define A6XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
+#define A6XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
+static inline uint32_t A6XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A6XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
+#define A6XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
+#define A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
+#define A6XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
+#define A6XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
+static inline uint32_t A6XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A6XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A6XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A6XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
+#define A6XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
+static inline uint32_t A6XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A6XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A6XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
+
+#define REG_A6XX_TEX_SAMP_2 0x00000002
+#define A6XX_TEX_SAMP_2_REDUCTION_MODE__MASK 0x00000003
+#define A6XX_TEX_SAMP_2_REDUCTION_MODE__SHIFT 0
+static inline uint32_t A6XX_TEX_SAMP_2_REDUCTION_MODE(enum a6xx_reduction_mode val)
+{
+ return ((val) << A6XX_TEX_SAMP_2_REDUCTION_MODE__SHIFT) & A6XX_TEX_SAMP_2_REDUCTION_MODE__MASK;
+}
+#define A6XX_TEX_SAMP_2_CHROMA_LINEAR 0x00000020
+#define A6XX_TEX_SAMP_2_BCOLOR__MASK 0xffffff80
+#define A6XX_TEX_SAMP_2_BCOLOR__SHIFT 7
+static inline uint32_t A6XX_TEX_SAMP_2_BCOLOR(uint32_t val)
+{
+ return ((val) << A6XX_TEX_SAMP_2_BCOLOR__SHIFT) & A6XX_TEX_SAMP_2_BCOLOR__MASK;
+}
+
+#define REG_A6XX_TEX_SAMP_3 0x00000003
+
+#define REG_A6XX_TEX_CONST_0 0x00000000
+#define A6XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003
+#define A6XX_TEX_CONST_0_TILE_MODE__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_0_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_TEX_CONST_0_TILE_MODE__SHIFT) & A6XX_TEX_CONST_0_TILE_MODE__MASK;
+}
+#define A6XX_TEX_CONST_0_SRGB 0x00000004
+#define A6XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A6XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_X(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_X__SHIFT) & A6XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A6XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A6XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_Y(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A6XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A6XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A6XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_Z(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A6XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A6XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A6XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_W(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_W__SHIFT) & A6XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A6XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
+#define A6XX_TEX_CONST_0_MIPLVLS__SHIFT 16
+static inline uint32_t A6XX_TEX_CONST_0_MIPLVLS(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_0_MIPLVLS__SHIFT) & A6XX_TEX_CONST_0_MIPLVLS__MASK;
+}
+#define A6XX_TEX_CONST_0_CHROMA_MIDPOINT_X 0x00010000
+#define A6XX_TEX_CONST_0_CHROMA_MIDPOINT_Y 0x00040000
+#define A6XX_TEX_CONST_0_SAMPLES__MASK 0x00300000
+#define A6XX_TEX_CONST_0_SAMPLES__SHIFT 20
+static inline uint32_t A6XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SAMPLES__SHIFT) & A6XX_TEX_CONST_0_SAMPLES__MASK;
+}
+#define A6XX_TEX_CONST_0_FMT__MASK 0x3fc00000
+#define A6XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A6XX_TEX_CONST_0_FMT(enum a6xx_format val)
+{
+ return ((val) << A6XX_TEX_CONST_0_FMT__SHIFT) & A6XX_TEX_CONST_0_FMT__MASK;
+}
+#define A6XX_TEX_CONST_0_SWAP__MASK 0xc0000000
+#define A6XX_TEX_CONST_0_SWAP__SHIFT 30
+static inline uint32_t A6XX_TEX_CONST_0_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWAP__SHIFT) & A6XX_TEX_CONST_0_SWAP__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_1 0x00000001
+#define A6XX_TEX_CONST_1_WIDTH__MASK 0x00007fff
+#define A6XX_TEX_CONST_1_WIDTH__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_1_WIDTH__SHIFT) & A6XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A6XX_TEX_CONST_1_HEIGHT__MASK 0x3fff8000
+#define A6XX_TEX_CONST_1_HEIGHT__SHIFT 15
+static inline uint32_t A6XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_1_HEIGHT__SHIFT) & A6XX_TEX_CONST_1_HEIGHT__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_2 0x00000002
+#define A6XX_TEX_CONST_2_STRUCTSIZETEXELS__MASK 0x0000fff0
+#define A6XX_TEX_CONST_2_STRUCTSIZETEXELS__SHIFT 4
+static inline uint32_t A6XX_TEX_CONST_2_STRUCTSIZETEXELS(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_2_STRUCTSIZETEXELS__SHIFT) & A6XX_TEX_CONST_2_STRUCTSIZETEXELS__MASK;
+}
+#define A6XX_TEX_CONST_2_STARTOFFSETTEXELS__MASK 0x003f0000
+#define A6XX_TEX_CONST_2_STARTOFFSETTEXELS__SHIFT 16
+static inline uint32_t A6XX_TEX_CONST_2_STARTOFFSETTEXELS(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_2_STARTOFFSETTEXELS__SHIFT) & A6XX_TEX_CONST_2_STARTOFFSETTEXELS__MASK;
+}
+#define A6XX_TEX_CONST_2_PITCHALIGN__MASK 0x0000000f
+#define A6XX_TEX_CONST_2_PITCHALIGN__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_2_PITCHALIGN(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_2_PITCHALIGN__SHIFT) & A6XX_TEX_CONST_2_PITCHALIGN__MASK;
+}
+#define A6XX_TEX_CONST_2_PITCH__MASK 0x1fffff80
+#define A6XX_TEX_CONST_2_PITCH__SHIFT 7
+static inline uint32_t A6XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_2_PITCH__SHIFT) & A6XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A6XX_TEX_CONST_2_TYPE__MASK 0xe0000000
+#define A6XX_TEX_CONST_2_TYPE__SHIFT 29
+static inline uint32_t A6XX_TEX_CONST_2_TYPE(enum a6xx_tex_type val)
+{
+ return ((val) << A6XX_TEX_CONST_2_TYPE__SHIFT) & A6XX_TEX_CONST_2_TYPE__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_3 0x00000003
+#define A6XX_TEX_CONST_3_ARRAY_PITCH__MASK 0x00003fff
+#define A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A6XX_TEX_CONST_3_ARRAY_PITCH__MASK;
+}
+#define A6XX_TEX_CONST_3_MIN_LAYERSZ__MASK 0x07800000
+#define A6XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT 23
+static inline uint32_t A6XX_TEX_CONST_3_MIN_LAYERSZ(uint32_t val)
+{
+ return ((val >> 12) << A6XX_TEX_CONST_3_MIN_LAYERSZ__SHIFT) & A6XX_TEX_CONST_3_MIN_LAYERSZ__MASK;
+}
+#define A6XX_TEX_CONST_3_TILE_ALL 0x08000000
+#define A6XX_TEX_CONST_3_FLAG 0x10000000
+
+#define REG_A6XX_TEX_CONST_4 0x00000004
+#define A6XX_TEX_CONST_4_BASE_LO__MASK 0xffffffe0
+#define A6XX_TEX_CONST_4_BASE_LO__SHIFT 5
+static inline uint32_t A6XX_TEX_CONST_4_BASE_LO(uint32_t val)
+{
+ return ((val >> 5) << A6XX_TEX_CONST_4_BASE_LO__SHIFT) & A6XX_TEX_CONST_4_BASE_LO__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_5 0x00000005
+#define A6XX_TEX_CONST_5_BASE_HI__MASK 0x0001ffff
+#define A6XX_TEX_CONST_5_BASE_HI__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_5_BASE_HI(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_5_BASE_HI__SHIFT) & A6XX_TEX_CONST_5_BASE_HI__MASK;
+}
+#define A6XX_TEX_CONST_5_DEPTH__MASK 0x3ffe0000
+#define A6XX_TEX_CONST_5_DEPTH__SHIFT 17
+static inline uint32_t A6XX_TEX_CONST_5_DEPTH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_5_DEPTH__SHIFT) & A6XX_TEX_CONST_5_DEPTH__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_6 0x00000006
+#define A6XX_TEX_CONST_6_MIN_LOD_CLAMP__MASK 0x00000fff
+#define A6XX_TEX_CONST_6_MIN_LOD_CLAMP__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_6_MIN_LOD_CLAMP(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A6XX_TEX_CONST_6_MIN_LOD_CLAMP__SHIFT) & A6XX_TEX_CONST_6_MIN_LOD_CLAMP__MASK;
+}
+#define A6XX_TEX_CONST_6_PLANE_PITCH__MASK 0xffffff00
+#define A6XX_TEX_CONST_6_PLANE_PITCH__SHIFT 8
+static inline uint32_t A6XX_TEX_CONST_6_PLANE_PITCH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_6_PLANE_PITCH__SHIFT) & A6XX_TEX_CONST_6_PLANE_PITCH__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_7 0x00000007
+#define A6XX_TEX_CONST_7_FLAG_LO__MASK 0xffffffe0
+#define A6XX_TEX_CONST_7_FLAG_LO__SHIFT 5
+static inline uint32_t A6XX_TEX_CONST_7_FLAG_LO(uint32_t val)
+{
+ return ((val >> 5) << A6XX_TEX_CONST_7_FLAG_LO__SHIFT) & A6XX_TEX_CONST_7_FLAG_LO__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_8 0x00000008
+#define A6XX_TEX_CONST_8_FLAG_HI__MASK 0x0001ffff
+#define A6XX_TEX_CONST_8_FLAG_HI__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_8_FLAG_HI(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_8_FLAG_HI__SHIFT) & A6XX_TEX_CONST_8_FLAG_HI__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_9 0x00000009
+#define A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__MASK 0x0001ffff
+#define A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_TEX_CONST_9_FLAG_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_10 0x0000000a
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__MASK 0x0000007f
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_PITCH__MASK;
+}
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__MASK 0x00000f00
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__SHIFT 8
+static inline uint32_t A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_LOGW__MASK;
+}
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__MASK 0x0000f000
+#define A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__SHIFT 12
+static inline uint32_t A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__SHIFT) & A6XX_TEX_CONST_10_FLAG_BUFFER_LOGH__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_11 0x0000000b
+
+#define REG_A6XX_TEX_CONST_12 0x0000000c
+
+#define REG_A6XX_TEX_CONST_13 0x0000000d
+
+#define REG_A6XX_TEX_CONST_14 0x0000000e
+
+#define REG_A6XX_TEX_CONST_15 0x0000000f
+
+#define REG_A6XX_UBO_0 0x00000000
+#define A6XX_UBO_0_BASE_LO__MASK 0xffffffff
+#define A6XX_UBO_0_BASE_LO__SHIFT 0
+static inline uint32_t A6XX_UBO_0_BASE_LO(uint32_t val)
+{
+ return ((val) << A6XX_UBO_0_BASE_LO__SHIFT) & A6XX_UBO_0_BASE_LO__MASK;
+}
+
+#define REG_A6XX_UBO_1 0x00000001
+#define A6XX_UBO_1_BASE_HI__MASK 0x0001ffff
+#define A6XX_UBO_1_BASE_HI__SHIFT 0
+static inline uint32_t A6XX_UBO_1_BASE_HI(uint32_t val)
+{
+ return ((val) << A6XX_UBO_1_BASE_HI__SHIFT) & A6XX_UBO_1_BASE_HI__MASK;
+}
+#define A6XX_UBO_1_SIZE__MASK 0xfffe0000
+#define A6XX_UBO_1_SIZE__SHIFT 17
+static inline uint32_t A6XX_UBO_1_SIZE(uint32_t val)
+{
+ return ((val) << A6XX_UBO_1_SIZE__SHIFT) & A6XX_UBO_1_SIZE__MASK;
+}
+
+#define REG_A6XX_PDC_GPU_ENABLE_PDC 0x00001140
+
+#define REG_A6XX_PDC_GPU_SEQ_START_ADDR 0x00001148
+
+#define REG_A6XX_PDC_GPU_TCS0_CONTROL 0x00001540
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD_ENABLE_BANK 0x00001541
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD_WAIT_FOR_CMPL_BANK 0x00001542
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_MSGID 0x00001543
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_ADDR 0x00001544
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_DATA 0x00001545
+
+#define REG_A6XX_PDC_GPU_TCS1_CONTROL 0x00001572
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK 0x00001573
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK 0x00001574
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID 0x00001575
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR 0x00001576
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_DATA 0x00001577
+
+#define REG_A6XX_PDC_GPU_TCS2_CONTROL 0x000015a4
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD_ENABLE_BANK 0x000015a5
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD_WAIT_FOR_CMPL_BANK 0x000015a6
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_MSGID 0x000015a7
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_ADDR 0x000015a8
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_DATA 0x000015a9
+
+#define REG_A6XX_PDC_GPU_TCS3_CONTROL 0x000015d6
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK 0x000015d7
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK 0x000015d8
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID 0x000015d9
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR 0x000015da
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_DATA 0x000015db
+
+#define REG_A6XX_PDC_GPU_SEQ_MEM_0 0x00000000
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A 0x00000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__MASK 0x000000ff
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK 0x0000ff00
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B 0x00000001
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C 0x00000002
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D 0x00000003
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT 0x00000004
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM 0x00000005
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0 0x00000008
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1 0x00000009
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2 0x0000000a
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3 0x0000000b
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0 0x0000000c
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1 0x0000000d
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2 0x0000000e
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3 0x0000000f
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0 0x00000010
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1 0x00000011
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0000002f
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000030
+
+#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0 0x00000001
+
+#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1 0x00000002
+
+
+#endif /* A6XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
new file mode 100644
index 0000000000..7923129363
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -0,0 +1,1738 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
+
+#include <linux/clk.h>
+#include <linux/interconnect.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <soc/qcom/cmd-db.h>
+#include <drm/drm_gem.h>
+
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.xml.h"
+#include "msm_gem.h"
+#include "msm_gpu_trace.h"
+#include "msm_mmu.h"
+
+static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ /* FIXME: add a banner here */
+ gmu->hung = true;
+
+ /* Turn off the hangcheck timer while we are resetting */
+ del_timer(&gpu->hangcheck_timer);
+
+ /* Queue the GPU handler because we need to treat this as a recovery */
+ kthread_queue_work(gpu->worker, &gpu->recover_work);
+}
+
+static irqreturn_t a6xx_gmu_irq(int irq, void *data)
+{
+ struct a6xx_gmu *gmu = data;
+ u32 status;
+
+ status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);
+
+ if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
+ dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
+
+ a6xx_gmu_fault(gmu);
+ }
+
+ if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
+ dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");
+
+ if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
+ dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
+ gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t a6xx_hfi_irq(int irq, void *data)
+{
+ struct a6xx_gmu *gmu = data;
+ u32 status;
+
+ status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);
+
+ if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
+ dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
+
+ a6xx_gmu_fault(gmu);
+ }
+
+ return IRQ_HANDLED;
+}
+
+bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
+{
+ u32 val;
+
+ /* This can be called from gpu state code so make sure GMU is valid */
+ if (!gmu->initialized)
+ return false;
+
+ val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+
+ return !(val &
+ (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
+ A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
+}
+
+/* Check to see if the GX rail is still powered */
+bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
+{
+ u32 val;
+
+ /* This can be called from gpu state code so make sure GMU is valid */
+ if (!gmu->initialized)
+ return false;
+
+ val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+
+ return !(val &
+ (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
+ A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
+}
+
+void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
+ bool suspended)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u32 perf_index;
+ unsigned long gpu_freq;
+ int ret = 0;
+
+ gpu_freq = dev_pm_opp_get_freq(opp);
+
+ if (gpu_freq == gmu->freq)
+ return;
+
+ for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
+ if (gpu_freq == gmu->gpu_freqs[perf_index])
+ break;
+
+ gmu->current_perf_index = perf_index;
+ gmu->freq = gmu->gpu_freqs[perf_index];
+
+ trace_msm_gmu_freq_change(gmu->freq, perf_index);
+
+ /*
+ * This can get called from devfreq while the hardware is idle. Don't
+ * bring up the power if it isn't already active. All we're doing here
+ * is updating the frequency so that when we come back online we're at
+ * the right rate.
+ */
+ if (suspended)
+ return;
+
+ if (!gmu->legacy) {
+ a6xx_hfi_set_freq(gmu, perf_index);
+ dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
+ return;
+ }
+
+ gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
+
+ gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
+ ((3 & 0xf) << 28) | perf_index);
+
+ /*
+ * Send an invalid index as a vote for the bus bandwidth and let the
+ * firmware decide on the right vote
+ */
+ gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);
+
+ /* Set and clear the OOB for DCVS to trigger the GMU */
+ a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);
+
+ ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
+ if (ret)
+ dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
+
+ dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
+}
+
+unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+
+ return gmu->freq;
+}
+
+static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
+{
+ u32 val;
+ int local = gmu->idle_level;
+
+ /* SPTP and IFPC both report as IFPC */
+ if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
+ local = GMU_IDLE_STATE_IFPC;
+
+ val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+
+ if (val == local) {
+ if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
+ !a6xx_gmu_gx_is_on(gmu))
+ return true;
+ }
+
+ return false;
+}
+
+/* Wait for the GMU to get to its most idle state */
+int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
+{
+ return spin_until(a6xx_gmu_check_idle_level(gmu));
+}
+
+static int a6xx_gmu_start(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+ u32 mask, reset_val;
+
+ val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
+ if (val <= 0x20010004) {
+ mask = 0xffffffff;
+ reset_val = 0xbabeface;
+ } else {
+ mask = 0x1ff;
+ reset_val = 0x100;
+ }
+
+ gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
+
+ /* Set the log wptr index
+ * note: downstream saves the value in poweroff and restores it here
+ */
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
+
+ gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
+ (val & mask) == reset_val, 100, 10000);
+
+ if (ret)
+ DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
+
+ return ret;
+}
+
+static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
+{
+ u32 val;
+ int ret;
+
+ gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
+ val & 1, 100, 10000);
+ if (ret)
+ DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");
+
+ return ret;
+}
+
+struct a6xx_gmu_oob_bits {
+ int set, ack, set_new, ack_new, clear, clear_new;
+ const char *name;
+};
+
+/* These are the interrupt / ack bits for each OOB request that are set
+ * in a6xx_gmu_set_oob and a6xx_gmu_clear_oob
+ */
+static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
+ [GMU_OOB_GPU_SET] = {
+ .name = "GPU_SET",
+ .set = 16,
+ .ack = 24,
+ .set_new = 30,
+ .ack_new = 31,
+ .clear = 24,
+ .clear_new = 31,
+ },
+
+ [GMU_OOB_PERFCOUNTER_SET] = {
+ .name = "PERFCOUNTER",
+ .set = 17,
+ .ack = 25,
+ .set_new = 28,
+ .ack_new = 30,
+ .clear = 25,
+ .clear_new = 29,
+ },
+
+ [GMU_OOB_BOOT_SLUMBER] = {
+ .name = "BOOT_SLUMBER",
+ .set = 22,
+ .ack = 30,
+ .clear = 30,
+ },
+
+ [GMU_OOB_DCVS_SET] = {
+ .name = "GPU_DCVS",
+ .set = 23,
+ .ack = 31,
+ .clear = 31,
+ },
+};
+
+/* Trigger an OOB (out of band) request to the GMU */
+int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+{
+ int ret;
+ u32 val;
+ int request, ack;
+
+ WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+
+ if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
+ return -EINVAL;
+
+ if (gmu->legacy) {
+ request = a6xx_gmu_oob_bits[state].set;
+ ack = a6xx_gmu_oob_bits[state].ack;
+ } else {
+ request = a6xx_gmu_oob_bits[state].set_new;
+ ack = a6xx_gmu_oob_bits[state].ack_new;
+ if (!request || !ack) {
+ DRM_DEV_ERROR(gmu->dev,
+ "Invalid non-legacy GMU request %s\n",
+ a6xx_gmu_oob_bits[state].name);
+ return -EINVAL;
+ }
+ }
+
+	/* Trigger the requested OOB operation */
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
+
+ /* Wait for the acknowledge interrupt */
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
+ val & (1 << ack), 100, 10000);
+
+ if (ret)
+ DRM_DEV_ERROR(gmu->dev,
+ "Timeout waiting for GMU OOB set %s: 0x%x\n",
+ a6xx_gmu_oob_bits[state].name,
+ gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
+
+ /* Clear the acknowledge interrupt */
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);
+
+ return ret;
+}
+
+/* Clear a pending OOB state in the GMU */
+void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+{
+ int bit;
+
+ WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+
+ if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
+ return;
+
+ if (gmu->legacy)
+ bit = a6xx_gmu_oob_bits[state].clear;
+ else
+ bit = a6xx_gmu_oob_bits[state].clear_new;
+
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
+}
+
+/* Enable CPU control of SPTP power collapse */
+int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+
+ if (!gmu->legacy)
+ return 0;
+
+ gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
+ (val & 0x38) == 0x28, 1, 100);
+
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
+ gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
+ }
+
+ return 0;
+}
+
+/* Disable CPU control of SPTP power collapse */
+void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
+{
+ u32 val;
+ int ret;
+
+ if (!gmu->legacy)
+ return;
+
+ /* Make sure retention is on */
+ gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));
+
+ gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
+ (val & 0x04), 100, 10000);
+
+ if (ret)
+ DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
+ gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
+}
+
+/* Let the GMU know we are starting a boot sequence */
+static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
+{
+ u32 vote;
+
+ /* Let the GMU know we are getting ready for boot */
+ gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);
+
+ /* Choose the "default" power level as the highest available */
+ vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];
+
+ gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
+ gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);
+
+ /* Let the GMU know the boot sequence has started */
+ return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+}
+
+/* Let the GMU know that we are about to go into slumber */
+static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
+{
+ int ret;
+
+ /* Disable the power counter so the GMU isn't busy */
+ gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
+
+ /* Disable SPTP_PC if the CPU is responsible for it */
+ if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
+ a6xx_sptprac_disable(gmu);
+
+ if (!gmu->legacy) {
+ ret = a6xx_hfi_send_prep_slumber(gmu);
+ goto out;
+ }
+
+ /* Tell the GMU to get ready to slumber */
+ gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);
+
+ ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+
+ if (!ret) {
+ /* Check to see if the GMU really did slumber */
+ if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
+ != 0x0f) {
+ DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
+ ret = -ETIMEDOUT;
+ }
+ }
+
+out:
+ /* Put fence into allow mode */
+ gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+ return ret;
+}
+
+static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
+ /* Wait for the register to finish posting */
+ wmb();
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
+ val & (1 << 1), 100, 10000);
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
+ return ret;
+ }
+
+ ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
+ !val, 100, 10000);
+
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
+ return ret;
+ }
+
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+
+ return 0;
+}
+
+static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
+
+ ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
+ val, val & (1 << 16), 100, 10000);
+ if (ret)
+ DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
+
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+}
+
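+/*
+ * The PDC register "offsets" passed in below are 32-bit word indices;
+ * pdc_write() shifts them left by two to form the byte offset into the
+ * mapped PDC region.
+ */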
+static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
+{
+ msm_writel(value, ptr + (offset << 2));
+}
+
+static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
+ const char *name);
+
+static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct platform_device *pdev = to_platform_device(gmu->dev);
+ void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
+ void __iomem *seqptr = NULL;
+ uint32_t pdc_address_offset;
+ bool pdc_in_aop = false;
+
+ if (IS_ERR(pdcptr))
+ goto err;
+
+ if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
+ pdc_in_aop = true;
+ else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
+ pdc_address_offset = 0x30090;
+ else if (adreno_is_a619(adreno_gpu))
+ pdc_address_offset = 0x300a0;
+ else
+ pdc_address_offset = 0x30080;
+
+ if (!pdc_in_aop) {
+ seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
+ if (IS_ERR(seqptr))
+ goto err;
+ }
+
+ /* Disable SDE clock gating */
+ gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
+
+ /* Setup RSC PDC handshake for sleep and wakeup */
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
+
+ /* Load RSC sequencer uCode for sleep and wakeup */
+ if (adreno_is_a650_family(adreno_gpu)) {
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad);
+ } else {
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
+ }
+
+ if (pdc_in_aop)
+ goto setup_pdc;
+
+ /* Load PDC sequencer uCode for power up and power down sequence */
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
+
+ /* Set TCS commands used by PDC sequence for low power modes */
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
+
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
+
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
+
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
+ if (adreno_is_a618(adreno_gpu) || adreno_is_a619(adreno_gpu) ||
+ adreno_is_a650_family(adreno_gpu))
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
+ else
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
+
+ /* Setup GPU PDC */
+setup_pdc:
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
+
+ /* ensure no writes happen before the uCode is fully written */
+ wmb();
+
+ a6xx_rpmh_stop(gmu);
+
+err:
+ if (!IS_ERR_OR_NULL(pdcptr))
+ iounmap(pdcptr);
+ if (!IS_ERR_OR_NULL(seqptr))
+ iounmap(seqptr);
+}
+
+/*
+ * The lowest 16 bits of this value are the number of XO clock cycles for the
+ * main hysteresis, which is set at 0x1680 cycles (300 us). The upper 16 bits
+ * are for the shorter hysteresis that follows the main one - this is 0xa
+ * (0.5 us)
+ */
+
+#define GMU_PWR_COL_HYST 0x000a1680
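+/*
+ * 0x1680 is 5760 cycles and 0xa is 10 cycles; assuming the usual 19.2 MHz XO
+ * clock, those correspond to the ~300 us and ~0.5 us mentioned above.
+ */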
+
+/* Set up the idle state for the GMU */
+static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
+{
+ /* Disable GMU WB/RB buffer */
+ gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
+ gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
+ gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);
+
+ gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
+
+ switch (gmu->idle_level) {
+ case GMU_IDLE_STATE_IFPC:
+ gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
+ GMU_PWR_COL_HYST);
+ gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
+ fallthrough;
+ case GMU_IDLE_STATE_SPTP:
+ gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
+ GMU_PWR_COL_HYST);
+ gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
+ }
+
+ /* Enable RPMh GPU client */
+ gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
+ A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
+}
+
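+/*
+ * Non-legacy GMU firmware images are laid out as a sequence of these block
+ * headers, each followed by 'size' bytes of payload in data[]; see
+ * a6xx_gmu_fw_load() below.
+ */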
+struct block_header {
+ u32 addr;
+ u32 size;
+ u32 type;
+ u32 value;
+ u32 data[];
+};
+
+static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
+{
+ if (!in_range(blk->addr, bo->iova, bo->size))
+ return false;
+
+ memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size);
+ return true;
+}
+
+static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
+ const struct block_header *blk;
+ u32 reg_offset;
+
+ u32 itcm_base = 0x00000000;
+ u32 dtcm_base = 0x00040000;
+
+ if (adreno_is_a650_family(adreno_gpu))
+ dtcm_base = 0x10004000;
+
+ if (gmu->legacy) {
+ /* Sanity check the size of the firmware that was loaded */
+ if (fw_image->size > 0x8000) {
+ DRM_DEV_ERROR(gmu->dev,
+ "GMU firmware is bigger than the available region\n");
+ return -EINVAL;
+ }
+
+ gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START,
+ (u32*) fw_image->data, fw_image->size);
+ return 0;
+ }
+
+
+ for (blk = (const struct block_header *) fw_image->data;
+ (const u8*) blk < fw_image->data + fw_image->size;
+ blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
+ if (blk->size == 0)
+ continue;
+
+ if (in_range(blk->addr, itcm_base, SZ_16K)) {
+ reg_offset = (blk->addr - itcm_base) >> 2;
+ gmu_write_bulk(gmu,
+ REG_A6XX_GMU_CM3_ITCM_START + reg_offset,
+ blk->data, blk->size);
+ } else if (in_range(blk->addr, dtcm_base, SZ_16K)) {
+ reg_offset = (blk->addr - dtcm_base) >> 2;
+ gmu_write_bulk(gmu,
+ REG_A6XX_GMU_CM3_DTCM_START + reg_offset,
+ blk->data, blk->size);
+ } else if (!fw_block_mem(&gmu->icache, blk) &&
+ !fw_block_mem(&gmu->dcache, blk) &&
+ !fw_block_mem(&gmu->dummy, blk)) {
+ DRM_DEV_ERROR(gmu->dev,
+ "failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n",
+ blk->addr, blk->size, blk->data[0]);
+ }
+ }
+
+ return 0;
+}
+
+static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ int ret;
+ u32 chipid;
+
+ if (adreno_is_a650_family(adreno_gpu)) {
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
+ }
+
+ if (state == GMU_WARM_BOOT) {
+ ret = a6xx_rpmh_start(gmu);
+ if (ret)
+ return ret;
+ } else {
+ if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
+ "GMU firmware is not loaded\n"))
+ return -ENOENT;
+
+ /* Turn on register retention */
+ gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
+
+ ret = a6xx_rpmh_start(gmu);
+ if (ret)
+ return ret;
+
+ ret = a6xx_gmu_fw_load(gmu);
+ if (ret)
+ return ret;
+ }
+
+ gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
+ gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
+
+ /* Write the iova of the HFI table */
+ gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
+ gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
+
+ gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
+ (1 << 31) | (0xa << 18) | (0xa0));
+
+ /*
+ * Snapshots toggle the NMI bit which will result in a jump to the NMI
+ * handler instead of __main. Set the M3 config value to avoid that.
+ */
+ gmu_write(gmu, REG_A6XX_GMU_CM3_CFG, 0x4052);
+
+ /*
+ * Note that the GMU has a slightly different layout for
+ * chip_id, for whatever reason, so a bit of massaging
+ * is needed. The upper 16b are the same, but minor and
+ * patchid are packed in four bits each with the lower
+ * 8b unused:
+ */
+ chipid = adreno_gpu->chip_id & 0xffff0000;
+ chipid |= (adreno_gpu->chip_id << 4) & 0xf000; /* minor */
+ chipid |= (adreno_gpu->chip_id << 8) & 0x0f00; /* patchid */
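+	/*
+	 * For example, given the shifts above, a chip_id of 0x06030001 (minor
+	 * nibble 0x0, patch nibble 0x1) ends up as 0x06030100 in the GMU's
+	 * layout.
+	 */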
+
+ gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
+
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
+ gmu->log.iova | (gmu->log.size / SZ_4K - 1));
+
+ /* Set up the lowest idle level on the GMU */
+ a6xx_gmu_power_config(gmu);
+
+ ret = a6xx_gmu_start(gmu);
+ if (ret)
+ return ret;
+
+ if (gmu->legacy) {
+ ret = a6xx_gmu_gfx_rail_on(gmu);
+ if (ret)
+ return ret;
+ }
+
+ /* Enable SPTP_PC if the CPU is responsible for it */
+ if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
+ ret = a6xx_sptprac_enable(gmu);
+ if (ret)
+ return ret;
+ }
+
+ ret = a6xx_gmu_hfi_start(gmu);
+ if (ret)
+ return ret;
+
+ /* FIXME: Do we need this wmb() here? */
+ wmb();
+
+ return 0;
+}
+
+#define A6XX_HFI_IRQ_MASK \
+ (A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
+
+#define A6XX_GMU_IRQ_MASK \
+ (A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
+ A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
+ A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
+
+static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
+{
+ disable_irq(gmu->gmu_irq);
+ disable_irq(gmu->hfi_irq);
+
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
+}
+
+static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
+{
+ u32 val;
+
+ /* Make sure there are no outstanding RPMh votes */
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
+ (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
+ (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
+ (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
+ (val & 1), 100, 1000);
+}
+
+/* Force the GMU off in case it isn't responsive */
+static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ /*
+ * Turn off keep alive that might have been enabled by the hang
+ * interrupt
+ */
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
+
+ /* Flush all the queues */
+ a6xx_hfi_stop(gmu);
+
+ /* Stop the interrupts */
+ a6xx_gmu_irq_disable(gmu);
+
+ /* Force off SPTP in case the GMU is managing it */
+ a6xx_sptprac_disable(gmu);
+
+ /* Make sure there are no outstanding RPMh votes */
+ a6xx_gmu_rpmh_off(gmu);
+
+ /* Clear the WRITEDROPPED fields and put fence into allow mode */
+ gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS_CLR, 0x7);
+ gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+
+ /* Make sure the above writes go through */
+ wmb();
+
+ /* Halt the gmu cm3 core */
+ gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
+
+ a6xx_bus_clear_pending_transactions(adreno_gpu, true);
+
+ /* Reset GPU core blocks */
+ a6xx_gpu_sw_reset(gpu, true);
+}
+
+static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
+{
+ struct dev_pm_opp *gpu_opp;
+ unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
+
+ gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
+ if (IS_ERR(gpu_opp))
+ return;
+
+ gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */
+ a6xx_gmu_set_freq(gpu, gpu_opp, false);
+ dev_pm_opp_put(gpu_opp);
+}
+
+static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
+{
+ struct dev_pm_opp *gpu_opp;
+ unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
+
+ gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
+ if (IS_ERR(gpu_opp))
+ return;
+
+ dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp);
+ dev_pm_opp_put(gpu_opp);
+}
+
+int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
+{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int status, ret;
+
+ if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
+ return -EINVAL;
+
+ gmu->hung = false;
+
+ /* Turn on the resources */
+ pm_runtime_get_sync(gmu->dev);
+
+ /*
+	 * "enable" the GX power domain; this won't actually do anything, but it
+ * will make sure that the refcounting is correct in case we need to
+ * bring down the GX after a GMU failure
+ */
+ if (!IS_ERR_OR_NULL(gmu->gxpd))
+ pm_runtime_get_sync(gmu->gxpd);
+
+ /* Use a known rate to bring up the GMU */
+ clk_set_rate(gmu->core_clk, 200000000);
+ clk_set_rate(gmu->hub_clk, 150000000);
+ ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
+ if (ret) {
+ pm_runtime_put(gmu->gxpd);
+ pm_runtime_put(gmu->dev);
+ return ret;
+ }
+
+ /* Set the bus quota to a reasonable value for boot */
+ a6xx_gmu_set_initial_bw(gpu, gmu);
+
+ /* Enable the GMU interrupt */
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
+ enable_irq(gmu->gmu_irq);
+
+ /* Check to see if we are doing a cold or warm boot */
+ status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
+ GMU_WARM_BOOT : GMU_COLD_BOOT;
+
+ /*
+	 * Warm boot path does not work on newer GPUs; presumably this is
+	 * because the icache/dcache regions must be restored.
+ */
+ if (!gmu->legacy)
+ status = GMU_COLD_BOOT;
+
+ ret = a6xx_gmu_fw_start(gmu, status);
+ if (ret)
+ goto out;
+
+ ret = a6xx_hfi_start(gmu, status);
+ if (ret)
+ goto out;
+
+ /*
+ * Turn on the GMU firmware fault interrupt after we know the boot
+ * sequence is successful
+ */
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
+ enable_irq(gmu->hfi_irq);
+
+ /* Set the GPU to the current freq */
+ a6xx_gmu_set_initial_freq(gpu, gmu);
+
+out:
+ /* On failure, shut down the GMU to leave it in a good state */
+ if (ret) {
+ disable_irq(gmu->gmu_irq);
+ a6xx_rpmh_stop(gmu);
+ pm_runtime_put(gmu->gxpd);
+ pm_runtime_put(gmu->dev);
+ }
+
+ return ret;
+}
+
+bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
+{
+ u32 reg;
+
+ if (!gmu->initialized)
+ return true;
+
+ reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);
+
+ if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
+ return false;
+
+ return true;
+}
+
+/* Gracefully try to shut down the GMU and by extension the GPU */
+static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ u32 val;
+
+ /*
+	 * The GMU may still be in slumber if the GPU never started, so check
+	 * and skip putting it back into slumber in that case
+ */
+ val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+
+ if (val != 0xf) {
+ int ret = a6xx_gmu_wait_for_idle(gmu);
+
+ /* If the GMU isn't responding assume it is hung */
+ if (ret) {
+ a6xx_gmu_force_off(gmu);
+ return;
+ }
+
+ a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
+
+ /* tell the GMU we want to slumber */
+ ret = a6xx_gmu_notify_slumber(gmu);
+ if (ret) {
+ a6xx_gmu_force_off(gmu);
+ return;
+ }
+
+ ret = gmu_poll_timeout(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
+ !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
+ 100, 10000);
+
+ /*
+ * Let the user know we failed to slumber but don't worry too
+ * much because we are powering down anyway
+ */
+
+ if (ret)
+ DRM_DEV_ERROR(gmu->dev,
+				"Unable to slumber GMU: status = 0x%x/0x%x\n",
+ gmu_read(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
+ gmu_read(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
+ }
+
+ /* Turn off HFI */
+ a6xx_hfi_stop(gmu);
+
+ /* Stop the interrupts and mask the hardware */
+ a6xx_gmu_irq_disable(gmu);
+
+ /* Tell RPMh to power off the GPU */
+ a6xx_rpmh_stop(gmu);
+}
+
+int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ struct msm_gpu *gpu = &a6xx_gpu->base.base;
+
+ if (!pm_runtime_active(gmu->dev))
+ return 0;
+
+ /*
+ * Force the GMU off if we detected a hang, otherwise try to shut it
+ * down gracefully
+ */
+ if (gmu->hung)
+ a6xx_gmu_force_off(gmu);
+ else
+ a6xx_gmu_shutdown(gmu);
+
+ /* Remove the bus vote */
+ dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
+
+ /*
+ * Make sure the GX domain is off before turning off the GMU (CX)
+ * domain. Usually the GMU does this but only if the shutdown sequence
+ * was successful
+ */
+ if (!IS_ERR_OR_NULL(gmu->gxpd))
+ pm_runtime_put_sync(gmu->gxpd);
+
+ clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
+
+ pm_runtime_put_sync(gmu->dev);
+
+ return 0;
+}
+
+static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
+{
+ msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace);
+ msm_gem_kernel_put(gmu->debug.obj, gmu->aspace);
+ msm_gem_kernel_put(gmu->icache.obj, gmu->aspace);
+ msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace);
+ msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace);
+ msm_gem_kernel_put(gmu->log.obj, gmu->aspace);
+
+ gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
+ msm_gem_address_space_put(gmu->aspace);
+}
+
+static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
+ size_t size, u64 iova, const char *name)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct drm_device *dev = a6xx_gpu->base.base.dev;
+ uint32_t flags = MSM_BO_WC;
+ u64 range_start, range_end;
+ int ret;
+
+ size = PAGE_ALIGN(size);
+ if (!iova) {
+ /* no fixed address - use GMU's uncached range */
+ range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */
+ range_end = 0x80000000;
+ } else {
+ /* range for fixed address */
+ range_start = iova;
+ range_end = iova + size;
+ /* use IOMMU_PRIV for icache/dcache */
+ flags |= MSM_BO_MAP_PRIV;
+ }
+
+ bo->obj = msm_gem_new(dev, size, flags);
+ if (IS_ERR(bo->obj))
+ return PTR_ERR(bo->obj);
+
+ ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
+ range_start, range_end);
+ if (ret) {
+ drm_gem_object_put(bo->obj);
+ return ret;
+ }
+
+ bo->virt = msm_gem_get_vaddr(bo->obj);
+ bo->size = size;
+
+ msm_gem_object_set_name(bo->obj, name);
+
+ return 0;
+}
+
+static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
+{
+ struct msm_mmu *mmu;
+
+ mmu = msm_iommu_new(gmu->dev, 0);
+ if (!mmu)
+ return -ENODEV;
+ if (IS_ERR(mmu))
+ return PTR_ERR(mmu);
+
+ gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
+ if (IS_ERR(gmu->aspace))
+ return PTR_ERR(gmu->aspace);
+
+ return 0;
+}
+
+/* Return the 'arc-level' for the given frequency */
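+/*
+ * The level comes from the opp-level property of the matching OPP entry
+ * (dev_pm_opp_get_level()), which for these GPUs names an RPMh voltage corner.
+ */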
+static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
+ unsigned long freq)
+{
+ struct dev_pm_opp *opp;
+ unsigned int val;
+
+ if (!freq)
+ return 0;
+
+ opp = dev_pm_opp_find_freq_exact(dev, freq, true);
+ if (IS_ERR(opp))
+ return 0;
+
+ val = dev_pm_opp_get_level(opp);
+
+ dev_pm_opp_put(opp);
+
+ return val;
+}
+
+static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
+ unsigned long *freqs, int freqs_count, const char *id)
+{
+ int i, j;
+ const u16 *pri, *sec;
+ size_t pri_count, sec_count;
+
+ pri = cmd_db_read_aux_data(id, &pri_count);
+ if (IS_ERR(pri))
+ return PTR_ERR(pri);
+ /*
+	 * The data comes back as an array of unsigned shorts and the count is
+	 * in bytes, so halve it to get the number of entries
+ */
+ pri_count >>= 1;
+ if (!pri_count)
+ return -EINVAL;
+
+ sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
+ if (IS_ERR(sec))
+ return PTR_ERR(sec);
+
+ sec_count >>= 1;
+ if (!sec_count)
+ return -EINVAL;
+
+ /* Construct a vote for each frequency */
+ for (i = 0; i < freqs_count; i++) {
+ u8 pindex = 0, sindex = 0;
+ unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);
+
+ /* Get the primary index that matches the arc level */
+ for (j = 0; j < pri_count; j++) {
+ if (pri[j] >= level) {
+ pindex = j;
+ break;
+ }
+ }
+
+ if (j == pri_count) {
+ DRM_DEV_ERROR(dev,
+ "Level %u not found in the RPMh list\n",
+ level);
+ DRM_DEV_ERROR(dev, "Available levels:\n");
+ for (j = 0; j < pri_count; j++)
+ DRM_DEV_ERROR(dev, " %u\n", pri[j]);
+
+ return -EINVAL;
+ }
+
+ /*
+		 * Look for a level in the secondary list that matches. If
+		 * nothing fits, use the maximum non-zero vote
+ */
+
+ for (j = 0; j < sec_count; j++) {
+ if (sec[j] >= level) {
+ sindex = j;
+ break;
+ } else if (sec[j]) {
+ sindex = j;
+ }
+ }
+
+ /* Construct the vote */
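+		/*
+		 * The packed vote: bits 31:16 carry the primary arc level
+		 * value, bits 15:8 the index into the secondary (mx.lvl)
+		 * table and bits 7:0 the index into the primary table.
+		 */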
+ votes[i] = ((pri[pindex] & 0xffff) << 16) |
+ (sindex << 8) | pindex;
+ }
+
+ return 0;
+}
+
+/*
+ * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
+ * to construct the list of votes on the CPU and send it over. Query the RPMh
+ * voltage levels and build the votes
+ */
+
+static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ int ret;
+
+ /* Build the GX votes */
+ ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
+ gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");
+
+ /* Build the CX votes */
+ ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
+ gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");
+
+ return ret;
+}
+
+static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
+ u32 size)
+{
+ int count = dev_pm_opp_get_opp_count(dev);
+ struct dev_pm_opp *opp;
+ int i, index = 0;
+ unsigned long freq = 1;
+
+ /*
+ * The OPP table doesn't contain the "off" frequency level so we need to
+ * add 1 to the table size to account for it
+ */
+
+ if (WARN(count + 1 > size,
+ "The GMU frequency table is being truncated\n"))
+ count = size - 1;
+
+ /* Set the "off" frequency */
+ freqs[index++] = 0;
+
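+	/*
+	 * dev_pm_opp_find_freq_ceil() rounds 'freq' up to the next available
+	 * OPP and writes that rate back, so bumping it by 1 Hz each pass
+	 * walks the table from the lowest to the highest frequency.
+	 */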
+ for (i = 0; i < count; i++) {
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp))
+ break;
+
+ dev_pm_opp_put(opp);
+ freqs[index++] = freq++;
+ }
+
+ return index;
+}
+
+static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ int ret = 0;
+
+ /*
+ * The GMU handles its own frequency switching so build a list of
+ * available frequencies to send during initialization
+ */
+ ret = devm_pm_opp_of_add_table(gmu->dev);
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
+ return ret;
+ }
+
+ gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
+ gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));
+
+ /*
+ * The GMU also handles GPU frequency switching so build a list
+ * from the GPU OPP table
+ */
+ gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
+ gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));
+
+ gmu->current_perf_index = gmu->nr_gpu_freqs - 1;
+
+ /* Build the list of RPMh votes that we'll send to the GMU */
+ return a6xx_gmu_rpmh_votes_init(gmu);
+}
+
+static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
+{
+ int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
+
+ if (ret < 1)
+ return ret;
+
+ gmu->nr_clocks = ret;
+
+ gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
+ gmu->nr_clocks, "gmu");
+
+ gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks,
+ gmu->nr_clocks, "hub");
+
+ return 0;
+}
+
+static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
+ const char *name)
+{
+ void __iomem *ret;
+ struct resource *res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, name);
+
+ if (!res) {
+ DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = ioremap(res->start, resource_size(res));
+ if (!ret) {
+ DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return ret;
+}
+
+static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
+ const char *name, irq_handler_t handler)
+{
+ int irq, ret;
+
+ irq = platform_get_irq_byname(pdev, name);
+
+ ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
+ name, ret);
+ return ret;
+ }
+
+ disable_irq(irq);
+
+ return irq;
+}
+
+void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
+{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ struct platform_device *pdev = to_platform_device(gmu->dev);
+
+ mutex_lock(&gmu->lock);
+ if (!gmu->initialized) {
+ mutex_unlock(&gmu->lock);
+ return;
+ }
+
+ gmu->initialized = false;
+
+ mutex_unlock(&gmu->lock);
+
+ pm_runtime_force_suspend(gmu->dev);
+
+ /*
+ * Since cxpd is a virt device, the devlink with gmu-dev will be removed
+ * automatically when we do detach
+ */
+ dev_pm_domain_detach(gmu->cxpd, false);
+
+ if (!IS_ERR_OR_NULL(gmu->gxpd)) {
+ pm_runtime_disable(gmu->gxpd);
+ dev_pm_domain_detach(gmu->gxpd, false);
+ }
+
+ iounmap(gmu->mmio);
+ if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
+ iounmap(gmu->rscc);
+ gmu->mmio = NULL;
+ gmu->rscc = NULL;
+
+ if (!adreno_has_gmu_wrapper(adreno_gpu)) {
+ a6xx_gmu_memory_free(gmu);
+
+ free_irq(gmu->gmu_irq, gmu);
+ free_irq(gmu->hfi_irq, gmu);
+ }
+
+ /* Drop reference taken in of_find_device_by_node */
+ put_device(gmu->dev);
+}
+
+static int cxpd_notifier_cb(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct a6xx_gmu *gmu = container_of(nb, struct a6xx_gmu, pd_nb);
+
+ if (action == GENPD_NOTIFY_OFF)
+ complete_all(&gmu->pd_gate);
+
+ return 0;
+}
+
+int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
+{
+ struct platform_device *pdev = of_find_device_by_node(node);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int ret;
+
+ if (!pdev)
+ return -ENODEV;
+
+ gmu->dev = &pdev->dev;
+
+ of_dma_configure(gmu->dev, node, true);
+
+ pm_runtime_enable(gmu->dev);
+
+ /* Mark legacy for manual SPTPRAC control */
+ gmu->legacy = true;
+
+ /* Map the GMU registers */
+ gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
+ if (IS_ERR(gmu->mmio)) {
+ ret = PTR_ERR(gmu->mmio);
+ goto err_mmio;
+ }
+
+ gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx");
+ if (IS_ERR(gmu->cxpd)) {
+ ret = PTR_ERR(gmu->cxpd);
+ goto err_mmio;
+ }
+
+ if (!device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME)) {
+ ret = -ENODEV;
+ goto detach_cxpd;
+ }
+
+ init_completion(&gmu->pd_gate);
+ complete_all(&gmu->pd_gate);
+ gmu->pd_nb.notifier_call = cxpd_notifier_cb;
+
+ /* Get a link to the GX power domain to reset the GPU */
+ gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
+ if (IS_ERR(gmu->gxpd)) {
+ ret = PTR_ERR(gmu->gxpd);
+ goto err_mmio;
+ }
+
+ gmu->initialized = true;
+
+ return 0;
+
+detach_cxpd:
+ dev_pm_domain_detach(gmu->cxpd, false);
+
+err_mmio:
+ iounmap(gmu->mmio);
+
+ /* Drop reference taken in of_find_device_by_node */
+ put_device(gmu->dev);
+
+ return ret;
+}
+
+int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
+{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ struct platform_device *pdev = of_find_device_by_node(node);
+ int ret;
+
+ if (!pdev)
+ return -ENODEV;
+
+ gmu->dev = &pdev->dev;
+
+ of_dma_configure(gmu->dev, node, true);
+
+	/* For now, don't do anything fancy until we get our feet under us */
+ gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
+
+ pm_runtime_enable(gmu->dev);
+
+ /* Get the list of clocks */
+ ret = a6xx_gmu_clocks_probe(gmu);
+ if (ret)
+ goto err_put_device;
+
+ ret = a6xx_gmu_memory_probe(gmu);
+ if (ret)
+ goto err_put_device;
+
+	/* A660 now requires handling "prealloc requests" in GMU firmware.
+	 * For now just hardcode allocations based on the known firmware.
+	 * Note: there is no indication that these correspond to "dummy" or
+ * "debug" regions, but this "guess" allows reusing these BOs which
+ * are otherwise unused by a660.
+ */
+ gmu->dummy.size = SZ_4K;
+ if (adreno_is_a660_family(adreno_gpu)) {
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
+ 0x60400000, "debug");
+ if (ret)
+ goto err_memory;
+
+ gmu->dummy.size = SZ_8K;
+ }
+
+ /* Allocate memory for the GMU dummy page */
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size,
+ 0x60000000, "dummy");
+ if (ret)
+ goto err_memory;
+
+ /* Note that a650 family also includes a660 family: */
+ if (adreno_is_a650_family(adreno_gpu)) {
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
+ SZ_16M - SZ_16K, 0x04000, "icache");
+ if (ret)
+ goto err_memory;
+ /*
+		 * NOTE: when porting legacy ("pre-650-family") GPUs you may be
+		 * tempted to add a condition to allocate icache/dcache here, as
+		 * per downstream code flow, but it may not actually be
+		 * necessary. If you omit this step and you don't get random
+		 * pagefaults, you are likely good to go without this!
+ */
+ } else if (adreno_is_a640_family(adreno_gpu)) {
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
+ SZ_256K - SZ_16K, 0x04000, "icache");
+ if (ret)
+ goto err_memory;
+
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
+ SZ_256K - SZ_16K, 0x44000, "dcache");
+ if (ret)
+ goto err_memory;
+ } else if (adreno_is_a630_family(adreno_gpu)) {
+ /* HFI v1, has sptprac */
+ gmu->legacy = true;
+
+ /* Allocate memory for the GMU debug region */
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug");
+ if (ret)
+ goto err_memory;
+ }
+
+ /* Allocate memory for the GMU log region */
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_16K, 0, "log");
+ if (ret)
+ goto err_memory;
+
+	/* Allocate memory for the HFI queues */
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi");
+ if (ret)
+ goto err_memory;
+
+ /* Map the GMU registers */
+ gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
+ if (IS_ERR(gmu->mmio)) {
+ ret = PTR_ERR(gmu->mmio);
+ goto err_memory;
+ }
+
+ if (adreno_is_a650_family(adreno_gpu)) {
+ gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
+ if (IS_ERR(gmu->rscc)) {
+ ret = -ENODEV;
+ goto err_mmio;
+ }
+ } else {
+ gmu->rscc = gmu->mmio + 0x23000;
+ }
+
+ /* Get the HFI and GMU interrupts */
+ gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
+ gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
+
+ if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) {
+ ret = -ENODEV;
+ goto err_mmio;
+ }
+
+ gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx");
+ if (IS_ERR(gmu->cxpd)) {
+ ret = PTR_ERR(gmu->cxpd);
+ goto err_mmio;
+ }
+
+ if (!device_link_add(gmu->dev, gmu->cxpd,
+ DL_FLAG_PM_RUNTIME)) {
+ ret = -ENODEV;
+ goto detach_cxpd;
+ }
+
+ init_completion(&gmu->pd_gate);
+ complete_all(&gmu->pd_gate);
+ gmu->pd_nb.notifier_call = cxpd_notifier_cb;
+
+ /*
+ * Get a link to the GX power domain to reset the GPU in case of GMU
+ * crash
+ */
+ gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
+
+ /* Get the power levels for the GMU and GPU */
+ a6xx_gmu_pwrlevels_probe(gmu);
+
+ /* Set up the HFI queues */
+ a6xx_hfi_init(gmu);
+
+ /* Initialize RPMh */
+ a6xx_gmu_rpmh_init(gmu);
+
+ gmu->initialized = true;
+
+ return 0;
+
+detach_cxpd:
+ dev_pm_domain_detach(gmu->cxpd, false);
+
+err_mmio:
+ iounmap(gmu->mmio);
+ if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
+ iounmap(gmu->rscc);
+ free_irq(gmu->gmu_irq, gmu);
+ free_irq(gmu->hfi_irq, gmu);
+
+err_memory:
+ a6xx_gmu_memory_free(gmu);
+err_put_device:
+ /* Drop reference taken in of_find_device_by_node */
+ put_device(gmu->dev);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
new file mode 100644
index 0000000000..236f81a43c
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
+
+#ifndef _A6XX_GMU_H_
+#define _A6XX_GMU_H_
+
+#include <linux/completion.h>
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+#include "msm_drv.h"
+#include "a6xx_hfi.h"
+
+struct a6xx_gmu_bo {
+ struct drm_gem_object *obj;
+ void *virt;
+ size_t size;
+ u64 iova;
+};
+
+/*
+ * These define the different GMU wake up options - these define how both the
+ * CPU and the GMU bring up the hardware
+ */
+
+/* The GMU has already been booted and the retention registers are active */
+#define GMU_WARM_BOOT 0
+
+/* the GMU is coming up for the first time or back from a power collapse */
+#define GMU_COLD_BOOT 1
+
+/*
+ * These define the level of control that the GMU has - the higher the number
+ * the more things that the GMU hardware controls on its own.
+ */
+
+/* The GMU does not do any idle state management */
+#define GMU_IDLE_STATE_ACTIVE 0
+
+/* The GMU manages SPTP power collapse */
+#define GMU_IDLE_STATE_SPTP 2
+
+/* The GMU does automatic IFPC (intra-frame power collapse) */
+#define GMU_IDLE_STATE_IFPC 3
+
+struct a6xx_gmu {
+ struct device *dev;
+
+ /* For serializing communication with the GMU: */
+ struct mutex lock;
+
+ struct msm_gem_address_space *aspace;
+
+ void __iomem *mmio;
+ void __iomem *rscc;
+
+ int hfi_irq;
+ int gmu_irq;
+
+ struct device *gxpd;
+ struct device *cxpd;
+
+ int idle_level;
+
+ struct a6xx_gmu_bo hfi;
+ struct a6xx_gmu_bo debug;
+ struct a6xx_gmu_bo icache;
+ struct a6xx_gmu_bo dcache;
+ struct a6xx_gmu_bo dummy;
+ struct a6xx_gmu_bo log;
+
+ int nr_clocks;
+ struct clk_bulk_data *clocks;
+ struct clk *core_clk;
+ struct clk *hub_clk;
+
+ /* current performance index set externally */
+ int current_perf_index;
+
+ int nr_gpu_freqs;
+ unsigned long gpu_freqs[16];
+ u32 gx_arc_votes[16];
+
+ int nr_gmu_freqs;
+ unsigned long gmu_freqs[4];
+ u32 cx_arc_votes[4];
+
+ unsigned long freq;
+
+ struct a6xx_hfi_queue queues[2];
+
+ bool initialized;
+ bool hung;
+ bool legacy; /* a618 or a630 */
+
+ /* For power domain callback */
+ struct notifier_block pd_nb;
+ struct completion pd_gate;
+};
+
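+/*
+ * Register "offsets" in the generated headers are dword indices, so the
+ * accessors below shift them left by 2 to get a byte offset into the mapping.
+ */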
+static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
+{
+ return msm_readl(gmu->mmio + (offset << 2));
+}
+
+static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
+{
+ msm_writel(value, gmu->mmio + (offset << 2));
+}
+
+static inline void
+gmu_write_bulk(struct a6xx_gmu *gmu, u32 offset, const u32 *data, u32 size)
+{
+ memcpy_toio(gmu->mmio + (offset << 2), data, size);
+ wmb();
+}
+
+static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
+{
+ u32 val = gmu_read(gmu, reg);
+
+ val &= ~mask;
+
+ gmu_write(gmu, reg, val | or);
+}
+
+static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
+{
+ u64 val;
+
+ val = (u64) msm_readl(gmu->mmio + (lo << 2));
+ val |= ((u64) msm_readl(gmu->mmio + (hi << 2)) << 32);
+
+ return val;
+}
+
+#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
+ interval, timeout)
+
+static inline u32 gmu_read_rscc(struct a6xx_gmu *gmu, u32 offset)
+{
+ return msm_readl(gmu->rscc + (offset << 2));
+}
+
+static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value)
+{
+ msm_writel(value, gmu->rscc + (offset << 2));
+}
+
+#define gmu_poll_timeout_rscc(gmu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout((gmu)->rscc + ((addr) << 2), val, cond, \
+ interval, timeout)
+
+/*
+ * These are the available OOB (out of band requests) to the GMU where "out of
+ * band" means that the CPU talks to the GMU directly and not through HFI.
+ * Normally this works by writing an ITCM/DTCM register and then triggering an
+ * interrupt (the "request" bit) and waiting for an acknowledgment (the "ack"
+ * bit). The state is cleared by writing the "clear" bit to the GMU interrupt.
+ *
+ * These are used to force the GMU/GPU to stay on during a critical sequence or
+ * for hardware workarounds.
+ */
+
+enum a6xx_gmu_oob_state {
+ /*
+ * Let the GMU know that a boot or slumber operation has started. The value in
+ * REG_A6XX_GMU_BOOT_SLUMBER_OPTION lets the GMU know which operation we are
+ * doing
+ */
+ GMU_OOB_BOOT_SLUMBER = 0,
+ /*
+ * Let the GMU know to not turn off any GPU registers while the CPU is in a
+ * critical section
+ */
+ GMU_OOB_GPU_SET,
+ /*
+ * Set a new power level for the GPU when the CPU is doing frequency scaling
+ */
+ GMU_OOB_DCVS_SET,
+ /*
+ * Used to keep the GPU on for CPU-side reads of performance counters.
+ */
+ GMU_OOB_PERFCOUNTER_SET,
+};
+
+void a6xx_hfi_init(struct a6xx_gmu *gmu);
+int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
+void a6xx_hfi_stop(struct a6xx_gmu *gmu);
+int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu);
+int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index);
+
+bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu);
+bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu);
+void a6xx_sptprac_disable(struct a6xx_gmu *gmu);
+int a6xx_sptprac_enable(struct a6xx_gmu *gmu);
+
+#endif
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
new file mode 100644
index 0000000000..fcd9eb53ba
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
@@ -0,0 +1,485 @@
+#ifndef A6XX_GMU_XML
+#define A6XX_GMU_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52)
+
+Copyright (C) 2013-2023 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__MASK 0x00800000
+#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__SHIFT 23
+static inline uint32_t A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB(uint32_t val)
+{
+ return ((val) << A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__SHIFT) & A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB__MASK;
+}
+#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__MASK 0x40000000
+#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__SHIFT 30
+static inline uint32_t A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB(uint32_t val)
+{
+ return ((val) << A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__SHIFT) & A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB__MASK;
+}
+#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__MASK 0x00400000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__SHIFT 22
+static inline uint32_t A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__SHIFT) & A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK__MASK;
+}
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__MASK 0x40000000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__SHIFT 30
+static inline uint32_t A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK__MASK;
+}
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__MASK 0x40000000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__SHIFT 30
+static inline uint32_t A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK__MASK;
+}
+#define A6XX_GMU_OOB_DCVS_SET_MASK__MASK 0x00800000
+#define A6XX_GMU_OOB_DCVS_SET_MASK__SHIFT 23
+static inline uint32_t A6XX_GMU_OOB_DCVS_SET_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_DCVS_SET_MASK__SHIFT) & A6XX_GMU_OOB_DCVS_SET_MASK__MASK;
+}
+#define A6XX_GMU_OOB_DCVS_CHECK_MASK__MASK 0x80000000
+#define A6XX_GMU_OOB_DCVS_CHECK_MASK__SHIFT 31
+static inline uint32_t A6XX_GMU_OOB_DCVS_CHECK_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_DCVS_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_DCVS_CHECK_MASK__MASK;
+}
+#define A6XX_GMU_OOB_DCVS_CLEAR_MASK__MASK 0x80000000
+#define A6XX_GMU_OOB_DCVS_CLEAR_MASK__SHIFT 31
+static inline uint32_t A6XX_GMU_OOB_DCVS_CLEAR_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_DCVS_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_DCVS_CLEAR_MASK__MASK;
+}
+#define A6XX_GMU_OOB_GPU_SET_MASK__MASK 0x00040000
+#define A6XX_GMU_OOB_GPU_SET_MASK__SHIFT 18
+static inline uint32_t A6XX_GMU_OOB_GPU_SET_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_GPU_SET_MASK__SHIFT) & A6XX_GMU_OOB_GPU_SET_MASK__MASK;
+}
+#define A6XX_GMU_OOB_GPU_CHECK_MASK__MASK 0x04000000
+#define A6XX_GMU_OOB_GPU_CHECK_MASK__SHIFT 26
+static inline uint32_t A6XX_GMU_OOB_GPU_CHECK_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_GPU_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_GPU_CHECK_MASK__MASK;
+}
+#define A6XX_GMU_OOB_GPU_CLEAR_MASK__MASK 0x04000000
+#define A6XX_GMU_OOB_GPU_CLEAR_MASK__SHIFT 26
+static inline uint32_t A6XX_GMU_OOB_GPU_CLEAR_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_GPU_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_GPU_CLEAR_MASK__MASK;
+}
+#define A6XX_GMU_OOB_PERFCNTR_SET_MASK__MASK 0x00020000
+#define A6XX_GMU_OOB_PERFCNTR_SET_MASK__SHIFT 17
+static inline uint32_t A6XX_GMU_OOB_PERFCNTR_SET_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_PERFCNTR_SET_MASK__SHIFT) & A6XX_GMU_OOB_PERFCNTR_SET_MASK__MASK;
+}
+#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__MASK 0x02000000
+#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__SHIFT 25
+static inline uint32_t A6XX_GMU_OOB_PERFCNTR_CHECK_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__SHIFT) & A6XX_GMU_OOB_PERFCNTR_CHECK_MASK__MASK;
+}
+#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__MASK 0x02000000
+#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__SHIFT 25
+static inline uint32_t A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK(uint32_t val)
+{
+ return ((val) << A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__SHIFT) & A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK__MASK;
+}
+#define A6XX_HFI_IRQ_MSGQ_MASK 0x00000001
+#define A6XX_HFI_IRQ_DSGQ_MASK__MASK 0x00000002
+#define A6XX_HFI_IRQ_DSGQ_MASK__SHIFT 1
+static inline uint32_t A6XX_HFI_IRQ_DSGQ_MASK(uint32_t val)
+{
+ return ((val) << A6XX_HFI_IRQ_DSGQ_MASK__SHIFT) & A6XX_HFI_IRQ_DSGQ_MASK__MASK;
+}
+#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK__MASK 0x00000004
+#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK__SHIFT 2
+static inline uint32_t A6XX_HFI_IRQ_BLOCKED_MSG_MASK(uint32_t val)
+{
+ return ((val) << A6XX_HFI_IRQ_BLOCKED_MSG_MASK__SHIFT) & A6XX_HFI_IRQ_BLOCKED_MSG_MASK__MASK;
+}
+#define A6XX_HFI_IRQ_CM3_FAULT_MASK__MASK 0x00800000
+#define A6XX_HFI_IRQ_CM3_FAULT_MASK__SHIFT 23
+static inline uint32_t A6XX_HFI_IRQ_CM3_FAULT_MASK(uint32_t val)
+{
+ return ((val) << A6XX_HFI_IRQ_CM3_FAULT_MASK__SHIFT) & A6XX_HFI_IRQ_CM3_FAULT_MASK__MASK;
+}
+#define A6XX_HFI_IRQ_GMU_ERR_MASK__MASK 0x007f0000
+#define A6XX_HFI_IRQ_GMU_ERR_MASK__SHIFT 16
+static inline uint32_t A6XX_HFI_IRQ_GMU_ERR_MASK(uint32_t val)
+{
+ return ((val) << A6XX_HFI_IRQ_GMU_ERR_MASK__SHIFT) & A6XX_HFI_IRQ_GMU_ERR_MASK__MASK;
+}
+#define A6XX_HFI_IRQ_OOB_MASK__MASK 0xff000000
+#define A6XX_HFI_IRQ_OOB_MASK__SHIFT 24
+static inline uint32_t A6XX_HFI_IRQ_OOB_MASK(uint32_t val)
+{
+ return ((val) << A6XX_HFI_IRQ_OOB_MASK__SHIFT) & A6XX_HFI_IRQ_OOB_MASK__MASK;
+}
+#define A6XX_HFI_H2F_IRQ_MASK_BIT 0x00000001
+#define REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL 0x00000080
+
+#define REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL 0x00000081
+
+#define REG_A6XX_GMU_CM3_ITCM_START 0x00000c00
+
+#define REG_A6XX_GMU_CM3_DTCM_START 0x00001c00
+
+#define REG_A6XX_GMU_NMI_CONTROL_STATUS 0x000023f0
+
+#define REG_A6XX_GMU_BOOT_SLUMBER_OPTION 0x000023f8
+
+#define REG_A6XX_GMU_GX_VOTE_IDX 0x000023f9
+
+#define REG_A6XX_GMU_MX_VOTE_IDX 0x000023fa
+
+#define REG_A6XX_GMU_DCVS_ACK_OPTION 0x000023fc
+
+#define REG_A6XX_GMU_DCVS_PERF_SETTING 0x000023fd
+
+#define REG_A6XX_GMU_DCVS_BW_SETTING 0x000023fe
+
+#define REG_A6XX_GMU_DCVS_RETURN 0x000023ff
+
+#define REG_A6XX_GMU_ICACHE_CONFIG 0x00004c00
+
+#define REG_A6XX_GMU_DCACHE_CONFIG 0x00004c01
+
+#define REG_A6XX_GMU_SYS_BUS_CONFIG 0x00004c0f
+
+#define REG_A6XX_GMU_CM3_SYSRESET 0x00005000
+
+#define REG_A6XX_GMU_CM3_BOOT_CONFIG 0x00005001
+
+#define REG_A6XX_GMU_CM3_FW_BUSY 0x0000501a
+
+#define REG_A6XX_GMU_CM3_FW_INIT_RESULT 0x0000501c
+
+#define REG_A6XX_GMU_CM3_CFG 0x0000502d
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE 0x00005040
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0 0x00005041
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1 0x00005042
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L 0x00005044
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H 0x00005045
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L 0x00005046
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H 0x00005047
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L 0x00005048
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H 0x00005049
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L 0x0000504a
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H 0x0000504b
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L 0x0000504c
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H 0x0000504d
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L 0x0000504e
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H 0x0000504f
+
+#define REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL 0x000050c0
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE 0x00000001
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE 0x00000002
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE 0x00000004
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__MASK 0x00003c00
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__SHIFT 10
+static inline uint32_t A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS(uint32_t val)
+{
+ return ((val) << A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__SHIFT) & A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__MASK;
+}
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__MASK 0xffffc000
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__SHIFT 14
+static inline uint32_t A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH(uint32_t val)
+{
+ return ((val) << A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__SHIFT) & A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__MASK;
+}
+
+#define REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST 0x000050c1
+
+#define REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST 0x000050c2
+
+#define REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS 0x000050d0
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_OFF 0x00000001
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_ON 0x00000002
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF 0x00000004
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_ON 0x00000008
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF 0x00000010
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GMU_UP_POWER_STATE 0x00000020
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF 0x00000040
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF 0x00000080
+
+#define REG_A6XX_GMU_GPU_NAP_CTRL 0x000050e4
+#define A6XX_GMU_GPU_NAP_CTRL_HW_NAP_ENABLE 0x00000001
+#define A6XX_GMU_GPU_NAP_CTRL_SID__MASK 0x000001f0
+#define A6XX_GMU_GPU_NAP_CTRL_SID__SHIFT 4
+static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
+{
+ return ((val) << A6XX_GMU_GPU_NAP_CTRL_SID__SHIFT) & A6XX_GMU_GPU_NAP_CTRL_SID__MASK;
+}
+
+#define REG_A6XX_GMU_RPMH_CTRL 0x000050e8
+#define A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE 0x00000001
+#define A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE 0x00000010
+#define A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE 0x00000100
+#define A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE 0x00000200
+#define A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE 0x00000400
+#define A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE 0x00000800
+#define A6XX_GMU_RPMH_CTRL_DDR_MIN_VOTE_ENABLE 0x00001000
+#define A6XX_GMU_RPMH_CTRL_MX_MIN_VOTE_ENABLE 0x00002000
+#define A6XX_GMU_RPMH_CTRL_CX_MIN_VOTE_ENABLE 0x00004000
+#define A6XX_GMU_RPMH_CTRL_GFX_MIN_VOTE_ENABLE 0x00008000
+
+#define REG_A6XX_GMU_RPMH_HYST_CTRL 0x000050e9
+
+#define REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE 0x000050ec
+
+#define REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF 0x000050f0
+
+#define REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF 0x000050f1
+
+#define REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG 0x00005100
+
+#define REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP 0x00005101
+
+#define REG_A6XX_GMU_BOOT_KMD_LM_HANDSHAKE 0x000051f0
+
+#define REG_A6XX_GMU_LLM_GLM_SLEEP_CTRL 0x00005157
+
+#define REG_A6XX_GMU_LLM_GLM_SLEEP_STATUS 0x00005158
+
+#define REG_A6XX_GMU_ALWAYS_ON_COUNTER_L 0x00005088
+
+#define REG_A6XX_GMU_ALWAYS_ON_COUNTER_H 0x00005089
+
+#define REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE 0x000050c3
+
+#define REG_A6XX_GMU_HFI_CTRL_STATUS 0x00005180
+
+#define REG_A6XX_GMU_HFI_VERSION_INFO 0x00005181
+
+#define REG_A6XX_GMU_HFI_SFR_ADDR 0x00005182
+
+#define REG_A6XX_GMU_HFI_MMAP_ADDR 0x00005183
+
+#define REG_A6XX_GMU_HFI_QTBL_INFO 0x00005184
+
+#define REG_A6XX_GMU_HFI_QTBL_ADDR 0x00005185
+
+#define REG_A6XX_GMU_HFI_CTRL_INIT 0x00005186
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_SET 0x00005190
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_CLR 0x00005191
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_INFO 0x00005192
+#define A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ 0x00000001
+#define A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT 0x00800000
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_MASK 0x00005193
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_SET 0x00005194
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_CLR 0x00005195
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_RAW_INFO 0x00005196
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_0 0x00005197
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_1 0x00005198
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_2 0x00005199
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_3 0x0000519a
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_0 0x0000519b
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_1 0x0000519c
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_2 0x0000519d
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_3 0x0000519e
+
+#define REG_A6XX_GMU_GENERAL_1 0x000051c6
+
+#define REG_A6XX_GMU_GENERAL_7 0x000051cc
+
+#define REG_A6XX_GMU_ISENSE_CTRL 0x0000515d
+
+#define REG_A6XX_GPU_CS_ENABLE_REG 0x00008920
+
+#define REG_A6XX_GPU_GMU_CX_GMU_ISENSE_CTRL 0x0000515d
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL3 0x00008578
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL2 0x00008558
+
+#define REG_A6XX_GPU_CS_A_SENSOR_CTRL_0 0x00008580
+
+#define REG_A6XX_GPU_CS_A_SENSOR_CTRL_2 0x00027ada
+
+#define REG_A6XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000881a
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x00008957
+
+#define REG_A6XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000881a
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x0000881d
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x0000881f
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x00008821
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_DONE 0x00008965
+
+#define REG_A6XX_GPU_CS_AMP_PERIOD_CTRL 0x0000896d
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_DONE 0x00008965
+
+#define REG_A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD 0x0000514d
+
+#define REG_A6XX_GMU_AO_INTERRUPT_EN 0x00009303
+
+#define REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR 0x00009304
+
+#define REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS 0x00009305
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE 0x00000001
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_RSCC_COMP 0x00000002
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_VDROOP 0x00000004
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR 0x00000008
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_DBD_WAKEUP 0x00000010
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR 0x00000020
+
+#define REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK 0x00009306
+
+#define REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL 0x00009309
+
+#define REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL 0x0000930a
+
+#define REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL 0x0000930b
+
+#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS 0x0000930c
+#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB 0x00800000
+
+#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2 0x0000930d
+
+#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK 0x0000930e
+
+#define REG_A6XX_GMU_AO_AHB_FENCE_CTRL 0x00009310
+
+#define REG_A6XX_GMU_AHB_FENCE_STATUS 0x00009313
+
+#define REG_A6XX_GMU_AHB_FENCE_STATUS_CLR 0x00009314
+
+#define REG_A6XX_GMU_RBBM_INT_UNMASKED_STATUS 0x00009315
+
+#define REG_A6XX_GMU_AO_SPARE_CNTL 0x00009316
+
+#define REG_A6XX_GMU_RSCC_CONTROL_REQ 0x00009307
+
+#define REG_A6XX_GMU_RSCC_CONTROL_ACK 0x00009308
+
+#define REG_A6XX_GMU_AHB_FENCE_RANGE_0 0x00009311
+
+#define REG_A6XX_GMU_AHB_FENCE_RANGE_1 0x00009312
+
+#define REG_A6XX_GPU_CC_GX_GDSCR 0x00009c03
+
+#define REG_A6XX_GPU_CC_GX_DOMAIN_MISC 0x00009d42
+
+#define REG_A6XX_GPU_CPR_FSM_CTL 0x0000c001
+
+#define REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0 0x00000004
+
+#define REG_A6XX_RSCC_PDC_SEQ_START_ADDR 0x00000008
+
+#define REG_A6XX_RSCC_PDC_MATCH_VALUE_LO 0x00000009
+
+#define REG_A6XX_RSCC_PDC_MATCH_VALUE_HI 0x0000000a
+
+#define REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0 0x0000000b
+
+#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR 0x0000000d
+
+#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA 0x0000000e
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0 0x00000082
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0 0x00000083
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0 0x00000089
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0 0x0000008c
+
+#define REG_A6XX_RSCC_OVERRIDE_START_ADDR 0x00000100
+
+#define REG_A6XX_RSCC_SEQ_BUSY_DRV0 0x00000101
+
+#define REG_A6XX_RSCC_SEQ_MEM_0_DRV0 0x00000180
+
+#define REG_A6XX_RSCC_TCS0_DRV0_STATUS 0x00000346
+
+#define REG_A6XX_RSCC_TCS1_DRV0_STATUS 0x000003ee
+
+#define REG_A6XX_RSCC_TCS2_DRV0_STATUS 0x00000496
+
+#define REG_A6XX_RSCC_TCS3_DRV0_STATUS 0x0000053e
+
+
+#endif /* A6XX_GMU_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
new file mode 100644
index 0000000000..522ca7fe67
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -0,0 +1,2381 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
+
+
+#include "msm_gem.h"
+#include "msm_mmu.h"
+#include "msm_gpu_trace.h"
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.xml.h"
+
+#include <linux/bitfield.h>
+#include <linux/devfreq.h>
+#include <linux/pm_domain.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+
+#define GPU_PAS_ID 13
+
+static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ /* Check that the GMU is idle */
+ if (!adreno_has_gmu_wrapper(adreno_gpu) && !a6xx_gmu_isidle(&a6xx_gpu->gmu))
+ return false;
+
+	/* Check that the CX master is idle */
+ if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
+ ~A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER)
+ return false;
+
+ return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
+ A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);
+}
+
+static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ /* wait for CP to drain ringbuffer: */
+ if (!adreno_idle(gpu, ring))
+ return false;
+
+ if (spin_until(_a6xx_check_idle(gpu))) {
+ DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
+ gpu->name, __builtin_return_address(0),
+ gpu_read(gpu, REG_A6XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS),
+ gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A6XX_CP_RB_WPTR));
+ return false;
+ }
+
+ return true;
+}
+
+static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ /* Expanded APRIV doesn't need to issue the WHERE_AM_I opcode */
+ if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) {
+ OUT_PKT7(ring, CP_WHERE_AM_I, 2);
+ OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
+ OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));
+ }
+}
+
+static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ uint32_t wptr;
+ unsigned long flags;
+
+ update_shadow_rptr(gpu, ring);
+
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
+ /* Make sure to wrap wptr if we need to */
+ wptr = get_wptr(ring);
+
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
+
+ /* Make sure everything is posted before making a decision */
+ mb();
+
+ gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+}
+
+static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
+ u64 iova)
+{
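+	/*
+	 * Ask the CP to copy a 64-bit (two dword) counter register pair into
+	 * memory at 'iova'
+	 */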
+ OUT_PKT7(ring, CP_REG_TO_MEM, 3);
+ OUT_RING(ring, CP_REG_TO_MEM_0_REG(counter) |
+ CP_REG_TO_MEM_0_CNT(2) |
+ CP_REG_TO_MEM_0_64B);
+ OUT_RING(ring, lower_32_bits(iova));
+ OUT_RING(ring, upper_32_bits(iova));
+}
+
+static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
+ struct msm_ringbuffer *ring, struct msm_file_private *ctx)
+{
+ bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
+ phys_addr_t ttbr;
+ u32 asid;
+ u64 memptr = rbmemptr(ring, ttbr0);
+
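+	/* Nothing to do if this context's pagetable is already installed */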
+ if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno)
+ return;
+
+ if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
+ return;
+
+ if (!sysprof) {
+ /* Turn off protected mode to write to special registers */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ OUT_PKT4(ring, REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
+ OUT_RING(ring, 1);
+ }
+
+ /* Execute the table update */
+ OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 4);
+ OUT_RING(ring, CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(lower_32_bits(ttbr)));
+
+ OUT_RING(ring,
+ CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(upper_32_bits(ttbr)) |
+ CP_SMMU_TABLE_UPDATE_1_ASID(asid));
+ OUT_RING(ring, CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(0));
+ OUT_RING(ring, CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(0));
+
+ /*
+ * Write the new TTBR0 to the memstore. This is good for debugging.
+ */
+ OUT_PKT7(ring, CP_MEM_WRITE, 4);
+ OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr)));
+ OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr)));
+ OUT_RING(ring, lower_32_bits(ttbr));
+ OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr));
+
+ /*
+ * And finally, trigger a uche flush to be sure there isn't anything
+ * lingering in that part of the GPU
+ */
+
+ OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, CACHE_INVALIDATE);
+
+ if (!sysprof) {
+ /*
+ * Wait for SRAM clear after the pgtable update, so the
+ * two can happen in parallel:
+ */
+ OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
+ OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ));
+ OUT_RING(ring, CP_WAIT_REG_MEM_1_POLL_ADDR_LO(
+ REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS));
+ OUT_RING(ring, CP_WAIT_REG_MEM_2_POLL_ADDR_HI(0));
+ OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(0x1));
+ OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(0x1));
+ OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0));
+
+ /* Re-enable protected mode: */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+ }
+}
+
+static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+ unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = submit->ring;
+ unsigned int i, ibs = 0;
+
+ a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
+
+ get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
+ rbmemptr_stats(ring, index, cpcycles_start));
+
+ /*
+ * For PM4 the GMU register offsets are calculated from the base of the
+ * GPU registers so we need to add 0x1a800 to the register value on A630
+ * to get the right value from PM4.
+ */
+ get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
+ rbmemptr_stats(ring, index, alwayson_start));
+
+ /* Invalidate CCU depth and color */
+ OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_DEPTH));
+
+ OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(PC_CCU_INVALIDATE_COLOR));
+
+ /* Submit the commands */
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
+ break;
+ fallthrough;
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ ibs++;
+ break;
+ }
+
+ /*
+ * Periodically update shadow-wptr if needed, so that we
+		 * can see partial progress of submits with a large number of
+		 * cmds; otherwise we could needlessly stall waiting for
+ * ringbuffer state, simply due to looking at a shadow
+ * rptr value that has not been updated
+ */
+ if ((ibs % 32) == 0)
+ update_shadow_rptr(gpu, ring);
+ }
+
+ get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
+ rbmemptr_stats(ring, index, cpcycles_end));
+ get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
+ rbmemptr_stats(ring, index, alwayson_end));
+
+ /* Write the fence to the scratch register */
+ OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
+ OUT_RING(ring, submit->seqno);
+
+ /*
+ * Execute a CACHE_FLUSH_TS event. This will ensure that the
+ * timestamp is written to the memory and then triggers the interrupt
+ */
+ OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+ OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
+ CP_EVENT_WRITE_0_IRQ);
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, submit->seqno);
+
+ trace_msm_gpu_submit_flush(submit,
+ gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER));
+
+ a6xx_flush(gpu, ring);
+}
+
+const struct adreno_reglist a612_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000081},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01202222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05522022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
+ {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+ {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {},
+};
+
+/* For a615 family (a615, a616, a618 and a619) */
+const struct adreno_reglist a615_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002020},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {},
+};
+
+const struct adreno_reglist a630_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP1, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP2, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP3, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP2, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP3, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP2, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP3, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {},
+};
+
+const struct adreno_reglist a640_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05222022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
+ {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+ {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {},
+};
+
+const struct adreno_reglist a650_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
+ {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+ {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {},
+};
+
+const struct adreno_reglist a660_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
+ {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+ {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {},
+};
+
+const struct adreno_reglist a690_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_CNTL, 0x8AA8AA82},
+ {REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
+ {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+ {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL, 0x20200},
+ {REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL, 0x10111},
+ {REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL, 0x5555},
+ {}
+};
+
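+/*
+ * Enable (state == true) or disable hardware clock gating: every register in
+ * the per-target hwcg table is written with either its table value or zero,
+ * and the global RBBM_CLOCK_CNTL word is programmed last.
+ */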
+static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ const struct adreno_reglist *reg;
+ unsigned int i;
+ u32 val, clock_cntl_on;
+
+ if (!adreno_gpu->info->hwcg)
+ return;
+
+ if (adreno_is_a630(adreno_gpu))
+ clock_cntl_on = 0x8aa8aa02;
+ else if (adreno_is_a610(adreno_gpu))
+ clock_cntl_on = 0xaaa8aa82;
+ else
+ clock_cntl_on = 0x8aa8aa82;
+
+ val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);
+
+ /* Don't re-program the registers if they are already correct */
+ if ((!state && !val) || (state && (val == clock_cntl_on)))
+ return;
+
+ /* Disable SP clock before programming HWCG registers */
+ if (!adreno_is_a610(adreno_gpu))
+ gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
+
+ for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++)
+ gpu_write(gpu, reg->offset, state ? reg->value : 0);
+
+ /* Enable SP clock */
+ if (!adreno_is_a610(adreno_gpu))
+ gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
+
+ gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
+}
+
+/* For a615, a616, a618, a619, a630, a640 and a680 */
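+/*
+ * Each entry describes a span of protected registers: the first argument is
+ * the base offset and the second the length of the span. As the macro names
+ * suggest, RDONLY spans stay readable by the CP while NORDWR spans block both
+ * reads and writes.
+ */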
+static const u32 a6xx_protect[] = {
+ A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+ A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+ A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+ A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+ A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+ A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+ A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+ A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+ A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+ A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+ A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+ A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+ A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+ A6XX_PROTECT_NORDWR(0x09e70, 0x0001),
+ A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+ A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+ A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+ A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+ A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x11c00, 0x0000), /* note: infinite range */
+};
+
+/* These are for a620 and a650 */
+static const u32 a650_protect[] = {
+ A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+ A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+ A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+ A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+ A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+ A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+ A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+ A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+ A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+ A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+ A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+ A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+ A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
+ A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+ A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
+ A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+ A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+ A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0b608, 0x0007),
+ A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+ A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+ A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1a800, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
+ A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
+ A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
+ A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
+};
+
+/* These are for a635 and a660 */
+static const u32 a660_protect[] = {
+ A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+ A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+ A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+ A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+ A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+ A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+ A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+ A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+ A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+ A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+ A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+ A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+ A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
+ A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+ A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
+ A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+ A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+ A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae50, 0x012f),
+ A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0b608, 0x0006),
+ A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+ A6XX_PROTECT_NORDWR(0x0be20, 0x015f),
+ A6XX_PROTECT_NORDWR(0x0d000, 0x05ff),
+ A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1a400, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
+ A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
+ A6XX_PROTECT_NORDWR(0x1f860, 0x0000),
+ A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
+ A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
+};
+
+/* These are for a690 */
+static const u32 a690_protect[] = {
+ A6XX_PROTECT_RDONLY(0x00000, 0x004ff),
+ A6XX_PROTECT_RDONLY(0x00501, 0x00001),
+ A6XX_PROTECT_RDONLY(0x0050b, 0x002f4),
+ A6XX_PROTECT_NORDWR(0x0050e, 0x00000),
+ A6XX_PROTECT_NORDWR(0x00510, 0x00000),
+ A6XX_PROTECT_NORDWR(0x00534, 0x00000),
+ A6XX_PROTECT_NORDWR(0x00800, 0x00082),
+ A6XX_PROTECT_NORDWR(0x008a0, 0x00008),
+ A6XX_PROTECT_NORDWR(0x008ab, 0x00024),
+ A6XX_PROTECT_RDONLY(0x008de, 0x000ae),
+ A6XX_PROTECT_NORDWR(0x00900, 0x0004d),
+ A6XX_PROTECT_NORDWR(0x0098d, 0x00272),
+ A6XX_PROTECT_NORDWR(0x00e00, 0x00001),
+ A6XX_PROTECT_NORDWR(0x00e03, 0x0000c),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x000c3),
+ A6XX_PROTECT_RDONLY(0x03cc4, 0x01fff),
+ A6XX_PROTECT_NORDWR(0x08630, 0x001cf),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x00000),
+ A6XX_PROTECT_NORDWR(0x08e08, 0x00007),
+ A6XX_PROTECT_NORDWR(0x08e50, 0x0001f),
+ A6XX_PROTECT_NORDWR(0x08e80, 0x0027f),
+ A6XX_PROTECT_NORDWR(0x09624, 0x001db),
+ A6XX_PROTECT_NORDWR(0x09e60, 0x00011),
+ A6XX_PROTECT_NORDWR(0x09e78, 0x00187),
+ A6XX_PROTECT_NORDWR(0x0a630, 0x001cf),
+ A6XX_PROTECT_NORDWR(0x0ae02, 0x00000),
+ A6XX_PROTECT_NORDWR(0x0ae50, 0x0012f),
+ A6XX_PROTECT_NORDWR(0x0b604, 0x00000),
+ A6XX_PROTECT_NORDWR(0x0b608, 0x00006),
+ A6XX_PROTECT_NORDWR(0x0be02, 0x00001),
+ A6XX_PROTECT_NORDWR(0x0be20, 0x0015f),
+ A6XX_PROTECT_NORDWR(0x0d000, 0x005ff),
+ A6XX_PROTECT_NORDWR(0x0f000, 0x00bff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x01fff),
+	A6XX_PROTECT_NORDWR(0x11c00, 0x00000), /* note: infinite range */
+};
+
+static void a6xx_set_cp_protect(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const u32 *regs = a6xx_protect;
+ unsigned i, count, count_max;
+
+ if (adreno_is_a650(adreno_gpu)) {
+ regs = a650_protect;
+ count = ARRAY_SIZE(a650_protect);
+ count_max = 48;
+ BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
+ } else if (adreno_is_a690(adreno_gpu)) {
+ regs = a690_protect;
+ count = ARRAY_SIZE(a690_protect);
+ count_max = 48;
+ BUILD_BUG_ON(ARRAY_SIZE(a690_protect) > 48);
+ } else if (adreno_is_a660_family(adreno_gpu)) {
+ regs = a660_protect;
+ count = ARRAY_SIZE(a660_protect);
+ count_max = 48;
+ BUILD_BUG_ON(ARRAY_SIZE(a660_protect) > 48);
+ } else {
+ regs = a6xx_protect;
+ count = ARRAY_SIZE(a6xx_protect);
+ count_max = 32;
+ BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32);
+ }
+
+ /*
+ * Enable access protection to privileged registers, fault on an access
+ * protect violation and select the last span to protect from the start
+ * address all the way to the end of the register address space
+ */
+ gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL,
+ A6XX_CP_PROTECT_CNTL_ACCESS_PROT_EN |
+ A6XX_CP_PROTECT_CNTL_ACCESS_FAULT_ON_VIOL_EN |
+ A6XX_CP_PROTECT_CNTL_LAST_SPAN_INF_RANGE);
+
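+	/*
+	 * count_max is the number of CP_PROTECT registers on the target (32 by
+	 * default, 48 for the a650/a690/a660-family cases above); the last one
+	 * is always programmed below so that the final entry covers the
+	 * remainder of the register space.
+	 */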
+ for (i = 0; i < count - 1; i++) {
+ /* Intentionally skip writing to some registers */
+ if (regs[i])
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]);
+ }
+ /* last CP_PROTECT to have "infinite" length on the last entry */
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]);
+}
+
+static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ /* Unknown, introduced with A650 family, related to UBWC mode/ver 4 */
+ u32 rgb565_predicator = 0;
+ /* Unknown, introduced with A650 family */
+ u32 uavflagprd_inv = 0;
+ /* Whether the minimum access length is 64 bits */
+ u32 min_acc_len = 0;
+ /* Entirely magic, per-GPU-gen value */
+ u32 ubwc_mode = 0;
+ /*
+ * The Highest Bank Bit value represents the bit of the highest DDR bank.
+ * We then subtract 13 from it (13 is the minimum value allowed by hw) and
+ * write the lowest two bits of the remaining value as hbb_lo and the
+ * one above it as hbb_hi to the hardware. This should ideally use DRAM
+ * type detection.
+ */
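+	/*
+	 * Worked example of the encoding above: a part whose highest bank bit
+	 * is 15 gives 15 - 13 = 2, i.e. hbb_lo = 2 and hbb_hi = 0, which is the
+	 * default programmed below; the a610 case (HBB = 14) gives hbb_lo = 1.
+	 */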
+ u32 hbb_hi = 0;
+ u32 hbb_lo = 2;
+ /* Unknown, introduced with A640/680 */
+ u32 amsbc = 0;
+
+ if (adreno_is_a610(adreno_gpu)) {
+ /* HBB = 14 */
+ hbb_lo = 1;
+ min_acc_len = 1;
+ ubwc_mode = 1;
+ }
+
+ /* a618 is using the hw default values */
+ if (adreno_is_a618(adreno_gpu))
+ return;
+
+ if (adreno_is_a619_holi(adreno_gpu))
+ hbb_lo = 0;
+
+ if (adreno_is_a640_family(adreno_gpu))
+ amsbc = 1;
+
+ if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu)) {
+ /* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
+ hbb_lo = 3;
+ amsbc = 1;
+ rgb565_predicator = 1;
+ uavflagprd_inv = 2;
+ }
+
+ if (adreno_is_a690(adreno_gpu)) {
+ hbb_lo = 2;
+ amsbc = 1;
+ rgb565_predicator = 1;
+ uavflagprd_inv = 2;
+ }
+
+ if (adreno_is_7c3(adreno_gpu)) {
+ hbb_lo = 1;
+ amsbc = 1;
+ rgb565_predicator = 1;
+ uavflagprd_inv = 2;
+ }
+
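+	/*
+	 * The UBWC parameters assembled above are packed, at block specific bit
+	 * positions, into the RB, TPL1, SP and UCHE mode control registers
+	 * below.
+	 */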
+ gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
+ rgb565_predicator << 11 | hbb_hi << 10 | amsbc << 4 |
+ min_acc_len << 3 | hbb_lo << 1 | ubwc_mode);
+
+ gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, hbb_hi << 4 |
+ min_acc_len << 3 | hbb_lo << 1 | ubwc_mode);
+
+ gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, hbb_hi << 10 |
+ uavflagprd_inv << 4 | min_acc_len << 3 |
+ hbb_lo << 1 | ubwc_mode);
+
+ gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, min_acc_len << 23 | hbb_lo << 21);
+}
+
+static int a6xx_cp_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
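+	/* The CP_ME_INIT payload is exactly the eight dwords emitted below. */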
+ OUT_PKT7(ring, CP_ME_INIT, 8);
+
+ OUT_RING(ring, 0x0000002f);
+
+ /* Enable multiple hardware contexts */
+ OUT_RING(ring, 0x00000003);
+
+ /* Enable error detection */
+ OUT_RING(ring, 0x20000000);
+
+ /* Don't enable header dump */
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ /* No workarounds enabled */
+ OUT_RING(ring, 0x00000000);
+
+ /* Pad rest of the cmds with 0's */
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ a6xx_flush(gpu, ring);
+ return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+/*
+ * Check that the microcode version is new enough to include several key
+ * security fixes. Return true if the ucode is safe.
+ */
+static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
+ struct drm_gem_object *obj)
+{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ const char *sqe_name = adreno_gpu->info->fw[ADRENO_FW_SQE];
+ u32 *buf = msm_gem_get_vaddr(obj);
+ bool ret = false;
+
+ if (IS_ERR(buf))
+ return false;
+
+ /*
+ * Targets up to a640 (a618, a630 and a640) need to check for a
+ * microcode version that is patched to support the whereami opcode or
+ * one that is new enough to include it by default.
+ *
+ * a650 tier targets don't need whereami but still need to be
+ * equal to or newer than 0.95 for other security fixes
+ *
+ * a660 targets have all the critical security fixes from the start
+ */
+ if (!strcmp(sqe_name, "a630_sqe.fw")) {
+ /*
+ * If the lowest nibble is 0xa that is an indication that this
+ * microcode has been patched. The actual version is in dword
+ * [3] but we only care about the patchlevel which is the lowest
+ * nibble of dword [3]
+ *
+ * Otherwise check that the firmware is greater than or equal
+ * to 1.90 which was the first version that had this fix built
+ * in
+		 * in
+		 */
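+		/*
+		 * For example, a value of 0x190 in the low 12 bits of buf[0]
+		 * corresponds to SQE version 1.90, as reflected in the error
+		 * message below.
+		 */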
+ if ((((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) ||
+ (buf[0] & 0xfff) >= 0x190) {
+ a6xx_gpu->has_whereami = true;
+ ret = true;
+ goto out;
+ }
+
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "a630 SQE ucode is too old. Have version %x need at least %x\n",
+ buf[0] & 0xfff, 0x190);
+ } else if (!strcmp(sqe_name, "a650_sqe.fw")) {
+ if ((buf[0] & 0xfff) >= 0x095) {
+ ret = true;
+ goto out;
+ }
+
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "a650 SQE ucode is too old. Have version %x need at least %x\n",
+ buf[0] & 0xfff, 0x095);
+ } else if (!strcmp(sqe_name, "a660_sqe.fw")) {
+ ret = true;
+ } else {
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "unknown GPU, add it to a6xx_ucode_check_version()!!\n");
+ }
+out:
+ msm_gem_put_vaddr(obj);
+ return ret;
+}
+
+static int a6xx_ucode_load(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ if (!a6xx_gpu->sqe_bo) {
+ a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu,
+ adreno_gpu->fw[ADRENO_FW_SQE], &a6xx_gpu->sqe_iova);
+
+ if (IS_ERR(a6xx_gpu->sqe_bo)) {
+ int ret = PTR_ERR(a6xx_gpu->sqe_bo);
+
+ a6xx_gpu->sqe_bo = NULL;
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "Could not allocate SQE ucode: %d\n", ret);
+
+ return ret;
+ }
+
+ msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
+ if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) {
+ msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+ drm_gem_object_put(a6xx_gpu->sqe_bo);
+
+ a6xx_gpu->sqe_bo = NULL;
+ return -EPERM;
+ }
+ }
+
+ /*
+ * Expanded APRIV and targets that support WHERE_AM_I both need a
+ * privileged buffer to store the RPTR shadow
+ */
+ if ((adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) &&
+ !a6xx_gpu->shadow_bo) {
+ a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
+ sizeof(u32) * gpu->nr_rings,
+ MSM_BO_WC | MSM_BO_MAP_PRIV,
+ gpu->aspace, &a6xx_gpu->shadow_bo,
+ &a6xx_gpu->shadow_iova);
+
+ if (IS_ERR(a6xx_gpu->shadow))
+ return PTR_ERR(a6xx_gpu->shadow);
+
+ msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow");
+ }
+
+ return 0;
+}
+
+static int a6xx_zap_shader_init(struct msm_gpu *gpu)
+{
+ static bool loaded;
+ int ret;
+
+ if (loaded)
+ return 0;
+
+ ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
+
+ loaded = !ret;
+ return ret;
+}
+
+#define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A6XX_RBBM_INT_0_MASK_CP_IB2 | \
+ A6XX_RBBM_INT_0_MASK_CP_IB1 | \
+ A6XX_RBBM_INT_0_MASK_CP_RB | \
+ A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
+ A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+ A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
+
+static int hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int ret;
+
+ if (!adreno_has_gmu_wrapper(adreno_gpu)) {
+ /* Make sure the GMU keeps the GPU on while we set it up */
+ ret = a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ if (ret)
+ return ret;
+ }
+
+ /* Clear GBIF halt in case GX domain was not collapsed */
+ if (adreno_is_a619_holi(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
+ gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, 0);
+ /* Let's make extra sure that the GPU can access the memory.. */
+ mb();
+ } else if (a6xx_has_gbif(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0);
+ /* Let's make extra sure that the GPU can access the memory.. */
+ mb();
+ }
+
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
+
+ if (adreno_is_a619_holi(adreno_gpu))
+ a6xx_sptprac_enable(gmu);
+
+ /*
+	 * Disable the trusted memory range - we don't actually support secure
+ * memory rendering at this point in time and we don't want to block off
+ * part of the virtual memory space.
+ */
+ gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE, 0x00000000);
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+
+ /* Turn on 64 bit addressing for all blocks */
+ gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
+
+ /* enable hardware clockgating */
+ a6xx_set_hwcg(gpu, true);
+
+	/* VBIF/GBIF start */
+ if (adreno_is_a610(adreno_gpu) ||
+ adreno_is_a640_family(adreno_gpu) ||
+ adreno_is_a650_family(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3);
+ } else {
+ gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
+ }
+
+ if (adreno_is_a630(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
+
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
+
+ /* Disable L2 bypass in the UCHE */
+ gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, 0x0001ffffffffffc0llu);
+ gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
+ gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
+
+ if (!adreno_is_a650_family(adreno_gpu)) {
+ /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
+ gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN, 0x00100000);
+
+ gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX,
+ 0x00100000 + adreno_gpu->info->gmem - 1);
+ }
+
+ gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
+ gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
+
+ if (adreno_is_a640_family(adreno_gpu) || adreno_is_a650_family(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
+ } else if (adreno_is_a610(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x00800060);
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16);
+ } else {
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
+ }
+
+ if (adreno_is_a660_family(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020);
+
+ /* Setting the mem pool size */
+ if (adreno_is_a610(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 48);
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 47);
+ } else
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
+
+	/* Set the default primFifo threshold values,
+	 * and the vccCacheSkipDis=1 bit (0x200) for A640 and newer
+ */
+ if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu) || adreno_is_a690(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
+ else if (adreno_is_a640_family(adreno_gpu) || adreno_is_7c3(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200);
+ else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
+ else if (adreno_is_a619(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00018000);
+ else if (adreno_is_a610(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00080000);
+ else
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000);
+
+ /* Set the AHB default slave response to "ERROR" */
+ gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);
+
+ /* Turn on performance counters */
+ gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);
+
+ /* Select CP0 to always count cycles */
+ gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL(0), PERF_CP_ALWAYS_COUNT);
+
+ a6xx_set_ubwc_config(gpu);
+
+ /* Enable fault detection */
+ if (adreno_is_a619(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3fffff);
+ else if (adreno_is_a610(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3ffff);
+ else
+ gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x1fffff);
+
+ gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
+
+ /* Set weights for bicubic filtering */
+ if (adreno_is_a650_family(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
+ 0x3fe05ff4);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2,
+ 0x3fa0ebee);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3,
+ 0x3f5193ed);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4,
+ 0x3f0243f0);
+ }
+
+ /* Set up the CX GMU counter 0 to count busy ticks */
+ gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
+
+ /* Enable the power counter */
+ gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, BIT(5));
+ gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
+
+ /* Protect registers from the CP */
+ a6xx_set_cp_protect(gpu);
+
+ if (adreno_is_a660_family(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1);
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0);
+ }
+
+ /* Set dualQ + disable afull for A660 GPU */
+ if (adreno_is_a660(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
+
+ /* Enable expanded apriv for targets that support it */
+ if (gpu->hw_apriv) {
+ gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
+ (1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
+ }
+
+ /* Enable interrupts */
+ gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ goto out;
+
+ gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova);
+
+ /* Set the ringbuffer address */
+ gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova);
+
+ /* Targets that support extended APRIV can use the RPTR shadow from
+ * hardware but all the other ones need to disable the feature. Targets
+ * that support the WHERE_AM_I opcode can use that instead
+ */
+ if (adreno_gpu->base.hw_apriv)
+ gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT);
+ else
+ gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ /* Configure the RPTR shadow if needed: */
+ if (a6xx_gpu->shadow_bo) {
+ gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR,
+ shadowptr(a6xx_gpu, gpu->rb[0]));
+ }
+
+ /* Always come up on rb 0 */
+ a6xx_gpu->cur_ring = gpu->rb[0];
+
+ gpu->cur_ctx_seqno = 0;
+
+	/* Enable the SQE to start the CP engine */
+ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
+
+ ret = a6xx_cp_init(gpu);
+ if (ret)
+ goto out;
+
+ /*
+ * Try to load a zap shader into the secure world. If successful
+ * we can use the CP to switch out of secure mode. If not then we
+	 * have no recourse but to try to switch ourselves out manually. If we
+ * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
+ * be blocked and a permissions violation will soon follow.
+ */
+ ret = a6xx_zap_shader_init(gpu);
+ if (!ret) {
+ OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
+ OUT_RING(gpu->rb[0], 0x00000000);
+
+ a6xx_flush(gpu, gpu->rb[0]);
+ if (!a6xx_idle(gpu, gpu->rb[0]))
+ return -EINVAL;
+ } else if (ret == -ENODEV) {
+ /*
+ * This device does not use zap shader (but print a warning
+ * just in case someone got their dt wrong.. hopefully they
+ * have a debug UART to realize the error of their ways...
+ * if you mess this up you are about to crash horribly)
+ */
+ dev_warn_once(gpu->dev->dev,
+ "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+ ret = 0;
+ } else {
+ return ret;
+ }
+
+out:
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ return ret;
+ /*
+ * Tell the GMU that we are done touching the GPU and it can start power
+ * management
+ */
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+
+ if (a6xx_gpu->gmu.legacy) {
+ /* Take the GMU out of its special boot mode */
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
+ }
+
+ return ret;
+}
+
+static int a6xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int ret;
+
+ mutex_lock(&a6xx_gpu->gmu.lock);
+ ret = hw_init(gpu);
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+
+ return ret;
+}
+
+static void a6xx_dump(struct msm_gpu *gpu)
+{
+ DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n",
+ gpu_read(gpu, REG_A6XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+static void a6xx_recover(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int i, active_submits;
+
+ adreno_dump_info(gpu);
+
+ for (i = 0; i < 8; i++)
+ DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
+
+ if (hang_debug)
+ a6xx_dump(gpu);
+
+ /*
+	 * Mark the GPU as hung so that recovery specific sequences are handled
+	 * during the rpm suspend we are about to trigger
+ */
+ a6xx_gpu->hung = true;
+
+ /* Halt SQE first */
+ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
+
+ pm_runtime_dont_use_autosuspend(&gpu->pdev->dev);
+
+	/* active_submits won't change until we make a submission */
+ mutex_lock(&gpu->active_lock);
+ active_submits = gpu->active_submits;
+
+ /*
+ * Temporarily clear active_submits count to silence a WARN() in the
+ * runtime suspend cb
+ */
+ gpu->active_submits = 0;
+
+ if (adreno_has_gmu_wrapper(adreno_gpu)) {
+ /* Drain the outstanding traffic on memory buses */
+ a6xx_bus_clear_pending_transactions(adreno_gpu, true);
+
+ /* Reset the GPU to a clean state */
+ a6xx_gpu_sw_reset(gpu, true);
+ a6xx_gpu_sw_reset(gpu, false);
+ }
+
+ reinit_completion(&gmu->pd_gate);
+ dev_pm_genpd_add_notifier(gmu->cxpd, &gmu->pd_nb);
+ dev_pm_genpd_synced_poweroff(gmu->cxpd);
+
+ /* Drop the rpm refcount from active submits */
+ if (active_submits)
+ pm_runtime_put(&gpu->pdev->dev);
+
+ /* And the final one from recover worker */
+ pm_runtime_put_sync(&gpu->pdev->dev);
+
+ if (!wait_for_completion_timeout(&gmu->pd_gate, msecs_to_jiffies(1000)))
+ DRM_DEV_ERROR(&gpu->pdev->dev, "cx gdsc didn't collapse\n");
+
+ dev_pm_genpd_remove_notifier(gmu->cxpd);
+
+ pm_runtime_use_autosuspend(&gpu->pdev->dev);
+
+ if (active_submits)
+ pm_runtime_get(&gpu->pdev->dev);
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+
+ gpu->active_submits = active_submits;
+ mutex_unlock(&gpu->active_lock);
+
+ msm_gpu_hw_init(gpu);
+ a6xx_gpu->hung = false;
+}
+
+static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid)
+{
+ static const char *uche_clients[7] = {
+ "VFD", "SP", "VSC", "VPC", "HLSQ", "PC", "LRZ",
+ };
+ u32 val;
+
+ if (mid < 1 || mid > 3)
+ return "UNKNOWN";
+
+ /*
+	 * The source of the data depends on the mid ID read from FSYNR1
+	 * and the client ID read from the UCHE block
+ */
+ val = gpu_read(gpu, REG_A6XX_UCHE_CLIENT_PF);
+
+ /* mid = 3 is most precise and refers to only one block per client */
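+	/* e.g. mid == 3 with a client id of 2 decodes to "VSC" */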
+ if (mid == 3)
+ return uche_clients[val & 7];
+
+ /* For mid=2 the source is TP or VFD except when the client id is 0 */
+ if (mid == 2)
+ return ((val & 7) == 0) ? "TP" : "TP|VFD";
+
+ /* For mid=1 just return "UCHE" as a catchall for everything else */
+ return "UCHE";
+}
+
+static const char *a6xx_fault_block(struct msm_gpu *gpu, u32 id)
+{
+ if (id == 0)
+ return "CP";
+ else if (id == 4)
+ return "CCU";
+ else if (id == 6)
+ return "CDP Prefetch";
+
+ return a6xx_uche_fault_block(gpu, id);
+}
+
+static int a6xx_fault_handler(void *arg, unsigned long iova, int flags, void *data)
+{
+ struct msm_gpu *gpu = arg;
+ struct adreno_smmu_fault_info *info = data;
+ const char *block = "unknown";
+
+ u32 scratch[] = {
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)),
+ };
+
+ if (info)
+ block = a6xx_fault_block(gpu, info->fsynr1 & 0xff);
+
+ return adreno_fault_handler(gpu, iova, flags, info, block, scratch);
+}
+
+static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A6XX_CP_INTERRUPT_STATUS);
+
+ if (status & A6XX_CP_INT_CP_OPCODE_ERROR) {
+ u32 val;
+
+ gpu_write(gpu, REG_A6XX_CP_SQE_STAT_ADDR, 1);
+ val = gpu_read(gpu, REG_A6XX_CP_SQE_STAT_DATA);
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP | opcode error | possible opcode=0x%8.8X\n",
+ val);
+ }
+
+ if (status & A6XX_CP_INT_CP_UCODE_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP ucode error interrupt\n");
+
+ if (status & A6XX_CP_INT_CP_HW_FAULT_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n",
+ gpu_read(gpu, REG_A6XX_CP_HW_FAULT));
+
+ if (status & A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
+ u32 val = gpu_read(gpu, REG_A6XX_CP_PROTECT_STATUS);
+
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
+ val & (1 << 20) ? "READ" : "WRITE",
+ (val & 0x3ffff), val);
+ }
+
+ if (status & A6XX_CP_INT_CP_AHB_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n");
+
+ if (status & A6XX_CP_INT_CP_VSD_PARITY_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n");
+
+ if (status & A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n");
+
+}
+
+static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+
+ /*
+ * If stalled on SMMU fault, we could trip the GPU's hang detection,
+ * but the fault handler will trigger the devcore dump, and we want
+ * to otherwise resume normally rather than killing the submit, so
+ * just bail.
+ */
+ if (gpu_read(gpu, REG_A6XX_RBBM_STATUS3) & A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT)
+ return;
+
+ /*
+ * Force the GPU to stay on until after we finish
+ * collecting information
+ */
+ if (!adreno_has_gmu_wrapper(adreno_gpu))
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);
+
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+ ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
+ gpu_read(gpu, REG_A6XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
+ gpu_read64(gpu, REG_A6XX_CP_IB1_BASE),
+ gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
+ gpu_read64(gpu, REG_A6XX_CP_IB2_BASE),
+ gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE));
+
+ /* Turn off the hangcheck timer to keep it from bothering us */
+ del_timer(&gpu->hangcheck_timer);
+
+ kthread_queue_work(gpu->worker, &gpu->recover_work);
+}
+
+static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);
+
+ gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);
+
+ if (priv->disable_err_irq)
+ status &= A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS;
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT)
+ a6xx_fault_detect_irq(gpu);
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_HW_ERROR)
+ a6xx_cp_hw_err_irq(gpu);
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW)
+ dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
+ dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
+ dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
+static void a6xx_llc_deactivate(struct a6xx_gpu *a6xx_gpu)
+{
+ llcc_slice_deactivate(a6xx_gpu->llc_slice);
+ llcc_slice_deactivate(a6xx_gpu->htw_llc_slice);
+}
+
+static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
+{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ u32 cntl1_regval = 0;
+
+ if (IS_ERR(a6xx_gpu->llc_mmio))
+ return;
+
+ if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
+ u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
+
+ gpu_scid &= 0x1f;
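+		/*
+		 * Replicate the 5-bit slice id into each of the five fields
+		 * packed into cntl1_regval below.
+		 */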
+ cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
+ (gpu_scid << 15) | (gpu_scid << 20);
+
+ /* On A660, the SCID programming for UCHE traffic is done in
+ * A6XX_GBIF_SCACHE_CNTL0[14:10]
+ */
+ if (adreno_is_a660_family(adreno_gpu))
+ gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
+ (1 << 8), (gpu_scid << 10) | (1 << 8));
+ }
+
+ /*
+	 * For targets with an MMU500, activate the slice but don't program the
+ * register. The XBL will take care of that.
+ */
+ if (!llcc_slice_activate(a6xx_gpu->htw_llc_slice)) {
+ if (!a6xx_gpu->have_mmu500) {
+ u32 gpuhtw_scid = llcc_get_slice_id(a6xx_gpu->htw_llc_slice);
+
+ gpuhtw_scid &= 0x1f;
+ cntl1_regval |= FIELD_PREP(GENMASK(29, 25), gpuhtw_scid);
+ }
+ }
+
+ if (!cntl1_regval)
+ return;
+
+ /*
+ * Program the slice IDs for the various GPU blocks and GPU MMU
+ * pagetables
+ */
+ if (!a6xx_gpu->have_mmu500) {
+ a6xx_llc_write(a6xx_gpu,
+ REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval);
+
+ /*
+ * Program cacheability overrides to not allocate cache
+ * lines on a write miss
+ */
+ a6xx_llc_rmw(a6xx_gpu,
+ REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03);
+ return;
+ }
+
+ gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
+}
+
+static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
+{
+ /* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */
+ if (adreno_has_gmu_wrapper(&a6xx_gpu->base))
+ return;
+
+ llcc_slice_putd(a6xx_gpu->llc_slice);
+ llcc_slice_putd(a6xx_gpu->htw_llc_slice);
+}
+
+static void a6xx_llc_slices_init(struct platform_device *pdev,
+ struct a6xx_gpu *a6xx_gpu)
+{
+ struct device_node *phandle;
+
+ /* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */
+ if (adreno_has_gmu_wrapper(&a6xx_gpu->base))
+ return;
+
+ /*
+ * There is a different programming path for targets with an mmu500
+ * attached, so detect if that is the case
+ */
+ phandle = of_parse_phandle(pdev->dev.of_node, "iommus", 0);
+ a6xx_gpu->have_mmu500 = (phandle &&
+ of_device_is_compatible(phandle, "arm,mmu-500"));
+ of_node_put(phandle);
+
+ if (a6xx_gpu->have_mmu500)
+ a6xx_gpu->llc_mmio = NULL;
+ else
+ a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem");
+
+ a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
+ a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
+
+ if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
+ a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL);
+}
+
+#define GBIF_CLIENT_HALT_MASK BIT(0)
+#define GBIF_ARB_HALT_MASK BIT(1)
+#define VBIF_XIN_HALT_CTRL0_MASK GENMASK(3, 0)
+#define VBIF_RESET_ACK_MASK 0xF0
+#define GPR0_GBIF_HALT_REQUEST 0x1E0
+
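+/*
+ * Drain outstanding traffic on the memory buses before a reset or power
+ * collapse: a619_holi uses the GPR0 based halt request, targets without a
+ * GBIF fall back to the legacy VBIF XIN halt handshake, and everything else
+ * halts the GBIF client and arbiter sides in turn before clearing the halt.
+ */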
+void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off)
+{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ if (adreno_is_a619_holi(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, GPR0_GBIF_HALT_REQUEST);
+ spin_until((gpu_read(gpu, REG_A6XX_RBBM_VBIF_GX_RESET_STATUS) &
+ (VBIF_RESET_ACK_MASK)) == VBIF_RESET_ACK_MASK);
+ } else if (!a6xx_has_gbif(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, VBIF_XIN_HALT_CTRL0_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
+ (VBIF_XIN_HALT_CTRL0_MASK)) == VBIF_XIN_HALT_CTRL0_MASK);
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ return;
+ }
+
+ if (gx_off) {
+ /* Halt the gx side of GBIF */
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
+ spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
+ }
+
+ /* Halt new client requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
+
+ /* Halt all AXI requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
+
+ /* The GBIF halt needs to be explicitly cleared */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+}
+
+void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert)
+{
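+	/*
+	 * Callers pulse the reset line by calling this with assert = true and
+	 * then false, as done in a6xx_recover() above.
+	 */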
+ /* 11nm chips (e.g. ones with A610) have hw issues with the reset line! */
+ if (adreno_is_a610(to_adreno_gpu(gpu)))
+ return;
+
+ gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, assert);
+ /* Perform a bogus read and add a brief delay to ensure ordering. */
+ gpu_read(gpu, REG_A6XX_RBBM_SW_RESET_CMD);
+ udelay(1);
+
+ /* The reset line needs to be asserted for at least 100 us */
+ if (assert)
+ udelay(100);
+}
+
+static int a6xx_gmu_pm_resume(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int ret;
+
+ gpu->needs_hw_init = true;
+
+ trace_msm_gpu_resume(0);
+
+ mutex_lock(&a6xx_gpu->gmu.lock);
+ ret = a6xx_gmu_resume(a6xx_gpu);
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+ if (ret)
+ return ret;
+
+ msm_devfreq_resume(gpu);
+
+ a6xx_llc_activate(a6xx_gpu);
+
+ return ret;
+}
+
+static int a6xx_pm_resume(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ unsigned long freq = gpu->fast_rate;
+ struct dev_pm_opp *opp;
+ int ret;
+
+ gpu->needs_hw_init = true;
+
+ trace_msm_gpu_resume(0);
+
+ mutex_lock(&a6xx_gpu->gmu.lock);
+
+ opp = dev_pm_opp_find_freq_ceil(&gpu->pdev->dev, &freq);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ goto err_set_opp;
+ }
+ dev_pm_opp_put(opp);
+
+ /* Set the core clock and bus bw, having VDD scaling in mind */
+ dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
+
+ pm_runtime_resume_and_get(gmu->dev);
+ pm_runtime_resume_and_get(gmu->gxpd);
+
+ ret = clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
+ if (ret)
+ goto err_bulk_clk;
+
+ if (adreno_is_a619_holi(adreno_gpu))
+ a6xx_sptprac_enable(gmu);
+
+ /* If anything goes south, tear the GPU down piece by piece.. */
+ if (ret) {
+err_bulk_clk:
+ pm_runtime_put(gmu->gxpd);
+ pm_runtime_put(gmu->dev);
+ dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
+ }
+err_set_opp:
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+
+ if (!ret)
+ msm_devfreq_resume(gpu);
+
+ return ret;
+}
+
+static int a6xx_gmu_pm_suspend(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int i, ret;
+
+ trace_msm_gpu_suspend(0);
+
+ a6xx_llc_deactivate(a6xx_gpu);
+
+ msm_devfreq_suspend(gpu);
+
+ mutex_lock(&a6xx_gpu->gmu.lock);
+ ret = a6xx_gmu_stop(a6xx_gpu);
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+ if (ret)
+ return ret;
+
+ if (a6xx_gpu->shadow_bo)
+ for (i = 0; i < gpu->nr_rings; i++)
+ a6xx_gpu->shadow[i] = 0;
+
+ gpu->suspend_count++;
+
+ return 0;
+}
+
+static int a6xx_pm_suspend(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int i;
+
+ trace_msm_gpu_suspend(0);
+
+ msm_devfreq_suspend(gpu);
+
+ mutex_lock(&a6xx_gpu->gmu.lock);
+
+ /* Drain the outstanding traffic on memory buses */
+ a6xx_bus_clear_pending_transactions(adreno_gpu, true);
+
+ if (adreno_is_a619_holi(adreno_gpu))
+ a6xx_sptprac_disable(gmu);
+
+ clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
+
+ pm_runtime_put_sync(gmu->gxpd);
+ dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
+ pm_runtime_put_sync(gmu->dev);
+
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+
+ if (a6xx_gpu->shadow_bo)
+ for (i = 0; i < gpu->nr_rings; i++)
+ a6xx_gpu->shadow[i] = 0;
+
+ gpu->suspend_count++;
+
+ return 0;
+}
+
+static int a6xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ mutex_lock(&a6xx_gpu->gmu.lock);
+
+ /* Force the GPU power on so we can read this register */
+ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+
+ *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER);
+
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+
+ return 0;
+}
+
+static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER);
+ return 0;
+}
+
+static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ return a6xx_gpu->cur_ring;
+}
+
+static void a6xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ if (a6xx_gpu->sqe_bo) {
+ msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+ drm_gem_object_put(a6xx_gpu->sqe_bo);
+ }
+
+ if (a6xx_gpu->shadow_bo) {
+ msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace);
+ drm_gem_object_put(a6xx_gpu->shadow_bo);
+ }
+
+ a6xx_llc_slices_destroy(a6xx_gpu);
+
+ a6xx_gmu_remove(a6xx_gpu);
+
+ adreno_gpu_cleanup(adreno_gpu);
+
+ kfree(a6xx_gpu);
+}
+
+static u64 a6xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ u64 busy_cycles;
+
+ /* 19.2MHz */
+ *out_sample_rate = 19200000;
+
+ busy_cycles = gmu_read64(&a6xx_gpu->gmu,
+ REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
+ REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);
+
+ return busy_cycles;
+}
+
+static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
+ bool suspended)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ mutex_lock(&a6xx_gpu->gmu.lock);
+ a6xx_gmu_set_freq(gpu, opp, suspended);
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+}
+
+static struct msm_gem_address_space *
+a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ unsigned long quirks = 0;
+
+ /*
+	 * This allows the GPU to set the bus attributes required to use the
+	 * system cache on behalf of the IOMMU page table walker.
+ */
+ if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice) &&
+ !device_iommu_capable(&pdev->dev, IOMMU_CAP_CACHE_COHERENCY))
+ quirks |= IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
+
+ return adreno_iommu_create_address_space(gpu, pdev, quirks);
+}
+
+static struct msm_gem_address_space *
+a6xx_create_private_address_space(struct msm_gpu *gpu)
+{
+ struct msm_mmu *mmu;
+
+ mmu = msm_iommu_pagetable_create(gpu->aspace->mmu);
+
+ if (IS_ERR(mmu))
+ return ERR_CAST(mmu);
+
+ return msm_gem_address_space_create(mmu,
+ "gpu", 0x100000000ULL,
+ adreno_private_address_space_size(gpu));
+}
+
+static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
+ return a6xx_gpu->shadow[ring->id];
+
+ return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
+}
+
+static bool a6xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct msm_cp_state cp_state = {
+ .ib1_base = gpu_read64(gpu, REG_A6XX_CP_IB1_BASE),
+ .ib2_base = gpu_read64(gpu, REG_A6XX_CP_IB2_BASE),
+ .ib1_rem = gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
+ .ib2_rem = gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE),
+ };
+ bool progress;
+
+ /*
+ * Adjust the remaining data to account for what has already been
+ * fetched from memory, but not yet consumed by the SQE.
+ *
+	 * This is not *technically* correct; the amount buffered could
+	 * exceed the IB size due to hw prefetching ahead, but:
+ *
+ * (1) We aren't trying to find the exact position, just whether
+ * progress has been made
+ * (2) The CP_REG_TO_MEM at the end of a submit should be enough
+ * to prevent prefetching into an unrelated submit. (And
+ * either way, at some point the ROQ will be full.)
+ */
+ cp_state.ib1_rem += gpu_read(gpu, REG_A6XX_CP_ROQ_AVAIL_IB1) >> 16;
+ cp_state.ib2_rem += gpu_read(gpu, REG_A6XX_CP_ROQ_AVAIL_IB2) >> 16;
+
+ progress = !!memcmp(&cp_state, &ring->last_cp_state, sizeof(cp_state));
+
+ ring->last_cp_state = cp_state;
+
+ return progress;
+}
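+
+/*
+ * Worked example (hypothetical register values): if CP_IB1_REM_SIZE reads
+ * 0x10 and the top half of CP_ROQ_AVAIL_IB1 reads 0x5, ib1_rem becomes
+ * 0x15, i.e. the five dwords already prefetched into the ROQ but not yet
+ * consumed by the SQE still count as pending work for IB1.
+ */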
+
+static u32 fuse_to_supp_hw(const struct adreno_info *info, u32 fuse)
+{
+ if (!info->speedbins)
+ return UINT_MAX;
+
+ for (int i = 0; info->speedbins[i].fuse != SHRT_MAX; i++)
+ if (info->speedbins[i].fuse == fuse)
+ return BIT(info->speedbins[i].speedbin);
+
+ return UINT_MAX;
+}
+
+static int a6xx_set_supported_hw(struct device *dev, const struct adreno_info *info)
+{
+ u32 supp_hw;
+ u32 speedbin;
+ int ret;
+
+ ret = adreno_read_speedbin(dev, &speedbin);
+ /*
+	 * -ENOENT means that the platform doesn't support speedbin, which is
+	 * fine
+ */
+ if (ret == -ENOENT) {
+ return 0;
+ } else if (ret) {
+ dev_err_probe(dev, ret,
+ "failed to read speed-bin. Some OPPs may not be supported by hardware\n");
+ return ret;
+ }
+
+ supp_hw = fuse_to_supp_hw(info, speedbin);
+
+ if (supp_hw == UINT_MAX) {
+ DRM_DEV_ERROR(dev,
+ "missing support for speed-bin: %u. Some OPPs may not be supported by hardware\n",
+ speedbin);
+ supp_hw = BIT(0); /* Default */
+ }
+
+ ret = devm_pm_opp_set_supported_hw(dev, &supp_hw, 1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
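+
+/*
+ * Worked example (hypothetical fuse/speedbin pairing): if the fuse reads
+ * 138 and the catalog entry is { .fuse = 138, .speedbin = 1 }, then
+ * fuse_to_supp_hw() returns BIT(1) == 0x2, and
+ * devm_pm_opp_set_supported_hw() will enable only those OPPs whose
+ * "opp-supported-hw" property has bit 1 set.
+ */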
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a6xx_hw_init,
+ .ucode_load = a6xx_ucode_load,
+ .pm_suspend = a6xx_gmu_pm_suspend,
+ .pm_resume = a6xx_gmu_pm_resume,
+ .recover = a6xx_recover,
+ .submit = a6xx_submit,
+ .active_ring = a6xx_active_ring,
+ .irq = a6xx_irq,
+ .destroy = a6xx_destroy,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
+ .show = a6xx_show,
+#endif
+ .gpu_busy = a6xx_gpu_busy,
+ .gpu_get_freq = a6xx_gmu_get_freq,
+ .gpu_set_freq = a6xx_gpu_set_freq,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
+ .gpu_state_get = a6xx_gpu_state_get,
+ .gpu_state_put = a6xx_gpu_state_put,
+#endif
+ .create_address_space = a6xx_create_address_space,
+ .create_private_address_space = a6xx_create_private_address_space,
+ .get_rptr = a6xx_get_rptr,
+ .progress = a6xx_progress,
+ },
+ .get_timestamp = a6xx_gmu_get_timestamp,
+};
+
+static const struct adreno_gpu_funcs funcs_gmuwrapper = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a6xx_hw_init,
+ .ucode_load = a6xx_ucode_load,
+ .pm_suspend = a6xx_pm_suspend,
+ .pm_resume = a6xx_pm_resume,
+ .recover = a6xx_recover,
+ .submit = a6xx_submit,
+ .active_ring = a6xx_active_ring,
+ .irq = a6xx_irq,
+ .destroy = a6xx_destroy,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
+ .show = a6xx_show,
+#endif
+ .gpu_busy = a6xx_gpu_busy,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
+ .gpu_state_get = a6xx_gpu_state_get,
+ .gpu_state_put = a6xx_gpu_state_put,
+#endif
+ .create_address_space = a6xx_create_address_space,
+ .create_private_address_space = a6xx_create_private_address_space,
+ .get_rptr = a6xx_get_rptr,
+ .progress = a6xx_progress,
+ },
+ .get_timestamp = a6xx_get_timestamp,
+};
+
+struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct adreno_platform_config *config = pdev->dev.platform_data;
+ struct device_node *node;
+ struct a6xx_gpu *a6xx_gpu;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ int ret;
+
+ a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
+ if (!a6xx_gpu)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu = &a6xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ mutex_init(&a6xx_gpu->gmu.lock);
+
+ adreno_gpu->registers = NULL;
+
+ /* Check if there is a GMU phandle and set it up */
+ node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
+ /* FIXME: How do we gracefully handle this? */
+ BUG_ON(!node);
+
+ adreno_gpu->gmu_is_wrapper = of_device_is_compatible(node, "qcom,adreno-gmu-wrapper");
+
+ adreno_gpu->base.hw_apriv =
+ !!(config->info->quirks & ADRENO_QUIRK_HAS_HW_APRIV);
+
+ a6xx_llc_slices_init(pdev, a6xx_gpu);
+
+ ret = a6xx_set_supported_hw(&pdev->dev, config->info);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_gmuwrapper, 1);
+ else
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ /*
+ * For now only clamp to idle freq for devices where this is known not
+ * to cause power supply issues:
+ */
+ if (adreno_is_a618(adreno_gpu) || adreno_is_7c3(adreno_gpu))
+ priv->gpu_clamp_to_idle = true;
+
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ ret = a6xx_gmu_wrapper_init(a6xx_gpu, node);
+ else
+ ret = a6xx_gmu_init(a6xx_gpu, node);
+ of_node_put(node);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ if (gpu->aspace)
+ msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
+ a6xx_fault_handler);
+
+ return gpu;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
new file mode 100644
index 0000000000..34822b0807
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. */
+
+#ifndef __A6XX_GPU_H__
+#define __A6XX_GPU_H__
+
+
+#include "adreno_gpu.h"
+#include "a6xx.xml.h"
+
+#include "a6xx_gmu.h"
+
+extern bool hang_debug;
+
+struct a6xx_gpu {
+ struct adreno_gpu base;
+
+ struct drm_gem_object *sqe_bo;
+ uint64_t sqe_iova;
+
+ struct msm_ringbuffer *cur_ring;
+
+ struct a6xx_gmu gmu;
+
+ struct drm_gem_object *shadow_bo;
+ uint64_t shadow_iova;
+ uint32_t *shadow;
+
+ bool has_whereami;
+
+ void __iomem *llc_mmio;
+ void *llc_slice;
+ void *htw_llc_slice;
+ bool have_mmu500;
+ bool hung;
+};
+
+#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
+
+/*
+ * Given a register and a count, return a value to program into
+ * REG_CP_PROTECT_REG(n) - this will block both reads and writes for
+ * _len + 1 registers starting at _reg.
+ */
+#define A6XX_PROTECT_NORDWR(_reg, _len) \
+ ((1 << 31) | \
+ (((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
+
+/*
+ * Same as above, but allow reads over the range. For areas of mixed use (such
+ * as performance counters) this allows us to protect a much larger range with a
+ * single register
+ */
+#define A6XX_PROTECT_RDONLY(_reg, _len) \
+ ((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
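+
+/*
+ * Worked example (hypothetical range): A6XX_PROTECT_NORDWR(0x8d0, 0x23)
+ * evaluates to (1 << 31) | (0x23 << 18) | 0x8d0 == 0x808c08d0, which blocks
+ * both reads and writes for the 0x24 registers 0x8d0..0x8f3.
+ */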
+
+static inline bool a6xx_has_gbif(struct adreno_gpu *gpu)
+{
+	if (adreno_is_a630(gpu))
+ return false;
+
+ return true;
+}
+
+static inline void a6xx_llc_rmw(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 mask, u32 or)
+{
+	msm_rmw(a6xx_gpu->llc_mmio + (reg << 2), mask, or);
+}
+
+static inline u32 a6xx_llc_read(struct a6xx_gpu *a6xx_gpu, u32 reg)
+{
+ return msm_readl(a6xx_gpu->llc_mmio + (reg << 2));
+}
+
+static inline void a6xx_llc_write(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 value)
+{
+ msm_writel(value, a6xx_gpu->llc_mmio + (reg << 2));
+}
+
+#define shadowptr(_a6xx_gpu, _ring) ((_a6xx_gpu)->shadow_iova + \
+ ((_ring)->id * sizeof(uint32_t)))
+
+int a6xx_gmu_resume(struct a6xx_gpu *gpu);
+int a6xx_gmu_stop(struct a6xx_gpu *gpu);
+
+int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu);
+
+bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);
+
+int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
+void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
+
+int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
+int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
+void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
+
+void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
+ bool suspended);
+unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);
+
+void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p);
+
+struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu);
+int a6xx_gpu_state_put(struct msm_gpu_state *state);
+
+void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off);
+void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert);
+
+#endif /* __A6XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
new file mode 100644
index 0000000000..4e5d650578
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -0,0 +1,1401 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */
+
+#include <linux/ascii85.h>
+#include "msm_gem.h"
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.h"
+#include "a6xx_gpu_state.h"
+#include "a6xx_gmu.xml.h"
+
+struct a6xx_gpu_state_obj {
+ const void *handle;
+ u32 *data;
+};
+
+struct a6xx_gpu_state {
+ struct msm_gpu_state base;
+
+ struct a6xx_gpu_state_obj *gmu_registers;
+ int nr_gmu_registers;
+
+ struct a6xx_gpu_state_obj *registers;
+ int nr_registers;
+
+ struct a6xx_gpu_state_obj *shaders;
+ int nr_shaders;
+
+ struct a6xx_gpu_state_obj *clusters;
+ int nr_clusters;
+
+ struct a6xx_gpu_state_obj *dbgahb_clusters;
+ int nr_dbgahb_clusters;
+
+ struct a6xx_gpu_state_obj *indexed_regs;
+ int nr_indexed_regs;
+
+ struct a6xx_gpu_state_obj *debugbus;
+ int nr_debugbus;
+
+ struct a6xx_gpu_state_obj *vbif_debugbus;
+
+ struct a6xx_gpu_state_obj *cx_debugbus;
+ int nr_cx_debugbus;
+
+ struct msm_gpu_state_bo *gmu_log;
+ struct msm_gpu_state_bo *gmu_hfi;
+ struct msm_gpu_state_bo *gmu_debug;
+
+ s32 hfi_queue_history[2][HFI_HISTORY_SZ];
+
+ struct list_head objs;
+
+ bool gpu_initialized;
+};
+
+static inline int CRASHDUMP_WRITE(u64 *in, u32 reg, u32 val)
+{
+ in[0] = val;
+ in[1] = (((u64) reg) << 44 | (1 << 21) | 1);
+
+ return 2;
+}
+
+static inline int CRASHDUMP_READ(u64 *in, u32 reg, u32 dwords, u64 target)
+{
+ in[0] = target;
+ in[1] = (((u64) reg) << 44 | dwords);
+
+ return 2;
+}
+
+static inline int CRASHDUMP_FINI(u64 *in)
+{
+ in[0] = 0;
+ in[1] = 0;
+
+ return 2;
+}
+
+struct a6xx_crashdumper {
+ void *ptr;
+ struct drm_gem_object *bo;
+ u64 iova;
+};
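+
+/*
+ * Minimal sketch (hypothetical register offset, dword count and statetype)
+ * of how the helpers above compose into a crashdumper script: program a
+ * selector, dump a few dwords into the data region, then terminate.  The
+ * literal 8192 matches A6XX_CD_DATA_OFFSET defined below.
+ */
+static void __maybe_unused a6xx_crashdump_script_example(struct a6xx_crashdumper *dumper)
+{
+	u64 *in = dumper->ptr;
+
+	/* Each helper emits one two-qword opcode and returns its length */
+	in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL, 0x41 << 8);
+	in += CRASHDUMP_READ(in, 0xbe00, 4, dumper->iova + 8192);
+	CRASHDUMP_FINI(in);
+}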
+
+struct a6xx_state_memobj {
+ struct list_head node;
+ unsigned long long data[];
+};
+
+static void *state_kcalloc(struct a6xx_gpu_state *a6xx_state, int nr, size_t objsize)
+{
+ struct a6xx_state_memobj *obj =
+ kvzalloc((nr * objsize) + sizeof(*obj), GFP_KERNEL);
+
+ if (!obj)
+ return NULL;
+
+ list_add_tail(&obj->node, &a6xx_state->objs);
+ return &obj->data;
+}
+
+static void *state_kmemdup(struct a6xx_gpu_state *a6xx_state, void *src,
+ size_t size)
+{
+ void *dst = state_kcalloc(a6xx_state, 1, size);
+
+ if (dst)
+ memcpy(dst, src, size);
+ return dst;
+}
+
+/*
+ * Allocate 1MB for the crashdumper scratch region - 8k for the script and
+ * the rest for the data
+ */
+#define A6XX_CD_DATA_OFFSET 8192
+#define A6XX_CD_DATA_SIZE (SZ_1M - 8192)
+
+static int a6xx_crashdumper_init(struct msm_gpu *gpu,
+ struct a6xx_crashdumper *dumper)
+{
+ dumper->ptr = msm_gem_kernel_new(gpu->dev,
+ SZ_1M, MSM_BO_WC, gpu->aspace,
+ &dumper->bo, &dumper->iova);
+
+ if (!IS_ERR(dumper->ptr))
+ msm_gem_object_set_name(dumper->bo, "crashdump");
+
+ return PTR_ERR_OR_ZERO(dumper->ptr);
+}
+
+static int a6xx_crashdumper_run(struct msm_gpu *gpu,
+ struct a6xx_crashdumper *dumper)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ u32 val;
+ int ret;
+
+ if (IS_ERR_OR_NULL(dumper->ptr))
+ return -EINVAL;
+
+ if (!a6xx_gmu_sptprac_is_on(&a6xx_gpu->gmu))
+ return -EINVAL;
+
+ /* Make sure all pending memory writes are posted */
+ wmb();
+
+ gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE, dumper->iova);
+
+ gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);
+
+ ret = gpu_poll_timeout(gpu, REG_A6XX_CP_CRASH_DUMP_STATUS, val,
+ val & 0x02, 100, 10000);
+
+ gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 0);
+
+ return ret;
+}
+
+/* read a value from the GX debug bus */
+static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
+ u32 *data)
+{
+ u32 reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
+ A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);
+
+ /* Wait 1 us to make sure the data is flowing */
+ udelay(1);
+
+ data[0] = gpu_read(gpu, REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2);
+ data[1] = gpu_read(gpu, REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1);
+
+ return 2;
+}
+
+#define cxdbg_write(ptr, offset, val) \
+ msm_writel((val), (ptr) + ((offset) << 2))
+
+#define cxdbg_read(ptr, offset) \
+ msm_readl((ptr) + ((offset) << 2))
+
+/* read a value from the CX debug bus */
+static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset,
+ u32 *data)
+{
+ u32 reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
+ A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
+
+ /* Wait 1 us to make sure the data is flowing */
+ udelay(1);
+
+ data[0] = cxdbg_read(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2);
+ data[1] = cxdbg_read(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1);
+
+ return 2;
+}
+
+/* Read a chunk of data from the VBIF debug bus */
+static int vbif_debugbus_read(struct msm_gpu *gpu, u32 ctrl0, u32 ctrl1,
+ u32 reg, int count, u32 *data)
+{
+ int i;
+
+ gpu_write(gpu, ctrl0, reg);
+
+ for (i = 0; i < count; i++) {
+ gpu_write(gpu, ctrl1, i);
+ data[i] = gpu_read(gpu, REG_A6XX_VBIF_TEST_BUS_OUT);
+ }
+
+ return count;
+}
+
+#define AXI_ARB_BLOCKS 2
+#define XIN_AXI_BLOCKS 5
+#define XIN_CORE_BLOCKS 4
+
+#define VBIF_DEBUGBUS_BLOCK_SIZE \
+ ((16 * AXI_ARB_BLOCKS) + \
+ (18 * XIN_AXI_BLOCKS) + \
+ (12 * XIN_CORE_BLOCKS))
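+
+/*
+ * For reference, this works out to (16 * 2) + (18 * 5) + (12 * 4) =
+ * 32 + 90 + 48 = 170 dwords per VBIF debugbus snapshot.
+ */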
+
+static void a6xx_get_vbif_debugbus_block(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_gpu_state_obj *obj)
+{
+ u32 clk, *ptr;
+ int i;
+
+ obj->data = state_kcalloc(a6xx_state, VBIF_DEBUGBUS_BLOCK_SIZE,
+ sizeof(u32));
+ if (!obj->data)
+ return;
+
+ obj->handle = NULL;
+
+ /* Get the current clock setting */
+ clk = gpu_read(gpu, REG_A6XX_VBIF_CLKON);
+
+ /* Force on the bus so we can read it */
+ gpu_write(gpu, REG_A6XX_VBIF_CLKON,
+ clk | A6XX_VBIF_CLKON_FORCE_ON_TESTBUS);
+
+ /* We will read from BUS2 first, so disable BUS1 */
+ gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS1_CTRL0, 0);
+
+ /* Enable the VBIF bus for reading */
+ gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS_OUT_CTRL, 1);
+
+ ptr = obj->data;
+
+ for (i = 0; i < AXI_ARB_BLOCKS; i++)
+ ptr += vbif_debugbus_read(gpu,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL0,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL1,
+ 1 << (i + 16), 16, ptr);
+
+ for (i = 0; i < XIN_AXI_BLOCKS; i++)
+ ptr += vbif_debugbus_read(gpu,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL0,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL1,
+ 1 << i, 18, ptr);
+
+ /* Stop BUS2 so we can turn on BUS1 */
+ gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS2_CTRL0, 0);
+
+ for (i = 0; i < XIN_CORE_BLOCKS; i++)
+ ptr += vbif_debugbus_read(gpu,
+ REG_A6XX_VBIF_TEST_BUS1_CTRL0,
+ REG_A6XX_VBIF_TEST_BUS1_CTRL1,
+ 1 << i, 12, ptr);
+
+ /* Restore the VBIF clock setting */
+ gpu_write(gpu, REG_A6XX_VBIF_CLKON, clk);
+}
+
+static void a6xx_get_debugbus_block(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_debugbus_block *block,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i;
+ u32 *ptr;
+
+ obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
+ if (!obj->data)
+ return;
+
+ obj->handle = block;
+
+ for (ptr = obj->data, i = 0; i < block->count; i++)
+ ptr += debugbus_read(gpu, block->id, i, ptr);
+}
+
+static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_debugbus_block *block,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i;
+ u32 *ptr;
+
+ obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
+ if (!obj->data)
+ return;
+
+ obj->handle = block;
+
+ for (ptr = obj->data, i = 0; i < block->count; i++)
+ ptr += cx_debugbus_read(cxdbg, block->id, i, ptr);
+}
+
+static void a6xx_get_debugbus(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ struct resource *res;
+ void __iomem *cxdbg = NULL;
+ int nr_debugbus_blocks;
+
+ /* Set up the GX debug bus */
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT,
+ A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM,
+ A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0, 0x76543210);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1, 0xFEDCBA98);
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+
+ /* Set up the CX debug bus - it lives elsewhere in the system so do a
+ * temporary ioremap for the registers
+ */
+ res = platform_get_resource_byname(gpu->pdev, IORESOURCE_MEM,
+ "cx_dbgc");
+
+ if (res)
+ cxdbg = ioremap(res->start, resource_size(res));
+
+ if (cxdbg) {
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
+ A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
+
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
+ A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
+
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
+ 0x76543210);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
+ 0xFEDCBA98);
+
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+ }
+
+ nr_debugbus_blocks = ARRAY_SIZE(a6xx_debugbus_blocks) +
+ (a6xx_has_gbif(to_adreno_gpu(gpu)) ? 1 : 0);
+
+ if (adreno_is_a650_family(to_adreno_gpu(gpu)))
+ nr_debugbus_blocks += ARRAY_SIZE(a650_debugbus_blocks);
+
+ a6xx_state->debugbus = state_kcalloc(a6xx_state, nr_debugbus_blocks,
+ sizeof(*a6xx_state->debugbus));
+
+ if (a6xx_state->debugbus) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_debugbus_blocks); i++)
+ a6xx_get_debugbus_block(gpu,
+ a6xx_state,
+ &a6xx_debugbus_blocks[i],
+ &a6xx_state->debugbus[i]);
+
+ a6xx_state->nr_debugbus = ARRAY_SIZE(a6xx_debugbus_blocks);
+
+ /*
+		 * GBIF has the same debugbus as the other GPU blocks, so fall
+		 * back to the default path if the GPU uses GBIF; GBIF also
+		 * uses exactly the same ID as VBIF.
+ */
+ if (a6xx_has_gbif(to_adreno_gpu(gpu))) {
+ a6xx_get_debugbus_block(gpu, a6xx_state,
+ &a6xx_gbif_debugbus_block,
+ &a6xx_state->debugbus[i]);
+
+ a6xx_state->nr_debugbus += 1;
+ }
+
+		if (adreno_is_a650_family(to_adreno_gpu(gpu))) {
+			for (i = 0; i < ARRAY_SIZE(a650_debugbus_blocks); i++)
+				a6xx_get_debugbus_block(gpu,
+					a6xx_state,
+					&a650_debugbus_blocks[i],
+					&a6xx_state->debugbus[a6xx_state->nr_debugbus + i]);
+
+			a6xx_state->nr_debugbus += ARRAY_SIZE(a650_debugbus_blocks);
+		}
+ }
+
+ /* Dump the VBIF debugbus on applicable targets */
+ if (!a6xx_has_gbif(to_adreno_gpu(gpu))) {
+ a6xx_state->vbif_debugbus =
+ state_kcalloc(a6xx_state, 1,
+ sizeof(*a6xx_state->vbif_debugbus));
+
+ if (a6xx_state->vbif_debugbus)
+ a6xx_get_vbif_debugbus_block(gpu, a6xx_state,
+ a6xx_state->vbif_debugbus);
+ }
+
+ if (cxdbg) {
+ a6xx_state->cx_debugbus =
+ state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_cx_debugbus_blocks),
+ sizeof(*a6xx_state->cx_debugbus));
+
+ if (a6xx_state->cx_debugbus) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_cx_debugbus_blocks); i++)
+ a6xx_get_cx_debugbus_block(cxdbg,
+ a6xx_state,
+ &a6xx_cx_debugbus_blocks[i],
+ &a6xx_state->cx_debugbus[i]);
+
+ a6xx_state->nr_cx_debugbus =
+ ARRAY_SIZE(a6xx_cx_debugbus_blocks);
+ }
+
+ iounmap(cxdbg);
+ }
+}
+
+#define RANGE(reg, a) ((reg)[(a) + 1] - (reg)[(a)] + 1)
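+
+/*
+ * RANGE() treats a register table as { start, end } pairs.  For example,
+ * with the a6xx_gras_cluster table in a6xx_gpu_state.h, which starts
+ * { 0x8000, 0x8006, 0x8010, 0x8092, ... }, RANGE(reg, 0) is
+ * 0x8006 - 0x8000 + 1 == 7 registers and RANGE(reg, 2) is
+ * 0x8092 - 0x8010 + 1 == 0x83 registers.
+ */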
+
+/* Read a data cluster from behind the AHB aperture */
+static void a6xx_get_dbgahb_cluster(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_dbgahb_cluster *dbgahb,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ size_t datasize;
+ int i, regcount = 0;
+
+ for (i = 0; i < A6XX_NUM_CONTEXTS; i++) {
+ int j;
+
+ in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL,
+ (dbgahb->statetype + i * 2) << 8);
+
+ for (j = 0; j < dbgahb->count; j += 2) {
+ int count = RANGE(dbgahb->registers, j);
+ u32 offset = REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+ dbgahb->registers[j] - (dbgahb->base >> 2);
+
+ in += CRASHDUMP_READ(in, offset, count, out);
+
+ out += count * sizeof(u32);
+
+ if (i == 0)
+ regcount += count;
+ }
+ }
+
+ CRASHDUMP_FINI(in);
+
+ datasize = regcount * A6XX_NUM_CONTEXTS * sizeof(u32);
+
+ if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = dbgahb;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ datasize);
+}
+
+static void a6xx_get_dbgahb_clusters(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i;
+
+ a6xx_state->dbgahb_clusters = state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_dbgahb_clusters),
+ sizeof(*a6xx_state->dbgahb_clusters));
+
+ if (!a6xx_state->dbgahb_clusters)
+ return;
+
+ a6xx_state->nr_dbgahb_clusters = ARRAY_SIZE(a6xx_dbgahb_clusters);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_clusters); i++)
+ a6xx_get_dbgahb_cluster(gpu, a6xx_state,
+ &a6xx_dbgahb_clusters[i],
+ &a6xx_state->dbgahb_clusters[i], dumper);
+}
+
+/* Read a data cluster from the CP aperture with the crashdumper */
+static void a6xx_get_cluster(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_cluster *cluster,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ size_t datasize;
+ int i, regcount = 0;
+ u32 id = cluster->id;
+
+ /* Skip registers that are not present on older generation */
+ if (!adreno_is_a660_family(adreno_gpu) &&
+ cluster->registers == a660_fe_cluster)
+ return;
+
+ if (adreno_is_a650_family(adreno_gpu) &&
+ cluster->registers == a6xx_ps_cluster)
+ id = CLUSTER_VPC_PS;
+
+ /* Some clusters need a selector register to be programmed too */
+ if (cluster->sel_reg)
+ in += CRASHDUMP_WRITE(in, cluster->sel_reg, cluster->sel_val);
+
+ for (i = 0; i < A6XX_NUM_CONTEXTS; i++) {
+ int j;
+
+ in += CRASHDUMP_WRITE(in, REG_A6XX_CP_APERTURE_CNTL_CD,
+ (id << 8) | (i << 4) | i);
+
+ for (j = 0; j < cluster->count; j += 2) {
+ int count = RANGE(cluster->registers, j);
+
+ in += CRASHDUMP_READ(in, cluster->registers[j],
+ count, out);
+
+ out += count * sizeof(u32);
+
+ if (i == 0)
+ regcount += count;
+ }
+ }
+
+ CRASHDUMP_FINI(in);
+
+ datasize = regcount * A6XX_NUM_CONTEXTS * sizeof(u32);
+
+ if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = cluster;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ datasize);
+}
+
+static void a6xx_get_clusters(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i;
+
+ a6xx_state->clusters = state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_clusters), sizeof(*a6xx_state->clusters));
+
+ if (!a6xx_state->clusters)
+ return;
+
+ a6xx_state->nr_clusters = ARRAY_SIZE(a6xx_clusters);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++)
+ a6xx_get_cluster(gpu, a6xx_state, &a6xx_clusters[i],
+ &a6xx_state->clusters[i], dumper);
+}
+
+/* Read a shader / debug block from the HLSQ aperture with the crashdumper */
+static void a6xx_get_shader_block(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_shader_block *block,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ size_t datasize = block->size * A6XX_NUM_SHADER_BANKS * sizeof(u32);
+ int i;
+
+ if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+ return;
+
+ for (i = 0; i < A6XX_NUM_SHADER_BANKS; i++) {
+ in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL,
+ (block->type << 8) | i);
+
+ in += CRASHDUMP_READ(in, REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE,
+ block->size, dumper->iova + A6XX_CD_DATA_OFFSET);
+ }
+
+ CRASHDUMP_FINI(in);
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = block;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ datasize);
+}
+
+static void a6xx_get_shaders(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i;
+
+ a6xx_state->shaders = state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_shader_blocks), sizeof(*a6xx_state->shaders));
+
+ if (!a6xx_state->shaders)
+ return;
+
+ a6xx_state->nr_shaders = ARRAY_SIZE(a6xx_shader_blocks);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++)
+ a6xx_get_shader_block(gpu, a6xx_state, &a6xx_shader_blocks[i],
+ &a6xx_state->shaders[i], dumper);
+}
+
+/* Read registers from behind the HLSQ aperture with the crashdumper */
+static void a6xx_get_crashdumper_hlsq_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ int i, regcount = 0;
+
+ in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL, regs->val1);
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+ u32 offset = REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+ regs->registers[i] - (regs->val0 >> 2);
+
+ in += CRASHDUMP_READ(in, offset, count, out);
+
+ out += count * sizeof(u32);
+ regcount += count;
+ }
+
+ CRASHDUMP_FINI(in);
+
+ if (WARN_ON((regcount * sizeof(u32)) > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = regs;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ regcount * sizeof(u32));
+}
+
+/* Read a block of registers using the crashdumper */
+static void a6xx_get_crashdumper_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ int i, regcount = 0;
+
+ /* Skip unsupported registers on older generations */
+ if (!adreno_is_a660_family(to_adreno_gpu(gpu)) &&
+ (regs->registers == a660_registers))
+ return;
+
+ /* Some blocks might need to program a selector register first */
+ if (regs->val0)
+ in += CRASHDUMP_WRITE(in, regs->val0, regs->val1);
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+
+ in += CRASHDUMP_READ(in, regs->registers[i], count, out);
+
+ out += count * sizeof(u32);
+ regcount += count;
+ }
+
+ CRASHDUMP_FINI(in);
+
+ if (WARN_ON((regcount * sizeof(u32)) > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = regs;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ regcount * sizeof(u32));
+}
+
+/* Read a block of registers via AHB */
+static void a6xx_get_ahb_gpu_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i, regcount = 0, index = 0;
+
+ /* Skip unsupported registers on older generations */
+ if (!adreno_is_a660_family(to_adreno_gpu(gpu)) &&
+ (regs->registers == a660_registers))
+ return;
+
+ for (i = 0; i < regs->count; i += 2)
+ regcount += RANGE(regs->registers, i);
+
+ obj->handle = (const void *) regs;
+ obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
+ if (!obj->data)
+ return;
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+ int j;
+
+ for (j = 0; j < count; j++)
+ obj->data[index++] = gpu_read(gpu,
+ regs->registers[i] + j);
+ }
+}
+
+/* Read a block of GMU registers */
+static void _a6xx_get_gmu_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj,
+ bool rscc)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int i, regcount = 0, index = 0;
+
+ for (i = 0; i < regs->count; i += 2)
+ regcount += RANGE(regs->registers, i);
+
+ obj->handle = (const void *) regs;
+ obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
+ if (!obj->data)
+ return;
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+ int j;
+
+ for (j = 0; j < count; j++) {
+ u32 offset = regs->registers[i] + j;
+ u32 val;
+
+ if (rscc)
+ val = gmu_read_rscc(gmu, offset);
+ else
+ val = gmu_read(gmu, offset);
+
+ obj->data[index++] = val;
+ }
+ }
+}
+
+static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
+ 3, sizeof(*a6xx_state->gmu_registers));
+
+ if (!a6xx_state->gmu_registers)
+ return;
+
+ a6xx_state->nr_gmu_registers = 3;
+
+ /* Get the CX GMU registers from AHB */
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
+ &a6xx_state->gmu_registers[0], false);
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[1],
+ &a6xx_state->gmu_registers[1], true);
+
+ if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
+ return;
+
+ /* Set the fence to ALLOW mode so we can access the registers */
+ gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[2],
+ &a6xx_state->gmu_registers[2], false);
+}
+
+static struct msm_gpu_state_bo *a6xx_snapshot_gmu_bo(
+ struct a6xx_gpu_state *a6xx_state, struct a6xx_gmu_bo *bo)
+{
+ struct msm_gpu_state_bo *snapshot;
+
+ if (!bo->size)
+ return NULL;
+
+ snapshot = state_kcalloc(a6xx_state, 1, sizeof(*snapshot));
+ if (!snapshot)
+ return NULL;
+
+ snapshot->iova = bo->iova;
+ snapshot->size = bo->size;
+ snapshot->data = kvzalloc(snapshot->size, GFP_KERNEL);
+ if (!snapshot->data)
+ return NULL;
+
+ memcpy(snapshot->data, bo->virt, bo->size);
+
+ return snapshot;
+}
+
+static void a6xx_snapshot_gmu_hfi_history(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ unsigned i, j;
+
+ BUILD_BUG_ON(ARRAY_SIZE(gmu->queues) != ARRAY_SIZE(a6xx_state->hfi_queue_history));
+
+ for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
+ struct a6xx_hfi_queue *queue = &gmu->queues[i];
+ for (j = 0; j < HFI_HISTORY_SZ; j++) {
+ unsigned idx = (j + queue->history_idx) % HFI_HISTORY_SZ;
+ a6xx_state->hfi_queue_history[i][j] = queue->history[idx];
+ }
+ }
+}
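+
+/*
+ * The copy above starts at queue->history_idx, which (assuming the HFI code
+ * advances it after each write) points at the oldest entry, so the snapshot
+ * comes out oldest-first.  E.g. if HFI_HISTORY_SZ were 8 and history_idx
+ * were 2, the slots would be copied in the order 2, 3, 4, 5, 6, 7, 0, 1.
+ */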
+
+#define A6XX_GBIF_REGLIST_SIZE 1
+static void a6xx_get_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i, count = ARRAY_SIZE(a6xx_ahb_reglist) +
+ ARRAY_SIZE(a6xx_reglist) +
+ ARRAY_SIZE(a6xx_hlsq_reglist) + A6XX_GBIF_REGLIST_SIZE;
+ int index = 0;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ a6xx_state->registers = state_kcalloc(a6xx_state,
+ count, sizeof(*a6xx_state->registers));
+
+ if (!a6xx_state->registers)
+ return;
+
+ a6xx_state->nr_registers = count;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_ahb_reglist); i++)
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a6xx_ahb_reglist[i],
+ &a6xx_state->registers[index++]);
+
+ if (a6xx_has_gbif(adreno_gpu))
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a6xx_gbif_reglist,
+ &a6xx_state->registers[index++]);
+ else
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a6xx_vbif_reglist,
+ &a6xx_state->registers[index++]);
+ if (!dumper) {
+ /*
+ * We can't use the crashdumper when the SMMU is stalled,
+ * because the GPU has no memory access until we resume
+ * translation (but we don't want to do that until after
+ * we have captured as much useful GPU state as possible).
+ * So instead collect registers via the CPU:
+ */
+ for (i = 0; i < ARRAY_SIZE(a6xx_reglist); i++)
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a6xx_reglist[i],
+ &a6xx_state->registers[index++]);
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_reglist); i++)
+ a6xx_get_crashdumper_registers(gpu,
+ a6xx_state, &a6xx_reglist[i],
+ &a6xx_state->registers[index++],
+ dumper);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_hlsq_reglist); i++)
+ a6xx_get_crashdumper_hlsq_registers(gpu,
+ a6xx_state, &a6xx_hlsq_reglist[i],
+ &a6xx_state->registers[index++],
+ dumper);
+}
+
+static u32 a6xx_get_cp_roq_size(struct msm_gpu *gpu)
+{
+ /* The value at [16:31] is in 4dword units. Convert it to dwords */
+ return gpu_read(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2) >> 14;
+}
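+
+/*
+ * The single shift by 14 combines extracting the field at [16:31] (>> 16)
+ * with the 4dword-to-dword conversion (<< 2), assuming bits [14:15] read
+ * back as zero.  E.g. a raw value of 0x02000000 gives 0x200 * 4 == 0x800
+ * dwords.
+ */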
+
+/* Read a block of data from an indexed register pair */
+static void a6xx_get_indexed_regs(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_indexed_registers *indexed,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i;
+
+ obj->handle = (const void *) indexed;
+ if (indexed->count_fn)
+ indexed->count = indexed->count_fn(gpu);
+
+ obj->data = state_kcalloc(a6xx_state, indexed->count, sizeof(u32));
+ if (!obj->data)
+ return;
+
+ /* All the indexed banks start at address 0 */
+ gpu_write(gpu, indexed->addr, 0);
+
+ /* Read the data - each read increments the internal address by 1 */
+ for (i = 0; i < indexed->count; i++)
+ obj->data[i] = gpu_read(gpu, indexed->data);
+}
+
+static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ u32 mempool_size;
+ int count = ARRAY_SIZE(a6xx_indexed_reglist) + 1;
+ int i;
+
+ a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count,
+ sizeof(*a6xx_state->indexed_regs));
+ if (!a6xx_state->indexed_regs)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_indexed_reglist); i++)
+ a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_indexed_reglist[i],
+ &a6xx_state->indexed_regs[i]);
+
+ if (adreno_is_a650_family(to_adreno_gpu(gpu))) {
+ u32 val;
+
+ val = gpu_read(gpu, REG_A6XX_CP_CHICKEN_DBG);
+ gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, val | 4);
+
+ /* Get the contents of the CP mempool */
+ a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_cp_mempool_indexed,
+ &a6xx_state->indexed_regs[i]);
+
+ gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, val);
+ a6xx_state->nr_indexed_regs = count;
+ return;
+ }
+
+ /* Set the CP mempool size to 0 to stabilize it while dumping */
+ mempool_size = gpu_read(gpu, REG_A6XX_CP_MEM_POOL_SIZE);
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 0);
+
+ /* Get the contents of the CP mempool */
+ a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_cp_mempool_indexed,
+ &a6xx_state->indexed_regs[i]);
+
+ /*
+ * Offset 0x2000 in the mempool is the size - copy the saved size over
+ * so the data is consistent
+ */
+ a6xx_state->indexed_regs[i].data[0x2000] = mempool_size;
+
+ /* Restore the size in the hardware */
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, mempool_size);
+
+ a6xx_state->nr_indexed_regs = count;
+}
+
+struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct a6xx_crashdumper _dumper = { 0 }, *dumper = NULL;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gpu_state *a6xx_state = kzalloc(sizeof(*a6xx_state),
+ GFP_KERNEL);
+ bool stalled = !!(gpu_read(gpu, REG_A6XX_RBBM_STATUS3) &
+ A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT);
+
+ if (!a6xx_state)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&a6xx_state->objs);
+
+ /* Get the generic state from the adreno core */
+ adreno_gpu_state_get(gpu, &a6xx_state->base);
+
+ if (!adreno_has_gmu_wrapper(adreno_gpu)) {
+ a6xx_get_gmu_registers(gpu, a6xx_state);
+
+ a6xx_state->gmu_log = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.log);
+ a6xx_state->gmu_hfi = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.hfi);
+ a6xx_state->gmu_debug = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.debug);
+
+ a6xx_snapshot_gmu_hfi_history(gpu, a6xx_state);
+ }
+
+	/* If GX isn't on, the rest of the data isn't going to be accessible */
+ if (!adreno_has_gmu_wrapper(adreno_gpu) && !a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
+ return &a6xx_state->base;
+
+ /* Get the banks of indexed registers */
+ a6xx_get_indexed_registers(gpu, a6xx_state);
+
+ /*
+ * Try to initialize the crashdumper, if we are not dumping state
+ * with the SMMU stalled. The crashdumper needs memory access to
+ * write out GPU state, so we need to skip this when the SMMU is
+ * stalled in response to an iova fault
+ */
+ if (!stalled && !gpu->needs_hw_init &&
+ !a6xx_crashdumper_init(gpu, &_dumper)) {
+ dumper = &_dumper;
+ }
+
+ a6xx_get_registers(gpu, a6xx_state, dumper);
+
+ if (dumper) {
+ a6xx_get_shaders(gpu, a6xx_state, dumper);
+ a6xx_get_clusters(gpu, a6xx_state, dumper);
+ a6xx_get_dbgahb_clusters(gpu, a6xx_state, dumper);
+
+ msm_gem_kernel_put(dumper->bo, gpu->aspace);
+ }
+
+ if (snapshot_debugbus)
+ a6xx_get_debugbus(gpu, a6xx_state);
+
+ a6xx_state->gpu_initialized = !gpu->needs_hw_init;
+
+ return &a6xx_state->base;
+}
+
+static void a6xx_gpu_state_destroy(struct kref *kref)
+{
+ struct a6xx_state_memobj *obj, *tmp;
+ struct msm_gpu_state *state = container_of(kref,
+ struct msm_gpu_state, ref);
+ struct a6xx_gpu_state *a6xx_state = container_of(state,
+ struct a6xx_gpu_state, base);
+
+ if (a6xx_state->gmu_log)
+ kvfree(a6xx_state->gmu_log->data);
+
+ if (a6xx_state->gmu_hfi)
+ kvfree(a6xx_state->gmu_hfi->data);
+
+ if (a6xx_state->gmu_debug)
+ kvfree(a6xx_state->gmu_debug->data);
+
+ list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node) {
+ list_del(&obj->node);
+ kvfree(obj);
+ }
+
+ adreno_gpu_state_destroy(state);
+ kfree(a6xx_state);
+}
+
+int a6xx_gpu_state_put(struct msm_gpu_state *state)
+{
+ if (IS_ERR_OR_NULL(state))
+ return 1;
+
+ return kref_put(&state->ref, a6xx_gpu_state_destroy);
+}
+
+static void a6xx_show_registers(const u32 *registers, u32 *data, size_t count,
+ struct drm_printer *p)
+{
+ int i, index = 0;
+
+ if (!data)
+ return;
+
+ for (i = 0; i < count; i += 2) {
+ u32 count = RANGE(registers, i);
+ u32 offset = registers[i];
+ int j;
+
+ for (j = 0; j < count; index++, offset++, j++) {
+ if (data[index] == 0xdeafbead)
+ continue;
+
+ drm_printf(p, " - { offset: 0x%06x, value: 0x%08x }\n",
+ offset << 2, data[index]);
+ }
+ }
+}
+
+static void print_ascii85(struct drm_printer *p, size_t len, u32 *data)
+{
+ char out[ASCII85_BUFSZ];
+ long i, l, datalen = 0;
+
+ for (i = 0; i < len >> 2; i++) {
+ if (data[i])
+ datalen = (i + 1) << 2;
+ }
+
+ if (datalen == 0)
+ return;
+
+ drm_puts(p, " data: !!ascii85 |\n");
+ drm_puts(p, " ");
+
+ l = ascii85_encode_len(datalen);
+
+ for (i = 0; i < l; i++)
+ drm_puts(p, ascii85_encode(data[i], out));
+
+ drm_puts(p, "\n");
+}
+
+static void print_name(struct drm_printer *p, const char *fmt, const char *name)
+{
+ drm_puts(p, fmt);
+ drm_puts(p, name);
+ drm_puts(p, "\n");
+}
+
+static void a6xx_show_shader(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_shader_block *block = obj->handle;
+ int i;
+
+ if (!obj->handle)
+ return;
+
+ print_name(p, " - type: ", block->name);
+
+ for (i = 0; i < A6XX_NUM_SHADER_BANKS; i++) {
+ drm_printf(p, " - bank: %d\n", i);
+ drm_printf(p, " size: %d\n", block->size);
+
+ if (!obj->data)
+ continue;
+
+ print_ascii85(p, block->size << 2,
+ obj->data + (block->size * i));
+ }
+}
+
+static void a6xx_show_cluster_data(const u32 *registers, int size, u32 *data,
+ struct drm_printer *p)
+{
+ int ctx, index = 0;
+
+ for (ctx = 0; ctx < A6XX_NUM_CONTEXTS; ctx++) {
+ int j;
+
+ drm_printf(p, " - context: %d\n", ctx);
+
+ for (j = 0; j < size; j += 2) {
+ u32 count = RANGE(registers, j);
+ u32 offset = registers[j];
+ int k;
+
+ for (k = 0; k < count; index++, offset++, k++) {
+ if (data[index] == 0xdeafbead)
+ continue;
+
+ drm_printf(p, " - { offset: 0x%06x, value: 0x%08x }\n",
+ offset << 2, data[index]);
+ }
+ }
+ }
+}
+
+static void a6xx_show_dbgahb_cluster(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_dbgahb_cluster *dbgahb = obj->handle;
+
+ if (dbgahb) {
+ print_name(p, " - cluster-name: ", dbgahb->name);
+ a6xx_show_cluster_data(dbgahb->registers, dbgahb->count,
+ obj->data, p);
+ }
+}
+
+static void a6xx_show_cluster(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_cluster *cluster = obj->handle;
+
+ if (cluster) {
+ print_name(p, " - cluster-name: ", cluster->name);
+ a6xx_show_cluster_data(cluster->registers, cluster->count,
+ obj->data, p);
+ }
+}
+
+static void a6xx_show_indexed_regs(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_indexed_registers *indexed = obj->handle;
+
+ if (!indexed)
+ return;
+
+ print_name(p, " - regs-name: ", indexed->name);
+ drm_printf(p, " dwords: %d\n", indexed->count);
+
+ print_ascii85(p, indexed->count << 2, obj->data);
+}
+
+static void a6xx_show_debugbus_block(const struct a6xx_debugbus_block *block,
+ u32 *data, struct drm_printer *p)
+{
+ if (block) {
+ print_name(p, " - debugbus-block: ", block->name);
+
+ /*
+ * count for regular debugbus data is in quadwords,
+ * but print the size in dwords for consistency
+ */
+ drm_printf(p, " count: %d\n", block->count << 1);
+
+ print_ascii85(p, block->count << 3, data);
+ }
+}
+
+static void a6xx_show_debugbus(struct a6xx_gpu_state *a6xx_state,
+ struct drm_printer *p)
+{
+ int i;
+
+ for (i = 0; i < a6xx_state->nr_debugbus; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->debugbus[i];
+
+ a6xx_show_debugbus_block(obj->handle, obj->data, p);
+ }
+
+ if (a6xx_state->vbif_debugbus) {
+ struct a6xx_gpu_state_obj *obj = a6xx_state->vbif_debugbus;
+
+ drm_puts(p, " - debugbus-block: A6XX_DBGBUS_VBIF\n");
+ drm_printf(p, " count: %d\n", VBIF_DEBUGBUS_BLOCK_SIZE);
+
+ /* vbif debugbus data is in dwords. Confusing, huh? */
+ print_ascii85(p, VBIF_DEBUGBUS_BLOCK_SIZE << 2, obj->data);
+ }
+
+ for (i = 0; i < a6xx_state->nr_cx_debugbus; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->cx_debugbus[i];
+
+ a6xx_show_debugbus_block(obj->handle, obj->data, p);
+ }
+}
+
+void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ struct a6xx_gpu_state *a6xx_state = container_of(state,
+ struct a6xx_gpu_state, base);
+ int i;
+
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ drm_printf(p, "gpu-initialized: %d\n", a6xx_state->gpu_initialized);
+
+ adreno_show(gpu, state, p);
+
+ drm_puts(p, "gmu-log:\n");
+ if (a6xx_state->gmu_log) {
+ struct msm_gpu_state_bo *gmu_log = a6xx_state->gmu_log;
+
+ drm_printf(p, " iova: 0x%016llx\n", gmu_log->iova);
+ drm_printf(p, " size: %zu\n", gmu_log->size);
+ adreno_show_object(p, &gmu_log->data, gmu_log->size,
+ &gmu_log->encoded);
+ }
+
+ drm_puts(p, "gmu-hfi:\n");
+ if (a6xx_state->gmu_hfi) {
+ struct msm_gpu_state_bo *gmu_hfi = a6xx_state->gmu_hfi;
+ unsigned i, j;
+
+ drm_printf(p, " iova: 0x%016llx\n", gmu_hfi->iova);
+ drm_printf(p, " size: %zu\n", gmu_hfi->size);
+ for (i = 0; i < ARRAY_SIZE(a6xx_state->hfi_queue_history); i++) {
+ drm_printf(p, " queue-history[%u]:", i);
+ for (j = 0; j < HFI_HISTORY_SZ; j++) {
+ drm_printf(p, " %d", a6xx_state->hfi_queue_history[i][j]);
+ }
+ drm_printf(p, "\n");
+ }
+ adreno_show_object(p, &gmu_hfi->data, gmu_hfi->size,
+ &gmu_hfi->encoded);
+ }
+
+ drm_puts(p, "gmu-debug:\n");
+ if (a6xx_state->gmu_debug) {
+ struct msm_gpu_state_bo *gmu_debug = a6xx_state->gmu_debug;
+
+ drm_printf(p, " iova: 0x%016llx\n", gmu_debug->iova);
+ drm_printf(p, " size: %zu\n", gmu_debug->size);
+ adreno_show_object(p, &gmu_debug->data, gmu_debug->size,
+ &gmu_debug->encoded);
+ }
+
+ drm_puts(p, "registers:\n");
+ for (i = 0; i < a6xx_state->nr_registers; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->registers[i];
+ const struct a6xx_registers *regs = obj->handle;
+
+ if (!obj->handle)
+ continue;
+
+ a6xx_show_registers(regs->registers, obj->data, regs->count, p);
+ }
+
+ drm_puts(p, "registers-gmu:\n");
+ for (i = 0; i < a6xx_state->nr_gmu_registers; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->gmu_registers[i];
+ const struct a6xx_registers *regs = obj->handle;
+
+ if (!obj->handle)
+ continue;
+
+ a6xx_show_registers(regs->registers, obj->data, regs->count, p);
+ }
+
+ drm_puts(p, "indexed-registers:\n");
+ for (i = 0; i < a6xx_state->nr_indexed_regs; i++)
+ a6xx_show_indexed_regs(&a6xx_state->indexed_regs[i], p);
+
+ drm_puts(p, "shader-blocks:\n");
+ for (i = 0; i < a6xx_state->nr_shaders; i++)
+ a6xx_show_shader(&a6xx_state->shaders[i], p);
+
+ drm_puts(p, "clusters:\n");
+ for (i = 0; i < a6xx_state->nr_clusters; i++)
+ a6xx_show_cluster(&a6xx_state->clusters[i], p);
+
+ for (i = 0; i < a6xx_state->nr_dbgahb_clusters; i++)
+ a6xx_show_dbgahb_cluster(&a6xx_state->dbgahb_clusters[i], p);
+
+ drm_puts(p, "debugbus:\n");
+ a6xx_show_debugbus(a6xx_state, p);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
new file mode 100644
index 0000000000..e788ed72eb
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -0,0 +1,482 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. */
+
+#ifndef _A6XX_CRASH_DUMP_H_
+#define _A6XX_CRASH_DUMP_H_
+
+#include "a6xx.xml.h"
+
+#define A6XX_NUM_CONTEXTS 2
+#define A6XX_NUM_SHADER_BANKS 3
+
+static const u32 a6xx_gras_cluster[] = {
+ 0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809d, 0x80a0, 0x80a6,
+ 0x80af, 0x80f1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
+ 0x8400, 0x840b,
+};
+
+static const u32 a6xx_ps_cluster_rac[] = {
+ 0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881e, 0x8820, 0x8865,
+ 0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
+ 0x88c0, 0x88c1, 0x88d0, 0x88e3, 0x8900, 0x890c, 0x890f, 0x891a,
+ 0x8c00, 0x8c01, 0x8c08, 0x8c10, 0x8c17, 0x8c1f, 0x8c26, 0x8c33,
+};
+
+static const u32 a6xx_ps_cluster_rbp[] = {
+ 0x88f0, 0x88f3, 0x890d, 0x890e, 0x8927, 0x8928, 0x8bf0, 0x8bf1,
+ 0x8c02, 0x8c07, 0x8c11, 0x8c16, 0x8c20, 0x8c25,
+};
+
+static const u32 a6xx_ps_cluster[] = {
+ 0x9200, 0x9216, 0x9218, 0x9236, 0x9300, 0x9306,
+};
+
+static const u32 a6xx_fe_cluster[] = {
+ 0x9300, 0x9306, 0x9800, 0x9806, 0x9b00, 0x9b07, 0xa000, 0xa009,
+ 0xa00e, 0xa0ef, 0xa0f8, 0xa0f8,
+};
+
+static const u32 a660_fe_cluster[] = {
+ 0x9807, 0x9807,
+};
+
+static const u32 a6xx_pc_vs_cluster[] = {
+ 0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9b00, 0x9b07,
+};
+
+#define CLUSTER_FE 0
+#define CLUSTER_SP_VS 1
+#define CLUSTER_PC_VS 2
+#define CLUSTER_GRAS 3
+#define CLUSTER_SP_PS 4
+#define CLUSTER_PS 5
+#define CLUSTER_VPC_PS 6
+
+#define CLUSTER(_id, _reg, _sel_reg, _sel_val) \
+ { .id = _id, .name = #_id,\
+ .registers = _reg, \
+ .count = ARRAY_SIZE(_reg), \
+ .sel_reg = _sel_reg, .sel_val = _sel_val }
+
+static const struct a6xx_cluster {
+ u32 id;
+ const char *name;
+ const u32 *registers;
+ size_t count;
+ u32 sel_reg;
+ u32 sel_val;
+} a6xx_clusters[] = {
+ CLUSTER(CLUSTER_GRAS, a6xx_gras_cluster, 0, 0),
+ CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rac, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x0),
+ CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rbp, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x9),
+ CLUSTER(CLUSTER_PS, a6xx_ps_cluster, 0, 0),
+ CLUSTER(CLUSTER_FE, a6xx_fe_cluster, 0, 0),
+ CLUSTER(CLUSTER_PC_VS, a6xx_pc_vs_cluster, 0, 0),
+ CLUSTER(CLUSTER_FE, a660_fe_cluster, 0, 0),
+};
+
+static const u32 a6xx_sp_vs_hlsq_cluster[] = {
+ 0xb800, 0xb803, 0xb820, 0xb822,
+};
+
+static const u32 a6xx_sp_vs_sp_cluster[] = {
+ 0xa800, 0xa824, 0xa830, 0xa83c, 0xa840, 0xa864, 0xa870, 0xa895,
+ 0xa8a0, 0xa8af, 0xa8c0, 0xa8c3,
+};
+
+static const u32 a6xx_hlsq_duplicate_cluster[] = {
+ 0xbb10, 0xbb11, 0xbb20, 0xbb29,
+};
+
+static const u32 a6xx_hlsq_2d_duplicate_cluster[] = {
+ 0xbd80, 0xbd80,
+};
+
+static const u32 a6xx_sp_duplicate_cluster[] = {
+ 0xab00, 0xab00, 0xab04, 0xab05, 0xab10, 0xab1b, 0xab20, 0xab20,
+};
+
+static const u32 a6xx_tp_duplicate_cluster[] = {
+ 0xb300, 0xb307, 0xb309, 0xb309, 0xb380, 0xb382,
+};
+
+static const u32 a6xx_sp_ps_hlsq_cluster[] = {
+ 0xb980, 0xb980, 0xb982, 0xb987, 0xb990, 0xb99b, 0xb9a0, 0xb9a2,
+ 0xb9c0, 0xb9c9,
+};
+
+static const u32 a6xx_sp_ps_hlsq_2d_cluster[] = {
+ 0xbd80, 0xbd80,
+};
+
+static const u32 a6xx_sp_ps_sp_cluster[] = {
+ 0xa980, 0xa9a8, 0xa9b0, 0xa9bc, 0xa9d0, 0xa9d3, 0xa9e0, 0xa9f3,
+ 0xaa00, 0xaa00, 0xaa30, 0xaa31, 0xaaf2, 0xaaf2,
+};
+
+static const u32 a6xx_sp_ps_sp_2d_cluster[] = {
+ 0xacc0, 0xacc0,
+};
+
+static const u32 a6xx_sp_ps_tp_cluster[] = {
+ 0xb180, 0xb183, 0xb190, 0xb191,
+};
+
+static const u32 a6xx_sp_ps_tp_2d_cluster[] = {
+ 0xb4c0, 0xb4d1,
+};
+
+#define CLUSTER_DBGAHB(_id, _base, _type, _reg) \
+ { .name = #_id, .statetype = _type, .base = _base, \
+ .registers = _reg, .count = ARRAY_SIZE(_reg) }
+
+static const struct a6xx_dbgahb_cluster {
+ const char *name;
+ u32 statetype;
+ u32 base;
+ const u32 *registers;
+ size_t count;
+} a6xx_dbgahb_clusters[] = {
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002e000, 0x41, a6xx_sp_vs_hlsq_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002a000, 0x21, a6xx_sp_vs_sp_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002e000, 0x41, a6xx_hlsq_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002f000, 0x45, a6xx_hlsq_2d_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002a000, 0x21, a6xx_sp_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002c000, 0x1, a6xx_tp_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002e000, 0x42, a6xx_sp_ps_hlsq_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002f000, 0x46, a6xx_sp_ps_hlsq_2d_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002a000, 0x22, a6xx_sp_ps_sp_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002b000, 0x26, a6xx_sp_ps_sp_2d_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002c000, 0x2, a6xx_sp_ps_tp_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002d000, 0x6, a6xx_sp_ps_tp_2d_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002e000, 0x42, a6xx_hlsq_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002a000, 0x22, a6xx_sp_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002c000, 0x2, a6xx_tp_duplicate_cluster),
+};
+
+static const u32 a6xx_hlsq_registers[] = {
+ 0xbe00, 0xbe01, 0xbe04, 0xbe05, 0xbe08, 0xbe09, 0xbe10, 0xbe15,
+ 0xbe20, 0xbe23,
+};
+
+static const u32 a6xx_sp_registers[] = {
+ 0xae00, 0xae04, 0xae0c, 0xae0c, 0xae0f, 0xae2b, 0xae30, 0xae32,
+ 0xae35, 0xae35, 0xae3a, 0xae3f, 0xae50, 0xae52,
+};
+
+static const u32 a6xx_tp_registers[] = {
+ 0xb600, 0xb601, 0xb604, 0xb605, 0xb610, 0xb61b, 0xb620, 0xb623,
+};
+
+struct a6xx_registers {
+ const u32 *registers;
+ size_t count;
+ u32 val0;
+ u32 val1;
+};
+
+#define HLSQ_DBG_REGS(_base, _type, _array) \
+ { .val0 = _base, .val1 = _type, .registers = _array, \
+ .count = ARRAY_SIZE(_array), }
+
+static const struct a6xx_registers a6xx_hlsq_reglist[] = {
+ HLSQ_DBG_REGS(0x0002F800, 0x40, a6xx_hlsq_registers),
+ HLSQ_DBG_REGS(0x0002B800, 0x20, a6xx_sp_registers),
+ HLSQ_DBG_REGS(0x0002D800, 0x0, a6xx_tp_registers),
+};
+
+#define SHADER(_type, _size) \
+ { .type = _type, .name = #_type, .size = _size }
+
+static const struct a6xx_shader_block {
+ const char *name;
+ u32 type;
+ u32 size;
+} a6xx_shader_blocks[] = {
+ SHADER(A6XX_TP0_TMO_DATA, 0x200),
+ SHADER(A6XX_TP0_SMO_DATA, 0x80),
+ SHADER(A6XX_TP0_MIPMAP_BASE_DATA, 0x3c0),
+ SHADER(A6XX_TP1_TMO_DATA, 0x200),
+ SHADER(A6XX_TP1_SMO_DATA, 0x80),
+ SHADER(A6XX_TP1_MIPMAP_BASE_DATA, 0x3c0),
+ SHADER(A6XX_SP_INST_DATA, 0x800),
+ SHADER(A6XX_SP_LB_0_DATA, 0x800),
+ SHADER(A6XX_SP_LB_1_DATA, 0x800),
+ SHADER(A6XX_SP_LB_2_DATA, 0x800),
+ SHADER(A6XX_SP_LB_3_DATA, 0x800),
+ SHADER(A6XX_SP_LB_4_DATA, 0x800),
+ SHADER(A6XX_SP_LB_5_DATA, 0x200),
+ SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x800),
+ SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280),
+ SHADER(A6XX_SP_UAV_DATA, 0x80),
+ SHADER(A6XX_SP_INST_TAG, 0x80),
+ SHADER(A6XX_SP_CB_BINDLESS_TAG, 0x80),
+ SHADER(A6XX_SP_TMO_UMO_TAG, 0x80),
+ SHADER(A6XX_SP_SMO_TAG, 0x80),
+ SHADER(A6XX_SP_STATE_DATA, 0x3f),
+ SHADER(A6XX_HLSQ_CHUNK_CVS_RAM, 0x1c0),
+ SHADER(A6XX_HLSQ_CHUNK_CPS_RAM, 0x280),
+ SHADER(A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40),
+ SHADER(A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40),
+ SHADER(A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4),
+ SHADER(A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4),
+ SHADER(A6XX_HLSQ_CVS_MISC_RAM, 0x1c0),
+ SHADER(A6XX_HLSQ_CPS_MISC_RAM, 0x580),
+ SHADER(A6XX_HLSQ_INST_RAM, 0x800),
+ SHADER(A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800),
+ SHADER(A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800),
+ SHADER(A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8),
+ SHADER(A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4),
+ SHADER(A6XX_HLSQ_INST_RAM_TAG, 0x80),
+ SHADER(A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xc),
+ SHADER(A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10),
+ SHADER(A6XX_HLSQ_PWR_REST_RAM, 0x28),
+ SHADER(A6XX_HLSQ_PWR_REST_TAG, 0x14),
+ SHADER(A6XX_HLSQ_DATAPATH_META, 0x40),
+ SHADER(A6XX_HLSQ_FRONTEND_META, 0x40),
+ SHADER(A6XX_HLSQ_INDIRECT_META, 0x40),
+ SHADER(A6XX_SP_LB_6_DATA, 0x200),
+ SHADER(A6XX_SP_LB_7_DATA, 0x200),
+ SHADER(A6XX_HLSQ_INST_RAM_1, 0x200),
+};
+
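+/*
+ * Plain register dump lists: each pair of values is an inclusive
+ * (first, last) register range to be captured in the GPU state snapshot.
+ */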
+static const u32 a6xx_rb_rac_registers[] = {
+ 0x8e04, 0x8e05, 0x8e07, 0x8e08, 0x8e10, 0x8e1c, 0x8e20, 0x8e25,
+ 0x8e28, 0x8e28, 0x8e2c, 0x8e2f, 0x8e50, 0x8e52,
+};
+
+static const u32 a6xx_rb_rbp_registers[] = {
+ 0x8e01, 0x8e01, 0x8e0c, 0x8e0c, 0x8e3b, 0x8e3e, 0x8e40, 0x8e43,
+ 0x8e53, 0x8e5f, 0x8e70, 0x8e77,
+};
+
+static const u32 a6xx_registers[] = {
+ /* RBBM */
+ 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001b,
+ 0x001e, 0x0032, 0x0038, 0x003c, 0x0042, 0x0042, 0x0044, 0x0044,
+ 0x0047, 0x0047, 0x0056, 0x0056, 0x00ad, 0x00ae, 0x00b0, 0x00fb,
+ 0x0100, 0x011d, 0x0200, 0x020d, 0x0218, 0x023d, 0x0400, 0x04f9,
+ 0x0500, 0x0500, 0x0505, 0x050b, 0x050e, 0x0511, 0x0533, 0x0533,
+ 0x0540, 0x0555,
+ /* CP */
+ 0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
+ 0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0845, 0x084f, 0x086f,
+ 0x0880, 0x088a, 0x08a0, 0x08ab, 0x08c0, 0x08c4, 0x08d0, 0x08dd,
+ 0x08f0, 0x08f3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093e,
+ 0x0942, 0x094d, 0x0980, 0x0984, 0x098d, 0x0996, 0x0998, 0x099e,
+ 0x09a0, 0x09a6, 0x09a8, 0x09ae, 0x09b0, 0x09b1, 0x09c2, 0x09c8,
+ 0x0a00, 0x0a03,
+ /* VSC */
+ 0x0c00, 0x0c04, 0x0c06, 0x0c06, 0x0c10, 0x0cd9, 0x0e00, 0x0e0e,
+ /* UCHE */
+ 0x0e10, 0x0e13, 0x0e17, 0x0e19, 0x0e1c, 0x0e2b, 0x0e30, 0x0e32,
+ 0x0e38, 0x0e39,
+ /* GRAS */
+ 0x8600, 0x8601, 0x8610, 0x861b, 0x8620, 0x8620, 0x8628, 0x862b,
+ 0x8630, 0x8637,
+ /* VPC */
+ 0x9600, 0x9604, 0x9624, 0x9637,
+ /* PC */
+ 0x9e00, 0x9e01, 0x9e03, 0x9e0e, 0x9e11, 0x9e16, 0x9e19, 0x9e19,
+ 0x9e1c, 0x9e1c, 0x9e20, 0x9e23, 0x9e30, 0x9e31, 0x9e34, 0x9e34,
+ 0x9e70, 0x9e72, 0x9e78, 0x9e79, 0x9e80, 0x9fff,
+ /* VFD */
+ 0xa600, 0xa601, 0xa603, 0xa603, 0xa60a, 0xa60a, 0xa610, 0xa617,
+ 0xa630, 0xa630,
+ /* HLSQ */
+ 0xd002, 0xd003,
+};
+
+static const u32 a660_registers[] = {
+ /* UCHE */
+ 0x0e3c, 0x0e3c,
+};
+
+#define REGS(_array, _sel_reg, _sel_val) \
+ { .registers = _array, .count = ARRAY_SIZE(_array), \
+ .val0 = _sel_reg, .val1 = _sel_val }
+
+static const struct a6xx_registers a6xx_reglist[] = {
+ REGS(a6xx_registers, 0, 0),
+ REGS(a660_registers, 0, 0),
+ REGS(a6xx_rb_rac_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0),
+ REGS(a6xx_rb_rbp_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 9),
+};
+
+static const u32 a6xx_ahb_registers[] = {
+ /* RBBM_STATUS - RBBM_STATUS3 */
+ 0x210, 0x213,
+ /* CP_STATUS_1 */
+ 0x825, 0x825,
+};
+
+static const u32 a6xx_vbif_registers[] = {
+ 0x3000, 0x3007, 0x300c, 0x3014, 0x3018, 0x302d, 0x3030, 0x3031,
+ 0x3034, 0x3036, 0x303c, 0x303d, 0x3040, 0x3040, 0x3042, 0x3042,
+ 0x3049, 0x3049, 0x3058, 0x3058, 0x305a, 0x3061, 0x3064, 0x3068,
+ 0x306c, 0x306d, 0x3080, 0x3088, 0x308b, 0x308c, 0x3090, 0x3094,
+ 0x3098, 0x3098, 0x309c, 0x309c, 0x30c0, 0x30c0, 0x30c8, 0x30c8,
+ 0x30d0, 0x30d0, 0x30d8, 0x30d8, 0x30e0, 0x30e0, 0x3100, 0x3100,
+ 0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
+ 0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
+ 0x3156, 0x3156, 0x3158, 0x3158, 0x315a, 0x315a, 0x315c, 0x315c,
+ 0x315e, 0x315e, 0x3160, 0x3160, 0x3162, 0x3162, 0x340c, 0x340c,
+ 0x3410, 0x3410, 0x3800, 0x3801,
+};
+
+static const u32 a6xx_gbif_registers[] = {
+	0x3c00, 0x3c0b, 0x3c40, 0x3c47, 0x3cc0, 0x3cd1, 0xe3a, 0xe3a,
+};
+
+static const struct a6xx_registers a6xx_ahb_reglist[] = {
+ REGS(a6xx_ahb_registers, 0, 0),
+};
+
+static const struct a6xx_registers a6xx_vbif_reglist =
+ REGS(a6xx_vbif_registers, 0, 0);
+
+static const struct a6xx_registers a6xx_gbif_reglist =
+ REGS(a6xx_gbif_registers, 0, 0);
+
+static const u32 a6xx_gmu_gx_registers[] = {
+ /* GMU GX */
+ 0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b,
+ 0x001e, 0x001e, 0x0020, 0x0023, 0x0026, 0x0026, 0x0028, 0x002b,
+ 0x002e, 0x002e, 0x0030, 0x0033, 0x0036, 0x0036, 0x0038, 0x003b,
+ 0x003e, 0x003e, 0x0040, 0x0043, 0x0046, 0x0046, 0x0080, 0x0084,
+ 0x0100, 0x012b, 0x0140, 0x0140,
+};
+
+static const u32 a6xx_gmu_cx_registers[] = {
+ /* GMU CX */
+ 0x4c00, 0x4c07, 0x4c10, 0x4c12, 0x4d00, 0x4d00, 0x4d07, 0x4d0a,
+ 0x5000, 0x5004, 0x5007, 0x5008, 0x500b, 0x500c, 0x500f, 0x501c,
+ 0x5024, 0x502a, 0x502d, 0x5030, 0x5040, 0x5053, 0x5087, 0x5089,
+ 0x50a0, 0x50a2, 0x50a4, 0x50af, 0x50c0, 0x50c3, 0x50d0, 0x50d0,
+ 0x50e4, 0x50e4, 0x50e8, 0x50ec, 0x5100, 0x5103, 0x5140, 0x5140,
+ 0x5142, 0x5144, 0x514c, 0x514d, 0x514f, 0x5151, 0x5154, 0x5154,
+ 0x5157, 0x5158, 0x515d, 0x515d, 0x5162, 0x5162, 0x5164, 0x5165,
+ 0x5180, 0x5186, 0x5190, 0x519e, 0x51c0, 0x51c0, 0x51c5, 0x51cc,
+ 0x51e0, 0x51e2, 0x51f0, 0x51f0, 0x5200, 0x5201,
+ /* GMU AO */
+ 0x9300, 0x9316, 0x9400, 0x9400,
+ /* GPU CC */
+ 0x9800, 0x9812, 0x9840, 0x9852, 0x9c00, 0x9c04, 0x9c07, 0x9c0b,
+ 0x9c15, 0x9c1c, 0x9c1e, 0x9c2d, 0x9c3c, 0x9c3d, 0x9c3f, 0x9c40,
+ 0x9c42, 0x9c49, 0x9c58, 0x9c5a, 0x9d40, 0x9d5e, 0xa000, 0xa002,
+ 0xa400, 0xa402, 0xac00, 0xac02, 0xb000, 0xb002, 0xb400, 0xb402,
+ 0xb800, 0xb802,
+ /* GPU CC ACD */
+ 0xbc00, 0xbc16, 0xbc20, 0xbc27,
+};
+
+static const u32 a6xx_gmu_cx_rscc_registers[] = {
+ /* GPU RSCC */
+ 0x008c, 0x008c, 0x0101, 0x0102, 0x0340, 0x0342, 0x0344, 0x0347,
+ 0x034c, 0x0387, 0x03ec, 0x03ef, 0x03f4, 0x042f, 0x0494, 0x0497,
+ 0x049c, 0x04d7, 0x053c, 0x053f, 0x0544, 0x057f,
+};
+
+static const struct a6xx_registers a6xx_gmu_reglist[] = {
+ REGS(a6xx_gmu_cx_registers, 0, 0),
+ REGS(a6xx_gmu_cx_rscc_registers, 0, 0),
+ REGS(a6xx_gmu_gx_registers, 0, 0),
+};
+
+static u32 a6xx_get_cp_roq_size(struct msm_gpu *gpu);
+
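+/*
+ * Indexed register blocks are dumped by writing a start index to the
+ * 'addr' register and then reading 'count' dwords back from 'data'
+ * ('count_fn', when set, computes the count at runtime).
+ */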
+static struct a6xx_indexed_registers {
+ const char *name;
+ u32 addr;
+ u32 data;
+ u32 count;
+ u32 (*count_fn)(struct msm_gpu *gpu);
+} a6xx_indexed_reglist[] = {
+ { "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
+ REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
+ { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
+ REG_A6XX_CP_DRAW_STATE_DATA, 0x100, NULL },
+ { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+ REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x8000, NULL },
+ { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+ REG_A6XX_CP_ROQ_DBG_DATA, 0, a6xx_get_cp_roq_size},
+};
+
+static struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
+ "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+ REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060, NULL,
+};
+
+#define DEBUGBUS(_id, _count) { .id = _id, .name = #_id, .count = _count }
+
+static const struct a6xx_debugbus_block {
+ const char *name;
+ u32 id;
+ u32 count;
+} a6xx_debugbus_blocks[] = {
+ DEBUGBUS(A6XX_DBGBUS_CP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RBBM, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_HLSQ, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_UCHE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_DPM, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TESS, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_PC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFDP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VPC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TSE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RAS, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VSC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_COM, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_LRZ, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_A2D, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CCUFCHE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RBP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_DCS, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_DBGC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_GMU_GX, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPFCHE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_GPC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_LARC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_HLSQ_SPTP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RB_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RB_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_UCHE_WRAPPER, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CCU_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CCU_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_2, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_3, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SP_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SP_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_2, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_3, 0x100),
+};
+
+static const struct a6xx_debugbus_block a6xx_gbif_debugbus_block =
+ DEBUGBUS(A6XX_DBGBUS_VBIF, 0x100);
+
+static const struct a6xx_debugbus_block a6xx_cx_debugbus_blocks[] = {
+ DEBUGBUS(A6XX_DBGBUS_GMU_CX, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CX, 0x100),
+};
+
+static const struct a6xx_debugbus_block a650_debugbus_blocks[] = {
+ DEBUGBUS(A6XX_DBGBUS_RB_2, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CCU_2, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_4, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_5, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SP_2, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_4, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_5, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SPTP_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SPTP_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SPTP_2, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SPTP_3, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SPTP_4, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SPTP_5, 0x100),
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
new file mode 100644
index 0000000000..25b235b49e
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -0,0 +1,767 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+
+#include <linux/completion.h>
+#include <linux/circ_buf.h>
+#include <linux/list.h>
+
+#include "a6xx_gmu.h"
+#include "a6xx_gmu.xml.h"
+#include "a6xx_gpu.h"
+
+#define HFI_MSG_ID(val) [val] = #val
+
+static const char * const a6xx_hfi_msg_id[] = {
+ HFI_MSG_ID(HFI_H2F_MSG_INIT),
+ HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION),
+ HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
+ HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
+ HFI_MSG_ID(HFI_H2F_MSG_TEST),
+ HFI_MSG_ID(HFI_H2F_MSG_START),
+ HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START),
+ HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE),
+ HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER),
+};
+
+static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,
+ struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
+{
+ struct a6xx_hfi_queue_header *header = queue->header;
+ u32 i, hdr, index = header->read_index;
+
+ if (header->read_index == header->write_index) {
+ header->rx_request = 1;
+ return 0;
+ }
+
+ hdr = queue->data[index];
+
+ queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;
+
+	/*
+	 * If we assume that the GMU firmware is a rational actor and will
+	 * never send us a larger response than we expect, then an
+	 * unexpectedly large header size can only be due to memory
+	 * corruption and/or hardware failure.  In that case the only
+	 * reasonable course of action is to BUG() to make the failure
+	 * obvious.
+	 */
+
+ BUG_ON(HFI_HEADER_SIZE(hdr) > dwords);
+
+ for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
+ data[i] = queue->data[index];
+ index = (index + 1) % header->size;
+ }
+
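+	/*
+	 * Newer (non-legacy) GMU firmware pads queue entries out to a
+	 * 4-dword boundary; skip over the padding that
+	 * a6xx_hfi_queue_write() inserts.
+	 */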
+ if (!gmu->legacy)
+ index = ALIGN(index, 4) % header->size;
+
+ header->read_index = index;
+ return HFI_HEADER_SIZE(hdr);
+}
+
+static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
+ struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
+{
+ struct a6xx_hfi_queue_header *header = queue->header;
+ u32 i, space, index = header->write_index;
+
+ spin_lock(&queue->lock);
+
+ space = CIRC_SPACE(header->write_index, header->read_index,
+ header->size);
+ if (space < dwords) {
+ header->dropped++;
+ spin_unlock(&queue->lock);
+ return -ENOSPC;
+ }
+
+ queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;
+
+ for (i = 0; i < dwords; i++) {
+ queue->data[index] = data[i];
+ index = (index + 1) % header->size;
+ }
+
+	/* Fill any unused space at the end of the write with a cookie value */
+ if (!gmu->legacy) {
+ for (; index % 4; index = (index + 1) % header->size)
+ queue->data[index] = 0xfafafafa;
+ }
+
+ header->write_index = index;
+ spin_unlock(&queue->lock);
+
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
+ return 0;
+}
+
+static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
+ u32 *payload, u32 payload_size)
+{
+ struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
+ u32 val;
+ int ret;
+
+ /* Wait for a response */
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
+ val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);
+
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev,
+ "Message %s id %d timed out waiting for response\n",
+ a6xx_hfi_msg_id[id], seqnum);
+ return -ETIMEDOUT;
+ }
+
+ /* Clear the interrupt */
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
+ A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);
+
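+	/*
+	 * Drain the response queue until we find the ack matching our
+	 * sequence number, logging (and skipping) any firmware errors or
+	 * stale messages along the way.
+	 */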
+ for (;;) {
+ struct a6xx_hfi_msg_response resp;
+
+ /* Get the next packet */
+ ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
+ sizeof(resp) >> 2);
+
+ /* If the queue is empty our response never made it */
+ if (!ret) {
+ DRM_DEV_ERROR(gmu->dev,
+ "The HFI response queue is unexpectedly empty\n");
+
+ return -ENOENT;
+ }
+
+ if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
+ struct a6xx_hfi_msg_error *error =
+ (struct a6xx_hfi_msg_error *) &resp;
+
+ DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
+ error->code);
+ continue;
+ }
+
+ if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
+ DRM_DEV_ERROR(gmu->dev,
+ "Unexpected message id %d on the response queue\n",
+ HFI_HEADER_SEQNUM(resp.ret_header));
+ continue;
+ }
+
+ if (resp.error) {
+ DRM_DEV_ERROR(gmu->dev,
+ "Message %s id %d returned error %d\n",
+ a6xx_hfi_msg_id[id], seqnum, resp.error);
+ return -EINVAL;
+ }
+
+ /* All is well, copy over the buffer */
+ if (payload && payload_size)
+ memcpy(payload, resp.payload,
+ min_t(u32, payload_size, sizeof(resp.payload)));
+
+ return 0;
+ }
+}
+
+static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
+ void *data, u32 size, u32 *payload, u32 payload_size)
+{
+ struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
+ int ret, dwords = size >> 2;
+ u32 seqnum;
+
+ seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;
+
+ /* First dword of the message is the message header - fill it in */
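+	/*
+	 * Header layout (decoded by the HFI_HEADER_* macros in a6xx_hfi.h):
+	 *   [7:0]   message id
+	 *   [15:8]  message size in dwords (including this header dword)
+	 *   [19:16] message type (HFI_MSG_CMD here)
+	 *   [31:20] sequence number
+	 */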
+ *((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
+ (dwords << 8) | id;
+
+ ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
+ a6xx_hfi_msg_id[id], seqnum);
+ return ret;
+ }
+
+ return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
+}
+
+static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
+{
+ struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };
+
+ msg.dbg_buffer_addr = (u32) gmu->debug.iova;
+ msg.dbg_buffer_size = (u32) gmu->debug.size;
+ msg.boot_state = boot_state;
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
+{
+ struct a6xx_hfi_msg_fw_version msg = { 0 };
+
+ /* Currently supporting version 1.10 */
+ msg.supported_version = (1 << 28) | (1 << 19) | (1 << 17);
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg),
+ version, sizeof(*version));
+}
+
+static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_perf_table_v1 msg = { 0 };
+ int i;
+
+ msg.num_gpu_levels = gmu->nr_gpu_freqs;
+ msg.num_gmu_levels = gmu->nr_gmu_freqs;
+
+ for (i = 0; i < gmu->nr_gpu_freqs; i++) {
+ msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
+ msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
+ }
+
+ for (i = 0; i < gmu->nr_gmu_freqs; i++) {
+ msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
+ msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
+ }
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_perf_table msg = { 0 };
+ int i;
+
+ msg.num_gpu_levels = gmu->nr_gpu_freqs;
+ msg.num_gmu_levels = gmu->nr_gmu_freqs;
+
+ for (i = 0; i < gmu->nr_gpu_freqs; i++) {
+ msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
+ msg.gx_votes[i].acd = 0xffffffff;
+ msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
+ }
+
+ for (i = 0; i < gmu->nr_gmu_freqs; i++) {
+ msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
+ msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
+ }
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x01;
+
+ msg->ddr_cmds_addrs[0] = 0x50000;
+ msg->ddr_cmds_addrs[1] = 0x5003c;
+ msg->ddr_cmds_addrs[2] = 0x5000c;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+ * These are the CX (CNOC) votes - these are used by the GMU but the
+ * votes are known and fixed for the target
+ */
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x01;
+
+ msg->cnoc_cmds_addrs[0] = 0x5007c;
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
+
+static void a619_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ msg->bw_level_num = 13;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x0;
+
+ msg->ddr_cmds_addrs[0] = 0x50000;
+ msg->ddr_cmds_addrs[1] = 0x50004;
+ msg->ddr_cmds_addrs[2] = 0x50080;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+ msg->ddr_cmds_data[1][0] = 0x6000030c;
+ msg->ddr_cmds_data[1][1] = 0x600000db;
+ msg->ddr_cmds_data[1][2] = 0x60000008;
+ msg->ddr_cmds_data[2][0] = 0x60000618;
+ msg->ddr_cmds_data[2][1] = 0x600001b6;
+ msg->ddr_cmds_data[2][2] = 0x60000008;
+ msg->ddr_cmds_data[3][0] = 0x60000925;
+ msg->ddr_cmds_data[3][1] = 0x60000291;
+ msg->ddr_cmds_data[3][2] = 0x60000008;
+ msg->ddr_cmds_data[4][0] = 0x60000dc1;
+ msg->ddr_cmds_data[4][1] = 0x600003dc;
+ msg->ddr_cmds_data[4][2] = 0x60000008;
+ msg->ddr_cmds_data[5][0] = 0x600010ad;
+ msg->ddr_cmds_data[5][1] = 0x600004ae;
+ msg->ddr_cmds_data[5][2] = 0x60000008;
+ msg->ddr_cmds_data[6][0] = 0x600014c3;
+ msg->ddr_cmds_data[6][1] = 0x600005d4;
+ msg->ddr_cmds_data[6][2] = 0x60000008;
+ msg->ddr_cmds_data[7][0] = 0x6000176a;
+ msg->ddr_cmds_data[7][1] = 0x60000693;
+ msg->ddr_cmds_data[7][2] = 0x60000008;
+ msg->ddr_cmds_data[8][0] = 0x60001f01;
+ msg->ddr_cmds_data[8][1] = 0x600008b5;
+ msg->ddr_cmds_data[8][2] = 0x60000008;
+ msg->ddr_cmds_data[9][0] = 0x60002940;
+ msg->ddr_cmds_data[9][1] = 0x60000b95;
+ msg->ddr_cmds_data[9][2] = 0x60000008;
+ msg->ddr_cmds_data[10][0] = 0x60002f68;
+ msg->ddr_cmds_data[10][1] = 0x60000d50;
+ msg->ddr_cmds_data[10][2] = 0x60000008;
+ msg->ddr_cmds_data[11][0] = 0x60003700;
+ msg->ddr_cmds_data[11][1] = 0x60000f71;
+ msg->ddr_cmds_data[11][2] = 0x60000008;
+ msg->ddr_cmds_data[12][0] = 0x60003fce;
+ msg->ddr_cmds_data[12][1] = 0x600011ea;
+ msg->ddr_cmds_data[12][2] = 0x60000008;
+
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x0;
+
+ msg->cnoc_cmds_addrs[0] = 0x50054;
+
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+}
+
+static void a640_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /*
+ * Send a single "off" entry just to get things running
+ * TODO: bus scaling
+ */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x01;
+
+ msg->ddr_cmds_addrs[0] = 0x50000;
+ msg->ddr_cmds_addrs[1] = 0x5003c;
+ msg->ddr_cmds_addrs[2] = 0x5000c;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+ * These are the CX (CNOC) votes - these are used by the GMU but the
+ * votes are known and fixed for the target
+ */
+ msg->cnoc_cmds_num = 3;
+ msg->cnoc_wait_bitmask = 0x01;
+
+ msg->cnoc_cmds_addrs[0] = 0x50034;
+ msg->cnoc_cmds_addrs[1] = 0x5007c;
+ msg->cnoc_cmds_addrs[2] = 0x5004c;
+
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[0][1] = 0x00000000;
+ msg->cnoc_cmds_data[0][2] = 0x40000000;
+
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+ msg->cnoc_cmds_data[1][1] = 0x20000001;
+ msg->cnoc_cmds_data[1][2] = 0x60000001;
+}
+
+static void a650_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /*
+ * Send a single "off" entry just to get things running
+ * TODO: bus scaling
+ */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x01;
+
+ msg->ddr_cmds_addrs[0] = 0x50000;
+ msg->ddr_cmds_addrs[1] = 0x50004;
+ msg->ddr_cmds_addrs[2] = 0x5007c;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+ * These are the CX (CNOC) votes - these are used by the GMU but the
+ * votes are known and fixed for the target
+ */
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x01;
+
+ msg->cnoc_cmds_addrs[0] = 0x500a4;
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
+
+static void a690_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /*
+ * Send a single "off" entry just to get things running
+ * TODO: bus scaling
+ */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x01;
+
+ msg->ddr_cmds_addrs[0] = 0x50004;
+ msg->ddr_cmds_addrs[1] = 0x50000;
+ msg->ddr_cmds_addrs[2] = 0x500ac;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+ * These are the CX (CNOC) votes - these are used by the GMU but the
+ * votes are known and fixed for the target
+ */
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x01;
+
+ msg->cnoc_cmds_addrs[0] = 0x5003c;
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
+
+static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /*
+ * Send a single "off" entry just to get things running
+ * TODO: bus scaling
+ */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x01;
+
+ msg->ddr_cmds_addrs[0] = 0x50004;
+ msg->ddr_cmds_addrs[1] = 0x500a0;
+ msg->ddr_cmds_addrs[2] = 0x50000;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+ * These are the CX (CNOC) votes - these are used by the GMU but the
+ * votes are known and fixed for the target
+ */
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x01;
+
+ msg->cnoc_cmds_addrs[0] = 0x50070;
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
+
+static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /*
+ * Send a single "off" entry just to get things running
+ * TODO: bus scaling
+ */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x07;
+
+ msg->ddr_cmds_addrs[0] = 0x50004;
+ msg->ddr_cmds_addrs[1] = 0x50000;
+ msg->ddr_cmds_addrs[2] = 0x50088;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+ * These are the CX (CNOC) votes - these are used by the GMU but the
+ * votes are known and fixed for the target
+ */
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x01;
+
+ msg->cnoc_cmds_addrs[0] = 0x5006c;
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
+
+static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x07;
+
+ msg->ddr_cmds_addrs[0] = 0x50000;
+ msg->ddr_cmds_addrs[1] = 0x5005c;
+ msg->ddr_cmds_addrs[2] = 0x5000c;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+	 * These are the CX (CNOC) votes.  They are used by the GMU, but the
+	 * values for the sdm845 GMU are known and fixed so we can hard code
+	 * them.
+ */
+
+ msg->cnoc_cmds_num = 3;
+ msg->cnoc_wait_bitmask = 0x05;
+
+ msg->cnoc_cmds_addrs[0] = 0x50034;
+ msg->cnoc_cmds_addrs[1] = 0x5007c;
+ msg->cnoc_cmds_addrs[2] = 0x5004c;
+
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[0][1] = 0x00000000;
+ msg->cnoc_cmds_data[0][2] = 0x40000000;
+
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+ msg->cnoc_cmds_data[1][1] = 0x20000001;
+ msg->cnoc_cmds_data[1][2] = 0x60000001;
+}
+
+static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_bw_table msg = { 0 };
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+
+ if (adreno_is_a618(adreno_gpu))
+ a618_build_bw_table(&msg);
+ else if (adreno_is_a619(adreno_gpu))
+ a619_build_bw_table(&msg);
+ else if (adreno_is_a640_family(adreno_gpu))
+ a640_build_bw_table(&msg);
+ else if (adreno_is_a650(adreno_gpu))
+ a650_build_bw_table(&msg);
+ else if (adreno_is_7c3(adreno_gpu))
+ adreno_7c3_build_bw_table(&msg);
+ else if (adreno_is_a660(adreno_gpu))
+ a660_build_bw_table(&msg);
+ else if (adreno_is_a690(adreno_gpu))
+ a690_build_bw_table(&msg);
+ else
+ a6xx_build_bw_table(&msg);
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_test msg = { 0 };
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_send_start(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_start msg = { 0 };
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_START, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_core_fw_start msg = { 0 };
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_CORE_FW_START, &msg,
+ sizeof(msg), NULL, 0);
+}
+
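+/*
+ * Request a GPU frequency change: 'index' selects an entry from the
+ * perf table previously sent with a6xx_hfi_send_perf_table().
+ */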
+int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index)
+{
+ struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };
+
+ msg.ack_type = 1; /* blocking */
+ msg.freq = index;
+ msg.bw = 0; /* TODO: bus scaling */
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
+ sizeof(msg), NULL, 0);
+}
+
+int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_prep_slumber_cmd msg = { 0 };
+
+	/* TODO: should the freq and bw fields be non-zero? */
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PREPARE_SLUMBER, &msg,
+ sizeof(msg), NULL, 0);
+}
+
+static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state)
+{
+ int ret;
+
+ ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
+ if (ret)
+ return ret;
+
+ ret = a6xx_hfi_get_fw_version(gmu, NULL);
+ if (ret)
+ return ret;
+
+ /*
+	 * We have to exchange version numbers as part of the init sequence,
+	 * but at this point the kernel driver doesn't need to know the exact
+	 * version of the GMU firmware
+ */
+
+ ret = a6xx_hfi_send_perf_table_v1(gmu);
+ if (ret)
+ return ret;
+
+ ret = a6xx_hfi_send_bw_table(gmu);
+ if (ret)
+ return ret;
+
+ /*
+	 * Let the GMU know that there won't be any more HFI messages until
+	 * the next boot
+ */
+ a6xx_hfi_send_test(gmu);
+
+ return 0;
+}
+
+int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
+{
+ int ret;
+
+ if (gmu->legacy)
+ return a6xx_hfi_start_v1(gmu, boot_state);
+
+ ret = a6xx_hfi_send_perf_table(gmu);
+ if (ret)
+ return ret;
+
+ ret = a6xx_hfi_send_bw_table(gmu);
+ if (ret)
+ return ret;
+
+ ret = a6xx_hfi_send_core_fw_start(gmu);
+ if (ret)
+ return ret;
+
+ /*
+	 * The downstream driver sends this in its "a6xx_hw_init" equivalent,
+	 * but there seems to be no harm in sending it here
+ */
+ ret = a6xx_hfi_send_start(gmu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void a6xx_hfi_stop(struct a6xx_gmu *gmu)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
+ struct a6xx_hfi_queue *queue = &gmu->queues[i];
+
+ if (!queue->header)
+ continue;
+
+ if (queue->header->read_index != queue->header->write_index)
+ DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);
+
+ queue->header->read_index = 0;
+ queue->header->write_index = 0;
+
+ memset(&queue->history, 0xff, sizeof(queue->history));
+ queue->history_idx = 0;
+ }
+}
+
+static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
+ struct a6xx_hfi_queue_header *header, void *virt, u64 iova,
+ u32 id)
+{
+ spin_lock_init(&queue->lock);
+ queue->header = header;
+ queue->data = virt;
+ atomic_set(&queue->seqnum, 0);
+
+ memset(&queue->history, 0xff, sizeof(queue->history));
+ queue->history_idx = 0;
+
+ /* Set up the shared memory header */
+ header->iova = iova;
+ header->type = 10 << 8 | id;
+ header->status = 1;
+ header->size = SZ_4K >> 2;
+ header->msg_size = 0;
+ header->dropped = 0;
+ header->rx_watermark = 1;
+ header->tx_watermark = 1;
+ header->rx_request = 1;
+ header->tx_request = 0;
+ header->read_index = 0;
+ header->write_index = 0;
+}
+
+void a6xx_hfi_init(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gmu_bo *hfi = &gmu->hfi;
+ struct a6xx_hfi_queue_table_header *table = hfi->virt;
+ struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
+ u64 offset;
+ int table_size;
+
+ /*
+ * The table size is the size of the table header plus all of the queue
+ * headers
+ */
+ table_size = sizeof(*table);
+ table_size += (ARRAY_SIZE(gmu->queues) *
+ sizeof(struct a6xx_hfi_queue_header));
+
+ table->version = 0;
+ table->size = table_size;
+ /* First queue header is located immediately after the table header */
+ table->qhdr0_offset = sizeof(*table) >> 2;
+ table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
+ table->num_queues = ARRAY_SIZE(gmu->queues);
+ table->active_queues = ARRAY_SIZE(gmu->queues);
+
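+	/*
+	 * The first 4K page of the HFI buffer holds the queue table and
+	 * queue headers; each queue's ring data then gets its own 4K page
+	 * (matching the SZ_4K >> 2 dword size programmed in
+	 * a6xx_hfi_queue_init()).
+	 */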
+ /* Command queue */
+ offset = SZ_4K;
+ a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
+ hfi->iova + offset, 0);
+
+ /* GMU response queue */
+ offset += SZ_4K;
+ a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
+ hfi->iova + offset, gmu->legacy ? 4 : 1);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
new file mode 100644
index 0000000000..5281101693
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
+
+#ifndef _A6XX_HFI_H_
+#define _A6XX_HFI_H_
+
+struct a6xx_hfi_queue_table_header {
+ u32 version;
+ u32 size; /* Size of the queue table in dwords */
+ u32 qhdr0_offset; /* Offset of the first queue header */
+ u32 qhdr_size; /* Size of the queue headers */
+ u32 num_queues; /* Number of total queues */
+ u32 active_queues; /* Number of active queues */
+};
+
+struct a6xx_hfi_queue_header {
+ u32 status;
+ u32 iova;
+ u32 type;
+ u32 size;
+ u32 msg_size;
+ u32 dropped;
+ u32 rx_watermark;
+ u32 tx_watermark;
+ u32 rx_request;
+ u32 tx_request;
+ u32 read_index;
+ u32 write_index;
+};
+
+struct a6xx_hfi_queue {
+ struct a6xx_hfi_queue_header *header;
+ spinlock_t lock;
+ u32 *data;
+ atomic_t seqnum;
+
+ /*
+ * Tracking for the start index of the last N messages in the
+ * queue, for the benefit of devcore dump / crashdec (since
+ * parsing in the reverse direction to decode the last N
+ * messages is difficult to do and would rely on heuristics
+ * which are not guaranteed to be correct)
+ */
+#define HFI_HISTORY_SZ 8
+ s32 history[HFI_HISTORY_SZ];
+ u8 history_idx;
+};
+
+/* This is the outgoing queue to the GMU */
+#define HFI_COMMAND_QUEUE 0
+
+/* This is the incoming response queue from the GMU */
+#define HFI_RESPONSE_QUEUE 1
+
+#define HFI_HEADER_ID(msg) ((msg) & 0xff)
+#define HFI_HEADER_SIZE(msg) (((msg) >> 8) & 0xff)
+#define HFI_HEADER_SEQNUM(msg) (((msg) >> 20) & 0xfff)
+
+/* FIXME: Do we need this or can we use ARRAY_SIZE? */
+#define HFI_RESPONSE_PAYLOAD_SIZE 16
+
+/* HFI message types */
+
+#define HFI_MSG_CMD 0
+#define HFI_MSG_ACK 1
+#define HFI_MSG_ACK_V1 2
+
+#define HFI_F2H_MSG_ACK 126
+
+struct a6xx_hfi_msg_response {
+ u32 header;
+ u32 ret_header;
+ u32 error;
+ u32 payload[HFI_RESPONSE_PAYLOAD_SIZE];
+};
+
+#define HFI_F2H_MSG_ERROR 100
+
+struct a6xx_hfi_msg_error {
+ u32 header;
+ u32 code;
+ u32 payload[2];
+};
+
+#define HFI_H2F_MSG_INIT 0
+
+struct a6xx_hfi_msg_gmu_init_cmd {
+ u32 header;
+ u32 seg_id;
+ u32 dbg_buffer_addr;
+ u32 dbg_buffer_size;
+ u32 boot_state;
+};
+
+#define HFI_H2F_MSG_FW_VERSION 1
+
+struct a6xx_hfi_msg_fw_version {
+ u32 header;
+ u32 supported_version;
+};
+
+#define HFI_H2F_MSG_PERF_TABLE 4
+
+struct perf_level {
+ u32 vote;
+ u32 freq;
+};
+
+struct perf_gx_level {
+ u32 vote;
+ u32 acd;
+ u32 freq;
+};
+
+struct a6xx_hfi_msg_perf_table_v1 {
+ u32 header;
+ u32 num_gpu_levels;
+ u32 num_gmu_levels;
+
+ struct perf_level gx_votes[16];
+ struct perf_level cx_votes[4];
+};
+
+struct a6xx_hfi_msg_perf_table {
+ u32 header;
+ u32 num_gpu_levels;
+ u32 num_gmu_levels;
+
+ struct perf_gx_level gx_votes[16];
+ struct perf_level cx_votes[4];
+};
+
+#define HFI_H2F_MSG_BW_TABLE 3
+
+struct a6xx_hfi_msg_bw_table {
+ u32 header;
+ u32 bw_level_num;
+ u32 cnoc_cmds_num;
+ u32 ddr_cmds_num;
+ u32 cnoc_wait_bitmask;
+ u32 ddr_wait_bitmask;
+ u32 cnoc_cmds_addrs[6];
+ u32 cnoc_cmds_data[2][6];
+ u32 ddr_cmds_addrs[8];
+ u32 ddr_cmds_data[16][8];
+};
+
+#define HFI_H2F_MSG_TEST 5
+
+struct a6xx_hfi_msg_test {
+ u32 header;
+};
+
+#define HFI_H2F_MSG_START 10
+
+struct a6xx_hfi_msg_start {
+ u32 header;
+};
+
+#define HFI_H2F_MSG_CORE_FW_START 14
+
+struct a6xx_hfi_msg_core_fw_start {
+ u32 header;
+ u32 handle;
+};
+
+#define HFI_H2F_MSG_GX_BW_PERF_VOTE 30
+
+struct a6xx_hfi_gx_bw_perf_vote_cmd {
+ u32 header;
+ u32 ack_type;
+ u32 freq;
+ u32 bw;
+};
+
+#define HFI_H2F_MSG_PREPARE_SLUMBER 33
+
+struct a6xx_hfi_prep_slumber_cmd {
+ u32 header;
+ u32 bw;
+ u32 freq;
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
new file mode 100644
index 0000000000..51c320a2e5
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -0,0 +1,697 @@
+#ifndef ADRENO_COMMON_XML
+#define ADRENO_COMMON_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52)
+
+Copyright (C) 2013-2023 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum chip {
+ A2XX = 2,
+ A3XX = 3,
+ A4XX = 4,
+ A5XX = 5,
+ A6XX = 6,
+ A7XX = 7,
+};
+
+enum adreno_pa_su_sc_draw {
+ PC_DRAW_POINTS = 0,
+ PC_DRAW_LINES = 1,
+ PC_DRAW_TRIANGLES = 2,
+};
+
+enum adreno_compare_func {
+ FUNC_NEVER = 0,
+ FUNC_LESS = 1,
+ FUNC_EQUAL = 2,
+ FUNC_LEQUAL = 3,
+ FUNC_GREATER = 4,
+ FUNC_NOTEQUAL = 5,
+ FUNC_GEQUAL = 6,
+ FUNC_ALWAYS = 7,
+};
+
+enum adreno_stencil_op {
+ STENCIL_KEEP = 0,
+ STENCIL_ZERO = 1,
+ STENCIL_REPLACE = 2,
+ STENCIL_INCR_CLAMP = 3,
+ STENCIL_DECR_CLAMP = 4,
+ STENCIL_INVERT = 5,
+ STENCIL_INCR_WRAP = 6,
+ STENCIL_DECR_WRAP = 7,
+};
+
+enum adreno_rb_blend_factor {
+ FACTOR_ZERO = 0,
+ FACTOR_ONE = 1,
+ FACTOR_SRC_COLOR = 4,
+ FACTOR_ONE_MINUS_SRC_COLOR = 5,
+ FACTOR_SRC_ALPHA = 6,
+ FACTOR_ONE_MINUS_SRC_ALPHA = 7,
+ FACTOR_DST_COLOR = 8,
+ FACTOR_ONE_MINUS_DST_COLOR = 9,
+ FACTOR_DST_ALPHA = 10,
+ FACTOR_ONE_MINUS_DST_ALPHA = 11,
+ FACTOR_CONSTANT_COLOR = 12,
+ FACTOR_ONE_MINUS_CONSTANT_COLOR = 13,
+ FACTOR_CONSTANT_ALPHA = 14,
+ FACTOR_ONE_MINUS_CONSTANT_ALPHA = 15,
+ FACTOR_SRC_ALPHA_SATURATE = 16,
+ FACTOR_SRC1_COLOR = 20,
+ FACTOR_ONE_MINUS_SRC1_COLOR = 21,
+ FACTOR_SRC1_ALPHA = 22,
+ FACTOR_ONE_MINUS_SRC1_ALPHA = 23,
+};
+
+enum adreno_rb_surface_endian {
+ ENDIAN_NONE = 0,
+ ENDIAN_8IN16 = 1,
+ ENDIAN_8IN32 = 2,
+ ENDIAN_16IN32 = 3,
+ ENDIAN_8IN64 = 4,
+ ENDIAN_8IN128 = 5,
+};
+
+enum adreno_rb_dither_mode {
+ DITHER_DISABLE = 0,
+ DITHER_ALWAYS = 1,
+ DITHER_IF_ALPHA_OFF = 2,
+};
+
+enum adreno_rb_depth_format {
+ DEPTHX_16 = 0,
+ DEPTHX_24_8 = 1,
+ DEPTHX_32 = 2,
+};
+
+enum adreno_rb_copy_control_mode {
+ RB_COPY_RESOLVE = 1,
+ RB_COPY_CLEAR = 2,
+ RB_COPY_DEPTH_STENCIL = 5,
+};
+
+enum a3xx_rop_code {
+ ROP_CLEAR = 0,
+ ROP_NOR = 1,
+ ROP_AND_INVERTED = 2,
+ ROP_COPY_INVERTED = 3,
+ ROP_AND_REVERSE = 4,
+ ROP_INVERT = 5,
+ ROP_NAND = 7,
+ ROP_AND = 8,
+ ROP_EQUIV = 9,
+ ROP_NOOP = 10,
+ ROP_OR_INVERTED = 11,
+ ROP_OR_REVERSE = 13,
+ ROP_OR = 14,
+ ROP_SET = 15,
+};
+
+enum a3xx_render_mode {
+ RB_RENDERING_PASS = 0,
+ RB_TILING_PASS = 1,
+ RB_RESOLVE_PASS = 2,
+ RB_COMPUTE_PASS = 3,
+};
+
+enum a3xx_msaa_samples {
+ MSAA_ONE = 0,
+ MSAA_TWO = 1,
+ MSAA_FOUR = 2,
+ MSAA_EIGHT = 3,
+};
+
+enum a3xx_threadmode {
+ MULTI = 0,
+ SINGLE = 1,
+};
+
+enum a3xx_instrbuffermode {
+ CACHE = 0,
+ BUFFER = 1,
+};
+
+enum a3xx_threadsize {
+ TWO_QUADS = 0,
+ FOUR_QUADS = 1,
+};
+
+enum a3xx_color_swap {
+ WZYX = 0,
+ WXYZ = 1,
+ ZYXW = 2,
+ XYZW = 3,
+};
+
+enum a3xx_rb_blend_opcode {
+ BLEND_DST_PLUS_SRC = 0,
+ BLEND_SRC_MINUS_DST = 1,
+ BLEND_DST_MINUS_SRC = 2,
+ BLEND_MIN_DST_SRC = 3,
+ BLEND_MAX_DST_SRC = 4,
+};
+
+enum a4xx_tess_spacing {
+ EQUAL_SPACING = 0,
+ ODD_SPACING = 2,
+ EVEN_SPACING = 3,
+};
+
+enum a5xx_address_mode {
+ ADDR_32B = 0,
+ ADDR_64B = 1,
+};
+
+enum a5xx_line_mode {
+ BRESENHAM = 0,
+ RECTANGULAR = 1,
+};
+
+enum a6xx_tex_prefetch_cmd {
+ TEX_PREFETCH_UNK0 = 0,
+ TEX_PREFETCH_SAM = 1,
+ TEX_PREFETCH_GATHER4R = 2,
+ TEX_PREFETCH_GATHER4G = 3,
+ TEX_PREFETCH_GATHER4B = 4,
+ TEX_PREFETCH_GATHER4A = 5,
+ TEX_PREFETCH_UNK6 = 6,
+ TEX_PREFETCH_UNK7 = 7,
+};
+
+#define REG_AXXX_CP_RB_BASE 0x000001c0
+
+#define REG_AXXX_CP_RB_CNTL 0x000001c1
+#define AXXX_CP_RB_CNTL_BUFSZ__MASK 0x0000003f
+#define AXXX_CP_RB_CNTL_BUFSZ__SHIFT 0
+static inline uint32_t AXXX_CP_RB_CNTL_BUFSZ(uint32_t val)
+{
+ return ((val) << AXXX_CP_RB_CNTL_BUFSZ__SHIFT) & AXXX_CP_RB_CNTL_BUFSZ__MASK;
+}
+#define AXXX_CP_RB_CNTL_BLKSZ__MASK 0x00003f00
+#define AXXX_CP_RB_CNTL_BLKSZ__SHIFT 8
+static inline uint32_t AXXX_CP_RB_CNTL_BLKSZ(uint32_t val)
+{
+ return ((val) << AXXX_CP_RB_CNTL_BLKSZ__SHIFT) & AXXX_CP_RB_CNTL_BLKSZ__MASK;
+}
+#define AXXX_CP_RB_CNTL_BUF_SWAP__MASK 0x00030000
+#define AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT 16
+static inline uint32_t AXXX_CP_RB_CNTL_BUF_SWAP(uint32_t val)
+{
+ return ((val) << AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT) & AXXX_CP_RB_CNTL_BUF_SWAP__MASK;
+}
+#define AXXX_CP_RB_CNTL_POLL_EN 0x00100000
+#define AXXX_CP_RB_CNTL_NO_UPDATE 0x08000000
+#define AXXX_CP_RB_CNTL_RPTR_WR_EN 0x80000000
+
+#define REG_AXXX_CP_RB_RPTR_ADDR 0x000001c3
+#define AXXX_CP_RB_RPTR_ADDR_SWAP__MASK 0x00000003
+#define AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT 0
+static inline uint32_t AXXX_CP_RB_RPTR_ADDR_SWAP(uint32_t val)
+{
+ return ((val) << AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT) & AXXX_CP_RB_RPTR_ADDR_SWAP__MASK;
+}
+#define AXXX_CP_RB_RPTR_ADDR_ADDR__MASK 0xfffffffc
+#define AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT 2
+static inline uint32_t AXXX_CP_RB_RPTR_ADDR_ADDR(uint32_t val)
+{
+ return ((val >> 2) << AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT) & AXXX_CP_RB_RPTR_ADDR_ADDR__MASK;
+}
+
+#define REG_AXXX_CP_RB_RPTR 0x000001c4
+
+#define REG_AXXX_CP_RB_WPTR 0x000001c5
+
+#define REG_AXXX_CP_RB_WPTR_DELAY 0x000001c6
+
+#define REG_AXXX_CP_RB_RPTR_WR 0x000001c7
+
+#define REG_AXXX_CP_RB_WPTR_BASE 0x000001c8
+
+#define REG_AXXX_CP_QUEUE_THRESHOLDS 0x000001d5
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK 0x0000000f
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT 0
+static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(uint32_t val)
+{
+ return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK;
+}
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK 0x00000f00
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT 8
+static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(uint32_t val)
+{
+ return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK;
+}
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK 0x000f0000
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT 16
+static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(uint32_t val)
+{
+ return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK;
+}
+
+#define REG_AXXX_CP_MEQ_THRESHOLDS 0x000001d6
+#define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK 0x001f0000
+#define AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT 16
+static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_MEQ_END(uint32_t val)
+{
+ return ((val) << AXXX_CP_MEQ_THRESHOLDS_MEQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_MEQ_END__MASK;
+}
+#define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK 0x1f000000
+#define AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT 24
+static inline uint32_t AXXX_CP_MEQ_THRESHOLDS_ROQ_END(uint32_t val)
+{
+ return ((val) << AXXX_CP_MEQ_THRESHOLDS_ROQ_END__SHIFT) & AXXX_CP_MEQ_THRESHOLDS_ROQ_END__MASK;
+}
+
+#define REG_AXXX_CP_CSQ_AVAIL 0x000001d7
+#define AXXX_CP_CSQ_AVAIL_RING__MASK 0x0000007f
+#define AXXX_CP_CSQ_AVAIL_RING__SHIFT 0
+static inline uint32_t AXXX_CP_CSQ_AVAIL_RING(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_AVAIL_RING__SHIFT) & AXXX_CP_CSQ_AVAIL_RING__MASK;
+}
+#define AXXX_CP_CSQ_AVAIL_IB1__MASK 0x00007f00
+#define AXXX_CP_CSQ_AVAIL_IB1__SHIFT 8
+static inline uint32_t AXXX_CP_CSQ_AVAIL_IB1(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_AVAIL_IB1__SHIFT) & AXXX_CP_CSQ_AVAIL_IB1__MASK;
+}
+#define AXXX_CP_CSQ_AVAIL_IB2__MASK 0x007f0000
+#define AXXX_CP_CSQ_AVAIL_IB2__SHIFT 16
+static inline uint32_t AXXX_CP_CSQ_AVAIL_IB2(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_AVAIL_IB2__SHIFT) & AXXX_CP_CSQ_AVAIL_IB2__MASK;
+}
+
+#define REG_AXXX_CP_STQ_AVAIL 0x000001d8
+#define AXXX_CP_STQ_AVAIL_ST__MASK 0x0000007f
+#define AXXX_CP_STQ_AVAIL_ST__SHIFT 0
+static inline uint32_t AXXX_CP_STQ_AVAIL_ST(uint32_t val)
+{
+ return ((val) << AXXX_CP_STQ_AVAIL_ST__SHIFT) & AXXX_CP_STQ_AVAIL_ST__MASK;
+}
+
+#define REG_AXXX_CP_MEQ_AVAIL 0x000001d9
+#define AXXX_CP_MEQ_AVAIL_MEQ__MASK 0x0000001f
+#define AXXX_CP_MEQ_AVAIL_MEQ__SHIFT 0
+static inline uint32_t AXXX_CP_MEQ_AVAIL_MEQ(uint32_t val)
+{
+ return ((val) << AXXX_CP_MEQ_AVAIL_MEQ__SHIFT) & AXXX_CP_MEQ_AVAIL_MEQ__MASK;
+}
+
+#define REG_AXXX_SCRATCH_UMSK 0x000001dc
+#define AXXX_SCRATCH_UMSK_UMSK__MASK 0x000000ff
+#define AXXX_SCRATCH_UMSK_UMSK__SHIFT 0
+static inline uint32_t AXXX_SCRATCH_UMSK_UMSK(uint32_t val)
+{
+ return ((val) << AXXX_SCRATCH_UMSK_UMSK__SHIFT) & AXXX_SCRATCH_UMSK_UMSK__MASK;
+}
+#define AXXX_SCRATCH_UMSK_SWAP__MASK 0x00030000
+#define AXXX_SCRATCH_UMSK_SWAP__SHIFT 16
+static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val)
+{
+ return ((val) << AXXX_SCRATCH_UMSK_SWAP__SHIFT) & AXXX_SCRATCH_UMSK_SWAP__MASK;
+}
+
+#define REG_AXXX_SCRATCH_ADDR 0x000001dd
+
+#define REG_AXXX_CP_ME_RDADDR 0x000001ea
+
+#define REG_AXXX_CP_STATE_DEBUG_INDEX 0x000001ec
+
+#define REG_AXXX_CP_STATE_DEBUG_DATA 0x000001ed
+
+#define REG_AXXX_CP_INT_CNTL 0x000001f2
+#define AXXX_CP_INT_CNTL_SW_INT_MASK 0x00080000
+#define AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK 0x00800000
+#define AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK 0x01000000
+#define AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK 0x02000000
+#define AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK 0x04000000
+#define AXXX_CP_INT_CNTL_IB_ERROR_MASK 0x08000000
+#define AXXX_CP_INT_CNTL_IB2_INT_MASK 0x20000000
+#define AXXX_CP_INT_CNTL_IB1_INT_MASK 0x40000000
+#define AXXX_CP_INT_CNTL_RB_INT_MASK 0x80000000
+
+#define REG_AXXX_CP_INT_STATUS 0x000001f3
+
+#define REG_AXXX_CP_INT_ACK 0x000001f4
+
+#define REG_AXXX_CP_ME_CNTL 0x000001f6
+#define AXXX_CP_ME_CNTL_BUSY 0x20000000
+#define AXXX_CP_ME_CNTL_HALT 0x10000000
+
+#define REG_AXXX_CP_ME_STATUS 0x000001f7
+
+#define REG_AXXX_CP_ME_RAM_WADDR 0x000001f8
+
+#define REG_AXXX_CP_ME_RAM_RADDR 0x000001f9
+
+#define REG_AXXX_CP_ME_RAM_DATA 0x000001fa
+
+#define REG_AXXX_CP_DEBUG 0x000001fc
+#define AXXX_CP_DEBUG_PREDICATE_DISABLE 0x00800000
+#define AXXX_CP_DEBUG_PROG_END_PTR_ENABLE 0x01000000
+#define AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE 0x02000000
+#define AXXX_CP_DEBUG_PREFETCH_PASS_NOPS 0x04000000
+#define AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE 0x08000000
+#define AXXX_CP_DEBUG_PREFETCH_MATCH_DISABLE 0x10000000
+#define AXXX_CP_DEBUG_SIMPLE_ME_FLOW_CONTROL 0x40000000
+#define AXXX_CP_DEBUG_MIU_WRITE_PACK_DISABLE 0x80000000
+
+#define REG_AXXX_CP_CSQ_RB_STAT 0x000001fd
+#define AXXX_CP_CSQ_RB_STAT_RPTR__MASK 0x0000007f
+#define AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT 0
+static inline uint32_t AXXX_CP_CSQ_RB_STAT_RPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_RPTR__MASK;
+}
+#define AXXX_CP_CSQ_RB_STAT_WPTR__MASK 0x007f0000
+#define AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT 16
+static inline uint32_t AXXX_CP_CSQ_RB_STAT_WPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_WPTR__MASK;
+}
+
+#define REG_AXXX_CP_CSQ_IB1_STAT 0x000001fe
+#define AXXX_CP_CSQ_IB1_STAT_RPTR__MASK 0x0000007f
+#define AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT 0
+static inline uint32_t AXXX_CP_CSQ_IB1_STAT_RPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_RPTR__MASK;
+}
+#define AXXX_CP_CSQ_IB1_STAT_WPTR__MASK 0x007f0000
+#define AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT 16
+static inline uint32_t AXXX_CP_CSQ_IB1_STAT_WPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_WPTR__MASK;
+}
+
+#define REG_AXXX_CP_CSQ_IB2_STAT 0x000001ff
+#define AXXX_CP_CSQ_IB2_STAT_RPTR__MASK 0x0000007f
+#define AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT 0
+static inline uint32_t AXXX_CP_CSQ_IB2_STAT_RPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_RPTR__MASK;
+}
+#define AXXX_CP_CSQ_IB2_STAT_WPTR__MASK 0x007f0000
+#define AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT 16
+static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK;
+}
+
+#define REG_AXXX_CP_NON_PREFETCH_CNTRS 0x00000440
+
+#define REG_AXXX_CP_STQ_ST_STAT 0x00000443
+
+#define REG_AXXX_CP_ST_BASE 0x0000044d
+
+#define REG_AXXX_CP_ST_BUFSZ 0x0000044e
+
+#define REG_AXXX_CP_MEQ_STAT 0x0000044f
+
+#define REG_AXXX_CP_MIU_TAG_STAT 0x00000452
+
+#define REG_AXXX_CP_BIN_MASK_LO 0x00000454
+
+#define REG_AXXX_CP_BIN_MASK_HI 0x00000455
+
+#define REG_AXXX_CP_BIN_SELECT_LO 0x00000456
+
+#define REG_AXXX_CP_BIN_SELECT_HI 0x00000457
+
+#define REG_AXXX_CP_IB1_BASE 0x00000458
+
+#define REG_AXXX_CP_IB1_BUFSZ 0x00000459
+
+#define REG_AXXX_CP_IB2_BASE 0x0000045a
+
+#define REG_AXXX_CP_IB2_BUFSZ 0x0000045b
+
+#define REG_AXXX_CP_STAT 0x0000047f
+#define AXXX_CP_STAT_CP_BUSY__MASK 0x80000000
+#define AXXX_CP_STAT_CP_BUSY__SHIFT 31
+static inline uint32_t AXXX_CP_STAT_CP_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CP_BUSY__SHIFT) & AXXX_CP_STAT_CP_BUSY__MASK;
+}
+#define AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__MASK 0x40000000
+#define AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__SHIFT 30
+static inline uint32_t AXXX_CP_STAT_VS_EVENT_FIFO_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_VS_EVENT_FIFO_BUSY__MASK;
+}
+#define AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__MASK 0x20000000
+#define AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__SHIFT 29
+static inline uint32_t AXXX_CP_STAT_PS_EVENT_FIFO_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_PS_EVENT_FIFO_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__MASK 0x10000000
+#define AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__SHIFT 28
+static inline uint32_t AXXX_CP_STAT_CF_EVENT_FIFO_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_CF_EVENT_FIFO_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__MASK 0x08000000
+#define AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__SHIFT 27
+static inline uint32_t AXXX_CP_STAT_RB_EVENT_FIFO_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__SHIFT) & AXXX_CP_STAT_RB_EVENT_FIFO_BUSY__MASK;
+}
+#define AXXX_CP_STAT_ME_BUSY__MASK 0x04000000
+#define AXXX_CP_STAT_ME_BUSY__SHIFT 26
+static inline uint32_t AXXX_CP_STAT_ME_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_ME_BUSY__SHIFT) & AXXX_CP_STAT_ME_BUSY__MASK;
+}
+#define AXXX_CP_STAT_MIU_WR_C_BUSY__MASK 0x02000000
+#define AXXX_CP_STAT_MIU_WR_C_BUSY__SHIFT 25
+static inline uint32_t AXXX_CP_STAT_MIU_WR_C_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_MIU_WR_C_BUSY__SHIFT) & AXXX_CP_STAT_MIU_WR_C_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CP_3D_BUSY__MASK 0x00800000
+#define AXXX_CP_STAT_CP_3D_BUSY__SHIFT 23
+static inline uint32_t AXXX_CP_STAT_CP_3D_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CP_3D_BUSY__SHIFT) & AXXX_CP_STAT_CP_3D_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CP_NRT_BUSY__MASK 0x00400000
+#define AXXX_CP_STAT_CP_NRT_BUSY__SHIFT 22
+static inline uint32_t AXXX_CP_STAT_CP_NRT_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CP_NRT_BUSY__SHIFT) & AXXX_CP_STAT_CP_NRT_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RBIU_SCRATCH_BUSY__MASK 0x00200000
+#define AXXX_CP_STAT_RBIU_SCRATCH_BUSY__SHIFT 21
+static inline uint32_t AXXX_CP_STAT_RBIU_SCRATCH_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_RBIU_SCRATCH_BUSY__SHIFT) & AXXX_CP_STAT_RBIU_SCRATCH_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RCIU_ME_BUSY__MASK 0x00100000
+#define AXXX_CP_STAT_RCIU_ME_BUSY__SHIFT 20
+static inline uint32_t AXXX_CP_STAT_RCIU_ME_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_RCIU_ME_BUSY__SHIFT) & AXXX_CP_STAT_RCIU_ME_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RCIU_PFP_BUSY__MASK 0x00080000
+#define AXXX_CP_STAT_RCIU_PFP_BUSY__SHIFT 19
+static inline uint32_t AXXX_CP_STAT_RCIU_PFP_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_RCIU_PFP_BUSY__SHIFT) & AXXX_CP_STAT_RCIU_PFP_BUSY__MASK;
+}
+#define AXXX_CP_STAT_MEQ_RING_BUSY__MASK 0x00040000
+#define AXXX_CP_STAT_MEQ_RING_BUSY__SHIFT 18
+static inline uint32_t AXXX_CP_STAT_MEQ_RING_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_MEQ_RING_BUSY__SHIFT) & AXXX_CP_STAT_MEQ_RING_BUSY__MASK;
+}
+#define AXXX_CP_STAT_PFP_BUSY__MASK 0x00020000
+#define AXXX_CP_STAT_PFP_BUSY__SHIFT 17
+static inline uint32_t AXXX_CP_STAT_PFP_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_PFP_BUSY__SHIFT) & AXXX_CP_STAT_PFP_BUSY__MASK;
+}
+#define AXXX_CP_STAT_ST_QUEUE_BUSY__MASK 0x00010000
+#define AXXX_CP_STAT_ST_QUEUE_BUSY__SHIFT 16
+static inline uint32_t AXXX_CP_STAT_ST_QUEUE_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_ST_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_ST_QUEUE_BUSY__MASK;
+}
+#define AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__MASK 0x00002000
+#define AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__SHIFT 13
+static inline uint32_t AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_INDIRECT2_QUEUE_BUSY__MASK;
+}
+#define AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__MASK 0x00001000
+#define AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__SHIFT 12
+static inline uint32_t AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_INDIRECTS_QUEUE_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RING_QUEUE_BUSY__MASK 0x00000800
+#define AXXX_CP_STAT_RING_QUEUE_BUSY__SHIFT 11
+static inline uint32_t AXXX_CP_STAT_RING_QUEUE_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_RING_QUEUE_BUSY__SHIFT) & AXXX_CP_STAT_RING_QUEUE_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CSF_BUSY__MASK 0x00000400
+#define AXXX_CP_STAT_CSF_BUSY__SHIFT 10
+static inline uint32_t AXXX_CP_STAT_CSF_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CSF_BUSY__SHIFT) & AXXX_CP_STAT_CSF_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CSF_ST_BUSY__MASK 0x00000200
+#define AXXX_CP_STAT_CSF_ST_BUSY__SHIFT 9
+static inline uint32_t AXXX_CP_STAT_CSF_ST_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CSF_ST_BUSY__SHIFT) & AXXX_CP_STAT_CSF_ST_BUSY__MASK;
+}
+#define AXXX_CP_STAT_EVENT_BUSY__MASK 0x00000100
+#define AXXX_CP_STAT_EVENT_BUSY__SHIFT 8
+static inline uint32_t AXXX_CP_STAT_EVENT_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_EVENT_BUSY__SHIFT) & AXXX_CP_STAT_EVENT_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CSF_INDIRECT2_BUSY__MASK 0x00000080
+#define AXXX_CP_STAT_CSF_INDIRECT2_BUSY__SHIFT 7
+static inline uint32_t AXXX_CP_STAT_CSF_INDIRECT2_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CSF_INDIRECT2_BUSY__SHIFT) & AXXX_CP_STAT_CSF_INDIRECT2_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CSF_INDIRECTS_BUSY__MASK 0x00000040
+#define AXXX_CP_STAT_CSF_INDIRECTS_BUSY__SHIFT 6
+static inline uint32_t AXXX_CP_STAT_CSF_INDIRECTS_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CSF_INDIRECTS_BUSY__SHIFT) & AXXX_CP_STAT_CSF_INDIRECTS_BUSY__MASK;
+}
+#define AXXX_CP_STAT_CSF_RING_BUSY__MASK 0x00000020
+#define AXXX_CP_STAT_CSF_RING_BUSY__SHIFT 5
+static inline uint32_t AXXX_CP_STAT_CSF_RING_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_CSF_RING_BUSY__SHIFT) & AXXX_CP_STAT_CSF_RING_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RCIU_BUSY__MASK 0x00000010
+#define AXXX_CP_STAT_RCIU_BUSY__SHIFT 4
+static inline uint32_t AXXX_CP_STAT_RCIU_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_RCIU_BUSY__SHIFT) & AXXX_CP_STAT_RCIU_BUSY__MASK;
+}
+#define AXXX_CP_STAT_RBIU_BUSY__MASK 0x00000008
+#define AXXX_CP_STAT_RBIU_BUSY__SHIFT 3
+static inline uint32_t AXXX_CP_STAT_RBIU_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_RBIU_BUSY__SHIFT) & AXXX_CP_STAT_RBIU_BUSY__MASK;
+}
+#define AXXX_CP_STAT_MIU_RD_RETURN_BUSY__MASK 0x00000004
+#define AXXX_CP_STAT_MIU_RD_RETURN_BUSY__SHIFT 2
+static inline uint32_t AXXX_CP_STAT_MIU_RD_RETURN_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_MIU_RD_RETURN_BUSY__SHIFT) & AXXX_CP_STAT_MIU_RD_RETURN_BUSY__MASK;
+}
+#define AXXX_CP_STAT_MIU_RD_REQ_BUSY__MASK 0x00000002
+#define AXXX_CP_STAT_MIU_RD_REQ_BUSY__SHIFT 1
+static inline uint32_t AXXX_CP_STAT_MIU_RD_REQ_BUSY(uint32_t val)
+{
+ return ((val) << AXXX_CP_STAT_MIU_RD_REQ_BUSY__SHIFT) & AXXX_CP_STAT_MIU_RD_REQ_BUSY__MASK;
+}
+#define AXXX_CP_STAT_MIU_WR_BUSY 0x00000001
+
+#define REG_AXXX_CP_SCRATCH_REG0 0x00000578
+
+#define REG_AXXX_CP_SCRATCH_REG1 0x00000579
+
+#define REG_AXXX_CP_SCRATCH_REG2 0x0000057a
+
+#define REG_AXXX_CP_SCRATCH_REG3 0x0000057b
+
+#define REG_AXXX_CP_SCRATCH_REG4 0x0000057c
+
+#define REG_AXXX_CP_SCRATCH_REG5 0x0000057d
+
+#define REG_AXXX_CP_SCRATCH_REG6 0x0000057e
+
+#define REG_AXXX_CP_SCRATCH_REG7 0x0000057f
+
+#define REG_AXXX_CP_ME_VS_EVENT_SRC 0x00000600
+
+#define REG_AXXX_CP_ME_VS_EVENT_ADDR 0x00000601
+
+#define REG_AXXX_CP_ME_VS_EVENT_DATA 0x00000602
+
+#define REG_AXXX_CP_ME_VS_EVENT_ADDR_SWM 0x00000603
+
+#define REG_AXXX_CP_ME_VS_EVENT_DATA_SWM 0x00000604
+
+#define REG_AXXX_CP_ME_PS_EVENT_SRC 0x00000605
+
+#define REG_AXXX_CP_ME_PS_EVENT_ADDR 0x00000606
+
+#define REG_AXXX_CP_ME_PS_EVENT_DATA 0x00000607
+
+#define REG_AXXX_CP_ME_PS_EVENT_ADDR_SWM 0x00000608
+
+#define REG_AXXX_CP_ME_PS_EVENT_DATA_SWM 0x00000609
+
+#define REG_AXXX_CP_ME_CF_EVENT_SRC 0x0000060a
+
+#define REG_AXXX_CP_ME_CF_EVENT_ADDR 0x0000060b
+
+#define REG_AXXX_CP_ME_CF_EVENT_DATA 0x0000060c
+
+#define REG_AXXX_CP_ME_NRT_ADDR 0x0000060d
+
+#define REG_AXXX_CP_ME_NRT_DATA 0x0000060e
+
+#define REG_AXXX_CP_ME_VS_FETCH_DONE_SRC 0x00000612
+
+#define REG_AXXX_CP_ME_VS_FETCH_DONE_ADDR 0x00000613
+
+#define REG_AXXX_CP_ME_VS_FETCH_DONE_DATA 0x00000614
+
+
+#endif /* ADRENO_COMMON_XML */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
new file mode 100644
index 0000000000..b7b527e21d
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -0,0 +1,889 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013-2014 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
+ */
+
+#include "adreno_gpu.h"
+
+bool hang_debug = false;
+MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
+module_param_named(hang_debug, hang_debug, bool, 0600);
+
+bool snapshot_debugbus = false;
+MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
+module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);
+
+bool allow_vram_carveout = false;
+MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU");
+module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);
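+/*
+ * Illustrative note (assuming the usual CONFIG_DRM_MSM=m build): these end up
+ * as parameters of msm.ko, e.g. /sys/module/msm/parameters/hang_debug, or
+ * msm.hang_debug=1 on the kernel command line.
+ */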
+
+static const struct adreno_info gpulist[] = {
+ {
+ .chip_ids = ADRENO_CHIP_IDS(0x02000000),
+ .family = ADRENO_2XX_GEN1,
+ .revn = 200,
+ .fw = {
+ [ADRENO_FW_PM4] = "yamato_pm4.fw",
+ [ADRENO_FW_PFP] = "yamato_pfp.fw",
+ },
+ .gmem = SZ_256K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a2xx_gpu_init,
+ }, { /* a200 on i.mx51 has only 128KiB gmem */
+ .chip_ids = ADRENO_CHIP_IDS(0x02000001),
+ .family = ADRENO_2XX_GEN1,
+ .revn = 201,
+ .fw = {
+ [ADRENO_FW_PM4] = "yamato_pm4.fw",
+ [ADRENO_FW_PFP] = "yamato_pfp.fw",
+ },
+ .gmem = SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a2xx_gpu_init,
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x02020000),
+ .family = ADRENO_2XX_GEN2,
+ .revn = 220,
+ .fw = {
+ [ADRENO_FW_PM4] = "leia_pm4_470.fw",
+ [ADRENO_FW_PFP] = "leia_pfp_470.fw",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a2xx_gpu_init,
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(
+ 0x03000512,
+ 0x03000520
+ ),
+ .family = ADRENO_3XX,
+ .revn = 305,
+ .fw = {
+ [ADRENO_FW_PM4] = "a300_pm4.fw",
+ [ADRENO_FW_PFP] = "a300_pfp.fw",
+ },
+ .gmem = SZ_256K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a3xx_gpu_init,
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x03000600),
+ .family = ADRENO_3XX,
+ .revn = 307, /* because a305c is revn==306 */
+ .fw = {
+ [ADRENO_FW_PM4] = "a300_pm4.fw",
+ [ADRENO_FW_PFP] = "a300_pfp.fw",
+ },
+ .gmem = SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a3xx_gpu_init,
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(
+ 0x03020000,
+ 0x03020001,
+ 0x03020002
+ ),
+ .family = ADRENO_3XX,
+ .revn = 320,
+ .fw = {
+ [ADRENO_FW_PM4] = "a300_pm4.fw",
+ [ADRENO_FW_PFP] = "a300_pfp.fw",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a3xx_gpu_init,
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(
+ 0x03030000,
+ 0x03030001,
+ 0x03030002
+ ),
+ .family = ADRENO_3XX,
+ .revn = 330,
+ .fw = {
+ [ADRENO_FW_PM4] = "a330_pm4.fw",
+ [ADRENO_FW_PFP] = "a330_pfp.fw",
+ },
+ .gmem = SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a3xx_gpu_init,
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x04000500),
+ .family = ADRENO_4XX,
+ .revn = 405,
+ .fw = {
+ [ADRENO_FW_PM4] = "a420_pm4.fw",
+ [ADRENO_FW_PFP] = "a420_pfp.fw",
+ },
+ .gmem = SZ_256K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a4xx_gpu_init,
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x04020000),
+ .family = ADRENO_4XX,
+ .revn = 420,
+ .fw = {
+ [ADRENO_FW_PM4] = "a420_pm4.fw",
+ [ADRENO_FW_PFP] = "a420_pfp.fw",
+ },
+ .gmem = (SZ_1M + SZ_512K),
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a4xx_gpu_init,
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x04030002),
+ .family = ADRENO_4XX,
+ .revn = 430,
+ .fw = {
+ [ADRENO_FW_PM4] = "a420_pm4.fw",
+ [ADRENO_FW_PFP] = "a420_pfp.fw",
+ },
+ .gmem = (SZ_1M + SZ_512K),
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a4xx_gpu_init,
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x05000600),
+ .family = ADRENO_5XX,
+ .revn = 506,
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = (SZ_128K + SZ_8K),
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
+ ADRENO_QUIRK_LMLOADKILL_DISABLE,
+ .init = a5xx_gpu_init,
+ .zapfw = "a506_zap.mdt",
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x05000800),
+ .family = ADRENO_5XX,
+ .revn = 508,
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = (SZ_128K + SZ_8K),
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
+ .init = a5xx_gpu_init,
+ .zapfw = "a508_zap.mdt",
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x05000900),
+ .family = ADRENO_5XX,
+ .revn = 509,
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = (SZ_256K + SZ_16K),
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
+ .init = a5xx_gpu_init,
+ /* Adreno 509 uses the same ZAP as 512 */
+ .zapfw = "a512_zap.mdt",
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x05010000),
+ .family = ADRENO_5XX,
+ .revn = 510,
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = SZ_256K,
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .init = a5xx_gpu_init,
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x05010200),
+ .family = ADRENO_5XX,
+ .revn = 512,
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ },
+ .gmem = (SZ_256K + SZ_16K),
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
+ .init = a5xx_gpu_init,
+ .zapfw = "a512_zap.mdt",
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(
+ 0x05030002,
+ 0x05030004
+ ),
+ .family = ADRENO_5XX,
+ .revn = 530,
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ [ADRENO_FW_GPMU] = "a530v3_gpmu.fw2",
+ },
+ .gmem = SZ_1M,
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
+ ADRENO_QUIRK_FAULT_DETECT_MASK,
+ .init = a5xx_gpu_init,
+ .zapfw = "a530_zap.mdt",
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x05040001),
+ .family = ADRENO_5XX,
+ .revn = 540,
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ [ADRENO_FW_GPMU] = "a540_gpmu.fw2",
+ },
+ .gmem = SZ_1M,
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
+ .quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
+ .init = a5xx_gpu_init,
+ .zapfw = "a540_zap.mdt",
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06010000),
+ .family = ADRENO_6XX_GEN1,
+ .revn = 610,
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ },
+ .gmem = (SZ_128K + SZ_4K),
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a610_zap.mdt",
+ .hwcg = a612_hwcg,
+ /*
+ * There are (at least) three SoCs implementing A610: SM6125
+ * (trinket), SM6115 (bengal) and SM6225 (khaje). Trinket does
+ * not have speedbinning, as only a single SKU exists and we
+ * don't support khaje upstream yet. Hence, this matching
+ * table is only valid for bengal.
+ */
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 206, 1 },
+ { 200, 2 },
+ { 157, 3 },
+ { 127, 4 },
+ ),
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06010800),
+ .family = ADRENO_6XX_GEN1,
+ .revn = 618,
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a630_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
+ .init = a6xx_gpu_init,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 169, 1 },
+ { 174, 2 },
+ ),
+ }, {
+ .machine = "qcom,sm4350",
+ .chip_ids = ADRENO_CHIP_IDS(0x06010900),
+ .family = ADRENO_6XX_GEN1,
+ .revn = 619,
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a619_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a615_zap.mdt",
+ .hwcg = a615_hwcg,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 138, 1 },
+ { 92, 2 },
+ ),
+ }, {
+ .machine = "qcom,sm6375",
+ .chip_ids = ADRENO_CHIP_IDS(0x06010901),
+ .family = ADRENO_6XX_GEN1,
+ .revn = 619,
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a619_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a615_zap.mdt",
+ .hwcg = a615_hwcg,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 190, 1 },
+ { 177, 2 },
+ ),
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06010900),
+ .family = ADRENO_6XX_GEN1,
+ .revn = 619,
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a619_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
+ .init = a6xx_gpu_init,
+ .zapfw = "a615_zap.mdt",
+ .hwcg = a615_hwcg,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 120, 4 },
+ { 138, 3 },
+ { 169, 2 },
+ { 180, 1 },
+ ),
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(
+ 0x06030001,
+ 0x06030002
+ ),
+ .family = ADRENO_6XX_GEN1,
+ .revn = 630,
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a630_gmu.bin",
+ },
+ .gmem = SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
+ .init = a6xx_gpu_init,
+ .zapfw = "a630_zap.mdt",
+ .hwcg = a630_hwcg,
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06040001),
+ .family = ADRENO_6XX_GEN2,
+ .revn = 640,
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a640_gmu.bin",
+ },
+ .gmem = SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
+ .init = a6xx_gpu_init,
+ .zapfw = "a640_zap.mdt",
+ .hwcg = a640_hwcg,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 1, 1 },
+ ),
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06050002),
+ .family = ADRENO_6XX_GEN3,
+ .revn = 650,
+ .fw = {
+ [ADRENO_FW_SQE] = "a650_sqe.fw",
+ [ADRENO_FW_GMU] = "a650_gmu.bin",
+ },
+ .gmem = SZ_1M + SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .init = a6xx_gpu_init,
+ .zapfw = "a650_zap.mdt",
+ .hwcg = a650_hwcg,
+ .address_space_size = SZ_16G,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 1, 1 },
+ { 2, 3 }, /* Yep, 2 and 3 are swapped! :/ */
+ { 3, 2 },
+ ),
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06060001),
+ .family = ADRENO_6XX_GEN4,
+ .revn = 660,
+ .fw = {
+ [ADRENO_FW_SQE] = "a660_sqe.fw",
+ [ADRENO_FW_GMU] = "a660_gmu.bin",
+ },
+ .gmem = SZ_1M + SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .init = a6xx_gpu_init,
+ .zapfw = "a660_zap.mdt",
+ .hwcg = a660_hwcg,
+ .address_space_size = SZ_16G,
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06030500),
+ .family = ADRENO_6XX_GEN4,
+ .fw = {
+ [ADRENO_FW_SQE] = "a660_sqe.fw",
+ [ADRENO_FW_GMU] = "a660_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .init = a6xx_gpu_init,
+ .hwcg = a660_hwcg,
+ .address_space_size = SZ_16G,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 117, 0 },
+ { 190, 1 },
+ ),
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06080001),
+ .family = ADRENO_6XX_GEN2,
+ .revn = 680,
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a640_gmu.bin",
+ },
+ .gmem = SZ_2M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
+ .init = a6xx_gpu_init,
+ .zapfw = "a640_zap.mdt",
+ .hwcg = a640_hwcg,
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06090000),
+ .family = ADRENO_6XX_GEN4,
+ .fw = {
+ [ADRENO_FW_SQE] = "a660_sqe.fw",
+ [ADRENO_FW_GMU] = "a660_gmu.bin",
+ },
+ .gmem = SZ_4M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .init = a6xx_gpu_init,
+ .zapfw = "a690_zap.mdt",
+ .hwcg = a690_hwcg,
+ .address_space_size = SZ_16G,
+ },
+};
+
+MODULE_FIRMWARE("qcom/a300_pm4.fw");
+MODULE_FIRMWARE("qcom/a300_pfp.fw");
+MODULE_FIRMWARE("qcom/a330_pm4.fw");
+MODULE_FIRMWARE("qcom/a330_pfp.fw");
+MODULE_FIRMWARE("qcom/a420_pm4.fw");
+MODULE_FIRMWARE("qcom/a420_pfp.fw");
+MODULE_FIRMWARE("qcom/a530_pm4.fw");
+MODULE_FIRMWARE("qcom/a530_pfp.fw");
+MODULE_FIRMWARE("qcom/a530v3_gpmu.fw2");
+MODULE_FIRMWARE("qcom/a530_zap.mdt");
+MODULE_FIRMWARE("qcom/a530_zap.b00");
+MODULE_FIRMWARE("qcom/a530_zap.b01");
+MODULE_FIRMWARE("qcom/a530_zap.b02");
+MODULE_FIRMWARE("qcom/a540_gpmu.fw2");
+MODULE_FIRMWARE("qcom/a619_gmu.bin");
+MODULE_FIRMWARE("qcom/a630_sqe.fw");
+MODULE_FIRMWARE("qcom/a630_gmu.bin");
+MODULE_FIRMWARE("qcom/a630_zap.mbn");
+MODULE_FIRMWARE("qcom/a640_gmu.bin");
+MODULE_FIRMWARE("qcom/a650_gmu.bin");
+MODULE_FIRMWARE("qcom/a650_sqe.fw");
+MODULE_FIRMWARE("qcom/a660_gmu.bin");
+MODULE_FIRMWARE("qcom/a660_sqe.fw");
+MODULE_FIRMWARE("qcom/leia_pfp_470.fw");
+MODULE_FIRMWARE("qcom/leia_pm4_470.fw");
+MODULE_FIRMWARE("qcom/yamato_pfp.fw");
+MODULE_FIRMWARE("qcom/yamato_pm4.fw");
+
+static const struct adreno_info *adreno_info(uint32_t chip_id)
+{
+ /* identify gpu: */
+ for (int i = 0; i < ARRAY_SIZE(gpulist); i++) {
+ const struct adreno_info *info = &gpulist[i];
+ if (info->machine && !of_machine_is_compatible(info->machine))
+ continue;
+ for (int j = 0; info->chip_ids[j]; j++)
+ if (info->chip_ids[j] == chip_id)
+ return info;
+ }
+
+ return NULL;
+}
+
+struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct msm_gpu *gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ int ret;
+
+ if (pdev)
+ gpu = dev_to_gpu(&pdev->dev);
+
+ if (!gpu) {
+ dev_err_once(dev->dev, "no GPU device was found\n");
+ return NULL;
+ }
+
+ adreno_gpu = to_adreno_gpu(gpu);
+
+ /*
+ * The number one reason for HW init to fail is if the firmware isn't
+ * loaded yet. Try that first and don't bother continuing
+ * otherwise.
+ */
+
+ ret = adreno_load_fw(adreno_gpu);
+ if (ret)
+ return NULL;
+
+ if (gpu->funcs->ucode_load) {
+ ret = gpu->funcs->ucode_load(gpu);
+ if (ret)
+ return NULL;
+ }
+
+ /*
+ * Now that we have firmware loaded, and are ready to begin
+ * booting the gpu, go ahead and enable runpm:
+ */
+ pm_runtime_enable(&pdev->dev);
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret);
+ goto err_disable_rpm;
+ }
+
+ mutex_lock(&gpu->lock);
+ ret = msm_gpu_hw_init(gpu);
+ mutex_unlock(&gpu->lock);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
+ goto err_put_rpm;
+ }
+
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+#ifdef CONFIG_DEBUG_FS
+ if (gpu->funcs->debugfs_init) {
+ gpu->funcs->debugfs_init(gpu, dev->primary);
+ gpu->funcs->debugfs_init(gpu, dev->render);
+ }
+#endif
+
+ return gpu;
+
+err_put_rpm:
+ pm_runtime_put_sync_suspend(&pdev->dev);
+err_disable_rpm:
+ pm_runtime_disable(&pdev->dev);
+
+ return NULL;
+}
+
+static int find_chipid(struct device *dev, uint32_t *chipid)
+{
+ struct device_node *node = dev->of_node;
+ const char *compat;
+ int ret;
+
+ /* first search the compat strings for qcom,adreno-XYZ.W: */
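+ /*
+ * Worked example: "qcom,adreno-630.2" parses as core=6, major=3,
+ * minor=0, patch=2, i.e. chip_id 0x06030002 (the a630 entry in
+ * gpulist).
+ */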
+ ret = of_property_read_string_index(node, "compatible", 0, &compat);
+ if (ret == 0) {
+ unsigned int r, patch;
+
+ if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2 ||
+ sscanf(compat, "amd,imageon-%u.%u", &r, &patch) == 2) {
+ uint32_t core, major, minor;
+
+ core = r / 100;
+ r %= 100;
+ major = r / 10;
+ r %= 10;
+ minor = r;
+
+ *chipid = (core << 24) |
+ (major << 16) |
+ (minor << 8) |
+ patch;
+
+ return 0;
+ }
+
+ if (sscanf(compat, "qcom,adreno-%08x", chipid) == 1)
+ return 0;
+ }
+
+ /* and if that fails, fall back to legacy "qcom,chipid" property: */
+ ret = of_property_read_u32(node, "qcom,chipid", chipid);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret);
+ return ret;
+ }
+
+ dev_warn(dev, "Using legacy qcom,chipid binding!\n");
+
+ return 0;
+}
+
+static int adreno_bind(struct device *dev, struct device *master, void *data)
+{
+ static struct adreno_platform_config config = {};
+ const struct adreno_info *info;
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+ struct drm_device *drm = priv->dev;
+ struct msm_gpu *gpu;
+ int ret;
+
+ ret = find_chipid(dev, &config.chip_id);
+ if (ret)
+ return ret;
+
+ dev->platform_data = &config;
+ priv->gpu_pdev = to_platform_device(dev);
+
+ info = adreno_info(config.chip_id);
+ if (!info) {
+ dev_warn(drm->dev, "Unknown GPU revision: %"ADRENO_CHIPID_FMT"\n",
+ ADRENO_CHIPID_ARGS(config.chip_id));
+ return -ENXIO;
+ }
+
+ config.info = info;
+
+ DBG("Found GPU: %"ADRENO_CHIPID_FMT, ADRENO_CHIPID_ARGS(config.chip_id));
+
+ priv->is_a2xx = info->family < ADRENO_3XX;
+ priv->has_cached_coherent =
+ !!(info->quirks & ADRENO_QUIRK_HAS_CACHED_COHERENT);
+
+ gpu = info->init(drm);
+ if (IS_ERR(gpu)) {
+ dev_warn(drm->dev, "failed to load adreno gpu\n");
+ return PTR_ERR(gpu);
+ }
+
+ ret = dev_pm_opp_of_find_icc_paths(dev, NULL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int adreno_system_suspend(struct device *dev);
+static void adreno_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct msm_drm_private *priv = dev_get_drvdata(master);
+ struct msm_gpu *gpu = dev_to_gpu(dev);
+
+ if (pm_runtime_enabled(dev))
+ WARN_ON_ONCE(adreno_system_suspend(dev));
+ gpu->funcs->destroy(gpu);
+
+ priv->gpu_pdev = NULL;
+}
+
+static const struct component_ops a3xx_ops = {
+ .bind = adreno_bind,
+ .unbind = adreno_unbind,
+};
+
+static void adreno_device_register_headless(void)
+{
+ /* on imx5, we don't have a top-level mdp/dpu node;
+ * this creates a dummy node for the driver in that case
+ */
+ struct platform_device_info dummy_info = {
+ .parent = NULL,
+ .name = "msm",
+ .id = -1,
+ .res = NULL,
+ .num_res = 0,
+ .data = NULL,
+ .size_data = 0,
+ .dma_mask = ~0,
+ };
+ platform_device_register_full(&dummy_info);
+}
+
+static int adreno_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &a3xx_ops);
+ if (ret)
+ return ret;
+
+ if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon"))
+ adreno_device_register_headless();
+
+ return 0;
+}
+
+static int adreno_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &a3xx_ops);
+ return 0;
+}
+
+static void adreno_shutdown(struct platform_device *pdev)
+{
+ WARN_ON_ONCE(adreno_system_suspend(&pdev->dev));
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,adreno" },
+ { .compatible = "qcom,adreno-3xx" },
+ /* for compatibility with imx5 gpu: */
+ { .compatible = "amd,imageon" },
+ /* for backwards compat w/ downstream kgsl DT files: */
+ { .compatible = "qcom,kgsl-3d0" },
+ {}
+};
+
+static int adreno_runtime_resume(struct device *dev)
+{
+ struct msm_gpu *gpu = dev_to_gpu(dev);
+
+ return gpu->funcs->pm_resume(gpu);
+}
+
+static int adreno_runtime_suspend(struct device *dev)
+{
+ struct msm_gpu *gpu = dev_to_gpu(dev);
+
+ /*
+ * We should be holding a runpm ref, which will prevent
+ * runtime suspend. In the system suspend path, we've
+ * already waited for active jobs to complete.
+ */
+ WARN_ON_ONCE(gpu->active_submits);
+
+ return gpu->funcs->pm_suspend(gpu);
+}
+
+static void suspend_scheduler(struct msm_gpu *gpu)
+{
+ int i;
+
+ /*
+ * Shut down the scheduler before we force suspend, so that
+ * suspend isn't racing with scheduler kthread feeding us
+ * more work.
+ *
+ * Note, we just want to park the thread, and let any jobs
+ * that are already on the hw queue complete normally, as
+ * opposed to the drm_sched_stop() path used for handling
+ * faulting/timed-out jobs. We can't really cancel any jobs
+ * already on the hw queue without racing with the GPU.
+ */
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
+ kthread_park(sched->thread);
+ }
+}
+
+static void resume_scheduler(struct msm_gpu *gpu)
+{
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
+ kthread_unpark(sched->thread);
+ }
+}
+
+static int adreno_system_suspend(struct device *dev)
+{
+ struct msm_gpu *gpu = dev_to_gpu(dev);
+ int remaining, ret;
+
+ if (!gpu)
+ return 0;
+
+ suspend_scheduler(gpu);
+
+ remaining = wait_event_timeout(gpu->retire_event,
+ gpu->active_submits == 0,
+ msecs_to_jiffies(1000));
+ if (remaining == 0) {
+ dev_err(dev, "Timeout waiting for GPU to suspend\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = pm_runtime_force_suspend(dev);
+out:
+ if (ret)
+ resume_scheduler(gpu);
+
+ return ret;
+}
+
+static int adreno_system_resume(struct device *dev)
+{
+ struct msm_gpu *gpu = dev_to_gpu(dev);
+
+ if (!gpu)
+ return 0;
+
+ resume_scheduler(gpu);
+ return pm_runtime_force_resume(dev);
+}
+
+static const struct dev_pm_ops adreno_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(adreno_system_suspend, adreno_system_resume)
+ RUNTIME_PM_OPS(adreno_runtime_suspend, adreno_runtime_resume, NULL)
+};
+
+static struct platform_driver adreno_driver = {
+ .probe = adreno_probe,
+ .remove = adreno_remove,
+ .shutdown = adreno_shutdown,
+ .driver = {
+ .name = "adreno",
+ .of_match_table = dt_match,
+ .pm = &adreno_pm_ops,
+ },
+};
+
+void __init adreno_register(void)
+{
+ platform_driver_register(&adreno_driver);
+}
+
+void __exit adreno_unregister(void)
+{
+ platform_driver_unregister(&adreno_driver);
+}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
new file mode 100644
index 0000000000..8090dde032
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -0,0 +1,1131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/ascii85.h>
+#include <linux/interconnect.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/mdt_loader.h>
+#include <linux/nvmem-consumer.h>
+#include <soc/qcom/ocmem.h>
+#include "adreno_gpu.h"
+#include "a6xx_gpu.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+static u64 address_space_size = 0;
+MODULE_PARM_DESC(address_space_size, "Override for size of processes private GPU address space");
+module_param(address_space_size, ullong, 0600);
+
+static bool zap_available = true;
+
+static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
+ u32 pasid)
+{
+ struct device *dev = &gpu->pdev->dev;
+ const struct firmware *fw;
+ const char *signed_fwname = NULL;
+ struct device_node *np, *mem_np;
+ struct resource r;
+ phys_addr_t mem_phys;
+ ssize_t mem_size;
+ void *mem_region = NULL;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_ARCH_QCOM)) {
+ zap_available = false;
+ return -EINVAL;
+ }
+
+ np = of_get_child_by_name(dev->of_node, "zap-shader");
+ if (!np) {
+ zap_available = false;
+ return -ENODEV;
+ }
+
+ mem_np = of_parse_phandle(np, "memory-region", 0);
+ of_node_put(np);
+ if (!mem_np) {
+ zap_available = false;
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(mem_np, 0, &r);
+ of_node_put(mem_np);
+ if (ret)
+ return ret;
+
+ mem_phys = r.start;
+
+ /*
+ * Check for a firmware-name property. This is the new scheme
+ * to handle firmware that may be signed with device specific
+ * keys, allowing us to have a different zap fw path for different
+ * devices.
+ *
+ * If the firmware-name property is found, we bypass the
+ * adreno_request_fw() mechanism, because we don't need to handle
+ * the /lib/firmware/qcom/... vs /lib/firmware/... case.
+ *
+ * If the firmware-name property is not found, for backwards
+ * compatibility we fall back to the fwname from the gpulist
+ * table.
+ */
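+ /*
+ * Illustrative DT shape (node and property names as consumed below;
+ * the phandle and firmware path are only examples):
+ *
+ *   zap-shader {
+ *           memory-region = <&gpu_zap_mem>;
+ *           firmware-name = "qcom/<soc>/a6xx_zap.mbn";
+ *   };
+ */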
+ of_property_read_string_index(np, "firmware-name", 0, &signed_fwname);
+ if (signed_fwname) {
+ fwname = signed_fwname;
+ ret = request_firmware_direct(&fw, fwname, gpu->dev->dev);
+ if (ret)
+ fw = ERR_PTR(ret);
+ } else if (fwname) {
+ /* Request the MDT file from the default location: */
+ fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
+ } else {
+ /*
+ * For new targets, we require the firmware-name property,
+ * if a zap-shader is required, rather than falling back
+ * to a firmware name specified in gpulist.
+ *
+ * Because the firmware is signed with a (potentially)
+ * device specific key, having the name come from gpulist
+ * was a bad idea, and is only provided for backwards
+ * compatibility for older targets.
+ */
+ return -ENODEV;
+ }
+
+ if (IS_ERR(fw)) {
+ DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
+ return PTR_ERR(fw);
+ }
+
+ /* Figure out how much memory we need */
+ mem_size = qcom_mdt_get_size(fw);
+ if (mem_size < 0) {
+ ret = mem_size;
+ goto out;
+ }
+
+ if (mem_size > resource_size(&r)) {
+ DRM_DEV_ERROR(dev,
+ "memory region is too small to load the MDT\n");
+ ret = -E2BIG;
+ goto out;
+ }
+
+ /* Allocate memory for the firmware image */
+ mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
+ if (!mem_region) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Load the rest of the MDT
+ *
+ * Note that we could be dealing with two different paths, since
+ * with upstream linux-firmware it would be in a qcom/ subdir.
+ * adreno_request_fw() handles this, but qcom_mdt_load() does
+ * not. But since we've already gotten through adreno_request_fw()
+ * we know which of the two cases it is:
+ */
+ if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) {
+ ret = qcom_mdt_load(dev, fw, fwname, pasid,
+ mem_region, mem_phys, mem_size, NULL);
+ } else {
+ char *newname;
+
+ newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
+
+ ret = qcom_mdt_load(dev, fw, newname, pasid,
+ mem_region, mem_phys, mem_size, NULL);
+ kfree(newname);
+ }
+ if (ret)
+ goto out;
+
+ /* Send the image to the secure world */
+ ret = qcom_scm_pas_auth_and_reset(pasid);
+
+ /*
+ * If the scm call returns -EOPNOTSUPP we assume that this target
+ * doesn't need/support the zap shader so quietly fail
+ */
+ if (ret == -EOPNOTSUPP)
+ zap_available = false;
+ else if (ret)
+ DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
+
+out:
+ if (mem_region)
+ memunmap(mem_region);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct platform_device *pdev = gpu->pdev;
+
+ /* Short cut if we determine the zap shader isn't available/needed */
+ if (!zap_available)
+ return -ENODEV;
+
+ /* We need SCM to be able to load the firmware */
+ if (!qcom_scm_is_available()) {
+ DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
+ return -EPROBE_DEFER;
+ }
+
+ return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
+}
+
+struct msm_gem_address_space *
+adreno_create_address_space(struct msm_gpu *gpu,
+ struct platform_device *pdev)
+{
+ return adreno_iommu_create_address_space(gpu, pdev, 0);
+}
+
+struct msm_gem_address_space *
+adreno_iommu_create_address_space(struct msm_gpu *gpu,
+ struct platform_device *pdev,
+ unsigned long quirks)
+{
+ struct iommu_domain_geometry *geometry;
+ struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
+ u64 start, size;
+
+ mmu = msm_iommu_gpu_new(&pdev->dev, gpu, quirks);
+ if (IS_ERR_OR_NULL(mmu))
+ return ERR_CAST(mmu);
+
+ geometry = msm_iommu_get_geometry(mmu);
+ if (IS_ERR(geometry))
+ return ERR_CAST(geometry);
+
+ /*
+ * Use the aperture start or SZ_16M, whichever is greater. This will
+ * ensure that we align with the allocated pagetable range while still
+ * allowing room in the lower 32 bits for GMEM and whatnot
+ */
+ start = max_t(u64, SZ_16M, geometry->aperture_start);
+ size = geometry->aperture_end - start + 1;
+
+ aspace = msm_gem_address_space_create(mmu, "gpu",
+ start & GENMASK_ULL(48, 0), size);
+
+ if (IS_ERR(aspace) && !IS_ERR(mmu))
+ mmu->funcs->destroy(mmu);
+
+ return aspace;
+}
+
+u64 adreno_private_address_space_size(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ if (address_space_size)
+ return address_space_size;
+
+ if (adreno_gpu->info->address_space_size)
+ return adreno_gpu->info->address_space_size;
+
+ return SZ_4G;
+}
+
+#define ARM_SMMU_FSR_TF BIT(1)
+#define ARM_SMMU_FSR_PF BIT(3)
+#define ARM_SMMU_FSR_EF BIT(4)
+
+int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
+ struct adreno_smmu_fault_info *info, const char *block,
+ u32 scratch[4])
+{
+ const char *type = "UNKNOWN";
+ bool do_devcoredump = info && !READ_ONCE(gpu->crashstate);
+
+ /*
+ * If we aren't going to be resuming later from fault_worker, then do
+ * it now.
+ */
+ if (!do_devcoredump) {
+ gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
+ }
+
+ /*
+ * Print a default message if we couldn't get the data from the
+ * adreno-smmu-priv
+ */
+ if (!info) {
+ pr_warn_ratelimited("*** gpu fault: iova=%.16lx flags=%d (%u,%u,%u,%u)\n",
+ iova, flags,
+ scratch[0], scratch[1], scratch[2], scratch[3]);
+
+ return 0;
+ }
+
+ if (info->fsr & ARM_SMMU_FSR_TF)
+ type = "TRANSLATION";
+ else if (info->fsr & ARM_SMMU_FSR_PF)
+ type = "PERMISSION";
+ else if (info->fsr & ARM_SMMU_FSR_EF)
+ type = "EXTERNAL";
+
+ pr_warn_ratelimited("*** gpu fault: ttbr0=%.16llx iova=%.16lx dir=%s type=%s source=%s (%u,%u,%u,%u)\n",
+ info->ttbr0, iova,
+ flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ",
+ type, block,
+ scratch[0], scratch[1], scratch[2], scratch[3]);
+
+ if (do_devcoredump) {
+ /* Turn off the hangcheck timer to keep it from bothering us */
+ del_timer(&gpu->hangcheck_timer);
+
+ gpu->fault_info.ttbr0 = info->ttbr0;
+ gpu->fault_info.iova = iova;
+ gpu->fault_info.flags = flags;
+ gpu->fault_info.type = type;
+ gpu->fault_info.block = block;
+
+ kthread_queue_work(gpu->worker, &gpu->fault_work);
+ }
+
+ return 0;
+}
+
+int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ uint32_t param, uint64_t *value, uint32_t *len)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ /* No pointer params yet */
+ if (*len != 0)
+ return -EINVAL;
+
+ switch (param) {
+ case MSM_PARAM_GPU_ID:
+ *value = adreno_gpu->info->revn;
+ return 0;
+ case MSM_PARAM_GMEM_SIZE:
+ *value = adreno_gpu->info->gmem;
+ return 0;
+ case MSM_PARAM_GMEM_BASE:
+ *value = !adreno_is_a650_family(adreno_gpu) ? 0x100000 : 0;
+ return 0;
+ case MSM_PARAM_CHIP_ID:
+ *value = adreno_gpu->chip_id;
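+ /*
+ * Newer GPUs have no legacy revn; for those, also report the
+ * speedbin in the upper 32 bits:
+ */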
+ if (!adreno_gpu->info->revn)
+ *value |= ((uint64_t) adreno_gpu->speedbin) << 32;
+ return 0;
+ case MSM_PARAM_MAX_FREQ:
+ *value = adreno_gpu->base.fast_rate;
+ return 0;
+ case MSM_PARAM_TIMESTAMP:
+ if (adreno_gpu->funcs->get_timestamp) {
+ int ret;
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ ret = adreno_gpu->funcs->get_timestamp(gpu, value);
+ pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
+ return ret;
+ }
+ return -EINVAL;
+ case MSM_PARAM_PRIORITIES:
+ *value = gpu->nr_rings * NR_SCHED_PRIORITIES;
+ return 0;
+ case MSM_PARAM_PP_PGTABLE:
+ *value = 0;
+ return 0;
+ case MSM_PARAM_FAULTS:
+ if (ctx->aspace)
+ *value = gpu->global_faults + ctx->aspace->faults;
+ else
+ *value = gpu->global_faults;
+ return 0;
+ case MSM_PARAM_SUSPENDS:
+ *value = gpu->suspend_count;
+ return 0;
+ case MSM_PARAM_VA_START:
+ if (ctx->aspace == gpu->aspace)
+ return -EINVAL;
+ *value = ctx->aspace->va_start;
+ return 0;
+ case MSM_PARAM_VA_SIZE:
+ if (ctx->aspace == gpu->aspace)
+ return -EINVAL;
+ *value = ctx->aspace->va_size;
+ return 0;
+ default:
+ DBG("%s: invalid param: %u", gpu->name, param);
+ return -EINVAL;
+ }
+}
+
+int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ uint32_t param, uint64_t value, uint32_t len)
+{
+ switch (param) {
+ case MSM_PARAM_COMM:
+ case MSM_PARAM_CMDLINE:
+ /* kstrdup_quotable_cmdline() limits to PAGE_SIZE, so
+ * that should be a reasonable upper bound
+ */
+ if (len > PAGE_SIZE)
+ return -EINVAL;
+ break;
+ default:
+ if (len != 0)
+ return -EINVAL;
+ }
+
+ switch (param) {
+ case MSM_PARAM_COMM:
+ case MSM_PARAM_CMDLINE: {
+ char *str, **paramp;
+
+ str = memdup_user_nul(u64_to_user_ptr(value), len);
+ if (IS_ERR(str))
+ return PTR_ERR(str);
+
+ mutex_lock(&gpu->lock);
+
+ if (param == MSM_PARAM_COMM) {
+ paramp = &ctx->comm;
+ } else {
+ paramp = &ctx->cmdline;
+ }
+
+ kfree(*paramp);
+ *paramp = str;
+
+ mutex_unlock(&gpu->lock);
+
+ return 0;
+ }
+ case MSM_PARAM_SYSPROF:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return msm_file_private_set_sysprof(ctx, gpu, value);
+ default:
+ DBG("%s: invalid param: %u", gpu->name, param);
+ return -EINVAL;
+ }
+}
+
+const struct firmware *
+adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
+{
+ struct drm_device *drm = adreno_gpu->base.dev;
+ const struct firmware *fw = NULL;
+ char *newname;
+ int ret;
+
+ newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
+ if (!newname)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Try first to load from qcom/$fwfile using a direct load (to avoid
+ * a potential timeout waiting for usermode helper)
+ */
+ if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
+ (adreno_gpu->fwloc == FW_LOCATION_NEW)) {
+
+ ret = request_firmware_direct(&fw, newname, drm->dev);
+ if (!ret) {
+ DRM_DEV_INFO(drm->dev, "loaded %s from new location\n",
+ newname);
+ adreno_gpu->fwloc = FW_LOCATION_NEW;
+ goto out;
+ } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
+ DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
+ newname, ret);
+ fw = ERR_PTR(ret);
+ goto out;
+ }
+ }
+
+ /*
+ * Then try the legacy location without qcom/ prefix
+ */
+ if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
+ (adreno_gpu->fwloc == FW_LOCATION_LEGACY)) {
+
+ ret = request_firmware_direct(&fw, fwname, drm->dev);
+ if (!ret) {
+ DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
+ newname);
+ adreno_gpu->fwloc = FW_LOCATION_LEGACY;
+ goto out;
+ } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
+ DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
+ fwname, ret);
+ fw = ERR_PTR(ret);
+ goto out;
+ }
+ }
+
+ /*
+ * Finally fall back to request_firmware() for cases where the
+ * usermode helper is needed (I think mainly android)
+ */
+ if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
+ (adreno_gpu->fwloc == FW_LOCATION_HELPER)) {
+
+ ret = request_firmware(&fw, newname, drm->dev);
+ if (!ret) {
+ DRM_DEV_INFO(drm->dev, "loaded %s with helper\n",
+ newname);
+ adreno_gpu->fwloc = FW_LOCATION_HELPER;
+ goto out;
+ } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
+ DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
+ newname, ret);
+ fw = ERR_PTR(ret);
+ goto out;
+ }
+ }
+
+ DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname);
+ fw = ERR_PTR(-ENOENT);
+out:
+ kfree(newname);
+ return fw;
+}
+
+int adreno_load_fw(struct adreno_gpu *adreno_gpu)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) {
+ const struct firmware *fw;
+
+ if (!adreno_gpu->info->fw[i])
+ continue;
+
+ /* Skip loading GMU firmware with GMU Wrapper */
+ if (adreno_has_gmu_wrapper(adreno_gpu) && i == ADRENO_FW_GMU)
+ continue;
+
+ /* Skip if the firmware has already been loaded */
+ if (adreno_gpu->fw[i])
+ continue;
+
+ fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->fw[i]);
+ if (IS_ERR(fw))
+ return PTR_ERR(fw);
+
+ adreno_gpu->fw[i] = fw;
+ }
+
+ return 0;
+}
+
+struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
+ const struct firmware *fw, u64 *iova)
+{
+ struct drm_gem_object *bo;
+ void *ptr;
+
+ ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4,
+ MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
+
+ if (IS_ERR(ptr))
+ return ERR_CAST(ptr);
+
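+ /* note the leading dword of the fw image is not copied into the BO */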
+ memcpy(ptr, &fw->data[4], fw->size - 4);
+
+ msm_gem_put_vaddr(bo);
+
+ return bo;
+}
+
+int adreno_hw_init(struct msm_gpu *gpu)
+{
+ VERB("%s", gpu->name);
+
+ for (int i = 0; i < gpu->nr_rings; i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ if (!ring)
+ continue;
+
+ ring->cur = ring->start;
+ ring->next = ring->start;
+ ring->memptrs->rptr = 0;
+
+ /* Detect and clean up an impossible fence, i.e. if the GPU managed
+ * to scribble something invalid, we don't want that to confuse
+ * us into mistakenly believing that submits have completed.
+ */
+ if (fence_before(ring->fctx->last_fence, ring->memptrs->fence)) {
+ ring->memptrs->fence = ring->fctx->last_fence;
+ }
+ }
+
+ return 0;
+}
+
+/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
+static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
+ struct msm_ringbuffer *ring)
+{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ return gpu->funcs->get_rptr(gpu, ring);
+}
+
+struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
+{
+ return gpu->rb[0];
+}
+
+void adreno_recover(struct msm_gpu *gpu)
+{
+ struct drm_device *dev = gpu->dev;
+ int ret;
+
+ // XXX pm-runtime?? we *need* the device to be off after this
+ // so maybe continuing to call ->pm_suspend/resume() is better?
+
+ gpu->funcs->pm_suspend(gpu);
+ gpu->funcs->pm_resume(gpu);
+
+ ret = msm_gpu_hw_init(gpu);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
+ /* hmm, oh well? */
+ }
+}
+
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg)
+{
+ uint32_t wptr;
+
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
+ /*
+ * Mask wptr value that we calculate to fit in the HW range. This is
+ * to account for the possibility that the last command fit exactly into
+ * the ringbuffer and rb->next hasn't wrapped to zero yet
+ */
+ wptr = get_wptr(ring);
+
+ /* ensure writes to ringbuffer have hit system memory: */
+ mb();
+
+ gpu_write(gpu, reg, wptr);
+}
+
+bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ uint32_t wptr = get_wptr(ring);
+
+ /* wait for CP to drain ringbuffer: */
+ if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
+ return true;
+
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
+ gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);
+
+ return false;
+}
+
+int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int i, count = 0;
+
+ WARN_ON(!mutex_is_locked(&gpu->lock));
+
+ kref_init(&state->ref);
+
+ ktime_get_real_ts64(&state->time);
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ int size = 0, j;
+
+ state->ring[i].fence = gpu->rb[i]->memptrs->fence;
+ state->ring[i].iova = gpu->rb[i]->iova;
+ state->ring[i].seqno = gpu->rb[i]->fctx->last_fence;
+ state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
+ state->ring[i].wptr = get_wptr(gpu->rb[i]);
+
+ /* Copy at least 'wptr' dwords of the data */
+ size = state->ring[i].wptr;
+
+ /* After wptr find the last non zero dword to save space */
+ for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++)
+ if (gpu->rb[i]->start[j])
+ size = j + 1;
+
+ if (size) {
+ state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL);
+ if (state->ring[i].data) {
+ memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
+ state->ring[i].data_size = size << 2;
+ }
+ }
+ }
+
+ /* Some targets prefer to collect their own registers */
+ if (!adreno_gpu->registers)
+ return 0;
+
+ /* Count the number of registers */
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
+ count += adreno_gpu->registers[i + 1] -
+ adreno_gpu->registers[i] + 1;
+
+ state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL);
+ if (state->registers) {
+ int pos = 0;
+
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+ u32 start = adreno_gpu->registers[i];
+ u32 end = adreno_gpu->registers[i + 1];
+ u32 addr;
+
+ for (addr = start; addr <= end; addr++) {
+ state->registers[pos++] = addr;
+ state->registers[pos++] = gpu_read(gpu, addr);
+ }
+ }
+
+ state->nr_registers = count;
+ }
+
+ return 0;
+}
+
+void adreno_gpu_state_destroy(struct msm_gpu_state *state)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(state->ring); i++)
+ kvfree(state->ring[i].data);
+
+ for (i = 0; state->bos && i < state->nr_bos; i++)
+ kvfree(state->bos[i].data);
+
+ kfree(state->bos);
+ kfree(state->comm);
+ kfree(state->cmd);
+ kfree(state->registers);
+}
+
+static void adreno_gpu_state_kref_destroy(struct kref *kref)
+{
+ struct msm_gpu_state *state = container_of(kref,
+ struct msm_gpu_state, ref);
+
+ adreno_gpu_state_destroy(state);
+ kfree(state);
+}
+
+int adreno_gpu_state_put(struct msm_gpu_state *state)
+{
+ if (IS_ERR_OR_NULL(state))
+ return 1;
+
+ return kref_put(&state->ref, adreno_gpu_state_kref_destroy);
+}
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+
+static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
+{
+ void *buf;
+ size_t buf_itr = 0, buffer_size;
+ char out[ASCII85_BUFSZ];
+ long l;
+ int i;
+
+ if (!src || !len)
+ return NULL;
+
+ l = ascii85_encode_len(len);
+
+ /*
+ * Ascii85 outputs either a 5 byte string or a 1 byte string. So we
+ * account for the worst case of 5 bytes per dword plus the 1 for '\0'
+ */
+ buffer_size = (l * 5) + 1;
+
+ buf = kvmalloc(buffer_size, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ for (i = 0; i < l; i++)
+ buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
+ ascii85_encode(src[i], out));
+
+ return buf;
+}
+
+/* len is expected to be in bytes
+ *
+ * WARNING: *ptr should be allocated with kvmalloc or friends. It can be freed
+ * with kvfree() and replaced with a newly kvmalloc'd buffer on the first call
+ * when the unencoded raw data is encoded
+ */
+void adreno_show_object(struct drm_printer *p, void **ptr, int len,
+ bool *encoded)
+{
+ if (!*ptr || !len)
+ return;
+
+ if (!*encoded) {
+ long datalen, i;
+ u32 *buf = *ptr;
+
+ /*
+ * Only dump the non-zero part of the buffer - rarely will
+ * any data completely fill the entire allocated size of
+ * the buffer.
+ */
+ for (datalen = 0, i = 0; i < len >> 2; i++)
+ if (buf[i])
+ datalen = ((i + 1) << 2);
+
+ /*
+ * If we reach here, then the originally captured binary buffer
+ * will be replaced with the ascii85 encoded string
+ */
+ *ptr = adreno_gpu_ascii85_encode(buf, datalen);
+
+ kvfree(buf);
+
+ *encoded = true;
+ }
+
+ if (!*ptr)
+ return;
+
+ drm_puts(p, " data: !!ascii85 |\n");
+ drm_puts(p, " ");
+
+ drm_puts(p, *ptr);
+
+ drm_puts(p, "\n");
+}
+
+void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int i;
+
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ drm_printf(p, "revision: %u (%"ADRENO_CHIPID_FMT")\n",
+ adreno_gpu->info->revn,
+ ADRENO_CHIPID_ARGS(adreno_gpu->chip_id));
+ /*
+ * If this is state collected due to an iova fault, it includes
+ * fault-related info. TTBR0 would not be zero in that case, so this
+ * is a good way to distinguish.
+ */
+ if (state->fault_info.ttbr0) {
+ const struct msm_gpu_fault_info *info = &state->fault_info;
+
+ drm_puts(p, "fault-info:\n");
+ drm_printf(p, " - ttbr0=%.16llx\n", info->ttbr0);
+ drm_printf(p, " - iova=%.16lx\n", info->iova);
+ drm_printf(p, " - dir=%s\n", info->flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ");
+ drm_printf(p, " - type=%s\n", info->type);
+ drm_printf(p, " - source=%s\n", info->block);
+ }
+
+ drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
+
+ drm_puts(p, "ringbuffer:\n");
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ drm_printf(p, " - id: %d\n", i);
+ drm_printf(p, " iova: 0x%016llx\n", state->ring[i].iova);
+ drm_printf(p, " last-fence: %u\n", state->ring[i].seqno);
+ drm_printf(p, " retired-fence: %u\n", state->ring[i].fence);
+ drm_printf(p, " rptr: %u\n", state->ring[i].rptr);
+ drm_printf(p, " wptr: %u\n", state->ring[i].wptr);
+ drm_printf(p, " size: %u\n", MSM_GPU_RINGBUFFER_SZ);
+
+ adreno_show_object(p, &state->ring[i].data,
+ state->ring[i].data_size, &state->ring[i].encoded);
+ }
+
+ if (state->bos) {
+ drm_puts(p, "bos:\n");
+
+ for (i = 0; i < state->nr_bos; i++) {
+ drm_printf(p, " - iova: 0x%016llx\n",
+ state->bos[i].iova);
+ drm_printf(p, " size: %zd\n", state->bos[i].size);
+ drm_printf(p, " name: %-32s\n", state->bos[i].name);
+
+ adreno_show_object(p, &state->bos[i].data,
+ state->bos[i].size, &state->bos[i].encoded);
+ }
+ }
+
+ if (state->nr_registers) {
+ drm_puts(p, "registers:\n");
+
+ for (i = 0; i < state->nr_registers; i++) {
+ drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
+ state->registers[i * 2] << 2,
+ state->registers[(i * 2) + 1]);
+ }
+ }
+}
+#endif
+
+/* Dump common gpu status and scratch registers on any hang, to make
+ * the hangcheck logs more useful. The scratch registers always seem
+ * safe to read when the GPU has hung (unlike some other regs, depending
+ * on how the GPU hung), and they are useful to match up to cmdstream
+ * dumps when debugging hangs:
+ */
+void adreno_dump_info(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int i;
+
+ printk("revision: %u (%"ADRENO_CHIPID_FMT")\n",
+ adreno_gpu->info->revn,
+ ADRENO_CHIPID_ARGS(adreno_gpu->chip_id));
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ printk("rb %d: fence: %d/%d\n", i,
+ ring->memptrs->fence,
+ ring->fctx->last_fence);
+
+ printk("rptr: %d\n", get_rptr(adreno_gpu, ring));
+ printk("rb wptr: %d\n", get_wptr(ring));
+ }
+}
+
+/* would be nice to not have to duplicate the _show() stuff with printk(): */
+void adreno_dump(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int i;
+
+ if (!adreno_gpu->registers)
+ return;
+
+ /* dump these out in a form that can be parsed by demsm: */
+ printk("IO:region %s 00000000 00020000\n", gpu->name);
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+ uint32_t start = adreno_gpu->registers[i];
+ uint32_t end = adreno_gpu->registers[i+1];
+ uint32_t addr;
+
+ for (addr = start; addr <= end; addr++) {
+ uint32_t val = gpu_read(gpu, addr);
+ printk("IO:R %08x %08x\n", addr<<2, val);
+ }
+ }
+}
+
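+/*
+ * Free space in the ring, in dwords.  Reserving one slot (the "size - 1")
+ * ensures a completely full ring is never confused with an empty one
+ * (rptr == wptr always means empty).
+ */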
+static uint32_t ring_freewords(struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
+ uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;
+ /* Use ring->next to calculate free size */
+ uint32_t wptr = ring->next - ring->start;
+ uint32_t rptr = get_rptr(adreno_gpu, ring);
+ return (rptr + (size - 1) - wptr) % size;
+}
+
+void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
+{
+ if (spin_until(ring_freewords(ring) >= ndwords))
+ DRM_DEV_ERROR(ring->gpu->dev->dev,
+ "timeout waiting for space in ringbuffer %d\n",
+ ring->id);
+}
+
+static int adreno_get_pwrlevels(struct device *dev,
+ struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ unsigned long freq = ULONG_MAX;
+ struct dev_pm_opp *opp;
+ int ret;
+
+ gpu->fast_rate = 0;
+
+ /* devm_pm_opp_of_add_table may error out but will still create an OPP table */
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret == -ENODEV) {
+ /* Special cases for ancient hw with ancient DT bindings */
+ if (adreno_is_a2xx(adreno_gpu)) {
+ dev_warn(dev, "Unable to find the OPP table. Falling back to 200 MHz.\n");
+ dev_pm_opp_add(dev, 200000000, 0);
+ } else if (adreno_is_a320(adreno_gpu)) {
+ dev_warn(dev, "Unable to find the OPP table. Falling back to 450 MHz.\n");
+ dev_pm_opp_add(dev, 450000000, 0);
+ } else {
+ DRM_DEV_ERROR(dev, "Unable to find the OPP table\n");
+ return -ENODEV;
+ }
+ } else if (ret) {
+ DRM_DEV_ERROR(dev, "Unable to set the OPP table\n");
+ return ret;
+ }
+
+ /* Find the fastest defined rate */
+ opp = dev_pm_opp_find_freq_floor(dev, &freq);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ gpu->fast_rate = freq;
+ dev_pm_opp_put(opp);
+
+ DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);
+
+ return 0;
+}
+
+int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
+ struct adreno_ocmem *adreno_ocmem)
+{
+ struct ocmem_buf *ocmem_hdl;
+ struct ocmem *ocmem;
+
+ ocmem = of_get_ocmem(dev);
+ if (IS_ERR(ocmem)) {
+ if (PTR_ERR(ocmem) == -ENODEV) {
+ /*
+ * Return success since either the ocmem property was
+ * not specified in device tree, or ocmem support is
+ * not compiled into the kernel.
+ */
+ return 0;
+ }
+
+ return PTR_ERR(ocmem);
+ }
+
+ ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->info->gmem);
+ if (IS_ERR(ocmem_hdl))
+ return PTR_ERR(ocmem_hdl);
+
+ adreno_ocmem->ocmem = ocmem;
+ adreno_ocmem->base = ocmem_hdl->addr;
+ adreno_ocmem->hdl = ocmem_hdl;
+
+ if (WARN_ON(ocmem_hdl->len != adreno_gpu->info->gmem))
+ return -ENOMEM;
+
+ return 0;
+}
+
+void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *adreno_ocmem)
+{
+ if (adreno_ocmem && adreno_ocmem->base)
+ ocmem_free(adreno_ocmem->ocmem, OCMEM_GRAPHICS,
+ adreno_ocmem->hdl);
+}
+
+int adreno_read_speedbin(struct device *dev, u32 *speedbin)
+{
+ return nvmem_cell_read_variable_le_u32(dev, "speed_bin", speedbin);
+}
+
+int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ struct adreno_gpu *adreno_gpu,
+ const struct adreno_gpu_funcs *funcs, int nr_rings)
+{
+ struct device *dev = &pdev->dev;
+ struct adreno_platform_config *config = dev->platform_data;
+ struct msm_gpu_config adreno_gpu_config = { 0 };
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ const char *gpu_name;
+ u32 speedbin;
+ int ret;
+
+ adreno_gpu->funcs = funcs;
+ adreno_gpu->info = config->info;
+ adreno_gpu->chip_id = config->chip_id;
+
+ gpu->allow_relocs = config->info->family < ADRENO_6XX_GEN1;
+
+ /* Only handle the core clock when GMU is not in use (or is absent). */
+ if (adreno_has_gmu_wrapper(adreno_gpu) ||
+ adreno_gpu->info->family < ADRENO_6XX_GEN1) {
+ /*
+ * This can only be done before devm_pm_opp_of_add_table(), or
+ * dev_pm_opp_set_config() will WARN_ON()
+ */
+ if (IS_ERR(devm_clk_get(dev, "core"))) {
+ /*
+ * If "core" is absent, go for the legacy clock name.
+ * If we got this far in probing, it's a given one of
+ * them exists.
+ */
+ devm_pm_opp_set_clkname(dev, "core_clk");
+ } else
+ devm_pm_opp_set_clkname(dev, "core");
+ }
+
+ if (adreno_read_speedbin(dev, &speedbin) || !speedbin)
+ speedbin = 0xffff;
+ adreno_gpu->speedbin = (uint16_t) (0xffff & speedbin);
+
+ gpu_name = devm_kasprintf(dev, GFP_KERNEL, "%"ADRENO_CHIPID_FMT,
+ ADRENO_CHIPID_ARGS(config->chip_id));
+ if (!gpu_name)
+ return -ENOMEM;
+
+ adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
+
+ adreno_gpu_config.nr_rings = nr_rings;
+
+ ret = adreno_get_pwrlevels(dev, gpu);
+ if (ret)
+ return ret;
+
+ pm_runtime_set_autosuspend_delay(dev,
+ adreno_gpu->info->inactive_period);
+ pm_runtime_use_autosuspend(dev);
+
+ return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
+ gpu_name, &adreno_gpu_config);
+}
+
+void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
+{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct msm_drm_private *priv = gpu->dev ? gpu->dev->dev_private : NULL;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
+ release_firmware(adreno_gpu->fw[i]);
+
+ if (priv && pm_runtime_enabled(&priv->gpu_pdev->dev))
+ pm_runtime_disable(&priv->gpu_pdev->dev);
+
+ msm_gpu_cleanup(&adreno_gpu->base);
+}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
new file mode 100644
index 0000000000..49f38edf98
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -0,0 +1,562 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * Copyright (c) 2014,2017, 2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __ADRENO_GPU_H__
+#define __ADRENO_GPU_H__
+
+#include <linux/firmware.h>
+#include <linux/iopoll.h>
+
+#include "msm_gpu.h"
+
+#include "adreno_common.xml.h"
+#include "adreno_pm4.xml.h"
+
+extern bool snapshot_debugbus;
+extern bool allow_vram_carveout;
+
+enum {
+ ADRENO_FW_PM4 = 0,
+ ADRENO_FW_SQE = 0, /* a6xx */
+ ADRENO_FW_PFP = 1,
+ ADRENO_FW_GMU = 1, /* a6xx */
+ ADRENO_FW_GPMU = 2,
+ ADRENO_FW_MAX,
+};
+
+/**
+ * enum adreno_family - identify generation and possibly sub-generation
+ *
+ * In some cases there are distinct sub-generations within a major revision
+ * so it helps to be able to group the GPU devices by generation and if
+ * necessary sub-generation.
+ */
+enum adreno_family {
+ ADRENO_2XX_GEN1, /* a20x */
+ ADRENO_2XX_GEN2, /* a22x */
+ ADRENO_3XX,
+ ADRENO_4XX,
+ ADRENO_5XX,
+ ADRENO_6XX_GEN1, /* a630 family */
+ ADRENO_6XX_GEN2, /* a640 family */
+ ADRENO_6XX_GEN3, /* a650 family */
+ ADRENO_6XX_GEN4, /* a660 family */
+};
+
+#define ADRENO_QUIRK_TWO_PASS_USE_WFI BIT(0)
+#define ADRENO_QUIRK_FAULT_DETECT_MASK BIT(1)
+#define ADRENO_QUIRK_LMLOADKILL_DISABLE BIT(2)
+#define ADRENO_QUIRK_HAS_HW_APRIV BIT(3)
+#define ADRENO_QUIRK_HAS_CACHED_COHERENT BIT(4)
+
+/* Helper for formatting the chip_id in the way that userspace tools like
+ * crashdec expect.
+ */
+#define ADRENO_CHIPID_FMT "u.%u.%u.%u"
+#define ADRENO_CHIPID_ARGS(_c) \
+ (((_c) >> 24) & 0xff), \
+ (((_c) >> 16) & 0xff), \
+ (((_c) >> 8) & 0xff), \
+ ((_c) & 0xff)
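+/*
+ * e.g. (as in adreno_dump_info()):
+ *
+ *   printk("revision: %u (%"ADRENO_CHIPID_FMT")\n",
+ *          adreno_gpu->info->revn, ADRENO_CHIPID_ARGS(adreno_gpu->chip_id));
+ *
+ * prints something like "revision: 630 (6.3.0.2)" for an a630.
+ */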
+
+struct adreno_gpu_funcs {
+ struct msm_gpu_funcs base;
+ int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
+};
+
+struct adreno_reglist {
+ u32 offset;
+ u32 value;
+};
+
+extern const struct adreno_reglist a612_hwcg[], a615_hwcg[], a630_hwcg[], a640_hwcg[], a650_hwcg[];
+extern const struct adreno_reglist a660_hwcg[], a690_hwcg[];
+
+struct adreno_speedbin {
+ uint16_t fuse;
+ uint16_t speedbin;
+};
+
+struct adreno_info {
+ const char *machine;
+ /**
+ * @chip_ids: Table of matching chip-ids
+ *
+ * Terminated with a 0 sentinel
+ */
+ uint32_t *chip_ids;
+ enum adreno_family family;
+ uint32_t revn;
+ const char *fw[ADRENO_FW_MAX];
+ uint32_t gmem;
+ u64 quirks;
+ struct msm_gpu *(*init)(struct drm_device *dev);
+ const char *zapfw;
+ u32 inactive_period;
+ const struct adreno_reglist *hwcg;
+ u64 address_space_size;
+ /**
+ * @speedbins: Optional table of fuse to speedbin mappings
+ *
+ * Consists of pairs of fuse, index mappings, terminated with a
+ * {SHRT_MAX, 0} sentinel.
+ */
+ struct adreno_speedbin *speedbins;
+};
+
+#define ADRENO_CHIP_IDS(tbl...) (uint32_t[]) { tbl, 0 }
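
As a usage sketch (the chip-id values below are placeholders, not taken from this patch), a catalog entry's chip_ids table is built with the macro, which appends the 0 terminator documented above:

    /* Hypothetical values, purely for illustration. */
    .chip_ids = ADRENO_CHIP_IDS(0x06030001, 0x06030002),
    /* expands to (uint32_t[]){ 0x06030001, 0x06030002, 0 } */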
+
+/*
+ * Helper to build a speedbin table, ie. the table:
+ * fuse | speedbin
+ * -----+---------
+ * 0 | 0
+ * 169 | 1
+ * 174 | 2
+ *
+ * would be declared as:
+ *
+ * .speedbins = ADRENO_SPEEDBINS(
+ * { 0, 0 },
+ * { 169, 1 },
+ * { 174, 2 },
+ * ),
+ */
+#define ADRENO_SPEEDBINS(tbl...) (struct adreno_speedbin[]) { tbl {SHRT_MAX, 0} }
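
Because the table is sentinel-terminated, a consumer can simply walk it until it reaches the {SHRT_MAX, 0} entry. A minimal lookup sketch (the helper name is invented for illustration and is not part of this patch):

    /* Hypothetical helper: map a fuse value to its speedbin index. */
    static int example_fuse_to_speedbin(const struct adreno_info *info,
                                        u32 fuse, u16 *speedbin)
    {
            const struct adreno_speedbin *sb;

            for (sb = info->speedbins; sb && sb->fuse != SHRT_MAX; sb++) {
                    if (sb->fuse == fuse) {
                            *speedbin = sb->speedbin;
                            return 0;
                    }
            }
            return -ENOENT;
    }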
+
+struct adreno_gpu {
+ struct msm_gpu base;
+ const struct adreno_info *info;
+ uint32_t chip_id;
+ uint16_t speedbin;
+ const struct adreno_gpu_funcs *funcs;
+
+ /* interesting register offsets to dump: */
+ const unsigned int *registers;
+
+ /*
+ * Are we loading fw from legacy path? Prior to addition
+ * of gpu firmware to linux-firmware, the fw files were
+ * placed in toplevel firmware directory, following qcom's
+ * android kernel. But linux-firmware preferred they be
+ * placed in a 'qcom' subdirectory.
+ *
+ * For backwards compatibility, we try first to load from
+ * the new path, using request_firmware_direct() to avoid
+ * any potential timeout waiting for usermode helper, then
+ * fall back to the old path (with direct load). And
+	 * finally fall back to request_firmware() with the new
+	 * path to allow the usermode helper (see the sketch just
+	 * after this struct for the resulting load order).
+ */
+ enum {
+ FW_LOCATION_UNKNOWN = 0,
+ FW_LOCATION_NEW, /* /lib/firmware/qcom/$fwfile */
+ FW_LOCATION_LEGACY, /* /lib/firmware/$fwfile */
+ FW_LOCATION_HELPER,
+ } fwloc;
+
+ /* firmware: */
+ const struct firmware *fw[ADRENO_FW_MAX];
+
+ /*
+ * Register offsets are different between some GPUs.
+ * GPU specific offsets will be exported by GPU specific
+ * code (a3xx_gpu.c) and stored in this common location.
+ */
+ const unsigned int *reg_offsets;
+ bool gmu_is_wrapper;
+};
+#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
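
A minimal sketch of the firmware-path fallback described in the fwloc comment inside the struct above: the new "qcom/" path with a direct load first, then the legacy top-level path, then request_firmware() so the usermode helper can run. The function name and error handling are illustrative only; the driver's real logic also records the chosen location in fwloc:

    /* Illustration only; not the driver's actual request path. */
    static const struct firmware *example_request_fw(struct device *dev,
                                                     const char *fwname)
    {
            const struct firmware *fw;
            char newname[64];

            snprintf(newname, sizeof(newname), "qcom/%s", fwname);

            if (!request_firmware_direct(&fw, newname, dev))  /* FW_LOCATION_NEW */
                    return fw;
            if (!request_firmware_direct(&fw, fwname, dev))   /* FW_LOCATION_LEGACY */
                    return fw;
            if (!request_firmware(&fw, newname, dev))         /* FW_LOCATION_HELPER */
                    return fw;

            return ERR_PTR(-ENOENT);
    }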
+
+struct adreno_ocmem {
+ struct ocmem *ocmem;
+ unsigned long base;
+ void *hdl;
+};
+
+/* platform config data (ie. from DT, or pdata) */
+struct adreno_platform_config {
+ uint32_t chip_id;
+ const struct adreno_info *info;
+};
+
+#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
+
+#define spin_until(X) ({ \
+ int __ret = -ETIMEDOUT; \
+ unsigned long __t = jiffies + ADRENO_IDLE_TIMEOUT; \
+ do { \
+ if (X) { \
+ __ret = 0; \
+ break; \
+ } \
+ } while (time_before(jiffies, __t)); \
+ __ret; \
+})
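
spin_until() busy-waits on an arbitrary condition and evaluates to 0 on success or -ETIMEDOUT once ADRENO_IDLE_TIMEOUT expires. A hedged usage sketch, assuming a struct msm_gpu *gpu in scope; the register name is made up:

    /* Illustration only: poll a hypothetical status register until a bit clears. */
    if (spin_until(!(gpu_read(gpu, REG_EXAMPLE_STATUS) & BIT(0))))
            DRM_DEV_ERROR(gpu->dev->dev, "timed out waiting for idle\n");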
+
+static inline uint8_t adreno_patchid(const struct adreno_gpu *gpu)
+{
+ /* It is probably ok to assume legacy "adreno_rev" format
+ * for all a6xx devices, but probably best to limit this
+ * to older things.
+ */
+ WARN_ON_ONCE(gpu->info->family >= ADRENO_6XX_GEN1);
+ return gpu->chip_id & 0xff;
+}
+
+static inline bool adreno_is_revn(const struct adreno_gpu *gpu, uint32_t revn)
+{
+ if (WARN_ON_ONCE(!gpu->info))
+ return false;
+ return gpu->info->revn == revn;
+}
+
+static inline bool adreno_has_gmu_wrapper(const struct adreno_gpu *gpu)
+{
+ return gpu->gmu_is_wrapper;
+}
+
+static inline bool adreno_is_a2xx(const struct adreno_gpu *gpu)
+{
+ if (WARN_ON_ONCE(!gpu->info))
+ return false;
+ return gpu->info->family <= ADRENO_2XX_GEN2;
+}
+
+static inline bool adreno_is_a20x(const struct adreno_gpu *gpu)
+{
+ if (WARN_ON_ONCE(!gpu->info))
+ return false;
+ return gpu->info->family == ADRENO_2XX_GEN1;
+}
+
+static inline bool adreno_is_a225(const struct adreno_gpu *gpu)
+{
+ return adreno_is_revn(gpu, 225);
+}
+
+static inline bool adreno_is_a305(const struct adreno_gpu *gpu)
+{
+ return adreno_is_revn(gpu, 305);
+}
+
+static inline bool adreno_is_a306(const struct adreno_gpu *gpu)
+{
+ /* yes, 307, because a305c is 306 */
+ return adreno_is_revn(gpu, 307);
+}
+
+static inline bool adreno_is_a320(const struct adreno_gpu *gpu)
+{
+ return adreno_is_revn(gpu, 320);
+}
+
+static inline bool adreno_is_a330(const struct adreno_gpu *gpu)
+{
+ return adreno_is_revn(gpu, 330);
+}
+
+static inline bool adreno_is_a330v2(const struct adreno_gpu *gpu)
+{
+ return adreno_is_a330(gpu) && (adreno_patchid(gpu) > 0);
+}
+
+static inline int adreno_is_a405(const struct adreno_gpu *gpu)
+{
+ return adreno_is_revn(gpu, 405);
+}
+
+static inline int adreno_is_a420(const struct adreno_gpu *gpu)
+{
+ return adreno_is_revn(gpu, 420);
+}
+
+static inline int adreno_is_a430(const struct adreno_gpu *gpu)
+{
+ return adreno_is_revn(gpu, 430);
+}
+
+static inline int adreno_is_a506(const struct adreno_gpu *gpu)
+{
+ return adreno_is_revn(gpu, 506);
+}
+
+static inline int adreno_is_a508(const struct adreno_gpu *gpu)
+{
+ return adreno_is_revn(gpu, 508);
+}
+
+static inline int adreno_is_a509(const struct adreno_gpu *gpu)
+{
+ return adreno_is_revn(gpu, 509);
+}
+
+static inline int adreno_is_a510(const struct adreno_gpu *gpu)
+{
+ return adreno_is_revn(gpu, 510);
+}
+
+static inline int adreno_is_a512(const struct adreno_gpu *gpu)
+{
+ return adreno_is_revn(gpu, 512);
+}
+
+static inline int adreno_is_a530(const struct adreno_gpu *gpu)
+{
+ return adreno_is_revn(gpu, 530);
+}
+
+static inline int adreno_is_a540(const struct adreno_gpu *gpu)
+{
+ return adreno_is_revn(gpu, 540);
+}
+
+static inline int adreno_is_a610(const struct adreno_gpu *gpu)
+{
+ return adreno_is_revn(gpu, 610);
+}
+
+static inline int adreno_is_a618(const struct adreno_gpu *gpu)
+{
+ return adreno_is_revn(gpu, 618);
+}
+
+static inline int adreno_is_a619(const struct adreno_gpu *gpu)
+{
+ return adreno_is_revn(gpu, 619);
+}
+
+static inline int adreno_is_a619_holi(const struct adreno_gpu *gpu)
+{
+ return adreno_is_a619(gpu) && adreno_has_gmu_wrapper(gpu);
+}
+
+static inline int adreno_is_a630(const struct adreno_gpu *gpu)
+{
+ return adreno_is_revn(gpu, 630);
+}
+
+static inline int adreno_is_a640(const struct adreno_gpu *gpu)
+{
+ return adreno_is_revn(gpu, 640);
+}
+
+static inline int adreno_is_a650(const struct adreno_gpu *gpu)
+{
+ return adreno_is_revn(gpu, 650);
+}
+
+static inline int adreno_is_7c3(const struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x06030500;
+}
+
+static inline int adreno_is_a660(const struct adreno_gpu *gpu)
+{
+ return adreno_is_revn(gpu, 660);
+}
+
+static inline int adreno_is_a680(const struct adreno_gpu *gpu)
+{
+ return adreno_is_revn(gpu, 680);
+}
+
+static inline int adreno_is_a690(const struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x06090000;
+}
+
+/* check for a615, a616, a618, a619 or any a630 derivatives */
+static inline int adreno_is_a630_family(const struct adreno_gpu *gpu)
+{
+ if (WARN_ON_ONCE(!gpu->info))
+ return false;
+ return gpu->info->family == ADRENO_6XX_GEN1;
+}
+
+static inline int adreno_is_a660_family(const struct adreno_gpu *gpu)
+{
+ if (WARN_ON_ONCE(!gpu->info))
+ return false;
+ return gpu->info->family == ADRENO_6XX_GEN4;
+}
+
+/* check for a650, a660, or any derivatives */
+static inline int adreno_is_a650_family(const struct adreno_gpu *gpu)
+{
+ if (WARN_ON_ONCE(!gpu->info))
+ return false;
+ return gpu->info->family >= ADRENO_6XX_GEN3;
+}
+
+static inline int adreno_is_a640_family(const struct adreno_gpu *gpu)
+{
+ if (WARN_ON_ONCE(!gpu->info))
+ return false;
+ return gpu->info->family == ADRENO_6XX_GEN2;
+}
+
+u64 adreno_private_address_space_size(struct msm_gpu *gpu);
+int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ uint32_t param, uint64_t *value, uint32_t *len);
+int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+ uint32_t param, uint64_t value, uint32_t len);
+const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
+ const char *fwname);
+struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
+ const struct firmware *fw, u64 *iova);
+int adreno_hw_init(struct msm_gpu *gpu);
+void adreno_recover(struct msm_gpu *gpu);
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg);
+bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p);
+#endif
+void adreno_dump_info(struct msm_gpu *gpu);
+void adreno_dump(struct msm_gpu *gpu);
+void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
+struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);
+
+int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
+ struct adreno_ocmem *ocmem);
+void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *ocmem);
+
+int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
+ int nr_rings);
+void adreno_gpu_cleanup(struct adreno_gpu *gpu);
+int adreno_load_fw(struct adreno_gpu *adreno_gpu);
+
+void adreno_gpu_state_destroy(struct msm_gpu_state *state);
+
+int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
+int adreno_gpu_state_put(struct msm_gpu_state *state);
+void adreno_show_object(struct drm_printer *p, void **ptr, int len,
+ bool *encoded);
+
+/*
+ * Common helper function to initialize the default address space for arm-smmu
+ * attached targets
+ */
+struct msm_gem_address_space *
+adreno_create_address_space(struct msm_gpu *gpu,
+ struct platform_device *pdev);
+
+struct msm_gem_address_space *
+adreno_iommu_create_address_space(struct msm_gpu *gpu,
+ struct platform_device *pdev,
+ unsigned long quirks);
+
+int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
+ struct adreno_smmu_fault_info *info, const char *block,
+ u32 scratch[4]);
+
+int adreno_read_speedbin(struct device *dev, u32 *speedbin);
+
+/*
+ * For a5xx and a6xx targets, load the zap shader that is used to pull the GPU
+ * out of secure mode.
+ */
+int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid);
+
+/* ringbuffer helpers (the parts that are adreno specific) */
+
+static inline void
+OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
+{
+ adreno_wait_ring(ring, cnt+1);
+ OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
+}
+
+/* no-op packet: */
+static inline void
+OUT_PKT2(struct msm_ringbuffer *ring)
+{
+ adreno_wait_ring(ring, 1);
+ OUT_RING(ring, CP_TYPE2_PKT);
+}
+
+static inline void
+OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
+{
+ adreno_wait_ring(ring, cnt+1);
+ OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
+}
+
+static inline u32 PM4_PARITY(u32 val)
+{
+ return (0x9669 >> (0xF & (val ^
+ (val >> 4) ^ (val >> 8) ^ (val >> 12) ^
+ (val >> 16) ^ ((val) >> 20) ^ (val >> 24) ^
+ (val >> 28)))) & 1;
+}
+
+/* Maximum number of values that can be executed for one opcode */
+#define TYPE4_MAX_PAYLOAD 127
+
+#define PKT4(_reg, _cnt) \
+ (CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
+ (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))
+
+static inline void
+OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
+{
+ adreno_wait_ring(ring, cnt + 1);
+ OUT_RING(ring, PKT4(regindx, cnt));
+}
+
+static inline void
+OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
+{
+ adreno_wait_ring(ring, cnt + 1);
+ OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
+ ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
+}
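
Each of the helpers above reserves ring space with adreno_wait_ring() and then writes a packet header; the TYPE4/TYPE7 variants also fold in PM4_PARITY(), which XORs the value's eight nibbles together and uses the 0x9669 constant as a 16-entry lookup table to yield a single parity bit for the header. A minimal emission sketch, assuming a struct msm_ringbuffer *ring in scope and using opcodes defined later in this patch (the choice of event is arbitrary):

    /* Illustration only: emit a CP_EVENT_WRITE with one payload dword. */
    OUT_PKT7(ring, CP_EVENT_WRITE, 1);
    OUT_RING(ring, CACHE_FLUSH);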
+
+struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);
+
+static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
+{
+ return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
+}
+
+/*
+ * Given a register and a count, return a value to program into
+ * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
+ * registers starting at _reg.
+ *
+ * The register base needs to be a multiple of the length. If it is not, the
+ * hardware will quietly mask off the bits for you and shift the size. For
+ * example, if you intend the protection to start at 0x07 for a length of 4
+ * (0x07-0x0A) the hardware will actually protect (0x04-0x07) which might
+ * expose registers you intended to protect!
+ */
+#define ADRENO_PROTECT_RW(_reg, _len) \
+ ((1 << 30) | (1 << 29) | \
+ ((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
+/*
+ * Same as above, but allow reads over the range. For areas of mixed use (such
+ * as performance counters) this allows us to protect a much larger range with a
+ * single register
+ */
+#define ADRENO_PROTECT_RDONLY(_reg, _len) \
+	((1 << 29) | \
+ ((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
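
As the comment above warns, the base register must be a multiple of the length (which the macro encodes as ilog2), or the hardware silently rounds the range down. A hedged usage sketch with made-up register ranges, each base a multiple of its length:

    /* Illustration only: hypothetical protected ranges. */
    static const u32 example_protect[] = {
            ADRENO_PROTECT_RW(0x880, 0x20),      /* block reads and writes */
            ADRENO_PROTECT_RDONLY(0x900, 0x100), /* block writes, allow reads */
    };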
+
+
+#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
+ interval, timeout)
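
gpu_poll_timeout() is a thin wrapper around readl_poll_timeout() that turns a dword register offset into the corresponding mmio byte address. A usage sketch with a hypothetical register and bit, assuming a struct msm_gpu *gpu in scope:

    /* Illustration only: wait up to 5 ms for a made-up status bit to be set. */
    u32 status;
    int ret = gpu_poll_timeout(gpu, REG_EXAMPLE_STATUS, status,
                               status & BIT(0), 100, 5000);
    if (ret)
            DRM_DEV_ERROR(gpu->dev->dev, "status bit never came up\n");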
+
+#endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
new file mode 100644
index 0000000000..8a4a2d161a
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -0,0 +1,2444 @@
+#ifndef ADRENO_PM4_XML
+#define ADRENO_PM4_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno.xml ( 594 bytes, from 2023-03-10 18:32:52)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2022-07-23 20:21:46)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a2xx.xml ( 91929 bytes, from 2023-02-28 23:52:27)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_common.xml ( 15434 bytes, from 2023-03-10 18:32:53)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pm4.xml ( 74995 bytes, from 2023-03-20 18:06:23)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a3xx.xml ( 84231 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a4xx.xml ( 113474 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a5xx.xml ( 149590 bytes, from 2023-02-14 19:37:12)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx.xml ( 198949 bytes, from 2023-03-20 18:06:23)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/a6xx_gmu.xml ( 11404 bytes, from 2023-03-10 18:32:53)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/ocmem.xml ( 1773 bytes, from 2022-08-02 16:38:43)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_control_regs.xml ( 9055 bytes, from 2023-03-10 18:32:52)
+- /home/robclark/src/mesa/mesa/src/freedreno/registers/adreno/adreno_pipe_regs.xml ( 2976 bytes, from 2023-03-10 18:32:52)
+
+Copyright (C) 2013-2023 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum vgt_event_type {
+ VS_DEALLOC = 0,
+ PS_DEALLOC = 1,
+ VS_DONE_TS = 2,
+ PS_DONE_TS = 3,
+ CACHE_FLUSH_TS = 4,
+ CONTEXT_DONE = 5,
+ CACHE_FLUSH = 6,
+ VIZQUERY_START = 7,
+ HLSQ_FLUSH = 7,
+ VIZQUERY_END = 8,
+ SC_WAIT_WC = 9,
+ WRITE_PRIMITIVE_COUNTS = 9,
+ START_PRIMITIVE_CTRS = 11,
+ STOP_PRIMITIVE_CTRS = 12,
+ RST_PIX_CNT = 13,
+ RST_VTX_CNT = 14,
+ TILE_FLUSH = 15,
+ STAT_EVENT = 16,
+ CACHE_FLUSH_AND_INV_TS_EVENT = 20,
+ ZPASS_DONE = 21,
+ CACHE_FLUSH_AND_INV_EVENT = 22,
+ RB_DONE_TS = 22,
+ PERFCOUNTER_START = 23,
+ PERFCOUNTER_STOP = 24,
+ VS_FETCH_DONE = 27,
+ FACENESS_FLUSH = 28,
+ WT_DONE_TS = 8,
+ START_FRAGMENT_CTRS = 13,
+ STOP_FRAGMENT_CTRS = 14,
+ START_COMPUTE_CTRS = 15,
+ STOP_COMPUTE_CTRS = 16,
+ FLUSH_SO_0 = 17,
+ FLUSH_SO_1 = 18,
+ FLUSH_SO_2 = 19,
+ FLUSH_SO_3 = 20,
+ PC_CCU_INVALIDATE_DEPTH = 24,
+ PC_CCU_INVALIDATE_COLOR = 25,
+ PC_CCU_RESOLVE_TS = 26,
+ PC_CCU_FLUSH_DEPTH_TS = 28,
+ PC_CCU_FLUSH_COLOR_TS = 29,
+ BLIT = 30,
+ LRZ_CLEAR = 37,
+ LRZ_FLUSH = 38,
+ BLIT_OP_FILL_2D = 39,
+ BLIT_OP_COPY_2D = 40,
+ BLIT_OP_SCALE_2D = 42,
+ CONTEXT_DONE_2D = 43,
+ UNK_2C = 44,
+ UNK_2D = 45,
+ CACHE_INVALIDATE = 49,
+ LABEL = 63,
+ CCU_INVALIDATE_DEPTH = 24,
+ CCU_INVALIDATE_COLOR = 25,
+ CCU_RESOLVE_CLEAN = 26,
+ CCU_FLUSH_DEPTH = 28,
+ CCU_FLUSH_COLOR = 29,
+ CCU_RESOLVE = 30,
+ CCU_END_RESOLVE_GROUP = 31,
+ CCU_CLEAN_DEPTH = 32,
+ CCU_CLEAN_COLOR = 33,
+ CACHE_RESET = 48,
+ CACHE_CLEAN = 49,
+ CACHE_FLUSH7 = 50,
+ CACHE_INVALIDATE7 = 51,
+};
+
+enum pc_di_primtype {
+ DI_PT_NONE = 0,
+ DI_PT_POINTLIST_PSIZE = 1,
+ DI_PT_LINELIST = 2,
+ DI_PT_LINESTRIP = 3,
+ DI_PT_TRILIST = 4,
+ DI_PT_TRIFAN = 5,
+ DI_PT_TRISTRIP = 6,
+ DI_PT_LINELOOP = 7,
+ DI_PT_RECTLIST = 8,
+ DI_PT_POINTLIST = 9,
+ DI_PT_LINE_ADJ = 10,
+ DI_PT_LINESTRIP_ADJ = 11,
+ DI_PT_TRI_ADJ = 12,
+ DI_PT_TRISTRIP_ADJ = 13,
+ DI_PT_PATCHES0 = 31,
+ DI_PT_PATCHES1 = 32,
+ DI_PT_PATCHES2 = 33,
+ DI_PT_PATCHES3 = 34,
+ DI_PT_PATCHES4 = 35,
+ DI_PT_PATCHES5 = 36,
+ DI_PT_PATCHES6 = 37,
+ DI_PT_PATCHES7 = 38,
+ DI_PT_PATCHES8 = 39,
+ DI_PT_PATCHES9 = 40,
+ DI_PT_PATCHES10 = 41,
+ DI_PT_PATCHES11 = 42,
+ DI_PT_PATCHES12 = 43,
+ DI_PT_PATCHES13 = 44,
+ DI_PT_PATCHES14 = 45,
+ DI_PT_PATCHES15 = 46,
+ DI_PT_PATCHES16 = 47,
+ DI_PT_PATCHES17 = 48,
+ DI_PT_PATCHES18 = 49,
+ DI_PT_PATCHES19 = 50,
+ DI_PT_PATCHES20 = 51,
+ DI_PT_PATCHES21 = 52,
+ DI_PT_PATCHES22 = 53,
+ DI_PT_PATCHES23 = 54,
+ DI_PT_PATCHES24 = 55,
+ DI_PT_PATCHES25 = 56,
+ DI_PT_PATCHES26 = 57,
+ DI_PT_PATCHES27 = 58,
+ DI_PT_PATCHES28 = 59,
+ DI_PT_PATCHES29 = 60,
+ DI_PT_PATCHES30 = 61,
+ DI_PT_PATCHES31 = 62,
+};
+
+enum pc_di_src_sel {
+ DI_SRC_SEL_DMA = 0,
+ DI_SRC_SEL_IMMEDIATE = 1,
+ DI_SRC_SEL_AUTO_INDEX = 2,
+ DI_SRC_SEL_AUTO_XFB = 3,
+};
+
+enum pc_di_face_cull_sel {
+ DI_FACE_CULL_NONE = 0,
+ DI_FACE_CULL_FETCH = 1,
+ DI_FACE_BACKFACE_CULL = 2,
+ DI_FACE_FRONTFACE_CULL = 3,
+};
+
+enum pc_di_index_size {
+ INDEX_SIZE_IGN = 0,
+ INDEX_SIZE_16_BIT = 0,
+ INDEX_SIZE_32_BIT = 1,
+ INDEX_SIZE_8_BIT = 2,
+ INDEX_SIZE_INVALID = 0,
+};
+
+enum pc_di_vis_cull_mode {
+ IGNORE_VISIBILITY = 0,
+ USE_VISIBILITY = 1,
+};
+
+enum adreno_pm4_packet_type {
+ CP_TYPE0_PKT = 0,
+ CP_TYPE1_PKT = 0x40000000,
+ CP_TYPE2_PKT = 0x80000000,
+ CP_TYPE3_PKT = 0xc0000000,
+ CP_TYPE4_PKT = 0x40000000,
+ CP_TYPE7_PKT = 0x70000000,
+};
+
+enum adreno_pm4_type3_packets {
+ CP_ME_INIT = 72,
+ CP_NOP = 16,
+ CP_PREEMPT_ENABLE = 28,
+ CP_PREEMPT_TOKEN = 30,
+ CP_INDIRECT_BUFFER = 63,
+ CP_INDIRECT_BUFFER_CHAIN = 87,
+ CP_INDIRECT_BUFFER_PFD = 55,
+ CP_WAIT_FOR_IDLE = 38,
+ CP_WAIT_REG_MEM = 60,
+ CP_WAIT_REG_EQ = 82,
+ CP_WAIT_REG_GTE = 83,
+ CP_WAIT_UNTIL_READ = 92,
+ CP_WAIT_IB_PFD_COMPLETE = 93,
+ CP_REG_RMW = 33,
+ CP_SET_BIN_DATA = 47,
+ CP_SET_BIN_DATA5 = 47,
+ CP_REG_TO_MEM = 62,
+ CP_MEM_WRITE = 61,
+ CP_MEM_WRITE_CNTR = 79,
+ CP_COND_EXEC = 68,
+ CP_COND_WRITE = 69,
+ CP_COND_WRITE5 = 69,
+ CP_EVENT_WRITE = 70,
+ CP_EVENT_WRITE_SHD = 88,
+ CP_EVENT_WRITE_CFL = 89,
+ CP_EVENT_WRITE_ZPD = 91,
+ CP_RUN_OPENCL = 49,
+ CP_DRAW_INDX = 34,
+ CP_DRAW_INDX_2 = 54,
+ CP_DRAW_INDX_BIN = 52,
+ CP_DRAW_INDX_2_BIN = 53,
+ CP_VIZ_QUERY = 35,
+ CP_SET_STATE = 37,
+ CP_SET_CONSTANT = 45,
+ CP_IM_LOAD = 39,
+ CP_IM_LOAD_IMMEDIATE = 43,
+ CP_LOAD_CONSTANT_CONTEXT = 46,
+ CP_INVALIDATE_STATE = 59,
+ CP_SET_SHADER_BASES = 74,
+ CP_SET_BIN_MASK = 80,
+ CP_SET_BIN_SELECT = 81,
+ CP_CONTEXT_UPDATE = 94,
+ CP_INTERRUPT = 64,
+ CP_IM_STORE = 44,
+ CP_SET_DRAW_INIT_FLAGS = 75,
+ CP_SET_PROTECTED_MODE = 95,
+ CP_BOOTSTRAP_UCODE = 111,
+ CP_LOAD_STATE = 48,
+ CP_LOAD_STATE4 = 48,
+ CP_COND_INDIRECT_BUFFER_PFE = 58,
+ CP_COND_INDIRECT_BUFFER_PFD = 50,
+ CP_INDIRECT_BUFFER_PFE = 63,
+ CP_SET_BIN = 76,
+ CP_TEST_TWO_MEMS = 113,
+ CP_REG_WR_NO_CTXT = 120,
+ CP_RECORD_PFP_TIMESTAMP = 17,
+ CP_SET_SECURE_MODE = 102,
+ CP_WAIT_FOR_ME = 19,
+ CP_SET_DRAW_STATE = 67,
+ CP_DRAW_INDX_OFFSET = 56,
+ CP_DRAW_INDIRECT = 40,
+ CP_DRAW_INDX_INDIRECT = 41,
+ CP_DRAW_INDIRECT_MULTI = 42,
+ CP_DRAW_AUTO = 36,
+ CP_DRAW_PRED_ENABLE_GLOBAL = 25,
+ CP_DRAW_PRED_ENABLE_LOCAL = 26,
+ CP_DRAW_PRED_SET = 78,
+ CP_WIDE_REG_WRITE = 116,
+ CP_SCRATCH_TO_REG = 77,
+ CP_REG_TO_SCRATCH = 74,
+ CP_WAIT_MEM_WRITES = 18,
+ CP_COND_REG_EXEC = 71,
+ CP_MEM_TO_REG = 66,
+ CP_EXEC_CS_INDIRECT = 65,
+ CP_EXEC_CS = 51,
+ CP_PERFCOUNTER_ACTION = 80,
+ CP_SMMU_TABLE_UPDATE = 83,
+ CP_SET_MARKER = 101,
+ CP_SET_PSEUDO_REG = 86,
+ CP_CONTEXT_REG_BUNCH = 92,
+ CP_YIELD_ENABLE = 28,
+ CP_SKIP_IB2_ENABLE_GLOBAL = 29,
+ CP_SKIP_IB2_ENABLE_LOCAL = 35,
+ CP_SET_SUBDRAW_SIZE = 53,
+ CP_WHERE_AM_I = 98,
+ CP_SET_VISIBILITY_OVERRIDE = 100,
+ CP_PREEMPT_ENABLE_GLOBAL = 105,
+ CP_PREEMPT_ENABLE_LOCAL = 106,
+ CP_CONTEXT_SWITCH_YIELD = 107,
+ CP_SET_RENDER_MODE = 108,
+ CP_COMPUTE_CHECKPOINT = 110,
+ CP_MEM_TO_MEM = 115,
+ CP_BLIT = 44,
+ CP_REG_TEST = 57,
+ CP_SET_MODE = 99,
+ CP_LOAD_STATE6_GEOM = 50,
+ CP_LOAD_STATE6_FRAG = 52,
+ CP_LOAD_STATE6 = 54,
+ IN_IB_PREFETCH_END = 23,
+ IN_SUBBLK_PREFETCH = 31,
+ IN_INSTR_PREFETCH = 32,
+ IN_INSTR_MATCH = 71,
+ IN_CONST_PREFETCH = 73,
+ IN_INCR_UPDT_STATE = 85,
+ IN_INCR_UPDT_CONST = 86,
+ IN_INCR_UPDT_INSTR = 87,
+ PKT4 = 4,
+ IN_IB_END = 10,
+ IN_GMU_INTERRUPT = 11,
+ IN_PREEMPT = 15,
+ CP_SCRATCH_WRITE = 76,
+ CP_REG_TO_MEM_OFFSET_MEM = 116,
+ CP_REG_TO_MEM_OFFSET_REG = 114,
+ CP_WAIT_MEM_GTE = 20,
+ CP_WAIT_TWO_REGS = 112,
+ CP_MEMCPY = 117,
+ CP_SET_BIN_DATA5_OFFSET = 46,
+ CP_CONTEXT_SWITCH = 84,
+ CP_SET_CTXSWITCH_IB = 85,
+ CP_REG_WRITE = 109,
+ CP_START_BIN = 80,
+ CP_END_BIN = 81,
+ CP_PREEMPT_DISABLE = 108,
+ CP_WAIT_TIMESTAMP = 20,
+ CP_THREAD_CONTROL = 23,
+ CP_CONTEXT_REG_BUNCH2 = 93,
+ CP_UNK15 = 21,
+ CP_UNK16 = 22,
+ CP_UNK18 = 24,
+ CP_UNK1B = 27,
+ CP_UNK49 = 73,
+};
+
+enum adreno_state_block {
+ SB_VERT_TEX = 0,
+ SB_VERT_MIPADDR = 1,
+ SB_FRAG_TEX = 2,
+ SB_FRAG_MIPADDR = 3,
+ SB_VERT_SHADER = 4,
+ SB_GEOM_SHADER = 5,
+ SB_FRAG_SHADER = 6,
+ SB_COMPUTE_SHADER = 7,
+};
+
+enum adreno_state_type {
+ ST_SHADER = 0,
+ ST_CONSTANTS = 1,
+};
+
+enum adreno_state_src {
+ SS_DIRECT = 0,
+ SS_INVALID_ALL_IC = 2,
+ SS_INVALID_PART_IC = 3,
+ SS_INDIRECT = 4,
+ SS_INDIRECT_TCM = 5,
+ SS_INDIRECT_STM = 6,
+};
+
+enum a4xx_state_block {
+ SB4_VS_TEX = 0,
+ SB4_HS_TEX = 1,
+ SB4_DS_TEX = 2,
+ SB4_GS_TEX = 3,
+ SB4_FS_TEX = 4,
+ SB4_CS_TEX = 5,
+ SB4_VS_SHADER = 8,
+ SB4_HS_SHADER = 9,
+ SB4_DS_SHADER = 10,
+ SB4_GS_SHADER = 11,
+ SB4_FS_SHADER = 12,
+ SB4_CS_SHADER = 13,
+ SB4_SSBO = 14,
+ SB4_CS_SSBO = 15,
+};
+
+enum a4xx_state_type {
+ ST4_SHADER = 0,
+ ST4_CONSTANTS = 1,
+ ST4_UBO = 2,
+};
+
+enum a4xx_state_src {
+ SS4_DIRECT = 0,
+ SS4_INDIRECT = 2,
+};
+
+enum a6xx_state_block {
+ SB6_VS_TEX = 0,
+ SB6_HS_TEX = 1,
+ SB6_DS_TEX = 2,
+ SB6_GS_TEX = 3,
+ SB6_FS_TEX = 4,
+ SB6_CS_TEX = 5,
+ SB6_VS_SHADER = 8,
+ SB6_HS_SHADER = 9,
+ SB6_DS_SHADER = 10,
+ SB6_GS_SHADER = 11,
+ SB6_FS_SHADER = 12,
+ SB6_CS_SHADER = 13,
+ SB6_IBO = 14,
+ SB6_CS_IBO = 15,
+};
+
+enum a6xx_state_type {
+ ST6_SHADER = 0,
+ ST6_CONSTANTS = 1,
+ ST6_UBO = 2,
+ ST6_IBO = 3,
+};
+
+enum a6xx_state_src {
+ SS6_DIRECT = 0,
+ SS6_BINDLESS = 1,
+ SS6_INDIRECT = 2,
+ SS6_UBO = 3,
+};
+
+enum a4xx_index_size {
+ INDEX4_SIZE_8_BIT = 0,
+ INDEX4_SIZE_16_BIT = 1,
+ INDEX4_SIZE_32_BIT = 2,
+};
+
+enum a6xx_patch_type {
+ TESS_QUADS = 0,
+ TESS_TRIANGLES = 1,
+ TESS_ISOLINES = 2,
+};
+
+enum a6xx_draw_indirect_opcode {
+ INDIRECT_OP_NORMAL = 2,
+ INDIRECT_OP_INDEXED = 4,
+ INDIRECT_OP_INDIRECT_COUNT = 6,
+ INDIRECT_OP_INDIRECT_COUNT_INDEXED = 7,
+};
+
+enum cp_draw_pred_src {
+ PRED_SRC_MEM = 5,
+};
+
+enum cp_draw_pred_test {
+ NE_0_PASS = 0,
+ EQ_0_PASS = 1,
+};
+
+enum cp_cond_function {
+ WRITE_ALWAYS = 0,
+ WRITE_LT = 1,
+ WRITE_LE = 2,
+ WRITE_EQ = 3,
+ WRITE_NE = 4,
+ WRITE_GE = 5,
+ WRITE_GT = 6,
+};
+
+enum render_mode_cmd {
+ BYPASS = 1,
+ BINNING = 2,
+ GMEM = 3,
+ BLIT2D = 5,
+ BLIT2DSCALE = 7,
+ END2D = 8,
+};
+
+enum cp_blit_cmd {
+ BLIT_OP_FILL = 0,
+ BLIT_OP_COPY = 1,
+ BLIT_OP_SCALE = 3,
+};
+
+enum a6xx_marker {
+ RM6_BYPASS = 1,
+ RM6_BINNING = 2,
+ RM6_GMEM = 4,
+ RM6_ENDVIS = 5,
+ RM6_RESOLVE = 6,
+ RM6_YIELD = 7,
+ RM6_COMPUTE = 8,
+ RM6_BLIT2DSCALE = 12,
+ RM6_IB1LIST_START = 13,
+ RM6_IB1LIST_END = 14,
+ RM6_IFPC_ENABLE = 256,
+ RM6_IFPC_DISABLE = 257,
+};
+
+enum pseudo_reg {
+ SMMU_INFO = 0,
+ NON_SECURE_SAVE_ADDR = 1,
+ SECURE_SAVE_ADDR = 2,
+ NON_PRIV_SAVE_ADDR = 3,
+ COUNTER = 4,
+};
+
+enum compare_mode {
+ PRED_TEST = 1,
+ REG_COMPARE = 2,
+ RENDER_MODE = 3,
+};
+
+enum ctxswitch_ib {
+ RESTORE_IB = 0,
+ YIELD_RESTORE_IB = 1,
+ SAVE_IB = 2,
+ RB_SAVE_IB = 3,
+};
+
+enum reg_tracker {
+ TRACK_CNTL_REG = 1,
+ TRACK_RENDER_CNTL = 2,
+ UNK_EVENT_WRITE = 4,
+ TRACK_LRZ = 8,
+};
+
+enum cp_thread {
+ CP_SET_THREAD_BR = 1,
+ CP_SET_THREAD_BV = 2,
+ CP_SET_THREAD_BOTH = 3,
+};
+
+#define REG_CP_LOAD_STATE_0 0x00000000
+#define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff
+#define CP_LOAD_STATE_0_DST_OFF__SHIFT 0
+static inline uint32_t CP_LOAD_STATE_0_DST_OFF(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE_0_DST_OFF__SHIFT) & CP_LOAD_STATE_0_DST_OFF__MASK;
+}
+#define CP_LOAD_STATE_0_STATE_SRC__MASK 0x00070000
+#define CP_LOAD_STATE_0_STATE_SRC__SHIFT 16
+static inline uint32_t CP_LOAD_STATE_0_STATE_SRC(enum adreno_state_src val)
+{
+ return ((val) << CP_LOAD_STATE_0_STATE_SRC__SHIFT) & CP_LOAD_STATE_0_STATE_SRC__MASK;
+}
+#define CP_LOAD_STATE_0_STATE_BLOCK__MASK 0x00380000
+#define CP_LOAD_STATE_0_STATE_BLOCK__SHIFT 19
+static inline uint32_t CP_LOAD_STATE_0_STATE_BLOCK(enum adreno_state_block val)
+{
+ return ((val) << CP_LOAD_STATE_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE_0_STATE_BLOCK__MASK;
+}
+#define CP_LOAD_STATE_0_NUM_UNIT__MASK 0xffc00000
+#define CP_LOAD_STATE_0_NUM_UNIT__SHIFT 22
+static inline uint32_t CP_LOAD_STATE_0_NUM_UNIT(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE_0_NUM_UNIT__MASK;
+}
+
+#define REG_CP_LOAD_STATE_1 0x00000001
+#define CP_LOAD_STATE_1_STATE_TYPE__MASK 0x00000003
+#define CP_LOAD_STATE_1_STATE_TYPE__SHIFT 0
+static inline uint32_t CP_LOAD_STATE_1_STATE_TYPE(enum adreno_state_type val)
+{
+ return ((val) << CP_LOAD_STATE_1_STATE_TYPE__SHIFT) & CP_LOAD_STATE_1_STATE_TYPE__MASK;
+}
+#define CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK 0xfffffffc
+#define CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT 2
+static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
+{
+ return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK;
+}
+
+#define REG_CP_LOAD_STATE4_0 0x00000000
+#define CP_LOAD_STATE4_0_DST_OFF__MASK 0x00003fff
+#define CP_LOAD_STATE4_0_DST_OFF__SHIFT 0
+static inline uint32_t CP_LOAD_STATE4_0_DST_OFF(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE4_0_DST_OFF__SHIFT) & CP_LOAD_STATE4_0_DST_OFF__MASK;
+}
+#define CP_LOAD_STATE4_0_STATE_SRC__MASK 0x00030000
+#define CP_LOAD_STATE4_0_STATE_SRC__SHIFT 16
+static inline uint32_t CP_LOAD_STATE4_0_STATE_SRC(enum a4xx_state_src val)
+{
+ return ((val) << CP_LOAD_STATE4_0_STATE_SRC__SHIFT) & CP_LOAD_STATE4_0_STATE_SRC__MASK;
+}
+#define CP_LOAD_STATE4_0_STATE_BLOCK__MASK 0x003c0000
+#define CP_LOAD_STATE4_0_STATE_BLOCK__SHIFT 18
+static inline uint32_t CP_LOAD_STATE4_0_STATE_BLOCK(enum a4xx_state_block val)
+{
+ return ((val) << CP_LOAD_STATE4_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE4_0_STATE_BLOCK__MASK;
+}
+#define CP_LOAD_STATE4_0_NUM_UNIT__MASK 0xffc00000
+#define CP_LOAD_STATE4_0_NUM_UNIT__SHIFT 22
+static inline uint32_t CP_LOAD_STATE4_0_NUM_UNIT(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE4_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE4_0_NUM_UNIT__MASK;
+}
+
+#define REG_CP_LOAD_STATE4_1 0x00000001
+#define CP_LOAD_STATE4_1_STATE_TYPE__MASK 0x00000003
+#define CP_LOAD_STATE4_1_STATE_TYPE__SHIFT 0
+static inline uint32_t CP_LOAD_STATE4_1_STATE_TYPE(enum a4xx_state_type val)
+{
+ return ((val) << CP_LOAD_STATE4_1_STATE_TYPE__SHIFT) & CP_LOAD_STATE4_1_STATE_TYPE__MASK;
+}
+#define CP_LOAD_STATE4_1_EXT_SRC_ADDR__MASK 0xfffffffc
+#define CP_LOAD_STATE4_1_EXT_SRC_ADDR__SHIFT 2
+static inline uint32_t CP_LOAD_STATE4_1_EXT_SRC_ADDR(uint32_t val)
+{
+ return ((val >> 2) << CP_LOAD_STATE4_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE4_1_EXT_SRC_ADDR__MASK;
+}
+
+#define REG_CP_LOAD_STATE4_2 0x00000002
+#define CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__MASK 0xffffffff
+#define CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__SHIFT 0
+static inline uint32_t CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__MASK;
+}
+
+#define REG_CP_LOAD_STATE6_0 0x00000000
+#define CP_LOAD_STATE6_0_DST_OFF__MASK 0x00003fff
+#define CP_LOAD_STATE6_0_DST_OFF__SHIFT 0
+static inline uint32_t CP_LOAD_STATE6_0_DST_OFF(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE6_0_DST_OFF__SHIFT) & CP_LOAD_STATE6_0_DST_OFF__MASK;
+}
+#define CP_LOAD_STATE6_0_STATE_TYPE__MASK 0x0000c000
+#define CP_LOAD_STATE6_0_STATE_TYPE__SHIFT 14
+static inline uint32_t CP_LOAD_STATE6_0_STATE_TYPE(enum a6xx_state_type val)
+{
+ return ((val) << CP_LOAD_STATE6_0_STATE_TYPE__SHIFT) & CP_LOAD_STATE6_0_STATE_TYPE__MASK;
+}
+#define CP_LOAD_STATE6_0_STATE_SRC__MASK 0x00030000
+#define CP_LOAD_STATE6_0_STATE_SRC__SHIFT 16
+static inline uint32_t CP_LOAD_STATE6_0_STATE_SRC(enum a6xx_state_src val)
+{
+ return ((val) << CP_LOAD_STATE6_0_STATE_SRC__SHIFT) & CP_LOAD_STATE6_0_STATE_SRC__MASK;
+}
+#define CP_LOAD_STATE6_0_STATE_BLOCK__MASK 0x003c0000
+#define CP_LOAD_STATE6_0_STATE_BLOCK__SHIFT 18
+static inline uint32_t CP_LOAD_STATE6_0_STATE_BLOCK(enum a6xx_state_block val)
+{
+ return ((val) << CP_LOAD_STATE6_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE6_0_STATE_BLOCK__MASK;
+}
+#define CP_LOAD_STATE6_0_NUM_UNIT__MASK 0xffc00000
+#define CP_LOAD_STATE6_0_NUM_UNIT__SHIFT 22
+static inline uint32_t CP_LOAD_STATE6_0_NUM_UNIT(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE6_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE6_0_NUM_UNIT__MASK;
+}
+
+#define REG_CP_LOAD_STATE6_1 0x00000001
+#define CP_LOAD_STATE6_1_EXT_SRC_ADDR__MASK 0xfffffffc
+#define CP_LOAD_STATE6_1_EXT_SRC_ADDR__SHIFT 2
+static inline uint32_t CP_LOAD_STATE6_1_EXT_SRC_ADDR(uint32_t val)
+{
+ return ((val >> 2) << CP_LOAD_STATE6_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE6_1_EXT_SRC_ADDR__MASK;
+}
+
+#define REG_CP_LOAD_STATE6_2 0x00000002
+#define CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__MASK 0xffffffff
+#define CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__SHIFT 0
+static inline uint32_t CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__MASK;
+}
+
+#define REG_CP_LOAD_STATE6_EXT_SRC_ADDR 0x00000001
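
Taken together, the builders above describe the three header dwords of a CP_LOAD_STATE6 packet. A hedged sketch of direct (inline-payload) state loading; ring, num_units and payload_dwords are placeholders chosen only to show how the fields compose:

    /* Illustration only: load constants for the fragment stage, inline. */
    OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3 + payload_dwords);
    OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
                   CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                   CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                   CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_SHADER) |
                   CP_LOAD_STATE6_0_NUM_UNIT(num_units));
    OUT_RING(ring, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));    /* unused for SS6_DIRECT */
    OUT_RING(ring, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
    /* ...followed by payload_dwords of inline state data via OUT_RING() */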
+
+#define REG_CP_DRAW_INDX_0 0x00000000
+#define CP_DRAW_INDX_0_VIZ_QUERY__MASK 0xffffffff
+#define CP_DRAW_INDX_0_VIZ_QUERY__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_0_VIZ_QUERY(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_0_VIZ_QUERY__SHIFT) & CP_DRAW_INDX_0_VIZ_QUERY__MASK;
+}
+
+#define REG_CP_DRAW_INDX_1 0x00000001
+#define CP_DRAW_INDX_1_PRIM_TYPE__MASK 0x0000003f
+#define CP_DRAW_INDX_1_PRIM_TYPE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_1_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << CP_DRAW_INDX_1_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_1_PRIM_TYPE__MASK;
+}
+#define CP_DRAW_INDX_1_SOURCE_SELECT__MASK 0x000000c0
+#define CP_DRAW_INDX_1_SOURCE_SELECT__SHIFT 6
+static inline uint32_t CP_DRAW_INDX_1_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << CP_DRAW_INDX_1_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_1_SOURCE_SELECT__MASK;
+}
+#define CP_DRAW_INDX_1_VIS_CULL__MASK 0x00000600
+#define CP_DRAW_INDX_1_VIS_CULL__SHIFT 9
+static inline uint32_t CP_DRAW_INDX_1_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_INDX_1_VIS_CULL__SHIFT) & CP_DRAW_INDX_1_VIS_CULL__MASK;
+}
+#define CP_DRAW_INDX_1_INDEX_SIZE__MASK 0x00000800
+#define CP_DRAW_INDX_1_INDEX_SIZE__SHIFT 11
+static inline uint32_t CP_DRAW_INDX_1_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << CP_DRAW_INDX_1_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_1_INDEX_SIZE__MASK;
+}
+#define CP_DRAW_INDX_1_NOT_EOP 0x00001000
+#define CP_DRAW_INDX_1_SMALL_INDEX 0x00002000
+#define CP_DRAW_INDX_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define CP_DRAW_INDX_1_NUM_INSTANCES__MASK 0xff000000
+#define CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT 24
+static inline uint32_t CP_DRAW_INDX_1_NUM_INSTANCES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_1_NUM_INSTANCES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2 0x00000002
+#define CP_DRAW_INDX_2_NUM_INDICES__MASK 0xffffffff
+#define CP_DRAW_INDX_2_NUM_INDICES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_3 0x00000003
+#define CP_DRAW_INDX_3_INDX_BASE__MASK 0xffffffff
+#define CP_DRAW_INDX_3_INDX_BASE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_3_INDX_BASE(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_3_INDX_BASE__SHIFT) & CP_DRAW_INDX_3_INDX_BASE__MASK;
+}
+
+#define REG_CP_DRAW_INDX_4 0x00000004
+#define CP_DRAW_INDX_4_INDX_SIZE__MASK 0xffffffff
+#define CP_DRAW_INDX_4_INDX_SIZE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_4_INDX_SIZE(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_4_INDX_SIZE__SHIFT) & CP_DRAW_INDX_4_INDX_SIZE__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2_0 0x00000000
+#define CP_DRAW_INDX_2_0_VIZ_QUERY__MASK 0xffffffff
+#define CP_DRAW_INDX_2_0_VIZ_QUERY__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_0_VIZ_QUERY(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_0_VIZ_QUERY__SHIFT) & CP_DRAW_INDX_2_0_VIZ_QUERY__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2_1 0x00000001
+#define CP_DRAW_INDX_2_1_PRIM_TYPE__MASK 0x0000003f
+#define CP_DRAW_INDX_2_1_PRIM_TYPE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_1_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_2_1_PRIM_TYPE__MASK;
+}
+#define CP_DRAW_INDX_2_1_SOURCE_SELECT__MASK 0x000000c0
+#define CP_DRAW_INDX_2_1_SOURCE_SELECT__SHIFT 6
+static inline uint32_t CP_DRAW_INDX_2_1_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_2_1_SOURCE_SELECT__MASK;
+}
+#define CP_DRAW_INDX_2_1_VIS_CULL__MASK 0x00000600
+#define CP_DRAW_INDX_2_1_VIS_CULL__SHIFT 9
+static inline uint32_t CP_DRAW_INDX_2_1_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_VIS_CULL__SHIFT) & CP_DRAW_INDX_2_1_VIS_CULL__MASK;
+}
+#define CP_DRAW_INDX_2_1_INDEX_SIZE__MASK 0x00000800
+#define CP_DRAW_INDX_2_1_INDEX_SIZE__SHIFT 11
+static inline uint32_t CP_DRAW_INDX_2_1_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_2_1_INDEX_SIZE__MASK;
+}
+#define CP_DRAW_INDX_2_1_NOT_EOP 0x00001000
+#define CP_DRAW_INDX_2_1_SMALL_INDEX 0x00002000
+#define CP_DRAW_INDX_2_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK 0xff000000
+#define CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT 24
+static inline uint32_t CP_DRAW_INDX_2_1_NUM_INSTANCES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2_2 0x00000002
+#define CP_DRAW_INDX_2_2_NUM_INDICES__MASK 0xffffffff
+#define CP_DRAW_INDX_2_2_NUM_INDICES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_2_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_2_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_0 0x00000000
+#define CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK 0x0000003f
+#define CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK 0x000000c0
+#define CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT 6
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK 0x00000300
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT 8
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000c00
+#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 10
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__MASK 0x00003000
+#define CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__SHIFT 12
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(enum a6xx_patch_type val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__SHIFT) & CP_DRAW_INDX_OFFSET_0_PATCH_TYPE__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_GS_ENABLE 0x00010000
+#define CP_DRAW_INDX_OFFSET_0_TESS_ENABLE 0x00020000
+
+#define REG_CP_DRAW_INDX_OFFSET_1 0x00000001
+#define CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002
+#define CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_2_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_3 0x00000003
+#define CP_DRAW_INDX_OFFSET_3_FIRST_INDX__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_3_FIRST_INDX__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_3_FIRST_INDX(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_3_FIRST_INDX__SHIFT) & CP_DRAW_INDX_OFFSET_3_FIRST_INDX__MASK;
+}
+
+
+#define REG_CP_DRAW_INDX_OFFSET_4 0x00000004
+#define CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__SHIFT) & CP_DRAW_INDX_OFFSET_4_INDX_BASE_LO__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_5 0x00000005
+#define CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_BASE_HI__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_INDX_BASE 0x00000004
+
+#define REG_CP_DRAW_INDX_OFFSET_6 0x00000006
+#define CP_DRAW_INDX_OFFSET_6_MAX_INDICES__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_6_MAX_INDICES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_6_MAX_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_6_MAX_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_6_MAX_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_4 0x00000004
+#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_4_INDX_BASE(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_5 0x00000005
+#define CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_SIZE(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK;
+}
+
+#define REG_A4XX_CP_DRAW_INDIRECT_0 0x00000000
+#define A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__MASK 0x0000003f
+#define A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__MASK 0x000000c0
+#define A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__MASK 0x00000300
+#define A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__SHIFT 8
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__MASK 0x00000c00
+#define A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__SHIFT 10
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE(enum a4xx_index_size val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__MASK 0x00003000
+#define A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__SHIFT 12
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE(enum a6xx_patch_type val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_PATCH_TYPE__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_GS_ENABLE 0x00010000
+#define A4XX_CP_DRAW_INDIRECT_0_TESS_ENABLE 0x00020000
+
+
+#define REG_A4XX_CP_DRAW_INDIRECT_1 0x00000001
+#define A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDIRECT_1_INDIRECT__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_1_INDIRECT(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_1_INDIRECT__SHIFT) & A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK;
+}
+
+
+#define REG_A5XX_CP_DRAW_INDIRECT_1 0x00000001
+#define A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__SHIFT) & A5XX_CP_DRAW_INDIRECT_1_INDIRECT_LO__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDIRECT_2 0x00000002
+#define A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT) & A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDIRECT_INDIRECT 0x00000001
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_0 0x00000000
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__MASK 0x0000003f
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__MASK 0x000000c0
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__MASK 0x00000300
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__SHIFT 8
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__MASK 0x00000c00
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__SHIFT 10
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE(enum a4xx_index_size val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__MASK 0x00003000
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__SHIFT 12
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE(enum a6xx_patch_type val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_PATCH_TYPE__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_GS_ENABLE 0x00010000
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_ENABLE 0x00020000
+
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_1 0x00000001
+#define A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__MASK;
+}
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_2 0x00000002
+#define A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__MASK;
+}
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_3 0x00000003
+#define A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__MASK;
+}
+
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_1 0x00000001
+#define A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_2 0x00000002
+#define A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_INDX_BASE 0x00000001
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_3 0x00000003
+#define A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_4 0x00000004
+#define A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_5 0x00000005
+#define A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_INDIRECT 0x00000004
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_0 0x00000000
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__MASK 0x0000003f
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__SHIFT 0
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_PRIM_TYPE__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__MASK 0x000000c0
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_SOURCE_SELECT__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__MASK 0x00000300
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__SHIFT 8
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_VIS_CULL__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__MASK 0x00000c00
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__SHIFT 10
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE(enum a4xx_index_size val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_INDEX_SIZE__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__MASK 0x00003000
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__SHIFT 12
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE(enum a6xx_patch_type val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_0_PATCH_TYPE__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_GS_ENABLE 0x00010000
+#define A6XX_CP_DRAW_INDIRECT_MULTI_0_TESS_ENABLE 0x00020000
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_1 0x00000001
+#define A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__MASK 0x0000000f
+#define A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__SHIFT 0
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(enum a6xx_draw_indirect_opcode val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE__MASK;
+}
+#define A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__MASK 0x003fff00
+#define A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__SHIFT 8
+static inline uint32_t A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(uint32_t val)
+{
+ return ((val) << A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__SHIFT) & A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF__MASK;
+}
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_DRAW_COUNT 0x00000002
+
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_INDIRECT 0x00000003
+
+#define REG_A6XX_CP_DRAW_INDIRECT_MULTI_STRIDE 0x00000005
+
+
+#define REG_CP_DRAW_INDIRECT_MULTI_INDEX_INDEXED 0x00000003
+
+#define REG_CP_DRAW_INDIRECT_MULTI_MAX_INDICES_INDEXED 0x00000005
+
+#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_INDEXED 0x00000006
+
+#define REG_CP_DRAW_INDIRECT_MULTI_STRIDE_INDEXED 0x00000008
+
+
+#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_INDIRECT 0x00000003
+
+#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_COUNT_INDIRECT 0x00000005
+
+#define REG_CP_DRAW_INDIRECT_MULTI_STRIDE_INDIRECT 0x00000007
+
+
+#define REG_CP_DRAW_INDIRECT_MULTI_INDEX_INDIRECT_INDEXED 0x00000003
+
+#define REG_CP_DRAW_INDIRECT_MULTI_MAX_INDICES_INDIRECT_INDEXED 0x00000005
+
+#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_INDIRECT_INDEXED 0x00000006
+
+#define REG_CP_DRAW_INDIRECT_MULTI_INDIRECT_COUNT_INDIRECT_INDEXED 0x00000008
+
+#define REG_CP_DRAW_INDIRECT_MULTI_STRIDE_INDIRECT_INDEXED 0x0000000a
+
+#define REG_CP_DRAW_PRED_ENABLE_GLOBAL_0 0x00000000
+#define CP_DRAW_PRED_ENABLE_GLOBAL_0_ENABLE 0x00000001
+
+#define REG_CP_DRAW_PRED_ENABLE_LOCAL_0 0x00000000
+#define CP_DRAW_PRED_ENABLE_LOCAL_0_ENABLE 0x00000001
+
+#define REG_CP_DRAW_PRED_SET_0 0x00000000
+#define CP_DRAW_PRED_SET_0_SRC__MASK 0x000000f0
+#define CP_DRAW_PRED_SET_0_SRC__SHIFT 4
+static inline uint32_t CP_DRAW_PRED_SET_0_SRC(enum cp_draw_pred_src val)
+{
+ return ((val) << CP_DRAW_PRED_SET_0_SRC__SHIFT) & CP_DRAW_PRED_SET_0_SRC__MASK;
+}
+#define CP_DRAW_PRED_SET_0_TEST__MASK 0x00000100
+#define CP_DRAW_PRED_SET_0_TEST__SHIFT 8
+static inline uint32_t CP_DRAW_PRED_SET_0_TEST(enum cp_draw_pred_test val)
+{
+ return ((val) << CP_DRAW_PRED_SET_0_TEST__SHIFT) & CP_DRAW_PRED_SET_0_TEST__MASK;
+}
+
+#define REG_CP_DRAW_PRED_SET_MEM_ADDR 0x00000001
+
+static inline uint32_t REG_CP_SET_DRAW_STATE_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+
+static inline uint32_t REG_CP_SET_DRAW_STATE__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+#define CP_SET_DRAW_STATE__0_COUNT__MASK 0x0000ffff
+#define CP_SET_DRAW_STATE__0_COUNT__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE__0_COUNT(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE__0_COUNT__SHIFT) & CP_SET_DRAW_STATE__0_COUNT__MASK;
+}
+#define CP_SET_DRAW_STATE__0_DIRTY 0x00010000
+#define CP_SET_DRAW_STATE__0_DISABLE 0x00020000
+#define CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS 0x00040000
+#define CP_SET_DRAW_STATE__0_LOAD_IMMED 0x00080000
+#define CP_SET_DRAW_STATE__0_BINNING 0x00100000
+#define CP_SET_DRAW_STATE__0_GMEM 0x00200000
+#define CP_SET_DRAW_STATE__0_SYSMEM 0x00400000
+#define CP_SET_DRAW_STATE__0_GROUP_ID__MASK 0x1f000000
+#define CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT 24
+static inline uint32_t CP_SET_DRAW_STATE__0_GROUP_ID(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT) & CP_SET_DRAW_STATE__0_GROUP_ID__MASK;
+}
+
+static inline uint32_t REG_CP_SET_DRAW_STATE__1(uint32_t i0) { return 0x00000001 + 0x3*i0; }
+#define CP_SET_DRAW_STATE__1_ADDR_LO__MASK 0xffffffff
+#define CP_SET_DRAW_STATE__1_ADDR_LO__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE__1_ADDR_LO(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE__1_ADDR_LO__SHIFT) & CP_SET_DRAW_STATE__1_ADDR_LO__MASK;
+}
+
+static inline uint32_t REG_CP_SET_DRAW_STATE__2(uint32_t i0) { return 0x00000002 + 0x3*i0; }
+#define CP_SET_DRAW_STATE__2_ADDR_HI__MASK 0xffffffff
+#define CP_SET_DRAW_STATE__2_ADDR_HI__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE__2_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE__2_ADDR_HI__SHIFT) & CP_SET_DRAW_STATE__2_ADDR_HI__MASK;
+}
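
Each draw-state group is three dwords (count/flags, address lo, address hi), and the REG_CP_SET_DRAW_STATE__*(i0) helpers return the dword offsets of the i0-th group. A hedged single-group sketch; ring, group_id, state_dwords and state_iova are placeholders:

    /* Illustration only: point one draw-state group at a state buffer. */
    OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
    OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(state_dwords) |
                   CP_SET_DRAW_STATE__0_GROUP_ID(group_id));
    OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(lower_32_bits(state_iova)));
    OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(upper_32_bits(state_iova)));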
+
+#define REG_CP_SET_BIN_0 0x00000000
+
+#define REG_CP_SET_BIN_1 0x00000001
+#define CP_SET_BIN_1_X1__MASK 0x0000ffff
+#define CP_SET_BIN_1_X1__SHIFT 0
+static inline uint32_t CP_SET_BIN_1_X1(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_1_X1__SHIFT) & CP_SET_BIN_1_X1__MASK;
+}
+#define CP_SET_BIN_1_Y1__MASK 0xffff0000
+#define CP_SET_BIN_1_Y1__SHIFT 16
+static inline uint32_t CP_SET_BIN_1_Y1(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_1_Y1__SHIFT) & CP_SET_BIN_1_Y1__MASK;
+}
+
+#define REG_CP_SET_BIN_2 0x00000002
+#define CP_SET_BIN_2_X2__MASK 0x0000ffff
+#define CP_SET_BIN_2_X2__SHIFT 0
+static inline uint32_t CP_SET_BIN_2_X2(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_2_X2__SHIFT) & CP_SET_BIN_2_X2__MASK;
+}
+#define CP_SET_BIN_2_Y2__MASK 0xffff0000
+#define CP_SET_BIN_2_Y2__SHIFT 16
+static inline uint32_t CP_SET_BIN_2_Y2(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_2_Y2__SHIFT) & CP_SET_BIN_2_Y2__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA_0 0x00000000
+#define CP_SET_BIN_DATA_0_BIN_DATA_ADDR__MASK 0xffffffff
+#define CP_SET_BIN_DATA_0_BIN_DATA_ADDR__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA_0_BIN_DATA_ADDR(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA_0_BIN_DATA_ADDR__SHIFT) & CP_SET_BIN_DATA_0_BIN_DATA_ADDR__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA_1 0x00000001
+#define CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK 0xffffffff
+#define CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT) & CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_0 0x00000000
+#define CP_SET_BIN_DATA5_0_VSC_SIZE__MASK 0x003f0000
+#define CP_SET_BIN_DATA5_0_VSC_SIZE__SHIFT 16
+static inline uint32_t CP_SET_BIN_DATA5_0_VSC_SIZE(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_0_VSC_SIZE__SHIFT) & CP_SET_BIN_DATA5_0_VSC_SIZE__MASK;
+}
+#define CP_SET_BIN_DATA5_0_VSC_N__MASK 0x07c00000
+#define CP_SET_BIN_DATA5_0_VSC_N__SHIFT 22
+static inline uint32_t CP_SET_BIN_DATA5_0_VSC_N(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_0_VSC_N__SHIFT) & CP_SET_BIN_DATA5_0_VSC_N__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_1 0x00000001
+#define CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__SHIFT) & CP_SET_BIN_DATA5_1_BIN_DATA_ADDR_LO__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_2 0x00000002
+#define CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__SHIFT) & CP_SET_BIN_DATA5_2_BIN_DATA_ADDR_HI__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_3 0x00000003
+#define CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__SHIFT) & CP_SET_BIN_DATA5_3_BIN_SIZE_ADDRESS_LO__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_4 0x00000004
+#define CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__SHIFT) & CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_5 0x00000005
+#define CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__SHIFT) & CP_SET_BIN_DATA5_5_BIN_PRIM_STRM_LO__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_6 0x00000006
+#define CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__SHIFT) & CP_SET_BIN_DATA5_6_BIN_PRIM_STRM_HI__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_7 0x00000007
+
+#define REG_CP_SET_BIN_DATA5_9 0x00000009
+
+#define REG_CP_SET_BIN_DATA5_OFFSET_0 0x00000000
+#define CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__MASK 0x003f0000
+#define CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__SHIFT 16
+static inline uint32_t CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__SHIFT) & CP_SET_BIN_DATA5_OFFSET_0_VSC_SIZE__MASK;
+}
+#define CP_SET_BIN_DATA5_OFFSET_0_VSC_N__MASK 0x07c00000
+#define CP_SET_BIN_DATA5_OFFSET_0_VSC_N__SHIFT 22
+static inline uint32_t CP_SET_BIN_DATA5_OFFSET_0_VSC_N(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_OFFSET_0_VSC_N__SHIFT) & CP_SET_BIN_DATA5_OFFSET_0_VSC_N__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_OFFSET_1 0x00000001
+#define CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__SHIFT) & CP_SET_BIN_DATA5_OFFSET_1_BIN_DATA_OFFSET__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_OFFSET_2 0x00000002
+#define CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__SHIFT) & CP_SET_BIN_DATA5_OFFSET_2_BIN_SIZE_OFFSET__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_OFFSET_3 0x00000003
+#define CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__SHIFT) & CP_SET_BIN_DATA5_OFFSET_3_BIN_DATA2_OFFSET__MASK;
+}
+
+#define REG_CP_REG_RMW_0 0x00000000
+#define CP_REG_RMW_0_DST_REG__MASK 0x0003ffff
+#define CP_REG_RMW_0_DST_REG__SHIFT 0
+static inline uint32_t CP_REG_RMW_0_DST_REG(uint32_t val)
+{
+ return ((val) << CP_REG_RMW_0_DST_REG__SHIFT) & CP_REG_RMW_0_DST_REG__MASK;
+}
+#define CP_REG_RMW_0_ROTATE__MASK 0x1f000000
+#define CP_REG_RMW_0_ROTATE__SHIFT 24
+static inline uint32_t CP_REG_RMW_0_ROTATE(uint32_t val)
+{
+ return ((val) << CP_REG_RMW_0_ROTATE__SHIFT) & CP_REG_RMW_0_ROTATE__MASK;
+}
+#define CP_REG_RMW_0_SRC1_ADD 0x20000000
+#define CP_REG_RMW_0_SRC1_IS_REG 0x40000000
+#define CP_REG_RMW_0_SRC0_IS_REG 0x80000000
+
+#define REG_CP_REG_RMW_1 0x00000001
+#define CP_REG_RMW_1_SRC0__MASK 0xffffffff
+#define CP_REG_RMW_1_SRC0__SHIFT 0
+static inline uint32_t CP_REG_RMW_1_SRC0(uint32_t val)
+{
+ return ((val) << CP_REG_RMW_1_SRC0__SHIFT) & CP_REG_RMW_1_SRC0__MASK;
+}
+
+#define REG_CP_REG_RMW_2 0x00000002
+#define CP_REG_RMW_2_SRC1__MASK 0xffffffff
+#define CP_REG_RMW_2_SRC1__SHIFT 0
+static inline uint32_t CP_REG_RMW_2_SRC1(uint32_t val)
+{
+ return ((val) << CP_REG_RMW_2_SRC1__SHIFT) & CP_REG_RMW_2_SRC1__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_0 0x00000000
+#define CP_REG_TO_MEM_0_REG__MASK 0x0003ffff
+#define CP_REG_TO_MEM_0_REG__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_0_REG(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_0_REG__SHIFT) & CP_REG_TO_MEM_0_REG__MASK;
+}
+#define CP_REG_TO_MEM_0_CNT__MASK 0x3ffc0000
+#define CP_REG_TO_MEM_0_CNT__SHIFT 18
+static inline uint32_t CP_REG_TO_MEM_0_CNT(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_0_CNT__SHIFT) & CP_REG_TO_MEM_0_CNT__MASK;
+}
+#define CP_REG_TO_MEM_0_64B 0x40000000
+#define CP_REG_TO_MEM_0_ACCUMULATE 0x80000000
+
+#define REG_CP_REG_TO_MEM_1 0x00000001
+#define CP_REG_TO_MEM_1_DEST__MASK 0xffffffff
+#define CP_REG_TO_MEM_1_DEST__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_2 0x00000002
+#define CP_REG_TO_MEM_2_DEST_HI__MASK 0xffffffff
+#define CP_REG_TO_MEM_2_DEST_HI__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_2_DEST_HI(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_2_DEST_HI__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_OFFSET_REG_0 0x00000000
+#define CP_REG_TO_MEM_OFFSET_REG_0_REG__MASK 0x0003ffff
+#define CP_REG_TO_MEM_OFFSET_REG_0_REG__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_0_REG(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_REG_0_REG__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_0_REG__MASK;
+}
+#define CP_REG_TO_MEM_OFFSET_REG_0_CNT__MASK 0x3ffc0000
+#define CP_REG_TO_MEM_OFFSET_REG_0_CNT__SHIFT 18
+static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_0_CNT(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_REG_0_CNT__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_0_CNT__MASK;
+}
+#define CP_REG_TO_MEM_OFFSET_REG_0_64B 0x40000000
+#define CP_REG_TO_MEM_OFFSET_REG_0_ACCUMULATE 0x80000000
+
+#define REG_CP_REG_TO_MEM_OFFSET_REG_1 0x00000001
+#define CP_REG_TO_MEM_OFFSET_REG_1_DEST__MASK 0xffffffff
+#define CP_REG_TO_MEM_OFFSET_REG_1_DEST__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_1_DEST(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_REG_1_DEST__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_1_DEST__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_OFFSET_REG_2 0x00000002
+#define CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__MASK 0xffffffff
+#define CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_2_DEST_HI__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_OFFSET_REG_3 0x00000003
+#define CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__MASK 0x0003ffff
+#define CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__SHIFT) & CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0__MASK;
+}
+#define CP_REG_TO_MEM_OFFSET_REG_3_OFFSET0_SCRATCH 0x00080000
+
+#define REG_CP_REG_TO_MEM_OFFSET_MEM_0 0x00000000
+#define CP_REG_TO_MEM_OFFSET_MEM_0_REG__MASK 0x0003ffff
+#define CP_REG_TO_MEM_OFFSET_MEM_0_REG__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_0_REG(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_MEM_0_REG__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_0_REG__MASK;
+}
+#define CP_REG_TO_MEM_OFFSET_MEM_0_CNT__MASK 0x3ffc0000
+#define CP_REG_TO_MEM_OFFSET_MEM_0_CNT__SHIFT 18
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_0_CNT(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_MEM_0_CNT__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_0_CNT__MASK;
+}
+#define CP_REG_TO_MEM_OFFSET_MEM_0_64B 0x40000000
+#define CP_REG_TO_MEM_OFFSET_MEM_0_ACCUMULATE 0x80000000
+
+#define REG_CP_REG_TO_MEM_OFFSET_MEM_1 0x00000001
+#define CP_REG_TO_MEM_OFFSET_MEM_1_DEST__MASK 0xffffffff
+#define CP_REG_TO_MEM_OFFSET_MEM_1_DEST__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_1_DEST(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_1_DEST__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_OFFSET_MEM_2 0x00000002
+#define CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__MASK 0xffffffff
+#define CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_2_DEST_HI__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_OFFSET_MEM_3 0x00000003
+#define CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__MASK 0xffffffff
+#define CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_3_OFFSET_LO__MASK;
+}
+
+#define REG_CP_REG_TO_MEM_OFFSET_MEM_4 0x00000004
+#define CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__MASK 0xffffffff
+#define CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__SHIFT) & CP_REG_TO_MEM_OFFSET_MEM_4_OFFSET_HI__MASK;
+}
+
+#define REG_CP_MEM_TO_REG_0 0x00000000
+#define CP_MEM_TO_REG_0_REG__MASK 0x0003ffff
+#define CP_MEM_TO_REG_0_REG__SHIFT 0
+static inline uint32_t CP_MEM_TO_REG_0_REG(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_0_REG__SHIFT) & CP_MEM_TO_REG_0_REG__MASK;
+}
+#define CP_MEM_TO_REG_0_CNT__MASK 0x3ff80000
+#define CP_MEM_TO_REG_0_CNT__SHIFT 19
+static inline uint32_t CP_MEM_TO_REG_0_CNT(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_0_CNT__SHIFT) & CP_MEM_TO_REG_0_CNT__MASK;
+}
+#define CP_MEM_TO_REG_0_SHIFT_BY_2 0x40000000
+#define CP_MEM_TO_REG_0_UNK31 0x80000000
+
+#define REG_CP_MEM_TO_REG_1 0x00000001
+#define CP_MEM_TO_REG_1_SRC__MASK 0xffffffff
+#define CP_MEM_TO_REG_1_SRC__SHIFT 0
+static inline uint32_t CP_MEM_TO_REG_1_SRC(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_1_SRC__SHIFT) & CP_MEM_TO_REG_1_SRC__MASK;
+}
+
+#define REG_CP_MEM_TO_REG_2 0x00000002
+#define CP_MEM_TO_REG_2_SRC_HI__MASK 0xffffffff
+#define CP_MEM_TO_REG_2_SRC_HI__SHIFT 0
+static inline uint32_t CP_MEM_TO_REG_2_SRC_HI(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_2_SRC_HI__SHIFT) & CP_MEM_TO_REG_2_SRC_HI__MASK;
+}
+
+#define REG_CP_MEM_TO_MEM_0 0x00000000
+#define CP_MEM_TO_MEM_0_NEG_A 0x00000001
+#define CP_MEM_TO_MEM_0_NEG_B 0x00000002
+#define CP_MEM_TO_MEM_0_NEG_C 0x00000004
+#define CP_MEM_TO_MEM_0_DOUBLE 0x20000000
+#define CP_MEM_TO_MEM_0_WAIT_FOR_MEM_WRITES 0x40000000
+#define CP_MEM_TO_MEM_0_UNK31 0x80000000
+
+#define REG_CP_MEMCPY_0 0x00000000
+#define CP_MEMCPY_0_DWORDS__MASK 0xffffffff
+#define CP_MEMCPY_0_DWORDS__SHIFT 0
+static inline uint32_t CP_MEMCPY_0_DWORDS(uint32_t val)
+{
+ return ((val) << CP_MEMCPY_0_DWORDS__SHIFT) & CP_MEMCPY_0_DWORDS__MASK;
+}
+
+#define REG_CP_MEMCPY_1 0x00000001
+#define CP_MEMCPY_1_SRC_LO__MASK 0xffffffff
+#define CP_MEMCPY_1_SRC_LO__SHIFT 0
+static inline uint32_t CP_MEMCPY_1_SRC_LO(uint32_t val)
+{
+ return ((val) << CP_MEMCPY_1_SRC_LO__SHIFT) & CP_MEMCPY_1_SRC_LO__MASK;
+}
+
+#define REG_CP_MEMCPY_2 0x00000002
+#define CP_MEMCPY_2_SRC_HI__MASK 0xffffffff
+#define CP_MEMCPY_2_SRC_HI__SHIFT 0
+static inline uint32_t CP_MEMCPY_2_SRC_HI(uint32_t val)
+{
+ return ((val) << CP_MEMCPY_2_SRC_HI__SHIFT) & CP_MEMCPY_2_SRC_HI__MASK;
+}
+
+#define REG_CP_MEMCPY_3 0x00000003
+#define CP_MEMCPY_3_DST_LO__MASK 0xffffffff
+#define CP_MEMCPY_3_DST_LO__SHIFT 0
+static inline uint32_t CP_MEMCPY_3_DST_LO(uint32_t val)
+{
+ return ((val) << CP_MEMCPY_3_DST_LO__SHIFT) & CP_MEMCPY_3_DST_LO__MASK;
+}
+
+#define REG_CP_MEMCPY_4 0x00000004
+#define CP_MEMCPY_4_DST_HI__MASK 0xffffffff
+#define CP_MEMCPY_4_DST_HI__SHIFT 0
+static inline uint32_t CP_MEMCPY_4_DST_HI(uint32_t val)
+{
+ return ((val) << CP_MEMCPY_4_DST_HI__SHIFT) & CP_MEMCPY_4_DST_HI__MASK;
+}
+
+#define REG_CP_REG_TO_SCRATCH_0 0x00000000
+#define CP_REG_TO_SCRATCH_0_REG__MASK 0x0003ffff
+#define CP_REG_TO_SCRATCH_0_REG__SHIFT 0
+static inline uint32_t CP_REG_TO_SCRATCH_0_REG(uint32_t val)
+{
+ return ((val) << CP_REG_TO_SCRATCH_0_REG__SHIFT) & CP_REG_TO_SCRATCH_0_REG__MASK;
+}
+#define CP_REG_TO_SCRATCH_0_SCRATCH__MASK 0x00700000
+#define CP_REG_TO_SCRATCH_0_SCRATCH__SHIFT 20
+static inline uint32_t CP_REG_TO_SCRATCH_0_SCRATCH(uint32_t val)
+{
+ return ((val) << CP_REG_TO_SCRATCH_0_SCRATCH__SHIFT) & CP_REG_TO_SCRATCH_0_SCRATCH__MASK;
+}
+#define CP_REG_TO_SCRATCH_0_CNT__MASK 0x07000000
+#define CP_REG_TO_SCRATCH_0_CNT__SHIFT 24
+static inline uint32_t CP_REG_TO_SCRATCH_0_CNT(uint32_t val)
+{
+ return ((val) << CP_REG_TO_SCRATCH_0_CNT__SHIFT) & CP_REG_TO_SCRATCH_0_CNT__MASK;
+}
+
+#define REG_CP_SCRATCH_TO_REG_0 0x00000000
+#define CP_SCRATCH_TO_REG_0_REG__MASK 0x0003ffff
+#define CP_SCRATCH_TO_REG_0_REG__SHIFT 0
+static inline uint32_t CP_SCRATCH_TO_REG_0_REG(uint32_t val)
+{
+ return ((val) << CP_SCRATCH_TO_REG_0_REG__SHIFT) & CP_SCRATCH_TO_REG_0_REG__MASK;
+}
+#define CP_SCRATCH_TO_REG_0_UNK18 0x00040000
+#define CP_SCRATCH_TO_REG_0_SCRATCH__MASK 0x00700000
+#define CP_SCRATCH_TO_REG_0_SCRATCH__SHIFT 20
+static inline uint32_t CP_SCRATCH_TO_REG_0_SCRATCH(uint32_t val)
+{
+ return ((val) << CP_SCRATCH_TO_REG_0_SCRATCH__SHIFT) & CP_SCRATCH_TO_REG_0_SCRATCH__MASK;
+}
+#define CP_SCRATCH_TO_REG_0_CNT__MASK 0x07000000
+#define CP_SCRATCH_TO_REG_0_CNT__SHIFT 24
+static inline uint32_t CP_SCRATCH_TO_REG_0_CNT(uint32_t val)
+{
+ return ((val) << CP_SCRATCH_TO_REG_0_CNT__SHIFT) & CP_SCRATCH_TO_REG_0_CNT__MASK;
+}
+
+#define REG_CP_SCRATCH_WRITE_0 0x00000000
+#define CP_SCRATCH_WRITE_0_SCRATCH__MASK 0x00700000
+#define CP_SCRATCH_WRITE_0_SCRATCH__SHIFT 20
+static inline uint32_t CP_SCRATCH_WRITE_0_SCRATCH(uint32_t val)
+{
+ return ((val) << CP_SCRATCH_WRITE_0_SCRATCH__SHIFT) & CP_SCRATCH_WRITE_0_SCRATCH__MASK;
+}
+
+#define REG_CP_MEM_WRITE_0 0x00000000
+#define CP_MEM_WRITE_0_ADDR_LO__MASK 0xffffffff
+#define CP_MEM_WRITE_0_ADDR_LO__SHIFT 0
+static inline uint32_t CP_MEM_WRITE_0_ADDR_LO(uint32_t val)
+{
+ return ((val) << CP_MEM_WRITE_0_ADDR_LO__SHIFT) & CP_MEM_WRITE_0_ADDR_LO__MASK;
+}
+
+#define REG_CP_MEM_WRITE_1 0x00000001
+#define CP_MEM_WRITE_1_ADDR_HI__MASK 0xffffffff
+#define CP_MEM_WRITE_1_ADDR_HI__SHIFT 0
+static inline uint32_t CP_MEM_WRITE_1_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_MEM_WRITE_1_ADDR_HI__SHIFT) & CP_MEM_WRITE_1_ADDR_HI__MASK;
+}
+
+#define REG_CP_COND_WRITE_0 0x00000000
+#define CP_COND_WRITE_0_FUNCTION__MASK 0x00000007
+#define CP_COND_WRITE_0_FUNCTION__SHIFT 0
+static inline uint32_t CP_COND_WRITE_0_FUNCTION(enum cp_cond_function val)
+{
+ return ((val) << CP_COND_WRITE_0_FUNCTION__SHIFT) & CP_COND_WRITE_0_FUNCTION__MASK;
+}
+#define CP_COND_WRITE_0_POLL_MEMORY 0x00000010
+#define CP_COND_WRITE_0_WRITE_MEMORY 0x00000100
+
+#define REG_CP_COND_WRITE_1 0x00000001
+#define CP_COND_WRITE_1_POLL_ADDR__MASK 0xffffffff
+#define CP_COND_WRITE_1_POLL_ADDR__SHIFT 0
+static inline uint32_t CP_COND_WRITE_1_POLL_ADDR(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE_1_POLL_ADDR__SHIFT) & CP_COND_WRITE_1_POLL_ADDR__MASK;
+}
+
+#define REG_CP_COND_WRITE_2 0x00000002
+#define CP_COND_WRITE_2_REF__MASK 0xffffffff
+#define CP_COND_WRITE_2_REF__SHIFT 0
+static inline uint32_t CP_COND_WRITE_2_REF(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE_2_REF__SHIFT) & CP_COND_WRITE_2_REF__MASK;
+}
+
+#define REG_CP_COND_WRITE_3 0x00000003
+#define CP_COND_WRITE_3_MASK__MASK 0xffffffff
+#define CP_COND_WRITE_3_MASK__SHIFT 0
+static inline uint32_t CP_COND_WRITE_3_MASK(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE_3_MASK__SHIFT) & CP_COND_WRITE_3_MASK__MASK;
+}
+
+#define REG_CP_COND_WRITE_4 0x00000004
+#define CP_COND_WRITE_4_WRITE_ADDR__MASK 0xffffffff
+#define CP_COND_WRITE_4_WRITE_ADDR__SHIFT 0
+static inline uint32_t CP_COND_WRITE_4_WRITE_ADDR(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE_4_WRITE_ADDR__SHIFT) & CP_COND_WRITE_4_WRITE_ADDR__MASK;
+}
+
+#define REG_CP_COND_WRITE_5 0x00000005
+#define CP_COND_WRITE_5_WRITE_DATA__MASK 0xffffffff
+#define CP_COND_WRITE_5_WRITE_DATA__SHIFT 0
+static inline uint32_t CP_COND_WRITE_5_WRITE_DATA(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE_5_WRITE_DATA__SHIFT) & CP_COND_WRITE_5_WRITE_DATA__MASK;
+}
+
+#define REG_CP_COND_WRITE5_0 0x00000000
+#define CP_COND_WRITE5_0_FUNCTION__MASK 0x00000007
+#define CP_COND_WRITE5_0_FUNCTION__SHIFT 0
+static inline uint32_t CP_COND_WRITE5_0_FUNCTION(enum cp_cond_function val)
+{
+ return ((val) << CP_COND_WRITE5_0_FUNCTION__SHIFT) & CP_COND_WRITE5_0_FUNCTION__MASK;
+}
+#define CP_COND_WRITE5_0_SIGNED_COMPARE 0x00000008
+#define CP_COND_WRITE5_0_POLL_MEMORY 0x00000010
+#define CP_COND_WRITE5_0_POLL_SCRATCH 0x00000020
+#define CP_COND_WRITE5_0_WRITE_MEMORY 0x00000100
+
+#define REG_CP_COND_WRITE5_1 0x00000001
+#define CP_COND_WRITE5_1_POLL_ADDR_LO__MASK 0xffffffff
+#define CP_COND_WRITE5_1_POLL_ADDR_LO__SHIFT 0
+static inline uint32_t CP_COND_WRITE5_1_POLL_ADDR_LO(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE5_1_POLL_ADDR_LO__SHIFT) & CP_COND_WRITE5_1_POLL_ADDR_LO__MASK;
+}
+
+#define REG_CP_COND_WRITE5_2 0x00000002
+#define CP_COND_WRITE5_2_POLL_ADDR_HI__MASK 0xffffffff
+#define CP_COND_WRITE5_2_POLL_ADDR_HI__SHIFT 0
+static inline uint32_t CP_COND_WRITE5_2_POLL_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE5_2_POLL_ADDR_HI__SHIFT) & CP_COND_WRITE5_2_POLL_ADDR_HI__MASK;
+}
+
+#define REG_CP_COND_WRITE5_3 0x00000003
+#define CP_COND_WRITE5_3_REF__MASK 0xffffffff
+#define CP_COND_WRITE5_3_REF__SHIFT 0
+static inline uint32_t CP_COND_WRITE5_3_REF(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE5_3_REF__SHIFT) & CP_COND_WRITE5_3_REF__MASK;
+}
+
+#define REG_CP_COND_WRITE5_4 0x00000004
+#define CP_COND_WRITE5_4_MASK__MASK 0xffffffff
+#define CP_COND_WRITE5_4_MASK__SHIFT 0
+static inline uint32_t CP_COND_WRITE5_4_MASK(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE5_4_MASK__SHIFT) & CP_COND_WRITE5_4_MASK__MASK;
+}
+
+#define REG_CP_COND_WRITE5_5 0x00000005
+#define CP_COND_WRITE5_5_WRITE_ADDR_LO__MASK 0xffffffff
+#define CP_COND_WRITE5_5_WRITE_ADDR_LO__SHIFT 0
+static inline uint32_t CP_COND_WRITE5_5_WRITE_ADDR_LO(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE5_5_WRITE_ADDR_LO__SHIFT) & CP_COND_WRITE5_5_WRITE_ADDR_LO__MASK;
+}
+
+#define REG_CP_COND_WRITE5_6 0x00000006
+#define CP_COND_WRITE5_6_WRITE_ADDR_HI__MASK 0xffffffff
+#define CP_COND_WRITE5_6_WRITE_ADDR_HI__SHIFT 0
+static inline uint32_t CP_COND_WRITE5_6_WRITE_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE5_6_WRITE_ADDR_HI__SHIFT) & CP_COND_WRITE5_6_WRITE_ADDR_HI__MASK;
+}
+
+#define REG_CP_COND_WRITE5_7 0x00000007
+#define CP_COND_WRITE5_7_WRITE_DATA__MASK 0xffffffff
+#define CP_COND_WRITE5_7_WRITE_DATA__SHIFT 0
+static inline uint32_t CP_COND_WRITE5_7_WRITE_DATA(uint32_t val)
+{
+ return ((val) << CP_COND_WRITE5_7_WRITE_DATA__SHIFT) & CP_COND_WRITE5_7_WRITE_DATA__MASK;
+}
+
+#define REG_CP_WAIT_MEM_GTE_0 0x00000000
+#define CP_WAIT_MEM_GTE_0_RESERVED__MASK 0xffffffff
+#define CP_WAIT_MEM_GTE_0_RESERVED__SHIFT 0
+static inline uint32_t CP_WAIT_MEM_GTE_0_RESERVED(uint32_t val)
+{
+ return ((val) << CP_WAIT_MEM_GTE_0_RESERVED__SHIFT) & CP_WAIT_MEM_GTE_0_RESERVED__MASK;
+}
+
+#define REG_CP_WAIT_MEM_GTE_1 0x00000001
+#define CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__MASK 0xffffffff
+#define CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__SHIFT 0
+static inline uint32_t CP_WAIT_MEM_GTE_1_POLL_ADDR_LO(uint32_t val)
+{
+ return ((val) << CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__SHIFT) & CP_WAIT_MEM_GTE_1_POLL_ADDR_LO__MASK;
+}
+
+#define REG_CP_WAIT_MEM_GTE_2 0x00000002
+#define CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__MASK 0xffffffff
+#define CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__SHIFT 0
+static inline uint32_t CP_WAIT_MEM_GTE_2_POLL_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__SHIFT) & CP_WAIT_MEM_GTE_2_POLL_ADDR_HI__MASK;
+}
+
+#define REG_CP_WAIT_MEM_GTE_3 0x00000003
+#define CP_WAIT_MEM_GTE_3_REF__MASK 0xffffffff
+#define CP_WAIT_MEM_GTE_3_REF__SHIFT 0
+static inline uint32_t CP_WAIT_MEM_GTE_3_REF(uint32_t val)
+{
+ return ((val) << CP_WAIT_MEM_GTE_3_REF__SHIFT) & CP_WAIT_MEM_GTE_3_REF__MASK;
+}
+
+#define REG_CP_WAIT_REG_MEM_0 0x00000000
+#define CP_WAIT_REG_MEM_0_FUNCTION__MASK 0x00000007
+#define CP_WAIT_REG_MEM_0_FUNCTION__SHIFT 0
+static inline uint32_t CP_WAIT_REG_MEM_0_FUNCTION(enum cp_cond_function val)
+{
+ return ((val) << CP_WAIT_REG_MEM_0_FUNCTION__SHIFT) & CP_WAIT_REG_MEM_0_FUNCTION__MASK;
+}
+#define CP_WAIT_REG_MEM_0_SIGNED_COMPARE 0x00000008
+#define CP_WAIT_REG_MEM_0_POLL_MEMORY 0x00000010
+#define CP_WAIT_REG_MEM_0_POLL_SCRATCH 0x00000020
+#define CP_WAIT_REG_MEM_0_WRITE_MEMORY 0x00000100
+
+#define REG_CP_WAIT_REG_MEM_1 0x00000001
+#define CP_WAIT_REG_MEM_1_POLL_ADDR_LO__MASK 0xffffffff
+#define CP_WAIT_REG_MEM_1_POLL_ADDR_LO__SHIFT 0
+static inline uint32_t CP_WAIT_REG_MEM_1_POLL_ADDR_LO(uint32_t val)
+{
+ return ((val) << CP_WAIT_REG_MEM_1_POLL_ADDR_LO__SHIFT) & CP_WAIT_REG_MEM_1_POLL_ADDR_LO__MASK;
+}
+
+#define REG_CP_WAIT_REG_MEM_2 0x00000002
+#define CP_WAIT_REG_MEM_2_POLL_ADDR_HI__MASK 0xffffffff
+#define CP_WAIT_REG_MEM_2_POLL_ADDR_HI__SHIFT 0
+static inline uint32_t CP_WAIT_REG_MEM_2_POLL_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_WAIT_REG_MEM_2_POLL_ADDR_HI__SHIFT) & CP_WAIT_REG_MEM_2_POLL_ADDR_HI__MASK;
+}
+
+#define REG_CP_WAIT_REG_MEM_3 0x00000003
+#define CP_WAIT_REG_MEM_3_REF__MASK 0xffffffff
+#define CP_WAIT_REG_MEM_3_REF__SHIFT 0
+static inline uint32_t CP_WAIT_REG_MEM_3_REF(uint32_t val)
+{
+ return ((val) << CP_WAIT_REG_MEM_3_REF__SHIFT) & CP_WAIT_REG_MEM_3_REF__MASK;
+}
+
+#define REG_CP_WAIT_REG_MEM_4 0x00000004
+#define CP_WAIT_REG_MEM_4_MASK__MASK 0xffffffff
+#define CP_WAIT_REG_MEM_4_MASK__SHIFT 0
+static inline uint32_t CP_WAIT_REG_MEM_4_MASK(uint32_t val)
+{
+ return ((val) << CP_WAIT_REG_MEM_4_MASK__SHIFT) & CP_WAIT_REG_MEM_4_MASK__MASK;
+}
+
+#define REG_CP_WAIT_REG_MEM_5 0x00000005
+#define CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__MASK 0xffffffff
+#define CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__SHIFT 0
+static inline uint32_t CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(uint32_t val)
+{
+ return ((val) << CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__SHIFT) & CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES__MASK;
+}
+
+#define REG_CP_WAIT_TWO_REGS_0 0x00000000
+#define CP_WAIT_TWO_REGS_0_REG0__MASK 0x0003ffff
+#define CP_WAIT_TWO_REGS_0_REG0__SHIFT 0
+static inline uint32_t CP_WAIT_TWO_REGS_0_REG0(uint32_t val)
+{
+ return ((val) << CP_WAIT_TWO_REGS_0_REG0__SHIFT) & CP_WAIT_TWO_REGS_0_REG0__MASK;
+}
+
+#define REG_CP_WAIT_TWO_REGS_1 0x00000001
+#define CP_WAIT_TWO_REGS_1_REG1__MASK 0x0003ffff
+#define CP_WAIT_TWO_REGS_1_REG1__SHIFT 0
+static inline uint32_t CP_WAIT_TWO_REGS_1_REG1(uint32_t val)
+{
+ return ((val) << CP_WAIT_TWO_REGS_1_REG1__SHIFT) & CP_WAIT_TWO_REGS_1_REG1__MASK;
+}
+
+#define REG_CP_WAIT_TWO_REGS_2 0x00000002
+#define CP_WAIT_TWO_REGS_2_REF__MASK 0xffffffff
+#define CP_WAIT_TWO_REGS_2_REF__SHIFT 0
+static inline uint32_t CP_WAIT_TWO_REGS_2_REF(uint32_t val)
+{
+ return ((val) << CP_WAIT_TWO_REGS_2_REF__SHIFT) & CP_WAIT_TWO_REGS_2_REF__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_0 0x00000000
+
+#define REG_CP_DISPATCH_COMPUTE_1 0x00000001
+#define CP_DISPATCH_COMPUTE_1_X__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_1_X__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_1_X(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_1_X__SHIFT) & CP_DISPATCH_COMPUTE_1_X__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_2 0x00000002
+#define CP_DISPATCH_COMPUTE_2_Y__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_2_Y__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_2_Y(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_2_Y__SHIFT) & CP_DISPATCH_COMPUTE_2_Y__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_3 0x00000003
+#define CP_DISPATCH_COMPUTE_3_Z__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_3_Z__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_3_Z(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_3_Z__SHIFT) & CP_DISPATCH_COMPUTE_3_Z__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_0 0x00000000
+#define CP_SET_RENDER_MODE_0_MODE__MASK 0x000001ff
+#define CP_SET_RENDER_MODE_0_MODE__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_0_MODE(enum render_mode_cmd val)
+{
+ return ((val) << CP_SET_RENDER_MODE_0_MODE__SHIFT) & CP_SET_RENDER_MODE_0_MODE__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_1 0x00000001
+#define CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_1_ADDR_0_LO(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT) & CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_2 0x00000002
+#define CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_2_ADDR_0_HI(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT) & CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_3 0x00000003
+#define CP_SET_RENDER_MODE_3_VSC_ENABLE 0x00000008
+#define CP_SET_RENDER_MODE_3_GMEM_ENABLE 0x00000010
+
+#define REG_CP_SET_RENDER_MODE_4 0x00000004
+
+#define REG_CP_SET_RENDER_MODE_5 0x00000005
+#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_5_ADDR_1_LEN(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT) & CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_6 0x00000006
+#define CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_6_ADDR_1_LO(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT) & CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_7 0x00000007
+#define CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_7_ADDR_1_HI(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT) & CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK;
+}
+
+#define REG_CP_COMPUTE_CHECKPOINT_0 0x00000000
+#define CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__MASK 0xffffffff
+#define CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__SHIFT 0
+static inline uint32_t CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO(uint32_t val)
+{
+ return ((val) << CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__SHIFT) & CP_COMPUTE_CHECKPOINT_0_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_COMPUTE_CHECKPOINT_1 0x00000001
+#define CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__MASK 0xffffffff
+#define CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__SHIFT 0
+static inline uint32_t CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI(uint32_t val)
+{
+ return ((val) << CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__SHIFT) & CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_COMPUTE_CHECKPOINT_2 0x00000002
+
+#define REG_CP_COMPUTE_CHECKPOINT_3 0x00000003
+#define CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__MASK 0xffffffff
+#define CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__SHIFT 0
+static inline uint32_t CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN(uint32_t val)
+{
+ return ((val) << CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__SHIFT) & CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__MASK;
+}
+
+#define REG_CP_COMPUTE_CHECKPOINT_4 0x00000004
+
+#define REG_CP_COMPUTE_CHECKPOINT_5 0x00000005
+#define CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__MASK 0xffffffff
+#define CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__SHIFT 0
+static inline uint32_t CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO(uint32_t val)
+{
+ return ((val) << CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__SHIFT) & CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__MASK;
+}
+
+#define REG_CP_COMPUTE_CHECKPOINT_6 0x00000006
+#define CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__MASK 0xffffffff
+#define CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__SHIFT 0
+static inline uint32_t CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI(uint32_t val)
+{
+ return ((val) << CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__SHIFT) & CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__MASK;
+}
+
+#define REG_CP_COMPUTE_CHECKPOINT_7 0x00000007
+
+#define REG_CP_PERFCOUNTER_ACTION_0 0x00000000
+
+#define REG_CP_PERFCOUNTER_ACTION_1 0x00000001
+#define CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__MASK 0xffffffff
+#define CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__SHIFT 0
+static inline uint32_t CP_PERFCOUNTER_ACTION_1_ADDR_0_LO(uint32_t val)
+{
+ return ((val) << CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__SHIFT) & CP_PERFCOUNTER_ACTION_1_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_PERFCOUNTER_ACTION_2 0x00000002
+#define CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__MASK 0xffffffff
+#define CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__SHIFT 0
+static inline uint32_t CP_PERFCOUNTER_ACTION_2_ADDR_0_HI(uint32_t val)
+{
+ return ((val) << CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__SHIFT) & CP_PERFCOUNTER_ACTION_2_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_EVENT_WRITE_0 0x00000000
+#define CP_EVENT_WRITE_0_EVENT__MASK 0x000000ff
+#define CP_EVENT_WRITE_0_EVENT__SHIFT 0
+static inline uint32_t CP_EVENT_WRITE_0_EVENT(enum vgt_event_type val)
+{
+ return ((val) << CP_EVENT_WRITE_0_EVENT__SHIFT) & CP_EVENT_WRITE_0_EVENT__MASK;
+}
+#define CP_EVENT_WRITE_0_TIMESTAMP 0x40000000
+#define CP_EVENT_WRITE_0_IRQ 0x80000000
+
+#define REG_CP_EVENT_WRITE_1 0x00000001
+#define CP_EVENT_WRITE_1_ADDR_0_LO__MASK 0xffffffff
+#define CP_EVENT_WRITE_1_ADDR_0_LO__SHIFT 0
+static inline uint32_t CP_EVENT_WRITE_1_ADDR_0_LO(uint32_t val)
+{
+ return ((val) << CP_EVENT_WRITE_1_ADDR_0_LO__SHIFT) & CP_EVENT_WRITE_1_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_EVENT_WRITE_2 0x00000002
+#define CP_EVENT_WRITE_2_ADDR_0_HI__MASK 0xffffffff
+#define CP_EVENT_WRITE_2_ADDR_0_HI__SHIFT 0
+static inline uint32_t CP_EVENT_WRITE_2_ADDR_0_HI(uint32_t val)
+{
+ return ((val) << CP_EVENT_WRITE_2_ADDR_0_HI__SHIFT) & CP_EVENT_WRITE_2_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_EVENT_WRITE_3 0x00000003
+
+#define REG_CP_BLIT_0 0x00000000
+#define CP_BLIT_0_OP__MASK 0x0000000f
+#define CP_BLIT_0_OP__SHIFT 0
+static inline uint32_t CP_BLIT_0_OP(enum cp_blit_cmd val)
+{
+ return ((val) << CP_BLIT_0_OP__SHIFT) & CP_BLIT_0_OP__MASK;
+}
+
+#define REG_CP_BLIT_1 0x00000001
+#define CP_BLIT_1_SRC_X1__MASK 0x00003fff
+#define CP_BLIT_1_SRC_X1__SHIFT 0
+static inline uint32_t CP_BLIT_1_SRC_X1(uint32_t val)
+{
+ return ((val) << CP_BLIT_1_SRC_X1__SHIFT) & CP_BLIT_1_SRC_X1__MASK;
+}
+#define CP_BLIT_1_SRC_Y1__MASK 0x3fff0000
+#define CP_BLIT_1_SRC_Y1__SHIFT 16
+static inline uint32_t CP_BLIT_1_SRC_Y1(uint32_t val)
+{
+ return ((val) << CP_BLIT_1_SRC_Y1__SHIFT) & CP_BLIT_1_SRC_Y1__MASK;
+}
+
+#define REG_CP_BLIT_2 0x00000002
+#define CP_BLIT_2_SRC_X2__MASK 0x00003fff
+#define CP_BLIT_2_SRC_X2__SHIFT 0
+static inline uint32_t CP_BLIT_2_SRC_X2(uint32_t val)
+{
+ return ((val) << CP_BLIT_2_SRC_X2__SHIFT) & CP_BLIT_2_SRC_X2__MASK;
+}
+#define CP_BLIT_2_SRC_Y2__MASK 0x3fff0000
+#define CP_BLIT_2_SRC_Y2__SHIFT 16
+static inline uint32_t CP_BLIT_2_SRC_Y2(uint32_t val)
+{
+ return ((val) << CP_BLIT_2_SRC_Y2__SHIFT) & CP_BLIT_2_SRC_Y2__MASK;
+}
+
+#define REG_CP_BLIT_3 0x00000003
+#define CP_BLIT_3_DST_X1__MASK 0x00003fff
+#define CP_BLIT_3_DST_X1__SHIFT 0
+static inline uint32_t CP_BLIT_3_DST_X1(uint32_t val)
+{
+ return ((val) << CP_BLIT_3_DST_X1__SHIFT) & CP_BLIT_3_DST_X1__MASK;
+}
+#define CP_BLIT_3_DST_Y1__MASK 0x3fff0000
+#define CP_BLIT_3_DST_Y1__SHIFT 16
+static inline uint32_t CP_BLIT_3_DST_Y1(uint32_t val)
+{
+ return ((val) << CP_BLIT_3_DST_Y1__SHIFT) & CP_BLIT_3_DST_Y1__MASK;
+}
+
+#define REG_CP_BLIT_4 0x00000004
+#define CP_BLIT_4_DST_X2__MASK 0x00003fff
+#define CP_BLIT_4_DST_X2__SHIFT 0
+static inline uint32_t CP_BLIT_4_DST_X2(uint32_t val)
+{
+ return ((val) << CP_BLIT_4_DST_X2__SHIFT) & CP_BLIT_4_DST_X2__MASK;
+}
+#define CP_BLIT_4_DST_Y2__MASK 0x3fff0000
+#define CP_BLIT_4_DST_Y2__SHIFT 16
+static inline uint32_t CP_BLIT_4_DST_Y2(uint32_t val)
+{
+ return ((val) << CP_BLIT_4_DST_Y2__SHIFT) & CP_BLIT_4_DST_Y2__MASK;
+}
+
+#define REG_CP_EXEC_CS_0 0x00000000
+
+#define REG_CP_EXEC_CS_1 0x00000001
+#define CP_EXEC_CS_1_NGROUPS_X__MASK 0xffffffff
+#define CP_EXEC_CS_1_NGROUPS_X__SHIFT 0
+static inline uint32_t CP_EXEC_CS_1_NGROUPS_X(uint32_t val)
+{
+ return ((val) << CP_EXEC_CS_1_NGROUPS_X__SHIFT) & CP_EXEC_CS_1_NGROUPS_X__MASK;
+}
+
+#define REG_CP_EXEC_CS_2 0x00000002
+#define CP_EXEC_CS_2_NGROUPS_Y__MASK 0xffffffff
+#define CP_EXEC_CS_2_NGROUPS_Y__SHIFT 0
+static inline uint32_t CP_EXEC_CS_2_NGROUPS_Y(uint32_t val)
+{
+ return ((val) << CP_EXEC_CS_2_NGROUPS_Y__SHIFT) & CP_EXEC_CS_2_NGROUPS_Y__MASK;
+}
+
+#define REG_CP_EXEC_CS_3 0x00000003
+#define CP_EXEC_CS_3_NGROUPS_Z__MASK 0xffffffff
+#define CP_EXEC_CS_3_NGROUPS_Z__SHIFT 0
+static inline uint32_t CP_EXEC_CS_3_NGROUPS_Z(uint32_t val)
+{
+ return ((val) << CP_EXEC_CS_3_NGROUPS_Z__SHIFT) & CP_EXEC_CS_3_NGROUPS_Z__MASK;
+}
+
+#define REG_A4XX_CP_EXEC_CS_INDIRECT_0 0x00000000
+
+
+#define REG_A4XX_CP_EXEC_CS_INDIRECT_1 0x00000001
+#define A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__MASK 0xffffffff
+#define A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__SHIFT 0
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_1_ADDR(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__MASK;
+}
+
+#define REG_A4XX_CP_EXEC_CS_INDIRECT_2 0x00000002
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__MASK 0x00000ffc
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__SHIFT 2
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__MASK;
+}
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__MASK 0x003ff000
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__SHIFT 12
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__MASK;
+}
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__MASK 0xffc00000
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__MASK;
+}
+
+
+#define REG_A5XX_CP_EXEC_CS_INDIRECT_1 0x00000001
+#define A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__MASK 0xffffffff
+#define A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__SHIFT 0
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__MASK;
+}
+
+#define REG_A5XX_CP_EXEC_CS_INDIRECT_2 0x00000002
+#define A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__MASK 0xffffffff
+#define A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__SHIFT 0
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__MASK;
+}
+
+#define REG_A5XX_CP_EXEC_CS_INDIRECT_3 0x00000003
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__MASK 0x00000ffc
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__SHIFT 2
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__MASK;
+}
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__MASK 0x003ff000
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__SHIFT 12
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__MASK;
+}
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__MASK 0xffc00000
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__MASK;
+}
+
+#define REG_A6XX_CP_SET_MARKER_0 0x00000000
+#define A6XX_CP_SET_MARKER_0_MODE__MASK 0x000001ff
+#define A6XX_CP_SET_MARKER_0_MODE__SHIFT 0
+static inline uint32_t A6XX_CP_SET_MARKER_0_MODE(enum a6xx_marker val)
+{
+ return ((val) << A6XX_CP_SET_MARKER_0_MODE__SHIFT) & A6XX_CP_SET_MARKER_0_MODE__MASK;
+}
+#define A6XX_CP_SET_MARKER_0_MARKER__MASK 0x0000000f
+#define A6XX_CP_SET_MARKER_0_MARKER__SHIFT 0
+static inline uint32_t A6XX_CP_SET_MARKER_0_MARKER(enum a6xx_marker val)
+{
+ return ((val) << A6XX_CP_SET_MARKER_0_MARKER__SHIFT) & A6XX_CP_SET_MARKER_0_MARKER__MASK;
+}
+
+static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+
+static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+#define A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK 0x00000007
+#define A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT 0
+static inline uint32_t A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG(enum pseudo_reg val)
+{
+ return ((val) << A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT) & A6XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK;
+}
+
+static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG__1(uint32_t i0) { return 0x00000001 + 0x3*i0; }
+#define A6XX_CP_SET_PSEUDO_REG__1_LO__MASK 0xffffffff
+#define A6XX_CP_SET_PSEUDO_REG__1_LO__SHIFT 0
+static inline uint32_t A6XX_CP_SET_PSEUDO_REG__1_LO(uint32_t val)
+{
+ return ((val) << A6XX_CP_SET_PSEUDO_REG__1_LO__SHIFT) & A6XX_CP_SET_PSEUDO_REG__1_LO__MASK;
+}
+
+static inline uint32_t REG_A6XX_CP_SET_PSEUDO_REG__2(uint32_t i0) { return 0x00000002 + 0x3*i0; }
+#define A6XX_CP_SET_PSEUDO_REG__2_HI__MASK 0xffffffff
+#define A6XX_CP_SET_PSEUDO_REG__2_HI__SHIFT 0
+static inline uint32_t A6XX_CP_SET_PSEUDO_REG__2_HI(uint32_t val)
+{
+ return ((val) << A6XX_CP_SET_PSEUDO_REG__2_HI__SHIFT) & A6XX_CP_SET_PSEUDO_REG__2_HI__MASK;
+}
+
+#define REG_A6XX_CP_REG_TEST_0 0x00000000
+#define A6XX_CP_REG_TEST_0_REG__MASK 0x0003ffff
+#define A6XX_CP_REG_TEST_0_REG__SHIFT 0
+static inline uint32_t A6XX_CP_REG_TEST_0_REG(uint32_t val)
+{
+ return ((val) << A6XX_CP_REG_TEST_0_REG__SHIFT) & A6XX_CP_REG_TEST_0_REG__MASK;
+}
+#define A6XX_CP_REG_TEST_0_BIT__MASK 0x01f00000
+#define A6XX_CP_REG_TEST_0_BIT__SHIFT 20
+static inline uint32_t A6XX_CP_REG_TEST_0_BIT(uint32_t val)
+{
+ return ((val) << A6XX_CP_REG_TEST_0_BIT__SHIFT) & A6XX_CP_REG_TEST_0_BIT__MASK;
+}
+#define A6XX_CP_REG_TEST_0_SKIP_WAIT_FOR_ME 0x02000000
+#define A6XX_CP_REG_TEST_0_PRED_BIT__MASK 0x7c000000
+#define A6XX_CP_REG_TEST_0_PRED_BIT__SHIFT 26
+static inline uint32_t A6XX_CP_REG_TEST_0_PRED_BIT(uint32_t val)
+{
+ return ((val) << A6XX_CP_REG_TEST_0_PRED_BIT__SHIFT) & A6XX_CP_REG_TEST_0_PRED_BIT__MASK;
+}
+#define A6XX_CP_REG_TEST_0_PRED_UPDATE 0x80000000
+
+#define REG_A6XX_CP_REG_TEST_PRED_MASK 0x00000001
+
+#define REG_A6XX_CP_REG_TEST_PRED_VAL 0x00000002
+
+#define REG_CP_COND_REG_EXEC_0 0x00000000
+#define CP_COND_REG_EXEC_0_REG0__MASK 0x0003ffff
+#define CP_COND_REG_EXEC_0_REG0__SHIFT 0
+static inline uint32_t CP_COND_REG_EXEC_0_REG0(uint32_t val)
+{
+ return ((val) << CP_COND_REG_EXEC_0_REG0__SHIFT) & CP_COND_REG_EXEC_0_REG0__MASK;
+}
+#define CP_COND_REG_EXEC_0_PRED_BIT__MASK 0x007c0000
+#define CP_COND_REG_EXEC_0_PRED_BIT__SHIFT 18
+static inline uint32_t CP_COND_REG_EXEC_0_PRED_BIT(uint32_t val)
+{
+ return ((val) << CP_COND_REG_EXEC_0_PRED_BIT__SHIFT) & CP_COND_REG_EXEC_0_PRED_BIT__MASK;
+}
+#define CP_COND_REG_EXEC_0_BINNING 0x02000000
+#define CP_COND_REG_EXEC_0_GMEM 0x04000000
+#define CP_COND_REG_EXEC_0_SYSMEM 0x08000000
+#define CP_COND_REG_EXEC_0_MODE__MASK 0xf0000000
+#define CP_COND_REG_EXEC_0_MODE__SHIFT 28
+static inline uint32_t CP_COND_REG_EXEC_0_MODE(enum compare_mode val)
+{
+ return ((val) << CP_COND_REG_EXEC_0_MODE__SHIFT) & CP_COND_REG_EXEC_0_MODE__MASK;
+}
+
+#define REG_CP_COND_REG_EXEC_1 0x00000001
+#define CP_COND_REG_EXEC_1_DWORDS__MASK 0xffffffff
+#define CP_COND_REG_EXEC_1_DWORDS__SHIFT 0
+static inline uint32_t CP_COND_REG_EXEC_1_DWORDS(uint32_t val)
+{
+ return ((val) << CP_COND_REG_EXEC_1_DWORDS__SHIFT) & CP_COND_REG_EXEC_1_DWORDS__MASK;
+}
+
+#define REG_CP_COND_EXEC_0 0x00000000
+#define CP_COND_EXEC_0_ADDR0_LO__MASK 0xffffffff
+#define CP_COND_EXEC_0_ADDR0_LO__SHIFT 0
+static inline uint32_t CP_COND_EXEC_0_ADDR0_LO(uint32_t val)
+{
+ return ((val) << CP_COND_EXEC_0_ADDR0_LO__SHIFT) & CP_COND_EXEC_0_ADDR0_LO__MASK;
+}
+
+#define REG_CP_COND_EXEC_1 0x00000001
+#define CP_COND_EXEC_1_ADDR0_HI__MASK 0xffffffff
+#define CP_COND_EXEC_1_ADDR0_HI__SHIFT 0
+static inline uint32_t CP_COND_EXEC_1_ADDR0_HI(uint32_t val)
+{
+ return ((val) << CP_COND_EXEC_1_ADDR0_HI__SHIFT) & CP_COND_EXEC_1_ADDR0_HI__MASK;
+}
+
+#define REG_CP_COND_EXEC_2 0x00000002
+#define CP_COND_EXEC_2_ADDR1_LO__MASK 0xffffffff
+#define CP_COND_EXEC_2_ADDR1_LO__SHIFT 0
+static inline uint32_t CP_COND_EXEC_2_ADDR1_LO(uint32_t val)
+{
+ return ((val) << CP_COND_EXEC_2_ADDR1_LO__SHIFT) & CP_COND_EXEC_2_ADDR1_LO__MASK;
+}
+
+#define REG_CP_COND_EXEC_3 0x00000003
+#define CP_COND_EXEC_3_ADDR1_HI__MASK 0xffffffff
+#define CP_COND_EXEC_3_ADDR1_HI__SHIFT 0
+static inline uint32_t CP_COND_EXEC_3_ADDR1_HI(uint32_t val)
+{
+ return ((val) << CP_COND_EXEC_3_ADDR1_HI__SHIFT) & CP_COND_EXEC_3_ADDR1_HI__MASK;
+}
+
+#define REG_CP_COND_EXEC_4 0x00000004
+#define CP_COND_EXEC_4_REF__MASK 0xffffffff
+#define CP_COND_EXEC_4_REF__SHIFT 0
+static inline uint32_t CP_COND_EXEC_4_REF(uint32_t val)
+{
+ return ((val) << CP_COND_EXEC_4_REF__SHIFT) & CP_COND_EXEC_4_REF__MASK;
+}
+
+#define REG_CP_COND_EXEC_5 0x00000005
+#define CP_COND_EXEC_5_DWORDS__MASK 0xffffffff
+#define CP_COND_EXEC_5_DWORDS__SHIFT 0
+static inline uint32_t CP_COND_EXEC_5_DWORDS(uint32_t val)
+{
+ return ((val) << CP_COND_EXEC_5_DWORDS__SHIFT) & CP_COND_EXEC_5_DWORDS__MASK;
+}
+
+#define REG_CP_SET_CTXSWITCH_IB_0 0x00000000
+#define CP_SET_CTXSWITCH_IB_0_ADDR_LO__MASK 0xffffffff
+#define CP_SET_CTXSWITCH_IB_0_ADDR_LO__SHIFT 0
+static inline uint32_t CP_SET_CTXSWITCH_IB_0_ADDR_LO(uint32_t val)
+{
+ return ((val) << CP_SET_CTXSWITCH_IB_0_ADDR_LO__SHIFT) & CP_SET_CTXSWITCH_IB_0_ADDR_LO__MASK;
+}
+
+#define REG_CP_SET_CTXSWITCH_IB_1 0x00000001
+#define CP_SET_CTXSWITCH_IB_1_ADDR_HI__MASK 0xffffffff
+#define CP_SET_CTXSWITCH_IB_1_ADDR_HI__SHIFT 0
+static inline uint32_t CP_SET_CTXSWITCH_IB_1_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_SET_CTXSWITCH_IB_1_ADDR_HI__SHIFT) & CP_SET_CTXSWITCH_IB_1_ADDR_HI__MASK;
+}
+
+#define REG_CP_SET_CTXSWITCH_IB_2 0x00000002
+#define CP_SET_CTXSWITCH_IB_2_DWORDS__MASK 0x000fffff
+#define CP_SET_CTXSWITCH_IB_2_DWORDS__SHIFT 0
+static inline uint32_t CP_SET_CTXSWITCH_IB_2_DWORDS(uint32_t val)
+{
+ return ((val) << CP_SET_CTXSWITCH_IB_2_DWORDS__SHIFT) & CP_SET_CTXSWITCH_IB_2_DWORDS__MASK;
+}
+#define CP_SET_CTXSWITCH_IB_2_TYPE__MASK 0x00300000
+#define CP_SET_CTXSWITCH_IB_2_TYPE__SHIFT 20
+static inline uint32_t CP_SET_CTXSWITCH_IB_2_TYPE(enum ctxswitch_ib val)
+{
+ return ((val) << CP_SET_CTXSWITCH_IB_2_TYPE__SHIFT) & CP_SET_CTXSWITCH_IB_2_TYPE__MASK;
+}
+
+#define REG_CP_REG_WRITE_0 0x00000000
+#define CP_REG_WRITE_0_TRACKER__MASK 0x0000000f
+#define CP_REG_WRITE_0_TRACKER__SHIFT 0
+static inline uint32_t CP_REG_WRITE_0_TRACKER(enum reg_tracker val)
+{
+ return ((val) << CP_REG_WRITE_0_TRACKER__SHIFT) & CP_REG_WRITE_0_TRACKER__MASK;
+}
+
+#define REG_CP_REG_WRITE_1 0x00000001
+
+#define REG_CP_REG_WRITE_2 0x00000002
+
+#define REG_CP_SMMU_TABLE_UPDATE_0 0x00000000
+#define CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__MASK 0xffffffff
+#define CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__SHIFT 0
+static inline uint32_t CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(uint32_t val)
+{
+ return ((val) << CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__SHIFT) & CP_SMMU_TABLE_UPDATE_0_TTBR0_LO__MASK;
+}
+
+#define REG_CP_SMMU_TABLE_UPDATE_1 0x00000001
+#define CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__MASK 0x0000ffff
+#define CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__SHIFT 0
+static inline uint32_t CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(uint32_t val)
+{
+ return ((val) << CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__SHIFT) & CP_SMMU_TABLE_UPDATE_1_TTBR0_HI__MASK;
+}
+#define CP_SMMU_TABLE_UPDATE_1_ASID__MASK 0xffff0000
+#define CP_SMMU_TABLE_UPDATE_1_ASID__SHIFT 16
+static inline uint32_t CP_SMMU_TABLE_UPDATE_1_ASID(uint32_t val)
+{
+ return ((val) << CP_SMMU_TABLE_UPDATE_1_ASID__SHIFT) & CP_SMMU_TABLE_UPDATE_1_ASID__MASK;
+}
+
+#define REG_CP_SMMU_TABLE_UPDATE_2 0x00000002
+#define CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__MASK 0xffffffff
+#define CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__SHIFT 0
+static inline uint32_t CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(uint32_t val)
+{
+ return ((val) << CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__SHIFT) & CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR__MASK;
+}
+
+#define REG_CP_SMMU_TABLE_UPDATE_3 0x00000003
+#define CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__MASK 0xffffffff
+#define CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__SHIFT 0
+static inline uint32_t CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(uint32_t val)
+{
+ return ((val) << CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__SHIFT) & CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK__MASK;
+}
+
+#define REG_CP_START_BIN_BIN_COUNT 0x00000000
+
+#define REG_CP_START_BIN_PREFIX_ADDR 0x00000001
+
+#define REG_CP_START_BIN_PREFIX_DWORDS 0x00000003
+
+#define REG_CP_START_BIN_BODY_DWORDS 0x00000004
+
+#define REG_CP_WAIT_TIMESTAMP_0 0x00000000
+
+#define REG_CP_WAIT_TIMESTAMP_ADDR 0x00000001
+
+#define REG_CP_WAIT_TIMESTAMP_TIMESTAMP 0x00000003
+
+#define REG_CP_THREAD_CONTROL_0 0x00000000
+#define CP_THREAD_CONTROL_0_THREAD__MASK 0x00000003
+#define CP_THREAD_CONTROL_0_THREAD__SHIFT 0
+static inline uint32_t CP_THREAD_CONTROL_0_THREAD(enum cp_thread val)
+{
+ return ((val) << CP_THREAD_CONTROL_0_THREAD__SHIFT) & CP_THREAD_CONTROL_0_THREAD__MASK;
+}
+#define CP_THREAD_CONTROL_0_CONCURRENT_BIN_DISABLE 0x08000000
+#define CP_THREAD_CONTROL_0_SYNC_THREADS 0x80000000
+
+
+#endif /* ADRENO_PM4_XML */